Dataset schema:
- ext: string (9 distinct values)
- sha: string (exactly 40 characters)
- content: string (3 to 1.04M characters)
py
1a46b3090f5495caef113388693122270eb59d0c
from StringIO import StringIO from mimetypes import guess_all_extensions, guess_type import zipfile import logging import os from django.contrib.auth.decorators import login_required import json from django.core.urlresolvers import reverse from django.utils.decorators import method_decorator from django.views.generic import View, TemplateView from couchdbkit.exceptions import ResourceNotFound from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseServerError, HttpResponseBadRequest from django.shortcuts import render from corehq.apps.app_manager.decorators import safe_download, require_can_edit_apps from corehq.apps.app_manager.view_helpers import ApplicationViewMixin from corehq.apps.app_manager.models import get_app, RemoteApp from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache from corehq.apps.hqmedia.controller import MultimediaBulkUploadController, MultimediaImageUploadController, MultimediaAudioUploadController, MultimediaVideoUploadController from corehq.apps.hqmedia.decorators import login_with_permission_from_post from corehq.apps.hqmedia.models import CommCareImage, CommCareAudio, CommCareMultimedia, MULTIMEDIA_PREFIX, CommCareVideo from corehq.apps.hqmedia.tasks import process_bulk_upload_zip from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from corehq.util.zip_utils import DownloadZip from dimagi.utils.decorators.memoized import memoized from dimagi.utils.django.cached_object import CachedObject from soil.util import expose_download from django.utils.translation import ugettext as _ class BaseMultimediaView(ApplicationViewMixin, View): @method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post())) def dispatch(self, request, *args, **kwargs): return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs) class BaseMultimediaTemplateView(BaseMultimediaView, TemplateView): """ The base view for all the multimedia templates. 
""" @property def page_context(self): return {} def get_context_data(self, **kwargs): context = { "domain": self.domain, "app": self.app, } context.update(self.page_context) return context def render_to_response(self, context, **response_kwargs): return render(self.request, self.template_name, context) @require_can_edit_apps def search_for_media(request, domain, app_id): media_type = request.GET['t'] if media_type == 'Image': files = CommCareImage.search(request.GET['q']) elif media_type == 'Audio': files = CommCareAudio.search(request.GET['q']) else: raise Http404() return HttpResponse(json.dumps([ {'url': i.url(), 'licenses': [license.display_name for license in i.licenses], 'tags': [tag for tags in i.tags.values() for tag in tags], 'm_id': i._id} for i in files])) @require_can_edit_apps def choose_media(request, domain, app_id): # TODO: Add error handling app = get_app(domain, app_id) media_type = request.POST['media_type'] media_id = request.POST['id'] if media_type == 'Image': file = CommCareImage.get(media_id) elif media_type == 'Audio': file = CommCareImage.get(media_id) else: raise Http404() if file is None or not file.is_shared: return HttpResponse(json.dumps({ 'match_found': False })) file.add_domain(domain) app.create_mapping(file, request.POST['path']) if media_type == 'Image': return HttpResponse(json.dumps({ 'match_found': True, 'image': {'m_id': file._id, 'url': file.url()}, 'file': True })) elif media_type == 'Audio': return HttpResponse(json.dumps({'match_found': True, 'audio': {'m_id': file._id, 'url': file.url()}})) else: raise Http404() @require_can_edit_apps def media_urls(request, domain, app_id): # IS THIS USED????? # I rewrote it so it actually produces _something_, but is it useful??? app = get_app(domain, app_id) # todo remove get_media_references multimedia = app.get_media_references() pathUrls = {} for section, types in multimedia['references'].items(): for media_type, info in types.items(): for m in info['maps']: if m.get('path'): pathUrls[m['path']] = m return HttpResponse(json.dumps(pathUrls)) def media_from_path(request, domain, app_id, file_path): # Not sure what the intentions were for this. I didn't see it getting used anywhere. # Rewrote it to use new media refs. # Yedi, care to comment? 
app = get_app(domain, app_id) if isinstance(app, RemoteApp): raise Http404('Media not yet available for remote apps') # todo remove get_media_references multimedia = app.get_media_references() for section, types in multimedia['references'].items(): for media_type, info in types.items(): for media_map in info['maps']: # [10:] is to remove the 'jr://file/' if media_map['path'][10:] == file_path and media_map.get('url'): return HttpResponseRedirect(media_map['url']) raise Http404('No Media Found') class BaseMultimediaUploaderView(BaseMultimediaTemplateView): @property def page_context(self): return { 'uploaders': self.upload_controllers, "sessionid": self.request.COOKIES.get('sessionid'), } @property def upload_controllers(self): """ Return a list of Upload Controllers """ raise NotImplementedError("You must specify a list of upload controllers") class MultimediaReferencesView(BaseMultimediaUploaderView): name = "hqmedia_references" template_name = "hqmedia/references.html" @property def page_context(self): context = super(MultimediaReferencesView, self).page_context if self.app is None: raise Http404(self) context.update({ "references": self.app.get_references(), "object_map": self.app.get_object_map(), "totals": self.app.get_reference_totals(), "sessionid": self.request.COOKIES.get('sessionid'), }) return context @property def upload_controllers(self): return [ MultimediaImageUploadController("hqimage", reverse(ProcessImageFileUploadView.name, args=[self.domain, self.app_id])), MultimediaAudioUploadController("hqaudio", reverse(ProcessAudioFileUploadView.name, args=[self.domain, self.app_id])), MultimediaVideoUploadController("hqvideo", reverse(ProcessVideoFileUploadView.name, args=[self.domain, self.app_id])), ] class BulkUploadMultimediaView(BaseMultimediaUploaderView): name = "hqmedia_bulk_upload" template_name = "hqmedia/bulk_upload.html" @property def upload_controllers(self): return [MultimediaBulkUploadController("hqmedia_bulk", reverse(ProcessBulkUploadView.name, args=[self.domain, self.app_id]))] class BadMediaFileException(Exception): pass class BaseProcessUploadedView(BaseMultimediaView): @property def username(self): return self.request.couch_user.username if self.request.couch_user else None @property def share_media(self): return self.request.POST.get('shared') == 't' @property def license_used(self): return self.request.POST.get('license', '') @property def author(self): return self.request.POST.get('author', '') @property def attribution_notes(self): return self.request.POST.get('attribution-notes', '') @property @memoized def uploaded_file(self): return self.request.FILES.get('Filedata') @property @memoized def mime_type(self): try: data = self.uploaded_file.file.read() return CommCareMultimedia.get_mime_type(data, filename=self.uploaded_file.name) except Exception as e: raise BadMediaFileException("There was an error fetching the MIME type of your file. 
Error: %s" % e) def get(self, request, *args, **kwargs): return HttpResponseBadRequest("You may only post to this URL.") def post(self, request, *args, **kwargs): self.errors = [] response = {} try: self.validate_file() response.update(self.process_upload()) except BadMediaFileException as e: self.errors.append(e.message) response.update({ 'errors': self.errors, }) return HttpResponse(json.dumps(response)) def validate_file(self): raise NotImplementedError("You must validate your uploaded file!") def process_upload(self): raise NotImplementedError("You definitely need to implement this guy.") class ProcessBulkUploadView(BaseProcessUploadedView): name = "hqmedia_uploader_bulk" @property @memoized def uploaded_zip(self): try: self.uploaded_file.file.seek(0) return zipfile.ZipFile(self.uploaded_file) except Exception as e: raise BadMediaFileException("There was an issue processing the zip file you provided. Error: %s" % e) def validate_file(self): if not self.mime_type in self.valid_mime_types(): raise BadMediaFileException("Your zip file doesn't have a valid mimetype.") if not self.uploaded_zip: raise BadMediaFileException("There is no ZIP file.") if self.uploaded_zip.testzip(): raise BadMediaFileException("The ZIP file provided was bad.") def process_upload(self): # save the file w/ soil self.uploaded_file.file.seek(0) saved_file = expose_download(self.uploaded_file.file.read(), expiry=BulkMultimediaStatusCache.cache_expiry) processing_id = saved_file.download_id status = BulkMultimediaStatusCache(processing_id) status.save() process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id, username=self.username, share_media=self.share_media, license_name=self.license_used, author=self.author, attribution_notes=self.attribution_notes) return status.get_response() @classmethod def valid_mime_types(cls): return [ 'application/zip', 'application/x-zip', 'application/octet-stream', 'application/x-zip-compressed', ] class BaseProcessFileUploadView(BaseProcessUploadedView): media_class = None @property def form_path(self): return self.request.POST.get('path', '') def validate_file(self): def file_ext(filename): _, extension = os.path.splitext(filename) return extension def possible_extensions(filename): possible_type = guess_type(filename)[0] if not possible_type: return [] return guess_all_extensions(guess_type(filename)[0]) if not self.mime_type: raise BadMediaFileException("Did not process a mime type!") base_type = self.mime_type.split('/')[0] if base_type not in self.valid_base_types(): raise BadMediaFileException("Not a valid %s file." % self.media_class.get_nice_name().lower()) ext = file_ext(self.uploaded_file.name) if ext.lower() not in possible_extensions(self.form_path): raise BadMediaFileException("File %s has an incorrect file type (%s)." 
% (self.uploaded_file.name, ext)) def process_upload(self): self.uploaded_file.file.seek(0) data = self.uploaded_file.file.read() multimedia = self.media_class.get_by_data(data) multimedia.attach_data(data, original_filename=self.uploaded_file.name, username=self.username) multimedia.add_domain(self.domain, owner=True) if self.share_media: multimedia.update_or_add_license(self.domain, type=self.license_used, author=self.author, attribution_notes=self.attribution_notes) self.app.create_mapping(multimedia, self.form_path) return { 'ref': multimedia.get_media_info(self.form_path), } @classmethod def valid_base_types(cls): raise NotImplementedError("You need to specify a list of valid base mime types!") class ProcessImageFileUploadView(BaseProcessFileUploadView): media_class = CommCareImage name = "hqmedia_uploader_image" @classmethod def valid_base_types(cls): return ['image'] class ProcessAudioFileUploadView(BaseProcessFileUploadView): media_class = CommCareAudio name = "hqmedia_uploader_audio" @classmethod def valid_base_types(cls): return ['audio'] class ProcessVideoFileUploadView(BaseProcessFileUploadView): media_class = CommCareVideo name = "hqmedia_uploader_video" @classmethod def valid_base_types(cls): return ['video'] class CheckOnProcessingFile(BaseMultimediaView): name = "hqmedia_check_processing" def get(self, request, *args, **kwargs): return HttpResponse("workin on it") def _iter_media_files(media_objects): """ take as input the output of get_media_objects and return an iterator of (path, data) tuples for the media files as they should show up in the .zip as well as a list of error messages as a side effect of implementation, errors will not include all error messages until the iterator is exhausted """ errors = [] def _media_files(): for path, media in media_objects: try: data, _ = media.get_display_file() folder = path.replace(MULTIMEDIA_PREFIX, "") if not isinstance(data, unicode): yield os.path.join(folder), data except NameError as e: errors.append("%(path)s produced an ERROR: %(error)s" % { 'path': path, 'error': e, }) return _media_files(), errors class DownloadMultimediaZip(DownloadZip, ApplicationViewMixin): """ This is where the Multimedia for an application gets generated. Expects domain and app_id to be in its args """ name = "download_multimedia_zip" compress_zip = False zip_name = 'commcare.zip' def iter_files(self): self.app.remove_unused_mappings() return _iter_media_files(self.app.get_media_objects()) def check_before_zipping(self): if not self.app.multimedia_map: return HttpResponse("You have no multimedia to download.") def log_errors(self, errors): logging.error( "Error downloading multimedia ZIP " "for domain %s and application %s." 
% ( self.domain, self.app_id) ) return HttpResponseServerError( "Errors were encountered while " "retrieving media for this application.<br /> %s" % ( "<br />".join(errors)) ) @method_decorator(safe_download) def dispatch(self, request, *args, **kwargs): return super(DownloadMultimediaZip, self).dispatch(request, *args, **kwargs) class MultimediaUploadStatusView(View): name = "hqmedia_upload_status" @property @memoized def processing_id(self): return self.request.POST.get('processing_id') @method_decorator(login_required) def dispatch(self, request, *args, **kwargs): return super(MultimediaUploadStatusView, self).dispatch(request, *args, **kwargs) def get(self, request, *args, **kwargs): return HttpResponseBadRequest("Please post to this.") def post(self, request, *args, **kwargs): if not self.processing_id: return HttpResponseBadRequest("A processing_id is required.") status = BulkMultimediaStatusCache.get(self.processing_id) if status is None: # No status could be retrieved from the cache fake_status = BulkMultimediaStatusCache(self.processing_id) fake_status.complete = True fake_status.errors.append(_('There was an issue retrieving the status from the cache. ' 'We are looking into it. Please try uploading again.')) logging.error("[Multimedia Bulk Upload] Process ID #%s encountered an issue while retrieving " "a status from the cache." % self.processing_id) response = fake_status.get_response() else: response = status.get_response() return HttpResponse(json.dumps(response)) class ViewMultimediaFile(View): name = "hqmedia_download" @property @memoized def media_class(self): media_type = self.kwargs.get('media_type') try: return CommCareMultimedia.get_doc_class(media_type) except KeyError: raise Http404("Could not find media of that type.") @property @memoized def doc_id(self): return self.kwargs.get('doc_id') @property @memoized def multimedia(self): try: return self.media_class.get(self.doc_id) except ResourceNotFound: raise Http404("Media not found.") @property @memoized def thumb(self): thumb = self.request.GET.get('thumb') try: return int(thumb), int(thumb) except Exception: return None def get(self, request, *args, **kwargs): obj = CachedObject(str(self.doc_id) + ':' + self.kwargs.get('media_type') + ':' + str(self.thumb)) if not obj.is_cached(): data, content_type = self.multimedia.get_display_file() if self.thumb: data = CommCareImage.get_thumbnail_data(data, self.thumb) buffer = StringIO(data) metadata = {'content_type': content_type} obj.cache_put(buffer, metadata, timeout=0) else: metadata, buffer = obj.get() data = buffer.getvalue() content_type = metadata['content_type'] return HttpResponse(data, mimetype=content_type)
py
1a46b34a112e7d40ebd6fc3525538ef7d8d370cb
import sys import os SUMMARYSTUFF = """ ## Contents {:.no_toc} * {: toc} """ filetoread = sys.argv[1] fdtoread = open(filetoread) fileprefix = ".".join(filetoread.split('.')[:-1]) filetowrite = fileprefix+".newmd" buffer = "" for line in fdtoread: if line[0:2]=='# ':#assume title title = line.strip()[2:] else: buffer = buffer + line fdtoread.close() preamble = "title: {}\nnotebook: {}\n".format(title, fileprefix+".ipynb" ) preamble = "---\n"+preamble+"---\n" fdtowrite=open(filetowrite, "w") summarystuff = SUMMARYSTUFF fdtowrite.write(preamble+summarystuff+buffer) fdtowrite.close() os.rename(filetowrite, filetoread)
py
1a46b467dd3a3fd125e4b07d7c98f2f7d143c124
"""Worker pool executor base classes.""" import numbers import os import threading import time import datetime import pprint import traceback from schema import Or, And from testplan.common.config import ConfigOption, validate_func from testplan.common import entity from testplan.common.utils.thread import interruptible_join from testplan.common.utils.strings import Color from testplan.common.utils.timing import wait_until_predicate from testplan.runners.base import Executor, ExecutorConfig from .communication import Message from .connection import QueueClient, QueueServer from .tasks import Task, TaskResult from testplan.common.entity import ResourceStatus class WorkerConfig(entity.ResourceConfig): """ Configuration object for :py:class:`~testplan.runners.pools.base.Worker` resource entity. """ @classmethod def get_options(cls): """ Schema for options validation and assignment of default values. """ return { "index": Or(int, str), ConfigOption("transport", default=QueueClient): object, ConfigOption("restart_count", default=3): int, } class Worker(entity.Resource): """ Worker resource that pulls tasks from the transport provided, executes them and sends back task results. :param index: Worker index id. :type index: ``int`` or ``str`` :param transport: Transport class for pool/worker communication. :type transport: :py:class:`~testplan.runners.pools.connection.Client` :param restart_count: How many times the worker had restarted. :type restart_count: ``int`` Also inherits all :py:class:`~testplan.common.entity.base.Resource` options. """ CONFIG = WorkerConfig def __init__(self, **options): super(Worker, self).__init__(**options) self._metadata = None self._transport = self.cfg.transport() self._handler = None self.last_heartbeat = None self.assigned = set() self.requesting = 0 self.restart_count = self.cfg.restart_count @property def handler(self): return self._handler @property def transport(self): """Pool/Worker communication transport.""" return self._transport @property def metadata(self): """Worker metadata information.""" if not self._metadata: self._metadata = { "thread": threading.current_thread(), "index": self.cfg.index, } return self._metadata @property def outfile(self): """Stdout file.""" return os.path.join( self.parent.runpath, "{}_startup".format(self.cfg.index) ) def uid(self): """Worker unique index.""" return self.cfg.index def starting(self): """Starts the daemonic worker loop.""" self.make_runpath_dirs() self._handler = threading.Thread( target=self._loop, args=(self._transport,) ) self._handler.daemon = True self._handler.start() self.status.change(self.STATUS.STARTED) def stopping(self): """Stops the worker.""" if self._handler: interruptible_join(self._handler) self._handler = None self.status.change(self.STATUS.STOPPED) def aborting(self): """Aborting logic, will not wait running tasks.""" self._transport.disconnect() @property def is_alive(self): """Poll the loop handler thread to check it is running as expected.""" return self._handler.is_alive() def _loop(self, transport): message = Message(**self.metadata) while self.active and self.status.tag not in ( self.status.STOPPING, self.status.STOPPED, ): received = transport.send_and_receive( message.make(message.TaskPullRequest, data=1) ) if received is None or received.cmd == Message.Stop: break elif received.cmd == Message.TaskSending: results = [] for item in received.data: results.append(self.execute(item)) transport.send_and_receive( message.make(message.TaskResults, data=results), expect=message.Ack, ) elif 
received.cmd == Message.Ack: pass time.sleep(self.cfg.active_loop_sleep) def execute(self, task): """ Executes a task and return the associated task result. :param task: Task that worker pulled for execution. :type task: :py:class:`~testplan.runners.pools.tasks.base.Task` :return: Task result. :rtype: :py:class:`~testplan.runners.pools.tasks.base.TaskResult` """ try: target = task.materialize() if isinstance(target, entity.Runnable): if not target.parent: target.parent = self if not target.cfg.parent: target.cfg.parent = self.cfg result = target.run() elif callable(target): result = target() else: result = target.run() except BaseException: task_result = TaskResult( task=task, result=None, status=False, reason=traceback.format_exc(), ) else: task_result = TaskResult(task=task, result=result, status=True) return task_result def respond(self, msg): """ Method that the pool uses to respond with a message to the worker. :param msg: Response message. :type msg: :py:class:`~testplan.runners.pools.communication.Message` """ self._transport.respond(msg) def __repr__(self): return "{}[{}]".format(self.__class__.__name__, self.cfg.index) def default_check_reschedule(pool, task_result): """ Determines if a task should be rescheduled based on the task result info. """ return False class PoolConfig(ExecutorConfig): """ Configuration object for :py:class:`~testplan.runners.pools.base.Pool` executor resource entity. """ @classmethod def get_options(cls): """ Schema for options validation and assignment of default values. """ return { "name": str, ConfigOption("size", default=4): And(int, lambda x: x > 0), ConfigOption("worker_type", default=Worker): object, ConfigOption("worker_heartbeat", default=None): Or( int, float, None ), ConfigOption("heartbeats_miss_limit", default=3): int, ConfigOption("task_retries_limit", default=3): int, ConfigOption("max_active_loop_sleep", default=5): numbers.Number, ConfigOption("restart_count", default=3): int, } class Pool(Executor): """ Pool task executor object that initializes workers and dispatches tasks. :param name: Pool name. :type name: ``str`` :param size: Pool workers size. Default: 4 :type size: ``int`` :param worker_type: Type of worker to be initialized. :type worker_type: :py:class:`~testplan.runners.pools.base.Worker` :param worker_heartbeat: Worker heartbeat period. :type worker_heartbeat: ``int`` or ``float`` or ``NoneType`` :param heartbeats_miss_limit: Maximum times a heartbeat is missed. :type heartbeats_miss_limit: ``int`` :param task_retries_limit: Maximum times a task can be re-assigned to pool. :type task_retries_limit: ``int`` :param max_active_loop_sleep: Maximum value for delay logic in active sleep. :type max_active_loop_sleep: ``int`` or ``float`` :param restart_count: How many times the pool had restarted. :type restart_count: ``int`` Also inherits all :py:class:`~testplan.runners.base.Executor` options. 
""" CONFIG = PoolConfig CONN_MANAGER = QueueServer def __init__( self, name, size=4, worker_type=Worker, worker_heartbeat=None, heartbeats_miss_limit=3, task_retries_limit=3, max_active_loop_sleep=5, restart_count=3, **options ): options.update(self.filter_locals(locals())) super(Pool, self).__init__(**options) self.unassigned = [] # unassigned tasks self.task_assign_cnt = {} # uid: times_assigned self.should_reschedule = default_check_reschedule self._workers = entity.Environment(parent=self) self._workers_last_result = {} self._conn = self.CONN_MANAGER() self._conn.parent = self self._pool_lock = threading.Lock() self._metadata = None # Set when Pool is started. self._exit_loop = False self._start_monitor_thread = True # Methods for handling different Message types. These are expected to # take the worker, request and response objects as the only required # positional args. self._request_handlers = { Message.ConfigRequest: self._handle_cfg_request, Message.TaskPullRequest: self._handle_taskpull_request, Message.TaskResults: self._handle_taskresults, Message.Heartbeat: self._handle_heartbeat, Message.SetupFailed: self._handle_setupfailed, } def uid(self): """Pool name.""" return self.cfg.name def add(self, task, uid): """ Add a task for execution. :param task: Task to be scheduled to workers. :type task: :py:class:`~testplan.runners.pools.tasks.base.Task` :param uid: Task uid. :type uid: ``str`` """ if not isinstance(task, Task): raise ValueError( "Task was expected, got {} instead.".format(type(task)) ) super(Pool, self).add(task, uid) self.unassigned.append(uid) def set_reschedule_check(self, check_reschedule): """ Sets callable with custom rules to determine if a task should be rescheduled. It must accept the pool object and the task result, and based on these it returns if the task should be rescheduled (i.e due to a known rare system error). :param check_reschedule: Custom callable for task reschedule. :type check_reschedule: ``callable`` that takes ``pool``, ``task_result`` arguments. :return: True if Task should be rescheduled else False. :rtype: ``bool`` """ validate_func("pool", "task_result")(check_reschedule) self.should_reschedule = check_reschedule def _loop(self): """ Main executor work loop - runs in a seperate thread when the Pool is started. """ if self._start_monitor_thread: self.logger.debug("Starting worker monitor thread.") self._worker_monitor = threading.Thread( target=self._workers_monitoring ) self._worker_monitor.daemon = True self._worker_monitor.start() while self.active and not self._exit_loop: msg = self._conn.accept() if msg: try: self.logger.debug("Received message from worker: %s.", msg) self.handle_request(msg) except Exception: self.logger.error(traceback.format_exc()) time.sleep(self.cfg.active_loop_sleep) def handle_request(self, request): """ Handles a worker request. I.e TaskPull, TaskResults, Heartbeat etc. :param request: Worker request. 
:type request: :py:class:`~testplan.runners.pools.communication.Message` """ sender_index = request.sender_metadata["index"] worker = self._workers[sender_index] if not worker.active: self.logger.warning( "Message {} - {} from inactive worker {}".format( request.cmd, request.data, worker ) ) self.logger.debug( "Pool {} request received by {} - {}, {}".format( self.cfg.name, worker, request.cmd, request.data ) ) response = Message(**self._metadata) if not self.active or self.status.tag == self.STATUS.STOPPING: worker.respond(response.make(Message.Stop)) elif request.cmd in self._request_handlers: self._request_handlers[request.cmd](worker, request, response) else: self.logger.error( "Unknown request: {} {} {} {}".format( request, dir(request), request.cmd, request.data ) ) worker.respond(response.make(Message.Ack)) def _handle_cfg_request(self, worker, _, response): """Handle a ConfigRequest from a worker.""" options = [] cfg = self.cfg while cfg: options.append(cfg.denormalize()) cfg = cfg.parent worker.respond(response.make(Message.ConfigSending, data=options)) def _handle_taskpull_request(self, worker, request, response): """Handle a TaskPullRequest from a worker.""" tasks = [] if self.status.tag == self.status.STARTED: for _ in range(request.data): try: uid = self.unassigned.pop(0) except IndexError: break if uid not in self.task_assign_cnt: self.task_assign_cnt[uid] = 0 if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit: self._discard_task( uid, "{} already reached max retries: {}".format( self._input[uid], self.cfg.task_retries_limit ), ) continue else: self.task_assign_cnt[uid] += 1 task = self._input[uid] self.logger.test_info( "Scheduling {} to {}".format(task, worker) ) worker.assigned.add(uid) tasks.append(task) if tasks: worker.respond(response.make(Message.TaskSending, data=tasks)) worker.requesting = request.data - len(tasks) return worker.requesting = request.data worker.respond(response.make(Message.Ack)) def _handle_taskresults(self, worker, request, response): """Handle a TaskResults message from a worker.""" worker.respond(response.make(Message.Ack)) for task_result in request.data: uid = task_result.task.uid() worker.assigned.remove(uid) if worker not in self._workers_last_result: self._workers_last_result[worker] = time.time() self.logger.test_info( "De-assign {} from {}".format(task_result.task, worker) ) if self.should_reschedule(self, task_result): if self.task_assign_cnt[uid] >= self.cfg.task_retries_limit: self.logger.test_info( "Will not reschedule %(input)s again as it " "reached max retries %(retries)d", { "input": self._input[uid], "retries": self.cfg.task_retries_limit, }, ) else: self.logger.test_info( "Rescheduling {} due to " "should_reschedule() cfg option of {}".format( task_result.task, self ) ) self.unassigned.append(uid) continue self._print_test_result(task_result) self._results[uid] = task_result self.ongoing.remove(uid) def _handle_heartbeat(self, worker, request, response): """Handle a Heartbeat message received from a worker.""" worker.last_heartbeat = time.time() self.logger.debug( "Received heartbeat from {} at {} after {}s.".format( worker, request.data, time.time() - request.data ) ) worker.respond(response.make(Message.Ack, data=worker.last_heartbeat)) def _handle_setupfailed(self, worker, request, response): """Handle a SetupFailed message received from a worker.""" self.logger.test_info( "Worker {} setup failed:{}{}".format( worker, os.linesep, request.data ) ) worker.respond(response.make(Message.Ack)) self._deco_worker(worker, 
"Aborting {}, setup failed.") def _deco_worker(self, worker, message): """ Decommission a worker by move all assigned task back to pool """ self.logger.critical(message.format(worker)) if os.path.exists(worker.outfile): self.logger.critical("\tlogfile: {}".format(worker.outfile)) while worker.assigned: uid = worker.assigned.pop() self.logger.test_info( "Re-assigning {} from {} to {}.".format( self._input[uid], worker, self ) ) self.unassigned.append(uid) def _workers_monitoring(self): """ Worker fault tolerance logic. Check is based on: 1) handler status 2) heartbeat if available """ previous_status = {"active": [], "inactive": [], "initializing": []} if self.cfg.worker_heartbeat: loop_interval = self.cfg.worker_heartbeat else: loop_interval = 5 # seconds break_outer_loop = False while self.active: hosts_status = {"active": [], "inactive": [], "initializing": []} for worker in self._workers: status, reason = self._query_worker_status(worker) if status == "inactive": with self._pool_lock: if self.active and self.status.tag not in ( self.status.STOPPING, self.status.STOPPED, ): if self._handle_inactive(worker, reason): status = "active" else: # if pool is aborting/stopping, exit monitor break_outer_loop = True break hosts_status[status].append(worker) if break_outer_loop: break if hosts_status != previous_status: self.logger.info( "%s Hosts status update", datetime.datetime.now() ) self.logger.info(pprint.pformat(hosts_status)) previous_status = hosts_status if ( not hosts_status["active"] and not hosts_status["initializing"] and hosts_status["inactive"] ): self.logger.critical( "All workers of {} are inactive.".format(self) ) self.abort() break try: # For early finish of worker monitoring thread. wait_until_predicate( lambda: not self.is_alive, timeout=loop_interval, interval=0.05, ) except RuntimeError: break def _query_worker_status(self, worker): """ Query the current status of a worker. If heartbeat monitoring is enabled, check the last heartbeat time is within threshold. :param worker: Pool worker to query :return: worker status string - one of 'initializing', 'inactive' or 'active', and an optional reason string """ if not worker.active or worker.status.tag in ( worker.status.STOPPING, worker.status.STOPPED, ): return "inactive", "Worker in stop/abort status" if worker.status.tag in (worker.status.NONE, worker.status.STARTING): return "initializing", None # else: worker must be in state STARTED if worker.status.tag != worker.status.STARTED: raise RuntimeError( "Worker in unexpected state {}".format(worker.status.tag) ) if not worker.is_alive: # handler based monitoring return ( "inactive", "Deco {}, handler no longer alive".format(worker), ) # If no heartbeart is configured, we treat the worker as "active" # since it is in state STARTED and its handler is alive. if not self.cfg.worker_heartbeat: return "active", None # else: do heartbeat based monitoring lag = time.time() - worker.last_heartbeat if lag > self.cfg.worker_heartbeat * self.cfg.heartbeats_miss_limit: return ( "inactive", "Has not been receiving heartbeat from {} for {} " "sec".format(worker, lag), ) return "active", None def _handle_inactive(self, worker, reason): """ Handle an inactive worker. 
:param worker: worker object :param reason: why worker is considered inactive :return: True if worker restarted, else False """ if worker.status.tag != worker.status.STARTED: return False self._deco_worker(worker, reason) if worker.restart_count: worker.restart_count -= 1 try: worker.restart() return True except Exception as exc: self.logger.critical( "Worker {} failed to restart: {}".format(worker, exc) ) else: worker.abort() return False def _discard_task(self, uid, reason): self.logger.critical( "Discard task {} of {} - {}.".format( self._input[uid], self, reason ) ) self._results[uid] = TaskResult( task=self._input[uid], status=False, reason="Task discarded by {} - {}.".format(self, reason), ) self.ongoing.remove(uid) def _discard_pending_tasks(self): self.logger.critical("Discard pending tasks of {}.".format(self)) while self.ongoing: uid = self.ongoing[0] self._results[uid] = TaskResult( task=self._input[uid], status=False, reason="Task [{}] discarding due to {} abort.".format( self._input[uid]._target, self ), ) self.ongoing.pop(0) def _print_test_result(self, task_result): if (not isinstance(task_result.result, entity.RunnableResult)) or ( not hasattr(task_result.result, "report") ): return # Currently prints report top level result and not details. name = task_result.result.report.name if task_result.result.report.passed is True: self.logger.test_info("{} -> {}".format(name, Color.green("Pass"))) else: self.logger.test_info("{} -> {}".format(name, Color.red("Fail"))) def _add_workers(self): """Initialise worker instances.""" for idx in (str(i) for i in range(self.cfg.size)): worker = self.cfg.worker_type( index=idx, restart_count=self.cfg.restart_count ) worker.parent = self worker.cfg.parent = self.cfg self._workers.add(worker, uid=idx) self.logger.debug( "Added worker %(index)s (outfile = %(outfile)s)", {"index": idx, "outfile": worker.outfile}, ) def _start_workers(self): """Start all workers of the pool""" for worker in self._workers: self._conn.register(worker) self._workers.start() def starting(self): """Starting the pool and workers.""" # TODO do we need a lock here? 
self.make_runpath_dirs() if self.runpath is None: raise RuntimeError("runpath was not set correctly") self._metadata = {"runpath": self.runpath} self._conn.start() for worker in self._workers: # reset worker (if any) status worker.status.change(ResourceStatus.STARTING) self._exit_loop = False super(Pool, self).starting() # start the loop & monitor if not self._workers: self._add_workers() self._start_workers() if self._workers.start_exceptions: for msg in self._workers.start_exceptions.values(): self.logger.error(msg) self.abort() raise RuntimeError( "All workers of {} failed to start.".format(self) ) self.status.change(self.status.STARTED) self.logger.debug("%s started.", self.__class__.__name__) def workers_requests(self): """Count how many tasks workers are requesting.""" return sum(worker.requesting for worker in self._workers) def _stop_workers(self): self._workers.stop() def stopping(self): """Stop connections and workers.""" with self._pool_lock: self._stop_workers() for worker in self._workers: worker.transport.disconnect() self._exit_loop = True super(Pool, self).stopping() # stop the loop and the monitor self._conn.stop() self.status.change(self.status.STOPPED) self.logger.debug("Stopped %s", self.__class__.__name__) def abort_dependencies(self): """Empty generator to override parent implementation.""" return yield def aborting(self): """Aborting logic.""" self.logger.debug("Aborting pool {}".format(self)) for worker in self._workers: worker.abort() super(Pool, self).stopping() # stop the loop and the monitor self._conn.abort() self._discard_pending_tasks() self.logger.debug("Aborted pool {}".format(self))
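A hedged scheduling sketch for the Pool executor above, following the usual testplan pattern of registering a pool as a plan resource and scheduling Task objects onto it. The target module/function names are placeholders, and whether the base Pool (rather than a concrete subclass such as a thread pool) is used directly may differ between testplan versions.

# Illustrative only: "tasks.make_multitest" is a placeholder target.
from testplan import test_plan
from testplan.runners.pools.base import Pool
from testplan.runners.pools.tasks import Task


@test_plan(name="PoolDemo")
def main(plan):
    pool = Pool(name="MyPool", size=4)  # the executor defined above
    plan.add_resource(pool)
    # Schedule a task onto the pool; a worker materializes and runs it.
    plan.schedule(Task(target="make_multitest", module="tasks"), resource="MyPool")


if __name__ == "__main__":
    main()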
py
1a46b49c5290a531017a82eddd46e5de5cc51ee1
from collections import MutableMapping from pyjexl.exceptions import MissingTransformError class Context(MutableMapping): def __init__(self, context_data=None): self.data = context_data or {} self.relative_value = {} def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): self.data[key] = value def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) def __len__(self): return len(self.data) def with_relative(self, relative_value): new_context = Context(self.data) new_context.relative_value = relative_value return new_context class Evaluator(object): def __init__(self, jexl_config): self.config = jexl_config def evaluate(self, expression, context=None): method = getattr(self, 'visit_' + type(expression).__name__, self.generic_visit) context = context or Context() return method(expression, context) def visit_BinaryExpression(self, exp, context): left = self.evaluate(exp.left, context) right = self.evaluate(exp.right, context) return exp.operator.evaluate(left, right) def visit_UnaryExpression(self, exp, context): right = self.evaluate(exp.right, context) return exp.operator.evaluate(right) def visit_Literal(self, literal, context): return literal.value def visit_Identifier(self, identifier, context): if identifier.relative: subject = context.relative_value elif identifier.subject: subject = self.evaluate(identifier.subject, context) else: subject = context return subject.get(identifier.value, None) def visit_ObjectLiteral(self, object_literal, context): return dict( (key, self.evaluate(value, context)) for key, value in object_literal.value.items() ) def visit_ArrayLiteral(self, array_literal, context): return [self.evaluate(value, context) for value in array_literal.value] def visit_Transform(self, transform, context): try: transform_func = self.config.transforms[transform.name] except KeyError: raise MissingTransformError( 'No transform found with the name "{name}"'.format(name=transform.name) ) args = [self.evaluate(arg) for arg in transform.args] return transform_func(self.evaluate(transform.subject, context), *args) def visit_FilterExpression(self, filter_expression, context): values = self.evaluate(filter_expression.subject, context) if filter_expression.relative: return [ value for value in values if self.evaluate(filter_expression.expression, context.with_relative(value)) ] else: filter_value = self.evaluate(filter_expression.expression, context) if filter_value is True: return values elif filter_value is False: return None else: try: return values[filter_value] except (IndexError, KeyError): return None def visit_ConditionalExpression(self, conditional, context): if self.evaluate(conditional.test, context): return self.evaluate(conditional.consequent, context) else: return self.evaluate(conditional.alternate, context) def generic_visit(self, expression, context): raise ValueError('Could not evaluate expression: ' + repr(expression))
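A small sketch of the Context behaviour above, assuming Context is importable from that module: it wraps a plain dict, supports item access through the MutableMapping interface, and with_relative() returns a sibling context sharing the same data but carrying a relative value for filter expressions.

ctx = Context({"user": {"name": "Ada"}})
ctx["threshold"] = 10            # MutableMapping write goes into ctx.data

rel = ctx.with_relative({"score": 42})
print(rel["user"])               # {'name': 'Ada'} -- same underlying data
print(rel.relative_value)        # {'score': 42} -- consulted for relative identifiers
print(len(ctx), list(ctx))       # 2, ['user', 'threshold']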
py
1a46b544f75e0b60359dd6e464e71096065428a2
import os
from typing import Optional
import time

from fastapi import FastAPI
from transformers import pipeline
from pydantic import BaseModel, PositiveInt, constr

import ray
from ray import serve

app = FastAPI()


class Request(BaseModel):
    text: constr(min_length=1, strip_whitespace=True)
    min_length: Optional[PositiveInt]
    max_length: Optional[PositiveInt]


@serve.deployment
@serve.ingress(app)
class Summarizer:
    def __init__(self):
        self.summarize = pipeline("summarization", model="t5-small")

    @app.post("/")
    def get_summary(self, payload: Request):
        summary_list = self.summarize(
            payload.text,
            min_length=payload.min_length or 0,
            max_length=payload.max_length or 256,
        )
        summary = summary_list[0]["summary_text"]
        return summary


ray.init(_node_ip_address="0.0.0.0")  # needed for gcloud container compatibility
serve.start(
    http_options={"host": "0.0.0.0", "port": int(os.environ.get("PORT", "8000"))}
)
Summarizer.deploy()

# Block the container process from exit
while True:
    time.sleep(5)
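A minimal client sketch for the Summarizer service above. The host, port, and the "/Summarizer/" route prefix are assumptions: the port matches the default in the snippet, and the prefix depends on how the Ray Serve version in use exposes the deployment. The payload fields mirror the Request model defined there.

import requests

payload = {
    "text": "Ray Serve exposes the summarization pipeline over HTTP. "
            "This client sends a passage and prints the returned summary.",
    "min_length": 5,
    "max_length": 50,
}
response = requests.post("http://localhost:8000/Summarizer/", json=payload)  # assumed URL
response.raise_for_status()
print(response.json())  # the endpoint returns the summary string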
py
1a46b6b25fdbabdd1e4e7837098b45859d36fbac
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import List, Optional from azure.core.exceptions import HttpResponseError import msrest.serialization class ChildProduct(msrest.serialization.Model): """The product documentation. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar const_property: Required. Constant string. Default value: "constant". :vartype const_property: str :param count: Count. :type count: int """ _validation = { 'const_property': {'required': True, 'constant': True}, } _attribute_map = { 'const_property': {'key': 'constProperty', 'type': 'str'}, 'count': {'key': 'count', 'type': 'int'}, } const_property = "constant" def __init__( self, *, count: Optional[int] = None, **kwargs ): super(ChildProduct, self).__init__(**kwargs) self.count = count class ConstantProduct(msrest.serialization.Model): """The product documentation. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar const_property: Required. Constant string. Default value: "constant". :vartype const_property: str :ivar const_property2: Required. Constant string2. Default value: "constant2". :vartype const_property2: str """ _validation = { 'const_property': {'required': True, 'constant': True}, 'const_property2': {'required': True, 'constant': True}, } _attribute_map = { 'const_property': {'key': 'constProperty', 'type': 'str'}, 'const_property2': {'key': 'constProperty2', 'type': 'str'}, } const_property = "constant" const_property2 = "constant2" def __init__( self, **kwargs ): super(ConstantProduct, self).__init__(**kwargs) class Error(msrest.serialization.Model): """Error. :param code: :type code: int :param message: :type message: str :param fields: :type fields: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'int'}, 'message': {'key': 'message', 'type': 'str'}, 'fields': {'key': 'fields', 'type': 'str'}, } def __init__( self, *, code: Optional[int] = None, message: Optional[str] = None, fields: Optional[str] = None, **kwargs ): super(Error, self).__init__(**kwargs) self.code = code self.message = message self.fields = fields class Product(msrest.serialization.Model): """The product documentation. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param display_names: Non required array of unique items from 0 to 6 elements. :type display_names: list[str] :param capacity: Non required int betwen 0 and 100 exclusive. :type capacity: int :param image: Image URL representing the product. :type image: str :param child: Required. The product documentation. :type child: ~validation.models.ChildProduct :param const_child: Required. The product documentation. :type const_child: ~validation.models.ConstantProduct :ivar const_int: Required. Constant int. Default value: "0". :vartype const_int: int :ivar const_string: Required. Constant string. 
Default value: "constant". :vartype const_string: str :ivar const_string_as_enum: Constant string as Enum. Default value: "constant_string_as_enum". :vartype const_string_as_enum: str """ _validation = { 'display_names': {'max_items': 6, 'min_items': 0, 'unique': True}, 'capacity': {'maximum_ex': 100, 'minimum_ex': 0}, 'image': {'pattern': r'http://\w+'}, 'child': {'required': True}, 'const_child': {'required': True}, 'const_int': {'required': True, 'constant': True}, 'const_string': {'required': True, 'constant': True}, 'const_string_as_enum': {'constant': True}, } _attribute_map = { 'display_names': {'key': 'display_names', 'type': '[str]'}, 'capacity': {'key': 'capacity', 'type': 'int'}, 'image': {'key': 'image', 'type': 'str'}, 'child': {'key': 'child', 'type': 'ChildProduct'}, 'const_child': {'key': 'constChild', 'type': 'ConstantProduct'}, 'const_int': {'key': 'constInt', 'type': 'int'}, 'const_string': {'key': 'constString', 'type': 'str'}, 'const_string_as_enum': {'key': 'constStringAsEnum', 'type': 'str'}, } const_int = 0 const_string = "constant" const_string_as_enum = "constant_string_as_enum" def __init__( self, *, child: "ChildProduct", const_child: "ConstantProduct", display_names: Optional[List[str]] = None, capacity: Optional[int] = None, image: Optional[str] = None, **kwargs ): super(Product, self).__init__(**kwargs) self.display_names = display_names self.capacity = capacity self.image = image self.child = child self.const_child = const_child
py
1a46b6c7dbff79430e040fd48e7dd6dcb62bcf84
#!/bin/python
# -*- coding: utf-8 -*-
import csv
import json
import requests
from ctrls import *
from settings import *
from os import listdir
from datetime import date


class Tester():
    '''To Test models'''

    def __init__(self, numbers, Model):
        # check the numbers against the database, remove invalid numbers
        tsecNumbers = [
            n[:-4]
            for n in listdir(TSEC_DATA_PATH)
            if n[-4:] == '.csv'
        ]
        # iterate over a copy so removing items does not skip elements
        for number in list(numbers):
            if number not in tsecNumbers:
                numbers.remove(number)
        self.numbers = numbers
        self.Model = Model

    def _getTmpData(self, number):
        page = requests.get('http://mis.twse.com.tw/stock/api/getStockInfo.jsp?ex_ch=tse_'+number+'.tw&json=1&delay=0')
        content = json.loads(page.content)
        vals = content['msgArray'][0]
        t = date.today()
        return [str(t.year-1911)+'/'+str(t.month).zfill(2)+'/'+str(t.day).zfill(2),
                vals['v'], 0, vals['o'], vals['h'], vals['l'], vals['z'], 0, 0]

    def _notInPeriod(self, row, dateFrom, dateTo):
        data_day = date(int(row[0].split('/')[0])+1911,
                        int(row[0].split('/')[1]),
                        int(row[0].split('/')[2]))
        if dateFrom and (data_day - dateFrom).days < 0:
            return True
        elif dateTo and (data_day - dateTo).days > 0:
            return True
        else:
            return False

    def getROI(self, result):
        if len(result["Asset Series"]) > 0:
            return str(round((float(result["Asset Series"][-1])/result["Asset Series"][0] - 1)*100, 3)) + '%'
        else:
            return "0.000 %"

    def run(self, mode='train', noLog=False, noRecord=False,
            dateFrom=None, dateTo=None, roiThr=-100, drawCandle=True):
        '''
        noLog and noRecord only apply to train mode; the other modes never write
        output by default. drawCandle only applies to the non-train modes, to avoid
        producing too many chart images at once, which is slow.
        '''
        # the tmp modes pull the latest quote through the API
        if mode == 'tmpGood' or mode == 'tmpHold':
            master_tmp_flag = True
        else:
            master_tmp_flag = False

        for number in self.numbers:
            reader = Reader(number)
            model = self.Model()
            trader = Trader(model.infos, number, noLog)
            tmp_flag = master_tmp_flag

            while True:
                row = reader.getInput()
                if row == None:
                    if tmp_flag:
                        row = self._getTmpData(number)
                        tmp_flag = False
                    else:
                        break
                last_row = row
                if (dateFrom or dateTo) and self._notInPeriod(row, dateFrom, dateTo):
                    model.updateData(row)
                else:
                    # update the Trader's data
                    trader.updateData(row)
                    # buy at the open: trade at the opening price
                    order = model.predict('start', float(row[3]))
                    trade = trader.place('start', order)
                    model.updateTrade(trade)
                    # after the open, place intraday orders
                    order = model.predict('mid', float(row[3]))
                    trade = trader.place('mid', order)
                    model.updateTrade(trade)
                    # end-of-session update
                    model.updateData(row)
                    # buy at the close: trade at the closing price
                    order = model.predict('end', float(row[6]))
                    trade = trader.place('end', order)
                    model.updateTrade(trade)

            result = trader.getResult()
            if mode == 'train' and not noRecord:
                tr = TraderRecorder()
                tr.record(result)
            elif mode == 'tmpGood' or mode == 'tmrGood':
                # the model predicts a buy and the cumulative ROI over the period is above the ROI threshold
                if order["Type"] == 'Buy' and result["ROI"] > roiThr:
                    print last_row[0], number, ' at ', float(last_row[6]), 'time to buy, cumulative ROI:', self.getROI(result)
                    # by default, check the candlestick chart before buying
                    if drawCandle:
                        CandleDrawer().draw(number)
            elif mode == 'tmpHold' or mode == 'tmrHold':
                if order["Type"] == 'Sell':
                    print last_row[0], number, ' at ', float(last_row[6]), 'time to sell, cumulative ROI:', self.getROI(result)
                elif order["Type"] == 'Nothing':
                    print last_row[0], number, ' at ', float(last_row[6]), 'hold, cumulative ROI:', self.getROI(result)
                # check the candlestick chart before acting
                if drawCandle:
                    CandleDrawer().draw(number)
py
1a46b71bd2204ed2a852fb0f50421303fbf23aa4
# Generated by Django 2.2 on 2019-10-10 05:29 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0011_update_proxy_permissions'), ] operations = [ migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('email', models.EmailField(max_length=255, unique=True)), ('name', models.CharField(max_length=255)), ('is_active', models.BooleanField(default=True)), ('is_staff', models.BooleanField(default=False)), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'abstract': False, }, ), ]
py
1a46b7de12d45014a807912b87fd0e946d2d0812
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 09:02:10 2018

@author: shirhe-lyh
"""

"""Generate tfrecord file from images.

Example Usage:
---------------
python3 train.py \
    --images_path: Path to the training images (directory).
    --output_path: Path to .record.
"""

import glob
import io
import os
import tensorflow as tf

from PIL import Image

flags = tf.app.flags

flags.DEFINE_string('images_path', None, 'Path to images (directory).')
flags.DEFINE_string('output_path', None, 'Path to output tfrecord file.')

FLAGS = flags.FLAGS


def int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def int64_list_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def bytes_list_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def create_tf_example(image_path):
    with tf.gfile.GFile(image_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size
    label = int(image_path.split('_')[-1].split('.')[0])

    tf_example = tf.train.Example(
        features=tf.train.Features(feature={
            'image/encoded': bytes_feature(encoded_jpg),
            'image/format': bytes_feature('jpg'.encode()),
            'image/class/label': int64_feature(label),
            'image/height': int64_feature(height),
            'image/width': int64_feature(width)}))
    return tf_example


def generate_tfrecord(images_path, output_path):
    writer = tf.python_io.TFRecordWriter(output_path)
    for image_file in glob.glob(images_path):
        tf_example = create_tf_example(image_file)
        writer.write(tf_example.SerializeToString())
    writer.close()


def main(_):
    images_path = os.path.join(FLAGS.images_path, '*.jpg')
    images_record_path = FLAGS.output_path
    generate_tfrecord(images_path, images_record_path)


if __name__ == '__main__':
    tf.app.run()
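A short companion sketch for spot-checking the generated file, assuming the same TF1-style API used above; 'images.record' is a placeholder for whatever --output_path was passed to the generator.

import tensorflow as tf

for record in tf.python_io.tf_record_iterator('images.record'):  # placeholder path
    example = tf.train.Example.FromString(record)
    feature = example.features.feature
    label = feature['image/class/label'].int64_list.value[0]
    height = feature['image/height'].int64_list.value[0]
    width = feature['image/width'].int64_list.value[0]
    print(label, height, width)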
py
1a46b7ee665c465153dc4680ef77ba57f87afd4f
""" manipulando strings * string indices *fatiamento de string [inici:fim:passo] *funcoes buit-in len, abs, type, print, etc essa funcoes podem ser usadas diretamente em cada tipo. """ # positivos [0123456789] nome = 'Daniel wol' print( nome[7])#para acessar o indice usase [] e o numero da casa decimal print(nome[:-1])# chama o indice e exclui o ultimo caracter apelido = nome[0:4]# tirou da casa 0 ate ate a 3 casa lembraqndo que a ultima casa nao aparede apelido = nome[:4]# pode deixar o valor de partida vazio se partir do inicio print(apelido)
py
1a46b871bb8b058ec6a3e5ecd6a25ed63a21e9e3
import pytest

from django_forex_trading_framework.users.models import User

pytestmark = pytest.mark.django_db


def test_user_get_absolute_url(user: User):
    assert user.get_absolute_url() == f"/users/{user.username}/"
py
1a46b94aa58a31f80a96dfae7aadaec605c9f9ad
from fairseq.models.roberta import RobertaModel
from fairseq.data.data_utils import collate_tokens
import nltk
import random

# DOWNLOAD: wget https://storage.googleapis.com/poloma-models/airbnb_model.tar.gz
# EXTRACT: tar -xvzf airbnb_model.tar.gz
# MAKE SURE model directory points to where you downloaded the model
MODEL_DIR = './airbnb_train/'

# DEPENDENCIES:
# pip install fairseq
# pip install nltk
# import nltk
# nltk.download('punkt')

## USAGE:
# from run_inf import Roberta
# model = Roberta(use_gpu=False, model_dir='./airbnb_train/')
# label = model.classify(review)

CHECKPOINT_FILE = 'checkpoint_best.pt'
CLASSES = ['NOT_GREAT', 'GREAT']
# how many sentences to run through at the same time. Tweak if running out of memory
CHUNK_SIZE = 4
# set bias based on excel spreadsheet
BIAS = 10


class Roberta(object):

    def __init__(self, model_dir=MODEL_DIR, ckpt_file=CHECKPOINT_FILE, use_gpu=False):
        self.model = RobertaModel.from_pretrained(model_dir, checkpoint_file=ckpt_file)
        self.model.eval()  # disable dropout
        if use_gpu:
            self.model.cuda()

    def classify(self, review, logits=False):
        reviews = self.batch_review(review)
        roberta = self.model
        tokens = map(lambda x: x if len(x) < 512 else x[:511],
                     [roberta.encode(r) for r in reviews])
        batch = collate_tokens(list(tokens), pad_idx=1)
        label = roberta.predict('sentence_classification_head', batch)
        if logits:
            return label.sum(dim=0).tolist()
        else:
            logits = label.sum(dim=0).tolist()
            return CLASSES[0] if logits[0] > logits[1] + BIAS else CLASSES[1]

    def batch_review(self, review):
        sents = nltk.sent_tokenize(review)
        buffer = []
        chunks = []
        for sent in sents:
            buffer.append(sent)
            if (len(buffer)) % CHUNK_SIZE == 0:
                chunks.append(" ".join(buffer))
                buffer = [buffer[random.randint(0, CHUNK_SIZE - 1)]]
        chunks.append(" ".join(buffer))
        return chunks
py
1a46b9be640189827675386270fc24758536690d
""" This module contains classes describing a HTTP request received by the container. """ import abc import asyncio class Request: """ This class represents a HTTP request received by the container """ @abc.abstractmethod async def body(self) -> bytes: """ Return the body of the request as a sequence of bytes """ @abc.abstractmethod def headers(self) -> dict: """ Return a dictionary containing the headers as a dictionary """ @abc.abstractmethod def http_version(self) -> str: """ Return the HTTP version as a string """ @abc.abstractmethod def keep_alive(self) -> bool: """ Return true if we want to keep the connection open """ class HTTPToolsRequest(Request): """ An implementation of the abstract Request class using the HttpTools library """ def __init__(self, future: asyncio.Future, headers: dict = None, http_version: str = "1.1", keep_alive: bool = True) -> None: self._future = future self._headers = headers self._http_version = http_version self._keep_alive = keep_alive async def body(self) -> bytes: return await self._future def headers(self) -> dict: if self._headers is None: return {} return self._headers def http_version(self) -> str: return self._http_version def keep_alive(self) -> bool: return self._keep_alive
py
1a46ba6b669e97c64a2cd925e311fc87ee9de364
"""marineplanner URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls import include, url from django.contrib import admin from django.views.static import serve ### INSERT ADDITIONAL IMPORTS HERE ### import accounts.urls from fishpass.views import get_filter_results, get_filter_count, import_PAD, import_barrier_costs ### END PROJECT URL IMPORTS ### urlpatterns = [ url(r'^admin/import_PAD/?', import_PAD), url(r'^adminfishpass/import_PAD/?', import_PAD), url(r'^admin/import_barrier_costs/?', import_barrier_costs), url(r'^adminfishpass/import_barrier_costs/?', import_barrier_costs), url(r'^admin/?', admin.site.urls), ### INSERT PROJECT URL INCLUDES HERE ### url(r'^features/', include('features.urls')), url(r'^manipulators/', include('manipulators.urls')), url(r'^scenarios/get_filter_results/(?P<project_id>[\w_]+)/$', get_filter_results), url(r'^scenarios/get_filter_results/', get_filter_results), url(r'^scenarios/get_filter_count', get_filter_count), url(r'^scenarios/', include('scenarios.urls')), url(r'^account/auth/', include('social.apps.django_app.urls', namespace='social')), url(r'^accounts/', include('accounts.urls', namespace="account")), url(r'^data_manager/', include('data_manager.urls')), url(r'^drawing/', include('drawing.urls')), # url(r'^visualize/', include('visualize.urls')), url(r'^fishpass/', include('fishpass.urls')), url(r'^', include('fishpass.urls')), # url(r'^', fishpass.views.home, name='home'), ### END PROJECT URL INCLUDES ### # url(r'^visualize/', include('visualize.urls')), # url(r'^account/auth/', include('social.apps.django_app.urls', namespace='social')), # url(r'^account/', include('accounts.urls', namespace="account")), # url(r'^data_manager/', include('data_manager.urls', namespace="data_manager")), ] if settings.DEBUG: urlpatterns +=[ url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), # url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}), ]
py
1a46bac793922b14346a964991c2e7b69c1d2602
import asyncio from typing import Any from app.recorder.recorder import Recorder from ib_insync import IB class MarketRecorder(object): def __init__(self, ib: IB, recorder: Recorder) -> None: self._ib = ib self._recorder = recorder self._ib.pendingTickersEvent += self.on_market_data self._queue: asyncio.Queue = asyncio.Queue() self._task: asyncio.Task = asyncio.create_task(self._process()) async def _process(self): while True: file_name, *data = await self._queue.get() await self._recorder.consume(file_name, data) def on_market_data(self, tickers: Any) -> None: for ticker in tickers: depth = min(len(ticker.domBids), len(ticker.domAsks)) if depth == 0: continue symbol = ticker.contract.symbol file_name = f'{symbol}' data = [file_name, ticker.time.timestamp()] for i in range(0, depth): data.append(ticker.domBids[i].price) data.append(ticker.domBids[i].size) data.append(ticker.domAsks[i].price) data.append(ticker.domAsks[i].size) self._queue.put_nowait(tuple(data))
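# Hedged usage sketch for MarketRecorder (not part of the original module). The
# connection parameters and contract are placeholders, and Recorder() stands in
# for whatever concrete recorder the application provides; only the wiring of
# IB -> MarketRecorder mirrors the class above. The recorder is constructed
# inside a running event loop so the create_task() call in __init__ succeeds.
from ib_insync import Forex

async def record_eurusd_depth() -> None:
    ib = IB()
    await ib.connectAsync('127.0.0.1', 7497, clientId=1)   # assumed local TWS / IB Gateway
    MarketRecorder(ib, Recorder())    # hooks pendingTickersEvent and starts the queue consumer
    ib.reqMktDepth(Forex('EURUSD'))   # depth updates arrive via on_market_data
    await asyncio.sleep(60)           # record one minute of order book snapshots
    ib.disconnect()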
py
1a46bb3e3e18edeb2d065996f1ac294a89df75aa
# -*- coding: utf-8 -*- from collections import OrderedDict import pykintone import numpy as np import pandas as pd from karura.core.dataframe_extension import DataFrameExtension, FType from karura.env import get_kintone_env class Field(): def __init__(self, code, f_type, label, is_unique): self.code = code self.f_type = f_type self.label = label self.is_unique = is_unique @classmethod def create(cls, f_code, f_dict): f_type = f_dict["type"] f_label = f_dict["label"] is_unique = False if "unique" in f_dict: is_unique = f_dict["unique"] f = Field(f_code, f_type, f_label, is_unique) if f.get_feature_type() is not None: return f else: return None def get_feature_type(self): ftype = None if self.f_type in ["CREATED_TIME", "CREATOR", "MODIFIER", "UPDATED_TIME", "RECORD_NUMBER", "作成日時", "作成者", "更新者", "更新日時", "レコード番号"]: pass elif self.f_type in ["FILE", "LINK", "RICH_TEXT", "STATUS_ASSIGNEE", "SUBTABLE"]: pass elif self.is_unique: ftype = FType.unique elif self.f_type in ["RADIO_BUTTON", "DROP_DOWN", "CHECK_BOX", "MULTI_SELECT", "CATEGORY", "STATUS", "カテゴリー", "ステータス", "USER_SELECT"]: # todo: think about multiselect ftype = FType.categorical elif self.f_type in ["DATE", "DATETIME"]: ftype = FType.datetime elif self.f_type in ["NUMBER", "CALC"]: ftype = FType.numerical elif self.f_type in ["MULTI_LINE_TEXT", "SINGLE_LINE_TEXT"]: # optional inferring if self.code.endswith("_type") or self.code.endswith("_category"): ftype = FType.categorical elif not self.is_unique and (self.code.endswith("_id") or self.code.endswith("_cd")): ftype = FType.categorical elif self.code.endswith("_value"): ftype = FType.numerical else: ftype = FType.text return ftype def __repr__(self): repr = "<{}:{}->{} {}>".format(self.__class__, self.f_type, self.get_feature_type(), self.label) return repr class Application(): def __init__(self, env=None): self.env = env if env is not None else get_kintone_env() self.max_count = 10000 self._kintone_limit = 500 def get_app_id(self, app_name): kintone = pykintone.login(self.env.domain, self.env.login_id, self.env.password) result = kintone.administration().select_app_info() if result.ok: matched = [a for a in result.infos if a.name == app_name] if len(matched) > 0: return matched[0].app_id else: return "" else: raise Exception("Error occurred when getting the app_id") def load(self, app_id, query="", fields=(), target=""): app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id) fields_d = self.get_fields(app_id) if len(fields) > 0: d = OrderedDict() for f in fields: if f in fields_d: d[f] = fields_d[f] fields_d = d q = query + " " if query else "" records = [] _fields = list(fields_d.keys()) selected = app.select(query=q + "limit {}".format(self._kintone_limit), fields=_fields) records = selected.records if selected.total_count > self._kintone_limit: repeat = np.floor(min(self.max_count, selected.total_count) / self._kintone_limit) for i in range(int(repeat)): selected = app.select(query=q + "limit {} offset {}".format(self._kintone_limit, (i + 1) * self._kintone_limit), fields=_fields) if len(selected.records) > 0: records += selected.records data = [] columns = [] for i, r in enumerate(records): row = [] if i == 0: columns = [f for f in _fields if f in r] for f in columns: v = r[f]["value"] row.append(v) if len(row) > 0: data.append(row) fs = [fields_d[c] for c in columns] df = pd.DataFrame(np.array(data), columns=[f.label for f in fs]) categoricals = [f.label for f in fs if f.get_feature_type() == FType.categorical] numericals = [f.label for f in fs 
if f.get_feature_type() == FType.numerical] datetimes = [f.label for f in fs if f.get_feature_type() == FType.datetime] texts = [f.label for f in fs if f.get_feature_type() == FType.text] uniques = [f.label for f in fs if f.get_feature_type() == FType.unique] dfe = DataFrameExtension(df, categoricals, numericals, datetimes, texts, uniques) if target: dfe.target = fields_d[target].label return dfe def get_fields(self, app_id): # todo: if app_id exists on karura app, get field definition from it. app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id) fields = app.administration().form().get() if not fields.ok: raise Exception("Error occurred when getting the form information from kintone.") fs = fields.raw d = OrderedDict() for f_code in fs: f = Field.create(f_code, fs[f_code]) if f: d[f_code] = f return d
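# Hedged usage sketch for the Application class above (not part of the original
# module). The app name, field codes, and target are hypothetical; the flow --
# resolve the app id, load records, receive a DataFrameExtension with inferred
# feature types -- follows the methods defined above.
if __name__ == "__main__":
    kintone_app = Application()                        # credentials come from get_kintone_env()
    app_id = kintone_app.get_app_id("sales_records")   # hypothetical kintone app name
    if app_id:
        dfe = kintone_app.load(app_id, fields=("customer_type", "amount"), target="amount")
        print(dfe.target)   # label of the hypothetical "amount" target field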
py
1a46bbc5e94dff56bd920a4dee95f6388cb12ecc
import re
import subprocess

from .app import *


class ssh_client(object):
    def __init__(self, inject_host_port, proxy_command):
        super(ssh_client, self).__init__()
        self.inject_host, self.inject_port = inject_host_port
        self.proxy_command = proxy_command
        self.account = {}
        self.reconnect = False

    def log(self, value, color='[G1]'):
        log_file(real_path('/../storage/app.log'), value, color=color)

    def start(self):
        while True:
            account = self.account
            host = account['host']
            port = account['port']
            username = account['username']
            password = account['password']
            sockport = account['sockport']
            proxy_command = self.proxy_command

            response = subprocess.Popen(
                (
                    'sshpass -p "{password}" ssh -v -CND 0.0.0.0:{sockport} {host} -p {port} -l "{username}" ' + \
                    '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand="{}"'.format(proxy_command)
                ).format(
                    host=host, port=port, username=username, password=password, sockport=sockport,
                    inject_host=self.inject_host, inject_port=self.inject_port
                ),
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
            for line in response.stdout:
                # Strip the "debug1:" / "Warning:" prefix with a regex; str.lstrip()
                # would treat the pattern as a set of characters, not a prefix.
                line = re.sub(r'^(debug1|Warning):\s*', '', line.decode().strip()) + '\r'
                if 'pledge: proc' in line:
                    self.reconnect = True
                    self.log('Connected', color='[Y1]')
                elif 'Permission denied' in line:
                    self.log('Access Denied', color='[R1]')
                    break
                elif 'Connection closed' in line:
                    self.log('Connection closed', color='[R1]')
                    break
                elif 'Could not request local forwarding' in line:
                    self.log('Port is already used by another program', color='[R1]')
                    break

            self.log('Disconnected', color='[R1]')
            if self.reconnect == False:
                break
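# Hedged usage sketch for ssh_client (not part of the original module). All
# account values and the proxy command are placeholders; they only show which
# keys start() expects and how {inject_host}/{inject_port} are substituted into
# the ProxyCommand template (%h/%p are expanded by ssh itself).
if __name__ == '__main__':
    client = ssh_client(('127.0.0.1', 8989),
                        'corkscrew {inject_host} {inject_port} %h %p')
    client.account = {
        'host': 'ssh.example.com',
        'port': 22,
        'username': 'demo-user',
        'password': 'demo-password',
        'sockport': 1080,
    }
    client.start()   # blocks; restarts the tunnel after a drop once it has connected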
py
1a46bbf0fb881a5dad88ba8e81a256279438a3d9
"""Base settings to build other settings files upon.""" import environ ROOT_DIR = environ.Path(__file__) - 3 APPS_DIR = ROOT_DIR.path('cride') env = environ.Env() # Base DEBUG = env.bool('DJANGO_DEBUG', False) # Language and timezone TIME_ZONE = 'America/Mexico_City' LANGUAGE_CODE = 'en-us' SITE_ID = 1 USE_I18N = True USE_L10N = True USE_TZ = True # DATABASES DATABASES = { 'default': env.db('DATABASE_URL'), } DATABASES['default']['ATOMIC_REQUESTS'] = True # URLs ROOT_URLCONF = 'config.urls' # WSGI WSGI_APPLICATION = 'config.wsgi.application' # Users & Authentication AUTH_USER_MODEL = 'users.User' # Apps DJANGO_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', ] THIRD_PARTY_APPS = [ 'rest_framework', 'rest_framework.authtoken', 'django_filters', ] LOCAL_APPS = [ 'cride.users.apps.UsersAppConfig', 'cride.circles.apps.CircleAppConfig', 'cride.rides.apps.RidesAppConfig', ] INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS # Passwords PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', 'django.contrib.auth.hashers.BCryptPasswordHasher', ] AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Middlewares MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] # Static files STATIC_ROOT = str(ROOT_DIR('staticfiles')) STATIC_URL = '/static/' STATICFILES_DIRS = [ str(APPS_DIR.path('static')), ] STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ] # Media MEDIA_ROOT = str(APPS_DIR('media')) MEDIA_URL = '/media/' # Templates TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ str(APPS_DIR.path('templates')), ], 'OPTIONS': { 'debug': DEBUG, 'loaders': [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ], 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', ], }, }, ] # Security SESSION_COOKIE_HTTPONLY = True CSRF_COOKIE_HTTPONLY = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # Email EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend') # Admin ADMIN_URL = 'admin/' ADMINS = [ ("""Pablo Trinidad""", '[email protected]'), ] MANAGERS = ADMINS # Celery INSTALLED_APPS += ['cride.taskapp.celery.CeleryAppConfig'] 
if USE_TZ: CELERY_TIMEZONE = TIME_ZONE CELERY_BROKER_URL = env('CELERY_BROKER_URL') CELERY_RESULT_BACKEND = CELERY_BROKER_URL CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERYD_TASK_TIME_LIMIT = 5 * 60 CELERYD_TASK_SOFT_TIME_LIMIT = 60 # Django REST Framework REST_FRAMEWORK = { 'DEFAULT_RENDERER_CLASSES': ( 'rest_framework.renderers.JSONRenderer', ), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10, }
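# Hedged sketch (not part of the original settings): the environment variables
# this base module reads through django-environ. DATABASE_URL and
# CELERY_BROKER_URL have no defaults above, so they must be present in the
# environment before the settings are imported; the values below are
# placeholders for a local setup, e.g. in a development entry point.
import os

os.environ.setdefault('DJANGO_DEBUG', 'False')
os.environ.setdefault('DATABASE_URL', 'postgres://cride:cride@localhost:5432/cride')
os.environ.setdefault('CELERY_BROKER_URL', 'redis://localhost:6379/0')
os.environ.setdefault('DJANGO_EMAIL_BACKEND', 'django.core.mail.backends.console.EmailBackend')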
py
1a46bc05446f8f6d6c11e0944eb0e15f43e28466
#!/usr/bin/env python from __future__ import print_function from collections import OrderedDict import re regexes = { 'hybrid-assembly': ['v_pipeline.txt', r"(\S+)"], 'Nextflow': ['v_nextflow.txt', r"(\S+)"], 'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"], 'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"], 'QUAST': ['v_quast.txt', r"WARNING: Python locale settings can't be changed\nQUAST v(\S+)"], 'Canu': ['v_canu.txt', r"Canu (\S+)"], 'SPAdes': ['v_spades.txt', r"SPAdes v(\S+)"], 'minimap2': ['v_minimap.txt', r"(\S+)"], 'pilon': ['v_pilon.txt', r"Pilon version (\S+)"] } results = OrderedDict() results['hybrid-assembly'] = '<span style="color:#999999;\">N/A</span>' results['Nextflow'] = '<span style="color:#999999;\">N/A</span>' results['FastQC'] = '<span style="color:#999999;\">N/A</span>' results['MultiQC'] = '<span style="color:#999999;\">N/A</span>' results['QUAST'] = '<span style="color:#999999;\">N/A</span>' results['Canu'] = '<span style="color:#999999;\">N/A</span>' results['SPAdes'] = '<span style="color:#999999;\">N/A</span>' results['minimap2'] = '<span style="color:#999999;\">N/A</span>' results['pilon'] = '<span style="color:#999999;\">N/A</span>' # Search each file using its regex for k, v in regexes.items(): with open(v[0]) as x: versions = x.read() match = re.search(v[1], versions) if match: results[k] = "v{}".format(match.group(1)) # Dump to YAML print (''' id: 'hybrid-assembly-software-versions' section_name: 'hybrid-assembly Software Versions' section_href: 'https://github.com/kevinmenden/hybrid-assembly' plot_type: 'html' description: 'are collected at run time from the software output.' data: | <dl class="dl-horizontal"> ''') for k,v in results.items(): print(" <dt>{}</dt><dd>{}</dd>".format(k,v)) print (" </dl>")
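# Hedged sketch (not part of the original script): a guarded variant of the
# parsing loop above that tolerates missing v_*.txt files instead of raising,
# leaving the N/A placeholder for tools that were not run.
def collect_versions(regex_map, defaults):
    collected = OrderedDict(defaults)
    for tool, (fname, pattern) in regex_map.items():
        try:
            with open(fname) as handle:
                match = re.search(pattern, handle.read())
        except IOError:
            continue
        if match:
            collected[tool] = "v{}".format(match.group(1))
    return collected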
py
1a46bc5f58b0969fa78068782708b55c8a8cf115
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mox from cinder import context from cinder import exception from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils from cinder import test from cinder import units from cinder.volume import configuration as conf from cinder.volume.drivers.solidfire import SolidFireDriver from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) def create_configuration(): configuration = mox.MockObject(conf.Configuration) configuration.san_is_local = False configuration.append_config_values(mox.IgnoreArg()) return configuration class SolidFireVolumeTestCase(test.TestCase): def setUp(self): self.ctxt = context.get_admin_context() self._mox = mox.Mox() self.configuration = mox.MockObject(conf.Configuration) self.configuration.sf_allow_tenant_qos = True self.configuration.san_is_local = True self.configuration.sf_emulate_512 = True self.configuration.sf_account_prefix = 'cinder' super(SolidFireVolumeTestCase, self).setUp() self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.expected_qos_results = {'minIOPS': 1000, 'maxIOPS': 10000, 'burstIOPS': 20000} def fake_issue_api_request(obj, method, params, version='1.0'): if method is 'GetClusterCapacity' and version == '1.0': LOG.info('Called Fake GetClusterCapacity...') data = {'result': {'clusterCapacity': {'maxProvisionedSpace': 99999999, 'usedSpace': 999, 'compressionPercent': 100, 'deDuplicationPercent': 100, 'thinProvisioningPercent': 100}}} return data elif method is 'GetClusterInfo' and version == '1.0': LOG.info('Called Fake GetClusterInfo...') results = {'result': {'clusterInfo': {'name': 'fake-cluster', 'mvip': '1.1.1.1', 'svip': '1.1.1.1', 'uniqueID': 'unqid', 'repCount': 2, 'attributes': {}}}} return results elif method is 'AddAccount' and version == '1.0': LOG.info('Called Fake AddAccount...') return {'result': {'accountID': 25}, 'id': 1} elif method is 'GetAccountByName' and version == '1.0': LOG.info('Called Fake GetAccountByName...') results = {'result': {'account': {'accountID': 25, 'username': params['username'], 'status': 'active', 'initiatorSecret': '123456789012', 'targetSecret': '123456789012', 'attributes': {}, 'volumes': [6, 7, 20]}}, "id": 1} return results elif method is 'CreateVolume' and version == '1.0': LOG.info('Called Fake CreateVolume...') return {'result': {'volumeID': 5}, 'id': 1} elif method is 'DeleteVolume' and version == '1.0': LOG.info('Called Fake DeleteVolume...') return {'result': {}, 'id': 1} elif method is 'ModifyVolume' and version == '5.0': LOG.info('Called Fake ModifyVolume...') return {'result': {}, 'id': 1} elif method is 'CloneVolume': return {'result': {'volumeID': 6}, 'id': 2} elif method is 'ModifyVolume': return elif method is 'ListVolumesForAccount' and version == '1.0': test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' LOG.info('Called Fake ListVolumesForAccount...') result 
= {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.GiB, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': None, 'qos': None, 'iqn': test_name}]}} return result else: LOG.error('Crap, unimplemented API call in Fake:%s' % method) def fake_issue_api_request_fails(obj, method, params, version='1.0'): return {'error': {'code': 000, 'name': 'DummyError', 'message': 'This is a fake error response'}, 'id': 1} def fake_set_qos_by_volume_type(self, type_id, ctxt): return {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000} def fake_volume_get(obj, key, default=None): return {'qos': 'fast'} def fake_update_cluster_status(self): return def fake_get_model_info(self, account, vid): return {'fake': 'fake-model'} def test_create_with_qos_type(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.stubs.Set(SolidFireDriver, '_set_qos_by_volume_type', self.fake_set_qos_by_volume_type) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': 'fast', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) self.assertIsNotNone(model_update) def test_create_volume(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) self.assertIsNotNone(model_update) self.assertIsNone(model_update.get('provider_geometry', None)) def test_create_volume_non_512(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} self.configuration.sf_emulate_512 = False sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) self.assertEqual(model_update.get('provider_geometry', None), '4096 4096') self.configuration.sf_emulate_512 = True def test_create_snapshot(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.stubs.Set(SolidFireDriver, '_get_model_info', self.fake_get_model_info) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} testsnap = {'project_id': 'testprjid', 'name': 'testvol', 'volume_size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) sfv.create_snapshot(testsnap) def test_create_clone(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.stubs.Set(SolidFireDriver, '_get_model_info', self.fake_get_model_info) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} testvol_b = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 
'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) sfv.create_cloned_volume(testvol_b, testvol) def test_initialize_connector_with_blocksizes(self): connector = {'initiator': 'iqn.2012-07.org.fake:01'} testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 'solidfire:87hg.uuid-2cc06226-cc' '74-4cb7-bd55-14aed659a0cc.4060 0', 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '4096 4096', 'created_at': timeutils.utcnow(), } sfv = SolidFireDriver(configuration=self.configuration) properties = sfv.initialize_connection(testvol, connector) self.assertEqual(properties['data']['physical_block_size'], '4096') self.assertEqual(properties['data']['logical_block_size'], '4096') def test_create_volume_with_qos(self): preset_qos = {} preset_qos['qos'] = 'fast' self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'metadata': [preset_qos], 'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) model_update = sfv.create_volume(testvol) self.assertIsNotNone(model_update) def test_create_volume_fails(self): # NOTE(JDG) This test just fakes update_cluster_status # this is inentional for this test self.stubs.Set(SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) try: sfv.create_volume(testvol) self.fail("Should have thrown Error") except Exception: pass def test_create_sfaccount(self): sfv = SolidFireDriver(configuration=self.configuration) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._create_sfaccount('project-id') self.assertIsNotNone(account) def test_create_sfaccount_fails(self): sfv = SolidFireDriver(configuration=self.configuration) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) account = sfv._create_sfaccount('project-id') self.assertIsNone(account) def test_get_sfaccount_by_name(self): sfv = SolidFireDriver(configuration=self.configuration) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._get_sfaccount_by_name('some-name') self.assertIsNotNone(account) def test_get_sfaccount_by_name_fails(self): sfv = SolidFireDriver(configuration=self.configuration) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) account = sfv._get_sfaccount_by_name('some-name') self.assertIsNone(account) def test_delete_volume(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) sfv.delete_volume(testvol) def test_delete_volume_fails_no_volume(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = 
{'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) try: sfv.delete_volume(testvol) self.fail("Should have thrown Error") except Exception: pass def test_delete_volume_fails_account_lookup(self): # NOTE(JDG) This test just fakes update_cluster_status # this is inentional for this test self.stubs.Set(SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAccountNotFound, sfv.delete_volume, testvol) def test_get_cluster_info(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = SolidFireDriver(configuration=self.configuration) sfv._get_cluster_info() def test_get_cluster_info_fail(self): # NOTE(JDG) This test just fakes update_cluster_status # this is inentional for this test self.stubs.Set(SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) sfv = SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAPIException, sfv._get_cluster_info) def test_extend_volume(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) sfv.extend_volume(testvol, 2) def test_extend_volume_fails_no_volume(self): self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'not-found'} sfv = SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.VolumeNotFound, sfv.extend_volume, testvol, 2) def test_extend_volume_fails_account_lookup(self): # NOTE(JDG) This test just fakes update_cluster_status # this is intentional for this test self.stubs.Set(SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.stubs.Set(SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.SolidFireAccountNotFound, sfv.extend_volume, testvol, 2) def test_set_by_qos_spec_with_scoping(self): sfv = SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'qos:minIOPS': '1000', 'qos:maxIOPS': '10000', 'qos:burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual(qos, self.expected_qos_results) def test_set_by_qos_spec(self): sfv = SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'minIOPS': '1000', 'maxIOPS': '10000', 
'burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual(qos, self.expected_qos_results) def test_set_by_qos_by_type_only(self): sfv = SolidFireDriver(configuration=self.configuration) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id']) self.assertEqual(qos, {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 300})
py
1a46bcd74998a8f30b72694cf19e96f4cde558cb
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for multi-worker training tutorial.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import os import re from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.experimental.ops import distribute_options from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import collective_all_reduce_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import multi_process_runner from tensorflow.python.distribute import multi_worker_test_base from tensorflow.python.framework import errors_impl from tensorflow.python.framework import test_util from tensorflow.python.keras.datasets import mnist from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.platform import test from tensorflow.python.util import nest class MultiWorkerTutorialTest(parameterized.TestCase, test.TestCase): """Test multi-worker training flow demo'ed in go/multi-worker-with-keras.""" @contextlib.contextmanager def skip_fetch_failure_exception(self): try: yield except Exception as e: # pylint: disable=broad-except if 'URL fetch failure' in str(e): self.skipTest('URL fetch error not considered failure of the test.') else: raise @combinations.generate( combinations.combine( mode=['eager'], shard_policy=[None] + list(distribute_options.AutoShardPolicy))) def testMultiWorkerTutorial(self, mode, shard_policy): """Test multi-worker training flow demo'ed in go/multi-worker-with-keras. This test should be kept in sync with the code samples in go/multi-worker-with-keras. Args: mode: Runtime mode. shard_policy: None or any of tf.data.experimental.AutoShardPolicy for testing. """ if shard_policy is distribute_options.AutoShardPolicy.FILE: self.skipTest('TensorSliceDataset is not shardable with FILE policy.') def mnist_dataset(batch_size): with self.skip_fetch_failure_exception(): (x_train, y_train), _ = mnist.load_data() # The `x` arrays are in uint8 and have values in the range [0, 255]. 
# We need to convert them to float32 with values in the range [0, 1] x_train = x_train / np.float32(255) y_train = y_train.astype(np.int64) train_dataset = dataset_ops.DatasetV2.from_tensor_slices( (x_train, y_train)).shuffle(60000).repeat().batch(batch_size) return train_dataset def build_and_compile_cnn_model(): model = keras.Sequential([ keras.layers.Input(shape=(28, 28)), keras.layers.Reshape(target_shape=(28, 28, 1)), keras.layers.Conv2D(32, 3, activation='relu'), keras.layers.Flatten(), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10) ]) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=gradient_descent.SGD(learning_rate=0.001), metrics=['accuracy']) return model per_worker_batch_size = 64 single_worker_dataset = mnist_dataset(per_worker_batch_size) single_worker_model = build_and_compile_cnn_model() single_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70) num_workers = 4 def proc_func(): global_batch_size = per_worker_batch_size * num_workers strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy() with strategy.scope(): multi_worker_model = build_and_compile_cnn_model() callbacks = [ keras.callbacks.ModelCheckpoint( filepath=os.path.join(self.get_temp_dir(), 'checkpoint')) ] multi_worker_dataset = mnist_dataset(global_batch_size) if shard_policy: options = dataset_ops.Options() options.experimental_distribute.auto_shard_policy = shard_policy multi_worker_dataset = multi_worker_dataset.with_options(options) multi_worker_model.fit( multi_worker_dataset, epochs=3, steps_per_epoch=70, callbacks=callbacks) with test_util.skip_if_error(self, errors_impl.UnavailableError): mpr_result = multi_process_runner.run( proc_func, multi_worker_test_base.create_cluster_spec(num_workers=num_workers), list_stdout=True) def extract_accuracy(worker_id, input_string): match = re.match( r'\[worker\-{}\].*accuracy: (\d+\.\d+).*'.format(worker_id), input_string) return None if match is None else float(match.group(1)) for worker_id in range(num_workers): accu_result = nest.map_structure( lambda x: extract_accuracy(worker_id, x), # pylint: disable=cell-var-from-loop mpr_result.stdout) self.assertTrue( any(accu_result), 'Every worker is supposed to have accuracy result.') if __name__ == '__main__': multi_process_runner.test_main()
py
1a46bcfe1a3dffbddf021ecb5264952076e59a81
""" mod:`urls` Title Search App URL routing """ __author__ = "Jeremy Nelson" try: from django.conf.urls.defaults import * except ImportError: from django.conf.urls import * urlpatterns = patterns('person_authority.views', url(r"$^","app",name="person-authority-default"), url(r"search$","search",name="person-authority-search-json") )
py
1a46bd7916cee3e92824ec50d6c6aa7d31381f80
from django.contrib import admin from .models import Attribute class AttributeAdmin(admin.ModelAdmin): list_display = ('uri', ) search_fields = ('uri', ) readonly_fields = ('uri', 'path') admin.site.register(Attribute, AttributeAdmin)
py
1a46bdb3f397167d4a9ade08891be6223271e041
class User:  # blueprint for user objects
    def __init__(self, user_email, name, password, current_job_title):
        self.email = user_email
        self.name = name
        self.password = password
        self.current_job_title = current_job_title

    def change_password(self, new_password):
        # A function defined inside a class is called a "method".
        self.password = new_password

    def change_job_title(self, new_job_title):
        # The "self" parameter gives every method access to the instance's own data.
        self.current_job_title = new_job_title

    def get_user_info(self):
        print(f"User {self.name} currently works as a {self.current_job_title}. "
              f"You can contact them at {self.email}.")
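# A short usage example for the User class above (the values are made up).
if __name__ == "__main__":
    user = User("jane@example.com", "Jane", "s3cret", "Data Analyst")
    user.change_job_title("Data Engineer")
    user.change_password("new-s3cret")
    user.get_user_info()
    # -> User Jane currently works as a Data Engineer. You can contact them at jane@example.com.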
py
1a46bddd2ec9ab766a2435eef89a948bbe50742a
# coding: utf-8 """ InsightVM API # Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. 
| Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
#### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+&#124;-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+&#124;-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501 OpenAPI spec version: 3 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from py_insightvm_sdk.models.link import Link # noqa: F401,E501 class PolicyOverrideScope(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'asset': 'int', 'links': 'list[Link]', 'new_result': 'str', 'original_result': 'str', 'rule': 'int', 'type': 'str' } attribute_map = { 'asset': 'asset', 'links': 'links', 'new_result': 'newResult', 'original_result': 'originalResult', 'rule': 'rule', 'type': 'type' } def __init__(self, asset=None, links=None, new_result=None, original_result=None, rule=None, type=None): # noqa: E501 """PolicyOverrideScope - a model defined in Swagger""" # noqa: E501 self._asset = None self._links = None self._new_result = None self._original_result = None self._rule = None self._type = None self.discriminator = None if asset is not None: self.asset = asset if links is not None: self.links = links self.new_result = new_result if original_result is not None: self.original_result = original_result self.rule = rule self.type = type @property def asset(self): """Gets the asset of this PolicyOverrideScope. # noqa: E501 The identifier of the asset whose compliance results are to be overridden. Property is required if the property `scope` is set to either `\"specific-asset\"` or `\"specific-asset-until-next-scan\"`. # noqa: E501 :return: The asset of this PolicyOverrideScope. # noqa: E501 :rtype: int """ return self._asset @asset.setter def asset(self, asset): """Sets the asset of this PolicyOverrideScope. The identifier of the asset whose compliance results are to be overridden. Property is required if the property `scope` is set to either `\"specific-asset\"` or `\"specific-asset-until-next-scan\"`. # noqa: E501 :param asset: The asset of this PolicyOverrideScope. # noqa: E501 :type: int """ self._asset = asset @property def links(self): """Gets the links of this PolicyOverrideScope. # noqa: E501 :return: The links of this PolicyOverrideScope. # noqa: E501 :rtype: list[Link] """ return self._links @links.setter def links(self, links): """Sets the links of this PolicyOverrideScope. :param links: The links of this PolicyOverrideScope. # noqa: E501 :type: list[Link] """ self._links = links @property def new_result(self): """Gets the new_result of this PolicyOverrideScope. # noqa: E501 The new policy rule result after the override is applied. 
# noqa: E501 :return: The new_result of this PolicyOverrideScope. # noqa: E501 :rtype: str """ return self._new_result @new_result.setter def new_result(self, new_result): """Sets the new_result of this PolicyOverrideScope. The new policy rule result after the override is applied. # noqa: E501 :param new_result: The new_result of this PolicyOverrideScope. # noqa: E501 :type: str """ if new_result is None: raise ValueError("Invalid value for `new_result`, must not be `None`") # noqa: E501 allowed_values = ["pass", "fail", "not-applicable", "fixed"] # noqa: E501 if new_result not in allowed_values: raise ValueError( "Invalid value for `new_result` ({0}), must be one of {1}" # noqa: E501 .format(new_result, allowed_values) ) self._new_result = new_result @property def original_result(self): """Gets the original_result of this PolicyOverrideScope. # noqa: E501 The original policy rule result before the override was applied. This property only applies to overrides with a scope of either `\"specific-asset\"` or `\"specific-asset-until-next-scan\"`. # noqa: E501 :return: The original_result of this PolicyOverrideScope. # noqa: E501 :rtype: str """ return self._original_result @original_result.setter def original_result(self, original_result): """Sets the original_result of this PolicyOverrideScope. The original policy rule result before the override was applied. This property only applies to overrides with a scope of either `\"specific-asset\"` or `\"specific-asset-until-next-scan\"`. # noqa: E501 :param original_result: The original_result of this PolicyOverrideScope. # noqa: E501 :type: str """ allowed_values = ["pass", "fail", "error", "unknown", "not-applicable", "not-checked", "not-selected", "informational", "fixed"] # noqa: E501 if original_result not in allowed_values: raise ValueError( "Invalid value for `original_result` ({0}), must be one of {1}" # noqa: E501 .format(original_result, allowed_values) ) self._original_result = original_result @property def rule(self): """Gets the rule of this PolicyOverrideScope. # noqa: E501 The identifier of the policy rule whose compliance results are to be overridden. # noqa: E501 :return: The rule of this PolicyOverrideScope. # noqa: E501 :rtype: int """ return self._rule @rule.setter def rule(self, rule): """Sets the rule of this PolicyOverrideScope. The identifier of the policy rule whose compliance results are to be overridden. # noqa: E501 :param rule: The rule of this PolicyOverrideScope. # noqa: E501 :type: int """ if rule is None: raise ValueError("Invalid value for `rule`, must not be `None`") # noqa: E501 self._rule = rule @property def type(self): """Gets the type of this PolicyOverrideScope. # noqa: E501 The scope of assets affected by the policy override. Can be one of the following values: | Value | Description | | ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | | `\"all-assets\"` | Overrides the compliance result of all assets evaluated with the specified policy rule. | | `\"specific-asset\"` | Overrides the compliance result of a single asset evaluated with the specified policy rule. | | `\"specific-asset-until-next-scan\"` | Overrides the compliance result of a single asset evaluated with the specified policy rule until the next time asset is evaluated against that policy rule. | # noqa: E501 :return: The type of this PolicyOverrideScope. 
# noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this PolicyOverrideScope. The scope of assets affected by the policy override. Can be one of the following values: | Value | Description | | ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | | `\"all-assets\"` | Overrides the compliance result of all assets evaluated with the specified policy rule. | | `\"specific-asset\"` | Overrides the compliance result of a single asset evaluated with the specified policy rule. | | `\"specific-asset-until-next-scan\"` | Overrides the compliance result of a single asset evaluated with the specified policy rule until the next time asset is evaluated against that policy rule. | # noqa: E501 :param type: The type of this PolicyOverrideScope. # noqa: E501 :type: str """ if type is None: raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PolicyOverrideScope, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PolicyOverrideScope): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
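A minimal usage sketch for the generated model above. The import path is assumed from the package layout implied by the module's own imports (`py_insightvm_sdk.models`); the identifier values are made up, and the enumerations mirror the allowed values enforced in the setters.

```python
# Illustrative sketch only: builds a PolicyOverrideScope and serializes it
# with the generated helpers shown above. Module path is assumed from the
# package's own imports; asset/rule identifiers are made-up examples.
from py_insightvm_sdk.models.policy_override_scope import PolicyOverrideScope

scope = PolicyOverrideScope(
    type="specific-asset",      # scope of the override (see the type setter docs)
    asset=42,                   # required when the scope targets a specific asset
    rule=1337,                  # identifier of the policy rule being overridden
    new_result="pass",          # validated against: pass, fail, not-applicable, fixed
    original_result="fail",     # optional; validated against the wider result list
)

print(scope.to_dict())  # plain dict keyed by attribute name
print(scope)            # pprint-formatted via to_str()/__repr__
```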
py
1a46be1483aa26456fceb170be120774dd1a3b34
""" setup for vmnlcli package """ from setuptools import setup, find_packages with open('README.md') as f: long_description = f.read() # remove header, but have one \n before first headline start = long_description.find('# vmnlcli') assert start >= 0 long_description = '\n' + long_description[start:] setup( name='vmnlcli', version='0.2.0', url='http://github.com/thomaswaldmann/velomobielnl/', license='MIT', author='Thomas Waldmann', author_email='[email protected]', description='command line interface for some dutch velomobile websites', long_description=long_description, long_description_content_type='text/markdown', keywords="velomobile odometer cli velomobiel.nl intercitybike.nl welmers.net", packages=find_packages('src'), package_dir={'': 'src'}, include_package_data=True, entry_points={ 'console_scripts': [ 'vmnlcli = vmnlcli:main', ] }, platforms='any', setup_requires=[], install_requires=[ 'requests', ], python_requires='>=3.5', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Utilities', 'Topic :: Database :: Front-Ends', 'Topic :: Internet :: WWW/HTTP :: Site Management', ], )
py
1a46be24fcc07b57bc9b481224557f752f34e7b9
# Generated by Django 2.2.19 on 2021-11-19 15:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0002_post_title'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(max_length=200), ), ]
py
1a46be411a147155e831b13dae6f875edecc9002
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from .....testing import assert_equal from ..gtract import compareTractInclusion def test_compareTractInclusion_inputs(): input_map = dict(args=dict(argstr='%s', ), closeness=dict(argstr='--closeness %f', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), numberOfPoints=dict(argstr='--numberOfPoints %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), standardFiber=dict(argstr='--standardFiber %s', ), terminal_output=dict(nohash=True, ), testFiber=dict(argstr='--testFiber %s', ), testForBijection=dict(argstr='--testForBijection ', ), testForFiberCardinality=dict(argstr='--testForFiberCardinality ', ), writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), ) inputs = compareTractInclusion.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_compareTractInclusion_outputs(): output_map = dict() outputs = compareTractInclusion.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
py
1a46be6409b242b1be7e4e8e2c6c9e98d2d820e2
""" This file offers the methods to automatically retrieve the graph Streptomyces cyaneogriseus subsp. noncyanogenus. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def StreptomycesCyaneogriseusSubspNoncyanogenus( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Streptomyces cyaneogriseus subsp. noncyanogenus graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Streptomyces cyaneogriseus subsp. noncyanogenus graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="StreptomycesCyaneogriseusSubspNoncyanogenus", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
py
1a46bedb0d421e264f29c935a64cab4f033f82e3
''' This profiler uses jstat on 12/12/2014 The processing may be changed with jstat version ''' import sys import subprocess import re import time import pandas as pd import numpy as np import argparse import os argparser = argparse.ArgumentParser() argparser.add_argument('--RunCommand',help='Command to Run the Java file in order to collect profile data') argparser.add_argument('--ProgramName',help='What to search when executing the Java file inorder to indentify the process id') argparser.add_argument('--RunType',help='Run Type Specifies default or tuned') ''' java -jar dacapo-9.12-bach.jar avrora -n 1 ''' class JVMProfiler: def __init__(self,optionset,program,benchmark): self.name = 'JVM_Profiler' self.options = optionset #self.java_program_run_command='java -jar dacapo-9.12-bach.jar '+program+' -n 1' self.java_program_run_command=program self.final_df=pd.DataFrame() if not os.path.exists('jstat_temp_files/'): os.makedirs('jstat_temp_files') def getProfileData(self,tag,program,benchmark,interval,samples,runtype): java_process=subprocess.Popen(self.java_program_run_command,stdout=subprocess.PIPE,shell=True) #print self.java_program_run_command #time.sleep(2) optionset = self.options p = subprocess.Popen('ps -ef | grep java > jstat_temp_files/vmstat.txt', stdout=subprocess.PIPE, shell=True) p_status = p.wait() f = open ('jstat_temp_files/vmstat.txt','r') lines = f.readlines() f.close() subprocesses = [] subprocesses.append(java_process) #time.sleep(5) for line in lines: if ((str(benchmark) or str(':XX') in line) and ('python' not in line) and ('/bin/sh' not in line)): print 'found = ',line words = re.split("\s+", line) process_ID = words[1] ''' Run the subprocesses ''' print 'process id = ',process_ID for option in optionset: jstat_command = 'jstat -'+str(option)+' '+str(process_ID)+' '+str(interval)+' '+str(samples) print 'jstat command = ',jstat_command,'for benchmark = ',benchmark p = subprocess.Popen(jstat_command + ' > jstat_temp_files/jstat_'+str(benchmark)+'_'+str(option)+str(runtype)+'.txt', stdout=subprocess.PIPE, shell=True) subprocesses.append(p) for p in subprocesses: status = p.wait() time.sleep(5) break _class = 'Loaded,LoadedBytes,Unloaded,UnloadedBytes,ClassTime' compiler = 'Compiled,Failed,Invalid,CompilerTime' gc='S0C,S1C,S0U,S1U,EC,EU,OC,OU,PC,PU,YGC,YGCT,FGC,FGCT,GCT' coldict = {'class' : _class, 'compiler':compiler, 'gc':gc} for option in optionset: file = open('jstat_temp_files/jstat_'+str(benchmark)+'_'+str(option)+str(runtype)+'.txt','r') stat_lines = file.read().splitlines() file.close() for linenum in range(0,len(stat_lines)): line = stat_lines[linenum] line = re.sub('\s+',',',line) if linenum !=0: line = line[1:] else: linelist = list(line) if linelist[0] == ',': linelist[0]='' line="".join(linelist) stat_lines[linenum] = line #print type(line) if option == 'compiler': commas = line.count(',') if commas > 5: lastcomma = line.rfind(',') listline = list(line) listline[lastcomma] = '/' line="".join(listline) stat_lines[linenum] = line if option == 'compiler': stat_lines[0]='Compiled,Failed,Invalid,CompilerTime,FailedType,FailedMethod' if option =='class': stat_lines[0]='Loaded,LoadedBytes,Unloaded,UnloadedBytes,ClassTime' statfile = open ('jstat_temp_files/jstat_'+str(benchmark)+'_'+str(option)+str(runtype)+'.csv','w') statfile.write("\n".join(stat_lines)) def getStatistics(self,program,benchmark,runtype): optionset = ['gc','compiler','class'] dataframe_dict = {} _class = ['Loaded','LoadedBytes','Unloaded','UnloadedBytes','ClassTime'] compiler = 
['Compiled','Failed','Invalid','CompilerTime'] gc=['S0C','S1C','S0U','S1U','EC','EU','OC','OU','PC','PU','YGC','YGCT','FGC','FGCT','GCT'] coldict = {'class' : _class, 'compiler':compiler, 'gc':gc} for option in optionset: #print option dataframe_dict[option] = pd.read_csv('jstat_temp_files/jstat_'+str(benchmark)+'_'+str(option)+str(runtype)+'.csv',usecols=coldict[option]) gcdf = dataframe_dict['gc'] gcdf['HU']=(gcdf.S0U+gcdf.S1U+gcdf.EU+gcdf.OU+gcdf.PU)*100/(gcdf.S0C+gcdf.S1C+gcdf.EC+gcdf.OC+gcdf.PC) jitdf=dataframe_dict['compiler'] jitdf['CR']=jitdf.Compiled*100/jitdf.CompilerTime classdf=dataframe_dict['class'] classdf['CLR']=classdf.Loaded*100/classdf.ClassTime dataframe_dict['gc']=gcdf dataframe_dict['compiler']=jitdf dataframe_dict['class']=classdf for option in optionset: #print option dataframe_dict[option].to_csv('jstat_temp_files/jstat_'+str(benchmark)+'_'+str(option)+str(runtype)+'.csv',index=False) if __name__ == '__main__': #programs = ['sunflow','avrora','jython','h2','','luindex','lusearch','pmd','sunflow','tomcat','tradebeans','tradesoap','xalan'] #file = open('OptimizedConfigurations/'+'ALL.txt',"r") args=argparser.parse_args() optionset = ['gc','compiler','class'] profiler = JVMProfiler(optionset,args.RunCommand,args.ProgramName) profiler.getProfileData('dacapo',args.RunCommand,args.ProgramName,10,'',args.RunType) profiler.getStatistics(args.RunCommand,args.ProgramName,args.RunType) ''' program = programs[4] profiler = JVMProfiler(optionset,program) profiler.getProfileData('dacapo',program,250,100) profiler.getStatistics(program) '''
py
1a46bf337b284b5976d04546a8b17035c6cc122f
from enum import Enum from typing import List, NamedTuple, Optional # Callable import random # from math import sqrt from generic_search import dfs, node_to_path, Node, bfs # astar class Cell(str, Enum): EMPTY = " " BLOCKED = "X" START = "S" GOAL = "G" PATH = "*" class MazeLocation(NamedTuple): row: int column: int class Maze: def __init__( self, rows: int = 10, columns: int = 10, sparseness: float = 0.2, start: MazeLocation = MazeLocation(0, 0), goal: MazeLocation = MazeLocation(9, 9), ) -> None: self._rows: int = rows self._columns: int = columns self.start: MazeLocation = start self.goal: MazeLocation = goal # preenche a grade com células vazias self._grid: List[List[Cell]] = [ [Cell.EMPTY for c in range(columns)] for r in range(rows) ] # preenche a grade com células bloqueadas self._randomly_fill(rows, columns, sparseness) # preenche as posições inicial e final self._grid[start.row][start.column] = Cell.START self._grid[goal.row][goal.column] = Cell.GOAL def _randomly_fill(self, rows: int, columns: int, sparseness: float): for row in range(rows): for column in range(columns): if random.uniform(0, 1.0) < sparseness: self._grid[row][column] = Cell.BLOCKED def __str__(self) -> str: output: str = "------------\n" for row in self._grid: output += "|" + "".join([c.value for c in row]) + "|\n" output += "------------\n" return output def goal_test(self, ml: MazeLocation) -> bool: return ml == self.goal # flake8: noqa def successors(self, ml: MazeLocation) -> List[MazeLocation]: locations: List[MazeLocation] = [] if ( ml.row + 1 < self._rows and self._grid[ml.row + 1][ml.column] != Cell.BLOCKED ): locations.append(MazeLocation(ml.row + 1, ml.column)) if ml.row - 1 >= 0 and self._grid[ml.row - 1][ml.column] != Cell.BLOCKED: locations.append(MazeLocation(ml.row - 1, ml.column)) if ( ml.column + 1 < self._columns and self._grid[ml.row][ml.column + 1] != Cell.BLOCKED ): locations.append(MazeLocation(ml.row, ml.column + 1)) if ml.column - 1 >= 0 and self._grid[ml.row][ml.column - 1] != Cell.BLOCKED: locations.append(MazeLocation(ml.row, ml.column - 1)) return locations def mark(self, path: List[MazeLocation]): for maze_location in path: self._grid[maze_location.row][maze_location.column] = Cell.PATH self._grid[self.start.row][self.start.column] = Cell.START self._grid[self.goal.row][self.goal.column] = Cell.GOAL def clear(self, path: List[MazeLocation]): for maze_location in path: self._grid[maze_location.row][maze_location.column] = Cell.EMPTY self._grid[self.start.row][self.start.column] = Cell.START self._grid[self.goal.row][self.goal.column] = Cell.GOAL if __name__ == "__main__": # DFS - Pesquisa em profundidade maze: Maze = Maze() print(maze) solution_one: Optional[Node[MazeLocation]] = dfs( maze.start, maze.goal_test, maze.successors ) if solution_one is None: print("No solution found using depth-first search!") else: path_one: List[MazeLocation] = node_to_path(solution_one) maze.mark(path_one) print(maze) maze.clear(path_one) solution_two: Optional[Node[MazeLocation]] = bfs( maze.start, maze.goal_test, maze.successors ) if solution_two is None: print("No solution found using breadth-first search!") else: path_two: List[MazeLocation] = node_to_path(solution_two) maze.mark(path_two) print(maze) maze.clear(path_two)
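The commented-out `sqrt` and `astar` imports above hint at an A* variant of the same search. Below is a sketch of a Manhattan-distance heuristic that could drive it, assuming `generic_search` exposes `astar(initial, goal_test, successors, heuristic)` in the same style as the `dfs`/`bfs` helpers already used.

```python
# Assumes generic_search.astar(initial, goal_test, successors, heuristic)
# exists alongside dfs/bfs; MazeLocation is the NamedTuple defined above.
from typing import Callable

from generic_search import astar  # assumed signature, see note above


def manhattan_distance(goal: MazeLocation) -> Callable[[MazeLocation], float]:
    """Grid distance to the goal with no diagonal moves allowed."""
    def distance(ml: MazeLocation) -> float:
        xdist: int = abs(ml.column - goal.column)
        ydist: int = abs(ml.row - goal.row)
        return float(xdist + ydist)
    return distance


# Usage, mirroring the dfs/bfs runs in the __main__ block:
# distance = manhattan_distance(maze.goal)
# solution_three = astar(maze.start, maze.goal_test, maze.successors, distance)
# if solution_three: maze.mark(node_to_path(solution_three)); print(maze)
```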
py
1a46bf8d77e6b6f20ebd04e69c679552cb0db33e
""" "vendors" notary into docker and runs integration tests - then builds the docker client binary with an API version compatible with the existing daemon Usage: python docker-integration-test.py This assumes that your docker directory is in $GOPATH/src/github.com/docker/docker and your notary directory, irrespective of where this script is located, is at $GOPATH/src/github.com/docker/notary. """ from __future__ import print_function import os import re import shutil import subprocess import sys def from_gopath(gopkg): """ Gets the location of the go source given go package, based on the $GOPATH. """ gopaths = os.getenv("GOPATH") for path in gopaths.split(":"): maybe_path = os.path.abspath(os.path.expanduser(os.path.join( path, "src", *gopkg.split("/")))) if os.path.isdir(maybe_path): return maybe_path return "" DOCKER_DIR = from_gopath("github.com/docker/docker") NOTARY_DIR = from_gopath("github.com/docker/notary") def fake_vendor(): """ "vendors" notary into docker by copying all of notary into the docker vendor directory - also appending several lines into the Dockerfile because it pulls down notary from github and builds the binaries """ docker_notary_relpath = "vendor/src/github.com/docker/notary" docker_notary_abspath = os.path.join(DOCKER_DIR, docker_notary_relpath) print("copying notary ({0}) into {1}".format(NOTARY_DIR, docker_notary_abspath)) def ignore_dirs(walked_dir, _): """ Don't vendor everything, particularly not the docker directory recursively, if it happened to be in the notary directory """ if walked_dir == NOTARY_DIR: return [".git", ".cover", "docs", "bin"] elif walked_dir == os.path.join(NOTARY_DIR, "fixtures"): return ["compatibility"] return [] if os.path.exists(docker_notary_abspath): shutil.rmtree(docker_notary_abspath) shutil.copytree( NOTARY_DIR, docker_notary_abspath, symlinks=True, ignore=ignore_dirs) # hack this because docker/docker's Dockerfile checks out a particular version of notary # based on a tag or SHA, and we want to build based on what was vendored in dockerfile_addition = ("\n" "RUN set -x && " "export GO15VENDOREXPERIMENT=1 && " "go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server &&" "go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary") with open(os.path.join(DOCKER_DIR, "Dockerfile")) as dockerfile: text = dockerfile.read() if not text.endswith(dockerfile_addition): with open(os.path.join(DOCKER_DIR, "Dockerfile"), 'a+') as dockerfile: dockerfile.write(dockerfile_addition) # hack the makefile so that we tag the built image as something else so we # don't interfere with any other docker test builds with open(os.path.join(DOCKER_DIR, "Makefile"), 'r') as makefile: makefiletext = makefile.read() with open(os.path.join(DOCKER_DIR, "Makefile"), 'wb') as makefile: image_name = os.getenv("DOCKER_TEST_IMAGE_NAME", "notary-docker-vendor-test") text = re.sub("^DOCKER_IMAGE := .+$", "DOCKER_IMAGE := {0}".format(image_name), makefiletext, 1, flags=re.M) makefile.write(text) def run_integration_test(): """ Presumes that the fake vendoring has already happened - this runs the integration tests. 
""" env = os.environ.copy() env["TESTFLAGS"] = '-check.f DockerTrustSuite*' subprocess.check_call( "make test-integration-cli".split(), cwd=DOCKER_DIR, env=env) if __name__ == "__main__": if len(sys.argv) > 1: print("\nWarning: Ignoring all extra arguments: {0}".format(" ".join(sys.argv[1:]))) print("\nUsage: python {0}\n\n".format(sys.argv[0])) if DOCKER_DIR == "": print("ERROR: Could not find github.com/docker/docker in your GOPATH='{0}'" .format(os.getenv("GOPATH"))) sys.exit(1) if NOTARY_DIR == "": print("ERROR: Could not find github.com/docker/notary in your GOPATH='{0}'" .format(os.getenv("GOPATH"))) sys.exit(1) fake_vendor() run_integration_test()
py
1a46c0777cf5d7050288e3023ed40b50704074aa
# -*- coding: utf-8 -*- # Generated by Django 1.11.14 on 2018-10-20 08:02 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('petition', '0010_auto_20181007_1620'), ] operations = [ migrations.AddField( model_name='pytitionuser', name='invitations', field=models.ManyToManyField(blank=True, related_name='invited', to='petition.Organization'), ), ]
py
1a46c08f79e04520d87cbdbf17600310f35005c2
args_count = {'documentclass':(1, 1), 'usepackage':(1, 1), 'title':(1, 0), 'author':(1, 0), 'date':(1, 0), 'color':(1, 0), 'input':(1, 0), 'part':(1, 0), 'chapter':(1, 1), 'section':(1, 1), 'subsection':(1, 1), 'subsubsection':(1, 1), 'paragraph':(1, 1), 'subparagraph':(1, 1), 'chapter*':(1, 1), 'section*':(1, 1), 'subsection*':(1, 1), 'subsubsection*':(1, 1), 'paragraph*':(1, 1), 'subparagraph*':(1, 1), 'setcounter':(2, 0), 'addcontentsline':(3, 0), 'linespread':(1, 0), 'mbox':(1, 0), 'hyphenation':(1, 0), 'todo':(1, 0), 'begin':(1, 0), 'end':(1, 0), 'emph':(1, 0), 'renewcommand':(2, 0), 'left':(1, 0), 'right':(1, 0), 'verb':(1, 0), 'frac':(2, 0), 'textsubscript':(1, 0), 'textsuperscript':(1, 0), 'hspace':(1, 0), 'vspace':(1, 0), 'setlength':(2, 0), 'textcolor':(2, 0), 'pagecolor':(1, 0), 'colorbox':(2, 0), 'fcolorbox':(3, 0), 'textit':(1, 0), 'textbf':(1, 0), 'textrm':(1, 0), 'textsf':(1, 0), 'texttt':(1, 0), 'textup':(1, 0), 'textsl':(1, 0), 'textsc':(1, 0), 'textmd':(1, 0), 'textlf':(1, 0), 'upperaces':(1, 0), 'underline':(1, 0), 'textnormal':(1, 0), 'emph':(1, 0), 'fontsize':(2, 0), 'setmainfont':(1, 1), 'setsansfont':(1, 1), 'addfontfeatures':(1, 0), 'item': (0,1), 'inputencoding': (1,0), 'url':(1,0), 'hat':(1, 0), 'widehat':(1, 0), 'check':(1, 0), 'tilde':(1, 0), 'widetilde':(1, 0), 'acute':(1, 0), 'grave':(1, 0), 'dot':(1, 0), 'ddot':(1, 0), 'breve':(1, 0), 'bar':(1, 0), 'vec':(1, 0), 'resizebox':(3,0), 'scalebox':(2,0), 'cline':(1,0), 'cellcolor':(1,1), 'caption':(1,0), 'includegraphics':(1,1), 'sqrt':(1,1), 'mathrm':(1,0), 'substack':(1,0), } env_opts = {'figure': (0, 1), 'array': (1, 1), 'minipage': (2, 0), 'tabular': (1, 1), 'spacing':(1, 0), 'turn':(1, 0), 'rotate':(1, 0), 'table':(0, 1), 'wrapfigure':(2, 0), 'subfigure':(1, 1),} shortcuts = [('\\-', '\u00AD'), ('---', '\u2014'), ('~', '\u00A0'), ('"=', '\u2010')] verb_tags = ['verb'] verb_envs = ['verbatim', 'alltt', 'lstlisting', 'listing']
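These tables look like configuration for a LaTeX tokenizer or converter defined elsewhere in the project. A hypothetical helper, purely for illustration, showing how the `(required, optional)` argument counts and the verbatim lists might be consulted:

```python
# Hypothetical lookup helpers; the parser that actually consumes these tables
# is not part of this file.
def command_arg_spec(name):
    """Return the (required, optional) argument counts for a command."""
    return args_count.get(name, (0, 0))


def is_verbatim(tag=None, env=None):
    """True if content under this tag/environment should be left unprocessed."""
    return tag in verb_tags or env in verb_envs


assert command_arg_spec('section') == (1, 1)
assert command_arg_spec('frac') == (2, 0)
assert is_verbatim(env='lstlisting')
```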
py
1a46c0e2df153949b3975658bdb919584fa3e8b1
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class BgpPeerStatus(Model): """BGP peer status details. Variables are only populated by the server, and will be ignored when sending a request. :ivar local_address: The virtual network gateway's local address :vartype local_address: str :ivar neighbor: The remote BGP peer :vartype neighbor: str :ivar asn: The autonomous system number of the remote BGP peer :vartype asn: int :ivar state: The BGP peer state. Possible values include: 'Unknown', 'Stopped', 'Idle', 'Connecting', 'Connected' :vartype state: str or ~azure.mgmt.network.v2018_01_01.models.BgpPeerState :ivar connected_duration: For how long the peering has been up :vartype connected_duration: str :ivar routes_received: The number of routes learned from this peer :vartype routes_received: long :ivar messages_sent: The number of BGP messages sent :vartype messages_sent: long :ivar messages_received: The number of BGP messages received :vartype messages_received: long """ _validation = { 'local_address': {'readonly': True}, 'neighbor': {'readonly': True}, 'asn': {'readonly': True}, 'state': {'readonly': True}, 'connected_duration': {'readonly': True}, 'routes_received': {'readonly': True}, 'messages_sent': {'readonly': True}, 'messages_received': {'readonly': True}, } _attribute_map = { 'local_address': {'key': 'localAddress', 'type': 'str'}, 'neighbor': {'key': 'neighbor', 'type': 'str'}, 'asn': {'key': 'asn', 'type': 'int'}, 'state': {'key': 'state', 'type': 'str'}, 'connected_duration': {'key': 'connectedDuration', 'type': 'str'}, 'routes_received': {'key': 'routesReceived', 'type': 'long'}, 'messages_sent': {'key': 'messagesSent', 'type': 'long'}, 'messages_received': {'key': 'messagesReceived', 'type': 'long'}, } def __init__(self, **kwargs) -> None: super(BgpPeerStatus, self).__init__(**kwargs) self.local_address = None self.neighbor = None self.asn = None self.state = None self.connected_duration = None self.routes_received = None self.messages_sent = None self.messages_received = None
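All fields of the model above are read-only and populated by the service, so the meaningful client-side operation is deserialization. A hedged sketch using msrest's `Deserializer`, which is how the generated operations classes typically hydrate such models; the import path and the ability to pass a plain dict are assumptions, and the payload is made up.

```python
# Illustrative only: assumes the azure-mgmt-network package (2018-01-01 models)
# is installed and that msrest's Deserializer accepts a plain dict payload.
from msrest import Deserializer
from azure.mgmt.network.v2018_01_01.models import BgpPeerStatus

payload = {  # made-up example data shaped like the attribute_map above
    "localAddress": "10.0.0.4",
    "neighbor": "10.0.0.5",
    "asn": 65515,
    "state": "Connected",
    "connectedDuration": "01:02:03",
    "routesReceived": 42,
    "messagesSent": 100,
    "messagesReceived": 99,
}

deserialize = Deserializer({"BgpPeerStatus": BgpPeerStatus})
status = deserialize("BgpPeerStatus", payload)
print(status.state, status.routes_received)  # Connected 42
```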
py
1a46c13b224846cc05b834e6c1d3b071bb0044d1
# wiki/tests.py from django.test import TestCase from django.contrib.auth.models import User from django.urls import reverse from wiki.models import Page class PageListViewTests(TestCase): def test_multiple_pages(self): # Make some test data to be displayed on the page. user = User.objects.create() Page.objects.create(title="My Test Page", content="test", author=user) Page.objects.create(title="Another Test Page", content="test", author=user) # Issue a GET request to the MakeWiki homepage. # When we make a request, we get a response back. response = self.client.get('/') # Check that the response is 200 OK. self.assertEqual(response.status_code, 200) # Check that the number of pages passed to the template # matches the number of pages we have in the database. responses = response.context['pages'] self.assertEqual(len(responses), 2) self.assertQuerysetEqual( responses, ['<Page: My Test Page>', '<Page: Another Test Page>'], ordered=False ) class PageDetailViewTests(TestCase): def test_single_page(self): # Create a test page user = User.objects.create() page = Page(title="Test Page", content="Test", author=user) page.save() response = self.client.get(reverse('wiki-details-page', args=[page.slug])) self.assertEqual(response.status_code, 200) def test_new_page_form(self): response = self.client.get(reverse('wiki-new-page')) self.assertIn(b'Title of your page', response.content) def test_create_page(self): user = User.objects.create() args = {'title': "Test Page", 'content': 'TEST', 'author': user.id} response = self.client.post(reverse('wiki-new-page'), args) self.assertEqual(response.status_code, 302) response = self.client.get('/') responses = response.context['pages'] self.assertQuerysetEqual(responses, ['<Page: Test Page>']) class WikiTestCase(TestCase): def test_true_is_true(self): """ Tests if True is equal to True. Should always pass. """ self.assertEqual(True, True) def test_page_slugify_on_save(self): """ Tests the slug generated when saving a Page. """ # Author is a required field in our model. # Create a user for this test and save it to the test database. user = User() user.save() # Create and save a new page to the test database. page = Page(title="My Test Page", content="test", author=user) page.save() # Make sure the slug that was generated in Page.save() # matches what we think it should be. self.assertEqual(page.slug, "my-test-page")
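The tests above imply a `Page` model whose `save()` slugifies the title and whose `__str__` returns the title. A hedged sketch of a `wiki/models.py` consistent with those assertions; field lengths and constraints are guesses, not taken from the real project:

```python
# Hypothetical wiki/models.py inferred from the tests above; the actual
# project model may differ (field lengths, uniqueness, related_name, etc.).
from django.contrib.auth.models import User
from django.db import models
from django.utils.text import slugify


class Page(models.Model):
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, blank=True)
    content = models.TextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        # Matches test_page_slugify_on_save: "My Test Page" -> "my-test-page"
        if not self.slug:
            self.slug = slugify(self.title)
        super().save(*args, **kwargs)

    def __str__(self):
        # Matches the "<Page: My Test Page>" representations asserted above
        return self.title
```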
py
1a46c2a0df196a5dcedb62e47a698bcfce348f67
#!/usr/bin/env python3 from contextlib import ExitStack from time import sleep from urllib.request import urlopen import argparse import json import random import sys import yaml from plumbum import local from dyno_cluster import DynoCluster, DynoSpec from func_test import comparison_test from utils import generate_ips, setup_temp_dir, sleep_with_animation from redis_node import RedisNode REDIS_PORT = 1212 STATS_PORT = 22222 SETTLE_TIME = 3 def main(): parser = argparse.ArgumentParser( description='Autogenerates a Dynomite cluster and runs functional ' + 'tests against it') parser.add_argument('dynospec_file', default='test/dyno_spec_file.yaml', help='YAML file describing desired cluster', nargs='?') args = parser.parse_args() # Setup a temporary directory to store logs and configs for this cluster. temp = setup_temp_dir() specs = parse_dynospec_file(args.dynospec_file) # Create a standalone Redis node. standalone_redis_ip = redis_ip(len(specs)) standalone_redis = RedisNode(standalone_redis_ip, REDIS_PORT) # Create a Dynomite cluster. dynomite_cluster = DynoCluster.fromDynomiteSpecs(specs, launch_nodes=False) with ExitStack() as stack: # Make sure to change the working directory to the temp dir before running the # tests. stack.enter_context(local.cwd(temp)) # Launch the standalone Redis node and the dynomite cluster. stack.enter_context(standalone_redis) stack.enter_context(dynomite_cluster) # Wait for a while for the above nodes to start. sleep_with_animation(SETTLE_TIME, "Waiting for cluster to start") # Run all the functional comparison tests. comparison_test(standalone_redis, dynomite_cluster, False) random_node = random.choice(dynomite_cluster.nodes) stats_url = 'http://{}:{}/info'.format(random_node.ip, STATS_PORT) json.loads(urlopen(stats_url).read().decode('ascii')) def redis_ip(dyno_node_count): assert dyno_node_count < 254 return "127.0.0.254" def parse_dynospec_file(filename): with open(filename, 'r') as f: specs = yaml.safe_load(f) return [DynoSpec(**dct) for dct in specs] if __name__ == '__main__': sys.exit(main())
py
1a46c2aa23c0f04b68ffc28967c88d417458a373
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import re import threading import time import sys import math import signal import configparser import audioop import subprocess as sp import argparse import os import os.path import pymumble_py3 as pymumble import pymumble_py3.constants import variables as var import logging import logging.handlers import traceback import struct from packaging import version import util import command import constants import media.playlist from constants import tr_cli as tr from database import SettingsDatabase, MusicDatabase, DatabaseMigration from media.item import ValidationFailedError, PreparationFailedError from media.cache import MusicCache class MumbleBot: version = 'git' def __init__(self, args): self.log = logging.getLogger("bot") self.log.info(f"bot: botamusique version {self.get_version()}, starting...") signal.signal(signal.SIGINT, self.ctrl_caught) self.cmd_handle = {} self.stereo = var.config.getboolean('bot', 'stereo', fallback=True) if args.channel: self.channel = args.channel else: self.channel = var.config.get("server", "channel", fallback=None) var.user = args.user var.is_proxified = var.config.getboolean( "webinterface", "is_web_proxified") # Flags to indicate the bot is exiting (Ctrl-C, or !kill) self.exit = False self.nb_exit = 0 # Related to ffmpeg thread self.thread = None self.thread_stderr = None self.read_pcm_size = 0 self.pcm_buffer_size = 0 self.last_ffmpeg_err = "" # Play/pause status self.is_pause = False self.pause_at_id = "" self.playhead = -1 # current position in a song. self.song_start_at = -1 self.wait_for_ready = False # flag for the loop are waiting for download to complete in the other thread # self.on_interrupting = False if args.host: host = args.host else: host = var.config.get("server", "host") if args.port: port = args.port else: port = var.config.getint("server", "port") if args.password: password = args.password else: password = var.config.get("server", "password") if args.channel: self.channel = args.channel else: self.channel = var.config.get("server", "channel") if args.certificate: certificate = args.certificate else: certificate = util.solve_filepath(var.config.get("server", "certificate")) if args.tokens: tokens = args.tokens else: tokens = var.config.get("server", "tokens") tokens = tokens.split(',') if args.user: self.username = args.user else: self.username = var.config.get("bot", "username") if args.bandwidth: self.bandwidth = args.bandwidth else: self.bandwidth = var.config.getint("bot", "bandwidth") self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens, stereo=self.stereo, debug=var.config.getboolean('debug', 'mumbleConnection'), certfile=certificate) self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received) self.mumble.set_codec_profile("audio") self.mumble.start() # start the mumble thread self.mumble.is_ready() # wait for the connection if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED: exit() self.set_comment() self.mumble.users.myself.unmute() # by sure the user is not muted self.join_channel() self.mumble.set_bandwidth(self.bandwidth) # ====== Volume ====== self.volume_helper = util.VolumeHelper() _volume = var.config.getfloat('bot', 'volume', fallback=0.8) if var.db.has_option('bot', 'volume'): _volume = var.db.getfloat('bot', 'volume') self.volume_helper.set_volume(_volume) self.is_ducking = False self.on_ducking = False self.ducking_release = time.time() 
self.last_volume_cycle_time = time.time() self._ducking_volume = 0 _ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.50) _ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume) self.volume_helper.set_ducking_volume(_ducking_volume) self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000) self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold) if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \ or var.config.getboolean("bot", "ducking"): self.is_ducking = True self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED, self.ducking_sound_received) self.mumble.set_receive_sound(True) assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \ "Unknown action for when_nobody_in_channel" if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']: user_change_callback = \ lambda user, action: threading.Thread(target=self.users_changed, args=(user, action), daemon=True).start() self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback) self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback) # Debug use self._loop_status = 'Idle' self._display_rms = False self._max_rms = 0 self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True) if var.config.getboolean("bot", "auto_check_update"): def check_update(): nonlocal self new_version, changelog = util.check_update(self.get_version()) if new_version: self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog)) th = threading.Thread(target=check_update, name="UpdateThread") th.daemon = True th.start() last_startup_version = var.db.get("bot", "version", fallback=None) if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version): var.db.set("bot", "version", self.version) changelog = util.fetch_changelog() self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog)) # Set the CTRL+C shortcut def ctrl_caught(self, signal, frame): self.log.info( "\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit)) if var.config.getboolean('bot', 'save_playlist', fallback=True) \ and var.config.get("bot", "save_music_library", fallback=True): self.log.info("bot: save playlist into database") var.playlist.save() if self.nb_exit > 1: self.log.info("Forced Quit") sys.exit(0) self.nb_exit += 1 self.exit = True def get_version(self): if self.version != "git": return self.version else: return util.get_snapshot_version() def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False): cmds = cmd.split(",") for command in cmds: command = command.strip() if command: self.cmd_handle[command] = {'handle': handle, 'partial_match': not no_partial_match, 'access_outside_channel': access_outside_channel, 'admin': admin} self.log.debug("bot: command added: " + command) def set_comment(self): self.mumble.users.myself.comment(var.config.get('bot', 'comment')) def join_channel(self): if self.channel: if '/' in self.channel: self.mumble.channels.find_by_tree(self.channel.split('/')).move_in() else: self.mumble.channels.find_by_name(self.channel).move_in() # ======================= # Message # ======================= # All 
text send to the chat is analysed by this function def message_received(self, text): raw_message = text.message.strip() message = re.sub(r'<.*?>', '', raw_message) user = self.mumble.users[text.actor]['name'] if var.config.getboolean('commands', 'split_username_at_space'): # in can you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes , # you want to split the username user = user.split()[0] if message[0] in var.config.get('commands', 'command_symbol'): # remove the symbol from the message message = message[1:].split(' ', 1) # use the first word as a command, the others one as parameters if len(message) > 0: command = message[0].lower() parameter = '' if len(message) > 1: parameter = message[1].rstrip() else: return self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user) # Anti stupid guy function if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session: self.mumble.users[text.actor].send_text_message( tr('pm_not_allowed')) return for i in var.db.items("user_ban"): if user.lower() == i[0]: self.mumble.users[text.actor].send_text_message( tr('user_ban')) return if not self.is_admin(user) and parameter: input_url = util.get_url_from_input(parameter) if input_url and var.db.has_option('url_ban', input_url): self.mumble.users[text.actor].send_text_message( tr('url_ban')) return command_exc = "" try: if command in self.cmd_handle: command_exc = command else: # try partial match cmds = self.cmd_handle.keys() matches = [] for cmd in cmds: if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']: matches.append(cmd) if len(matches) == 1: self.log.info("bot: {:s} matches {:s}".format(command, matches[0])) command_exc = matches[0] elif len(matches) > 1: self.mumble.users[text.actor].send_text_message( tr('which_command', commands="<br>".join(matches))) return else: self.mumble.users[text.actor].send_text_message( tr('bad_command', command=command)) return if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user): self.mumble.users[text.actor].send_text_message(tr('not_admin')) return if not self.cmd_handle[command_exc]['access_outside_channel'] \ and not self.is_admin(user) \ and not var.config.getboolean('bot', 'allow_other_channel_message') \ and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']: self.mumble.users[text.actor].send_text_message( tr('not_in_my_channel')) return self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter) except: error_traceback = traceback.format_exc() error = error_traceback.rstrip().split("\n")[-1] self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n") self.send_msg(tr('error_executing_command', command=command_exc, error=error), text) def send_msg(self, msg, text): msg = msg.encode('utf-8', 'ignore').decode('utf-8') # text if the object message, contain information if direct message or channel message self.mumble.users[text.actor].send_text_message(msg) def send_channel_msg(self, msg): msg = msg.encode('utf-8', 'ignore').decode('utf-8') own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']] own_channel.send_text_message(msg) @staticmethod def is_admin(user): list_admin = var.config.get('bot', 'admin').rstrip().split(';') if user in list_admin: return True else: return False # ======================= # Users changed # ======================= def users_changed(self, user, message): own_channel = 
self.mumble.channels[self.mumble.users.myself['channel_id']] # only check if there is one more user currently in the channel # else when the music is paused and somebody joins, music would start playing again if len(own_channel.get_users()) == 2: if var.config.get("bot", "when_nobody_in_channel") == "pause_resume": self.resume() elif var.config.get("bot", "when_nobody_in_channel") == "pause" and self.is_pause: self.send_channel_msg(tr("auto_paused")) elif len(own_channel.get_users()) == 1 and len(var.playlist) != 0: # if the bot is the only user left in the channel and the playlist isn't empty self.log.info('bot: Other users in the channel left. Stopping music now.') if var.config.get("bot", "when_nobody_in_channel") == "stop": self.clear() else: self.pause() # ======================= # Launch and Download # ======================= def launch_music(self, music_wrapper, start_from=0): assert music_wrapper.is_ready() uri = music_wrapper.uri() self.log.info("bot: play music " + music_wrapper.format_debug_string()) if var.config.getboolean('bot', 'announce_current_music'): self.send_channel_msg(music_wrapper.format_current_playing()) if var.config.getboolean('debug', 'ffmpeg'): ffmpeg_debug = "debug" else: ffmpeg_debug = "warning" channels = 2 if self.stereo else 1 self.pcm_buffer_size = 960 * channels command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i', uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-') self.log.debug("bot: execute ffmpeg command: " + " ".join(command)) # The ffmpeg process is a thread # prepare pipe for catching stderr of ffmpeg if self.redirect_ffmpeg_log: pipe_rd, pipe_wd = util.pipe_no_wait() # Let the pipe work in non-blocking mode self.thread_stderr = os.fdopen(pipe_rd) else: pipe_rd, pipe_wd = None, None self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size) def async_download_next(self): # Function start if the next music isn't ready # Do nothing in case the next music is already downloaded self.log.debug("bot: Async download next asked ") while var.playlist.next_item(): # usually, all validation will be done when adding to the list. # however, for performance consideration, youtube playlist won't be validate when added. # the validation has to be done here. 
next = var.playlist.next_item() try: if not next.is_ready(): self.async_download(next) break except ValidationFailedError as e: self.send_channel_msg(e.msg) var.playlist.remove_by_id(next.id) var.cache.free_and_delete(next.id) def async_download(self, item): th = threading.Thread( target=self._download, name="Prepare-" + item.id[:7], args=(item,)) self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}") th.daemon = True th.start() return th def start_download(self, item): if not item.is_ready(): self.log.info("bot: current music isn't ready, start downloading.") self.async_download(item) self.send_channel_msg( tr('download_in_progress', item=item.format_title())) def _download(self, item): ver = item.version try: item.validate() if item.is_ready(): return True except ValidationFailedError as e: self.send_channel_msg(e.msg) var.playlist.remove_by_id(item.id) var.cache.free_and_delete(item.id) return False try: item.prepare() if item.version > ver: var.playlist.version += 1 return True except PreparationFailedError as e: self.send_channel_msg(e.msg) return False # ======================= # Loop # ======================= # Main loop of the Bot def loop(self): while not self.exit and self.mumble.is_alive(): while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit: # If the buffer isn't empty, I cannot send new music part, so I wait self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}' time.sleep(0.01) raw_music = None if self.thread: # I get raw from ffmpeg thread # move playhead forward self._loop_status = 'Reading raw' if self.song_start_at == -1: self.song_start_at = time.time() - self.playhead self.playhead = time.time() - self.song_start_at raw_music = self.thread.stdout.read(self.pcm_buffer_size) self.read_pcm_size += len(raw_music) if self.redirect_ffmpeg_log: try: self.last_ffmpeg_err = self.thread_stderr.readline() if self.last_ffmpeg_err: self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n")) except: pass if raw_music: # Adjust the volume and send it to mumble self.volume_cycle() if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size: self.mumble.sound_output.add_sound( audioop.mul(raw_music, 2, self.volume_helper.real_volume)) elif self.read_pcm_size == 0: self.mumble.sound_output.add_sound( audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume)) elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size: self.mumble.sound_output.add_sound( audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume)) self.thread.kill() self.thread = None time.sleep(0.1) self.on_interrupting = False else: time.sleep(0.1) else: time.sleep(0.1) if not self.is_pause and not raw_music: self.thread = None # bot is not paused, but ffmpeg thread has gone. # indicate that last song has finished, or the bot just resumed from pause, or something is wrong. if self.read_pcm_size < self.pcm_buffer_size \ and var.playlist.current_index != -1 \ and self.last_ffmpeg_err: current = var.playlist.current_item() self.log.error("bot: cannot play music %s", current.format_debug_string()) self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err) self.last_ffmpeg_err = "" self.send_channel_msg(tr('unable_play', item=current.format_title())) var.playlist.remove_by_id(current.id) var.cache.free_and_delete(current.id) # move to the next song. 
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song. if var.playlist.next(): current = var.playlist.current_item() self.log.debug(f"bot: next into the song: {current.format_debug_string()}") try: self.start_download(current) self.wait_for_ready = True self.song_start_at = -1 self.playhead = 0 except ValidationFailedError as e: self.send_channel_msg(e.msg) var.playlist.remove_by_id(current.id) var.cache.free_and_delete(current.id) else: self._loop_status = 'Empty queue' else: # if wait_for_ready flag is true, means the pointer is already # pointing to target song. start playing current = var.playlist.current_item() if current: if current.is_ready(): self.wait_for_ready = False self.read_pcm_size = 0 self.launch_music(current, self.playhead) self.last_volume_cycle_time = time.time() self.async_download_next() elif current.is_failed(): var.playlist.remove_by_id(current.id) self.wait_for_ready = False else: self._loop_status = 'Wait for the next item to be ready' else: self.wait_for_ready = False while self.mumble.sound_output.get_buffer_size() > 0 and self.mumble.is_alive(): # Empty the buffer before exit time.sleep(0.01) time.sleep(0.5) if self.exit: self._loop_status = "exited" if var.config.getboolean('bot', 'save_playlist', fallback=True) \ and var.config.get("bot", "save_music_library", fallback=True): self.log.info("bot: save playlist into database") var.playlist.save() def volume_cycle(self): delta = time.time() - self.last_volume_cycle_time if self.on_ducking and self.ducking_release < time.time(): self.on_ducking = False self._max_rms = 0 if delta > 0.001: if self.is_ducking and self.on_ducking: self.volume_helper.real_volume = \ (self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \ + self.volume_helper.ducking_volume_set else: self.volume_helper.real_volume = self.volume_helper.volume_set - \ (self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5) self.last_volume_cycle_time = time.time() def ducking_sound_received(self, user, sound): rms = audioop.rms(sound.pcm, 2) self._max_rms = max(rms, self._max_rms) if self._display_rms: if rms < self.ducking_threshold: print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r') else: print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200) + '+' * int((rms - self.ducking_threshold) / 200), end='\r') if rms > self.ducking_threshold: if self.on_ducking is False: self.log.debug("bot: ducking triggered") self.on_ducking = True self.ducking_release = time.time() + 1 # ducking release after 1s def _fadeout(self, _pcm_data, stereo=False, fadein=False): pcm_data = bytearray(_pcm_data) if stereo: if not fadein: mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 4))] else: mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))] for i in range(int(len(pcm_data) / 4)): pcm_data[4 * i:4 * i + 2] = struct.pack("<h", round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i])) pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round( struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i])) else: mask = [math.exp(-x / 60) for x in range(0, int(len(pcm_data) / 2))] for i in range(int(len(pcm_data) / 2)): pcm_data[2 * i:2 * i + 2] = struct.pack("<h", round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i])) return bytes(pcm_data) + bytes(len(pcm_data)) # ======================= # Play Control # ======================= def play(self, index=-1, 
start_at=0): if not self.is_pause: self.interrupt() if index != -1: var.playlist.point_to(index) current = var.playlist.current_item() self.start_download(current) self.is_pause = False self.wait_for_ready = True self.song_start_at = -1 self.playhead = start_at def clear(self): # Kill the ffmpeg thread and empty the playlist self.interrupt() var.playlist.clear() self.wait_for_ready = False self.log.info("bot: music stopped. playlist trashed.") def stop(self): self.interrupt() self.is_pause = True if len(var.playlist) > 0: self.wait_for_ready = True else: self.wait_for_ready = False self.log.info("bot: music stopped.") def interrupt(self): # Kill the ffmpeg thread if self.thread: self.on_interrupting = True time.sleep(0.1) self.song_start_at = -1 self.read_pcm_size = 0 def pause(self): # Kill the ffmpeg thread self.interrupt() self.is_pause = True self.song_start_at = -1 if len(var.playlist) > 0: self.pause_at_id = var.playlist.current_item().id self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.") def resume(self): self.is_pause = False if var.playlist.current_index == -1: var.playlist.next() self.playhead = 0 return music_wrapper = var.playlist.current_item() if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready(): self.playhead = 0 return self.wait_for_ready = True self.pause_at_id = "" def start_web_interface(addr, port): global formatter import interface # setup logger werkzeug_logger = logging.getLogger('werkzeug') logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile')) if logfile: handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB else: handler = logging.StreamHandler() werkzeug_logger.addHandler(handler) interface.init_proxy() interface.web.env = 'development' interface.web.secret_key = var.config.get('webinterface', 'flask_secret') interface.web.run(port=port, host=addr) if __name__ == '__main__': supported_languages = util.get_supported_language() parser = argparse.ArgumentParser( description='Bot for playing music on Mumble') # General arguments parser.add_argument("--config", dest='config', type=str, default='configuration.ini', help='Load configuration from this file. Default: configuration.ini') parser.add_argument("--db", dest='db', type=str, default=None, help='Settings database file') parser.add_argument("--music-db", dest='music_db', type=str, default=None, help='Music library database file') parser.add_argument("--lang", dest='lang', type=str, default=None, help='Preferred language. 
Support ' + ", ".join(supported_languages)) parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="Only Error logs") parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Show debug log") # Mumble arguments parser.add_argument("-s", "--server", dest="host", type=str, help="Hostname of the Mumble server") parser.add_argument("-u", "--user", dest="user", type=str, help="Username for the bot") parser.add_argument("-P", "--password", dest="password", type=str, help="Server password, if required") parser.add_argument("-T", "--tokens", dest="tokens", type=str, help="Server tokens to enter a channel, if required (multiple entries separated with comma ','") parser.add_argument("-p", "--port", dest="port", type=int, help="Port for the Mumble server") parser.add_argument("-c", "--channel", dest="channel", type=str, help="Default channel for the bot") parser.add_argument("-C", "--cert", dest="certificate", type=str, default=None, help="Certificate file") parser.add_argument("-b", "--bandwidth", dest="bandwidth", type=int, help="Bandwidth used by the bot") args = parser.parse_args() # ====================== # Load Config # ====================== config = configparser.ConfigParser(interpolation=None, allow_no_value=True) var.config = config parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)], encoding='utf-8') if len(parsed_configs) == 0: logging.error('Could not read configuration from file \"{}\"'.format(args.config)) sys.exit() # ====================== # Setup Logger # ====================== bot_logger = logging.getLogger("bot") bot_logger.setLevel(logging.INFO) if args.verbose: bot_logger.setLevel(logging.DEBUG) bot_logger.debug("Starting in DEBUG loglevel") elif args.quiet: bot_logger.setLevel(logging.ERROR) bot_logger.error("Starting in ERROR loglevel") logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip()) handler = None if logfile: print(f"Redirecting stdout and stderr to log file: {logfile}") handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB sys.stdout = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stdout.buffer) sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stderr.buffer) else: handler = logging.StreamHandler() util.set_logging_formatter(handler, bot_logger.level) bot_logger.addHandler(handler) logging.getLogger("root").addHandler(handler) var.bot_logger = bot_logger # ====================== # Load Database # ====================== if args.user: username = args.user else: username = var.config.get("bot", "username") sanitized_username = "".join([x if x.isalnum() else "_" for x in username]) var.settings_db_path = args.db if args.db is not None else util.solve_filepath( config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db")) var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath( config.get("bot", "music_database_path", fallback="music.db")) var.db = SettingsDatabase(var.settings_db_path) if var.config.get("bot", "save_music_library", fallback=True): var.music_db = MusicDatabase(var.music_db_path) else: var.music_db = MusicDatabase(":memory:") DatabaseMigration(var.db, var.music_db).migrate() var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder')) if not var.music_folder.endswith(os.sep): # The file searching logic assumes that the music folder ends in a / var.music_folder = 
var.music_folder + os.sep var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder')) # ====================== # Translation # ====================== lang = "" if args.lang: lang = args.lang else: lang = var.config.get('bot', 'language', fallback='en_US') if lang not in supported_languages: raise KeyError(f"Unsupported language {lang}") var.language = lang constants.load_lang(lang) # ====================== # Prepare Cache # ====================== var.cache = MusicCache(var.music_db) if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True): var.cache.build_dir_cache() # ====================== # Load playback mode # ====================== playback_mode = None if var.db.has_option("playlist", "playback_mode"): playback_mode = var.db.get('playlist', 'playback_mode') else: playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot") if playback_mode in ["one-shot", "repeat", "random", "autoplay"]: var.playlist = media.playlist.get_playlist(playback_mode) else: raise KeyError(f"Unknown playback mode '{playback_mode}'") # ====================== # Create bot instance # ====================== var.bot = MumbleBot(args) command.register_all_commands(var.bot) # load playlist if var.config.getboolean('bot', 'save_playlist', fallback=True): var.bot_logger.info("bot: load playlist from previous session") var.playlist.load() # ============================ # Start the web interface # ============================ if var.config.getboolean("webinterface", "enabled"): wi_addr = var.config.get("webinterface", "listening_addr") wi_port = var.config.getint("webinterface", "listening_port") tt = threading.Thread( target=start_web_interface, name="WebThread", args=(wi_addr, wi_port)) tt.daemon = True bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port)) tt.start() # Start the main loop. var.bot.loop()
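# --- Illustrative sketch (not part of the bot above) -------------------------
# volume_cycle() above moves the effective playback volume toward a target with
# first-order exponential smoothing: toward ducking_volume_set with a 0.2 s time
# constant while someone is speaking, and back toward volume_set with a 0.5 s
# time constant otherwise. The standalone helper below restates that formula
# under those assumptions; the name smooth_toward and the demo numbers are
# illustrative and are not part of the bot's code.
import math


def smooth_toward(current, target, dt, tau):
    """Return `current` moved toward `target` after `dt` seconds with time constant `tau`."""
    return target + (current - target) * math.exp(-dt / tau)


if __name__ == "__main__":
    volume, ducked = 1.0, 0.5
    # Simulate one second of ducking in 10 ms steps, roughly what the loop above does.
    for _ in range(100):
        volume = smooth_toward(volume, ducked, 0.01, 0.2)
    print(f"volume after 1 s of ducking: {volume:.3f}")  # converges close to 0.5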
py
1a46c3411668a8c7e851859c926918e3313b9c92
# Copyright 2021 (David) Siu-Kei Muk. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from afb.core.primitives.dict_lib import config from afb.core.primitives.dict_lib import direct FACTORIES = { "from-config": config.get_load_config, "direct": direct.get_direct, }
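# --- Illustrative sketch (not part of afb) -----------------------------------
# FACTORIES above is a plain name -> getter mapping: each key ("from-config",
# "direct") points at the callable that produces the corresponding factory.
# The toy registry below only demonstrates that lookup-and-call pattern with
# hypothetical entries; it does not reflect the real signatures of
# config.get_load_config or direct.get_direct.
import ast
import json

TOY_FACTORIES = {
    "json": lambda: json.loads,
    "literal": lambda: ast.literal_eval,
}


def resolve(name):
    """Look up a getter by name and call it to obtain the factory itself."""
    try:
        getter = TOY_FACTORIES[name]
    except KeyError:
        raise KeyError(f"unknown factory {name!r}; known: {sorted(TOY_FACTORIES)}") from None
    return getter()


if __name__ == "__main__":
    loader = resolve("json")
    print(loader('{"answer": 42}'))  # {'answer': 42}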
py
1a46c37ef269c2fde39ccc5cc2ef258ac07e8445
# For each test case: read the length and the list, collect every element that
# is not the maximum (prepending, so they end up in reverse input order), and
# print the maximum followed by those elements only if the maximum sits at
# either end of the list; otherwise print -1.
for _ in range(int(input())):
    n = int(input())  # length of the list; read to consume the input line
    l = list(map(int, input().split()))
    m = max(l)
    ans = []
    for i in l:
        if i != m:
            ans = [i] + ans
    if l[0] == m or l[-1] == m:
        print(m, *ans)
    else:
        print(-1)
py
1a46c59f9c4d96c9da3dd06155ea0862319e8503
import os
import importlib.util
from setuptools import setup

# Boilerplate to load commonalities
spec = importlib.util.spec_from_file_location(
    "setup_common", os.path.join(os.path.dirname(__file__), "setup_common.py")
)
common = importlib.util.module_from_spec(spec)
spec.loader.exec_module(common)

common.KWARGS["install_requires"] += [
    "aiohttp>=3.5.4",
    "bandit>=1.6.2",
    "safety>=1.8.5",
]
common.KWARGS["entry_points"] = {
    "console_scripts": ["shouldi = shouldi.cli:ShouldI.main"],
    "dffml.operation": [
        "run_bandit = shouldi.python.bandit:run_bandit",
        "safety_check = shouldi.python.safety:safety_check",
        "pypi_package_json = shouldi.python.pypi:pypi_package_json",
        "pypi_package_contents = shouldi.python.pypi:pypi_package_contents",
        "cleanup_pypi_package = shouldi.python.pypi:cleanup_pypi_package",
    ],
}

# Hiding down here, away from the operations tutorial
common.KWARGS["install_requires"] += [
    "PyYAML>=5.1.2",
]
common.KWARGS["entry_points"].update(
    {
        "shouldi.project.bom.db": [
            "yaml = shouldi.project.bom.db.yaml:YAMLDB",
            "pypi = shouldi.project.bom.db.pypi:PyPiDB",
        ]
    }
)

setup(**common.KWARGS)
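# --- Illustrative sketch (not part of shouldi) --------------------------------
# The entry_points declared above (console_scripts, "dffml.operation",
# "shouldi.project.bom.db") are what plugin loaders discover once the package
# is installed. Below is a hedged sketch of listing one such group with the
# standard-library importlib.metadata; the group name comes from the setup()
# call above, while the helper itself is illustrative.
from importlib.metadata import entry_points


def list_group(group):
    """Return {name: "module:attr"} for every installed entry point in `group`."""
    eps = entry_points()
    # Python 3.10+ supports .select(); older versions return a dict of groups.
    if hasattr(eps, "select"):
        selected = eps.select(group=group)
    else:
        selected = eps.get(group, ())
    return {ep.name: ep.value for ep in selected}


if __name__ == "__main__":
    # Empty until the package is installed; afterwards it lists the operations.
    print(list_group("dffml.operation"))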
py
1a46c5f3b1c07f6dec983ed7aae6355e8b8a61a9
#!/usr/bin/env python3 # Copyright (c) 2019-2020 The YEP developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # -*- coding: utf-8 -*- from time import sleep from test_framework.test_framework import YepTestFramework from test_framework.util import set_node_times, assert_equal class YEP_RPCSporkTest(YepTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[]] * self.num_nodes self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi') def setup_chain(self): # Start with clean chain self._initialize_chain_clean() self.enable_mocktime() def log_title(self): title = "*** Starting %s ***" % self.__class__.__name__ underline = "-" * len(title) description = "Performs tests on the Spork RPC" self.log.info("\n\n%s\n%s\n%s\n", title, underline, description) def run_test(self): self.log_title() set_node_times(self.nodes, self.mocktime) sporkName = "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT" # 0 - check SPORK 8 status from node 1 (must be inactive) assert_equal(False, self.is_spork_active(1, sporkName)) # 1 - activate SPORK 8 with nodes[0] assert_equal("success", self.activate_spork(0, sporkName)) sleep(1) # check SPORK 8 status from nodes[1] (must be active) assert_equal(True, self.is_spork_active(1, sporkName)) # 2 - Adjust time to 1 sec in the future and deactivate SPORK 8 with node[0] self.mocktime += 1 set_node_times(self.nodes, self.mocktime) assert_equal("success", self.deactivate_spork(0, sporkName)) sleep(1) # check SPORK 8 value from nodes[1] (must be inactive again) assert_equal(False, self.is_spork_active(1, sporkName)) # 3 - Adjust time to 1 sec in the future and set new value (mocktime) for SPORK 8 with node[0] self.mocktime += 1 set_node_times(self.nodes, self.mocktime) assert_equal("success", self.set_spork(0, sporkName, self.mocktime)) sleep(1) # check SPORK 8 value from nodes[1] (must be equal to mocktime) assert_equal(self.mocktime, self.get_spork(1, sporkName)) # 4 - Stop nodes and check value again after restart self.log.info("Stopping nodes...") self.stop_nodes() self.log.info("Restarting node 1...") self.start_node(1, []) assert_equal(self.mocktime, self.get_spork(1, sporkName)) self.log.info("%s: TEST PASSED" % self.__class__.__name__) if __name__ == '__main__': YEP_RPCSporkTest().main()
py
1a46c60f9e6274175e5cfef94830c58859c2c2de
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V. # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # We kindly request you to use one or more of the following phrases to refer to # foxBMS in your hardware, software, documentation or advertising materials: # # - "This product uses parts of foxBMS®" # - "This product includes parts of foxBMS®" # - "This product is derived from foxBMS®" # pylint: disable=invalid-name """Testing 'python-dateutil' package""" import logging import argparse # packages needed for tests import datetime # package to test import dateutil import dateutil.relativedelta def main(): """Testing 'python-dateutil' package""" parser = argparse.ArgumentParser() parser.add_argument( "-v", "--verbosity", dest="verbosity", action="count", default=0, help="set verbosity level", ) args = parser.parse_args() if args.verbosity == 1: logging.basicConfig(level=logging.INFO) elif args.verbosity > 1: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.ERROR) logging.debug( datetime.datetime.now() + dateutil.relativedelta.relativedelta(months=+1) ) if __name__ == "__main__": main()
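# --- Illustrative sketch (not part of the test above) -------------------------
# The script above only logs datetime.datetime.now() shifted by one calendar
# month via dateutil's relativedelta. The lines below run the same operation on
# fixed dates so the calendar-aware behaviour (month-end clamping) is visible;
# the dates are arbitrary examples.
import datetime

from dateutil.relativedelta import relativedelta

print(datetime.date(2021, 1, 15) + relativedelta(months=+1))  # 2021-02-15
print(datetime.date(2021, 1, 31) + relativedelta(months=+1))  # 2021-02-28 (clamped to the last day of February)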
py
1a46c6ea05b38c0d22252a01e18bf2c6590041bf
#!/usr/bin/env python3 """ Fonctions principales d'assistant de paris """ import copy import datetime import requests import socket import sys import traceback import urllib import urllib.error import urllib.request from itertools import combinations, permutations, product from multiprocessing.pool import ThreadPool from pprint import pprint import numpy as np import selenium import selenium.common import tabulate import sportsbetting as sb from sportsbetting import selenium_init from sportsbetting.database_functions import (get_id_from_competition_name, get_competition_by_id, import_teams_by_url, import_teams_by_sport, import_teams_by_competition_id_thesportsdb) from sportsbetting.parser_functions import parse from sportsbetting.auxiliary_functions import (valid_odds, format_team_names, merge_dict_odds, afficher_mises_combine, cotes_combine_all_sites, defined_bets, binomial, best_match_base, filter_dict_dates, get_nb_outcomes, best_combine_reduit, filter_dict_minimum_odd, cotes_combine_reduit_all_sites, copy_to_clipboard) from sportsbetting.basic_functions import (gain2, mises2, gain, mises, mises_freebet, cotes_freebet, gain_pari_rembourse_si_perdant, gain_freebet2, mises_freebet2, mises_pari_rembourse_si_perdant, gain_promo_gain_cote, mises_promo_gain_cote, gain_gains_nets_boostes, mises_gains_nets_boostes, gain3, mises3, cotes_combine_optimise, gain_defi_rembourse_ou_gagnant, mises_defi_rembourse_ou_gagnant) from sportsbetting.lambda_functions import get_best_odds, get_profit def parse_competition(competition, sport, *sites): """ Retourne les cotes d'une competition donnée pour un ou plusieurs sites de paris. Si aucun site n'est choisi, le parsing se fait sur l'ensemble des bookmakers reconnus par l'ARJEL """ if sb.ABORT: raise sb.AbortException try: _id = get_id_from_competition_name(competition, sport) except TypeError: print("Competition inconnue") return print(competition, *sites) if not sites: sites = sb.BOOKMAKERS res_parsing = {} for site in sites: if len(sites) > 1: print(site) database_site = site if site not in ["barrierebet", "vbet"] else "pasinobet" url = get_competition_by_id(_id, database_site) try: if url: res_parsing[site] = parse(site, url) else: print("Pas d'url en base pour {} sur {}".format(competition, site)) except urllib.error.URLError: print("{} non accessible sur {} (délai écoulé)".format(competition, site)) except KeyboardInterrupt: res_parsing[site] = {} except selenium.common.exceptions.TimeoutException: print("Element non trouvé par selenium ({} sur {})".format(competition, site)) except sb.UnavailableCompetitionException: print("{} non disponible sur {}".format(competition, site)) except socket.timeout: print("{} non accessible sur {} (timeout socket)".format(competition, site)) except selenium.common.exceptions.StaleElementReferenceException: print("StaleElement non trouvé par selenium ({} sur {})".format(competition, site)) except selenium.common.exceptions.WebDriverException: print("Connection closed ({} sur {})".format(competition, site)) except requests.exceptions.SSLError: print("Max retries ({} sur {})".format(competition, site)) res = format_team_names(res_parsing, sport, competition) out = valid_odds(merge_dict_odds(res), sport) return out def parse_competitions_site(competitions, sport, site): list_odds = [] if len(competitions) > 40 and site == "winamax": # to avoid being blocked by winamax competitions = competitions[:40] sb.SITE_PROGRESS[site] = 0 try: for competition in competitions: list_odds.append(parse_competition(competition, sport, 
site)) sb.PROGRESS += 100 / (len(competitions) * sb.SUB_PROGRESS_LIMIT) sb.SITE_PROGRESS[site] += 100 / len(competitions) except sb.UnavailableSiteException: print("{} non accessible".format(site)) sb.SITE_PROGRESS[site] = 100 except sb.AbortException: print("Interruption", site) return merge_dict_odds(list_odds) def parse_competitions(competitions, sport, *sites): sites_order = ['betfair', 'joa', 'betway', 'pmu', 'barrierebet', 'pasinobet', 'vbet', 'france_pari', 'netbet', 'zebet', 'winamax', 'pinnacle', 'betclic', 'pokerstars', 'unibet', 'unibet_boost', 'bwin', 'parionssport'] if not sites: sites = sites_order sb.EXPECTED_TIME = 28 + len(competitions) * 12.5 sites = [site for site in sites_order if site in sites] sb.PROGRESS = 0 selenium_sites = sb.SELENIUM_SITES.intersection(sites) for site in selenium_sites: selenium_init.start_driver(site) sb.PROGRESS += 100/len(selenium_sites) sb.PROGRESS = 0 sb.SUB_PROGRESS_LIMIT = len(sites) if sb.DB_MANAGEMENT: for competition in competitions: if competition == sport or "Tout le" in competition: import_teams_by_sport(sport) else: id_competition = get_id_from_competition_name(competition, sport) if id_competition < 0: import_teams_by_competition_id_thesportsdb(id_competition) else: import_teams_by_url("http://www.comparateur-de-cotes.fr/comparateur/" + sport + "/a-ed" + str(id_competition)) list_odds = [] try: sb.IS_PARSING = True list_odds = ThreadPool(3).map(lambda x: parse_competitions_site(competitions, sport, x), sites) sb.ODDS[sport] = merge_dict_odds(list_odds) except Exception: print(traceback.format_exc(), file=sys.stderr) sb.IS_PARSING = False sb.ABORT = False sb.SEEN_SUREBET[sport] = False print("Dernière récupération des cotes à", datetime.datetime.today().strftime("%H:%M")) def odds_match(match, sport="football"): """ Retourne les cotes d'un match donné sur tous les sites de l'ARJEL """ odds_match = sb.ODDS[sport].get(match) if odds_match: return match, copy.deepcopy(odds_match) return None, None def best_stakes_match(match, site, bet, minimum_odd, sport="football"): """ Pour un match, un bookmaker, une somme à miser sur ce bookmaker et une cote minimale donnés, retourne la meilleure combinaison de paris à placer """ best_match, all_odds = odds_match(match, sport) if not all_odds: print("No match found") return print(best_match) pprint(all_odds) odds_site = all_odds['odds'][site] best_odds = copy.deepcopy(odds_site) best_profit = -float("inf") n = len(all_odds['odds'][site]) best_sites = [site for _ in range(n)] best_i = 0 best_overall_odds = None bets = None sites = None for odds in all_odds['odds'].items(): if odds[0] == "unibet_boost": continue for i in range(n): if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"): best_odds[i] = odds[1][i] best_sites[i] = odds[0] for i in range(n): if odds_site[i] >= minimum_odd: odds_to_check = (best_odds[:i] + [odds_site[i]] + best_odds[i + 1:]) profit = gain2(odds_to_check, i, bet) if profit > best_profit: best_profit = profit best_overall_odds = odds_to_check sites = best_sites[:i] + [site] + best_sites[i + 1:] bets = mises2(odds_to_check, bet, i) best_i = i if best_overall_odds: mises2(best_overall_odds, bet, best_i, True) afficher_mises_combine(best_match.split(" / "), [sites], [bets], all_odds["odds"], sport, profit=best_profit) else: print("No match found") def best_match_under_conditions(site, minimum_odd, bet, sport="football", date_max=None, time_max=None, date_min=None, time_min=None, one_site=False): """ Retourne le meilleur match sur lequel miser lorsqu'on doit 
miser une somme donnée à une cote donnée. Cette somme peut-être sur seulement une issue (one_site=False) ou bien répartie sur plusieurs issues d'un même match (one_site=True), auquel cas, chacune des cotes du match doivent respecter le critère de cote minimale. """ odds_function = get_best_odds(one_site) profit_function = get_profit(bet, one_site) criteria = lambda odds_to_check, i: ((not one_site and odds_to_check[i] >= minimum_odd) or (one_site and all(odd >= minimum_odd for odd in odds_to_check))) display_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet, best_rank, True) if not one_site else mises(best_overall_odds, bet, True)) result_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet, best_rank, False) if not one_site else mises(best_overall_odds, bet, False)) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, one_site=one_site) def best_match_under_conditions2(site, minimum_odd, stake, sport="football", date_max=None, time_max=None, date_min=None, time_min=None, miles=False, rate_eur_miles=0, multiplicator=1): all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min) best_profit = -float("inf") best_match = None sites = None nb_matches = len(all_odds) n = get_nb_outcomes(sport) for match in all_odds: sb.PROGRESS += 100 / nb_matches if site in all_odds[match]['odds']: odds_site = all_odds[match]['odds'][site] best_odds = copy.deepcopy(odds_site) best_sites = [site for _ in range(n)] for odds in all_odds[match]['odds'].items(): if odds[0] == "unibet_boost": continue for i in range(n): if odds[1][i] > best_odds[i] and (odds[1][i] >= 1.1 or odds[0] == "pmu"): best_odds[i] = odds[1][i] best_sites[i] = odds[0] for odd_i, site_i in zip(best_odds, best_sites): if odd_i < 1.1 and site_i != "pmu": break else: profit = gain3(odds_site, best_odds, stake, minimum_odd, miles, rate_eur_miles, multiplicator) if profit > best_profit: best_profit = profit best_odds_site = copy.deepcopy(odds_site) best_best_odds = copy.deepcopy(best_odds) best_match = match stakes, best_indices = mises3(odds_site, best_odds, stake, minimum_odd, False, miles, rate_eur_miles, multiplicator) sites = [site if i in best_indices else best_sites[i] for i in range(n)] if best_match: print(best_match) pprint(all_odds[best_match]) mises3(best_odds_site, best_best_odds, stake, minimum_odd, True, miles, rate_eur_miles, multiplicator) afficher_mises_combine([best_match], [sites], [stakes], all_odds[best_match]["odds"], sport, profit=best_profit) else: print("No match found") def best_match_pari_gagnant(site, minimum_odd, bet, sport="football", date_max=None, time_max=None, date_min=None, time_min=None, nb_matches_combine=1): """ Retourne le meilleur match sur lequel miser lorsqu'on doit gagner un pari à une cote donnée sur un site donné. """ stakes = [] n = get_nb_outcomes(sport) for _ in range(n**nb_matches_combine): stakes.append([bet, site, minimum_odd]) best_match_stakes_to_bet(stakes, nb_matches_combine, sport, date_max, time_max, True) def best_match_freebet(site, freebet, sport="football", live=False, date_max=None, time_max=None, date_min=None, time_min=None): """ Retourne le match qui génère le meilleur gain pour un unique freebet placé, couvert avec de l'argent réel. 
""" fact_live = 1 - 0.2 * live odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1] + best_odds[i + 1:]) profit_function = lambda odds_to_check, i: gain2(odds_to_check, i) + 1 criteria = lambda odds_to_check, i: True display_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, True) result_function = lambda x, i: mises_freebet(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, freebet=True) def best_match_freebet2(site, freebet, sport="football", live=False, date_max=None, time_max=None, date_min=None, time_min=None): """ Retourne le match qui génère le meilleur gain pour un unique freebet placé, couvert avec de l'argent réel. """ fact_live = 1 - 0.2 * live odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * fact_live - 1] + best_odds[i + 1:]) profit_function = lambda x, i: gain_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i) criteria = lambda odds_to_check, i: True display_function = lambda x, i: mises_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, True) result_function = lambda x, i: mises_freebet2(x[:i] + [x[i] + 1] + x[i + 1:], freebet, i, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, freebet=True) def best_match_cashback(site, minimum_odd, bet, sport="football", freebet=True, combi_max=0, combi_odd=1, rate_cashback=1, date_max=None, time_max=None, date_min=None, time_min=None): """ Retourne le match qui génère le meilleur gain pour une promotion de type "Pari remboursé si perdant". Le bonus combi-max, la côte des sélections supposées sûres (dans le cadre d'une promotion sur combiné) ainsi que le bonus combi-max sont également paramétrables """ odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [combi_odd * odds_site[i] * (1 + combi_max) - combi_max] + best_odds[i + 1:]) profit_function = lambda odds_to_check, i: gain_pari_rembourse_si_perdant(odds_to_check, bet, i, freebet, rate_cashback) criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= minimum_odd display_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet, rate_cashback, True) result_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet, rate_cashback, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min) def best_matches_combine(site, minimum_odd, bet, sport="football", nb_matches=2, one_site=False, date_max=None, time_max=None, date_min=None, time_min=None, minimum_odd_selection=1.01): """ Retourne les meilleurs matches sur lesquels miser lorsqu'on doit miser une somme donnée à une cote donnée sur un combiné """ all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min) all_odds = filter_dict_minimum_odd(all_odds, minimum_odd_selection, site) sb.ALL_ODDS_COMBINE = {} nb_combine = binomial(len(all_odds), nb_matches) sb.PROGRESS = 0 def compute_all_odds_combine(nb_combine, combine): sb.PROGRESS += 100/nb_combine try: sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites( *[match[1] for match in combine] ) except KeyError: pass ThreadPool(4).map(lambda x: compute_all_odds_combine(nb_combine, x), 
combinations(all_odds.items(), nb_matches)) sb.PROGRESS = 0 odds_function = get_best_odds(one_site) profit_function = get_profit(bet, one_site) criteria = lambda odds_to_check, i: ((not one_site and odds_to_check[i] >= minimum_odd) or (one_site and all(odd >= minimum_odd for odd in odds_to_check))) display_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet, best_rank, True) if not one_site else mises(best_overall_odds, bet, True)) result_function = lambda best_overall_odds, best_rank: (mises2(best_overall_odds, bet, best_rank, False) if not one_site else mises(best_overall_odds, bet, False)) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, True, nb_matches, one_site=one_site, combine_opt=True) def best_matches_combine_cashback_une_selection_perdante(site, cote_minimale_selection, combi_max=0, nb_matches=2, date_max=None, time_max=None, date_min=None, time_min=None): """ Calcule la meilleure combinaison de matches et les mises à jouer pour une promotion du type "Combiné remboursé si une seule selection perdante, sans limite du nombre de paris remboursés" """ sport = "football" bet = 10000 all_odds = sb.ODDS[sport] sb.ALL_ODDS_COMBINE = {} for combine in combinations(all_odds.items(), nb_matches): try: if all([odd >= cote_minimale_selection for odds in list(all_odds[match[0]]["odds"][site] for match in combine) for odd in odds]): sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites( *[match[1] for match in combine] ) except KeyError: pass odds_function = lambda best_odds, odds_site, i: list( map(lambda x: x * (1 + combi_max) - combi_max, odds_site)) profit_function = lambda odds_to_check, i: gain(odds_to_check, bet) - bet criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= 1.1 display_function = lambda x, i: mises(x, bet, True) return_function = lambda x, i: mises(x, bet, False) best_match_base(odds_function, profit_function, criteria, display_function, return_function, site, sport, date_max, time_max, date_min, time_min, True, nb_matches, one_site=True, recalcul=True) def best_matches_combine_cashback(site, minimum_odd, bet, sport="football", freebet=True, combi_max=0, rate_cashback=1, nb_matches=2, date_max=None, time_max=None, date_min=None, time_min=None): """ Calcule la répartition des mises lorsqu'un unique combiné est remboursé s'il est perdant """ all_odds = sb.ODDS[sport] sb.ALL_ODDS_COMBINE = {} for combine in combinations(all_odds.items(), nb_matches): sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites( *[match[1] for match in combine] ) odds_function = lambda best_odds, odds_site, i: (best_odds[:i] + [odds_site[i] * (1 + combi_max) - combi_max] + best_odds[i + 1:]) profit_function = lambda odds_to_check, i: gain_pari_rembourse_si_perdant(odds_to_check, bet, i, freebet, rate_cashback) criteria = lambda odds_to_check, i: (odds_to_check[i] + combi_max) / (1 + combi_max) >= minimum_odd display_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet, rate_cashback, True) return_function = lambda x, i: mises_pari_rembourse_si_perdant(x, bet, i, freebet, rate_cashback, False) best_match_base(odds_function, profit_function, criteria, display_function, return_function, site, sport, date_max, time_max, date_min, time_min, True, nb_matches, combine_opt=True, taux_cashback=rate_cashback, cashback_freebet=freebet) def 
best_match_stakes_to_bet(stakes, nb_matches=1, sport="football", date_max=None, time_max=None, identical_stakes=False): second_sites = {stake[1] for stake in stakes} main_sites = sb.BOOKMAKERS all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max) best_profit = -sum(stake[0] for stake in stakes) n = get_nb_outcomes(sport) ** nb_matches nb_stakes = len(stakes) all_odds_combine = {} combis = list(combinations(all_odds.items(), nb_matches)) nb_combis = len(combis) best_combine = None best_bets = None main_site_odds = [] main_sites_distribution = [] sb.PROGRESS = 0 for i, combine in enumerate(combis): sb.PROGRESS += 100 / nb_combis match_combine = " / ".join([match[0] for match in combine]) all_odds_combine[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine]) for main0 in main_sites: try: main_sites_distribution = [main0 for _ in range(n)] main_site_odds = copy.deepcopy(all_odds_combine[match_combine]["odds"][main0]) break except KeyError: pass for main in main_sites[:i] + main_sites[i + 1:]: try: potential_odds = all_odds_combine[match_combine]["odds"][main] for j, odd in enumerate(potential_odds): if odd > main_site_odds[j]: main_site_odds[j] = odd main_sites_distribution[j] = main except KeyError: pass second_odds = {second_site: all_odds_combine[match_combine]["odds"][second_site] for second_site in second_sites if second_site in all_odds_combine[match_combine]["odds"]} if not second_odds: continue dict_combine_odds = copy.deepcopy(second_odds) for perm in permutations(range(n), nb_stakes): valid_perm = True defined_second_sites = [[perm[j], stake[0], stake[1]] for j, stake in enumerate(stakes)] for j, stake in enumerate(stakes): if dict_combine_odds[defined_second_sites[j][2]][defined_second_sites[j][0]] < stake[2]: valid_perm = False break if not valid_perm: if identical_stakes: break continue defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds, main_sites_distribution, defined_second_sites) profit = defined_bets_temp[0] - np.sum(defined_bets_temp[1]) if profit > best_profit: best_profit = profit best_combine = combine best_bets = defined_bets_temp if identical_stakes: break if best_combine: best_match_combine = " / ".join([match[0] for match in best_combine]) odds_best_match = copy.deepcopy(all_odds_combine[best_match_combine]) all_sites = main_sites + list(second_sites) for site in all_odds_combine[best_match_combine]["odds"]: if site not in all_sites: del odds_best_match["odds"][site] print(best_match_combine) pprint(odds_best_match, compact=1) print("Plus-value =", round(best_profit, 2)) print("Gain référence =", round(best_bets[0], 2)) print("Somme des mises =", round(np.sum(best_bets[1]), 2)) afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1], all_odds_combine[best_match_combine]["odds"], sport, profit=best_profit) else: print("No match found") def best_matches_freebet(main_sites, freebets, sport, *matches): """ Compute of the best way to bet freebets following the model [[bet, bookmaker], ...] 
:param main_sites: :type freebets: List[List[List[str] or str]] """ second_sites = {freebet[1] for freebet in freebets} if not second_sites: print("Veuillez sélectionner des freebets secondaires") return if matches: new_odds = {} for match in matches: match_name, odds = odds_match(match) new_odds[match_name] = odds else: new_odds = sb.ODDS[sport] all_odds = {} for match in new_odds: if (not (any([site not in new_odds[match]["odds"].keys() for site in main_sites]) or any([site not in new_odds[match]["odds"].keys() for site in second_sites]))): if new_odds[match]["odds"]: all_odds[match] = new_odds[match] best_rate = 0 nb_matches = 2 n = 3 ** nb_matches nb_freebets = len(freebets) all_odds_combine = {} combis = list(combinations(all_odds.items(), nb_matches)) best_combine = None real_odds = {} for combine in combis: match_combine = " / ".join([match[0] for match in combine]) all_odds_combine[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine], freebet=True) real_odds[match_combine] = cotes_combine_all_sites(*[match[1] for match in combine]) main_sites_distribution = [main_sites[0] for _ in range(n)] main_site_odds = copy.deepcopy(all_odds_combine[match_combine]["odds"][main_sites[0]]) for main in main_sites[1:]: potential_odds = all_odds_combine[match_combine]["odds"][main] for j, odd in enumerate(potential_odds): if odd > main_site_odds[j]: main_site_odds[j] = odd main_sites_distribution[j] = main second_odds = {second_site: all_odds_combine[match_combine]["odds"][second_site] for second_site in second_sites} dict_combine_odds = copy.deepcopy(second_odds) for perm in permutations(range(n), nb_freebets): defined_second_sites = [[perm[i], freebet[0], freebet[1]] for i, freebet in enumerate(freebets)] defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds, main_sites_distribution, defined_second_sites) if defined_bets_temp[0] / np.sum(defined_bets_temp[1]) > best_rate: best_rate = defined_bets_temp[0] / np.sum(defined_bets_temp[1]) best_combine = combine best_bets = defined_bets_temp if best_combine: best_match_combine = " / ".join([match[0] for match in best_combine]) odds_best_match = copy.deepcopy(all_odds_combine[best_match_combine]) all_sites = main_sites + list(second_sites) for site in all_odds_combine[best_match_combine]["odds"]: if site not in all_sites: del odds_best_match["odds"][site] print(best_match_combine) pprint(odds_best_match, compact=1) print("Taux =", best_rate) print("Gain référence =", best_bets[0]) print("Somme des mises =", np.sum(best_bets[1])) afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1], real_odds[best_match_combine]["odds"], "football", uniquement_freebet=True, profit=best_rate) def best_matches_freebet_one_site(site, freebet, sport="football", nb_matches=2, minimum_odd=1.1, date_max=None, time_max=None, date_min=None, time_min=None): """ Calcule la répartition des paris gratuits sur un unique site """ all_odds = sb.ODDS[sport] sb.ALL_ODDS_COMBINE = {} for combine in combinations(all_odds.items(), nb_matches): sb.ALL_ODDS_COMBINE[" / ".join([match[0] for match in combine])] = cotes_combine_all_sites( *[match[1] for match in combine] ) odds_function = lambda best_odds, odds_site, i: cotes_freebet(odds_site) profit_function = lambda odds_to_check, i: gain(odds_to_check, freebet) - freebet criteria = lambda odds_to_check, i: all(odd >= minimum_odd for odd in odds_to_check) display_function = lambda best_overall_odds, best_rank: mises(best_overall_odds, freebet, True, True) result_function = lambda 
best_overall_odds, best_rank: mises(best_overall_odds, freebet, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, True, nb_matches, True, one_site=True) def best_match_gain_cote(site, bet, sport="football", date_max=None, time_max=None, date_min=None, time_min=None): """ Retourne le match sur lequel miser pour optimiser une promotion du type "gain de la cote gagnée" """ odds_function = get_best_odds(False) profit_function = lambda odds_to_check, i: gain_promo_gain_cote(odds_to_check, bet, i) criteria = lambda odds_to_check, i: True display_function = lambda best_overall_odds, best_rank: mises_promo_gain_cote(best_overall_odds, bet, best_rank, True) result_function = lambda best_overall_odds, best_rank: mises_promo_gain_cote(best_overall_odds, bet, best_rank, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min) def best_match_cotes_boostees(site, gain_max, sport="football", date_max=None, time_max=None, date_min=None, time_min=None): odds_function = get_best_odds(True) profit_function = lambda odds_to_check, i: gain_gains_nets_boostes(odds_to_check, gain_max, False) criteria = lambda odds_to_check, i: odds_to_check[i] >= 1.5 display_function = lambda odds_to_check, i: mises_gains_nets_boostes(odds_to_check, gain_max, False, True) result_function = lambda odds_to_check, i: mises_gains_nets_boostes(odds_to_check, gain_max, False, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min) def best_combine_booste(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee): best_combine_reduit(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee) def best_combine_booste_progressif(matches, combinaison_boostee, site_combinaison, mise, sport, cote_boostee): outcomes = [] odds = [] bookmakers = [] stakes = [] simulated_odds = [] outcome_boost = [] matches.sort(key=lambda x: sb.ODDS[sport][x]["date"], reverse=True) time_intervals = [sb.ODDS[sport][x]["date"] - sb.ODDS[sport][y]["date"] for x, y in zip(matches[:-1], matches[1:])] print("Répartition des mises (les totaux affichés prennent en compte les éventuels freebets):") if time_intervals and min(time_intervals) < datetime.timedelta(hours=2): print("Methode impossible (pas assez de temps entre 2 matches)") return reference_gain = round(mise * cote_boostee, 2) sum_stakes = 0 for j, match in enumerate(matches): sum_stakes_match = 0 teams = match.split(" - ") if get_nb_outcomes(sport) == 3: teams.insert(1, "Nul ({} - {})".format(*teams)) _, bookmakers_match, odds_match = trj_match(sb.ODDS[sport][match]) for i, team in enumerate(teams): if combinaison_boostee[j] == i: outcome_boost.append(team) continue outcomes.append(team) odds.append(odds_match[i]) bookmakers.append(bookmakers_match[i]) stake = round((reference_gain - sum_stakes) / odds_match[i], 2) stakes.append(stake) sum_stakes_match += stake simulated_odds.append(reference_gain / stake) sum_stakes += sum_stakes_match outcomes.append(" / ".join(outcome_boost)) odds.append(cote_boostee) bookmakers.append(site_combinaison) stakes.append(mise) simulated_odds.append(cote_boostee) totals = [round(stake * odd, 2) for (stake, odd) in zip(stakes, odds)] table = {"Issue": reversed(outcomes), "Bookmaker": reversed(bookmakers), "Cote": reversed(odds), "Mise": reversed(stakes), 
"Total": reversed(totals), "TRJ":[round(100*gain(simulated_odds), 3), "Bénéfice", round(reference_gain-sum(stakes), 2)]} text = tabulate.tabulate(table, headers='keys', tablefmt='fancy_grid') print(text) print("Ne couvrir un match qu'une fois le résultat du match précédent connu") if sys.platform.startswith("win"): copy_to_clipboard(text) def trj_match(match_odds): odds = [] bookmakers = [] for bookmaker in match_odds["odds"]: if bookmaker == "unibet_boost": continue tmp_odds = match_odds["odds"][bookmaker] tmp_bookmakers = [bookmaker for _ in tmp_odds] if not odds: odds = copy.deepcopy(tmp_odds) bookmakers = copy.deepcopy(tmp_bookmakers) continue for i, tmp_odd in enumerate(tmp_odds): if not odds[i]: odds[i] = 1.01 if not tmp_odd: continue try: if tmp_odd > odds[i]: odds[i] = tmp_odd bookmakers[i] = bookmaker except TypeError: print(match_odds, tmp_odd, odds[i]) if not odds or 1.01 in odds: return 0, bookmakers, odds return gain(odds), bookmakers, odds def get_values(match_odds, rate): odds = [] bookmakers = [] sums = [] for bookmaker in match_odds["odds"]: if bookmaker == "unibet_boost": continue tmp_odds = match_odds["odds"][bookmaker] tmp_bookmakers = [bookmaker for _ in tmp_odds] if not odds: odds = copy.deepcopy(tmp_odds) sums = copy.deepcopy(tmp_odds) bookmakers = copy.deepcopy(tmp_bookmakers) continue for i, tmp_odd in enumerate(tmp_odds): sums[i] += tmp_odd if tmp_odd > odds[i]: odds[i] = tmp_odd bookmakers[i] = bookmaker values = [] best_rate = rate-1 n = len(match_odds["odds"]) i = 0 has_pinnacle = "pinnacle" in match_odds["odds"] for odd, sum, bookmaker in zip(odds, sums, bookmakers): if odd < 1.1: return 0, [] ref = sum/n if not has_pinnacle else match_odds["odds"]["pinnacle"][i] if ref < 1.1: return 0, [] rate_tmp = odd/ref-1 if rate_tmp >= rate: best_rate = max(best_rate, rate_tmp) value = [odd, rate_tmp, bookmaker] values.append(value) i += 1 return best_rate, values def best_matches_combine2(site, minimum_odd, bet, sport, minimum_odd_selection, date_max=None, time_max=None, date_min=None, time_min=None): nb_matches = 2 all_odds = filter_dict_dates(sb.ODDS[sport], date_max, time_max, date_min, time_min) all_odds = filter_dict_minimum_odd(all_odds, minimum_odd_selection, site) odds_combine_opt = [{} for _ in range(6)] nb_combine = binomial(len(all_odds), nb_matches) sb.PROGRESS = 0 combis = cotes_combine_optimise([[1 for _ in range(3)] for i in range(nb_matches)])[1] print(combis) def compute_all_odds_combine_optimise(nb_combine, combine, odds_combine_opt): sb.PROGRESS += 100/nb_combine try: cotes_combination = cotes_combine_reduit_all_sites( *[match[1] for match in combine] ) for i in range(6): odds_combine_opt[i][" / ".join([match[0] for match in combine])] = cotes_combination[i] # combis[i] = cotes_combination[i][1] except KeyError: pass ThreadPool(4).map(lambda x: compute_all_odds_combine_optimise(nb_combine, x, odds_combine_opt), combinations(all_odds.items(), nb_matches)) sb.PROGRESS = 0 odds_function = get_best_odds(False) profit_function = get_profit(bet, False) criteria = lambda odds_to_check, i: all(odd >= minimum_odd for odd in odds_to_check) for i, combination in enumerate(combis): sb.ALL_ODDS_COMBINE = odds_combine_opt[i] # display_function = lambda odds_to_check, i: mises_combine_optimise(odds_to_check, combination, bet, minimum_odd, True) # result_function = lambda odds_to_check, i: mises_combine_optimise(odds_to_check, combination, bet, minimum_odd, False) display_function = lambda best_overall_odds, best_rank: mises2(best_overall_odds, bet, best_rank, True) 
result_function = lambda best_overall_odds, best_rank: mises2(best_overall_odds, bet, best_rank, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min, True, nb_matches, combine_opt=True) def best_matches_combine3(site, minimum_odd, bet, sport="football", date_max=None, time_max=None, date_min=None, time_min=None, nb_matches_combine=2): stakes = [] for _ in range(5): stakes.append([bet, site, minimum_odd]) best_match_stakes_to_bet2(stakes, nb_matches_combine, sport, date_max, time_max, True) def convert_indices_to_opponents(combination_indices, matches, sport): combination_opponents = [] matches_outcomes = [match.split(" - ") for match in matches] if get_nb_outcomes(sport) == 3: for match in matches_outcomes: match.insert(1, "Nul") for indices in combination_indices: opponents = [] for i, index in enumerate(indices): if index == float("inf"): continue opponents.append(matches_outcomes[i][index]) combination_opponents.append(tuple(opponents)) return combination_opponents def best_match_stakes_to_bet2(stakes, nb_matches=2, sport="football", date_max=None, time_max=None, identical_stakes=False): second_sites = {stake[1] for stake in stakes if stake[1] != "unibet_boost"} main_sites = sb.BOOKMAKERS all_odds = get_matches_with_best_trj(sport, 20) all_odds = filter_dict_dates(all_odds, date_max, time_max) best_profit = -sum(stake[0] for stake in stakes) n = 5#get_nb_outcomes(sport) ** nb_matches nb_stakes = len(stakes) all_odds_combine = [{} for _ in range(6)] combis = list(combinations(all_odds.items(), nb_matches)) nb_combis = len(combis) best_combine = None best_bets = None main_site_odds = [] main_sites_distribution = [] sb.PROGRESS = 0 list_combinations = cotes_combine_optimise([[1 for _ in range(3)] for i in range(nb_matches)])[1] for k in range(6): for i, combine in enumerate(combis): sb.PROGRESS += 100 / nb_combis match_combine = " / ".join([match[0] for match in combine]) cotes_combination = cotes_combine_reduit_all_sites( *[match[1] for match in combine] ) # print(cotes_combination[k]) all_odds_combine[k][match_combine] = cotes_combination[k] for main0 in main_sites: try: main_sites_distribution = [main0 for _ in range(n)] main_site_odds = copy.deepcopy(all_odds_combine[k][match_combine]["odds"][main0]) break except KeyError: pass for main in main_sites[:i] + main_sites[i + 1:]: try: potential_odds = all_odds_combine[k][match_combine]["odds"][main] for j, odd in enumerate(potential_odds): if odd > main_site_odds[j]: main_site_odds[j] = odd main_sites_distribution[j] = main except KeyError: pass second_odds = {second_site: all_odds_combine[k][match_combine]["odds"][second_site] for second_site in second_sites if second_site in all_odds_combine[k][match_combine]["odds"]} if not second_odds: continue dict_combine_odds = copy.deepcopy(second_odds) for perm in permutations(range(n), nb_stakes): valid_perm = True defined_second_sites = [[perm[j], stake[0], stake[1]] for j, stake in enumerate(stakes)] for j, stake in enumerate(stakes): if dict_combine_odds[defined_second_sites[j][2]][defined_second_sites[j][0]] < stake[2]: valid_perm = False break if not valid_perm: if identical_stakes: break continue defined_bets_temp = defined_bets(main_site_odds, dict_combine_odds, main_sites_distribution, defined_second_sites) profit = defined_bets_temp[0] - np.sum(defined_bets_temp[1]) if profit > best_profit: best_profit = profit best_combine = combine best_bets = defined_bets_temp best_combination = k if 
identical_stakes: break if best_combine: best_match_combine = " / ".join([match[0] for match in best_combine]) odds_best_match = copy.deepcopy(all_odds_combine[best_combination][best_match_combine]) all_sites = main_sites + list(second_sites) for site in all_odds_combine[best_combination][best_match_combine]["odds"]: if site not in all_sites: del odds_best_match["odds"][site] print(best_match_combine) pprint(odds_best_match, compact=1) print("Plus-value =", round(best_profit, 2)) print("Gain référence =", round(best_bets[0], 2)) print("Somme des mises =", round(np.sum(best_bets[1]), 2)) afficher_mises_combine([x[0] for x in best_combine], best_bets[2], best_bets[1], all_odds_combine[best_combination][best_match_combine]["odds"], sport, combinaisons=convert_indices_to_opponents(list_combinations[best_combination], [x[0] for x in best_combine], sport), profit=best_profit) else: print("No match found") def best_matches_freebet2(site, freebet, sport, nb_matches=2): # all_odds = sb.ODDS[sport] all_odds = get_matches_with_best_trj(sport, 10, site) best_profit = float("-inf") combis = list(combinations(all_odds.items(), nb_matches)) if not combis: print("No match found") return nb_combis = len(combis) best_combine = None best_bets = None best_matches = [] best_choice = [0 for _ in range(nb_matches)] best_odd = 1.01 choices = list(product(*[range(get_nb_outcomes(sport)) for _ in range(nb_matches)])) for combi in combis: if any([site not in x[1]["odds"] for x in combi]): continue matches = [x[0] for x in combi] for choice in choices: choice_list = list(choice) odd = 1 for match, outcome in zip(combi, choice_list): odd *= match[1]["odds"][site][outcome] profit = best_combine_reduit(matches, choice_list, site, freebet, sport, odd-1, output=False) if profit < best_profit: continue best_profit = profit best_matches = matches best_choice = choice_list best_odd = odd best_combine_reduit(best_matches, best_choice, site, freebet, sport, best_odd-1, freebet=True) def get_matches_with_best_trj(sport, nb_matches, site=None): matches = sorted(filter(lambda x: not site or site in x[1]["odds"], sb.ODDS[sport].items()), key=lambda x:trj_match(x[1])[0], reverse=True)[:nb_matches] return {match:odds for match, odds in matches} def best_match_defi_rembourse_ou_gagnant(site, minimum_odd, stake, sport, date_max=None, time_max=None, date_min=None, time_min=None): odds_function = get_best_odds(False) profit_function = lambda best_overall_odds, best_rank: gain_defi_rembourse_ou_gagnant(best_overall_odds, stake, best_rank, True) profit_function = lambda odds_to_check, i: gain_defi_rembourse_ou_gagnant(odds_to_check, stake, i) criteria = lambda odds_to_check, i: odds_to_check[i] >= minimum_odd display_function = lambda best_overall_odds, best_rank: mises_defi_rembourse_ou_gagnant(best_overall_odds, stake, best_rank, True) result_function = lambda best_overall_odds, best_rank: mises_defi_rembourse_ou_gagnant(best_overall_odds, stake, best_rank, False) best_match_base(odds_function, profit_function, criteria, display_function, result_function, site, sport, date_max, time_max, date_min, time_min) def get_sports_with_surebet(): sports_with_surebet = [] for sport in sb.SPORTS: if sb.SEEN_SUREBET[sport]: continue if sport not in sb.ODDS: continue for match in sb.ODDS[sport]: if trj_match(sb.ODDS[sport][match])[0]>=1: sports_with_surebet.append(sport) break return sports_with_surebet
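# --- Illustrative sketch (not part of the original module) ---
# The surebet detection above relies on `gain(odds)`, which is assumed here to return
# the payout rate (TRJ) for one best odd per outcome, i.e. 1 / sum(1 / odd). Under that
# assumption, a TRJ >= 1 means the best odds gathered across bookmakers form a surebet.
# The helper and names below (`_example_trj`) are hypothetical and only restate that
# arithmetic for clarity.

def _example_trj(odds):
    """Payout rate for one (best) odd per mutually exclusive outcome."""
    return 1 / sum(1 / odd for odd in odds)


if __name__ == "__main__":
    best_odds = [2.20, 3.80, 4.00]          # best 1/N/2 odds collected across bookmakers
    trj = _example_trj(best_odds)
    print(f"TRJ = {100 * trj:.2f}%")        # ~103.34% with these odds
    print("surebet" if trj >= 1 else "no surebet")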
py
1a46c7292d6443eac69421033dcdfb0f4f7456e2
#!/usr/bin/env python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Setup.py for the Provider packages of Airflow project.""" import collections import difflib import glob import importlib import json import logging import os import re import shutil import subprocess import sys import tempfile import textwrap import warnings from contextlib import contextmanager from copy import deepcopy from datetime import datetime, timedelta from enum import Enum from functools import lru_cache from os.path import dirname, relpath from pathlib import Path from shutil import copyfile from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Set, Tuple, Type, Union import jsonschema import rich_click as click from github import Github, Issue, PullRequest, UnknownObjectException from packaging.version import Version from rich.console import Console from rich.progress import Progress from rich.syntax import Syntax from airflow.utils.yaml import safe_load ALL_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] INITIAL_CHANGELOG_CONTENT = """ .. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at .. http://www.apache.org/licenses/LICENSE-2.0 .. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Changelog --------- 1.0.0 ..... Initial version of the provider. 
""" HTTPS_REMOTE = "apache-https-for-providers" HEAD_OF_HTTPS_REMOTE = f"{HTTPS_REMOTE}/main" MY_DIR_PATH = Path(__file__).parent SOURCE_DIR_PATH = MY_DIR_PATH.parent.parent AIRFLOW_PATH = SOURCE_DIR_PATH / "airflow" DIST_PATH = SOURCE_DIR_PATH / "dist" PROVIDERS_PATH = AIRFLOW_PATH / "providers" DOCUMENTATION_PATH = SOURCE_DIR_PATH / "docs" TARGET_PROVIDER_PACKAGES_PATH = SOURCE_DIR_PATH / "provider_packages" GENERATED_AIRFLOW_PATH = TARGET_PROVIDER_PACKAGES_PATH / "airflow" GENERATED_PROVIDERS_PATH = GENERATED_AIRFLOW_PATH / "providers" PROVIDER_RUNTIME_DATA_SCHEMA_PATH = SOURCE_DIR_PATH / "airflow" / "provider_info.schema.json" sys.path.insert(0, str(SOURCE_DIR_PATH)) # those imports need to come after the above sys.path.insert to make sure that Airflow # sources are importable without having to add the airflow sources to the PYTHONPATH before # running the script from dev.import_all_classes import import_all_classes # noqa # isort:skip from setup import PROVIDERS_REQUIREMENTS # type: ignore[attr-defined] # isort:skip # noqa from setup import PREINSTALLED_PROVIDERS # type: ignore[attr-defined] # isort:skip # noqa # Note - we do not test protocols as they are not really part of the official API of # Apache Airflow logger = logging.getLogger(__name__) PY3 = sys.version_info[0] == 3 console = Console(width=400, color_system="standard") @click.group(context_settings={'help_option_names': ['-h', '--help'], 'max_content_width': 500}) def cli(): ... option_skip_tag_check = click.option( "--skip-tag-check/--no-skip-tag-check", default=False, is_flag=True, help="Skip checking if the tag already exists in the remote repository", ) option_git_update = click.option( '--git-update/--no-git-update', default=True, is_flag=True, help=f"If the git remote {HTTPS_REMOTE} already exists, don't try to update it", ) option_version_suffix = click.option( "--version-suffix", metavar="suffix", help=textwrap.dedent( """ adds version suffix to version of the packages. only useful when generating rc candidates for pypi.""" ), ) option_verbose = click.option( "--verbose", is_flag=True, help="Print verbose information about performed steps", ) option_force = click.option( "--force", is_flag=True, help="Forces regeneration of already generated documentation", ) argument_package_id = click.argument('package_id') argument_changelog_files = click.argument('changelog_files', nargs=-1) argument_package_ids = click.argument('package_ids', nargs=-1) @contextmanager def with_group(title): """ If used in GitHub Action, creates an expandable group in the GitHub Action log. Otherwise, display simple text groups. 
For more information, see: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#grouping-log-lines """ if os.environ.get('GITHUB_ACTIONS', 'false') != "true": console.print("[blue]" + "#" * 10 + ' ' + title + ' ' + "#" * 10 + "[/]") yield return console.print(f"::group::{title}") yield console.print("::endgroup::") class EntityType(Enum): Operators = "Operators" Transfers = "Transfers" Sensors = "Sensors" Hooks = "Hooks" Secrets = "Secrets" class EntityTypeSummary(NamedTuple): entities: List[str] new_entities_table: str wrong_entities: List[Tuple[type, str]] class VerifiedEntities(NamedTuple): all_entities: Set[str] wrong_entities: List[Tuple[type, str]] class ProviderPackageDetails(NamedTuple): provider_package_id: str full_package_name: str pypi_package_name: str source_provider_package_path: str documentation_provider_package_path: str provider_description: str versions: List[str] excluded_python_versions: List[str] ENTITY_NAMES = { EntityType.Operators: "Operators", EntityType.Transfers: "Transfer Operators", EntityType.Sensors: "Sensors", EntityType.Hooks: "Hooks", EntityType.Secrets: "Secrets", } TOTALS: Dict[EntityType, int] = { EntityType.Operators: 0, EntityType.Hooks: 0, EntityType.Sensors: 0, EntityType.Transfers: 0, EntityType.Secrets: 0, } OPERATORS_PATTERN = r".*Operator$" SENSORS_PATTERN = r".*Sensor$" HOOKS_PATTERN = r".*Hook$" SECRETS_PATTERN = r".*Backend$" TRANSFERS_PATTERN = r".*To[A-Z0-9].*Operator$" WRONG_TRANSFERS_PATTERN = r".*Transfer$|.*TransferOperator$" ALL_PATTERNS = { OPERATORS_PATTERN, SENSORS_PATTERN, HOOKS_PATTERN, SECRETS_PATTERN, TRANSFERS_PATTERN, WRONG_TRANSFERS_PATTERN, } EXPECTED_SUFFIXES: Dict[EntityType, str] = { EntityType.Operators: "Operator", EntityType.Hooks: "Hook", EntityType.Sensors: "Sensor", EntityType.Secrets: "Backend", EntityType.Transfers: "Operator", } def get_source_airflow_folder() -> str: """ Returns source directory for whole airflow (from the main airflow project). :return: the folder path """ return os.path.abspath(SOURCE_DIR_PATH) def get_source_providers_folder() -> str: """ Returns source directory for providers (from the main airflow project). :return: the folder path """ return os.path.join(get_source_airflow_folder(), "airflow", "providers") def get_target_folder() -> str: """ Returns target directory for providers (in the provider_packages folder) :return: the folder path """ return os.path.abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, "provider_packages")) def get_target_providers_folder() -> str: """ Returns target directory for providers (in the provider_packages folder) :return: the folder path """ return os.path.abspath(os.path.join(get_target_folder(), "airflow", "providers")) def get_target_providers_package_folder(provider_package_id: str) -> str: """ Returns target package folder based on package_id :return: the folder path """ return os.path.join(get_target_providers_folder(), *provider_package_id.split(".")) DEPENDENCIES_JSON_FILE = os.path.join(PROVIDERS_PATH, "dependencies.json") def get_pip_package_name(provider_package_id: str) -> str: """ Returns PIP package name for the package id. :param provider_package_id: id of the package :return: the name of pip package """ return "apache-airflow-providers-" + provider_package_id.replace(".", "-") def get_wheel_package_name(provider_package_id: str) -> str: """ Returns PIP package name for the package id. 
:param provider_package_id: id of the package :return: the name of pip package """ return "apache_airflow_providers_" + provider_package_id.replace(".", "_") def get_long_description(provider_package_id: str) -> str: """ Gets long description of the package. :param provider_package_id: package id :return: content of the description: README file """ package_folder = get_target_providers_package_folder(provider_package_id) readme_file = os.path.join(package_folder, "README.md") if not os.path.exists(readme_file): return "" with open(readme_file, encoding='utf-8') as file: readme_contents = file.read() copying = True long_description = "" for line in readme_contents.splitlines(keepends=True): if line.startswith("**Table of contents**"): copying = False continue header_line = "## Provider package" if line.startswith(header_line): copying = True if copying: long_description += line return long_description def get_install_requirements(provider_package_id: str, version_suffix: str) -> str: """ Returns install requirements for the package. :param provider_package_id: id of the provider package :param version_suffix: optional version suffix for packages :return: install requirements of the package """ dependencies = PROVIDERS_REQUIREMENTS[provider_package_id] provider_yaml = get_provider_yaml(provider_package_id) install_requires = [] if "additional-dependencies" in provider_yaml: additional_dependencies = provider_yaml['additional-dependencies'] if version_suffix: # In case we are preparing "rc" or dev0 packages, we should also # make sure that cross-dependency with Airflow or Airflow Providers will # contain the version suffix, otherwise we will have conflicting dependencies. # For example if (in sftp) we have ssh>=2.0.1 and release ssh==2.0.1 # we want to turn this into ssh>=2.0.1.dev0 if we build dev0 version of the packages # or >=2.0.1rc1 if we build rc1 version of the packages. for dependency in additional_dependencies: if dependency.startswith("apache-airflow") and ">=" in dependency: dependency = ( dependency + ("." if not version_suffix.startswith(".") else "") + version_suffix ) install_requires.append(dependency) else: install_requires.extend(additional_dependencies) install_requires.extend(dependencies) prefix = "\n " return prefix + prefix.join(install_requires) def get_setup_requirements() -> str: """ Returns setup requirements (common for all package for now). :return: setup requirements """ return """ setuptools wheel """ def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]: """ Finds extras for the package specified. :param provider_package_id: id of the package """ if provider_package_id == 'providers': return {} with open(DEPENDENCIES_JSON_FILE) as dependencies_file: cross_provider_dependencies: Dict[str, List[str]] = json.load(dependencies_file) extras_dict = ( { module: [get_pip_package_name(module)] for module in cross_provider_dependencies[provider_package_id] } if cross_provider_dependencies.get(provider_package_id) else {} ) provider_yaml_dict = get_provider_yaml(provider_package_id) additional_extras = provider_yaml_dict.get('additional-extras') if additional_extras: for key in additional_extras: if key in extras_dict: extras_dict[key].append(additional_extras[key]) else: extras_dict[key] = additional_extras[key] return extras_dict def get_provider_packages() -> List[str]: """ Returns all provider packages. 
""" return list(PROVIDERS_REQUIREMENTS.keys()) def is_imported_from_same_module(the_class: str, imported_name: str) -> bool: """ Is the class imported from another module? :param the_class: the class object itself :param imported_name: name of the imported class :return: true if the class was imported from another module """ return ".".join(imported_name.split(".")[:-1]) == the_class.__module__ def is_example_dag(imported_name: str) -> bool: """ Is the class an example_dag class? :param imported_name: name where the class is imported from :return: true if it is an example_dags class """ return ".example_dags." in imported_name def is_from_the_expected_base_package(the_class: Type, expected_package: str) -> bool: """ Returns true if the class is from the package expected. :param the_class: the class object :param expected_package: package expected for the class :return: """ return the_class.__module__.startswith(expected_package) def inherits_from(the_class: Type, expected_ancestor: Optional[Type] = None) -> bool: """ Returns true if the class inherits (directly or indirectly) from the class specified. :param the_class: The class to check :param expected_ancestor: expected class to inherit from :return: true is the class inherits from the class expected """ if expected_ancestor is None: return False import inspect mro = inspect.getmro(the_class) return the_class is not expected_ancestor and expected_ancestor in mro def is_class(the_class: Type) -> bool: """ Returns true if the object passed is a class :param the_class: the class to pass :return: true if it is a class """ import inspect return inspect.isclass(the_class) def package_name_matches(the_class: Type, expected_pattern: Optional[str] = None) -> bool: """ In case expected_pattern is set, it checks if the package name matches the pattern. . :param the_class: imported class :param expected_pattern: the pattern that should match the package :return: true if the expected_pattern is None or the pattern matches the package """ return expected_pattern is None or re.match(expected_pattern, the_class.__module__) is not None def find_all_entities( imported_classes: List[str], base_package: str, ancestor_match: Type, sub_package_pattern_match: str, expected_class_name_pattern: str, unexpected_class_name_patterns: Set[str], exclude_class_type: Optional[Type] = None, false_positive_class_names: Optional[Set[str]] = None, ) -> VerifiedEntities: """ Returns set of entities containing all subclasses in package specified. 
:param imported_classes: entities imported from providers :param base_package: base package name where to start looking for the entities :param sub_package_pattern_match: this string is expected to appear in the sub-package name :param ancestor_match: type of the object the method looks for :param expected_class_name_pattern: regexp of class name pattern to expect :param unexpected_class_name_patterns: set of regexp of class name pattern that are not expected :param exclude_class_type: exclude class of this type (Sensor are also Operators, so they should be excluded from the list) :param false_positive_class_names: set of class names that are wrongly recognised as badly named """ found_entities: Set[str] = set() wrong_entities: List[Tuple[type, str]] = [] for imported_name in imported_classes: module, class_name = imported_name.rsplit(".", maxsplit=1) the_class = getattr(importlib.import_module(module), class_name) if ( is_class(the_class=the_class) and not is_example_dag(imported_name=imported_name) and is_from_the_expected_base_package(the_class=the_class, expected_package=base_package) and is_imported_from_same_module(the_class=the_class, imported_name=imported_name) and inherits_from(the_class=the_class, expected_ancestor=ancestor_match) and not inherits_from(the_class=the_class, expected_ancestor=exclude_class_type) and package_name_matches(the_class=the_class, expected_pattern=sub_package_pattern_match) ): if not false_positive_class_names or class_name not in false_positive_class_names: if not re.match(expected_class_name_pattern, class_name): wrong_entities.append( ( the_class, f"The class name {class_name} is wrong. " f"It should match {expected_class_name_pattern}", ) ) continue if unexpected_class_name_patterns: for unexpected_class_name_pattern in unexpected_class_name_patterns: if re.match(unexpected_class_name_pattern, class_name): wrong_entities.append( ( the_class, f"The class name {class_name} is wrong. " f"It should not match {unexpected_class_name_pattern}", ) ) continue found_entities.add(imported_name) return VerifiedEntities(all_entities=found_entities, wrong_entities=wrong_entities) def convert_classes_to_table(entity_type: EntityType, entities: List[str], full_package_name: str) -> str: """ Converts new entities to a Markdown table. :param entity_type: entity type to convert to markup :param entities: list of entities :param full_package_name: name of the provider package :return: table of new classes """ from tabulate import tabulate headers = [f"New Airflow 2.0 {entity_type.value.lower()}: `{full_package_name}` package"] table = [(get_class_code_link(full_package_name, class_name, "main"),) for class_name in entities] return tabulate(table, headers=headers, tablefmt="pipe") def get_details_about_classes( entity_type: EntityType, entities: Set[str], wrong_entities: List[Tuple[type, str]], full_package_name: str, ) -> EntityTypeSummary: """ Get details about entities. :param entity_type: type of entity (Operators, Hooks etc.) 
:param entities: set of entities found :param wrong_entities: wrong entities found for that type :param full_package_name: full package name :return: """ all_entities = list(entities) all_entities.sort() TOTALS[entity_type] += len(all_entities) return EntityTypeSummary( entities=all_entities, new_entities_table=convert_classes_to_table( entity_type=entity_type, entities=all_entities, full_package_name=full_package_name, ), wrong_entities=wrong_entities, ) def strip_package_from_class(base_package: str, class_name: str) -> str: """ Strips base package name from the class (if it starts with the package name). """ if class_name.startswith(base_package): return class_name[len(base_package) + 1 :] else: return class_name def convert_class_name_to_url(base_url: str, class_name) -> str: """ Converts the class name to URL that the class can be reached :param base_url: base URL to use :param class_name: name of the class :return: URL to the class """ return base_url + os.path.sep.join(class_name.split(".")[:-1]) + ".py" def get_class_code_link(base_package: str, class_name: str, git_tag: str) -> str: """ Provides a Markdown link for the class passed as parameter. :param base_package: base package to strip from most names :param class_name: name of the class :param git_tag: tag to use for the URL link :return: URL to the class """ url_prefix = f'https://github.com/apache/airflow/blob/{git_tag}/' return ( f'[{strip_package_from_class(base_package, class_name)}]' f'({convert_class_name_to_url(url_prefix, class_name)})' ) def print_wrong_naming(entity_type: EntityType, wrong_classes: List[Tuple[type, str]]): """ Prints wrong entities of a given entity type if there are any :param entity_type: type of the class to print :param wrong_classes: list of wrong entities """ if wrong_classes: console.print(f"\n[red]There are wrongly named entities of type {entity_type}:[/]\n") for wrong_entity_type, message in wrong_classes: console.print(f"{wrong_entity_type}: {message}") def get_package_class_summary( full_package_name: str, imported_classes: List[str] ) -> Dict[EntityType, EntityTypeSummary]: """ Gets summary of the package in the form of dictionary containing all types of entities :param full_package_name: full package name :param imported_classes: entities imported_from providers :return: dictionary of objects usable as context for JINJA2 templates - or None if there are some errors """ from airflow.hooks.base import BaseHook from airflow.models.baseoperator import BaseOperator from airflow.secrets import BaseSecretsBackend from airflow.sensors.base import BaseSensorOperator all_verified_entities: Dict[EntityType, VerifiedEntities] = { EntityType.Operators: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.operators\..*", ancestor_match=BaseOperator, expected_class_name_pattern=OPERATORS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN}, exclude_class_type=BaseSensorOperator, false_positive_class_names={ 'CloudVisionAddProductToProductSetOperator', 'CloudDataTransferServiceGCSToGCSOperator', 'CloudDataTransferServiceS3ToGCSOperator', 'BigQueryCreateDataTransferOperator', 'CloudTextToSpeechSynthesizeOperator', 'CloudSpeechToTextRecognizeSpeechOperator', }, ), EntityType.Sensors: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.sensors\..*", ancestor_match=BaseSensorOperator, expected_class_name_pattern=SENSORS_PATTERN, 
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, SENSORS_PATTERN}, ), EntityType.Hooks: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.hooks\..*", ancestor_match=BaseHook, expected_class_name_pattern=HOOKS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {HOOKS_PATTERN}, ), EntityType.Secrets: find_all_entities( imported_classes=imported_classes, sub_package_pattern_match=r".*\.secrets\..*", base_package=full_package_name, ancestor_match=BaseSecretsBackend, expected_class_name_pattern=SECRETS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {SECRETS_PATTERN}, ), EntityType.Transfers: find_all_entities( imported_classes=imported_classes, base_package=full_package_name, sub_package_pattern_match=r".*\.transfers\..*", ancestor_match=BaseOperator, expected_class_name_pattern=TRANSFERS_PATTERN, unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, TRANSFERS_PATTERN}, ), } for entity in EntityType: print_wrong_naming(entity, all_verified_entities[entity].wrong_entities) entities_summary: Dict[EntityType, EntityTypeSummary] = {} for entity_type in EntityType: entities_summary[entity_type] = get_details_about_classes( entity_type, all_verified_entities[entity_type].all_entities, all_verified_entities[entity_type].wrong_entities, full_package_name, ) return entities_summary def render_template( template_name: str, context: Dict[str, Any], extension: str, autoescape: bool = True, keep_trailing_newline: bool = False, ) -> str: """ Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir. :param template_name: name of the template to use :param context: Jinja2 context :param extension: Target file extension :param autoescape: Whether to autoescape HTML :param keep_trailing_newline: Whether to keep the newline in rendered output :return: rendered template """ import jinja2 template_loader = jinja2.FileSystemLoader(searchpath=MY_DIR_PATH) template_env = jinja2.Environment( loader=template_loader, undefined=jinja2.StrictUndefined, autoescape=autoescape, keep_trailing_newline=keep_trailing_newline, ) template = template_env.get_template(f"{template_name}_TEMPLATE{extension}.jinja2") content: str = template.render(context) return content PR_PATTERN = re.compile(r".*\(#([0-9]+)\)") class Change(NamedTuple): """Stores details about commits""" full_hash: str short_hash: str date: str version: str message: str message_without_backticks: str pr: Optional[str] def get_change_from_line(line: str, version: str): split_line = line.split(" ", maxsplit=3) message = split_line[3] pr = None pr_match = PR_PATTERN.match(message) if pr_match: pr = pr_match.group(1) return Change( full_hash=split_line[0], short_hash=split_line[1], date=split_line[2], version=version, message=message, message_without_backticks=message.replace("`", "'").replace("&39;", "'"), pr=pr, ) def convert_git_changes_to_table( version: str, changes: str, base_url: str, markdown: bool = True ) -> Tuple[str, List[Change]]: """ Converts list of changes from its string form to markdown/RST table and array of change information The changes are in the form of multiple lines where each line consists of: FULL_COMMIT_HASH SHORT_COMMIT_HASH COMMIT_DATE COMMIT_SUBJECT The subject can contain spaces but one of the preceding values can, so we can make split 3 times on spaces to break it up. 
:param version: Version from which the changes are :param changes: list of changes in a form of multiple-line string :param base_url: base url for the commit URL :param markdown: if True, Markdown format is used else rst :return: formatted table + list of changes (starting from the latest) """ from tabulate import tabulate lines = changes.split("\n") headers = ["Commit", "Committed", "Subject"] table_data = [] changes_list: List[Change] = [] for line in lines: if line == "": continue change = get_change_from_line(line, version) table_data.append( ( f"[{change.short_hash}]({base_url}{change.full_hash})" if markdown else f"`{change.short_hash} <{base_url}{change.full_hash}>`_", change.date, f"`{change.message_without_backticks}`" if markdown else f"``{change.message_without_backticks}``", ) ) changes_list.append(change) header = "" if not table_data: return header, [] table = tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst") if not markdown: header += f"\n\n{version}\n" + "." * len(version) + "\n\n" release_date = table_data[0][1] header += f"Latest change: {release_date}\n\n" return header + table, changes_list def convert_pip_requirements_to_table(requirements: Iterable[str], markdown: bool = True) -> str: """ Converts PIP requirement list to a Markdown table. :param requirements: requirements list :param markdown: if True, Markdown format is used else rst :return: formatted table """ from tabulate import tabulate headers = ["PIP package", "Version required"] table_data = [] for dependency in requirements: found = re.match(r"(^[^<=>~]*)([^<=>~]?.*)$", dependency) if found: package = found.group(1) version_required = found.group(2) if version_required != "": version_required = f"`{version_required}`" if markdown else f'``{version_required}``' table_data.append((f"`{package}`" if markdown else f"``{package}``", version_required)) else: table_data.append((dependency, "")) return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst") def convert_cross_package_dependencies_to_table( cross_package_dependencies: List[str], markdown: bool = True, ) -> str: """ Converts cross-package dependencies to a Markdown table :param cross_package_dependencies: list of cross-package dependencies :param markdown: if True, Markdown format is used else rst :return: formatted table """ from tabulate import tabulate headers = ["Dependent package", "Extra"] table_data = [] prefix = "apache-airflow-providers-" base_url = "https://airflow.apache.org/docs/" for dependency in cross_package_dependencies: pip_package_name = f"{prefix}{dependency.replace('.','-')}" url_suffix = f"{dependency.replace('.','-')}" if markdown: url = f"[{pip_package_name}]({base_url}{url_suffix})" else: url = f"`{pip_package_name} <{base_url}{prefix}{url_suffix}>`_" table_data.append((url, f"`{dependency}`" if markdown else f"``{dependency}``")) return tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst") LICENCE = """<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> """ LICENCE_RST = """ .. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at .. http://www.apache.org/licenses/LICENSE-2.0 .. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ Keeps information about historical releases. """ ReleaseInfo = collections.namedtuple( "ReleaseInfo", "release_version release_version_no_leading_zeros last_commit_hash content file_name" ) def strip_leading_zeros(version: str) -> str: """ Strips leading zeros from version number. This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted by PIP versioning. :param version: version number in CALVER format (potentially with leading 0s in date and month) :return: string with leading 0s after dot replaced. """ return ".".join(str(int(i)) for i in version.split(".")) def get_previous_release_info( previous_release_version: Optional[str], past_releases: List[ReleaseInfo], current_release_version: str ) -> Optional[str]: """ Find previous release. In case we are re-running current release we assume that last release was the previous one. This is needed so that we can generate list of changes since the previous release. 
:param previous_release_version: known last release version :param past_releases: list of past releases :param current_release_version: release that we are working on currently :return: """ previous_release = None if previous_release_version == current_release_version: # Re-running for current release - use previous release as base for git log if len(past_releases) > 1: previous_release = past_releases[1].last_commit_hash else: previous_release = past_releases[0].last_commit_hash if past_releases else None return previous_release def check_if_release_version_ok( past_releases: List[ReleaseInfo], current_release_version: str, ) -> Tuple[str, Optional[str]]: """ Check if the release version passed is not later than the last release version :param past_releases: all past releases (if there are any) :param current_release_version: release version to check :return: Tuple of current/previous_release (previous might be None if there are no releases) """ previous_release_version = past_releases[0].release_version if past_releases else None if current_release_version == '': if previous_release_version: current_release_version = previous_release_version else: current_release_version = (datetime.today() + timedelta(days=5)).strftime('%Y.%m.%d') if previous_release_version: if Version(current_release_version) < Version(previous_release_version): console.print( f"[red]The release {current_release_version} must be not less than " f"{previous_release_version} - last release for the package[/]" ) raise Exception("Bad release version") return current_release_version, previous_release_version def get_cross_provider_dependent_packages(provider_package_id: str) -> List[str]: """ Returns cross-provider dependencies for the package. :param provider_package_id: package id :return: list of cross-provider dependencies """ with open(os.path.join(PROVIDERS_PATH, "dependencies.json")) as dependencies_file: dependent_packages = json.load(dependencies_file).get(provider_package_id) or [] return dependent_packages def make_sure_remote_apache_exists_and_fetch(git_update: bool, verbose: bool): """ Make sure that apache remote exist in git. We need to take a log from the apache repository - not locally. Also, the local repo might be shallow, so we need to un-shallow it. This will: * check if the remote exists and add if it does not * check if the local repo is shallow, mark it to un-shallow in this case * fetch from the remote including all tags and overriding local tags in case they are set differently :param git_update: If the git remote already exists, should we try to update it :param verbose: print verbose messages while fetching """ try: check_remote_command = ["git", "remote", "get-url", HTTPS_REMOTE] if verbose: console.print(f"Running command: '{' '.join(check_remote_command)}'") subprocess.check_call( check_remote_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) # Remote already exists, don't update it again! if not git_update: return except subprocess.CalledProcessError as ex: if ex.returncode == 128 or ex.returncode == 2: remote_add_command = [ "git", "remote", "add", HTTPS_REMOTE, "https://github.com/apache/airflow.git", ] if verbose: console.print(f"Running command: '{' '.join(remote_add_command)}'") try: subprocess.check_output( remote_add_command, stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as ex: console.print("[red]Error: when adding remote:[/]", ex) sys.exit(128) else: raise if verbose: console.print("Fetching full history and tags from remote. 
") console.print("This might override your local tags!") is_shallow_repo = ( subprocess.check_output(["git", "rev-parse", "--is-shallow-repository"], stderr=subprocess.DEVNULL) == 'true' ) fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE] if is_shallow_repo: if verbose: console.print( "This will also un-shallow the repository, " "making all history available and increasing storage!" ) fetch_command.append("--unshallow") if verbose: console.print(f"Running command: '{' '.join(fetch_command)}'") subprocess.check_call( fetch_command, stderr=subprocess.DEVNULL, ) def get_git_log_command( verbose: bool, from_commit: Optional[str] = None, to_commit: Optional[str] = None ) -> List[str]: """ Get git command to run for the current repo from the current folder (which is the package folder). :param verbose: whether to print verbose info while getting the command :param from_commit: if present - base commit from which to start the log from :param to_commit: if present - final commit which should be the start of the log :return: git command to run """ git_cmd = [ "git", "log", "--pretty=format:%H %h %cd %s", "--date=short", ] if from_commit and to_commit: git_cmd.append(f"{from_commit}...{to_commit}") elif from_commit: git_cmd.append(from_commit) git_cmd.extend(['--', '.']) if verbose: console.print(f"Command to run: '{' '.join(git_cmd)}'") return git_cmd def get_git_tag_check_command(tag: str) -> List[str]: """ Get git command to check if tag exits. :param tag: Tag to check :return: git command to run """ return [ "git", "rev-parse", tag, ] def get_source_package_path(provider_package_id: str) -> str: """ Retrieves source package path from package id. :param provider_package_id: id of the package :return: path of the providers folder """ return os.path.join(PROVIDERS_PATH, *provider_package_id.split(".")) def get_documentation_package_path(provider_package_id: str) -> str: """ Retrieves documentation package path from package id. :param provider_package_id: id of the package :return: path of the documentation folder """ return os.path.join( DOCUMENTATION_PATH, f"apache-airflow-providers-{provider_package_id.replace('.','-')}" ) def get_generated_package_path(provider_package_id: str) -> str: """ Retrieves generated package path from package id. :param provider_package_id: id of the package :return: path of the providers folder """ provider_package_path = os.path.join(GENERATED_PROVIDERS_PATH, *provider_package_id.split(".")) return provider_package_path def get_additional_package_info(provider_package_path: str) -> str: """ Returns additional info for the package. :param provider_package_path: path for the package :return: additional information for the path (empty string if missing) """ additional_info_file_path = os.path.join(provider_package_path, "ADDITIONAL_INFO.md") if os.path.isfile(additional_info_file_path): with open(additional_info_file_path) as additional_info_file: additional_info = additional_info_file.read() additional_info_lines = additional_info.splitlines(keepends=True) result = "" skip_comment = True for line in additional_info_lines: if line.startswith(" -->"): skip_comment = False continue if not skip_comment: result += line return result return "" def is_camel_case_with_acronyms(s: str): """ Checks if the string passed is Camel Case (with capitalised acronyms allowed). :param s: string to check :return: true if the name looks cool as Class name. 
""" return s != s.lower() and s != s.upper() and "_" not in s and s[0].upper() == s[0] def check_if_classes_are_properly_named( entity_summary: Dict[EntityType, EntityTypeSummary] ) -> Tuple[int, int]: """ Check if all entities in the dictionary are named properly. It prints names at the output and returns the status of class names. :param entity_summary: dictionary of class names to check, grouped by types. :return: Tuple of 2 ints = total number of entities and number of badly named entities """ total_class_number = 0 badly_named_class_number = 0 for entity_type, class_suffix in EXPECTED_SUFFIXES.items(): for class_full_name in entity_summary[entity_type].entities: _, class_name = class_full_name.rsplit(".", maxsplit=1) error_encountered = False if not is_camel_case_with_acronyms(class_name): console.print( f"[red]The class {class_full_name} is wrongly named. The " f"class name should be CamelCaseWithACRONYMS ![/]" ) error_encountered = True if not class_name.endswith(class_suffix): console.print( f"[red]The class {class_full_name} is wrongly named. It is one of the {entity_type.value}" f" so it should end with {class_suffix}[/]" ) error_encountered = True total_class_number += 1 if error_encountered: badly_named_class_number += 1 return total_class_number, badly_named_class_number def get_package_pip_name(provider_package_id: str): return f"apache-airflow-providers-{provider_package_id.replace('.', '-')}" def validate_provider_info_with_runtime_schema(provider_info: Dict[str, Any]) -> None: """ Validates provider info against the runtime schema. This way we check if the provider info in the packages is future-compatible. The Runtime Schema should only change when there is a major version change. :param provider_info: provider info to validate """ with open(PROVIDER_RUNTIME_DATA_SCHEMA_PATH) as schema_file: schema = json.load(schema_file) try: jsonschema.validate(provider_info, schema=schema) except jsonschema.ValidationError as ex: console.print("[red]Provider info not validated against runtime schema[/]") raise Exception( "Error when validating schema. The schema must be compatible with " "airflow/provider_info.schema.json.", ex, ) def get_provider_yaml(provider_package_id: str) -> Dict[str, Any]: """ Retrieves provider info from the provider yaml file. The provider yaml file contains more information than provider_info that is used at runtime. This method converts the full provider yaml file into stripped-down provider info and validates it against deprecated 2.0.0 schema and runtime schema. :param provider_package_id: package id to retrieve provider.yaml from :return: provider_info dictionary """ provider_yaml_file_name = os.path.join(get_source_package_path(provider_package_id), "provider.yaml") if not os.path.exists(provider_yaml_file_name): raise Exception(f"The provider.yaml file is missing: {provider_yaml_file_name}") with open(provider_yaml_file_name) as provider_file: provider_yaml_dict = safe_load(provider_file) return provider_yaml_dict def get_provider_info_from_provider_yaml(provider_package_id: str) -> Dict[str, Any]: """ Retrieves provider info from the provider yaml file. 
:param provider_package_id: package id to retrieve provider.yaml from :return: provider_info dictionary """ provider_yaml_dict = get_provider_yaml(provider_package_id=provider_package_id) validate_provider_info_with_runtime_schema(provider_yaml_dict) return provider_yaml_dict def get_version_tag(version: str, provider_package_id: str, version_suffix: str = ''): if version_suffix is None: version_suffix = '' return f"providers-{provider_package_id.replace('.','-')}/{version}{version_suffix}" def print_changes_table(changes_table): syntax = Syntax(changes_table, "rst", theme="ansi_dark") console.print(syntax) def get_all_changes_for_package( versions: List[str], provider_package_id: str, source_provider_package_path: str, verbose: bool, ) -> Tuple[bool, Optional[Union[List[List[Change]], Change]], str]: """ Retrieves all changes for the package. :param versions: list of versions :param provider_package_id: provider package id :param source_provider_package_path: path where package is located :param verbose: whether to print verbose messages """ current_version = versions[0] current_tag_no_suffix = get_version_tag(current_version, provider_package_id) if verbose: console.print(f"Checking if tag '{current_tag_no_suffix}' exist.") if not subprocess.call( get_git_tag_check_command(current_tag_no_suffix), cwd=source_provider_package_path, stderr=subprocess.DEVNULL, ): if verbose: console.print(f"The tag {current_tag_no_suffix} exists.") # The tag already exists changes = subprocess.check_output( get_git_log_command(verbose, HEAD_OF_HTTPS_REMOTE, current_tag_no_suffix), cwd=source_provider_package_path, universal_newlines=True, ) if changes: provider_details = get_provider_details(provider_package_id) doc_only_change_file = os.path.join( provider_details.source_provider_package_path, ".latest-doc-only-change.txt" ) if os.path.exists(doc_only_change_file): with open(doc_only_change_file) as f: last_doc_only_hash = f.read().strip() try: changes_since_last_doc_only_check = subprocess.check_output( get_git_log_command(verbose, HEAD_OF_HTTPS_REMOTE, last_doc_only_hash), cwd=source_provider_package_path, universal_newlines=True, ) if not changes_since_last_doc_only_check: console.print() console.print( "[yellow]The provider has doc-only changes since the last release. Skipping[/]" ) # Returns 66 in case of doc-only changes sys.exit(66) if len(changes) > len(changes_since_last_doc_only_check): # if doc-only was released after previous release - use it as starting point # but if before - stay with the releases from last tag. changes = changes_since_last_doc_only_check except subprocess.CalledProcessError: # ignore when the commit mentioned as last doc-only change is obsolete pass console.print(f"[yellow]The provider {provider_package_id} has changes since last release[/]") console.print() console.print( "[yellow]Please update version in " f"'airflow/providers/{provider_package_id.replace('-','/')}/'" "provider.yaml'[/]\n" ) console.print("[yellow]Or mark the changes as doc-only[/]") changes_table, array_of_changes = convert_git_changes_to_table( "UNKNOWN", changes, base_url="https://github.com/apache/airflow/commit/", markdown=False, ) print_changes_table(changes_table) return False, array_of_changes[0], changes_table else: console.print(f"No changes for {provider_package_id}") return False, None, "" if verbose: console.print("The tag does not exist. 
") if len(versions) == 1: console.print( f"The provider '{provider_package_id}' has never been released but it is ready to release!\n" ) else: console.print(f"New version of the '{provider_package_id}' package is ready to be released!\n") next_version_tag = HEAD_OF_HTTPS_REMOTE changes_table = '' current_version = versions[0] list_of_list_of_changes: List[List[Change]] = [] for version in versions[1:]: version_tag = get_version_tag(version, provider_package_id) changes = subprocess.check_output( get_git_log_command(verbose, next_version_tag, version_tag), cwd=source_provider_package_path, universal_newlines=True, ) changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table( current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False ) changes_table += changes_table_for_version list_of_list_of_changes.append(array_of_changes_for_version) next_version_tag = version_tag current_version = version changes = subprocess.check_output( get_git_log_command(verbose, next_version_tag), cwd=source_provider_package_path, universal_newlines=True, ) changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table( current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False ) changes_table += changes_table_for_version if verbose: print_changes_table(changes_table) return True, list_of_list_of_changes if len(list_of_list_of_changes) > 0 else None, changes_table def get_provider_details(provider_package_id: str) -> ProviderPackageDetails: provider_info = get_provider_info_from_provider_yaml(provider_package_id) return ProviderPackageDetails( provider_package_id=provider_package_id, full_package_name=f"airflow.providers.{provider_package_id}", pypi_package_name=f"apache-airflow-providers-{provider_package_id.replace('.', '-')}", source_provider_package_path=get_source_package_path(provider_package_id), documentation_provider_package_path=get_documentation_package_path(provider_package_id), provider_description=provider_info['description'], versions=provider_info['versions'], excluded_python_versions=provider_info.get("excluded-python-versions") or [], ) def get_provider_requirements(provider_package_id: str) -> List[str]: provider_yaml = get_provider_yaml(provider_package_id) requirements = ( provider_yaml['additional-dependencies'].copy() if 'additional-dependencies' in provider_yaml else [] ) requirements.extend(PROVIDERS_REQUIREMENTS[provider_package_id]) return requirements def get_provider_jinja_context( provider_info: Dict[str, Any], provider_details: ProviderPackageDetails, current_release_version: str, version_suffix: str, ): verify_provider_package(provider_details.provider_package_id) changelog_path = verify_changelog_exists(provider_details.provider_package_id) cross_providers_dependencies = get_cross_provider_dependent_packages( provider_package_id=provider_details.provider_package_id ) release_version_no_leading_zeros = strip_leading_zeros(current_release_version) pip_requirements_table = convert_pip_requirements_to_table( get_provider_requirements(provider_details.provider_package_id) ) pip_requirements_table_rst = convert_pip_requirements_to_table( get_provider_requirements(provider_details.provider_package_id), markdown=False ) cross_providers_dependencies_table = convert_cross_package_dependencies_to_table( cross_providers_dependencies ) cross_providers_dependencies_table_rst = convert_cross_package_dependencies_to_table( cross_providers_dependencies, markdown=False ) with 
open(changelog_path) as changelog_file: changelog = changelog_file.read() supported_python_versions = [ p for p in ALL_PYTHON_VERSIONS if p not in provider_details.excluded_python_versions ] python_requires = "~=3.6" for p in provider_details.excluded_python_versions: python_requires += f", !={p}" context: Dict[str, Any] = { "ENTITY_TYPES": list(EntityType), "README_FILE": "README.rst", "PROVIDER_PACKAGE_ID": provider_details.provider_package_id, "PACKAGE_PIP_NAME": get_pip_package_name(provider_details.provider_package_id), "PACKAGE_WHEEL_NAME": get_wheel_package_name(provider_details.provider_package_id), "FULL_PACKAGE_NAME": provider_details.full_package_name, "PROVIDER_PATH": provider_details.full_package_name.replace(".", "/"), "RELEASE": current_release_version, "RELEASE_NO_LEADING_ZEROS": release_version_no_leading_zeros, "VERSION_SUFFIX": version_suffix or '', "ADDITIONAL_INFO": get_additional_package_info( provider_package_path=provider_details.source_provider_package_path ), "CROSS_PROVIDERS_DEPENDENCIES": cross_providers_dependencies, "PIP_REQUIREMENTS": PROVIDERS_REQUIREMENTS[provider_details.provider_package_id], "PROVIDER_TYPE": "Provider", "PROVIDERS_FOLDER": "providers", "PROVIDER_DESCRIPTION": provider_details.provider_description, "INSTALL_REQUIREMENTS": get_install_requirements( provider_package_id=provider_details.provider_package_id, version_suffix=version_suffix ), "SETUP_REQUIREMENTS": get_setup_requirements(), "EXTRAS_REQUIREMENTS": get_package_extras(provider_package_id=provider_details.provider_package_id), "CROSS_PROVIDERS_DEPENDENCIES_TABLE": cross_providers_dependencies_table, "CROSS_PROVIDERS_DEPENDENCIES_TABLE_RST": cross_providers_dependencies_table_rst, "PIP_REQUIREMENTS_TABLE": pip_requirements_table, "PIP_REQUIREMENTS_TABLE_RST": pip_requirements_table_rst, "PROVIDER_INFO": provider_info, "CHANGELOG_RELATIVE_PATH": relpath( provider_details.source_provider_package_path, provider_details.documentation_provider_package_path, ), "CHANGELOG": changelog, "SUPPORTED_PYTHON_VERSIONS": supported_python_versions, "PYTHON_REQUIRES": python_requires, } return context def prepare_readme_file(context): readme_content = LICENCE_RST + render_template( template_name="PROVIDER_README", context=context, extension=".rst" ) readme_file_path = os.path.join(TARGET_PROVIDER_PACKAGES_PATH, "README.rst") with open(readme_file_path, "wt") as readme_file: readme_file.write(readme_content) def confirm(message: str, answer: Optional[str] = None) -> bool: """ Ask user to confirm (case-insensitive). :param message: message to display :param answer: force answer if set :return: True if the answer is any form of y/yes. Exits with 65 exit code if any form of q/quit is chosen. """ given_answer = answer.lower() if answer is not None else "" while given_answer not in ["y", "n", "q", "yes", "no", "quit"]: console.print(f"[yellow]{message}[y/n/q]?[/] ", end='') given_answer = input("").lower() if given_answer.lower() in ["q", "quit"]: # Returns 65 in case user decided to quit sys.exit(65) return given_answer in ["y", "yes"] def mark_latest_changes_as_documentation_only( provider_details: ProviderPackageDetails, latest_change: Change ): console.print( f"Marking last change: {latest_change.short_hash} and all above changes since the last release " "as doc-only changes!" 
) with open( os.path.join(provider_details.source_provider_package_path, ".latest-doc-only-change.txt"), "tw" ) as f: f.write(latest_change.full_hash + "\n") # exit code 66 marks doc-only change marked sys.exit(66) def update_release_notes( provider_package_id: str, version_suffix: str, force: bool, verbose: bool, answer: Optional[str], ) -> bool: """ Updates generated files (readme, changes and/or setup.cfg/setup.py/manifest.in/provider_info) :param provider_package_id: id of the package :param version_suffix: version suffix corresponding to the version in the code :param force: regenerate already released documentation :param verbose: whether to print verbose messages :param answer: force answer to questions if set. :returns False if the package should be skipped, True if everything generated properly """ verify_provider_package(provider_package_id) provider_details = get_provider_details(provider_package_id) provider_info = get_provider_info_from_provider_yaml(provider_package_id) current_release_version = provider_details.versions[0] jinja_context = get_provider_jinja_context( provider_info=provider_info, provider_details=provider_details, current_release_version=current_release_version, version_suffix=version_suffix, ) proceed, latest_change, changes = get_all_changes_for_package( provider_details.versions, provider_package_id, provider_details.source_provider_package_path, verbose, ) if not force: if proceed: if not confirm("Provider marked for release. Proceed", answer=answer): return False elif not latest_change: console.print() console.print( f"[yellow]Provider: {provider_package_id} - skipping documentation generation. No changes![/]" ) console.print() return False else: if confirm("Are those changes documentation-only?", answer=answer): if isinstance(latest_change, Change): mark_latest_changes_as_documentation_only(provider_details, latest_change) else: raise ValueError( "Expected only one change to be present to mark changes " f"in provider {provider_package_id} as docs-only. " f"Received {len(latest_change)}." 
) return False jinja_context["DETAILED_CHANGES_RST"] = changes jinja_context["DETAILED_CHANGES_PRESENT"] = len(changes) > 0 update_commits_rst( jinja_context, provider_package_id, provider_details.documentation_provider_package_path ) return True def update_setup_files( provider_package_id: str, version_suffix: str, ): """ Updates generated setup.cfg/setup.py/manifest.in/provider_info for packages :param provider_package_id: id of the package :param version_suffix: version suffix corresponding to the version in the code :returns False if the package should be skipped, True if everything generated properly """ verify_provider_package(provider_package_id) provider_details = get_provider_details(provider_package_id) provider_info = get_provider_info_from_provider_yaml(provider_package_id) current_release_version = provider_details.versions[0] jinja_context = get_provider_jinja_context( provider_info=provider_info, provider_details=provider_details, current_release_version=current_release_version, version_suffix=version_suffix, ) console.print() console.print(f"Generating setup files for {provider_package_id}") console.print() prepare_setup_py_file(jinja_context) prepare_setup_cfg_file(jinja_context) prepare_get_provider_info_py_file(jinja_context, provider_package_id) prepare_manifest_in_file(jinja_context) prepare_readme_file(jinja_context) return True def replace_content(file_path, old_text, new_text, provider_package_id): if new_text != old_text: _, temp_file_path = tempfile.mkstemp() try: if os.path.isfile(file_path): copyfile(file_path, temp_file_path) with open(file_path, "wt") as readme_file: readme_file.write(new_text) console.print() console.print(f"Generated {file_path} file for the {provider_package_id} provider") console.print() if old_text != "": subprocess.call(["diff", "--color=always", temp_file_path, file_path]) finally: os.remove(temp_file_path) AUTOMATICALLY_GENERATED_MARKER = "AUTOMATICALLY GENERATED" AUTOMATICALLY_GENERATED_CONTENT = ( f".. THE REMAINDER OF THE FILE IS {AUTOMATICALLY_GENERATED_MARKER}. " f"IT WILL BE OVERWRITTEN AT RELEASE TIME!" 
) def update_index_rst( context, provider_package_id, target_path, ): index_update = render_template( template_name="PROVIDER_INDEX", context=context, extension='.rst', keep_trailing_newline=True ) index_file_path = os.path.join(target_path, "index.rst") old_text = "" if os.path.isfile(index_file_path): with open(index_file_path) as readme_file_read: old_text = readme_file_read.read() new_text = deepcopy(old_text) lines = old_text.splitlines(keepends=False) for index, line in enumerate(lines): if AUTOMATICALLY_GENERATED_MARKER in line: new_text = "\n".join(lines[:index]) new_text += "\n" + AUTOMATICALLY_GENERATED_CONTENT + "\n" new_text += index_update replace_content(index_file_path, old_text, new_text, provider_package_id) def update_commits_rst( context, provider_package_id, target_path, ): new_text = render_template( template_name="PROVIDER_COMMITS", context=context, extension='.rst', keep_trailing_newline=True ) index_file_path = os.path.join(target_path, "commits.rst") old_text = "" if os.path.isfile(index_file_path): with open(index_file_path) as readme_file_read: old_text = readme_file_read.read() replace_content(index_file_path, old_text, new_text, provider_package_id) @lru_cache(maxsize=None) def black_mode(): from black import Mode, parse_pyproject_toml, target_version_option_callback config = parse_pyproject_toml(os.path.join(SOURCE_DIR_PATH, "pyproject.toml")) target_versions = set( target_version_option_callback(None, None, tuple(config.get('target_version', ()))), ) return Mode( target_versions=target_versions, line_length=config.get('line_length', Mode.line_length), is_pyi=bool(config.get('is_pyi', Mode.is_pyi)), string_normalization=not bool(config.get('skip_string_normalization', not Mode.string_normalization)), experimental_string_processing=bool( config.get('experimental_string_processing', Mode.experimental_string_processing) ), ) def black_format(content) -> str: from black import format_str return format_str(content, mode=black_mode()) def prepare_setup_py_file(context): setup_py_template_name = "SETUP" setup_py_file_path = os.path.abspath(os.path.join(get_target_folder(), "setup.py")) setup_py_content = render_template( template_name=setup_py_template_name, context=context, extension='.py', autoescape=False ) with open(setup_py_file_path, "wt") as setup_py_file: setup_py_file.write(black_format(setup_py_content)) def prepare_setup_cfg_file(context): setup_cfg_template_name = "SETUP" setup_cfg_file_path = os.path.abspath(os.path.join(get_target_folder(), "setup.cfg")) setup_cfg_content = render_template( template_name=setup_cfg_template_name, context=context, extension='.cfg', autoescape=False, keep_trailing_newline=True, ) with open(setup_cfg_file_path, "wt") as setup_cfg_file: setup_cfg_file.write(setup_cfg_content) def prepare_get_provider_info_py_file(context, provider_package_id: str): get_provider_template_name = "get_provider_info" get_provider_file_path = os.path.abspath( os.path.join( get_target_providers_package_folder(provider_package_id), "get_provider_info.py", ) ) get_provider_content = render_template( template_name=get_provider_template_name, context=context, extension='.py', autoescape=False, keep_trailing_newline=True, ) with open(get_provider_file_path, "wt") as get_provider_file: get_provider_file.write(black_format(get_provider_content)) def prepare_manifest_in_file(context): target = os.path.abspath(os.path.join(get_target_folder(), "MANIFEST.in")) content = render_template( template_name="MANIFEST", context=context, extension='.in', 
autoescape=False, keep_trailing_newline=True, ) with open(target, "wt") as fh: fh.write(content) def get_all_providers() -> List[str]: """ Returns all providers for regular packages. :return: list of providers that are considered for provider packages """ return list(PROVIDERS_REQUIREMENTS.keys()) def verify_provider_package(provider_package_id: str) -> None: """ Verifies if the provider package is good. :param provider_package_id: package id to verify :return: None """ if provider_package_id not in get_provider_packages(): console.print(f"[red]Wrong package name: {provider_package_id}[/]") console.print("Use one of:") console.print(get_provider_packages()) raise Exception(f"The package {provider_package_id} is not a provider package.") def verify_changelog_exists(package: str) -> str: provider_details = get_provider_details(package) changelog_path = os.path.join(provider_details.source_provider_package_path, "CHANGELOG.rst") if not os.path.isfile(changelog_path): console.print(f"[red]ERROR: Missing ${changelog_path}[/]") console.print("Please add the file with initial content:") console.print() syntax = Syntax( INITIAL_CHANGELOG_CONTENT, "rst", theme="ansi_dark", ) console.print(syntax) console.print() raise Exception(f"Missing {changelog_path}") return changelog_path @cli.command() def list_providers_packages(): """List all provider packages.""" providers = get_all_providers() for provider in providers: console.print(provider) @cli.command() @option_version_suffix @option_git_update @argument_package_id @option_force @option_verbose @click.option( "-a", "--answer", type=click.Choice(['y', 'n', 'q', 'yes', 'no', 'quit']), help="Force answer to questions.", envvar='ANSWER', ) def update_package_documentation( version_suffix: str, git_update: bool, answer: Optional[str], package_id: str, force: bool, verbose: bool, ): """ Updates package documentation. See `list-providers-packages` subcommand for the possible PACKAGE_ID values """ provider_package_id = package_id verify_provider_package(provider_package_id) with with_group(f"Update release notes for package '{provider_package_id}' "): console.print("Updating documentation for the latest release version.") make_sure_remote_apache_exists_and_fetch(git_update, verbose) if not update_release_notes( provider_package_id, version_suffix, force=force, verbose=verbose, answer=answer ): # Returns 64 in case of skipped package sys.exit(64) def tag_exists_for_version(provider_package_id: str, current_tag: str, verbose: bool): provider_details = get_provider_details(provider_package_id) if verbose: console.print(f"Checking if tag `{current_tag}` exists.") if not subprocess.call( get_git_tag_check_command(current_tag), cwd=provider_details.source_provider_package_path, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, ): if verbose: console.print(f"Tag `{current_tag}` exists.") return True if verbose: console.print(f"Tag `{current_tag}` does not exist.") return False @cli.command() @option_version_suffix @option_git_update @argument_package_id @option_verbose @option_skip_tag_check def generate_setup_files( version_suffix: str, git_update: bool, package_id: str, verbose: bool, skip_tag_check: bool ): """ Generates setup files for the package. 
See `list-providers-packages` subcommand for the possible PACKAGE_ID values """ provider_package_id = package_id with with_group(f"Generate setup files for '{provider_package_id}'"): if not skip_tag_check: current_tag = get_current_tag(provider_package_id, version_suffix, git_update, verbose) if tag_exists_for_version(provider_package_id, current_tag, verbose): console.print(f"[yellow]The tag {current_tag} exists. Not preparing the package.[/]") # Returns 1 in case of skipped package sys.exit(1) if update_setup_files(provider_package_id, version_suffix): console.print(f"[green]Generated regular package setup files for {provider_package_id}[/]") else: # Returns 64 in case of skipped package sys.exit(64) def get_current_tag(provider_package_id: str, suffix: str, git_update: bool, verbose: bool): verify_provider_package(provider_package_id) make_sure_remote_apache_exists_and_fetch(git_update, verbose) provider_info = get_provider_info_from_provider_yaml(provider_package_id) versions: List[str] = provider_info['versions'] current_version = versions[0] current_tag = get_version_tag(current_version, provider_package_id, suffix) return current_tag def cleanup_remnants(verbose: bool): if verbose: console.print("Cleaning remnants") files = glob.glob("*.egg-info") for file in files: shutil.rmtree(file, ignore_errors=True) files = glob.glob("build") for file in files: shutil.rmtree(file, ignore_errors=True) def verify_setup_cfg_prepared(provider_package): with open("setup.cfg") as f: setup_content = f.read() search_for = f"providers-{provider_package.replace('.','-')} for Apache Airflow" if search_for not in setup_content: console.print( f"[red]The setup.py is probably prepared for another package. " f"It does not contain [bold]{search_for}[/bold]![/]" ) console.print( f"\nRun:\n\n[bold]./dev/provider_packages/prepare_provider_packages.py " f"generate-setup-files {provider_package}[/bold]\n" ) raise Exception("Wrong setup!") @cli.command() @click.option( '--package-format', type=click.Choice(['sdist', 'wheel', 'both']), default='wheel', help='Optional format - only used in case of building packages (default: wheel)', ) @option_git_update @option_version_suffix @argument_package_id @option_verbose @option_skip_tag_check def build_provider_packages( package_format: str, git_update: bool, version_suffix: str, package_id: str, verbose: bool, skip_tag_check: bool, ): """ Builds provider package. See `list-providers-packages` subcommand for the possible PACKAGE_ID values """ import tempfile # we cannot use context managers because if the directory gets deleted (which bdist_wheel does), # the context manager will throw an exception when trying to delete it again tmp_build_dir = tempfile.TemporaryDirectory().name tmp_dist_dir = tempfile.TemporaryDirectory().name try: provider_package_id = package_id with with_group(f"Prepare provider package for '{provider_package_id}'"): if not skip_tag_check and (version_suffix.startswith("rc") or version_suffix == ""): # For RC and official releases we check if the "officially released" version exists # and skip the released if it was. This allows to skip packages that have not been # marked for release. For "dev" suffixes, we always build all packages released_tag = get_current_tag(provider_package_id, "", git_update, verbose) if tag_exists_for_version(provider_package_id, released_tag, verbose): console.print(f"[yellow]The tag {released_tag} exists. 
Skipping the package.[/]") return False console.print(f"Changing directory to {TARGET_PROVIDER_PACKAGES_PATH}") os.chdir(TARGET_PROVIDER_PACKAGES_PATH) cleanup_remnants(verbose) provider_package = package_id verify_setup_cfg_prepared(provider_package) console.print(f"Building provider package: {provider_package} in format {package_format}") command = ["python3", "setup.py", "build", "--build-temp", tmp_build_dir] if version_suffix is not None: command.extend(['egg_info', '--tag-build', version_suffix]) if package_format in ['sdist', 'both']: command.append("sdist") if package_format in ['wheel', 'both']: command.extend(["bdist_wheel", "--bdist-dir", tmp_dist_dir]) console.print(f"Executing command: '{' '.join(command)}'") try: subprocess.check_call(command, stdout=subprocess.DEVNULL) except subprocess.CalledProcessError as ex: console.print(ex.output.decode()) raise Exception("The command returned an error %s", command) console.print( f"[green]Prepared provider package {provider_package} in format {package_format}[/]" ) finally: shutil.rmtree(tmp_build_dir, ignore_errors=True) shutil.rmtree(tmp_dist_dir, ignore_errors=True) def verify_provider_classes_for_single_provider(imported_classes: List[str], provider_package_id: str): """Verify naming of provider classes for single provider.""" full_package_name = f"airflow.providers.{provider_package_id}" entity_summaries = get_package_class_summary(full_package_name, imported_classes) total, bad = check_if_classes_are_properly_named(entity_summaries) bad += sum(len(entity_summary.wrong_entities) for entity_summary in entity_summaries.values()) if bad != 0: console.print() console.print(f"[red]There are {bad} errors of {total} entities for {provider_package_id}[/]") console.print() return total, bad def summarise_total_vs_bad_and_warnings(total: int, bad: int, warns: List[warnings.WarningMessage]) -> bool: """Summarises Bad/Good class names for providers and warnings""" raise_error = False if bad == 0: console.print() console.print(f"[green]OK: All {total} entities are properly named[/]") console.print() console.print("Totals:") console.print() for entity in EntityType: console.print(f"{entity.value}: {TOTALS[entity]}") console.print() else: console.print() console.print( f"[red]ERROR! There are in total: {bad} entities badly named out of {total} entities[/]" ) console.print() raise_error = True if warns: if os.environ.get('GITHUB_ACTIONS'): # Ends group in GitHub Actions so that the errors are immediately visible in CI log console.print("::endgroup::") console.print() console.print("[red]Unknown warnings generated:[/]") console.print() for w in warns: one_line_message = str(w.message).replace('\n', ' ') console.print(f"{w.filename}:{w.lineno}:[yellow]{one_line_message}[/]") console.print() console.print(f"[red]ERROR! 
There were {len(warns)} warnings generated during the import[/]") console.print() console.print("[yellow]Ideally, fix it, so that no warnings are generated during import.[/]") console.print("[yellow]There are three cases that are legitimate deprecation warnings though:[/]") console.print("[yellow] 1) when you deprecate whole module or class and replace it in provider[/]") console.print("[yellow] 2) when 3rd-party module generates Deprecation and you cannot upgrade it[/]") console.print( "[yellow] 3) when many 3rd-party module generates same Deprecation warning that " "comes from another common library[/]" ) console.print() console.print( "[yellow]In case 1), add the deprecation message to " "the KNOWN_DEPRECATED_DIRECT_IMPORTS in prepare_provider_packages.py[/]" ) console.print( "[yellow]In case 2), add the deprecation message together with module it generates to " "the KNOWN_DEPRECATED_MESSAGES in prepare_provider_packages.py[/]" ) console.print( "[yellow]In case 3), add the deprecation message to " "the KNOWN_COMMON_DEPRECATED_MESSAGES in prepare_provider_packages.py[/]" ) console.print() raise_error = True else: console.print() console.print("[green]OK: No warnings generated[/]") console.print() if raise_error: console.print("[red]Please fix the problems listed above [/]") return False return True # The set of known deprecation messages that we know about. # It contains tuples of "message" and the module that generates the warning - so when the # Same warning is generated by different module, it is not treated as "known" warning. KNOWN_DEPRECATED_MESSAGES: Set[Tuple[str, str]] = { ( 'This version of Apache Beam has not been sufficiently tested on Python 3.9. ' 'You may encounter bugs or missing features.', "apache_beam", ), ( 'This version of Apache Beam has not been sufficiently tested on Python 3.10. ' 'You may encounter bugs or missing features.', "apache_beam", ), ( "Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since" " Python 3.3,and in 3.9 it will stop working", "apache_beam", ), ( 'pyarrow.HadoopFileSystem is deprecated as of 2.0.0, please use pyarrow.fs.HadoopFileSystem instead.', "papermill", ), ( "You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that " "adheres to: 'pyarrow<3.1.0,>=3.0.0; extra == \"pandas\"'", "apache_beam", ), ( "You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that " "adheres to: 'pyarrow<5.1.0,>=5.0.0; extra == \"pandas\"'", "snowflake", ), ("dns.hash module will be removed in future versions. Please use hashlib instead.", "dns"), ("PKCS#7 support in pyOpenSSL is deprecated. You should use the APIs in cryptography.", "eventlet"), ("PKCS#12 support in pyOpenSSL is deprecated. You should use the APIs in cryptography.", "eventlet"), ( "the imp module is deprecated in favour of importlib; see the module's documentation" " for alternative uses", "hdfs", ), ("This operator is deprecated. Please use `airflow.providers.tableau.operators.tableau`.", "salesforce"), ( "You have an incompatible version of 'pyarrow' installed (4.0.1), please install a version that" " adheres to: 'pyarrow<3.1.0,>=3.0.0; extra == \"pandas\"'", "snowflake", ), ( "You have an incompatible version of 'pyarrow' installed (6.0.1), please install a version that" " adheres to: 'pyarrow<5.1.0,>=5.0.0; extra == \"pandas\"'", "snowflake", ), ("SelectableGroups dict interface is deprecated. Use select.", "kombu"), ("The module cloudant is now deprecated. 
The replacement is ibmcloudant.", "cloudant"), ("This module is deprecated. Please use `airflow.operators.empty`.", "dbt"), ("This module is deprecated. Please use `airflow.operators.empty`.", "jdbc"), ("This module is deprecated. Please use `airflow.operators.empty`.", "azure"), ("This module is deprecated. Please use `airflow.operators.empty`.", "qubole"), ("This module is deprecated. Please use `airflow.operators.empty`.", "winrm"), ("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "dbt"), ("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "jdbc"), ("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "azure"), ("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "qubole"), ("This class is deprecated. Please use `airflow.operators.empty.EmptyOperator`.", "winrm"), } KNOWN_COMMON_DEPRECATED_MESSAGES: Set[str] = { "distutils Version classes are deprecated. Use packaging.version instead." } # The set of warning messages generated by direct importing of some deprecated modules. We should only # ignore those messages when the warnings are generated directly by importlib - which means that # we imported it directly during module walk by the importlib library KNOWN_DEPRECATED_DIRECT_IMPORTS: Set[str] = { "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.batch`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_instance`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_registry`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.container_volume`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.cosmos`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.data_factory`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.data_lake`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.hooks.fileshare`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.batch`.", "This module is deprecated. " "Please use `airflow.providers.microsoft.azure.operators.container_instances`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.cosmos`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.secrets.key_vault`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.sensors.cosmos`.", "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.dynamodb`.", "This module is deprecated. Please use `airflow.providers.microsoft.azure.transfers.local_to_wasb`.", "This module is deprecated. Please use `airflow.providers.tableau.operators.tableau_refresh_workbook`.", "This module is deprecated. Please use `airflow.providers.tableau.sensors.tableau_job_status`.", "This module is deprecated. Please use `airflow.providers.tableau.hooks.tableau`.", "This module is deprecated. Please use `kubernetes.client.models.V1Volume`.", "This module is deprecated. Please use `kubernetes.client.models.V1VolumeMount`.", ( "This module is deprecated. Please use `kubernetes.client.models.V1ResourceRequirements`" " and `kubernetes.client.models.V1ContainerPort`." ), "This module is deprecated. Please use `kubernetes.client.models.V1EnvVar`.", 'numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header,' ' got 216 from PyObject', "This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.step_function`.", "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.step_function`.", 'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.ec2`.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.ec2`.', "This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.s3`.", "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.s3`.", "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.dms`.", "This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.dms`.", 'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr`.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.emr`.', "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.redshift_cluster` " "or `airflow.providers.amazon.aws.hooks.redshift_sql` as appropriate.", "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.redshift_sql` " "or `airflow.providers.amazon.aws.operators.redshift_cluster` as appropriate.", "This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.redshift_cluster`.", 'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker`.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.sagemaker`.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.emr`.', 'This module is deprecated. Please use `airflow.providers.opsgenie.hooks.opsgenie`.', 'This module is deprecated. Please use `airflow.providers.opsgenie.operators.opsgenie`.', 'This module is deprecated. Please use `airflow.hooks.redshift_sql` ' 'or `airflow.hooks.redshift_cluster` as appropriate.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.operators.redshift_sql` or ' '`airflow.providers.amazon.aws.operators.redshift_cluster` as appropriate.', 'This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.redshift_cluster`.', "This module is deprecated. 
Please use airflow.providers.amazon.aws.transfers.sql_to_s3`.", } def filter_known_warnings(warn: warnings.WarningMessage) -> bool: msg_string = str(warn.message).replace("\n", " ") for m in KNOWN_DEPRECATED_MESSAGES: expected_package_string = "/" + m[1] + "/" if msg_string == m[0] and warn.filename.find(expected_package_string) != -1: return False return True def filter_direct_importlib_warning(warn: warnings.WarningMessage) -> bool: msg_string = str(warn.message).replace("\n", " ") for m in KNOWN_DEPRECATED_DIRECT_IMPORTS: if msg_string == m and warn.filename.find("/importlib/") != -1: return False return True def filter_known_common_deprecated_messages(warn: warnings.WarningMessage) -> bool: msg_string = str(warn.message).replace("\n", " ") for m in KNOWN_COMMON_DEPRECATED_MESSAGES: if msg_string == m: return False return True @cli.command() def verify_provider_classes(): """Verifies names for all provider classes.""" with with_group("Verifies names for all provider classes"): provider_ids = get_all_providers() imported_classes, warns = import_all_classes( provider_ids=provider_ids, print_imports=True, paths=[str(PROVIDERS_PATH)], prefix="airflow.providers.", ) total = 0 bad = 0 for provider_package_id in provider_ids: inc_total, inc_bad = verify_provider_classes_for_single_provider( imported_classes, provider_package_id ) total += inc_total bad += inc_bad warns = list(filter(filter_known_warnings, warns)) warns = list(filter(filter_direct_importlib_warning, warns)) warns = list(filter(filter_known_common_deprecated_messages, warns)) if not summarise_total_vs_bad_and_warnings(total, bad, warns): sys.exit(1) def find_insertion_index_for_version(content: List[str], version: str) -> Tuple[int, bool]: """ Finds insertion index for the specified version from the .rst changelog content. :param content: changelog split into separate lines :param version: version to look for :return: Tuple : insertion_index, append (whether to append or insert the changelog) """ changelog_found = False skip_next_line = False index = 0 for index, line in enumerate(content): if not changelog_found and line.strip() == version: changelog_found = True skip_next_line = True elif not skip_next_line and line and all(char == '.' for char in line): return index - 2, changelog_found else: skip_next_line = False return index, changelog_found class ClassifiedChanges(NamedTuple): """Stores lists of changes classified automatically""" fixes: List[Change] = [] features: List[Change] = [] breaking_changes: List[Change] = [] other: List[Change] = [] def get_changes_classified(changes: List[Change]) -> ClassifiedChanges: """ Pre-classifies changes based on commit message, it's wildly guessing now, but if we switch to semantic commits, it could be automated. This list is supposed to be manually reviewed and re-classified by release manager anyway. 
:param changes: list of changes :return: list of changes classified semi-automatically to the fix/feature/breaking/other buckets """ classified_changes = ClassifiedChanges() for change in changes: if "fix" in change.message.lower(): classified_changes.fixes.append(change) elif "add" in change.message.lower(): classified_changes.features.append(change) elif "breaking" in change.message.lower(): classified_changes.breaking_changes.append(change) else: classified_changes.other.append(change) return classified_changes @cli.command() @argument_package_id @option_verbose def update_changelog(package_id: str, verbose: bool): """Updates changelog for the provider.""" if _update_changelog(package_id, verbose): sys.exit(64) def _update_changelog(package_id: str, verbose: bool) -> bool: """ Internal update changelog method :param package_id: package id :param verbose: verbose flag :return: true if package is skipped """ with with_group("Updates changelog for last release"): verify_provider_package(package_id) provider_details = get_provider_details(package_id) provider_info = get_provider_info_from_provider_yaml(package_id) current_release_version = provider_details.versions[0] jinja_context = get_provider_jinja_context( provider_info=provider_info, provider_details=provider_details, current_release_version=current_release_version, version_suffix='', ) changelog_path = os.path.join(provider_details.source_provider_package_path, "CHANGELOG.rst") proceed, changes, _ = get_all_changes_for_package( provider_details.versions, package_id, provider_details.source_provider_package_path, verbose, ) if not proceed: console.print( f"[yellow]The provider {package_id} is not being released. Skipping the package.[/]" ) return True generate_new_changelog(package_id, provider_details, changelog_path, changes) console.print() console.print(f"Update index.rst for {package_id}") console.print() update_index_rst(jinja_context, package_id, provider_details.documentation_provider_package_path) return False def generate_new_changelog(package_id, provider_details, changelog_path, changes): latest_version = provider_details.versions[0] with open(changelog_path) as changelog: current_changelog = changelog.read() current_changelog_lines = current_changelog.splitlines() insertion_index, append = find_insertion_index_for_version(current_changelog_lines, latest_version) if append: if not changes: console.print( f"[green]The provider {package_id} changelog for `{latest_version}` " "has first release. Not updating the changelog.[/]" ) return new_changes = [ change for change in changes[0] if change.pr and "(#" + change.pr + ")" not in current_changelog ] if not new_changes: console.print( f"[green]The provider {package_id} changelog for `{latest_version}` " "has no new changes. Not updating the changelog.[/]" ) return context = {"new_changes": new_changes} generated_new_changelog = render_template( template_name='UPDATE_CHANGELOG', context=context, extension=".rst" ) else: classified_changes = get_changes_classified(changes[0]) context = { "version": latest_version, "version_header": "." 
* len(latest_version), "classified_changes": classified_changes, } generated_new_changelog = render_template( template_name='CHANGELOG', context=context, extension=".rst" ) new_changelog_lines = current_changelog_lines[0:insertion_index] new_changelog_lines.extend(generated_new_changelog.splitlines()) new_changelog_lines.extend(current_changelog_lines[insertion_index:]) diff = "\n".join(difflib.context_diff(current_changelog_lines, new_changelog_lines, n=5)) syntax = Syntax(diff, "diff") console.print(syntax) if not append: console.print( f"[green]The provider {package_id} changelog for `{latest_version}` " "version is missing. Generating fresh changelog.[/]" ) else: console.print( f"[green]Appending the provider {package_id} changelog for" f"`{latest_version}` version.[/]" ) with open(changelog_path, "wt") as changelog: changelog.write("\n".join(new_changelog_lines)) changelog.write("\n") def get_package_from_changelog(changelog_path: str): folder = Path(changelog_path).parent package = '' separator = '' while not os.path.basename(folder) == 'providers': package = os.path.basename(folder) + separator + package separator = '.' folder = Path(folder).parent return package @cli.command() @argument_changelog_files @option_git_update @option_verbose def update_changelogs(changelog_files: List[str], git_update: bool, verbose: bool): """Updates changelogs for multiple packages.""" if git_update: make_sure_remote_apache_exists_and_fetch(git_update, verbose) for changelog_file in changelog_files: package_id = get_package_from_changelog(changelog_file) _update_changelog(package_id=package_id, verbose=verbose) def get_prs_for_package(package_id: str) -> List[int]: pr_matcher = re.compile(r".*\(#([0-9]*)\)``$") verify_provider_package(package_id) changelog_path = verify_changelog_exists(package_id) provider_details = get_provider_details(package_id) current_release_version = provider_details.versions[0] prs = [] with open(changelog_path) as changelog_file: changelog_lines = changelog_file.readlines() extract_prs = False skip_line = False for line in changelog_lines: if skip_line: # Skip first "....." header skip_line = False continue if line.strip() == current_release_version: extract_prs = True skip_line = True continue if extract_prs: if len(line) > 1 and all(c == '.' for c in line.strip()): # Header for next version reached break if line.startswith('.. Below changes are excluded from the changelog'): # The reminder of PRs is not important skipping it break match_result = pr_matcher.match(line.strip()) if match_result: prs.append(int(match_result.group(1))) return prs PullRequestOrIssue = Union[PullRequest.PullRequest, Issue.Issue] class ProviderPRInfo(NamedTuple): provider_details: ProviderPackageDetails pr_list: List[PullRequestOrIssue] def is_package_in_dist(dist_files: List[str], package: str) -> bool: """Check if package has been prepared in dist folder.""" for file in dist_files: if file.startswith(f'apache_airflow_providers_{package.replace(".","_")}') or file.startswith( f'apache-airflow-providers-{package.replace(".","-")}' ): return True return False @cli.command() @click.option( '--github-token', envvar='GITHUB_TOKEN', help=textwrap.dedent( """ Github token used to authenticate. You can set omit it if you have GITHUB_TOKEN env variable set. 
Can be generated with: https://github.com/settings/tokens/new?description=Read%20sssues&scopes=repo:status""" ), ) @click.option('--suffix', default='rc1') @click.option( '--only-available-in-dist', is_flag=True, help='Only consider package ids with packages prepared in the dist folder', ) @click.option('--excluded-pr-list', type=str, help="Coma-separated list of PRs to exclude from the issue.") @argument_package_ids def generate_issue_content( package_ids: List[str], github_token: str, suffix: str, only_available_in_dist: bool, excluded_pr_list: str, ): if not package_ids: package_ids = get_all_providers() """Generates content for issue to test the release.""" with with_group("Generates GitHub issue content with people who can test it"): if excluded_pr_list: excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")] else: excluded_prs = [] all_prs: Set[int] = set() provider_prs: Dict[str, List[int]] = {} if only_available_in_dist: files_in_dist = os.listdir(str(DIST_PATH)) prepared_package_ids = [] for package_id in package_ids: if not only_available_in_dist or is_package_in_dist(files_in_dist, package_id): console.print(f"Extracting PRs for provider {package_id}") prepared_package_ids.append(package_id) else: console.print(f"Skipping extracting PRs for provider {package_id} as it is missing in dist") continue prs = get_prs_for_package(package_id) provider_prs[package_id] = list(filter(lambda pr: pr not in excluded_prs, prs)) all_prs.update(provider_prs[package_id]) g = Github(github_token) repo = g.get_repo("apache/airflow") pull_requests: Dict[int, PullRequestOrIssue] = {} with Progress(console=console) as progress: task = progress.add_task(f"Retrieving {len(all_prs)} PRs ", total=len(all_prs)) pr_list = list(all_prs) for i in range(len(pr_list)): pr_number = pr_list[i] progress.console.print( f"Retrieving PR#{pr_number}: " f"https://github.com/apache/airflow/pull/{pr_number}" ) try: pull_requests[pr_number] = repo.get_pull(pr_number) except UnknownObjectException: # Fallback to issue if PR not found try: pull_requests[pr_number] = repo.get_issue(pr_number) # (same fields as PR) except UnknownObjectException: console.print(f"[red]The PR #{pr_number} could not be found[/]") progress.advance(task) interesting_providers: Dict[str, ProviderPRInfo] = {} non_interesting_providers: Dict[str, ProviderPRInfo] = {} for package_id in prepared_package_ids: pull_request_list = [pull_requests[pr] for pr in provider_prs[package_id] if pr in pull_requests] provider_details = get_provider_details(package_id) if pull_request_list: interesting_providers[package_id] = ProviderPRInfo(provider_details, pull_request_list) else: non_interesting_providers[package_id] = ProviderPRInfo(provider_details, pull_request_list) context = { 'interesting_providers': interesting_providers, 'date': datetime.now(), 'suffix': suffix, 'non_interesting_providers': non_interesting_providers, } issue_content = render_template(template_name="PROVIDER_ISSUE", context=context, extension=".md") console.print() console.print( "[green]Below you can find the issue content that you can use " "to ask contributor to test providers![/]" ) console.print() console.print() console.print( "Issue title: [yellow]Status of testing Providers that were " f"prepared on { datetime.now().strftime('%B %d, %Y') }[/]" ) console.print() syntax = Syntax(issue_content, "markdown", theme="ansi_dark") console.print(syntax) console.print() users: Set[str] = set() for provider_info in interesting_providers.values(): for pr in provider_info.pr_list: 
users.add("@" + pr.user.login) console.print("All users involved in the PRs:") console.print(" ".join(users)) if __name__ == "__main__": # The cli exit code is: # * 0 in case of success # * 1 in case of error # * 64 in case of skipped package # * 65 in case user decided to quit # * 66 in case package has doc-only changes try: cli() except KeyboardInterrupt: print('Interrupted') try: sys.exit(65) except SystemExit: os._exit(65)
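# --- Added illustration (not part of the original release tooling) ---
# The exit codes documented above form a small contract with whatever automation
# drives this script (CI jobs or the release shell scripts). A hedged sketch of
# how a caller could consume them; the script path and the dash-separated
# subcommand name are taken from references earlier in this file, and the
# provider id "amazon" is only an example value:
def _example_invoke_update_docs(provider_id: str = "amazon") -> str:
    """Illustrative only: run the docs update for one provider and label the result."""
    proc = subprocess.run(
        [
            sys.executable,
            "dev/provider_packages/prepare_provider_packages.py",
            "update-package-documentation",
            provider_id,
        ]
    )
    meanings = {0: "updated", 1: "error", 64: "skipped", 65: "quit", 66: "doc-only change"}
    return meanings.get(proc.returncode, f"unknown exit code {proc.returncode}")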
py
1a46c78b327b56a43175c30de275a93e212e7b85
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: shufflenet.py import argparse import math import numpy as np import os import cv2 import tensorflow as tf from tensorpack import * from tensorpack.dataflow import imgaug from tensorpack.tfutils import argscope, get_model_loader, model_utils from tensorpack.tfutils.scope_utils import under_name_scope from tensorpack.utils import logger from tensorpack.utils.gpu import get_num_gpu from imagenet_utils import ImageNetModel, eval_on_ILSVRC12, get_imagenet_dataflow @layer_register(log_shape=True) def DepthConv(x, out_channel, kernel_shape, padding='SAME', stride=1, W_init=None, activation=tf.identity): in_shape = x.get_shape().as_list() in_channel = in_shape[1] assert out_channel % in_channel == 0, (out_channel, in_channel) channel_mult = out_channel // in_channel if W_init is None: W_init = tf.variance_scaling_initializer(2.0) kernel_shape = [kernel_shape, kernel_shape] filter_shape = kernel_shape + [in_channel, channel_mult] W = tf.get_variable('W', filter_shape, initializer=W_init) conv = tf.nn.depthwise_conv2d(x, W, [1, 1, stride, stride], padding=padding, data_format='NCHW') return activation(conv, name='output') @under_name_scope() def channel_shuffle(l, group): in_shape = l.get_shape().as_list() in_channel = in_shape[1] assert in_channel % group == 0, in_channel l = tf.reshape(l, [-1, in_channel // group, group] + in_shape[-2:]) l = tf.transpose(l, [0, 2, 1, 3, 4]) l = tf.reshape(l, [-1, in_channel] + in_shape[-2:]) return l @layer_register() def shufflenet_unit(l, out_channel, group, stride): in_shape = l.get_shape().as_list() in_channel = in_shape[1] shortcut = l # "We do not apply group convolution on the first pointwise layer # because the number of input channels is relatively small." first_split = group if in_channel > 24 else 1 l = Conv2D('conv1', l, out_channel // 4, 1, split=first_split, activation=BNReLU) l = channel_shuffle(l, group) l = DepthConv('dconv', l, out_channel // 4, 3, stride=stride) l = BatchNorm('dconv_bn', l) l = Conv2D('conv2', l, out_channel if stride == 1 else out_channel - in_channel, 1, split=group) l = BatchNorm('conv2_bn', l) if stride == 1: # unit (b) output = tf.nn.relu(shortcut + l) else: # unit (c) shortcut = AvgPooling('avgpool', shortcut, 3, 2, padding='SAME') output = tf.concat([shortcut, tf.nn.relu(l)], axis=1) return output @layer_register() def shufflenet_unit_v2(l, out_channel, stride): if stride == 1: shortcut, l = tf.split(l, 2, axis=1) else: shortcut, l = l, l shortcut_channel = int(shortcut.shape[1]) l = Conv2D('conv1', l, out_channel // 2, 1, activation=BNReLU) l = DepthConv('dconv', l, out_channel // 2, 3, stride=stride) l = BatchNorm('dconv_bn', l) l = Conv2D('conv2', l, out_channel - shortcut_channel, 1, activation=BNReLU) if stride == 2: shortcut = DepthConv('shortcut_dconv', shortcut, shortcut_channel, 3, stride=2) shortcut = BatchNorm('shortcut_dconv_bn', shortcut) shortcut = Conv2D('shortcut_conv', shortcut, shortcut_channel, 1, activation=BNReLU) output = tf.concat([shortcut, l], axis=1) output = channel_shuffle(output, 2) return output @layer_register(log_shape=True) def shufflenet_stage(input, channel, num_blocks, group): l = input for i in range(num_blocks): name = 'block{}'.format(i) if args.v2: l = shufflenet_unit_v2(name, l, channel, 2 if i == 0 else 1) else: l = shufflenet_unit(name, l, channel, group, 2 if i == 0 else 1) return l class Model(ImageNetModel): weight_decay = 4e-5 def get_logits(self, image): with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, 
BatchNorm], data_format='channels_first'), \ argscope(Conv2D, use_bias=False): group = args.group if not args.v2: # Copied from the paper channels = { 3: [240, 480, 960], 4: [272, 544, 1088], 8: [384, 768, 1536] } mul = group * 4 # #chan has to be a multiple of this number channels = [int(math.ceil(x * args.ratio / mul) * mul) for x in channels[group]] # The first channel must be a multiple of group first_chan = int(math.ceil(24 * args.ratio / group) * group) else: # Copied from the paper channels = { 0.5: [48, 96, 192], 1.: [116, 232, 464] }[args.ratio] first_chan = 24 logger.info("#Channels: " + str([first_chan] + channels)) l = Conv2D('conv1', image, first_chan, 3, strides=2, activation=BNReLU) l = MaxPooling('pool1', l, 3, 2, padding='SAME') l = shufflenet_stage('stage2', l, channels[0], 4, group) l = shufflenet_stage('stage3', l, channels[1], 8, group) l = shufflenet_stage('stage4', l, channels[2], 4, group) if args.v2: l = Conv2D('conv5', l, 1024, 1, activation=BNReLU) l = GlobalAvgPooling('gap', l) logits = FullyConnected('linear', l, 1000) return logits def get_data(name, batch): isTrain = name == 'train' if isTrain: augmentors = [ # use lighter augs if model is too small imgaug.GoogleNetRandomCropAndResize(crop_area_fraction=(0.49 if args.ratio < 1 else 0.08, 1.)), imgaug.RandomOrderAug( [imgaug.BrightnessScale((0.6, 1.4), clip=False), imgaug.Contrast((0.6, 1.4), clip=False), imgaug.Saturation(0.4, rgb=False), # rgb-bgr conversion for the constants copied from fb.resnet.torch imgaug.Lighting(0.1, eigval=np.asarray( [0.2175, 0.0188, 0.0045][::-1]) * 255.0, eigvec=np.array( [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1] )]), imgaug.Flip(horiz=True), ] else: augmentors = [ imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC), imgaug.CenterCrop((224, 224)), ] return get_imagenet_dataflow( args.data, name, batch, augmentors) def get_config(model, nr_tower): batch = TOTAL_BATCH_SIZE // nr_tower logger.info("Running on {} towers. 
Batch size per tower: {}".format(nr_tower, batch)) dataset_train = get_data('train', batch) dataset_val = get_data('val', batch) step_size = 1280000 // TOTAL_BATCH_SIZE max_iter = 3 * 10**5 max_epoch = (max_iter // step_size) + 1 callbacks = [ ModelSaver(), ScheduledHyperParamSetter('learning_rate', [(0, 0.5), (max_iter, 0)], interp='linear', step_based=True), EstimatedTimeLeft() ] infs = [ClassificationError('wrong-top1', 'val-error-top1'), ClassificationError('wrong-top5', 'val-error-top5')] if nr_tower == 1: # single-GPU inference with queue prefetch callbacks.append(InferenceRunner(QueueInput(dataset_val), infs)) else: # multi-GPU inference (with mandatory queue prefetch) callbacks.append(DataParallelInferenceRunner( dataset_val, infs, list(range(nr_tower)))) return TrainConfig( model=model, dataflow=dataset_train, callbacks=callbacks, steps_per_epoch=step_size, max_epoch=max_epoch, ) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--data', help='ILSVRC dataset dir') parser.add_argument('-r', '--ratio', type=float, default=0.5, choices=[1., 0.5]) parser.add_argument('--group', type=int, default=8, choices=[3, 4, 8], help="Number of groups for ShuffleNetV1") parser.add_argument('--v2', action='store_true', help='Use ShuffleNetV2') parser.add_argument('--batch', type=int, default=1024, help='total batch size') parser.add_argument('--load', help='path to load a model from') parser.add_argument('--eval', action='store_true') parser.add_argument('--flops', action='store_true', help='print flops and exit') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.v2 and args.group != parser.get_default('group'): logger.error("group= is not used in ShuffleNetV2!") if args.batch != 1024: logger.warn("Total batch size != 1024, you need to change other hyperparameters to get the same results.") TOTAL_BATCH_SIZE = args.batch model = Model() if args.eval: batch = 128 # something that can run on one gpu ds = get_data('val', batch) eval_on_ILSVRC12(model, get_model_loader(args.load), ds) elif args.flops: # manually build the graph with batch=1 with TowerContext('', is_training=False): model.build_graph( tf.placeholder(tf.float32, [1, 224, 224, 3], 'input'), tf.placeholder(tf.int32, [1], 'label') ) model_utils.describe_trainable_vars() tf.profiler.profile( tf.get_default_graph(), cmd='op', options=tf.profiler.ProfileOptionBuilder.float_operation()) logger.info("Note that TensorFlow counts flops in a different way from the paper.") logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them " "as 1 flop because it can be executed in one instruction.") else: if args.v2: name = "ShuffleNetV2-{}x".format(args.ratio) else: name = "ShuffleNetV1-{}x-g{}".format(args.ratio, args.group) logger.set_logger_dir(os.path.join('train_log', name)) nr_tower = max(get_num_gpu(), 1) config = get_config(model, nr_tower) if args.load: config.session_init = get_model_loader(args.load) launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_tower))
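# --- Added illustration (not part of the original training script) ---
# channel_shuffle() above is only a reshape / transpose / reshape in NCHW layout.
# A small NumPy-only mirror of that permutation, handy for checking where each
# channel ends up without building a TF graph; the shapes in the commented
# example are arbitrary:
def channel_shuffle_reference(x, group):
    """NumPy version of channel_shuffle for an array of shape (N, C, H, W)."""
    n, c, h, w = x.shape
    assert c % group == 0, c
    x = x.reshape(n, c // group, group, h, w)
    x = x.transpose(0, 2, 1, 3, 4)
    return x.reshape(n, c, h, w)

# Example: with C=6 and group=2 the channel order 0..5 becomes [0, 2, 4, 1, 3, 5]:
#   channel_shuffle_reference(np.arange(6).reshape(1, 6, 1, 1), 2).ravel()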
py
1a46c7b06ecf3a302aba7301de828bdfd2111a06
"""Indexes RDF labels (classes, entities and properties) into Elasticsearch."""
from elasticsearch import Elasticsearch
from rdflib import Graph

from constants import CLASS_INDEX, RELATION_INDEX
from constants import ENTITY_INDEX
from constants import LABEL_PRED_LOWER

es = Elasticsearch(['http://geo-qa.cs.upb.de:9200/'])


def indexClasses(filepath):
    g = Graph()
    g.parse(filepath)
    for stmt in g:
        # Only English (en-gb) labels
        if str(stmt[1]).lower() == LABEL_PRED_LOWER and stmt[2]._language.lower() == "en-gb":
            addToIndexAlt(CLASS_INDEX, str(stmt[0]), stmt[2]._value)


def indexEntities(filepath):
    g = Graph()
    g.parse(filepath)
    for stmt in g:
        # Only label triples (no language filter)
        if str(stmt[1]).lower() == LABEL_PRED_LOWER:
            addToIndexAlt(ENTITY_INDEX, str(stmt[0]), stmt[2]._value)


def indexProperties(filepath):
    g = Graph()
    g.parse(filepath)
    for stmt in g:
        # Only label triples (no language filter)
        if str(stmt[1]).lower() == LABEL_PRED_LOWER:
            addToIndexAlt(RELATION_INDEX, str(stmt[0]), stmt[2]._value)


def addToIndexAlt(index_name, uri, label):
    """Adds a single (uri, label) document to the given index."""
    try:
        es.index(index=index_name, body={"uri": uri, "label": label})
        print(label)
        return True
    except Exception:
        # Do not abort the whole indexing run because of a single failed document.
        return False


if __name__ == "__main__":
    # indexEntities("./cutomizations/bremen-entitiy-all-labels.nt")
    # print("Entities done")
    # print()
    # indexClasses("./cutomizations/lgdo_2014-07-26.n3")
    # print("Classes done")
    indexProperties("/home/hardik/Projects/falcon2.0/customizations/props.nt")
    print("Properties done")
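# --- Added usage sketch (not part of the original script) ---
# Once the labels are indexed, candidate lookup is a full-text match against the
# "label" field. A minimal, hypothetical helper; the index names and connection
# come from the module above, and "Bremen" in the commented call is just an
# example surface form:
def searchLabel(index_name, text, size=10):
    hits = es.search(index=index_name,
                     body={"query": {"match": {"label": text}}, "size": size})
    return [(h["_source"]["uri"], h["_source"]["label"], h["_score"])
            for h in hits["hits"]["hits"]]

# Example: searchLabel(ENTITY_INDEX, "Bremen")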
py
1a46c7e94441079b15b76f478077729aeb904fff
# -*- coding: utf-8 -*- """Manages custom event formatter helpers.""" class FormattersManager(object): """Custom event formatter helpers manager.""" _custom_formatter_helpers = {} @classmethod def GetEventFormatterHelper(cls, identifier): """Retrieves a custom event formatter helper. Args: identifier (str): identifier. Returns: CustomEventFormatterHelper: custom event formatter or None if not available. """ identifier = identifier.lower() return cls._custom_formatter_helpers.get(identifier) @classmethod def RegisterEventFormatterHelper(cls, formatter_helper_class): """Registers a custom event formatter helper. The custom event formatter helpers are identified based on their lower case identifier. Args: formatter_helper_class (type): class of the custom event formatter helper. Raises: KeyError: if a custom formatter helper is already set for the corresponding identifier. """ identifier = formatter_helper_class.IDENTIFIER.lower() if identifier in cls._custom_formatter_helpers: raise KeyError(( 'Custom event formatter helper already set for identifier: ' '{0:s}.').format(formatter_helper_class.IDENTIFIER)) cls._custom_formatter_helpers[identifier] = formatter_helper_class() @classmethod def RegisterEventFormatterHelpers(cls, formatter_helper_classes): """Registers custom event formatter helpers. The formatter classes are identified based on their lower case data type. Args: formatter_helper_classes (list[type]): classes of the custom event formatter helpers. Raises: KeyError: if a custom formatter helper is already set for the corresponding data type. """ for formatter_helper_class in formatter_helper_classes: cls.RegisterEventFormatterHelper(formatter_helper_class)
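# --- Added usage sketch (not part of the original module) ---
# Helper classes are registered once, keyed by their lower-cased IDENTIFIER, and
# later retrieved case-insensitively. The helper class below is a hypothetical
# stand-in used only for this demonstration; it is not a real formatter helper.
if __name__ == '__main__':
  class ExampleFormatterHelper(object):
    """Hypothetical custom event formatter helper used only for this demo."""

    IDENTIFIER = 'example'

  FormattersManager.RegisterEventFormatterHelper(ExampleFormatterHelper)
  helper = FormattersManager.GetEventFormatterHelper('EXAMPLE')
  print(type(helper).__name__)  # prints: ExampleFormatterHelper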
py
1a46c81ac715e6a23fc61e666c0776d4154760a7
# -*- coding: utf-8 -*- # # Copyright 2014 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command for deleting firewall rules.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.firewall_rules import flags class Delete(base.DeleteCommand): """Delete Compute Engine firewall rules. *{command}* deletes one or more Compute Engine firewall rules. """ FIREWALL_ARG = None @staticmethod def Args(parser): Delete.FIREWALL_ARG = flags.FirewallRuleArgument(plural=True) Delete.FIREWALL_ARG.AddArgument(parser, operation_type='delete') parser.display_info.AddCacheUpdater(flags.FirewallsCompleter) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client firewall_refs = Delete.FIREWALL_ARG.ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(client)) utils.PromptForDeletion(firewall_refs) requests = [] for firewall_ref in firewall_refs: requests.append((client.apitools_client.firewalls, 'Delete', client.messages.ComputeFirewallsDeleteRequest( **firewall_ref.AsDict()))) return client.MakeRequests(requests)
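# --- Added usage note (not part of the original command module) ---
# This class backs the CLI command below; the rule names are placeholders.
# Because the firewall argument is declared with plural=True, several rules can
# be deleted in one invocation, and PromptForDeletion() asks for confirmation
# before the Delete requests are sent:
#
#   gcloud compute firewall-rules delete my-rule-1 my-rule-2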
py
1a46ca3efe175cf8e1dabe918459a5fb61b65295
"""
Decorator Parameters

In the previous videos we saw some built-in decorators that can handle some arguments:

@wraps(fn)                  @lru_cache(maxsize=256)   <- function call
def inner():                def factorial(n):
    ...                         ...

This should look quite different from the decorators we have been creating and using:

@timed    <- no function call
def fibonacci(n):
    ...
"""
from time import perf_counter


def timed(fn):
    def inner(*args, **kwargs):
        total_elapsed = 0
        for i in range(10):
            # hardcoded value 10 -- we really want to pass it in as a parameter
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print(avg_elapsed)
        return result
    return inner


"""
@timed                          or      my_func = timed(my_func)
def my_func():
    ...

One approach to passing the hardcoded repetition count (10) as a parameter:

                     extra parameter
                    /
def timed(fn, reps):
    from time import perf_counter

    def inner(*args, **kwargs):
        total_elapsed = 0
        for i in range(reps):   # reps is a free variable of inner
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / reps
        print(avg_elapsed)
        return result
    return inner
"""
py
1a46ca6f3e384efaf0cf9f3ffa3abf590e7f3685
from flask import jsonify, request, g, url_for, current_app, abort from . import api from ..models import Post, Permission from .decorators import permission_required from .. import db from .errors import forbidden @api.route('/posts/') def get_posts(): page = request.args.get('page', 1, type=int) pagination = Post.query.paginate( page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False) posts = pagination.items prev = None if pagination.has_prev: prev = url_for('api.get_posts', page=page-1, _external=True) next = None if pagination.has_next: next = url_for('api.get_posts', page=page+1, _external=True) return jsonify({ 'posts': [post.to_json() for post in posts], 'prev': prev, 'next': next, 'count': pagination.total }) @api.route('/posts/<int:id>') def get_post(id): post = Post.query.get_or_404(id) return jsonify(post.to_json()) @api.route('/posts/', methods=['POST']) @permission_required(Permission.WRITE_ARTICLES) def new_post(): post = Post.from_json(request.json) post.author = g.current_user db.session.add(post) db.session.commit() return jsonify(post.to_json()), 201, \ {'Location': url_for('api.get_post', id=post.id, _external=True)} @api.route('/posts/<int:id>', methods=['PUT']) @permission_required(Permission.WRITE_ARTICLES) def edit_post(id): post = Post.query.get_or_404(id) if g.current_user != post.author and \ not g.current_user.can(Permission.ADMINISTER): return forbidden('Insufficient permissions') post.name = request.json.get('name', post.name) post.summary = request.json.get('summary', post.summary) post.body = request.json.get('body', post.body) db.session.add(post) return jsonify(post.to_json())
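# --- Added client-side sketch (not part of the original blueprint) ---
# The list endpoint is paginated and returns "prev"/"next" URLs, so a client can
# simply follow "next" until it is None. Shown as a comment to keep this server
# module free of extra dependencies; the base URL (and any authentication the
# app enforces) are deployment-specific assumptions:
#
#   import requests
#
#   url = 'http://localhost:5000/api/v1/posts/'
#   while url:
#       payload = requests.get(url).json()
#       for post in payload['posts']:
#           print(post)
#       url = payload['next']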
py
1a46caddca1e1089963c66d00ab9202d282f930f
#!/usr/bin/env python3 # # author: Michael Brockus # contact: <mailto:[email protected]> # license: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0 # # copyright 2020 The Meson-UI development team # import subprocess import logging color = { 'green': '\x1B[01;32m', 'blue': '\033[94m', 'bold': '\033[1m', 'reset': '\x1B[0m' } log_format = ( f'{color["bold"]} cat_log: {color["reset"]}' f'{color["blue"]} %(funcName)s - {color["reset"]}' f'{color["bold"]} %(levelname)s: {color["reset"]}' f'{color["green"]} %(message)s {color["reset"]}' ) logging.basicConfig(level=logging.INFO, format=log_format) USER_DEPS: list = [ 'git-all', 'libc6-dev', 'gcc', 'g++', 'gobjc', 'gobjc++', 'gfortran', 'ldc', 'rustc', 'default-jre', 'mono-complete' ] PYPI_DEPS: list = [ 'meson==0.53.2', 'cmake==3.16.3', 'ninja==1.9.0', 'pytest==5.3.2', 'pytest-cov==2.8.1', 'codecov==2.0.15', 'PyQt5==5.14.1' ] def install_user_packages(deps: list, dry_run: bool = False): for dep in deps: logging.info(f'installing: {dep}') subprocess.check_call([ 'apt-get', 'install', dep, '--yes', '-qq']) for dep in deps: logging.info(f'user dep: {dep}') def install_pypi_packages(deps: list, dry_run: bool = False): for dep in deps: logging.info(f'installing: {dep}') subprocess.check_call([ 'python3', '-m', 'pip', 'install', '--quiet', dep]) for dep in deps: logging.info(f'pypi dep: {dep}') def main(): logging.info('Running install commands for both "user" and "python3"') install_user_packages(USER_DEPS) install_pypi_packages(PYPI_DEPS) logging.info('Process done.') if __name__ == "__main__": main()
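# --- Added usage note (not part of the original script) ---
# Both installers simply take a list of package specifiers, so they can also be
# reused for a subset of the dependencies (an illustrative call, not something
# main() does by itself). Note that install_user_packages() shells out to
# apt-get and therefore has to run with root privileges:
#
#   install_pypi_packages(['meson==0.53.2', 'ninja==1.9.0'])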
py
1a46cb36fa16a0233a3edceb0301074847451947
"""
AI Name: Random AI
Made by: Carter
Strategy: Move around randomly. Attack any robot in front of you.
"""
import random


class AI:
    def __init__(self):
        # Anything the AI needs to do before the game starts goes here.
        pass

    def turn(self):
        if self.robot.lookInFront() == "bot":
            self.robot.attack()
            return
        else:
            # goForth appears twice so moving forward is chosen half of the time,
            # with a quarter chance each of turning left or right.
            random.choice([self.robot.turnLeft, self.robot.turnRight, self.robot.goForth, self.robot.goForth])()
py
1a46cb6591a61f7318ac30ce883f23c0668f0f10
# qubit number=2 # total number=25 import pyquil from pyquil.api import local_forest_runtime, QVMConnection from pyquil import Program, get_qc from pyquil.gates import * import numpy as np conn = QVMConnection() def make_circuit()-> Program: prog = Program() # circuit begin prog += H(0) # number=1 prog += H(2) # number=22 prog += CZ(0,2) # number=23 prog += H(2) # number=24 prog += X(2) # number=12 prog += CNOT(0,2) # number=13 prog += H(1) # number=7 prog += CZ(2,1) # number=8 prog += H(1) # number=9 prog += H(1) # number=18 prog += CZ(2,1) # number=19 prog += H(1) # number=20 prog += Y(1) # number=14 prog += CNOT(2,1) # number=10 prog += Z(2) # number=3 prog += X(1) # number=17 prog += Y(2) # number=5 prog += X(2) # number=21 prog += CNOT(1,0) # number=15 prog += CNOT(1,0) # number=16 # circuit end return prog def summrise_results(bitstrings) -> dict: d = {} for l in bitstrings: if d.get(l) is None: d[l] = 1 else: d[l] = d[l] + 1 return d if __name__ == '__main__': prog = make_circuit() qvm = get_qc('1q-qvm') results = qvm.run_and_measure(prog,1024) bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T bitstrings = [''.join(map(str, l)) for l in bitstrings] writefile = open("../data/startPyquil147.csv","w") print(summrise_results(bitstrings),file=writefile) writefile.close()
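# --- Added verification sketch (not part of the generated circuit) ---
# The H / CZ / H pattern above (e.g. gates number=22..24 around qubit 2, or
# number=7..9 around qubit 1) is the standard decomposition of a CNOT: applying
# H to the target before and after a CZ gives CNOT. A small NumPy check of that
# two-qubit identity:
def _check_h_cz_h_equals_cnot():
    h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
    ih = np.kron(np.eye(2), h)          # H on the target (second) qubit
    cz = np.diag([1.0, 1.0, 1.0, -1.0])
    cnot = np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1],
                     [0, 0, 1, 0]], dtype=float)
    return np.allclose(ih @ cz @ ih, cnot)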
py
1a46cba5c4141eb420949389ea12835cf09f8d4b
from __future__ import division import operator import numpy as np from scipy import stats, interpolate #============================================================================== # This library module is full of functions and classes to compute the maximum # mutual information (Capacity) between an input (x) (voltage) and output (y) (resistance) # distribution transmitted through a noisy channel (Pyx) (device). # Free to use and distribute and alter. # Created by Jesse Engel (Stanford, UC Berkeley) Sept 2, 2014 #============================================================================== #============================================================================== # Discrete Distribution Functions #============================================================================== def h(p): """Shannon Information """ info = -1*np.log2(p) if np.isscalar(info): if np.isinf(info) or info == -0.: info = 0 else: info[np.where(info == -0.)] = 0 info[np.where(np.isinf(info))] = 0 return info def H(p): """Entropy """ return p * h(p) def H2(p): """Binary Entropy """ return H(p) + H(1-p) def D_KL(p, q): ''' Compute the KL Diveregence of two finite distributions p and q Params ------ p (array) [np] q (array) [nq] Returns ------- D_KL (float) in Bits ''' if p.ndim == 2: #D_KL for each row of p d = p * np.log2(p / np.r_[q]) d[np.logical_not(np.isfinite(d))] = 0 return np.sum(d,1) else: d = p * np.log2(p / q) d[np.logical_not(np.isfinite(d))] = 0 return np.sum(d) def I(Pyx, Px): ''' Compute the mutual information of distribution Px traveling through channel Pyx. Params ------ Pyx (array) [ny, nx] Px (array) [nx] Returns ------- I (float) in Bits ''' Pyx = Pyx.T Py = np.dot(Px, Pyx) I = np.dot(Px, D_KL(Pyx, Py)) return I #============================================================================== # Vectorized Blahut-Arimoto algorithm #============================================================================== def blahut_arimoto(Pyx, tolerance = 1e-2, iterations = 1000, e=np.empty(0), s=0, debug=False, Px0=np.empty(0)): ''' Blahut-Arimoto algorithm for computing the Capacity of a discrete input-output channel Based on a matlab code by: Kenneth Shum http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=825 Adapted from Blahut 1972, IEEE Trans. on Info. Theory Params ---- Pyx Discrete conditional probability matrix. (array) [ny, nx] Keywords ----- e Vector of expenses for given x input states s Lagrange multiplier. One to one mapping to an average tolerance End when IU - IL < tolerance iterations Max number of iterations debug: Print to console while running Outputs ---- C (float) Capacity in bits Px (array) [nx] Optimal input distribution E (float) Expense. 
Only output if 'e' is defined ''' Pyx = Pyx.T # (inputs, outputs) m, n = Pyx.shape # (m inputs, n outputs) Px = [np.ones(m)/m, Px0][Px0.any()] # initial distribution for channel input Py = np.ones(n)/n # initial distribution for channel output c = np.zeros(m) energy_constraint = e.any() D = D_KL(Pyx, Py) #Vector temp = Pyx / np.c_[np.sum(Pyx,1)] ind = np.isfinite(temp) Pyx[ind] = temp[ind] #Optimizaiton for i_iter in np.arange(iterations): if energy_constraint: c = np.exp( D - s*e ) else: c = np.exp( D ) #Update Px = Px * c Px = Px/np.sum(Px) Py = np.dot(Px, Pyx) D = D_KL(Pyx, Py) #Vector IL = np.log(np.dot(Px, c)) IU = np.log(max(c)) if debug: if energy_constraint: E = np.dot(Px, e) print ('\nE: %.2e' % E) print ('IL: %.2e IU: %.2e' % (IL, IU)) print ('Iter: %d' % (i_iter+1)) else: print ('\nIL: %.2e IU: %.2e' % (IL, IU)) print ('Iter: %d' % (i_iter+1)) if tolerance: if IU-IL < tolerance: break C = I(Pyx.T, Px) if debug: print ('iterations: %s' % (i_iter+1)) print ('C:', C) if energy_constraint: E = np.dot(Px, e) return C, Px, E else: return C, Px # def rate_distortion() # Rate-Distortion is for SOURCE compression. # It calculates the lower bound of required description length to compress and # reconstruct a GIVEN source (Px (can be multidimensional and dependent, like in images)). # It does NOT tell you how to achieve that compression 'codebook and code points', except # for simple cases like independent (iid) gaussian sources. In that case it actually works out that # doing multidimensional to single dimensional compression (vector quantization) is better than scalar quantization # Problems of communication theory: # ------------ # 1) WHAT information should be transmitted? (source coding) # 2) HOW should it be transmitted? (channel coding) # These two problems can be separated by Shannon's separation theorem, and the distortion # will never exceed D(R) as long as R < C. # But what of joint coding? #============================================================================== # Quantization of high-D quantized channel to low-D quantized channel #============================================================================== def find_closest(vector, value): ''' Find the closest index of a vector to a value. If value is a vector, returns an array of indicies that are closest to the values. Rounds down. ''' if isinstance(value, np.ndarray) or isinstance(value, list): diff = np.outer(vector, np.ones(len(value))) - np.outer(np.ones(len(vector)), value) inds = np.argmin( np.abs(diff), axis=0) else: inds = np.argmin( np.abs( vector - value) ) return inds def calc_subsample_inds(x, y, xinputs=None, ydividers=None): ''' Find closest indexes for a discetization of x and y ''' if np.any(xinputs): xinds = find_closest(x, xinputs) else: xinds = np.arange(x.size) if np.any(ydividers): yinds = find_closest(y, ydividers) else: yinds = np.arange(y.size) return xinds, yinds def subsample(Pyx, xinds, yinds): ''' Subsample a density matrix a locations xinds(columns), and sum between dividers yinds(row). 
''' Pyx_sub = np.zeros( [len(yinds)+1, len(xinds)]) bounds = np.r_[0, yinds, Pyx.shape[0]] for i in np.arange(len(bounds)-1): bl = bounds[i] bu = bounds[i+1] Pyx_sub[i,:] = np.sum(Pyx[bl:bu, xinds], axis = 0) # Normalize Pyx_sub = Pyx_sub / np.sum(Pyx_sub, axis=0) return Pyx_sub def quantize(Pyx, x, y, xinputs=None, ydividers=None): '''Chops up a matrix Pyx, into xinputs columns, and sums rows between y dividers ''' xinds, yinds = calc_subsample_inds(x, y, xinputs, ydividers) Pyx_sub = subsample(Pyx, xinds, yinds) x_sub = x[xinds] y_sub = y[::-1][yinds] return Pyx_sub, x_sub, y_sub def trim(Pyx, cum_low=1e-1, cum_high=1e-1, index=False): '''Returns Pyx only rows where cumsum(P) > cum_low and cumsum(P) < 1 - cum_high''' low = min( np.where( np.cumsum(Pyx, axis=0) > cum_low )[0]) high = max( np.where( np.cumsum(Pyx, axis=0) < 1-cum_high )[0]) if not index: return Pyx[low:high, :] else: return Pyx[low:high, :], np.arange(low, high+1) #============================================================================== # Gaussian Kernel Density Estimate from Data #============================================================================== def Q(Varray, Rarray, nx=2000, ny=2000,print_points=True): ''' Take in all Voltage/Resistance Pairs and return the conditional PDF: Q= P(R|V) Performs Gaussian Kernel Density Estimate and Linear Interpolation Params ------ Varray, Rarray ndarray, same size (n_examples,) Returns ------- Q ndarray (__, __) ''' V_list = np.sort(np.unique(Varray)) #Gaussian KDE Pyx_func = [] for i, v in enumerate(V_list): idx = (Varray == v) data = Rarray[idx] if print_points == True: print ('%0.2f Volts, %d Points' % (v, sum(idx))) Pyx_func.append(stats.gaussian_kde(data, bw_method='scott' )) #scott, silvermann, scalar Pyx_func = FunctionList(Pyx_func) x = np.linspace(V_list.min(), V_list.max(), nx) y = np.linspace(Rarray.min()*0.7, Rarray.max()*1.3, ny) # Bivariate Spline Pyx = np.atleast_2d(Pyx_func(y)) Pyx_interp = interpolate.RectBivariateSpline( V_list, y, Pyx, kx=3, ky=3, s=0) Pyx_new = np.rot90(Pyx_interp(x,y)) # Normalize (each input needs to end up in an output (column=1)) Pyx_new = Pyx_new / np.sum(Pyx_new, axis=0) return Pyx_new, x, y def moments(Varray, Rarray): '''Returns mean, std of a R(V) dataset''' V_list = np.sort(np.unique(Varray)) data_mean = np.zeros(V_list.size) data_std = np.zeros(V_list.size) Vs = np.zeros(V_list.size) for i, v in enumerate(V_list): idx = (Varray == v) data = Rarray[idx] data_mean[i] = np.mean(data) data_std[i] = np.std(data) Vs[i] = v return data_mean, data_std, Vs #============================================================================== # Classes #============================================================================== class FunctionList(object): def __init__(self, f_list): """ FunctionList is a list of function objects that can be added, multiplied, summed, and dot producted with ints/floats, functions, np.array()s, and other FunctionLists. This is a bit of a hack to allow for making an array of functions. 
Parameters ---------- f_list : list of functions Examples -------- >>> f = lambda x: x >>> g = FunctionList([f, f]) >>> h=g.dot([1,2]) >>> g(2) [2, 2] >>> h(2) 6 """ if type(f_list) is FunctionList: self.f_list = f_list.f_list elif hasattr(f_list, '__call__'): self.f_list = [f_list] else: self.f_list = f_list def __call__(self, x): result = [] for f in self.f_list: result.append( f(x) ) return result def __add__(self, other): """ Add the function list, elementwise: Returns a function list """ return self.__apply_op(other, op=operator.add) def __sub__(self, other): """ Add the function list, elementwise: Returns a function list """ return self.__apply_op(other, op=operator.sub) def __mul__(self, other): """ Multiply the function list, elementwise: Returns a function list """ return self.__apply_op(other, op=operator.mul) def __div__(self, other): """ Divide the function list, elementwise: Returns a function list """ return self.__apply_op(other, op=operator.div) def __apply_op(self, other, op=operator.add): result = [] if type(other) is FunctionList: for i, f in enumerate(self.f_list): g = other[i] result.append( lambda x, f=f, g=g: op(f(x), g(x)) ) elif hasattr(other, '__call__'): for f in self.f_list: g = other result.append( lambda x, f=f, g=g: op(f(x), g(x)) ) elif type(other) in (np.ndarray, list): for i, f in enumerate(self.f_list): g = other[i] result.append( lambda x, f=f, g=g: op(f(x), g) ) elif type(other) in (int, float): for f in self.f_list: g = other result.append( lambda x, f=f, g=g: op(f(x), other) ) else: print ('Add FunctionList with: FunctionList, ndarray, int, or float') pass return FunctionList(result) def sum(self): result = self.f_list[0] for i, g in enumerate(self.f_list[1:]): f = result result = lambda x, f=f, g=g: f(x) + g(x) return result def dot(self, other): """Take the dot product of a function vector and either another function vector, or a normal vector. """ result = self.__mul__(other) result = result.sum() return result def __getitem__(self,index): return self.f_list[index] def __setitem__(self,index,value): self.f_list[index] = value def __len__(self): return len(self.f_list)
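# --- Usage sketch (editor's addition, not part of the original module) -------
# The functions above compute channel capacity via Blahut-Arimoto. As a quick
# sanity check, this sketch builds the conditional matrix Pyx of a binary
# symmetric channel with crossover probability p and compares the numerical
# capacity against the closed form 1 - H2(p). The names p and Pyx_bsc are
# illustrative only; the call follows blahut_arimoto() as defined above, with
# Pyx indexed [output, input].
if __name__ == '__main__':
    p = 0.1  # crossover probability (assumed value for the demo)
    # Rows are outputs y in {0, 1}, columns are inputs x in {0, 1}.
    Pyx_bsc = np.array([[1 - p, p],
                        [p, 1 - p]])

    C, Px = blahut_arimoto(Pyx_bsc, tolerance=1e-4)

    print('Blahut-Arimoto capacity: %.4f bits' % C)
    print('Analytic 1 - H2(p):      %.4f bits' % (1 - H2(p)))
    print('Optimal input distribution:', Px)  # should be close to uniform for a BSC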
py
1a46cd5e535799c8cd11721c9f787536bd6f8f0b
""" Support for MQTT JSON lights. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.mqtt_json/ """ import json import logging import voluptuous as vol from homeassistant.components import mqtt from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, FLASH_LONG, FLASH_SHORT, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, Light, ) from homeassistant.components.mqtt import ( CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription, ) from homeassistant.const import ( CONF_BRIGHTNESS, CONF_COLOR_TEMP, CONF_DEVICE, CONF_EFFECT, CONF_NAME, CONF_OPTIMISTIC, CONF_RGB, CONF_WHITE_VALUE, CONF_XY, STATE_ON, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.typing import ConfigType import homeassistant.util.color as color_util from . import MQTT_LIGHT_SCHEMA_SCHEMA from .schema_basic import CONF_BRIGHTNESS_SCALE _LOGGER = logging.getLogger(__name__) DOMAIN = "mqtt_json" DEFAULT_BRIGHTNESS = False DEFAULT_COLOR_TEMP = False DEFAULT_EFFECT = False DEFAULT_FLASH_TIME_LONG = 10 DEFAULT_FLASH_TIME_SHORT = 2 DEFAULT_NAME = "MQTT JSON Light" DEFAULT_OPTIMISTIC = False DEFAULT_RGB = False DEFAULT_WHITE_VALUE = False DEFAULT_XY = False DEFAULT_HS = False DEFAULT_BRIGHTNESS_SCALE = 255 CONF_EFFECT_LIST = "effect_list" CONF_FLASH_TIME_LONG = "flash_time_long" CONF_FLASH_TIME_SHORT = "flash_time_short" CONF_HS = "hs" # Stealing some of these from the base MQTT configs. 
PLATFORM_SCHEMA_JSON = ( mqtt.MQTT_RW_PLATFORM_SCHEMA.extend( { vol.Optional(CONF_BRIGHTNESS, default=DEFAULT_BRIGHTNESS): cv.boolean, vol.Optional( CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE ): vol.All(vol.Coerce(int), vol.Range(min=1)), vol.Optional(CONF_COLOR_TEMP, default=DEFAULT_COLOR_TEMP): cv.boolean, vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA, vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECT): cv.boolean, vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]), vol.Optional( CONF_FLASH_TIME_LONG, default=DEFAULT_FLASH_TIME_LONG ): cv.positive_int, vol.Optional( CONF_FLASH_TIME_SHORT, default=DEFAULT_FLASH_TIME_SHORT ): cv.positive_int, vol.Optional(CONF_HS, default=DEFAULT_HS): cv.boolean, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS): vol.All( vol.Coerce(int), vol.In([0, 1, 2]) ), vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean, vol.Optional(CONF_RGB, default=DEFAULT_RGB): cv.boolean, vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic, vol.Optional(CONF_UNIQUE_ID): cv.string, vol.Optional(CONF_WHITE_VALUE, default=DEFAULT_WHITE_VALUE): cv.boolean, vol.Optional(CONF_XY, default=DEFAULT_XY): cv.boolean, } ) .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema) .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema) .extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema) ) async def async_setup_entity_json( config: ConfigType, async_add_entities, config_entry, discovery_hash ): """Set up a MQTT JSON Light.""" async_add_entities([MqttLightJson(config, config_entry, discovery_hash)]) # pylint: disable=too-many-ancestors class MqttLightJson( MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, Light, RestoreEntity, ): """Representation of a MQTT JSON light.""" def __init__(self, config, config_entry, discovery_hash): """Initialize MQTT JSON light.""" self._state = False self._sub_state = None self._supported_features = 0 self._topic = None self._optimistic = False self._brightness = None self._color_temp = None self._effect = None self._hs = None self._white_value = None self._flash_times = None self._unique_id = config.get(CONF_UNIQUE_ID) # Load config self._setup_from_config(config) device_config = config.get(CONF_DEVICE) MqttAttributes.__init__(self, config) MqttAvailability.__init__(self, config) MqttDiscoveryUpdate.__init__(self, discovery_hash, self.discovery_update) MqttEntityDeviceInfo.__init__(self, device_config, config_entry) async def async_added_to_hass(self): """Subscribe to MQTT events.""" await super().async_added_to_hass() await self._subscribe_topics() async def discovery_update(self, discovery_payload): """Handle updated discovery message.""" config = PLATFORM_SCHEMA_JSON(discovery_payload) self._setup_from_config(config) await self.attributes_discovery_update(config) await self.availability_discovery_update(config) await self.device_info_discovery_update(config) await self._subscribe_topics() self.async_write_ha_state() def _setup_from_config(self, config): """(Re)Setup the entity.""" self._config = config self._topic = { key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC) } optimistic = config[CONF_OPTIMISTIC] self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None brightness = config[CONF_BRIGHTNESS] if brightness: self._brightness = 255 else: self._brightness = None color_temp = config[CONF_COLOR_TEMP] if color_temp: self._color_temp = 150 
else: self._color_temp = None effect = config[CONF_EFFECT] if effect: self._effect = "none" else: self._effect = None white_value = config[CONF_WHITE_VALUE] if white_value: self._white_value = 255 else: self._white_value = None if config[CONF_HS] or config[CONF_RGB] or config[CONF_XY]: self._hs = [0, 0] else: self._hs = None self._flash_times = { key: config.get(key) for key in (CONF_FLASH_TIME_SHORT, CONF_FLASH_TIME_LONG) } self._supported_features = SUPPORT_TRANSITION | SUPPORT_FLASH self._supported_features |= config[CONF_RGB] and SUPPORT_COLOR self._supported_features |= brightness and SUPPORT_BRIGHTNESS self._supported_features |= color_temp and SUPPORT_COLOR_TEMP self._supported_features |= effect and SUPPORT_EFFECT self._supported_features |= white_value and SUPPORT_WHITE_VALUE self._supported_features |= config[CONF_XY] and SUPPORT_COLOR self._supported_features |= config[CONF_HS] and SUPPORT_COLOR async def _subscribe_topics(self): """(Re)Subscribe to topics.""" last_state = await self.async_get_last_state() @callback def state_received(msg): """Handle new MQTT messages.""" values = json.loads(msg.payload) if values["state"] == "ON": self._state = True elif values["state"] == "OFF": self._state = False if self._hs is not None: try: red = int(values["color"]["r"]) green = int(values["color"]["g"]) blue = int(values["color"]["b"]) self._hs = color_util.color_RGB_to_hs(red, green, blue) except KeyError: pass except ValueError: _LOGGER.warning("Invalid RGB color value received") try: x_color = float(values["color"]["x"]) y_color = float(values["color"]["y"]) self._hs = color_util.color_xy_to_hs(x_color, y_color) except KeyError: pass except ValueError: _LOGGER.warning("Invalid XY color value received") try: hue = float(values["color"]["h"]) saturation = float(values["color"]["s"]) self._hs = (hue, saturation) except KeyError: pass except ValueError: _LOGGER.warning("Invalid HS color value received") if self._brightness is not None: try: self._brightness = int( values["brightness"] / float(self._config[CONF_BRIGHTNESS_SCALE]) * 255 ) except KeyError: pass except ValueError: _LOGGER.warning("Invalid brightness value received") if self._color_temp is not None: try: self._color_temp = int(values["color_temp"]) except KeyError: pass except ValueError: _LOGGER.warning("Invalid color temp value received") if self._effect is not None: try: self._effect = values["effect"] except KeyError: pass except ValueError: _LOGGER.warning("Invalid effect value received") if self._white_value is not None: try: self._white_value = int(values["white_value"]) except KeyError: pass except ValueError: _LOGGER.warning("Invalid white value received") self.async_write_ha_state() if self._topic[CONF_STATE_TOPIC] is not None: self._sub_state = await subscription.async_subscribe_topics( self.hass, self._sub_state, { "state_topic": { "topic": self._topic[CONF_STATE_TOPIC], "msg_callback": state_received, "qos": self._config[CONF_QOS], } }, ) if self._optimistic and last_state: self._state = last_state.state == STATE_ON if last_state.attributes.get(ATTR_BRIGHTNESS): self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS) if last_state.attributes.get(ATTR_HS_COLOR): self._hs = last_state.attributes.get(ATTR_HS_COLOR) if last_state.attributes.get(ATTR_COLOR_TEMP): self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP) if last_state.attributes.get(ATTR_EFFECT): self._effect = last_state.attributes.get(ATTR_EFFECT) if last_state.attributes.get(ATTR_WHITE_VALUE): self._white_value = 
last_state.attributes.get(ATTR_WHITE_VALUE) async def async_will_remove_from_hass(self): """Unsubscribe when removed.""" self._sub_state = await subscription.async_unsubscribe_topics( self.hass, self._sub_state ) await MqttAttributes.async_will_remove_from_hass(self) await MqttAvailability.async_will_remove_from_hass(self) @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness @property def color_temp(self): """Return the color temperature in mired.""" return self._color_temp @property def effect(self): """Return the current effect.""" return self._effect @property def effect_list(self): """Return the list of supported effects.""" return self._config.get(CONF_EFFECT_LIST) @property def hs_color(self): """Return the hs color value.""" return self._hs @property def white_value(self): """Return the white property.""" return self._white_value @property def should_poll(self): """No polling needed for a MQTT light.""" return False @property def name(self): """Return the name of the device if any.""" return self._config[CONF_NAME] @property def unique_id(self): """Return a unique ID.""" return self._unique_id @property def is_on(self): """Return true if device is on.""" return self._state @property def assumed_state(self): """Return true if we do optimistic updates.""" return self._optimistic @property def supported_features(self): """Flag supported features.""" return self._supported_features async def async_turn_on(self, **kwargs): """Turn the device on. This method is a coroutine. """ should_update = False message = {"state": "ON"} if ATTR_HS_COLOR in kwargs and ( self._config[CONF_HS] or self._config[CONF_RGB] or self._config[CONF_XY] ): hs_color = kwargs[ATTR_HS_COLOR] message["color"] = {} if self._config[CONF_RGB]: # If there's a brightness topic set, we don't want to scale the # RGB values given using the brightness. 
if self._brightness is not None: brightness = 255 else: brightness = kwargs.get( ATTR_BRIGHTNESS, self._brightness if self._brightness else 255 ) rgb = color_util.color_hsv_to_RGB( hs_color[0], hs_color[1], brightness / 255 * 100 ) message["color"]["r"] = rgb[0] message["color"]["g"] = rgb[1] message["color"]["b"] = rgb[2] if self._config[CONF_XY]: xy_color = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR]) message["color"]["x"] = xy_color[0] message["color"]["y"] = xy_color[1] if self._config[CONF_HS]: message["color"]["h"] = hs_color[0] message["color"]["s"] = hs_color[1] if self._optimistic: self._hs = kwargs[ATTR_HS_COLOR] should_update = True if ATTR_FLASH in kwargs: flash = kwargs.get(ATTR_FLASH) if flash == FLASH_LONG: message["flash"] = self._flash_times[CONF_FLASH_TIME_LONG] elif flash == FLASH_SHORT: message["flash"] = self._flash_times[CONF_FLASH_TIME_SHORT] if ATTR_TRANSITION in kwargs: message["transition"] = int(kwargs[ATTR_TRANSITION]) if ATTR_BRIGHTNESS in kwargs and self._brightness is not None: message["brightness"] = int( kwargs[ATTR_BRIGHTNESS] / float(DEFAULT_BRIGHTNESS_SCALE) * self._config[CONF_BRIGHTNESS_SCALE] ) if self._optimistic: self._brightness = kwargs[ATTR_BRIGHTNESS] should_update = True if ATTR_COLOR_TEMP in kwargs: message["color_temp"] = int(kwargs[ATTR_COLOR_TEMP]) if self._optimistic: self._color_temp = kwargs[ATTR_COLOR_TEMP] should_update = True if ATTR_EFFECT in kwargs: message["effect"] = kwargs[ATTR_EFFECT] if self._optimistic: self._effect = kwargs[ATTR_EFFECT] should_update = True if ATTR_WHITE_VALUE in kwargs: message["white_value"] = int(kwargs[ATTR_WHITE_VALUE]) if self._optimistic: self._white_value = kwargs[ATTR_WHITE_VALUE] should_update = True mqtt.async_publish( self.hass, self._topic[CONF_COMMAND_TOPIC], json.dumps(message), self._config[CONF_QOS], self._config[CONF_RETAIN], ) if self._optimistic: # Optimistically assume that the light has changed state. self._state = True should_update = True if should_update: self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn the device off. This method is a coroutine. """ message = {"state": "OFF"} if ATTR_TRANSITION in kwargs: message["transition"] = int(kwargs[ATTR_TRANSITION]) mqtt.async_publish( self.hass, self._topic[CONF_COMMAND_TOPIC], json.dumps(message), self._config[CONF_QOS], self._config[CONF_RETAIN], ) if self._optimistic: # Optimistically assume that the light has changed state. self._state = False self.async_write_ha_state()
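# --- Example state payload (editor's addition, not part of the component) ----
# state_received() above parses a JSON object with optional "state", "color"
# (r/g/b, x/y, or h/s), "brightness", "color_temp", "effect" and "white_value"
# keys, and async_turn_on() publishes the same shape to the command topic.
# The broker host and the topic "home/rgb1/state" below are illustrative only
# and must match the light's configured state_topic.
if __name__ == "__main__":
    import paho.mqtt.publish as mqtt_publish

    example_state = {
        "state": "ON",
        "color": {"r": 255, "g": 180, "b": 200},
        "brightness": 128,  # interpreted relative to the configured brightness_scale
        "color_temp": 155,
        "effect": "colorloop",
        "white_value": 150,
    }

    mqtt_publish.single(
        "home/rgb1/state",               # assumed state_topic
        payload=json.dumps(example_state),
        hostname="localhost",            # assumed broker
    )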
py
1a46cef0278e026b6da3f1f5f795bd042115c790
import socket import asyncore import time import random import common def get_uuid(): hex_digit = "0123456789abcdefABCDEF" len_ = len(hex_digit) - 1 ret = [] for i in [8, 4, 4, 4, 12]: ret.append("".join([hex_digit[random.randint(0, len_)] for x in range(i)])) return "-".join(ret) NOTIFY_ALIVE = common.CRLF.join(["NOTIFY * HTTP/1.1", "HOST: 239.255.255.250:1900", "CACHE-CONTROL: max-age=1800", "LOCATION: http://%s:%s/upnp-description", "SERVER: dragonkeeper", "NT: upnp:rootdevice", "NT2: urn:opera-com:device:OperaDragonfly:1", "NTS: ssdp:alive", "USN: uuid:%s::urn:opera-com:device:OperaDragonfly:1", common.CRLF]) NOTIFY_BYBY = common.CRLF.join(["NOTIFY * HTTP/1.1", "HOST: 239.255.255.250:1900", "NT: upnp:rootdevice", "NTS: ssdp:byebye", "USN: uuid:%s::urn:opera-com:device:OperaDragonfly:1", common.CRLF]) SEARCH_RESPONSE = common.CRLF.join(["HTTP/1.1 200 OK", "CACHE-CONTROL: max-age=1800", "EXT:", "LOCATION: http://%s:%s/upnp-description", "SERVER: dragonkeeper", "ST: urn:opera-com:device:OperaDragonfly:1", "USN: uuid:%s::urn:opera-com:device:OperaDragonfly:1", common.CRLF]) DEVICE_DESCRIPTION = """<?xml version="1.0" encoding="UTF-8"?> <root xmlns="urn:schemas-upnp-org:device-1-0"> <specVersion> <major>1</major> <minor>0</minor> </specVersion> <device> <deviceType>urn:opera-com:device:OperaDragonfly:1</deviceType> <friendlyName>dragonkeeper</friendlyName> <manufacturer>Opera Software ASA</manufacturer> <manufacturerURL>http://www.opera.com/</manufacturerURL> <payload>http://%s:%s</payload> <deviceicon>http://%s:%s/device-favicon.png</deviceicon> <serviceList/> </device> </root> """ class SimpleUPnPDevice(asyncore.dispatcher): MCAST_GRP = "239.255.255.250" MCAST_PORT = 1900 UPnP_ADDR = ("239.255.255.250", 1900) SEARCH_TARGETS = ["urn:opera-com:device:OperaDragonfly:1"] # "ssdp:all", "upnp:rootdevice", def __init__(self, ip="", http_port=0, stp_port=0, sniff=False): asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_DGRAM) self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.bind(("0.0.0.0", self.MCAST_PORT)) mreq = socket.inet_aton(self.MCAST_GRP) + socket.inet_aton("0.0.0.0") self.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) self.ip = ip self.http_port = http_port self.stp_port = stp_port self.send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.send_socket.bind((self.ip, 0)) self.uuid = get_uuid() self.msg_queue = [] self.expire_queue = [] self.msg_alive = NOTIFY_ALIVE % (self.ip, self.http_port, self.uuid) self.msg_byby = NOTIFY_BYBY % self.uuid self.search_resp = SEARCH_RESPONSE % (self.ip, self.http_port, self.uuid) self.sniff = sniff self.is_alive = False def notify_alive(self): self.is_alive = True t = time.time() * 1000 for i in range(1, 4): self.queue_msg(t + i * 100, self.msg_alive, self.UPnP_ADDR) self.expire_queue.append((t + 1700 * 1000, self.notify_alive)) def notify_byby(self, cb=None): self.is_alive = False t = time.time() * 1000 for i in range(1, 4): self.queue_msg(t + i * 100, self.msg_byby, self.UPnP_ADDR) def queue_msg(self, delay, msg, addr): self.msg_queue.append((delay, msg, addr)) def process_msg_queue(self): cur = 0 t = time.time() * 1000 TIME = 0 MSG = 1 ADDR = 2 while cur < len(self.msg_queue): if t > self.msg_queue[cur][TIME]: msg = self.msg_queue.pop(cur) self.send_socket.sendto(msg[MSG], msg[ADDR]) else: cur += 1 def process_expire_queue(self): cur = 0 t = time.time() * 1000 TIME = 0 CB = 1 while cur < len(self.expire_queue): if t > self.expire_queue[cur][TIME]: 
self.expire_queue.pop(cur)[CB]() else: cur += 1 def handle_read(self): msg, addr = self.recvfrom(common.BUFFERSIZE) if self.sniff: print addr, '\n', msg else: parsed_headers = common.parse_headers(msg) if parsed_headers: raw, first_line, headers, msg = parsed_headers method, path, protocol = first_line.split(common.BLANK, 2) st = headers.get("ST") if self.is_alive and method == "M-SEARCH" and st in self.SEARCH_TARGETS: t = time.time() * 1000 mx = int(headers.get("MX", 3)) * 1000 self.queue_msg(random.randint(100, mx), self.search_resp, addr) else: self.process_msg(method, headers) def writable(self): if len(self.msg_queue): self.process_msg_queue() if len(self.expire_queue): self.process_expire_queue() return False def get_description(self, headers): content = DEVICE_DESCRIPTION % (self.ip, self.stp_port, self.ip, self.http_port) args = (common.get_timestamp(), "", "text/xml", len(content), content) return common.RESPONSE_OK_CONTENT % args def handle_close(self): self.close() def process_msg(self, method, headers): pass if __name__ == "__main__": try: SimpleUPnPDevice(sniff=True) asyncore.loop(timeout=0.1) except KeyboardInterrupt: for fd, obj in asyncore.socket_map.items(): obj.close()
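# --- Discovery sketch (editor's addition, not part of the original module) ---
# SimpleUPnPDevice above answers M-SEARCH requests whose ST header matches
# "urn:opera-com:device:OperaDragonfly:1" on the SSDP multicast group
# 239.255.255.250:1900. The helper below (Python 3 style, standard SSDP
# request format) sends one such search and prints any unicast responses; it
# is meant to be invoked from a separate process while the device is running,
# and the 3-second timeout is arbitrary.
def send_msearch(timeout=3.0):
    import socket as _socket

    msearch = "\r\n".join([
        "M-SEARCH * HTTP/1.1",
        "HOST: 239.255.255.250:1900",
        'MAN: "ssdp:discover"',
        "MX: 2",
        "ST: urn:opera-com:device:OperaDragonfly:1",
        "", "",
    ]).encode("ascii")

    sock = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.sendto(msearch, ("239.255.255.250", 1900))
    try:
        while True:
            data, addr = sock.recvfrom(4096)
            print(addr, data.decode("ascii", errors="replace"))
    except _socket.timeout:
        pass
    finally:
        sock.close()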
py
1a46cf02b514c4d09d622b80edbf1372e085e64c
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'openstackdocstheme'
]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/reviewstats'
openstackdocs_auto_name = False
openstackdocs_use_storyboard = True

html_theme = 'openstackdocs'

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'reviewstats'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
py
1a46cf560d95dd29d53a38a5499225f15e2a36e9
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from argparse import ArgumentParser
import logging
import sys
from io import open
from os import path
from time import time
from glob import glob

from textblob import Blobber
from textblob_aptagger import PerceptronTagger
from collections import Counter, defaultdict

import numpy as np
import pandas as pd

__author__ = "Vivek Kulkarni"
__email__ = "[email protected]"

LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"


def main(args):
    f = open(args.filename)
    D = {}
    tag_set = set([])
    tb = Blobber(pos_tagger=PerceptronTagger())
    for i, line in enumerate(f):
        b1 = tb(line)
        for w, t in b1.tags:
            tag_set.add(t)
            if w not in D:
                D[w] = Counter()
            D[w][t] = float(D[w][t] + 1)

    sorted_pos_tags = sorted(list(tag_set))
    rows = []
    for w in D.keys():
        row = [w]
        pos_counts_word = np.array([float(D[w][t]) for t in sorted_pos_tags])
        pos_dist_word = pos_counts_word / float(np.sum(pos_counts_word))
        assert(np.isclose(np.sum(pos_dist_word), 1.0))
        row = row + list(pos_dist_word)
        rows.append(row)

    header = ['word'] + sorted_pos_tags
    print("Set of POS tags in sorted order", header)

    df = pd.DataFrame().from_records(rows, columns=header)
    print("Dumping the POS distribution.")
    df.to_csv(args.outputfile, index=None, encoding='utf-8')


def debug(type_, value, tb):
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # we are in interactive mode or we don't have a tty-like
        # device, so we call the default hook
        sys.__excepthook__(type_, value, tb)
    else:
        import traceback
        import pdb
        # we are NOT in interactive mode, print the exception...
        traceback.print_exception(type_, value, tb)
        print("\n")
        # ...then start the debugger in post-mortem mode.
        pdb.pm()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-f", "--file", dest="filename", help="Input file")
    parser.add_argument("-o", "--outputfile", dest="outputfile", help="Output file")
    parser.add_argument("-l", "--log", dest="log", help="log verbosity level", default="INFO")
    args = parser.parse_args()

    if args.log == 'DEBUG':
        sys.excepthook = debug
    numeric_level = getattr(logging, args.log.upper(), None)
    logging.basicConfig(level=numeric_level, format=LOGFORMAT)

    main(args)
py
1a46d0292b11b96b6f08a27d6804172cc7c142cc
def main() -> None:
    a = int(input())
    b = int(input())
    # Amount to add to a to reach the next multiple of b (0 if a is already a multiple).
    print((a + b - 1) // b * b - a)


if __name__ == "__main__":
    main()
py
1a46d0a115b32d7ff6c6db2d71f59e05c37b173c
import telraam_data.query as query
import telraam_data.download as download
from .utils import get_data_keys

import datetime as dt
import shutil
import pandas as pd
import pathlib as pl
import random
import pytest


@pytest.fixture()
def one_segment():
    all_segments = query.query_active_segments()
    segment_idx = random.randrange(1, len(all_segments)) - 1
    return all_segments["features"][segment_idx]


@pytest.fixture()
def tmp_path():
    path = pl.Path('./tmp/data.csv')
    yield path
    shutil.rmtree('./tmp/')


def test_list_segments():
    # As of April 2020 there were more than 900 active segments.
    segments = download.list_segments()
    assert len(segments) > 900


def test_list_segments_by_coordinates():
    # As of April 2020 there are more than 30 active segments in Schaarbeek
    segments = download.list_segments_by_coordinates(lon=4.373, lat=50.867, radius=2)
    assert len(segments) > 30
    # 1003073114 should be one of them
    assert 1003073114 in segments
    # 1003063473 should not be one of them
    assert 1003063473 not in segments


def test_download_one_segment(one_segment, tmp_path):
    segment_id = one_segment["properties"]["segment_id"]
    segment_last_time = one_segment["properties"]["last_data_package"]

    # Query that segment for the last live day
    end_date = dt.datetime.fromisoformat(segment_last_time).date()
    start_date = end_date - dt.timedelta(days=1)
    df = download.download_one_segment(
        segment_id=segment_id,
        start_date=start_date,
        end_date=end_date,
        out_filepath=tmp_path)

    required_keys = get_data_keys()
    required_keys.remove('date')  # 'date' has become the index

    # 1. Check returned data
    assert len(df) > 0
    assert df.index.name == 'date'
    assert (df.index >= str(start_date)).all()
    assert (df.index <= str(end_date + dt.timedelta(days=1))).all()
    assert set(required_keys) == set(required_keys).intersection(df.columns)
    assert (df['segment_id'] == segment_id).all()

    # 2. Check stored data
    df_local = pd.read_csv(tmp_path, parse_dates=["date"], index_col="date")
    from ast import literal_eval
    df_local.car_speed_hist_0to70plus = df_local.car_speed_hist_0to70plus.apply(literal_eval)
    df_local.car_speed_hist_0to120plus = df_local.car_speed_hist_0to120plus.apply(literal_eval)
    assert (df_local == df).all().all()
py
1a46d38c642d66cecc90b5078a4ce17813d40ee8
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow/contrib/boosted_trees/proto/split_info.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tensorflow.contrib.boosted_trees.proto import tree_config_pb2 as tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/contrib/boosted_trees/proto/split_info.proto', package='tensorflow.boosted_trees.learner', syntax='proto3', serialized_options=_b('\370\001\001'), serialized_pb=_b('\n7tensorflow/contrib/boosted_trees/proto/split_info.proto\x12 tensorflow.boosted_trees.learner\x1a\x38tensorflow/contrib/boosted_trees/proto/tree_config.proto\"\xbe\x01\n\tSplitInfo\x12<\n\nsplit_node\x18\x01 \x01(\x0b\x32(.tensorflow.boosted_trees.trees.TreeNode\x12\x38\n\nleft_child\x18\x02 \x01(\x0b\x32$.tensorflow.boosted_trees.trees.Leaf\x12\x39\n\x0bright_child\x18\x03 \x01(\x0b\x32$.tensorflow.boosted_trees.trees.Leaf\"\xa6\x01\n\x12ObliviousSplitInfo\x12<\n\nsplit_node\x18\x01 \x01(\x0b\x32(.tensorflow.boosted_trees.trees.TreeNode\x12\x36\n\x08\x63hildren\x18\x02 \x03(\x0b\x32$.tensorflow.boosted_trees.trees.Leaf\x12\x1a\n\x12\x63hildren_parent_id\x18\x03 \x03(\x05\x42\x03\xf8\x01\x01\x62\x06proto3') , dependencies=[tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2.DESCRIPTOR,]) _SPLITINFO = _descriptor.Descriptor( name='SplitInfo', full_name='tensorflow.boosted_trees.learner.SplitInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='split_node', full_name='tensorflow.boosted_trees.learner.SplitInfo.split_node', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='left_child', full_name='tensorflow.boosted_trees.learner.SplitInfo.left_child', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='right_child', full_name='tensorflow.boosted_trees.learner.SplitInfo.right_child', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=152, serialized_end=342, ) _OBLIVIOUSSPLITINFO = _descriptor.Descriptor( name='ObliviousSplitInfo', full_name='tensorflow.boosted_trees.learner.ObliviousSplitInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='split_node', full_name='tensorflow.boosted_trees.learner.ObliviousSplitInfo.split_node', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, 
default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='children', full_name='tensorflow.boosted_trees.learner.ObliviousSplitInfo.children', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='children_parent_id', full_name='tensorflow.boosted_trees.learner.ObliviousSplitInfo.children_parent_id', index=2, number=3, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=345, serialized_end=511, ) _SPLITINFO.fields_by_name['split_node'].message_type = tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._TREENODE _SPLITINFO.fields_by_name['left_child'].message_type = tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF _SPLITINFO.fields_by_name['right_child'].message_type = tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF _OBLIVIOUSSPLITINFO.fields_by_name['split_node'].message_type = tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._TREENODE _OBLIVIOUSSPLITINFO.fields_by_name['children'].message_type = tensorflow_dot_contrib_dot_boosted__trees_dot_proto_dot_tree__config__pb2._LEAF DESCRIPTOR.message_types_by_name['SplitInfo'] = _SPLITINFO DESCRIPTOR.message_types_by_name['ObliviousSplitInfo'] = _OBLIVIOUSSPLITINFO _sym_db.RegisterFileDescriptor(DESCRIPTOR) SplitInfo = _reflection.GeneratedProtocolMessageType('SplitInfo', (_message.Message,), { 'DESCRIPTOR' : _SPLITINFO, '__module__' : 'tensorflow.contrib.boosted_trees.proto.split_info_pb2' # @@protoc_insertion_point(class_scope:tensorflow.boosted_trees.learner.SplitInfo) }) _sym_db.RegisterMessage(SplitInfo) ObliviousSplitInfo = _reflection.GeneratedProtocolMessageType('ObliviousSplitInfo', (_message.Message,), { 'DESCRIPTOR' : _OBLIVIOUSSPLITINFO, '__module__' : 'tensorflow.contrib.boosted_trees.proto.split_info_pb2' # @@protoc_insertion_point(class_scope:tensorflow.boosted_trees.learner.ObliviousSplitInfo) }) _sym_db.RegisterMessage(ObliviousSplitInfo) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
py
1a46d42315ea815982b8a2799411801aa22545a8
import time
import paho.mqtt.client as paho
import httplib2
from urllib import urlencode
import json


def call_get_arrivals(line):
    h = httplib2.Http(disable_ssl_certificate_validation=True)
    # h.add_credentials(intro_username, intro_password)
    resp, content = h.request("https://api.tfl.gov.uk/Line/" + line + "/Arrivals")
    # print resp
    try:
        response = json.loads(content)
        for i in response:
            line = i['lineName']
            trainNumber = i['vehicleId']
            stationId = i['destinationNaptanId']
            stationName = i['destinationName']
            expArrival = i['expectedArrival']
            timestamp = i['timestamp']
            ttl = i['timeToLive']
            data = dict(line=line, trainNumber=trainNumber, stationId=stationId,
                        stationName=stationName, timestamp=timestamp,
                        expArrival=expArrival, ttl=ttl)
            # print data
            client.publish("/tfl/", payload=json.dumps(data), qos=0)
    except Exception as inst:
        pass
    client.loop()


lines = ["victoria", "circle", "district", "northern", "jubilee", "piccadilly",
         "metropolitan", "bakerloo", "central"]

client = paho.Client()
client.connect('mqtt')

while 1 == 1:
    for line in lines:
        call_get_arrivals(line)
        time.sleep(1)
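# --- Consumer sketch (editor's addition, not part of the original script) ----
# The script above publishes one JSON document per arrival prediction to the
# "/tfl/" topic. The helper below is a minimal matching subscriber, meant to be
# run as a separate script against the same broker hostname ('mqtt'); it simply
# prints a few of the published fields.
def run_subscriber():
    def on_message(mqtt_client, userdata, msg):
        arrival = json.loads(msg.payload)
        print(arrival['line'], arrival['trainNumber'],
              arrival['stationName'], arrival['expArrival'])

    subscriber = paho.Client()
    subscriber.on_message = on_message
    subscriber.connect('mqtt')
    subscriber.subscribe("/tfl/")
    subscriber.loop_forever()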
py
1a46d42e6f0873cd8a44c345a167735e338ea16b
# -*- coding: utf-8 -*-

import bisect


class Phrase:
    """
    This class stores phrases information (mainly, sources and translations).
    It also contains a flag to know if a phrase is contained in a segment
    (validated) and a pointer to the segment in which the phrase is stored.
    Phrases data structures are ordered according to source.
    """

    def __init__(self, src):
        """
        This method initializes a new Phrase.
        The method receives the source to which it is originally associated.
        """
        self.sources = [src]
        self.translation = ''
        self.segment_position = None
        self.validated = False

    def addTranslation(self, trans):
        """
        This method adds more translation to the current translation of the phrase.
        The method receives a string with the new translation.
        """
        if self.translation != '':
            self.translation += ' '
        self.translation += trans

    def addSource(self, src):
        """
        This method adds a source to the list of sources contained in the phrase.
        The method receives the position of the new source.
        """
        if src not in self.sources:
            bisect.insort(self.sources, src)
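# --- Usage sketch (editor's addition, not part of the original module) -------
# A quick illustration of the class above: source positions stay sorted and
# de-duplicated as they are added, and translations accumulate with spaces.
if __name__ == '__main__':
    phrase = Phrase(5)
    phrase.addSource(2)
    phrase.addSource(9)
    phrase.addSource(2)           # duplicates are ignored
    phrase.addTranslation('hello')
    phrase.addTranslation('world')
    print(phrase.sources)         # [2, 5, 9]
    print(phrase.translation)     # 'hello world'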
py
1a46d576532b7afb8a2dafcb995647219f7f1a46
__author__ = 'Almenon'

from re import compile, search
import logging

logging.basicConfig(level="DEBUG")  # set level to INFO to get rid of debug messages

regex = compile(r"\b(s|e)[a-s]*\ *?(\d+)"   # season or episode
                r"[\: ,_\[\]\-x]*"          # followed by optional separator
                r"(s|e)[a-s]*\ *?(\d+)")    # season/episode


class ParseError(Exception):
    pass


def parse(request):
    logging.info("request: " + request)

    # PARSE REQUEST
    request = request.lower()
    season_episode = search(regex, request)
    if season_episode is None:
        raise ParseError("request does not contain correct format")
    # Compare string contents with '==' rather than identity ('is').
    elif season_episode.group(1) == 'e' and season_episode.group(3) == 's':
        episode = season_episode.group(2)
        season = season_episode.group(4)
    elif season_episode.group(1) == 's' and season_episode.group(3) == 'e':
        season = season_episode.group(2)
        episode = season_episode.group(4)
    else:  # s s or e e
        raise ParseError("request does not contain correct format")
    return season, episode

# testing
# try:
#     answer = parse("yo man e3s2")
#     print(answer)
# except ParseError as e:
#     print(e)
py
1a46d6e74a61f7ca6701776b724b68d09861d77a
import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure

from data import *

FIG = plt.figure()


def draw_coord(coord, name, lab=[1.0, 0.0]):
    color = 1.0 if lab[0] > lab[1] else -1.0
    ret = np.zeros(shape=[L, L, 1])
    coord_x, coord_y = coord
    coord_x_idx = np.argmax(coord_x)
    coord_y_idx = np.argmax(coord_y)
    ret[coord_x_idx][coord_y_idx][0] = color
    draw(ret, name)


def draw(m, name, extra=None):
    FIG.clf()

    matrix = m
    orig_shape = np.shape(matrix)
    # lose the channel shape in the end of orig_shape
    new_shape = orig_shape[:-1]
    matrix = np.reshape(matrix, new_shape)
    ax = FIG.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
    # plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar()

    if extra is not None:
        greens, reds = extra
        grn_x, grn_y = greens
        red_x, red_y = reds
        plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
        plt.scatter(x=red_x, y=red_y, c='r', s=40)

    # # put a blue dot at (10, 20)
    # plt.scatter([10], [20])
    # # put a red dot, size 40, at 2 locations:
    # plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
    # # plt.plot()

    plt.savefig(name)


def draw_orig(img, name):
    ret = np.reshape(img, [L, L, 1])
    draw(ret, name)


def draw_allob(img, name, ob_prefix):
    ret = np.zeros([L, L, 1])
    for ii in range(L):
        for jj in range(L):
            labb = img[ii][jj][0] - img[ii][jj][1]
            ret[ii][jj][0] = labb

    grn_x = []
    grn_y = []
    red_x = []
    red_y = []
    for obob in ob_prefix:
        ob_c, labb = obob
        if labb[0] > labb[1]:
            grn_x.append(ob_c[0])
            grn_y.append(ob_c[1])
        else:
            red_x.append(ob_c[0])
            red_y.append(ob_c[1])

    draw(ret, name, ((grn_y, grn_x), (red_y, red_x)))


def draw_obs(obs, name):
    ret_shape = [L, L, 1]
    ret = np.zeros(shape=ret_shape)
    for ob, lab in obs:
        ii, jj = ob
        labb = 1.0 if lab[0] > lab[1] else -1.0
        # labb = lab[0]
        ret[ii][jj][0] = labb
    draw(ret, name)


def draw_annotate(x_cords, y_cords, anns, name):
    FIG.clf()
    y = x_cords
    z = y_cords
    n = anns

    fig = FIG
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim([0, L])
    ax.set_ylim([0, L])
    ax.set_ylim(ax.get_ylim()[::-1])
    ax.scatter(z, y)

    for i, txt in enumerate(n):
        ax.annotate(txt, (z[i], y[i]))

    fig.savefig(name)


def draw_obs_trace(obs, name):
    x_coords = []
    y_coords = []
    anno = []
    for i, ob in enumerate(obs):
        ob_coord, ob_outcome = ob
        x_coords.append(ob_coord[0])
        y_coords.append(ob_coord[1])
        anno.append("O" + str(i) + str(int(ob_outcome[0])))
    draw_annotate(x_coords, y_coords, anno, name)


def draw_all_preds(all_preds, name):
    ret_shape = [L, L, 1]
    ret = np.zeros(shape=ret_shape)
    for qq, labb in all_preds:
        i, j = qq
        # ret[i][j][0] = 1.0 if labb[0] > labb[1] else 0.0
        # ret[i][j][0] = labb[0]
        ret[i][j][0] = labb[0]
    draw(ret, name)
py
1a46d8cd1d3b13788c28b89b7818e812f45af04c
import os import math import torch import numpy as np from PIL import Image, ImageDraw from torch.utils.data import random_split, DataLoader from matplotlib import pyplot as plt from data_utils import MyTestDataset, get_test_transforms, my_collate from conf.settings import BASE_DIR from faster_rcnn.predict import predict as faster_predict from yolo_v3.predict import predict as yolo_predict from unet.predict import predict as unet_predict models_path = os.path.join(BASE_DIR, "models") images_path = os.path.join(BASE_DIR, "images") if __name__ == "__main__": torch.manual_seed(0) from faster_rcnn.models import model as faster from yolo_v3.models import Darknet from unet.models import UNet faster_name = "faster_rcnn_7_30.pt" yolo_name = "yolo_v3_4_20.pt" unet_name = "unet_2_15.pt" split = "stage1_test" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f"Running on {device}") print(f"Loading {faster_name}") faster.load_state_dict(torch.load(os.path.join(models_path, faster_name), map_location=device)) faster.to(device=device) dataset = MyTestDataset(split=split, transforms=get_test_transforms(rescale_size=(256, 256))) faster_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False) print(f"Loading {yolo_name}") yolo = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg")) yolo.load_state_dict(torch.load(os.path.join(models_path, yolo_name), map_location=device)) yolo.to(device=device) dataset = MyTestDataset(split=split, transforms=get_test_transforms(rescale_size=(416, 416))) yolo_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False) print(f"Loading {unet_name}") unet = UNet(n_channels=1, n_classes=1) unet.load_state_dict(torch.load(os.path.join(models_path, unet_name), map_location=device)) unet.to(device=device) dataset = MyTestDataset(split=split, model="unet") unet_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False) for i, ((f_im, f_tar), (y_im, y_tar), (u_im, u_tar)) in enumerate(zip(faster_loader, yolo_loader, unet_loader)): name = u_tar[0] try: f_image, f_x, f_y = faster_predict(faster, image=f_im) y_image, y_x, y_y = yolo_predict(yolo, image=y_im) u_image, u_x, u_y = unet_predict(unet, image=u_im) except: print(f"Skipping {name}") continue fig = plt.figure(dpi=300) ax1 = fig.add_subplot(1, 3, 1) ax1.imshow(f_image, cmap="gray") ax1.plot(f_x, f_y, 'r+', linewidth=3, markersize=8) ax1.set_title('Faster R-CNN') ax1.axis('off') ax2 = fig.add_subplot(1, 3, 2) ax2.imshow(y_image, cmap="gray") ax2.plot(y_x, y_y, 'r+', linewidth=3, markersize=8) ax2.set_title('YOLOv3') ax2.axis('off') ax3 = fig.add_subplot(1, 3, 3) ax3.imshow(u_image, cmap="gray") ax3.plot(u_x, u_y, 'r+', linewidth=3, markersize=8) ax3.set_title('U-Net') ax3.axis('off') plt.tight_layout() plt.show() # os.makedirs(os.path.join(images_path, f"all/{name}"), exist_ok=True) # # fig = plt.figure(dpi=200) # ax1 = fig.add_subplot(1, 1, 1) # ax1.imshow(f_image, cmap="gray") # ax1.plot(f_x, f_y, 'r+', linewidth=3, markersize=12) # ax1.axis('off') # # plt.show() # plt.savefig(os.path.join(images_path, f"all/{name}/f_det_{i}.png"), dpi=200) # # fig = plt.figure(dpi=200) # ax1 = fig.add_subplot(1, 1, 1) # ax1.imshow(y_image, cmap="gray") # ax1.plot(y_x, y_y, 'r+', linewidth=3, markersize=12) # ax1.axis('off') # # plt.show() # plt.savefig(os.path.join(images_path, f"all/{name}/y_det_{i}.png"), dpi=200) # # fig = plt.figure(dpi=200) # ax1 = fig.add_subplot(1, 1, 1) # ax1.imshow(u_image, cmap="gray") # ax1.plot(u_x, u_y, 'r+', linewidth=3, 
markersize=12) # ax1.axis('off') # # plt.show() # plt.savefig(os.path.join(images_path, f"all/{name}/u_det_{i}.png"), dpi=200) # # plt.close('all') print(name) break
py
1a46d90123f05105d6cd2de08880b3a308d669cf
# -*- coding: utf-8 -*- """Redundancy.""" from proselint.tools import memoize, preferred_forms_check @memoize def check(text): """Suggest the preferred forms.""" err = "redundancy.wallace" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ ["rectangular", ["rectangular in shape"]], ["audible", ["audible to the ear"]], ] return preferred_forms_check(text, redundancies, err, msg) @memoize def check_garner(text): """Suggest the preferred forms. source: Garner's Modern American Usage source_url: http://bit.ly/1T4alrY """ err = "redundancy.garner" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ ["adequate", ["adequate enough"]], ["admitted", ["self-admitted"]], ["affidavit", ["sworn affidavit"]], ["agreement", ["mutual agreement"]], ["alumnus", ["former alumnus"]], ["antithetical", ["directly antithetical"]], ["approximately", ["approximately about"]], ["associate", ["associate together(?: in groups)?"]], ["bivouac", ["temporary bivouac", "bivouac camp"]], ["blend", ["blend together"]], ["but", ["but nevertheless"]], ["charged with...", ["accused of a charge"]], ["circumstances of", ["circumstances surrounding"]], ["circumstances", ["surrounding circumstances"]], ["close", ["close proximity"]], ["collaborate", ["collaborate together"]], ["collaborator", ["fellow collaborator"]], ["collaborators", ["fellow collaborators"]], ["collocated", ["collocated together"]], ["colleagues", ["fellow colleagues"]], ["combine", ["combine together"]], ["complacent", ["self-complacent"]], ["confessed", ["self-confessed"]], ["connect", ["connect together"]], ["consensus", ["(?:general )?consensus of opinion"]], ["consolidate", ["consolidate together"]], ["continues to", ["still continues to"]], ["contradictory", ["mutually contradictory"]], ["cooperation", ["mutual cooperation"]], ["couple", ["couple together"]], ["crisis", ["serious crisis"]], ["eliminate", ["entirely eliminate"]], ["especially", ["most especially"]], ["fact", ["actual fact"]], ["facts", ["true facts"]], ["forecast", ["future forecast"]], ["founding fathers", ["founding forefathers"]], ["free", ["free and gratis"]], ["free", ["free gratis"]], ["full", ["completely full"]], ["fundamentals", ["basic fundamentals"]], ["gift", ["free gift"]], ["innovation", ["new innovation"]], ["interact", ["interact with each other"]], ["large", ["large-size"]], ["meld", ["meld together"]], ["merge", ["merge together"]], ["mingle", ["mingle together"]], ["mix", ["mix together"]], ["mutual feelings", ["mutual feelings for eachother"]], ["mutual respect", ["mutual respect for each other"]], ["native citizen", ["native-born citizen"]], ["necessity", ["absolute necessity"]], ["obvious", ["blatantly obvious"]], ["pause", ["pause for a moment"]], ["planning", ["advance planning"]], ["plans", ["future plans"]], ["pooled", ["pooled together"]], ["potable water", ["potable drinking water"]], ["potable water", ["potable drinking water"]], ["recruit", ["new recruit"]], ["reelected", ["reelected for another term"]], ["refer", ["refer back"]], ["regress", ["regress back"]], ["repay them", ["repay them back"]], ["repay", ["repay back"]], ["repeat", ["repeat again"]], ["repeat", ["repeat back"]], ["repeat", ["repeat the same"]], ["repeated", ["repeated the same"]], ["reprieve", ["temporary reprieve"]], ["respite", ["brief respite"]], ["retirement", ["retiral", "retiracy"]], ["retreat", ["retreat back"]], ["return", ["return back"]], ["scrutinize", ["closely scrutinize"]], ["software", ["software program"]], ["surrounded", ["surrounded on all sides"]], 
["the nation", ["the whole entire nation"]], ["throughout the", ["throughout the entire"]], ["timpani", ["timpani drum"]], ["twins", ["pair of twins"]], ["vacancy", ["unfilled vacancy"]], ["various", ["various different"]], ["veteran", ["former veteran"]], ["visible", ["visible to the eye"]], ["vocation", ["professional vocation"]], ["while", ["while at the same time"]], ] return preferred_forms_check(text, redundancies, err, msg) @memoize def check_nordquist(text): """Suggest the preferred forms. source: Richard Nordquist source_url: http://grammar.about.com/bio/Richard-Nordquist-22176.htm """ err = "redundancy.nordquist" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ ["essential", ["absolutely essential"]], ["necessary", ["absolutely necessary"]], ["a.m.", ["a.m. in the morning"]], ["p.m.", ["p.m. at night"]], ] return preferred_forms_check(text, redundancies, err, msg) @memoize def check_atd(text): """Check for redundancies from After the Deadline.""" err = "after_the_deadline.redundancy" msg = "Redundancy. Use '{}' instead of '{}'." redundancies = [ [u"Bō", ["Bo Staff"]], ["Challah", ["Challah bread"]], ["Hallah", ["Hallah bread"]], ["Challah", ["Challah bread"]], ["I", ["I myself", "I personally"]], ["Mount Fuji", ["Mount Fujiyama"]], ["Milky Way", ["Milky Way galaxy"]], ["Rio Grande", ["Rio Grande river"]], ["adage", ["old adage"]], ["add", ["add a further", "add an additional"]], ["advance", ["advance forward"]], ["alternative", ["alternative choice"]], ["amaretto", ["amaretto almond"]], ["annihilate", ["completely annihilate"]], ["anniversary", ["annual anniversary"]], ["anonymous", ["unnamed anonymous"]], ["as", ["equally as"]], ["ascend", ["ascend up"]], ["ask", ["ask the question"]], ["assemble", ["assemble together"]], ["at present the", ["at the present time the"]], ["at this point", ["at this point in time"]], ["attach", ["attach together"]], ["autumn", ["autumn season"]], ["bald", ["bald-headed"]], ["balsa", ["balsa wood"]], ["belongings", ["personal belongings"]], ["benefits", ["desirable benefits"]], ["bento", ["bento box"]], ["best", ["best ever"]], ["bit", ["tiny bit"]], ["blend", ["blend together"]], ["bond", ["common bond"]], ["bonus", ["added bonus", "extra bonus"]], ["bouquet", ["bouquet of flowers"]], ["breakthrough", ["major breakthrough"]], ["bride", ["new bride"]], ["brief", ["brief in duration"]], ["bruin", ["bruin bear"]], ["hot", ["burning hot"]], ["cacophony", ["cacophony of sound"]], ["cameo", ["brief cameo", "cameo appearance"]], ["cancel", ["cancel out"]], ["cash", ["cash money"]], ["chai", ["chai tea"]], ["chance", ["random chance"]], ["charm", ["personal charm"]], ["circle", ["circle around", "round circle"]], ["circulate", ["circulate around"]], ["classify", ["classify into groups"]], ["classmates", ["fellow classmates"]], ["cliche", ["old cliche", "overused cliche"]], ["climb", ["climb up"]], ["clock", ["time clock"]], ["collaborate", ["collaborate together"]], ["collaboration", ["joint collaboration"]], ["colleague", ["fellow colleague"]], ["combine", ["combine together"]], ["commute", ["commute back and forth"]], ["compete", ["compete with each other"]], ["comprise", ["comprise of"]], ["comprises", ["comprises of"]], ["conceived", ["first conceived"]], ["conclusion", ["final conclusion"]], ["confer", ["confer together"]], ["confrontation", ["direct confrontation"]], # ["confused", ["confused state"]], ["connect", ["connect together", "connect up"]], ["consensus", ["consensus of opinion", "general consensus"]], ["consult", ["consult 
with"]], ["conversation", ["oral conversation"]], ["cool", ["cool down"]], ["cooperate", ["cooperate together"]], ["cooperation", ["mutual cooperation"]], ["copy", ["duplicate copy"]], ["core", ["inner core"]], ["cost", ["cost the sum of"]], ["could", ["could possibly"]], ["coupon", ["money-saving coupon"]], ["created", ["originally created"]], ["crisis", ["crisis situation"]], ["crouch", ["crouch down"]], ["currently", ["now currently"]], ["custom", ["old custom", "usual custom"]], ["danger", ["serious danger"]], ["dates", ["dates back"]], ["decision", ["definite decision"]], ["depreciate", ["depreciate in value"]], ["descend", ["descend down"]], ["destroy", ["totally destroy"]], ["destroyed", ["completely destroyed"]], ["destruction", ["total destruction"]], ["details", ["specific details"]], ["dilemma", ["difficult dilemma"]], ["disappear", ["disappear from sight"]], ["discovered", ["originally discovered"]], ["dive", ["dive down"]], ["done", ["over and done with"]], ["drawing", ["illustrated drawing"]], ["drop", ["drop down"]], ["dune", ["sand dune"]], ["during", ["during the course of"]], ["dwindle", ["dwindle down"]], ["dwindled", ["dwindled down"]], ["every", ["each and every"]], ["earlier", ["earlier in time"]], ["eliminate", ["completely eliminate", "eliminate altogether", "entirely eliminate"]], ["ember", ["glowing ember"]], ["embers", ["burning embers"]], ["emergency", ["emergency situation", "unexpected emergency"]], ["empty", ["empty out"]], ["enclosed", ["enclosed herein"]], ["end", ["final end"]], ["engulfed", ["completely engulfed"]], ["enter", ["enter in", "enter into"]], ["equal", ["equal to one another"]], ["eradicate", ["eradicate completely"]], ["essential", ["absolutely essential"]], ["estimated at", ["estimated at about", "estimated at approximately", "estimated at around"]], ["etc.", ["and etc."]], ["evolve", ["evolve over time"]], ["exaggerate", ["over exaggerate"]], ["exited", ["exited from"]], ["experience", ["actual experience", "past experience"]], ["experts", ["knowledgeable experts"]], ["extradite", ["extradite back"]], ["face the consequences", ["face up to the consequences"]], ["face the fact", ["face up to the fact"]], ["face the challenge", ["face up to the challenge"]], ["face the problem", ["face up to the problem"]], ["facilitate", ["facilitate easier"]], ["fact", ["established fact"]], ["facts", ["actual facts", "hard facts", "true facts"]], ["fad", ["passing fad"]], ["fall", ["fall down"]], ["fall", ["fall season"]], ["feat", ["major feat"]], ["feel", ["feel inside"]], ["feelings", ["inner feelings"]], ["few", ["few in number"]], ["filled", ["completely filled", "filled to capacity"]], ["first", ["first of all"]], ["first time", ["first time ever"]], ["fist", ["closed fist"]], ["fly", ["fly through the air"]], ["focus", ["focus in", "main focus"]], ["follow", ["follow after"]], ["for example", ["as for example"]], # ["foremost", ["first and foremost"]], ["forever", ["forever and ever"]], ["free", ["for free"]], ["friend", ["personal friend"]], ["friendship", ["personal friendship"]], ["full", ["full to capacity"]], ["fundamentals", ["basic fundamentals"]], ["fuse", ["fuse together"]], ["gather", ["gather together", "gather up"]], ["get up", ["get up on his feet", "get up on your feet"]], ["gift", ["free gift"]], ["gifts", ["free gifts"]], ["goal", ["ultimate goal"]], # ["graduate", ["former graduate"]], ["grow", ["grow in size"]], ["guarantee", ["absolute guarantee"]], ["gunman", ["armed gunman"]], ["gunmen", ["armed gunmen"]], ["habitat", ["native 
habitat"]], ["had done", ["had done previously"]], ["halves", ["two equal halves"]], # ["has", ["has got"]], # ["have", ["have got"]], ["haven", ["safe haven"]], # ["he", ["he himself"]], ["heat", ["heat up"]], ["history", ["past history"]], ["hoist", ["hoist up"]], ["hole", ["empty hole"]], ["honcho", ["head honcho"]], ["ice", ["frozen ice"]], ["ideal", ["perfect ideal"]], ["identical", ["same identical"]], ["identification", ["positive identification"]], ["imports", ["foreign imports"]], ["impulse", ["sudden impulse"]], ["in fact", ["in actual fact"]], ["in the yard", ["outside in the yard"]], ["inclusive", ["all inclusive"]], ["incredible", ["incredible to believe"]], ["incumbent", ["present incumbent"]], # ["indicted", ["indicted on a charge"]], ["industry", ["private industry"]], ["injuries", ["harmful injuries"]], ["innovation", ["new innovation"]], ["innovative", ["innovative new", "new innovative"]], # ["input", ["input into"]], ["instinct", ["natural instinct", "naturally instinct"]], ["integrate", ["integrate together", "integrate with each other"]], ["interdependent", ["interdependent on each other", "mutually interdependent"]], ["introduced", ["introduced for the first time"]], ["invention", ["new invention"]], ["kneel", ["kneel down"]], ["knots", ["knots per hour"]], # ["last", ["last of all"]], # ["later", ["later time"]], ["lift", ["lift up"]], ["lingers", ["still lingers"]], ["look to the future", ["look ahead to the future"]], ["love triangle", ["three-way love triangle"]], ["maintained", ["constantly maintained"]], ["manually", ["manually by hand"]], ["marina", ["boat marina"]], ["may", ["may possibly"]], ["meet", ["meet together", "meet with each other"]], ["memories", ["past memories"]], ["merge", ["merge together"]], ["merged", ["merged together"]], ["meshed", ["meshed together"]], ["midnight", ["twelve midnight"]], ["migraine", ["migraine headache"]], ["minestrone", ["minestrone soup"]], ["mix", ["mix together"]], ["moment", ["brief moment", "moment in time"]], ["monopoly", ["complete monopoly"]], ["mural", ["wall mural"]], ["mutual respect", ["mutual respect for each other"]], ["mutually dependent", ["mutually dependent on each other"]], ["mystery", ["unsolved mystery"]], # ["naked", ["bare naked"]], ["nape", ["nape of her neck"]], ["necessary", ["absolutely necessary"]], ["never", ["never at any time"]], ["noon", ["12 noon", "12 o'clock noon", "high noon", "twelve noon"]], ["nostalgia", ["nostalgia for the past"]], ["number of", ["number of different"]], ["opening", ["exposed opening"]], ["my opinion", ["my personal opinion"]], ["opposites", ["exact opposites", "polar opposites"]], ["opposite", ["exact opposite", "polar opposite"]], ["orbits", ["orbits around"]], ["outcome", ["final outcome"]], ["panacea", ["universal panacea"]], ["pending", ["now pending"]], ["penetrate", ["penetrate through"]], ["persists", ["still persists"]], ["pioneer", ["old pioneer"]], ["plan", ["plan ahead", "plan in advance", "proposed plan"]], ["planning", ["advance planning", "forward planning"]], ["plans", ["future plans"]], ["plan", ["future plan"]], ["point", ["point in time"]], ["point", ["sharp point"]], ["postpone", ["postpone until later"]], ["pouring rain", ["pouring down rain"]], ["preview", ["advance preview"]], ["previously listed", ["previously listed above"]], ["probed", ["probed into"]], ["proceed", ["proceed ahead"]], ["prosthesis", ["artificial prosthesis"]], # ["protrude", ["protrude out"]], ["proverb", ["old proverb"]], # ["proximity", ["close proximity"]], ["put off", 
["put off until later"]], # ["raise", ["raise up"]], ["re-elect", ["re-elect for another term"]], ["reason is", ["reason is because"]], ["recur", ["recur again"]], ["recurrence", ["future recurrence"]], ["refer", ["refer back"]], ["reflect", ["reflect back"]], # ["relevant", ["highly relevant"]], ["remain", ["continue to remain"]], ["remains", ["still remains"]], ["replica", ["exact replica"]], ["reply", ["reply back"]], # ["requirements", ["necessary requirements"]], ["reservations", ["advance reservations"]], ["retreat", ["retreat back"]], ["revert", ["revert back"]], ["round", ["round in shape"]], ["rule of thumb", ["rough rule of thumb"]], ["rumor", ["unconfirmed rumor"]], ["rustic", ["rustic country"]], ["same", ["exact same", "precise same", "same exact"]], ["sanctuary", ["safe sanctuary"]], ["satisfaction", ["full satisfaction"]], ["scrutinize", ["scrutinize in detail"]], ["scrutiny", ["careful scrutiny", "close scrutiny"]], ["secret", ["secret that cannot be told"]], ["seek", ["seek to find"]], ["separated", ["separated apart from each other"]], ["share", ["share together"]], ["shiny", ["shiny in appearance"]], ["sincere", ["truly sincere"]], ["sink", ["sink down"]], ["skipped", ["skipped over"]], # ["slow", ["slow speed"]], # ["small", ["small size"]], ["soft", ["soft in texture", "soft to the touch"]], ["sole", ["sole of the foot"]], ["some time", ["some time to come"]], ["speck", ["small speck"]], ["speed", ["rate of speed"]], ["spell out", ["spell out in detail"]], ["spiked", ["spiked upward", "spiked upwards"]], ["spring", ["spring season"]], ["stranger", ["anonymous stranger"]], ["studio audience", ["live studio audience"]], ["subway", ["underground subway"]], ["sufficient", ["sufficient enough"]], ["summer", ["summer season"]], ["sure", ["absolutely sure"]], ["surprise", ["unexpected surprise"]], ["surround", ["completely surround"]], ["surrounded", ["surrounded on all sides"]], ["tall", ["tall in height", "tall in stature"]], ["telepathy", ["mental telepathy"]], ["ten", ["ten in number"]], ["these", ["these ones"]], # ["they", ["they themselves"]], ["those", ["those ones"]], ["trench", ["open trench"]], ["truth", ["honest truth"]], ["tundra", ["frozen tundra"]], ["ultimatum", ["final ultimatum"]], # ["undeniable", ["undeniable truth"]], ["undergraduate", ["undergraduate student"]], # ["unintentional", ["unintentional mistake"]], ["vacillate", ["vacillate back and forth"]], ["veteran", ["former veteran"]], ["visible", ["visible to the eye"]], ["warn", ["warn in advance"]], ["warning", ["advance warning"]], ["water heater", ["hot water heater"]], ["in which we live", ["in which we live in"]], ["winter", ["winter season"]], ["witness", ["live witness"]], ["yakitori", ["yakitori chicken"]], ["yerba mate", ["yerba mate tea"]], ["yes", ["affirmative yes"]], ] return preferred_forms_check(text, redundancies, err, msg)
py
1a46d949b7604a15d668b62b4c88c09b54e5d4f0
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RPhilentropy(RPackage): """Similarity and Distance Quantification Between Probability Functions. Computes 46 optimized distance and similarity measures for comparing probability functions (Drost (2018) <doi:10.21105/joss.00765>). These comparisons between probability functions have their foundations in a broad range of scientific disciplines from mathematics to ecology. The aim of this package is to provide a core framework for clustering, classification, statistical inference, goodness-of-fit, non-parametric statistics, information theory, and machine learning tasks that are based on comparing univariate or multivariate probability functions.""" cran = "philentropy" version('0.5.0', sha256='b39e9a825458f3377e23b2a133180566780e89019e9d22a6a5b7ca87c49c412f') version('0.4.0', sha256='bfd30bf5635aab6a82716299a87d44cf96c7ab7f4ee069843869bcc85c357127') depends_on('[email protected]:', type=('build', 'run')) depends_on('r-rcpp', type=('build', 'run')) depends_on('r-dplyr', type=('build', 'run')) depends_on('r-kernsmooth', type=('build', 'run'))
py
1a46d96bfc52f4f2f74d96883e2cbe23d4ff25b8
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """misc helper functions for pyLSV2""" import struct from datetime import datetime def decode_system_parameters(result_set): """decode the result system parameter query :param tuple result_set: bytes returned by the system parameter query command R_PR :returns: dictionary with system parameter values :rtype: dict """ message_length = len(result_set) info_list = list() # as per comment in eclipse plugin, there might be a difference between a programming station and a real machine if message_length == 120: info_list = struct.unpack('!14L8B8L2BH4B2L2HL', result_set) elif message_length == 124: info_list = struct.unpack('!14L8B8L2BH4B2L2HLL', result_set) else: raise ValueError('unexpected length {} of message content {}'.format( message_length, result_set)) sys_par = dict() sys_par['Marker_Start'] = info_list[0] sys_par['Markers'] = info_list[1] sys_par['Input_Start'] = info_list[2] sys_par['Inputs'] = info_list[3] sys_par['Output_Start'] = info_list[4] sys_par['Outputs'] = info_list[5] sys_par['Counter_Start'] = info_list[6] sys_par['Counters'] = info_list[7] sys_par['Timer_Start'] = info_list[8] sys_par['Timers'] = info_list[9] sys_par['Word_Start'] = info_list[10] sys_par['Words'] = info_list[11] sys_par['String_Start'] = info_list[12] sys_par['Strings'] = info_list[13] sys_par['String_Length'] = info_list[14] sys_par['Input_Word_Start'] = info_list[22] sys_par['Input Words'] = info_list[23] sys_par['Output_Word_Start'] = info_list[24] sys_par['Output_Words'] = info_list[25] sys_par['LSV2_Version'] = info_list[30] sys_par['LSV2_Version_Flags'] = info_list[31] sys_par['Max_Block_Length'] = info_list[32] sys_par['HDH_Bin_Version'] = info_list[33] sys_par['HDH_Bin_Revision'] = info_list[34] sys_par['ISO_Bin_Version'] = info_list[35] sys_par['ISO_Bin_Revision'] = info_list[36] sys_par['HardwareVersion'] = info_list[37] sys_par['LSV2_Version_Flags_Ex'] = info_list[38] sys_par['Max_Trace_Line'] = info_list[39] sys_par['Scope_Channels'] = info_list[40] sys_par['PW_Encryption_Key'] = info_list[41] return sys_par def decode_file_system_info(data_set): """decode result from file system entry :param tuple result_set: bytes returned by the system parameter query command R_FI or CR_DR :returns: dictionary with file system entry parameters :rtype: dict """ file_info = dict() file_info['Size'] = struct.unpack('!L', data_set[:4])[0] file_info['Timestamp'] = datetime.fromtimestamp(struct.unpack('!L', data_set[4:8])[0]) arrtibutes = struct.unpack('!L', data_set[8:12])[0] file_info['Attributs'] = arrtibutes file_info['is_file'] = False file_info['is_directory'] = False file_info['is_drive'] = False if arrtibutes > 0: if bool(arrtibutes & 0x10): file_info['is_drive'] = True elif bool(arrtibutes & 0x20): file_info['is_directory'] = True else: file_info['is_file'] = True file_info['is_write_protected'] = bool(arrtibutes & 0x40) file_info['Name'] = data_set[12:].decode().strip('\x00').replace('\\', '/') return file_info def decode_directory_info(data_set): """decode result from directory entry :param tuple result_set: bytes returned by the system parameter query command R_DI :returns: dictionary with file system entry parameters :rtype: dict """ dir_info = dict() dir_info['Free Size'] = struct.unpack('!L', data_set[:4])[0] attribute_list = list() for i in range(4, len(data_set[4:132]), 4): attr = data_set[i:i + 4].decode().strip('\x00') if len(attr) > 0: attribute_list.append(attr) dir_info['Dir_Attributs'] = attribute_list dir_info['Attributs'] = 
struct.unpack('!32B', data_set[132:164]) dir_info['Path'] = data_set[164:].decode().strip('\x00').replace('\\', '/') return dir_info def decode_tool_information(data_set): """decode result from tool info :param bytes data_set: bytes returned by the system parameter query command R_RI for tool info :returns: dictionary with tool info values :rtype: dict """ tool_info = dict() tool_info['Number'] = struct.unpack('!L', data_set[0:4])[0] tool_info['Index'] = struct.unpack('!H', data_set[4:6])[0] tool_info['Axis'] = {0: 'X', 1: 'Y', 2: 'Z'}.get(struct.unpack('!H', data_set[6:8])[0], 'unknown') tool_info['Length'] = struct.unpack('<d', data_set[8:16])[0] tool_info['Radius'] = struct.unpack('<d', data_set[16:24])[0] return tool_info def decode_override_information(data_set): """decode result from override info :param bytes data_set: bytes returned by the system parameter query command R_RI for override info :returns: dictionary with override info values :rtype: dict """ override_info = dict() override_info['Feed_override'] = struct.unpack('!L', data_set[0:4])[0]/100 override_info['Speed_override'] = struct.unpack('!L', data_set[4:8])[0]/100 override_info['Rapid_override'] = struct.unpack('!L', data_set[8:12])[0]/100 return override_info
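# Illustration of the override record decoded by decode_override_information()
# above: three big-endian unsigned 32-bit integers, each holding a percentage
# scaled by 100. The payload below is fabricated for demonstration only; it is
# not real LSV2 data.
import struct

sample = struct.pack('!3L', 10000, 9500, 2500)   # 100.00 %, 95.00 %, 25.00 %
feed, speed, rapid = (value / 100 for value in struct.unpack('!3L', sample))
assert (feed, speed, rapid) == (100.0, 95.0, 25.0)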
py
1a46d9f1d7ef37ece098456637f7ba6ce0ea0175
import pytest from django.db import transaction from django.db.utils import IntegrityError from psqlextra.fields import HStoreField from . import migrations from .util import get_fake_model def test_migration_create_drop_model(): """Tests whether indexes are properly created and dropped when creating and dropping a model.""" uniqueness = ["beer", "cookies"] test = migrations.create_drop_model( HStoreField(uniqueness=uniqueness), ["CREATE UNIQUE", "DROP INDEX"] ) with test as calls: assert len(calls["CREATE UNIQUE"]) == len(uniqueness) assert len(calls["DROP INDEX"]) == len(uniqueness) def test_migration_alter_db_table(): """Tests whether indexes are renamed properly when renaming the database table.""" test = migrations.alter_db_table( HStoreField(uniqueness=["beer", "cookie"]), ["RENAME TO", "CREATE INDEX", "DROP INDEX"], ) with test as calls: # 1 rename for table, 2 for hstore keys assert len(calls["RENAME TO"]) == 3 assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 0 def test_add_field(): """Tests whether adding a field properly creates the indexes.""" test = migrations.add_field( HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"] ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 1 assert len(calls.get("DROP INDEX", [])) == 0 def test_remove_field(): """Tests whether removing a field properly removes the index.""" test = migrations.remove_field( HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"] ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 1 def test_alter_field_nothing(): """Tests whether no indexes are dropped when not changing anything in the uniqueness.""" test = migrations.alter_field( HStoreField(uniqueness=["beer"]), HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"], ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 0 def test_alter_field_add(): """Tests whether only one index is created when adding another key to the uniqueness.""" test = migrations.alter_field( HStoreField(uniqueness=["beer"]), HStoreField(uniqueness=["beer", "beer1"]), ["CREATE UNIQUE", "DROP INDEX"], ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 1 assert len(calls.get("DROP INDEX", [])) == 0 def test_alter_field_remove(): """Tests whether one index is dropped when removing a key from uniqueness.""" test = migrations.alter_field( HStoreField(uniqueness=["beer"]), HStoreField(uniqueness=[]), ["CREATE UNIQUE", "DROP INDEX"], ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 1 def test_alter_field_add_together(): """Tests whether adding one index is created when adding a "unique together".""" test = migrations.alter_field( HStoreField(uniqueness=["beer"]), HStoreField(uniqueness=["beer", ("beer1", "beer2")]), ["CREATE UNIQUE", "DROP INDEX"], ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 1 assert len(calls.get("DROP INDEX", [])) == 0 def test_alter_field_remove_together(): """Tests whether adding one index is dropped when adding a "unique together".""" test = migrations.alter_field( HStoreField(uniqueness=[("beer1", "beer2")]), HStoreField(uniqueness=[]), ["CREATE UNIQUE", "DROP INDEX"], ) with test as calls: assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 1 def test_rename_field(): """Tests whether renaming a field doesn't cause the 
index to be re-created.""" test = migrations.rename_field( HStoreField(uniqueness=["beer", "cookies"]), ["RENAME TO", "CREATE INDEX", "DROP INDEX"], ) with test as calls: assert len(calls.get("RENAME TO", [])) == 2 assert len(calls.get("CREATE UNIQUE", [])) == 0 assert len(calls.get("DROP INDEX", [])) == 0 def test_enforcement(): """Tests whether the constraints are actually properly enforced.""" model = get_fake_model({"title": HStoreField(uniqueness=["en"])}) # should pass, table is empty and 'ar' does not have to be unique model.objects.create(title={"en": "unique", "ar": "notunique"}) model.objects.create(title={"en": "elseunique", "ar": "notunique"}) # this should fail, key 'en' must be unique with transaction.atomic(): with pytest.raises(IntegrityError): model.objects.create(title={"en": "unique", "ar": "notunique"}) def test_enforcement_together(): """Tests whether unique_together style constraints are enforced properly.""" model = get_fake_model({"title": HStoreField(uniqueness=[("en", "ar")])}) model.objects.create(title={"en": "unique", "ar": "notunique"}) with transaction.atomic(): with pytest.raises(IntegrityError): model.objects.create(title={"en": "unique", "ar": "notunique"}) model.objects.create(title={"en": "notunique", "ar": "unique"})
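# The tests above build throwaway models via get_fake_model(); in application
# code the equivalent declaration might look like the sketch below. It assumes
# a project already configured for django-postgres-extra (e.g. its database
# backend), which this excerpt does not show; the model name and app label are
# hypothetical.
from django.db import models

from psqlextra.fields import HStoreField


class Product(models.Model):
    # 'en' must be unique on its own; ('name', 'brand') must be unique together.
    title = HStoreField(uniqueness=["en", ("name", "brand")])

    class Meta:
        app_label = "shop"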
py
1a46da01d2ee4d183b24ed29eab74c7a5705aee1
from json import ( JSONDecodeError, ) import logging import os from pathlib import ( Path, ) import socket import sys import threading from types import ( TracebackType, ) from typing import ( Any, Type, ) from web3._utils.threads import ( Timeout, ) from web3.types import ( RPCEndpoint, RPCResponse, ) from .base import ( JSONBaseProvider, ) def get_ipc_socket(ipc_path: str, timeout: float=0.1) -> socket.socket: if sys.platform == 'win32': # On Windows named pipe is used. Simulate socket with it. from web3._utils.windows import NamedPipe return NamedPipe(ipc_path) else: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(ipc_path) sock.settimeout(timeout) return sock class PersistantSocket: sock = None def __init__(self, ipc_path: str) -> None: self.ipc_path = ipc_path def __enter__(self) -> socket.socket: if not self.ipc_path: raise FileNotFoundError("cannot connect to IPC socket at path: %r" % self.ipc_path) if not self.sock: self.sock = self._open() return self.sock def __exit__( self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType ) -> None: # only close the socket if there was an error if exc_value is not None: try: self.sock.close() except Exception: pass self.sock = None def _open(self) -> socket.socket: return get_ipc_socket(self.ipc_path) def reset(self) -> socket.socket: self.sock.close() self.sock = self._open() return self.sock # type ignored b/c missing return statement is by design here def get_default_ipc_path() -> str: # type: ignore if sys.platform == 'darwin': ipc_path = os.path.expanduser(os.path.join( "~", "Library", "Ethereum", "geth.ipc" )) if os.path.exists(ipc_path): return ipc_path ipc_path = os.path.expanduser(os.path.join( "~", "Library", "Application Support", "io.parity.ethereum", "jsonrpc.ipc" )) if os.path.exists(ipc_path): return ipc_path base_trinity_path = Path('~').expanduser() / '.local' / 'share' / 'trinity' ipc_path = str(base_trinity_path / 'mainnet' / 'jsonrpc.ipc') if Path(ipc_path).exists(): return str(ipc_path) elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'): ipc_path = os.path.expanduser(os.path.join( "~", ".ethereum", "geth.ipc" )) if os.path.exists(ipc_path): return ipc_path ipc_path = os.path.expanduser(os.path.join( "~", ".local", "share", "io.parity.ethereum", "jsonrpc.ipc" )) if os.path.exists(ipc_path): return ipc_path base_trinity_path = Path('~').expanduser() / '.local' / 'share' / 'trinity' ipc_path = str(base_trinity_path / 'mainnet' / 'jsonrpc.ipc') if Path(ipc_path).exists(): return str(ipc_path) elif sys.platform == 'win32': ipc_path = os.path.join( "\\\\", ".", "pipe", "geth.ipc" ) if os.path.exists(ipc_path): return ipc_path ipc_path = os.path.join( "\\\\", ".", "pipe", "jsonrpc.ipc" ) if os.path.exists(ipc_path): return ipc_path else: raise ValueError( "Unsupported platform '{0}'. Only darwin/linux/win32/freebsd are " "supported. 
You must specify the ipc_path".format(sys.platform) ) # type ignored b/c missing return statement is by design here def get_dev_ipc_path() -> str: # type: ignore if sys.platform == 'darwin': tmpdir = os.environ.get('TMPDIR', '') ipc_path = os.path.expanduser(os.path.join( tmpdir, "geth.ipc" )) if os.path.exists(ipc_path): return ipc_path elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'): ipc_path = os.path.expanduser(os.path.join( "/tmp", "geth.ipc" )) if os.path.exists(ipc_path): return ipc_path elif sys.platform == 'win32': ipc_path = os.path.join( "\\\\", ".", "pipe", "geth.ipc" ) if os.path.exists(ipc_path): return ipc_path ipc_path = os.path.join( "\\\\", ".", "pipe", "jsonrpc.ipc" ) if os.path.exists(ipc_path): return ipc_path else: raise ValueError( "Unsupported platform '{0}'. Only darwin/linux/win32/freebsd are " "supported. You must specify the ipc_path".format(sys.platform) ) class IPCProvider(JSONBaseProvider): logger = logging.getLogger("web3.providers.IPCProvider") _socket = None def __init__(self, ipc_path: str=None, timeout: int=10, *args: Any, **kwargs: Any) -> None: if ipc_path is None: self.ipc_path = get_default_ipc_path() elif isinstance(ipc_path, str) or isinstance(ipc_path, Path): self.ipc_path = str(Path(ipc_path).expanduser().resolve()) else: raise TypeError("ipc_path must be of type string or pathlib.Path") self.timeout = timeout self._lock = threading.Lock() self._socket = PersistantSocket(self.ipc_path) super().__init__() def __str__(self) -> str: return f"<{self.__class__.__name__} {self.ipc_path}>" def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse: self.logger.debug("Making request IPC. Path: %s, Method: %s", self.ipc_path, method) request = self.encode_rpc_request(method, params) with self._lock, self._socket as sock: try: sock.sendall(request) except BrokenPipeError: # one extra attempt, then give up sock = self._socket.reset() sock.sendall(request) raw_response = b"" with Timeout(self.timeout) as timeout: while True: try: raw_response += sock.recv(4096) except socket.timeout: timeout.sleep(0) continue if raw_response == b"": timeout.sleep(0) elif has_valid_json_rpc_ending(raw_response): try: response = self.decode_rpc_response(raw_response) except JSONDecodeError: timeout.sleep(0) continue else: return response else: timeout.sleep(0) continue # A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification def has_valid_json_rpc_ending(raw_response: bytes) -> bool: stripped_raw_response = raw_response.rstrip() for valid_ending in [b"}", b"]"]: if stripped_raw_response.endswith(valid_ending): return True else: return False
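# Typical use of the provider defined above (method names follow the web3.py v5
# era this module targets). The path is only an example; when ipc_path is
# omitted the provider falls back to get_default_ipc_path().
from web3 import Web3

w3 = Web3(Web3.IPCProvider("~/.ethereum/geth.ipc", timeout=10))
print(w3.isConnected())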
py
1a46da4a3734f32326bef5f2f8affd37c2bdf9b3
# This is my Python cheat sheet # Basic vector maths def SumVector(a,b): sum = [(a[0]+b[0]),(a[1]+b[1])] return sum def ProVector(a,s): pro = [(s*a[0]),(s*a[1])] return pro a = [-1,2] b = [4,5] s = 10 print(f"Sum of a and b is:{SumVector(a,b)}") print(f"Product of a and s is:{ProVector(a,s)}") # Introduction to Python dictionaries prices = {'apple':4.99,'orange':3.99,'banana':2.99} askf = input('apple , orange or banana ') print(f'the price of {askf} is {prices[askf.lower()]}')
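# A slightly more defensive variant of the dictionary lookup above: dict.get()
# avoids a KeyError when the user types a fruit that is not in the table.
prices = {'apple': 4.99, 'orange': 3.99, 'banana': 2.99}
choice = input('apple, orange or banana: ').lower()
print(f"the price of {choice} is {prices.get(choice, 'unknown')}")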
py
1a46da78225f4f304711bd784dae13c689f7fbb7
# -*- coding: utf-8 -*- # Copyright 2015 Fanficdownloader team, 2019 FanFicFare team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import import re import codecs # py2 vs py3 transition from . import six from .six.moves import configparser from .six.moves.configparser import DEFAULTSECT, MissingSectionHeaderError, ParsingError if six.PY2: ConfigParser = configparser.SafeConfigParser else: # PY3 ConfigParser = configparser.ConfigParser from .six.moves import urllib from .six.moves.urllib.parse import (urlencode, quote_plus) from .six.moves.urllib.request import (build_opener, HTTPCookieProcessor, Request) from .six.moves.urllib.error import HTTPError from .six.moves import http_cookiejar as cl from .six import text_type as unicode from .six import string_types as basestring from .six import ensure_binary, ensure_text import time import logging import sys import pickle from . import exceptions logger = logging.getLogger(__name__) try: import chardet except ImportError: chardet = None from .gziphttp import GZipProcessor from .htmlcleanup import reduce_zalgo # All of the writers(epub,html,txt) and adapters(ffnet,twlt,etc) # inherit from Configurable. The config file(s) uses ini format: # [sections] with key:value settings. # # [defaults] # titlepage_entries: category,genre, status # [www.whofic.com] # titlepage_entries: category,genre, status,dateUpdated,rating # [epub] # titlepage_entries: category,genre, status,datePublished,dateUpdated,dateCreated # [www.whofic.com:epub] # titlepage_entries: category,genre, status,datePublished # [overrides] # titlepage_entries: category # Work around for fact that py3 apparently doesn't allow/ignore # recursive imports like py2 does. try: from . import adapters except ImportError: import sys if "fanficfare.adapters" in sys.modules: adapters = sys.modules["fanficfare.adapters"] elif "calibre_plugins.fanficfare_plugin.fanficfare.adapters" in sys.modules: adapters = sys.modules["calibre_plugins.fanficfare_plugin.fanficfare.adapters"] def re_compile(regex,line): try: return re.compile(regex,re.DOTALL) except Exception as e: raise exceptions.RegularExpresssionFailed(e,regex,line) # fall back labels. 
titleLabels = { 'category':'Category', 'genre':'Genre', 'language':'Language', 'status':'Status', 'series':'Series', 'characters':'Characters', 'ships':'Relationships', 'datePublished':'Published', 'dateUpdated':'Updated', 'dateCreated':'Packaged', 'rating':'Rating', 'warnings':'Warnings', 'numChapters':'Chapters', 'numWords':'Words', 'words_added':'Words Added', # logpage only 'site':'Site', 'publisher':'Publisher', 'storyId':'Story ID', 'authorId':'Author ID', 'extratags':'Extra Tags', 'title':'Title', 'storyUrl':'Story URL', 'sectionUrl':'Story URL Section', 'description':'Summary', 'author':'Author', 'authorUrl':'Author URL', 'formatname':'File Format', 'formatext':'File Extension', 'siteabbrev':'Site Abbrev', 'version':'Downloader Version' } formatsections = ['html','txt','epub','mobi'] othersections = ['defaults','overrides'] def get_valid_sections(): sites = adapters.getConfigSections() sitesections = list(othersections) for section in sites: sitesections.append(section) # also allows [www.base_efiction] and [www.base_xenforoforum]. Not # likely to matter. if section.startswith('www.'): # add w/o www if has www sitesections.append(section[4:]) else: # add w/ www if doesn't www sitesections.append('www.%s'%section) allowedsections = [] allowedsections.extend(formatsections) for section in sitesections: allowedsections.append(section) for f in formatsections: allowedsections.append('%s:%s'%(section,f)) return allowedsections def get_valid_list_entries(): return list(['category', 'genre', 'characters', 'ships', 'warnings', 'extratags', 'author', 'authorId', 'authorUrl', 'lastupdate', ]) boollist=['true','false'] base_xenforo2_list=['base_xenforo2forum', 'forums.sufficientvelocity.com', ] base_xenforo_list=base_xenforo2_list+['base_xenforoforum', 'forums.spacebattles.com', 'forum.questionablequesting.com', 'www.alternatehistory.com', ] def get_valid_set_options(): ''' dict() of names of boolean options, but as a tuple with valid sites, valid formats and valid values (None==all) This is to further restrict keywords to certain sections and/or values. get_valid_keywords() below is the list of allowed keywords. Any keyword listed here must also be listed there. This is what's used by the code when you save personal.ini in plugin that stops and points out possible errors in keyword *values*. It doesn't flag 'bad' keywords. Note that it's separate from color highlighting and most keywords need to be added to both. 
''' valdict = {'collect_series':(None,None,boollist), 'include_titlepage':(None,None,boollist), 'include_tocpage':(None,None,boollist), 'is_adult':(None,None,boollist), 'keep_style_attr':(None,None,boollist), 'keep_title_attr':(None,None,boollist), 'make_firstimage_cover':(None,None,boollist), 'never_make_cover':(None,None,boollist), 'nook_img_fix':(None,None,boollist), 'replace_br_with_p':(None,None,boollist), 'replace_hr':(None,None,boollist), 'sort_ships':(None,None,boollist), 'strip_chapter_numbers':(None,None,boollist), 'mark_new_chapters':(None,None,boollist+['latestonly']), 'titlepage_use_table':(None,None,boollist), 'use_ssl_unverified_context':(None,None,boollist), 'continue_on_chapter_error':(None,None,boollist), 'conditionals_use_lists':(None,None,boollist), 'dedup_chapter_list':(None,None,boollist), 'add_chapter_numbers':(None,None,boollist+['toconly']), 'check_next_chapter':(['fanfiction.net'],None,boollist), 'tweak_fg_sleep':(['fanfiction.net'],None,boollist), 'skip_author_cover':(['fanfiction.net'],None,boollist), 'fix_fimf_blockquotes':(['fimfiction.net'],None,boollist), 'fail_on_password':(['fimfiction.net'],None,boollist), 'keep_prequel_in_description':(['fimfiction.net'],None,boollist), 'include_author_notes':(['fimfiction.net'],None,boollist), 'do_update_hook':(['fimfiction.net', 'archiveofourown.org'],None,boollist), 'always_login':(['archiveofourown.org']+base_xenforo_list,None,boollist), 'use_archived_author':(['archiveofourown.org'],None,boollist), 'use_view_full_work':(['archiveofourown.org'],None,boollist), 'remove_authorfootnotes_on_update':(['archiveofourown.org'],None,boollist), 'force_login':(['phoenixsong.net'],None,boollist), 'non_breaking_spaces':(['fictionmania.tv'],None,boollist), 'download_text_version':(['fictionmania.tv'],None,boollist), 'universe_as_series':(['storiesonline.net','finestories.com','scifistories.com'],None,boollist), 'strip_text_links':(['bloodshedverse.com','asexstories.com'],None,boollist), 'centeredcat_to_characters':(['tthfanfic.org'],None,boollist), 'pairingcat_to_characters_ships':(['tthfanfic.org'],None,boollist), 'romancecat_to_characters_ships':(['tthfanfic.org'],None,boollist), 'use_meta_keywords':(['literotica.com'],None,boollist), 'chapter_categories_use_all':(['literotica.com'],None,boollist), 'clean_chapter_titles':(['literotica.com'],None,boollist), 'description_in_chapter':(['literotica.com'],None,boollist), 'inject_chapter_title':(['asianfanfics.com','storiesonline.net','finestories.com','scifistories.com'],None,boollist), 'auto_sub':(['asianfanfics.com'],None,boollist), # eFiction Base adapters allow bulk_load # kept forgetting to add them, so now it's automatic. 
'bulk_load':(adapters.get_bulk_load_sites(), None,boollist), 'include_logpage':(None,['epub'],boollist+['smart']), 'logpage_at_end':(None,['epub'],boollist), 'calibre_series_meta':(None,['epub'],boollist), 'windows_eol':(None,['txt'],boollist), 'include_images':(None,['epub','html'],boollist), 'grayscale_images':(None,['epub','html'],boollist), 'no_image_processing':(None,['epub','html'],boollist), 'normalize_text_links':(None,['epub','html'],boollist), 'internalize_text_links':(None,['epub','html'],boollist), 'capitalize_forumtags':(base_xenforo_list,None,boollist), 'minimum_threadmarks':(base_xenforo_list,None,None), 'first_post_title':(base_xenforo_list,None,None), 'always_include_first_post':(base_xenforo_list,None,boollist), 'always_reload_first_chapter':(base_xenforo_list,None,boollist), 'always_use_forumtags':(base_xenforo_list,None,boollist), 'use_reader_mode':(base_xenforo_list,None,boollist), 'author_avatar_cover':(base_xenforo_list,None,boollist), 'remove_spoilers':(base_xenforo_list+['royalroad.com'],None,boollist), 'legend_spoilers':(base_xenforo_list+['royalroad.com', 'fiction.live'],None,boollist), 'apocrypha_to_omake':(base_xenforo_list,None,boollist), 'replace_failed_smilies_with_alt_text':(base_xenforo_list,None,boollist), 'use_threadmark_wordcounts':(base_xenforo_list,None,boollist), 'always_include_first_post_chapters':(base_xenforo_list,None,boollist), 'order_threadmarks_by_date':(base_xenforo_list,None,boollist), 'use_threadmarks_description':(base_xenforo2_list,None,boollist), 'use_threadmarks_status':(base_xenforo2_list,None,boollist), 'use_threadmarks_cover':(base_xenforo2_list,None,boollist), 'skip_sticky_first_posts':(base_xenforo2_list,None,boollist), 'fix_pseudo_html': (['webnovel.com'], None, boollist), 'fix_excess_space': (['novelonlinefull.com', 'novelall.com'], ['epub', 'html'], boollist), 'dedup_order_chapter_list': (['wuxiaworld.co'], None, boollist), 'show_nsfw_cover_images': (['fiction.live'], None, boollist), 'show_timestamps': (['fiction.live'], None, boollist), 'show_nsfw_cover_images': (['fiction.live'], None, boollist) } return dict(valdict) def get_valid_scalar_entries(): return list(['series', 'seriesUrl', 'language', 'status', 'datePublished', 'dateUpdated', 'dateCreated', 'rating', 'numChapters', 'numWords', 'words_added', # logpage only. 'site', 'publisher', 'storyId', 'title', 'titleHTML', 'storyUrl', 'sectionUrl', 'description', 'formatname', 'formatext', 'siteabbrev', 'version', # internal stuff. 'authorHTML', 'seriesHTML', 'langcode', 'output_css', 'cover_image', ]) def get_valid_entries(): return get_valid_list_entries() + get_valid_scalar_entries() # *known* keywords -- or rather regexps for them. def get_valid_keywords(): ''' Among other things, this list is used by the color highlighting in personal.ini editing in plugin. Note that it's separate from value checking and most keywords need to be added to both. 
''' return list(['(in|ex)clude_metadata_(pre|post)', 'add_chapter_numbers', 'add_genre_when_multi_category', 'add_category_when_multi_category', 'adult_ratings', 'allow_unsafe_filename', 'always_overwrite', 'anthology_tags', 'anthology_title_pattern', 'background_color', 'bulk_load', 'chapter_end', 'chapter_start', 'chapter_title_strip_pattern', 'chapter_title_def_pattern', 'chapter_title_add_pattern', 'chapter_title_new_pattern', 'chapter_title_addnew_pattern', 'title_chapter_range_pattern', 'mark_new_chapters', 'check_next_chapter', 'skip_author_cover', 'collect_series', 'comma_entries', 'connect_timeout', 'convert_images_to', 'cover_content', 'cover_exclusion_regexp', 'custom_columns_settings', 'dateCreated_format', 'datePublished_format', 'dateUpdated_format', 'default_cover_image', 'description_limit', 'do_update_hook', 'use_archived_author', 'use_view_full_work', 'always_login', 'exclude_notes', 'remove_authorfootnotes_on_update', 'exclude_editor_signature', 'extra_logpage_entries', 'extra_subject_tags', 'extra_titlepage_entries', 'extra_valid_entries', 'extratags', 'extracategories', 'extragenres', 'extracharacters', 'extraships', 'extrawarnings', 'fail_on_password', 'file_end', 'file_start', 'fileformat', 'find_chapters', 'fix_fimf_blockquotes', 'keep_prequel_in_description', 'include_author_notes', 'force_login', 'generate_cover_settings', 'grayscale_images', 'image_max_size', 'include_images', 'include_logpage', 'logpage_at_end', 'calibre_series_meta', 'include_subject_tags', 'include_titlepage', 'include_tocpage', 'chardet_confidence_limit', 'is_adult', 'join_string_authorHTML', 'keep_style_attr', 'keep_title_attr', 'keep_html_attrs', 'replace_tags_with_spans', 'keep_empty_tags', 'keep_summary_html', 'logpage_end', 'logpage_entries', 'logpage_entry', 'logpage_start', 'logpage_update_end', 'logpage_update_start', 'make_directories', 'make_firstimage_cover', 'make_linkhtml_entries', 'max_fg_sleep', 'max_fg_sleep_at_downloads', 'min_fg_sleep', 'never_make_cover', 'cover_min_size', 'no_image_processing', 'non_breaking_spaces', 'download_text_version', 'nook_img_fix', 'output_css', 'output_filename', 'output_filename_safepattern', 'password', 'post_process_cmd', 'rating_titles', 'remove_transparency', 'replace_br_with_p', 'replace_hr', 'replace_xbr_with_hr', 'replace_metadata', 'slow_down_sleep_time', 'sort_ships', 'sort_ships_splits', 'strip_chapter_numbers', 'strip_chapter_numeral', 'strip_text_links', 'centeredcat_to_characters', 'pairingcat_to_characters_ships', 'romancecat_to_characters_ships', 'use_meta_keywords', 'chapter_categories_use_all', 'clean_chapter_titles', 'conditionals_use_lists', 'description_in_chapter', 'inject_chapter_title', 'auto_sub', 'titlepage_end', 'titlepage_entries', 'titlepage_entry', 'titlepage_no_title_entry', 'titlepage_start', 'titlepage_use_table', 'titlepage_wide_entry', 'tocpage_end', 'tocpage_entry', 'tocpage_start', 'tweak_fg_sleep', 'universe_as_series', 'use_ssl_unverified_context', 'user_agent', 'username', 'website_encodings', 'wide_titlepage_entries', 'windows_eol', 'wrap_width', 'zip_filename', 'zip_output', 'capitalize_forumtags', 'continue_on_chapter_error', 'chapter_title_error_mark', 'minimum_threadmarks', 'first_post_title', 'always_include_first_post', 'always_reload_first_chapter', 'always_use_forumtags', 'use_reader_mode', 'author_avatar_cover', 'reader_posts_per_page', 'remove_spoilers', 'legend_spoilers', 'apocrypha_to_omake', 'skip_threadmarks_categories', 'normalize_text_links', 'internalize_text_links', 
'replace_failed_smilies_with_alt_text', 'use_threadmark_wordcounts', 'always_include_first_post_chapters', 'order_threadmarks_by_date', 'use_threadmarks_description', 'use_threadmarks_status', 'use_threadmarks_cover', 'skip_sticky_first_posts', 'datethreadmark_format', 'fix_pseudo_html', 'fix_excess_space', 'dedup_order_chapter_list', 'ignore_chapter_url_list', 'dedup_chapter_list', 'show_timestamps', 'show_nsfw_cover_images', 'show_spoiler_tags', 'max_zalgo', 'epub_version', ]) # *known* entry keywords -- or rather regexps for them. def get_valid_entry_keywords(): return list(['%s_(label|format)', '(default_value|include_in|join_string|keep_in_order)_%s',]) # Moved here for test_config. def make_generate_cover_settings(param): vlist = [] for line in param.splitlines(): if "=>" in line: try: (template,regexp,setting) = [ x.strip() for x in line.split("=>") ] re_compile(regexp,line) vlist.append((template,regexp,setting)) except Exception as e: raise exceptions.PersonalIniFailed(e,line,param) return vlist class Configuration(ConfigParser): def __init__(self, sections, fileform, lightweight=False): site = sections[-1] # first section is site DN. ConfigParser.__init__(self) self.lightweight = lightweight self.use_pagecache = False # default to false for old adapters. self.linenos=dict() # key by section or section,key -> lineno ## [injected] section has even less priority than [defaults] self.sectionslist = ['defaults','injected'] ## add other sections (not including site DN) after defaults, ## but before site-specific. for section in sections[:-1]: self.addConfigSection(section) if site.startswith("www."): sitewith = site sitewithout = site.replace("www.","") else: sitewith = "www."+site sitewithout = site self.addConfigSection(sitewith) self.addConfigSection(sitewithout) if fileform: self.addConfigSection(fileform) ## add other sections:fileform (not including site DN) ## after fileform, but before site-specific:fileform. for section in sections[:-1]: self.addConfigSection(section+":"+fileform) self.addConfigSection(sitewith+":"+fileform) self.addConfigSection(sitewithout+":"+fileform) self.addConfigSection("overrides") self.listTypeEntries = get_valid_list_entries() self.validEntries = get_valid_entries() self.url_config_set = False self.override_sleep = None self.cookiejar = self.get_empty_cookiejar() self.opener = build_opener(HTTPCookieProcessor(self.cookiejar),GZipProcessor()) self.pagecache = self.get_empty_pagecache() self.save_cache_file = None self.save_cookiejar_file = None def section_url_names(self,domain,section_url_f): ## domain is passed as a method to limit the damage if/when an ## adapter screws up _section_url domain = domain.replace('www.','') ## let's not confuse the issue any more than it is. try: ## OrderDict (the default for ConfigParser) has to be ## reconstructed completely because removing and re-adding ## a section would mess up the order. ## assumes _dict and _sections from ConfigParser parent. self._sections = self._dict((section_url_f(k) if (domain in k and 'http' in k) else k, v) for k, v in six.viewitems(self._sections)) # logger.debug(self._sections.keys()) except Exception as e: logger.warning("Failed to perform section_url_names: %s"%e) def addUrlConfigSection(self,url): if not self.lightweight: # don't need when just checking for normalized URL. # replace if already set once. 
if self.url_config_set: self.sectionslist[self.sectionslist.index('overrides')+1]=url else: self.addConfigSection(url,'overrides') self.url_config_set=True def addConfigSection(self,section,before=None): if section not in self.sectionslist: # don't add if already present. if before is None: self.sectionslist.insert(0,section) else: ## because sectionslist is hi-pri first, lo-pri last, ## 'before' means after in the list. self.sectionslist.insert(self.sectionslist.index(before)+1,section) def isListType(self,key): return key in self.listTypeEntries or self.hasConfig("include_in_"+key) def isValidMetaEntry(self, key): return key in self.getValidMetaList() def getValidMetaList(self): return self.validEntries + self.getConfigList("extra_valid_entries") # used by adapters & writers, non-convention naming style def hasConfig(self, key): return self.has_config(self.sectionslist, key) def has_config(self, sections, key): for section in sections: try: self.get(section,key) #print("found %s in section [%s]"%(key,section)) return True except: try: self.get(section,key+"_filelist") #print("found %s_filelist in section [%s]"%(key,section)) return True except: try: self.get(section,"add_to_"+key) #print("found add_to_%s in section [%s]"%(key,section)) return True except: pass return False # used by adapters & writers, non-convention naming style def getConfig(self, key, default=""): return self.get_config(self.sectionslist,key,default) def get_config(self, sections, key, default=""): val = default val_files = [] if not key.endswith("_filelist"): ## <key>_filelist overrides <key>, but add_to_<key> is ## still used. By using self.get_config_list(), ## add_to_<key>_filelist also works. (But not ## <key>_filelist_filelist--that way lies madness--and ## infinite recursion.) self.get_config_list() also does ## the list split & clean up. val_files = self.get_config_list(sections, key+"_filelist") file_val = False if val_files: val = '' for v in val_files: try: val = val + self._fetchUrl(v) file_val = True except: pass if not file_val: logger.warning("All files for (%s) failed! Using (%s) instead. Filelist: (%s)"% (key+"_filelist",key,val_files)) if not file_val: for section in sections: try: val = self.get(section,key) if val and val.lower() == "false": val = False #print("getConfig(%s)=[%s]%s" % (key,section,val)) break except (configparser.NoOptionError, configparser.NoSectionError) as e: pass for section in sections[::-1]: # 'martian smiley' [::-1] reverses list by slicing whole list with -1 step. try: val = val + self.get(section,"add_to_"+key) #print("getConfig(add_to_%s)=[%s]%s" % (key,section,val)) except (configparser.NoOptionError, configparser.NoSectionError) as e: pass return val # split and strip each. def get_config_list(self, sections, key, default=[]): vlist = re.split(r'(?<!\\),',self.get_config(sections,key)) # don't split on \, vlist = [x for x in [ v.strip().replace(r'\,',',') for v in vlist ] if x !=''] #print("vlist("+key+"):"+unicode(vlist)) if not vlist: return default else: return vlist # used by adapters & writers, non-convention naming style def getConfigList(self, key, default=[]): return self.get_config_list(self.sectionslist, key, default) # Moved here for test_config. def get_generate_cover_settings(self): return make_generate_cover_settings(self.getConfig('generate_cover_settings')) def get_lineno(self,section,key=None): if key: return self.linenos.get(section+','+key,None) else: return self.linenos.get(section,None) ## Copied from Python 2.7 library so as to make read utf8. 
def read(self, filenames): """Read and parse a filename or a list of filenames. Files that cannot be opened are silently ignored; this is designed so that you can specify a list of potential configuration file locations (e.g. current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. Return list of successfully read files. """ if isinstance(filenames, basestring): filenames = [filenames] read_ok = [] for filename in filenames: try: fp = codecs.open(filename,encoding='utf-8') except IOError: continue self._read(fp, filename) fp.close() read_ok.append(filename) return read_ok ## Copied from Python 2.7 library so as to make it save linenos too. # # Regular expressions for parsing section headers and options. # def _read(self, fp, fpname): """Parse a sectioned setup file. The sections in setup file contains a title line at the top, indicated by a name in square brackets (`[]'), plus key/value options lines, indicated by `name: value' format lines. Continuations are represented by an embedded newline then leading whitespace. Blank lines, lines beginning with a '#', and just about everything else are ignored. """ cursect = None # None, or a dictionary optname = None lineno = 0 e = None # None, or an exception while True: line = fp.readline() if not line: break lineno = lineno + 1 # comment or blank line? if line.strip() == '' or line[0] in '#;': continue if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": # no leading whitespace continue # continuation line? if line[0].isspace() and cursect is not None and optname: value = line.strip() if value: cursect[optname] = "%s\n%s" % (cursect[optname], value) # a section header or option header? else: # is it a section header? mo = self.SECTCRE.match(line) if mo: sectname = mo.group('header') if sectname in self._sections: cursect = self._sections[sectname] elif sectname == DEFAULTSECT: cursect = self._defaults else: cursect = self._dict() cursect['__name__'] = sectname self._sections[sectname] = cursect self.linenos[sectname]=lineno # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: if not e: e = ParsingError(fpname) e.append(lineno, u'(Line outside section) '+line) #raise MissingSectionHeaderError(fpname, lineno, line) # an option line? else: mo = self.OPTCRE.match(line) # OPTCRE instead of # _optcre so it works # with python 2.6 if mo: optname, vi, optval = mo.group('option', 'vi', 'value') # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: if vi in ('=', ':') and ';' in optval: # ';' is a comment delimiter only if it follows # a spacing character pos = optval.find(';') if pos != -1 and optval[pos-1].isspace(): optval = optval[:pos] optval = optval.strip() # allow empty values if optval == '""': optval = '' optname = self.optionxform(optname.rstrip()) cursect[optname] = optval self.linenos[cursect['__name__']+','+optname]=lineno else: # a non-fatal parsing error occurred. set up the # exception but keep going. the exception will be # raised at the end of the file and will contain a # list of all bogus lines if not e: e = ParsingError(fpname) e.append(lineno, line) # if any parsing errors occurred, raise an exception if e: raise e def test_config(self): errors=[] ## too complicated right now to enforce ## get_valid_set_options() warnings on teststory and ## [storyUrl] sections. 
allow_all_sections_re = re.compile(r'^(teststory:(defaults|[0-9]+)|https?://.*)$') allowedsections = get_valid_sections() clude_metadata_re = re.compile(r'(add_to_)?(in|ex)clude_metadata_(pre|post)$') replace_metadata_re = re.compile(r'(add_to_)?replace_metadata$') from .story import set_in_ex_clude, make_replacements custom_columns_settings_re = re.compile(r'(add_to_)?custom_columns_settings$') generate_cover_settings_re = re.compile(r'(add_to_)?generate_cover_settings$') valdict = get_valid_set_options() for section in self.sections(): allow_all_section = allow_all_sections_re.match(section) if section not in allowedsections and not allow_all_section: errors.append((self.get_lineno(section),"Bad Section Name: [%s]"%section)) else: sitename = section.replace('www.','') if ':' in sitename: formatname = sitename[sitename.index(':')+1:] sitename = sitename[:sitename.index(':')] elif sitename in formatsections: formatname = sitename sitename = None elif sitename in othersections: formatname = None sitename = None ## check each keyword in section. Due to precedence ## order of sections, it's possible for bad lines to ## never be used. for keyword,value in self.items(section): try: ## check regex bearing keywords first. Each ## will raise exceptions if flawed. if clude_metadata_re.match(keyword): set_in_ex_clude(value) if replace_metadata_re.match(keyword): make_replacements(value) if generate_cover_settings_re.match(keyword): make_generate_cover_settings(value) # if custom_columns_settings_re.match(keyword): #custom_columns_settings: # cliches=>#acolumn # themes=>#bcolumn,a # timeline=>#ccolumn,n # "FanFiction"=>#collection if not allow_all_section: def make_sections(x): return '['+'], ['.join(x)+']' if keyword in valdict: (valsites,valformats,vals)=valdict[keyword] if valsites != None and sitename != None and sitename not in valsites: errors.append((self.get_lineno(section,keyword),"%s not valid in section [%s] -- only valid in %s sections."%(keyword,section,make_sections(valsites)))) if valformats != None and formatname != None and formatname not in valformats: errors.append((self.get_lineno(section,keyword),"%s not valid in section [%s] -- only valid in %s sections."%(keyword,section,make_sections(valformats)))) if vals != None and value not in vals: errors.append((self.get_lineno(section,keyword),"%s not a valid value for %s"%(value,keyword))) ## skipping output_filename_safepattern ## regex--not used with plugin and this isn't ## used with CLI/web yet. except Exception as e: errors.append((self.get_lineno(section,keyword),"Error:%s in (%s:%s)"%(e,keyword,value))) return errors #### methods for fetching. Moved here from base_adapter when #### *_filelist feature was added. @staticmethod def get_empty_cookiejar(): return cl.LWPCookieJar() @staticmethod def get_empty_pagecache(): return {} def get_cookiejar(self): return self.cookiejar def set_cookiejar(self,cj,save_cookiejar_file=None): self.cookiejar = cj self.save_cookiejar_file = save_cookiejar_file saveheaders = self.opener.addheaders self.opener = build_opener(HTTPCookieProcessor(self.cookiejar),GZipProcessor()) self.opener.addheaders = saveheaders def load_cookiejar(self,filename): ''' Needs to be called after adapter create, but before any fetchs are done. Takes file *name*. 
''' self.get_cookiejar().load(filename, ignore_discard=True, ignore_expires=True) def get_pagecache(self): return self.pagecache def set_pagecache(self,d,save_cache_file=None): self.save_cache_file = save_cache_file self.pagecache=d def _get_cachekey(self, url, parameters=None, headers=None): keylist=[url] if parameters != None: keylist.append('&'.join('{0}={1}'.format(key, val) for key, val in sorted(parameters.items()))) if headers != None: keylist.append('&'.join('{0}={1}'.format(key, val) for key, val in sorted(headers.items()))) return unicode('?'.join(keylist)) def _has_cachekey(self,cachekey): return self.use_pagecache and cachekey in self.get_pagecache() def _get_from_pagecache(self,cachekey): if self.use_pagecache: return self.get_pagecache().get(cachekey) else: return None def _set_to_pagecache(self,cachekey,data,redirectedurl): if self.use_pagecache: self.get_pagecache()[cachekey] = (data,ensure_text(redirectedurl)) if self.save_cache_file: with open(self.save_cache_file,'wb') as jout: pickle.dump(self.get_pagecache(),jout,protocol=2) if self.save_cookiejar_file: self.get_cookiejar().save(self.save_cookiejar_file) ## website encoding(s)--in theory, each website reports the character ## encoding they use for each page. In practice, some sites report it ## incorrectly. Each adapter has a default list, usually "utf8, ## Windows-1252" or "Windows-1252, utf8". The special value 'auto' ## will call chardet and use the encoding it reports if it has +90% ## confidence. 'auto' is not reliable. 1252 is a superset of ## iso-8859-1. Most sites that claim to be iso-8859-1 (and some that ## claim to be utf8) are really windows-1252. def _decode(self,data): if not hasattr(data,'decode'): ## py3 str() from pickle doesn't have .decode and is ## already decoded. return data decode = self.getConfigList('website_encodings', default=["utf8", "Windows-1252", "iso-8859-1"]) for code in decode: try: logger.debug("Encoding:%s"%code) errors=None if ':' in code: (code,errors)=code.split(':') if code == "auto": if not chardet: logger.info("chardet not available, skipping 'auto' encoding") continue detected = chardet.detect(data) #print(detected) if detected['confidence'] > float(self.getConfig("chardet_confidence_limit",0.9)): logger.debug("using chardet detected encoding:%s(%s)"%(detected['encoding'],detected['confidence'])) code=detected['encoding'] else: logger.debug("chardet confidence too low:%s(%s)"%(detected['encoding'],detected['confidence'])) continue if errors == 'ignore': # only allow ignore. return data.decode(code,errors='ignore') else: return data.decode(code) except Exception as e: logger.debug("code failed:"+code) logger.debug(e) pass logger.info("Could not decode story, tried:%s Stripping non-ASCII."%decode) try: # python2 return "".join([x for x in data if ord(x) < 128]) except TypeError: # python3 return "".join([chr(x) for x in data if x < 128]) def _progressbar(self): if self.getConfig('progressbar'): sys.stdout.write('.') sys.stdout.flush() def _do_reduce_zalgo(self,data): max_zalgo = int(self.getConfig('max_zalgo',-1)) if max_zalgo > -1: logger.debug("Applying max_zalgo:%s"%max_zalgo) try: return reduce_zalgo(data,max_zalgo) except Exception as e: logger.warning("reduce_zalgo failed(%s), continuing."%e) return data # Assumes application/x-www-form-urlencoded. parameters, headers are dict()s def _postUrl(self, url, parameters={}, headers={}, extrasleep=None, usecache=True): ''' When should cache be cleared or not used? logins... 
extrasleep is primarily for ffnet adapter which has extra sleeps. Passed into fetchs so it can be bypassed when cache hits. ''' url = quote_plus(ensure_binary(url),safe=';/?:@&=+$,%&#') if self.getConfig('force_https'): ## For developer testing only. url = url.replace("http:","https:") cachekey=self._get_cachekey(url, parameters, headers) if usecache and self._has_cachekey(cachekey) and not cachekey.startswith('file:'): logger.debug("#####################################\npagecache(POST) HIT: %s"%safe_url(cachekey)) data,redirecturl = self._get_from_pagecache(cachekey) return data logger.debug("#####################################\npagecache(POST) MISS: %s"%safe_url(cachekey)) if not cachekey.startswith('file:'): # don't sleep for file: URLs. self.do_sleep(extrasleep) ## Request assumes POST when data!=None. Also assumes data ## is application/x-www-form-urlencoded. if 'Content-type' not in headers: headers['Content-type']='application/x-www-form-urlencoded' if 'Accept' not in headers: headers['Accept']="text/html,*/*" # logger.debug("POST http login for SB xf2test %s"%url) # if "xf2test" in url: # import base64 # base64string = base64.encodestring(b"sbreview2019:Fs2PwuVE9").replace(b'\n', b'') # headers['Authorization']=b"Basic %s" % base64string # logger.debug("http login for SB xf2test") req = Request(url, data=ensure_binary(urlencode(parameters)), headers=headers) ## Specific UA because too many sites are blocking the default python UA. self.opener.addheaders = [('User-Agent', self.getConfig('user_agent')), ('X-Clacks-Overhead','GNU Terry Pratchett')] data = self._do_reduce_zalgo(self._decode(self.opener.open(req,None,float(self.getConfig('connect_timeout',30.0))).read())) self._progressbar() ## postURL saves data to the pagecache *after* _decode() while ## fetchRaw saves it *before* _decode()--because raw. self._set_to_pagecache(cachekey,data,url) return data def _fetchUrl(self, url, parameters=None, usecache=True, extrasleep=None): return self._fetchUrlOpened(url, parameters, usecache, extrasleep)[0] def _fetchUrlRawOpened(self, url, parameters=None, extrasleep=None, usecache=True, referer=None): ''' When should cache be cleared or not used? logins... extrasleep is primarily for ffnet adapter which has extra sleeps. Passed into fetchs so it can be bypassed when cache hits. ''' if not url.startswith('file:'): # file fetches fail on + for space url = quote_plus(ensure_binary(url),safe=';/?:@&=+$,%&#') if self.getConfig('force_https'): ## For developer testing only. url = url.replace("http:","https:") cachekey=self._get_cachekey(url, parameters) if usecache and self._has_cachekey(cachekey) and not cachekey.startswith('file:'): logger.debug("#####################################\npagecache(GET) HIT: %s"%safe_url(cachekey)) data,redirecturl = self._get_from_pagecache(cachekey) class FakeOpened: def __init__(self,data,url): self.data=data self.url=url def geturl(self): return self.url def read(self): return self.data return (data,FakeOpened(data,redirecturl)) logger.debug("#####################################\npagecache(GET) MISS: %s"%safe_url(cachekey)) # print(self.get_pagecache().keys()) if not cachekey.startswith('file:'): # don't sleep for file: URLs. self.do_sleep(extrasleep) ## Specific UA because too many sites are blocking the default python UA. headers = [('User-Agent', self.getConfig('user_agent')), ## starslibrary.net throws a "HTTP Error 403: Bad ## Behavior" over the X-Clacks-Overhead. Which ## both against standard and rather a dick-move. 
#('X-Clacks-Overhead','GNU Terry Pratchett'), ] if referer: ## hpfanficarchive.com complains about Referer: None. ## Could have defaulted to "" instead, but this way it's ## not present at all headers.append(('Referer',referer)) # logger.debug("GET http login for SB xf2test %s"%url) # if "xf2test" in url: # import base64 # base64string = base64.encodestring(b"sbreview2019:Fs2PwuVE9").replace(b'\n', b'') # headers.append(('Authorization',b"Basic %s" % base64string)) # logger.debug("http login for SB xf2test") self.opener.addheaders = headers if parameters != None: opened = self.opener.open(url, ensure_binary(urlencode(parameters)), float(self.getConfig('connect_timeout',30.0))) else: opened = self.opener.open(url, None, float(self.getConfig('connect_timeout',30.0))) self._progressbar() data = opened.read() ## postURL saves data to the pagecache *after* _decode() while ## fetchRaw saves it *before* _decode()--because raw. self._set_to_pagecache(cachekey,data,opened.url) return (data,opened) def set_sleep(self,val): logger.debug("\n===========\n set sleep time %s\n==========="%val) self.override_sleep = val def do_sleep(self,extrasleep=None): if extrasleep: time.sleep(float(extrasleep)) if self.override_sleep: time.sleep(float(self.override_sleep)) elif self.getConfig('slow_down_sleep_time'): time.sleep(float(self.getConfig('slow_down_sleep_time'))) # parameters is a dict() def _fetchUrlOpened(self, url, parameters=None, usecache=True, extrasleep=None, referer=None): excpt=None if url.startswith("file://"): # only one try for file:s. sleeptimes = [0] else: sleeptimes = [0, 0.5, 4, 9] for sleeptime in sleeptimes: time.sleep(sleeptime) try: (data,opened)=self._fetchUrlRawOpened(url, parameters=parameters, usecache=usecache, extrasleep=extrasleep, referer=referer) return (self._do_reduce_zalgo(self._decode(data)),opened) except HTTPError as he: excpt=he if he.code in (403,404,410): logger.debug("Caught an exception reading URL: %s Exception %s."%(unicode(safe_url(url)),unicode(he))) break # break out on 404 ## trekfanfiction.net has started returning the page, ## but with a 500 code. We can use the HTTPError as ## the 'opened' in such case. if he.code == 500 and 'trekfanfiction.net' in url: data = he.read() return (self._do_reduce_zalgo(self._decode(data)),he) except Exception as e: excpt=e logger.debug("Caught an exception reading URL: %s sleeptime(%s) Exception %s."%(unicode(safe_url(url)),sleeptime,unicode(e))) raise logger.debug("Giving up on %s" %safe_url(url)) logger.debug(excpt, exc_info=True) raise(excpt) # extended by adapter, writer and story for ease of calling configuration. 
class Configurable(object): def __init__(self, configuration): self.configuration = configuration ## use_pagecache() is on adapters--not all have been updated ## to deal with caching correctly if hasattr(self, 'use_pagecache'): self.configuration.use_pagecache = self.use_pagecache() def section_url_names(self,domain,section_url_f): return self.configuration.section_url_names(domain,section_url_f) def get_configuration(self): return self.configuration def is_lightweight(self): return self.configuration.lightweight def addUrlConfigSection(self,url): self.configuration.addUrlConfigSection(url) def isListType(self,key): return self.configuration.isListType(key) def isValidMetaEntry(self, key): return self.configuration.isValidMetaEntry(key) def getValidMetaList(self): return self.configuration.getValidMetaList() def hasConfig(self, key): return self.configuration.hasConfig(key) def has_config(self, sections, key): return self.configuration.has_config(sections, key) def getConfig(self, key, default=""): return self.configuration.getConfig(key,default) def get_config(self, sections, key, default=""): return self.configuration.get_config(sections,key,default) def getConfigList(self, key, default=[]): return self.configuration.getConfigList(key,default) def get_config_list(self, sections, key): return self.configuration.get_config_list(sections,key) def get_label(self, entry): if self.hasConfig(entry+"_label"): label=self.getConfig(entry+"_label") elif entry in titleLabels: label=titleLabels[entry] else: label=entry.title() return label def do_sleep(self,extrasleep=None): return self.configuration.do_sleep(extrasleep) def set_decode(self,decode): self.configuration.decode = decode def _postUrl(self, url, parameters={}, headers={}, extrasleep=None, usecache=True): return self.configuration._postUrl(url, parameters, headers, extrasleep, usecache) def _fetchUrlRawOpened(self, url, parameters=None, extrasleep=None, usecache=True, referer=None): return self.configuration._fetchUrlRawOpened(url, parameters, extrasleep, usecache, referer=referer) def _fetchUrlOpened(self, url, parameters=None, usecache=True, extrasleep=None, referer=None): return self.configuration._fetchUrlOpened(url, parameters, usecache, extrasleep, referer=referer) def _fetchUrl(self, url, parameters=None, usecache=True, extrasleep=None, referer=None): return self._fetchUrlOpened(url, parameters, usecache, extrasleep, referer=referer)[0] def _fetchUrlRaw(self, url, parameters=None, extrasleep=None, usecache=True, referer=None): return self._fetchUrlRawOpened(url, parameters, extrasleep, usecache, referer=referer)[0] # .? for AO3's ']' in param names. safe_url_re = re.compile(r'(?P<attr>(pass(word)?|name|login).?=)[^&]*(?P<amp>&|$)',flags=re.MULTILINE) def safe_url(url): # return url with password attr (if present) obscured. return re.sub(safe_url_re,r'\g<attr>XXXXXXXX\g<amp>',url)
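A quick illustrative check of the safe_url() helper defined above; the URL is made up for the example and only the regex behaviour comes from the module itself.

# Illustrative sketch only -- exercises safe_url() from this module.
if __name__ == '__main__':
    test_url = 'https://example.com/login.php?login=alice&password=hunter2&chapter=3'
    print(safe_url(test_url))
    # prints: https://example.com/login.php?login=XXXXXXXX&password=XXXXXXXX&chapter=3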
py
1a46dbb42d35467ae02b5384b3b8cd20f8c2a9a4
#!/usr/bin/env python3
"""
# scripts/manifest/lib/get_versions.py
# (c) 2020 Sam Caldwell.  See LICENSE.txt.
#
# get the versions list
#
"""


def get_versions(inp_block: dict) -> list:
    """
        Evaluate a dictionary and return a list of its keys,
        prefixed with the literal entry "all".
        :param inp_block: dict
        :return: list
    """
    return ["all"] + list(inp_block.keys())
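A short illustration of the helper above; the manifest keys are invented for the example (Python 3.7+ dict ordering is assumed, which matches the python3 shebang).

# Example with a hypothetical manifest block:
versions = get_versions({"1.0.0": {"arch": "amd64"}, "1.1.0": {"arch": "arm64"}})
assert versions == ["all", "1.0.0", "1.1.0"]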
py
1a46dbbd8a0f5e8c6bd0d17c448d6bcb10f06e6f
from django.test import TestCase from ..forms import SignUpForm class SignUpFormTest(TestCase): def test_form_has_fields(self): form = SignUpForm() expected = ['username', 'email', 'password1', 'password2', ] actual = list(form.fields) self.assertSequenceEqual(expected, actual)
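The form under test lives in ..forms and is not shown in this file. A minimal sketch of a SignUpForm that would satisfy the field-order assertion above is given below; it is an assumption for illustration, not the project's actual definition.

# Hypothetical sketch -- the project's real SignUpForm may differ.
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User


class SignUpForm(UserCreationForm):
    email = forms.EmailField(max_length=254, help_text='Required.')

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')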
py
1a46dd1bca40d4a61c77c273e31ec62b43918007
from collections import Counter, defaultdict from copy import deepcopy from random import Random import pytest from hypothesis import assume, event from hypothesis.stateful import ( Bundle, RuleBasedStateMachine, consumes, initialize, invariant, rule, ) from hypothesis.strategies import builds, composite, integers, random_module, randoms from raiden.constants import GENESIS_BLOCK_NUMBER from raiden.settings import DEFAULT_WAIT_BEFORE_LOCK_REMOVAL from raiden.tests.utils import factories from raiden.transfer import channel, node from raiden.transfer.events import EventPaymentSentFailed from raiden.transfer.mediated_transfer.events import SendLockedTransfer, SendSecretReveal from raiden.transfer.mediated_transfer.state_change import ( ActionInitInitiator, ReceiveSecretRequest, TransferDescriptionWithSecretState, ) from raiden.transfer.state import ChainState, PaymentNetworkState, TokenNetworkState from raiden.transfer.state_change import ( Block, ContractReceiveChannelNew, ContractReceiveChannelSettled, ) from raiden.utils import random_secret, sha3 from raiden.utils.typing import BlockNumber @composite def secret(draw): return draw(builds(random_secret)) def event_types_match(events, *expected_types): return Counter([type(event) for event in events]) == Counter(expected_types) def transferred_amount(state): return 0 if not state.balance_proof else state.balance_proof.transferred_amount partners = Bundle('partners') # shared bundle of ChainStateStateMachine and all mixin classes class ChainStateStateMachine(RuleBasedStateMachine): def __init__(self, address=None): self.address = address or factories.make_address() self.replay_path = False self.address_to_channel = dict() self.address_to_privkey = dict() self.our_previous_deposit = defaultdict(int) self.partner_previous_deposit = defaultdict(int) self.our_previous_transferred = defaultdict(int) self.partner_previous_transferred = defaultdict(int) self.our_previous_unclaimed = defaultdict(int) self.partner_previous_unclaimed = defaultdict(int) self.expected_expiry = dict() super().__init__() def new_channel(self): """Create a new partner address with private key and channel. The private key and channels are listed in the instance's dictionaries, the address is returned and should be added to the partners Bundle. 
""" partner_privkey, partner_address = factories.make_privkey_address() self.address_to_privkey[partner_address] = partner_privkey self.address_to_channel[partner_address] = factories.make_channel( our_balance=1000, partner_balance=1000, token_network_identifier=self.token_network_id, our_address=self.address, partner_address=partner_address, ) return partner_address def new_channel_with_transaction(self): partner_address = self.new_channel() channel_new_state_change = ContractReceiveChannelNew( transaction_hash=factories.make_transaction_hash(), token_network_identifier=self.token_network_id, channel_state=self.address_to_channel[partner_address], block_number=self.block_number, block_hash=factories.make_block_hash(), ) node.state_transition(self.chain_state, channel_new_state_change) return partner_address @initialize( target=partners, block_number=integers(min_value=GENESIS_BLOCK_NUMBER + 1), random=randoms(), random_seed=random_module(), ) def initialize(self, block_number, random, random_seed): self.random_seed = random_seed self.block_number = block_number self.block_hash = factories.make_block_hash() self.random = random self.private_key, self.address = factories.make_privkey_address() self.chain_state = ChainState( pseudo_random_generator=self.random, block_number=self.block_number, block_hash=self.block_hash, our_address=self.address, chain_id=factories.UNIT_CHAIN_ID, ) self.token_network_id = factories.make_address() self.token_id = factories.make_address() self.token_network_state = TokenNetworkState(self.token_network_id, self.token_id) self.payment_network_id = factories.make_payment_network_identifier() self.payment_network_state = PaymentNetworkState( self.payment_network_id, [self.token_network_state], ) self.chain_state.identifiers_to_paymentnetworks[ self.payment_network_id ] = self.payment_network_state return self.new_channel_with_transaction() def event(self, description): """ Wrapper for hypothesis' event function. hypothesis.event raises an exception when invoked outside of hypothesis context, so skip it when we are replaying a failed path. 
""" if not self.replay_path: event(description) @invariant() def monotonicity(self): """ Check monotonicity properties as given in Raiden specification """ for address, netting_channel in self.address_to_channel.items(): # constraint (1TN) assert netting_channel.our_total_deposit >= self.our_previous_deposit[address] assert netting_channel.partner_total_deposit >= self.partner_previous_deposit[address] self.our_previous_deposit[address] = netting_channel.our_total_deposit self.partner_previous_deposit[address] = netting_channel.partner_total_deposit # TODO add constraint (2TN) when withdrawal is implemented # constraint (3R) and (4R) our_transferred = transferred_amount(netting_channel.our_state) partner_transferred = transferred_amount(netting_channel.partner_state) our_unclaimed = channel.get_amount_unclaimed_onchain(netting_channel.our_state) partner_unclaimed = channel.get_amount_unclaimed_onchain( netting_channel.partner_state, ) assert our_transferred >= self.our_previous_transferred[address] assert partner_transferred >= self.partner_previous_transferred[address] assert ( our_unclaimed + our_transferred >= self.our_previous_transferred[address] + self.our_previous_unclaimed[address] ) assert ( partner_unclaimed + partner_transferred >= self.our_previous_transferred[address] + self.our_previous_unclaimed[address] ) self.our_previous_transferred[address] = our_transferred self.partner_previous_transferred[address] = partner_transferred self.our_previous_unclaimed[address] = our_unclaimed self.partner_previous_unclaimed[address] = partner_unclaimed @invariant() def channel_state_invariants(self): """ Check the invariants for the channel state given in the Raiden specification """ for netting_channel in self.address_to_channel.values(): our_state = netting_channel.our_state partner_state = netting_channel.partner_state our_transferred_amount = 0 if our_state.balance_proof: our_transferred_amount = our_state.balance_proof.transferred_amount assert our_transferred_amount >= 0 partner_transferred_amount = 0 if partner_state.balance_proof: partner_transferred_amount = partner_state.balance_proof.transferred_amount assert partner_transferred_amount >= 0 assert channel.get_distributable(our_state, partner_state) >= 0 assert channel.get_distributable(partner_state, our_state) >= 0 our_deposit = netting_channel.our_total_deposit partner_deposit = netting_channel.partner_total_deposit total_deposit = our_deposit + partner_deposit our_amount_locked = channel.get_amount_locked(our_state) our_balance = channel.get_balance(our_state, partner_state) partner_amount_locked = channel.get_amount_locked(partner_state) partner_balance = channel.get_balance(partner_state, our_state) # invariant (5.1R), add withdrawn amounts when implemented assert 0 <= our_amount_locked <= our_balance assert 0 <= partner_amount_locked <= partner_balance assert our_amount_locked <= total_deposit assert partner_amount_locked <= total_deposit our_transferred = partner_transferred_amount - our_transferred_amount netted_transferred = our_transferred + partner_amount_locked - our_amount_locked # invariant (6R), add withdrawn amounts when implemented assert 0 <= our_deposit + our_transferred - our_amount_locked <= total_deposit assert 0 <= partner_deposit - our_transferred - partner_amount_locked <= total_deposit # invariant (7R), add withdrawn amounts when implemented assert - our_deposit <= netted_transferred <= partner_deposit class InitiatorMixin: def __init__(self): super().__init__() self.used_secrets = set() 
self.processed_secret_requests = set() self.initiated = set() self.failing_path_2 = False def _action_init_initiator(self, transfer: TransferDescriptionWithSecretState): channel = self.address_to_channel[transfer.target] if transfer.secrethash not in self.expected_expiry: self.expected_expiry[transfer.secrethash] = self.block_number + 10 return ActionInitInitiator( transfer, [factories.route_from_channel(channel)], ) def _receive_secret_request(self, transfer: TransferDescriptionWithSecretState): secrethash = sha3(transfer.secret) return ReceiveSecretRequest( payment_identifier=transfer.payment_identifier, amount=transfer.amount, expiration=self.expected_expiry[transfer.secrethash], secrethash=secrethash, sender=transfer.target, ) def _new_transfer_description(self, target, payment_id, amount, secret): self.used_secrets.add(secret) return TransferDescriptionWithSecretState( payment_network_identifier=self.payment_network_id, payment_identifier=payment_id, amount=amount, token_network_identifier=self.token_network_id, initiator=self.address, target=target, secret=secret, ) def _invalid_authentic_secret_request(self, previous, action): result = node.state_transition(self.chain_state, action) if action.secrethash in self.processed_secret_requests or self._is_removed(previous): assert not result.events else: self.processed_secret_requests.add(action.secrethash) def _unauthentic_secret_request(self, action): result = node.state_transition(self.chain_state, action) assert not result.events def _available_amount(self, partner_address): netting_channel = self.address_to_channel[partner_address] return channel.get_distributable(netting_channel.our_state, netting_channel.partner_state) def _assume_channel_opened(self, action): if not self.failing_path_2: needed_channel = self.address_to_channel[action.transfer.target] assume(channel.get_status(needed_channel) == channel.CHANNEL_STATE_OPENED) def _is_removed(self, action): expiry = self.expected_expiry[action.transfer.secrethash] return self.block_number >= expiry + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL init_initiators = Bundle('init_initiators') @rule( target=init_initiators, partner=partners, payment_id=integers(min_value=1), amount=integers(min_value=1, max_value=100), secret=secret(), # pylint: disable=no-value-for-parameter ) def valid_init_initiator(self, partner, payment_id, amount, secret): assume(amount <= self._available_amount(partner)) assume(secret not in self.used_secrets) transfer = self._new_transfer_description(partner, payment_id, amount, secret) action = self._action_init_initiator(transfer) result = node.state_transition(self.chain_state, action) assert event_types_match(result.events, SendLockedTransfer) self.initiated.add(transfer.secret) self.expected_expiry[transfer.secrethash] = self.block_number + 10 return action @rule( partner=partners, payment_id=integers(min_value=1), excess_amount=integers(min_value=1), secret=secret(), # pylint: disable=no-value-for-parameter ) def exceeded_capacity_init_initiator(self, partner, payment_id, excess_amount, secret): amount = self._available_amount(partner) + excess_amount transfer = self._new_transfer_description(partner, payment_id, amount, secret) action = self._action_init_initiator(transfer) result = node.state_transition(self.chain_state, action) assert event_types_match(result.events, EventPaymentSentFailed) self.event('ActionInitInitiator failed: Amount exceeded') @rule( previous_action=init_initiators, partner=partners, payment_id=integers(min_value=1), amount=integers(min_value=1), ) 
def used_secret_init_initiator(self, previous_action, partner, payment_id, amount): assume(not self._is_removed(previous_action)) secret = previous_action.transfer.secret transfer = self._new_transfer_description(partner, payment_id, amount, secret) action = self._action_init_initiator(transfer) result = node.state_transition(self.chain_state, action) assert not result.events self.event('ActionInitInitiator failed: Secret already in use.') @rule(previous_action=init_initiators) def replay_init_initator(self, previous_action): assume(not self._is_removed(previous_action)) result = node.state_transition(self.chain_state, previous_action) assert not result.events @rule(previous_action=init_initiators) def valid_secret_request(self, previous_action): action = self._receive_secret_request(previous_action.transfer) self._assume_channel_opened(previous_action) result = node.state_transition(self.chain_state, action) if action.secrethash in self.processed_secret_requests: assert not result.events self.event('Valid SecretRequest dropped due to previous invalid one.') elif self._is_removed(previous_action): assert not result.events self.event('Ohterwise valid SecretRequest dropped due to expired lock.') else: assert event_types_match(result.events, SendSecretReveal) self.event('Valid SecretRequest accepted.') self.processed_secret_requests.add(action.secrethash) @rule(previous_action=init_initiators, amount=integers()) def wrong_amount_secret_request(self, previous_action, amount): assume(amount != previous_action.transfer.amount) self._assume_channel_opened(previous_action) transfer = deepcopy(previous_action.transfer) transfer.amount = amount action = self._receive_secret_request(transfer) self._invalid_authentic_secret_request(previous_action, action) @rule( previous_action=init_initiators, secret=secret(), # pylint: disable=no-value-for-parameter ) def secret_request_with_wrong_secrethash(self, previous_action, secret): assume(sha3(secret) != sha3(previous_action.transfer.secret)) self._assume_channel_opened(previous_action) transfer = deepcopy(previous_action.transfer) transfer.secret = secret action = self._receive_secret_request(transfer) return self._unauthentic_secret_request(action) @rule(previous_action=init_initiators, payment_identifier=integers()) def secret_request_with_wrong_payment_id(self, previous_action, payment_identifier): assume(payment_identifier != previous_action.transfer.payment_identifier) self._assume_channel_opened(previous_action) transfer = deepcopy(previous_action.transfer) transfer.payment_identifier = payment_identifier action = self._receive_secret_request(transfer) self._unauthentic_secret_request(action) class OnChainMixin: block_number: BlockNumber @rule(number=integers(min_value=1, max_value=50)) def new_blocks(self, number): events = list() for _ in range(number): block_state_change = Block( block_number=self.block_number + 1, gas_limit=1, block_hash=factories.make_keccak_hash(), ) result = node.state_transition(self.chain_state, block_state_change) events.extend(result.events) self.block_number += 1 @rule(target=partners) def open_channel(self): return self.new_channel_with_transaction() @rule(partner=consumes(partners)) def settle_channel(self, partner): channel = self.address_to_channel[partner] channel_settled_state_change = ContractReceiveChannelSettled( transaction_hash=factories.make_transaction_hash(), token_network_identifier=channel.token_network_identifier, channel_identifier=channel.identifier, block_number=self.block_number + 1, 
block_hash=factories.make_block_hash(), ) node.state_transition(self.chain_state, channel_settled_state_change) class InitiatorStateMachine(InitiatorMixin, ChainStateStateMachine): pass class OnChainStateMachine(OnChainMixin, ChainStateStateMachine): pass class MultiChannelInitiatorStateMachine(InitiatorMixin, OnChainMixin, ChainStateStateMachine): pass TestInitiator = InitiatorStateMachine.TestCase TestOnChain = OnChainStateMachine.TestCase TestMultiChannelInitiator = MultiChannelInitiatorStateMachine.TestCase def test_regression_malicious_secret_request_handled_properly(): state = InitiatorStateMachine() state.replay_path = True v1 = state.initialize(block_number=1, random=Random(), random_seed=None) v2 = state.valid_init_initiator( partner=v1, amount=1, payment_id=1, secret=b'\x00' * 32, ) state.wrong_amount_secret_request(amount=0, previous_action=v2) state.replay_init_initator(previous_action=v2) state.teardown() @pytest.mark.skip def test_try_secret_request_after_settle_channel(): state = MultiChannelInitiatorStateMachine() state.replay_path = True state.failing_path_2 = True v1 = state.initialize(block_number=1, random=Random(), random_seed=None) v2 = state.valid_init_initiator(amount=1, partner=v1, payment_id=1, secret=b'\x91' * 32) state.settle_channel(partner=v1) state.valid_secret_request(previous_action=v2) state.teardown()
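These state machines can take a while to explore. When running the generated TestCases locally, one way to bound the search is to attach a hypothesis settings object to the TestCase; the values below are arbitrary examples, not project defaults.

# Optional tuning sketch -- example values only.
from hypothesis import settings

InitiatorStateMachine.TestCase.settings = settings(
    max_examples=50,         # how many runs hypothesis generates
    stateful_step_count=20,  # max rule invocations per run
    deadline=None,           # individual state transitions may be slow
)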
py
1a46dd515ae27096540b16b3f8d4775685f8eec4
#!/usr/bin/env python import unittest from ngcccbase.services.blockchain import BlockchainInfoInterface, AbeInterface from ngcccbase.services.electrum import (ConnectionError, ElectrumInterface, EnhancedBlockchainState) class TestElectrum(unittest.TestCase): def setUp(self): self.server_url = "electrum.cafebitcoin.com" self.ei = ElectrumInterface(self.server_url, 50001) self.bcs = EnhancedBlockchainState(self.server_url, 50001) self.txhash = 'b1c68049c1349399fb867266fa146a854c16cd8a18a01d3cd7921ab9d5af1a8b' self.height = 277287 self.raw_tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4803273b04062f503253482f049fe1bd5208ee5f364f06648bae2e522cfabe6d6de0a3e574e400f64403ea10de4ff3bc0dcb42d49549e273a9faa4eac33b04734804000000000000000000000001cde08d95000000001976a91480ad90d403581fa3bf46086a91b2d9d4125db6c188ac00000000' self.address = '1CC3X2gu58d6wXUWMffpuzN9JAfTUWu4Kj' def test_connect(self): self.assertRaises(ConnectionError, ElectrumInterface, 'cnn.com', 50001) def test_get_utxo(self): self.assertEqual(self.ei.get_utxo(self.address), []) def test_get_version(self): self.assertTrue(float(self.ei.get_version()) >= 0.8) def test_get_raw_transaction(self): self.assertEqual(self.ei.get_raw_transaction(self.txhash, self.height), self.raw_tx) self.assertEqual(self.bcs.get_raw_transaction(self.txhash), self.raw_tx) def test_get_height(self): self.assertEqual(self.bcs.get_tx_block_height(self.txhash)[0], self.height) def test_get_tx(self): self.assertEqual(self.bcs.get_tx(self.txhash).hash, self.txhash) class TestBlockchain(unittest.TestCase): def setUp(self): self.address = '13ph5zPCBLeZcPph9FBZKeeyDjvU2tvcMY' self.txhash = 'd7b9a9da6becbf47494c27e913241e5a2b85c5cceba4b2f0d8305e0a87b92d98' self.address2 = '1CC3X2gu58d6wXUWMffpuzN9JAfTUWu4Kj' def test_blockchain(self): self.assertEqual(BlockchainInfoInterface.get_utxo(self.address)[0][0], self.txhash) self.assertEqual(BlockchainInfoInterface.get_utxo(self.address2), []) if __name__ == '__main__': unittest.main()
py
1a46dd8ffb260c0064551a974e03d88fb7859211
from lost.db import state # def add_user(data_man, user): # '''add user to user meta # Args: # db_man (obj): Project database manager. # user (obj): User object # ''' # user = model.User(idx=user.id, user_name=user.username, # first_name=user.first_name, last_name=user.last_name, # email=user.email) # data_man.save_obj(user) # def add_superuser(data_man, user): # '''add superuser to user meta # Args: # db_man (obj): Project database manager. # user (obj): User object # ''' # user = model.User(idx=user.id) # data_man.save_obj(user) # def update_user(data_man, user): # '''update existing user in user meta # Args: # db_man (obj): Project database manager. # user (obj): User object # ''' # usermeta = data_man.get_user_meta(user_id=user.id) # usermeta.first_name = user.first_name # usermeta.last_name = user.last_name # usermeta.user_name = user.username # usermeta.email = user.email # data_man.save_obj(usermeta) def release_user_annos(dbm, user_id): '''Release locked annos for a specific user. Args: dbm (object): DBMan object. user_id (int): ID of the user to release locked annos. ''' print('Was Here! User id is: {}'.format(user_id)) for anno_task in dbm.get_anno_task(state=state.AnnoTask.IN_PROGRESS): locked_annos = dbm.get_locked_img_annos(anno_task.idx) print('locked annos') print(locked_annos) for anno in locked_annos: print('UserID: {}, AnnoID: {}'.format(anno.user_id, anno.idx)) locked_user_annos = [anno for anno in locked_annos if anno.user_id == user_id] print(locked_user_annos) for anno in locked_user_annos: anno.state = state.Anno.UNLOCKED anno.timestamp_lock = None anno.user_id = None dbm.add(anno) locked_annos = dbm.get_locked_two_d_annos(anno_task.idx) print('locked 2d annos') print(locked_annos) for anno in locked_annos: print('UserID: {} AnnoID: {}'.format(anno.user_id, anno.idx)) locked_user_annos = [anno for anno in locked_annos if anno.user_id == user_id] print(locked_user_annos) for anno in locked_user_annos: anno.state = state.Anno.UNLOCKED anno.timestamp_lock = None anno.user_id = None dbm.add(anno) dbm.commit()
py
1a46ddf916e3f1c7719c47e708635d8dc95ffd39
import os import numpy as np import tifffile as tiff from PIL import Image from sklearn.model_selection import train_test_split from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.sampler import RandomSampler, SequentialSampler from torchvision.transforms import Compose, Resize, ToTensor from pytorch_unet.processing.augments import augmentations class DataTransformer(Dataset): """Dataset loader to pass to the pytorch DataLoader. Note: This is an abstract class representing a dataset. You don't have to write a function like this but it helps in applying transformations to the dataset and in supplying the dataset to the data loader, which is where all these transformations are actually applied. Arguments: train_filename (string) : is the path to the training data. labels_filename (string) : is the path to the labels for the training data. image_transform (tensor) : is data in the tensor format to be used to apply transformation. image_augmentation (tensor) : is a set of transformations to be applied to the data. Returns: the dataset. """ def __init__(self, train_filename, labels_filename, image_transform=None, image_augmentation=None): self.train_filename = train_filename self.labels_filename = labels_filename self.image_transform = image_transform self.image_augmentation = image_augmentation self.len_train = tiff.imread(self.train_filename).shape[0] def __len__(self): return self.len_train def _read_data(self, index): return Image.fromarray((tiff.imread(self.train_filename))[index]) def _read_labels(self, index): return Image.fromarray((tiff.imread(self.labels_filename))[index]) def __getitem__(self, index): if self.labels_filename is not None: images = self._read_data(index) labels = self._read_labels(index) if self.image_augmentation is not None: x = np.array(images) y = np.array(labels) data = {'input': x, 'mask': y} aug_data = self.image_augmentation(data) trans_images = aug_data['input'] trans_labels = aug_data['mask'] if self.image_augmentation is None: trans_images = self.image_transform(images) trans_labels = self.image_transform(labels) return [trans_images, trans_labels] if self.labels_filename is None: images = self._read_data(index) trans_images = self.image_transform(images) return trans_images def load_data(args): """Load data from here and return. Note: Compose Composes several transforms together and if augmentation is chosen you compose an additional bunch of transforms to be applied to the train data and you send this to the DataTransformer class which returns the data set that is used in the data loader. The data loader then takes in this dataset with a batch size and sampler. Sampler is defines the strategy to draw samples from the dataset. Here for training data random sampling is used and for validation sequential is used. You can also write a custom sampler class if you want. :param args: main_dir (string) : path to the main directory from the args. image_size (int) : size of the image to be resized. transform_prob (float) : probability to apply transformations on the data. batch_size (int) : batch size to be used in the data loader. :return: the train loader and validation loader to be used for training and validating. 
""" # get data set file path data_path = os.path.join(args.main_dir, 'data', 'train-volume.tif') labels_path = os.path.join(args.main_dir, 'data', 'train-labels.tif') # compose the transforms for the train set train_data = Compose([Resize(args.image_size), ToTensor()]) # choose between augmentations for train data if args.augment: train_augment = augmentations(args) train_transform = DataTransformer(data_path, labels_path, image_transform=train_data, image_augmentation=train_augment) else: # transforming the train data and returning a 4D tensor train_transform = DataTransformer(data_path, labels_path, image_transform=train_data, image_augmentation=None) # transform for validation data val_data = Compose([Resize(args.image_size), ToTensor()]) val_transform = DataTransformer(data_path, labels_path, image_transform=val_data, image_augmentation=None) # split the train and validation indices train_indices, validation_indices = train_test_split(range(len(train_transform)), test_size=0.15) # call the sampler for the train and validation data train_samples = RandomSampler(train_indices) validation_samples = SequentialSampler(validation_indices) # load train and validation data train_loader = DataLoader(train_transform, batch_size=args.batch_size, sampler=train_samples) val_loader = DataLoader(val_transform, batch_size=args.batch_size, sampler=validation_samples) return train_loader, val_loader
py
1a46de385fea81bc228009aff4d6adf6af788a39
""" Module containing raster blocks for spatial operations. """ import math from scipy import ndimage import numpy as np from osgeo import ogr from dask_geomodeling.utils import ( EPSG3857, EPSG4326, POLYGON, get_sr, Extent, get_dtype_min, get_footprint, get_index, shapely_transform, ) from dask_geomodeling.raster.reduction import reduce_rasters, check_statistic from .base import BaseSingle, RasterBlock from shapely.geometry import Point __all__ = ["Dilate", "Smooth", "MovingMax", "HillShade", "Place"] def expand_request_pixels(request, radius=1): """ Expand request by `radius` pixels. Returns None for non-vals requests or point requests. """ if request["mode"] != "vals": # do nothing with time and meta requests return None width, height = request["width"], request["height"] x1, y1, x2, y2 = request["bbox"] pwidth, pheight = x2 - x1, y2 - y1 if pwidth == 0 or pheight == 0: # cannot dilate a point request return None amount_x = pwidth / width * radius amount_y = pheight / height * radius new_request = request.copy() new_request["bbox"] = (x1 - amount_x, y1 - amount_x, x2 + amount_y, y2 + amount_y) new_request["width"] += 2 * radius new_request["height"] += 2 * radius return new_request def expand_request_meters(request, radius_m=1): """ Expand request by `radius_m` meters, rounded so that an integer number of pixels is added to all sides. Returns a tuple of: - new request with adapted bbox, width and height - the radius transformed to pixels as a (y, x) tuple of floats - the added margins as a (y, x) tuple of integers """ sr = get_sr(request["projection"]) bbox = request["bbox"] # throughout, variables in the projected unit ( = meters, mostly) are # suffixed by _m, in pixels by _px if sr.IsGeographic(): # expand geographic bbox in EPSG3857 extent_geom = Extent(bbox, sr) bbox = extent_geom.transformed(EPSG3857).bbox else: # most Projected projections are in meters, but to be sure: radius_m /= sr.GetLinearUnits() # compute the initial zoom factors: how much to expand the bbox to obtain # margins of exactly 'radius' (in meters) x1, y1, x2, y2 = bbox shape_m = y2 - y1, x2 - x1 if shape_m[0] > 0 and shape_m[1] > 0: # Resolution in pixels per meter: resolution = request["height"] / shape_m[0], request["width"] / shape_m[1] # How many pixels to add: radius_px = [radius_m * res for res in resolution] # How many pixels to add, rounded to integers: margins_px = [int(round(r)) for r in radius_px] # How many meters to add (based on rounded pixels): margins_m = [m / res for m, res in zip(margins_px, resolution)] else: # There is no resolution. Add MARGIN_THRESHOLD pixels to the request. radius_px = margins_px = [Smooth.MARGIN_THRESHOLD] * 2 # Expand the request with radius_m exactly. margins_m = [radius_m] * 2 # assemble the request new_request = request.copy() new_request["bbox"] = ( x1 - margins_m[1], y1 - margins_m[0], x2 + margins_m[1], y2 + margins_m[0], ) if sr.IsGeographic(): # transform back to original projection extent_proj = Extent(new_request["bbox"], EPSG3857) new_request["bbox"] = extent_proj.transformed(sr).bbox new_request["height"] += 2 * margins_px[0] new_request["width"] += 2 * margins_px[1] return new_request, radius_px class Dilate(BaseSingle): """ Perform spatial dilation on specific cell values. Cells with values in the supplied list are spatially dilated by one cell in each direction, including diagonals. Dilation is performed in the order of the values parameter. Args: store (RasterBlock): Raster to perform dilation on. values (list): Only cells with these values are dilated. 
Returns: RasterBlock where cells in values list are dilated. See also: https://en.wikipedia.org/wiki/Dilation_%28morphology%29 """ def __init__(self, store, values): values = np.asarray(values, dtype=store.dtype) super().__init__(store, values.tolist()) @property def values(self): return self.args[1] def get_sources_and_requests(self, **request): new_request = expand_request_pixels(request, radius=1) if new_request is None: # not an expandable request: do nothing return [(self.store, request)] else: return [(self.store, new_request), (self.values, None)] @staticmethod def process(data, values=None): if data is None or values is None or "values" not in data: return data original = data["values"] dilated = original.copy() for value in np.asarray(values, dtype=original.dtype): dilated[ndimage.binary_dilation(original == value)] = value dilated = dilated[:, 1:-1, 1:-1] return {"values": dilated, "no_data_value": data["no_data_value"]} class MovingMax(BaseSingle): """ Apply a spatial maximum filter to the data using a circular footprint. This can be used for visualization of sparse data. Args: store (RasterBlock): Raster to which the filter is applied size (integer): Diameter of the circular footprint. This should always be an odd number larger than 1. Returns: RasterBlock with maximum values inside the footprint of each input cell. """ def __init__(self, store, size): # round size to nearest odd integer size = int(2 * round((size - 1) / 2) + 1) if size < 3: raise ValueError("The size should be odd and larger than 1") super(MovingMax, self).__init__(store, size) @property def size(self): return self.args[1] def get_sources_and_requests(self, **request): size = self.size new_request = expand_request_pixels(request, radius=int(size // 2)) if new_request is None: # not an expandable request: do nothing return [(self.store, request)] else: return [(self.store, new_request), (size, None)] @staticmethod def process(data, size=None): if data is None or size is None or "values" not in data: return data radius = int(size // 2) footprint = get_footprint(size)[np.newaxis] # put absolute minimum on no data pixels array = data["values"].copy() minimum = get_dtype_min(array.dtype) no_data_mask = array == data["no_data_value"] array[no_data_mask] = minimum # apply maximum filter filtered = ndimage.maximum_filter(array, footprint=footprint) # replace absolute minimum with original fillvalue filtered[(filtered == minimum) & no_data_mask] = data["no_data_value"] # cut out the result filtered = filtered[:, radius:-radius, radius:-radius] return {"values": filtered, "no_data_value": data["no_data_value"]} class Smooth(BaseSingle): """ Smooth the values from a raster spatially using Gaussian smoothing. Args: store (RasterBlock): Raster to be smoothed size (number): The extent of the smoothing in meters. The 'sigma' value for the Gaussian kernal equals ``size / 3``. fill (number): 'no data' are replaced by this value during smoothing, defaults to 0. Returns: RasterBlock with spatially smoothed values. 
See Also: https://en.wikipedia.org/wiki/Gaussian_blur """ MARGIN_THRESHOLD = 6 def __init__(self, store, size, fill=0): for x in (size, fill): if not isinstance(x, (int, float)): raise TypeError("'{}' object is not allowed".format(type(x))) super(Smooth, self).__init__(store, size, fill) @property def size(self): return self.args[1] @property def fill(self): return self.args[2] def get_sources_and_requests(self, **request): if request["mode"] != "vals": # do nothing with time and meta requests return [(self.store, request)] new_request, size = expand_request_meters(request, self.size) # check how many pixels will be added by the request if any([s > self.MARGIN_THRESHOLD for s in size]): smooth_mode = "zoom" # rescale the size zoom = [new_request[x] / request[x] for x in ("height", "width")] size = [s / z for s, z in zip(size, zoom)] # request the original (not expanded) shape new_request["height"] = request["height"] new_request["width"] = request["width"] else: smooth_mode = "exact" process_kwargs = dict(smooth_mode=smooth_mode, fill=self.fill, size=size) return [(self.store, new_request), (process_kwargs, None)] @staticmethod def process(data, process_kwargs=None): if data is None or process_kwargs is None: return data smooth_mode = process_kwargs["smooth_mode"] size_px = process_kwargs["size"] fill = process_kwargs["fill"] # fill in nodata values values = data["values"].copy() no_data_value = data["no_data_value"] values[values == no_data_value] = fill # compute the sigma sigma = 0, size_px[0] / 3, size_px[1] / 3 ndimage.gaussian_filter( values, sigma, output=values, mode="constant", cval=fill ) # remove the margins if smooth_mode == "exact": my, mx = [int(round(s)) for s in size_px] values = values[:, my : values.shape[1] - my, mx : values.shape[2] - mx] else: _, ny, nx = values.shape zy, zx = [1 - 2 * size_px[0] / ny, 1 - 2 * size_px[1] / nx] values = ndimage.affine_transform( values, order=0, matrix=np.diag([1, zy, zx]), offset=[0, size_px[0], size_px[1]], ) return {"values": values, "no_data_value": no_data_value} class HillShade(BaseSingle): """ Calculate a hillshade from the raster values. Args: store (RasterBlock): Raster to which the hillshade algorithm is applied. altitude (number): Light source altitude in degrees, defaults to 45. azimuth (number): Light source azimuth in degrees, defaults to 315. fill (number): Fill value to be used for 'no data' values. Returns: Hillshaded raster See also: https://pro.arcgis.com/en/pro-app/tool-reference/3d-analyst/how-hillshade-works.htm """ def __init__(self, store, altitude=45, azimuth=315, fill=0): for x in (altitude, azimuth, fill): if not isinstance(x, (int, float)): raise TypeError("'{}' object is not allowed".format(type(x))) super(HillShade, self).__init__(store, float(altitude), float(azimuth), fill) @property def altitude(self): return self.args[1] @property def azimuth(self): return self.args[2] @property def fill(self): return self.args[3] @property def dtype(self): return np.dtype("u1") @property def fillvalue(self): return 256 # on purpose, it does not exist in bytes @staticmethod def process(data, process_kwargs=None): """ Adapted from: https://github.com/OSGeo/gdal/blob/2.0/gdal/apps/gdaldem.cpp#L481 Edges are not implemented, result clips one pixel from array. 
""" if process_kwargs is None: return data array = data["values"].copy() array[array == data["no_data_value"]] = process_kwargs["fill"] xres, yres = process_kwargs["resolution"] alt = math.radians(process_kwargs["altitude"]) az = math.radians(process_kwargs["azimuth"]) zsf = 1 / 8 # vertical scale factor square_zsf = zsf * zsf # gradient s0 = slice(None, None), slice(None, -2), slice(None, -2) s1 = slice(None, None), slice(None, -2), slice(1, -1) s2 = slice(None, None), slice(None, -2), slice(2, None) s3 = slice(None, None), slice(1, -1), slice(None, -2) s4 = slice(None, None), slice(1, -1), slice(1, -1) s5 = slice(None, None), slice(1, -1), slice(2, None) s6 = slice(None, None), slice(2, None), slice(None, -2) s7 = slice(None, None), slice(2, None), slice(1, -1) s8 = slice(None, None), slice(2, None), slice(2, None) # angle calculation y = np.empty(array.shape, dtype="f4") y[s4] = ( array[s0] + 2 * array[s1] + array[s2] - array[s6] - 2 * array[s7] - array[s8] ) / yres x = np.empty(array.shape, dtype="f4") x[s4] = ( array[s0] + 2 * array[s3] + array[s6] - array[s2] - 2 * array[s5] - array[s8] ) / xres with np.errstate(all="ignore"): xx_plus_yy = x * x + y * y aspect = np.arctan2(y, x) # shading cang = ( math.sin(alt) - math.cos(alt) * zsf * np.sqrt(xx_plus_yy) * np.sin(aspect - az) ) / np.sqrt(1 + square_zsf * xx_plus_yy) cang = cang[..., 1:-1, 1:-1] result = np.where(cang <= 0, 0, 255 * cang).astype("u1") return {"values": result, "no_data_value": 256} def get_sources_and_requests(self, **request): new_request = expand_request_pixels(request, radius=1) if new_request is None: # not an expandable request: do nothing return [(self.store, request)] # determine resolution bbox = request["bbox"] resolution = ( (bbox[2] - bbox[0]) / request["width"], (bbox[3] - bbox[1]) / request["height"], ) process_kwargs = dict( resolution=resolution, altitude=self.altitude, azimuth=self.azimuth, fill=self.fill, ) return [(self.store, new_request), (process_kwargs, None)] class Place(BaseSingle): """Place an input raster at given coordinates Note that if the store's projection is different from the requested one, the data will be reprojected before placing it at a different position. Args: store (RasterBlock): Raster that will be placed. place_projection (str): The projection in which this operation is done. This also specifies the projection of the ``anchor`` and ``coordinates`` args. anchor (list of 2 numbers): The anchor into the source raster that will be placed at given coordinates. coordinates (list of lists of 2 numbers): The target coordinates. The center of the bbox will be placed on each of these coordinates. statistic (str): What method to use to merge overlapping rasters. 
One of: {"last", "first", "count", "sum", "mean", "min", "max", "argmin", "argmax", "product", "std", "var", "p<number>"} Returns: RasterBlock with the source raster placed """ def __init__(self, store, place_projection, anchor, coordinates, statistic="last"): if not isinstance(store, RasterBlock): raise TypeError("'{}' object is not allowed".format(type(store))) try: get_sr(place_projection) except RuntimeError: raise ValueError( "'{}' is not a valid projection string".format(place_projection) ) anchor = list(anchor) if len(anchor) != 2: raise ValueError("Expected 2 numbers in the 'anchor' parameter") for x in anchor: if not isinstance(x, (int, float)): raise TypeError("'{}' object is not allowed".format(type(x))) if coordinates is None or len(coordinates) == 0: coordinates = [] else: coordinates = np.asarray(coordinates, dtype=float) if coordinates.ndim != 2 or coordinates.shape[1] != 2: raise ValueError( "Expected a list of lists of 2 numbers in the " "'coordinates' parameter" ) coordinates = coordinates.tolist() check_statistic(statistic) super().__init__(store, place_projection, anchor, coordinates, statistic) @property def place_projection(self): return self.args[1] @property def anchor(self): return self.args[2] @property def coordinates(self): return self.args[3] @property def statistic(self): return self.args[4] @property def projection(self): """The native projection of this block. Only returns something if the place projection equals the store projection""" store_projection = self.store.projection if store_projection is None: return if get_sr(self.place_projection).IsSame(get_sr(store_projection)): return store_projection @property def geo_transform(self): """The native geo_transform of this block Returns None if the store projection and place projections differ.""" if self.projection is not None: return self.store.geo_transform @property def extent(self): geometry = self.geometry if geometry is None: return if not geometry.GetSpatialReference().IsSame(EPSG4326): geometry = geometry.Clone() geometry.TransformTo(EPSG4326) x1, x2, y1, y2 = geometry.GetEnvelope() return x1, y1, x2, y2 @property def geometry(self): """Combined geometry in this block's native projection. 
""" store_geometry = self.store.geometry if store_geometry is None: return sr = get_sr(self.place_projection) if not store_geometry.GetSpatialReference().IsSame(sr): store_geometry = store_geometry.Clone() store_geometry.TransformTo(sr) _x1, _x2, _y1, _y2 = store_geometry.GetEnvelope() p, q = self.anchor P, Q = zip(*self.coordinates) x1, x2 = _x1 + min(P) - p, _x2 + max(P) - p y1, y2 = _y1 + min(Q) - q, _y2 + max(Q) - q return ogr.CreateGeometryFromWkt(POLYGON.format(x1, y1, x2, y2), sr) def get_sources_and_requests(self, **request): if request["mode"] != "vals": return ({"mode": request["mode"]}, None), (self.store, request) # transform the anchor and coordinates into the requested projection anchor = shapely_transform( Point(self.anchor), self.place_projection, request["projection"] ).coords[0] coordinates = [ shapely_transform( Point(coord), self.place_projection, request["projection"] ).coords[0] for coord in self.coordinates ] # transform the source's extent extent_geometry = self.store.geometry if extent_geometry is None: # no geometry means: no data return (({"mode": "null"}, None),) sr = get_sr(request["projection"]) if not extent_geometry.GetSpatialReference().IsSame(sr): extent_geometry = extent_geometry.Clone() extent_geometry.TransformTo(sr) xmin, xmax, ymin, ymax = extent_geometry.GetEnvelope() # compute the requested cellsize x1, y1, x2, y2 = request["bbox"] size_x = (x2 - x1) / request["width"] size_y = (y2 - y1) / request["height"] # point requests: never request the full source extent if size_x > 0 and size_y > 0: # check what the full source extent would require full_height = math.ceil((ymax - ymin) / size_y) full_width = math.ceil((xmax - xmin) / size_x) if full_height * full_width <= request["width"] * request["height"]: _request = request.copy() _request["width"] = full_width _request["height"] = full_height _request["bbox"] = ( xmin, ymin, xmin + full_width * size_x, ymin + full_height * size_y, ) process_kwargs = { "mode": "warp", "anchor": anchor, "coordinates": coordinates, "src_bbox": _request["bbox"], "dst_bbox": request["bbox"], "cellsize": (size_x, size_y), "statistic": self.statistic, } return [(process_kwargs, None), (self.store, _request)] # generate a new (backwards shifted) bbox for each coordinate sources_and_requests = [] filtered_coordinates = [] for _x, _y in coordinates: bbox = [ x1 + anchor[0] - _x, y1 + anchor[1] - _y, x2 + anchor[0] - _x, y2 + anchor[1] - _y, ] # check the overlap with the source's extent # Note that raster cells are defined [xmin, xmax) and (ymin, ymax] # so points precisely at xmax or ymin certainly do not have data. if bbox[0] >= xmax or bbox[1] > ymax or bbox[2] < xmin or bbox[3] <= ymin: continue filtered_coordinates.append((_x, _y)) _request = request.copy() _request["bbox"] = bbox sources_and_requests.append((self.store, _request)) if len(sources_and_requests) == 0: # No coordinates inside: we still need to return an array # of the correct shape. Send a time request to get the depth. 
_request = request.copy() _request["mode"] = "time" process_kwargs = { "mode": "empty", "dtype": self.dtype, "fillvalue": self.fillvalue, "width": request["width"], "height": request["height"], "statistic": self.statistic, } return [(process_kwargs, None), (self.store, _request)] process_kwargs = {"mode": "group", "statistic": self.statistic} return [(process_kwargs, None)] + sources_and_requests @staticmethod def process(process_kwargs, *multi): if process_kwargs["mode"] in {"meta", "time"}: return multi[0] if process_kwargs["mode"] == "null": return if process_kwargs["mode"] == "empty": data = multi[0] if data is None: return out_shape = ( len(data["time"]), process_kwargs["height"], process_kwargs["width"], ) out_no_data_value = process_kwargs["fillvalue"] out_dtype = process_kwargs["dtype"] stack = [] elif process_kwargs["mode"] == "group": # We have a bunch of arrays that are already shifted. Stack them. stack = [data for data in multi if data is not None] if len(stack) == 0: return # instead of returning nodata (because inputs are None) elif process_kwargs["mode"] == "warp": # There is a single 'source' raster that we are going to shift # multiple times into the result. The cellsize is already correct. data = multi[0] if data is None: return out_no_data_value = data["no_data_value"] source = data["values"] out_dtype = source.dtype # convert the anchor to pixels (indices inside 'source') anchor = process_kwargs["anchor"] src_bbox = process_kwargs["src_bbox"] size_x, size_y = process_kwargs["cellsize"] anchor_px = ( (anchor[0] - src_bbox[0]) / size_x, (anchor[1] - src_bbox[1]) / size_y, ) # compute the output shape x1, y1, x2, y2 = process_kwargs["dst_bbox"] coordinates = process_kwargs["coordinates"] dst_h = round((y2 - y1) / size_y) dst_w = round((x2 - x1) / size_x) src_d, src_h, src_w = source.shape out_shape = (src_d, dst_h, dst_w) # determine what indices in 'source' have data k, j, i = np.where(get_index(source, out_no_data_value)) # place the data on each coordinate stack = [] for x, y in coordinates: if i.size == 0: # shortcut: no data at all to place break # transform coordinate into pixels (indices in 'values') coord_px = (x - x1) / size_x, (y - y1) / size_y di = round(coord_px[0] - anchor_px[0]) dj = round(coord_px[1] - anchor_px[1]) # because of the y-axis inversion: dj is measured from the # other side of the array. if you draw it, you'll arrive at: dj = dst_h - src_h - dj if di <= -src_w or di >= dst_w or dj <= -src_h or dj >= dst_h: # skip as it would shift completely outside continue elif 0 <= di <= (dst_w - src_w) and 0 <= dj <= (dst_h - src_h): # complete place values = np.full(out_shape, out_no_data_value, out_dtype) values[k, j + dj, i + di] = source[k, j, i] stack.append({"values": values, "no_data_value": out_no_data_value}) else: # partial place i_s = i + di j_s = j + dj m = (i_s >= 0) & (j_s >= 0) & (i_s < dst_w) & (j_s < dst_h) if not m.any(): continue values = np.full(out_shape, out_no_data_value, out_dtype) values[k[m], j_s[m], i_s[m]] = source[k[m], j[m], i[m]] stack.append({"values": values, "no_data_value": out_no_data_value}) # merge the values_stack if len(stack) == 0: return { "values": np.full(out_shape, out_no_data_value, out_dtype), "no_data_value": out_no_data_value, } else: return reduce_rasters(stack, process_kwargs["statistic"])
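A sketch of composing these blocks and of the 'vals' request their methods expect. Here 'source' stands in for any existing RasterBlock instance (no real data source is constructed), the bbox and projection are arbitrary, and evaluating with get_data(**request) follows dask-geomodeling's documented local-evaluation pattern; treat the exact call as an assumption if your version differs.

# Sketch only -- 'source' is assumed to be an existing RasterBlock instance.
smoothed = Smooth(source, size=50)                     # ~50 m Gaussian blur
shaded = HillShade(smoothed, altitude=45, azimuth=315)

request = {
    "mode": "vals",                                    # keys mirrored from the code above
    "bbox": (155000.0, 463000.0, 156000.0, 464000.0),  # xmin, ymin, xmax, ymax
    "projection": "EPSG:28992",
    "width": 256,
    "height": 256,
}
result = shaded.get_data(**request)   # {"values": <uint8 array>, "no_data_value": 256}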
py
1a46de3bf50a2c124a481f47b994ddaa59ea4e18
import functools import itertools from collections import (OrderedDict, abc, deque) from operator import is_not from typing import (Any, Hashable, Iterable, MutableMapping, Sequence, Sized, Tuple, Type) from .functional import flatmap from .hints import (Domain, Map, Operator, Range) @functools.singledispatch def capacity(iterable: Iterable[Any]) -> int: """ Returns number of elements in iterable. >>> capacity(range(0)) 0 >>> capacity(range(10)) 10 """ counter = itertools.count() # order matters: if `counter` goes first, # then it will be incremented even for empty `iterable` deque(zip(iterable, counter), maxlen=0) return next(counter) @capacity.register(abc.Sized) def _(iterable: Sized) -> int: """ Returns number of elements in sized iterable. """ return len(iterable) def first(iterable: Iterable[Domain]) -> Domain: """ Returns first element of iterable. >>> first(range(10)) 0 """ try: return next(iter(iterable)) except StopIteration as error: raise ValueError('Argument supposed to be non-empty.') from error def last(iterable: Iterable[Domain]) -> Domain: """ Returns last element of iterable. >>> last(range(10)) 9 """ try: return deque(iterable, maxlen=1)[0] except IndexError as error: raise ValueError('Argument supposed to be non-empty.') from error def cut(iterable: Iterable[Domain], *, slice_: slice) -> Iterable[Domain]: """ Selects elements from iterable based on given slice. Slice fields supposed to be unset or non-negative since it is hard to evaluate negative indices/step for arbitrary iterable which may be potentially infinite or change previous elements if iterating made backwards. """ yield from itertools.islice(iterable, slice_.start, slice_.stop, slice_.step) def cutter(slice_: slice) -> Operator[Iterable[Domain]]: """ Returns function that selects elements from iterable based on given slice. >>> to_first_triplet = cutter(slice(3)) >>> list(to_first_triplet(range(10))) [0, 1, 2] >>> to_second_triplet = cutter(slice(3, 6)) >>> list(to_second_triplet(range(10))) [3, 4, 5] >>> cut_out_every_third = cutter(slice(0, None, 3)) >>> list(cut_out_every_third(range(10))) [0, 3, 6, 9] """ result = functools.partial(cut, slice_=slice_) result.__doc__ = ('Selects elements from iterable {slice}.' .format(slice=_slice_to_description(slice_))) return result def _slice_to_description(slice_: slice) -> str: """Generates human readable representation of `slice` object.""" slice_description_parts = [] start_is_specified = bool(slice_.start) if start_is_specified: slice_description_parts.append('starting from position {start}' .format(start=slice_.start)) step_is_specified = slice_.step is not None if step_is_specified: slice_description_parts.append('with step {step}' .format(step=slice_.step)) if slice_.stop is not None: stop_description_part = ('stopping at position {stop}' .format(stop=slice_.stop)) if start_is_specified or step_is_specified: stop_description_part = 'and ' + stop_description_part slice_description_parts.append(stop_description_part) return ' '.join(slice_description_parts) def chopper(size: int) -> Map[Iterable[Domain], Iterable[Sequence[Domain]]]: """ Returns function that splits iterable into chunks of given size. 
>>> in_three = chopper(3) >>> list(map(tuple, in_three(range(10)))) [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)] """ result = functools.partial(chop, size=size) result.__doc__ = ('Splits iterable into chunks of size {size}.\n' .format(size=size)) return result @functools.singledispatch def chop(iterable: Iterable[Domain], *, size: int) -> Iterable[Sequence[Domain]]: """ Splits iterable into chunks of given size. """ iterator = iter(iterable) yield from iter(lambda: tuple(itertools.islice(iterator, size)), ()) @chop.register(abc.Sequence) def _(iterable: Sequence[Domain], *, size: int) -> Iterable[Sequence[Domain]]: """ Splits sequence into chunks of given size. """ if not size: return for start in range(0, len(iterable), size): yield iterable[start:start + size] # deque do not support slice notation chop.register(deque, chop.registry[object]) in_two = chopper(2) in_three = chopper(3) in_four = chopper(4) def slide(iterable: Iterable[Domain], *, size: int) -> Iterable[Tuple[Domain, ...]]: """ Slides over iterable with window of given size. """ iterator = iter(iterable) initial = tuple(itertools.islice(iterator, size)) def shift(previous: Tuple[Domain, ...], element: Domain) -> Tuple[Domain, ...]: return previous[1:] + (element,) yield from itertools.accumulate(itertools.chain([initial], iterator), shift) def slider(size: int) -> Map[Iterable[Domain], Iterable[Tuple[Domain, ...]]]: """ Returns function that slides over iterable with window of given size. >>> pairwise = slider(2) >>> list(pairwise(range(10))) [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)] """ result = functools.partial(slide, size=size) result.__doc__ = ('Slides over iterable with window of size {size}.' .format(size=size)) return result pairwise = slider(2) triplewise = slider(3) quadruplewise = slider(4) def header(size: int) -> Operator[Iterable[Domain]]: """ Returns function that selects elements from the beginning of iterable. Resulted iterable will have size not greater than given one. >>> to_first_pair = header(2) >>> list(to_first_pair(range(10))) [0, 1] """ result = cutter(slice(size)) result.__doc__ = ('Selects {size} elements from the beginning of iterable.' .format(size=size)) return result @functools.singledispatch def trail(iterable: Iterable[Domain], *, size: int) -> Iterable[Domain]: """ Selects elements from the end of iterable. Resulted iterable will have size not greater than given one. """ return deque(iterable, maxlen=size) @trail.register(abc.Sequence) def _(iterable: Sequence[Domain], *, size: int) -> Sequence[Domain]: """ Selects elements from the end of sequence. Resulted sequence will have size not greater than given one. """ return iterable[-size:] if size else iterable[:size] # deque do not support slice notation trail.register(deque, trail.registry[object]) def trailer(size: int) -> Operator[Iterable[Domain]]: """ Returns function that selects elements from the end of iterable. Resulted iterable will have size not greater than given one. >>> to_last_pair = trailer(2) >>> list(to_last_pair(range(10))) [8, 9] """ result = functools.partial(trail, size=size) result.__doc__ = ('Selects {size} elements from the end of iterable.' .format(size=size)) return result def mapper(map_: Map) -> Map[Iterable[Domain], Iterable[Range]]: """ Returns function that applies given map to the each element of iterable. 
>>> to_str = mapper(str) >>> list(to_str(range(10))) ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] """ return functools.partial(map, map_) def flatmapper(map_: Map[Domain, Iterable[Range]] ) -> Map[Iterable[Domain], Iterable[Range]]: """ Returns function that applies map to the each element of iterable and flattens results. >>> relay = flatmapper(range) >>> list(relay(range(5))) [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] """ return functools.partial(flatmap, map_) Group = Tuple[Hashable, Iterable[Domain]] def group_by(iterable: Iterable[Domain], *, key: Map[Domain, Hashable], mapping_cls: Type[MutableMapping]) -> Iterable[Group]: """ Groups iterable elements based on given key. """ groups = mapping_cls() for element in iterable: groups.setdefault(key(element), []).append(element) yield from groups.items() def grouper(key: Map[Domain, Hashable], *, mapping_cls: Type[MutableMapping] = OrderedDict ) -> Map[Iterable[Domain], Iterable[Group]]: """ Returns function that groups iterable elements based on given key. >>> group_by_absolute_value = grouper(abs) >>> list(group_by_absolute_value(range(-5, 5))) [(5, [-5]), (4, [-4, 4]), (3, [-3, 3]), (2, [-2, 2]), (1, [-1, 1]), (0, [0])] >>> def modulo_two(number: int) -> int: ... return number % 2 >>> group_by_evenness = grouper(modulo_two) >>> list(group_by_evenness(range(10))) [(0, [0, 2, 4, 6, 8]), (1, [1, 3, 5, 7, 9])] """ return functools.partial(group_by, key=key, mapping_cls=mapping_cls) def expand(object_: Domain) -> Iterable[Domain]: """ Wraps object into iterable. >>> list(expand(0)) [0] """ yield object_ def flatten(iterable: Iterable[Iterable[Domain]]) -> Iterable[Domain]: """ Returns plain iterable from iterable of iterables. >>> list(flatten([range(5), range(10, 20)])) [0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] """ yield from itertools.chain.from_iterable(iterable) def interleave(iterable: Iterable[Iterable[Domain]]) -> Iterable[Domain]: """ Interleaves elements from given iterable of iterables. >>> list(interleave([range(5), range(10, 20)])) [0, 10, 1, 11, 2, 12, 3, 13, 4, 14, 15, 16, 17, 18, 19] """ iterators = itertools.cycle(map(iter, iterable)) while True: try: for iterator in iterators: yield next(iterator) except StopIteration: is_not_exhausted = functools.partial(is_not, iterator) iterators = itertools.cycle(itertools.takewhile(is_not_exhausted, iterators)) else: return
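# --- Illustrative usage (a hedged sketch, not part of the module above). ---
# It exercises only names defined in this file (`chopper`, `slider`, `grouper`)
# and restates behaviour already shown by their doctests.
if __name__ == '__main__':
    in_three_example = chopper(3)
    assert list(map(tuple, in_three_example(range(7)))) == [(0, 1, 2), (3, 4, 5), (6,)]

    pairwise_example = slider(2)
    assert list(pairwise_example(range(4))) == [(0, 1), (1, 2), (2, 3)]

    group_by_parity = grouper(lambda number: number % 2)
    assert list(group_by_parity(range(5))) == [(0, [0, 2, 4]), (1, [1, 3])]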
py
1a46df242117ff51572a224941f3113d11c8642c
from hippy.module.spl.funcs import *
import hippy.module.spl.arrayiter
import hippy.module.spl.iterator
py
1a46dffd08c6a07f0e4eadcf70c55b22d139dc22
"""rest_test URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from rest_framework import routers #DRF url router router = routers.DefaultRouter() import app1.views router.register(r'app1', app1.views.RecordViewSet, base_name='app1') import app2.views router.register(r'app2', app2.views.RecordViewSet, base_name='app2') urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^api/', include(router.urls)), ]
py
1a46e09b39997454fa3dd8f930fc6f39a8c212aa
from .fhirbase import fhirbase


class EnrollmentRequest(fhirbase):
    """
    This resource provides the insurance enrollment details to the insurer
    regarding a specified coverage.

    Args:
        resourceType: This is a EnrollmentRequest resource
        identifier: The Response business identifier.
        status: The status of the resource instance.
        created: The date when this resource was created.
        insurer: The Insurer who is target of the request.
        provider: The practitioner who is responsible for the services
            rendered to the patient.
        organization: The organization which is responsible for the services
            rendered to the patient.
        subject: Patient Resource.
        coverage: Reference to the program or plan identification,
            underwriter or payor.
    """

    __name__ = 'EnrollmentRequest'

    def __init__(self, dict_values=None):
        self.resourceType = 'EnrollmentRequest'
        # type: str
        # possible values: EnrollmentRequest

        self.status = None
        # type: str

        self.created = None
        # type: str

        self.insurer = None
        # reference to Reference: identifier

        self.provider = None
        # reference to Reference: identifier

        self.organization = None
        # reference to Reference: identifier

        self.subject = None
        # reference to Reference: identifier

        self.coverage = None
        # reference to Reference: identifier

        self.identifier = None
        # type: list
        # reference to Identifier

        if dict_values:
            self.set_attributes(dict_values)
            self.assert_type()

    def get_relationships(self):
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'subject'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'coverage'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'insurer'},

            {'parent_entity': 'Identifier',
             'parent_variable': 'object_id',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'identifier'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'organization'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'EnrollmentRequest',
             'child_variable': 'provider'},
        ]
py
1a46e0f5b07ca93dec054c15427569472eef876a
from mysql.connector import Timestamp
import datetime

__author__ = 'David'


class FacebookPost:
    facebookPostId = 0
    createdTime = datetime.datetime.now()
    text = ''
    facebookUserID = 0
    likeCount = 0

    def __init__(self, facebookPostId: int = 0,
                 createdTime: Timestamp = datetime.datetime.now(),
                 text: str = '', facebookUserID: int = 0, likeCount: int = 0):
        self.facebookPostId = facebookPostId
        self.createdTime = createdTime
        self.text = text
        self.facebookUserID = facebookUserID
        self.likeCount = likeCount
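# Hedged usage sketch (not part of the original file): FacebookPost is a plain
# data holder, so constructing one only stores the values passed in. Note that
# the `createdTime` default is evaluated once, at function-definition time, so
# every call that omits it receives the same timestamp.
post = FacebookPost(facebookPostId=1, text='hello world', facebookUserID=42, likeCount=3)
print(post.facebookPostId, post.text, post.likeCount)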
py
1a46e3590575f97a6f09015462cb8c73ce0e3eef
import unittest
from unittest.mock import patch, call
import argparse

from deba.commands.test import add_subcommand


class TestCommandTestCase(unittest.TestCase):
    @patch("builtins.print")
    def test_run(self, mock_print):
        parser = argparse.ArgumentParser("deba")
        subparsers = parser.add_subparsers()
        add_subcommand(subparsers)

        args = parser.parse_args(
            ["test", r"`*_df`.to_csv(r'.+\.csv')", r"my_df.to_csv('my_data.csv')"]
        )
        args.exec(
            None,
            args,
        )
        mock_print.assert_has_calls([call('Extracted "my_data.csv"')])

        args = parser.parse_args(
            ["test", r"`*_df`.to_csv(r'.+\.csv')", r"my_df.t_csv('my_data.csv')"]
        )
        with self.assertRaises(SystemExit) as cm:
            args.exec(
                None,
                args,
            )
        mock_print.assert_has_calls([call("Does not match")])
        self.assertEqual(
            cm.exception.args,
            (1,),
        )
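# Illustrative only -- an assumption about the matching idea the test above
# exercises, not deba's actual implementation. The pattern `*_df`.to_csv(r'.+\.csv')
# appears to glob the variable name with `*_df` and to treat the quoted argument
# as a regex target; a rough stand-alone equivalent of that behaviour:
import fnmatch
import re
from typing import Optional


def toy_extract(statement: str) -> Optional[str]:
    # Parse a simple "<name>.to_csv('<filename>')" call.
    match = re.fullmatch(r"(\w+)\.to_csv\('([^']+)'\)", statement)
    if match is None:
        return None
    name, filename = match.groups()
    # The variable name must glob-match `*_df` and the argument must match the regex.
    if fnmatch.fnmatch(name, "*_df") and re.fullmatch(r".+\.csv", filename):
        return filename
    return None


assert toy_extract("my_df.to_csv('my_data.csv')") == "my_data.csv"
assert toy_extract("my_df.t_csv('my_data.csv')") is None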
py
1a46e38cae654fafeba2649c2ec4a46dc35028e6
# Volatility # Copyright (C) 2007-2013 Volatility Foundation # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License Version 2 as # published by the Free Software Foundation. You may not use, modify or # distribute this program under any other version of the GNU General # Public License. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # """ @author: Andrew Case @license: GNU General Public License 2.0 @contact: [email protected] @organization: """ import sys, os import volatility.obj as obj import volatility.plugins.linux.common as linux_common import volatility.plugins.linux.mount as linux_mount import volatility.plugins.linux.flags as linux_flags import volatility.debug as debug import volatility.utils as utils class linux_find_file(linux_common.AbstractLinuxCommand): '''Lists and recovers files from memory''' def __init__(self, config, *args, **kwargs): linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs) config.add_option('FIND', short_option = 'F', default = None, help = 'file (path) to find', action = 'store', type = 'str') config.add_option('INODE', short_option = 'i', default = None, help = 'inode to write to disk', action = 'store', type = 'int') config.add_option('OUTFILE', short_option = 'O', default = None, help = 'output file path', action = 'store', type = 'str') config.remove_option("LIST_SBS") config.add_option('LISTFILES', short_option = 'L', default = None, help = 'list all files cached in memory', action = 'count') self.ptr_size = -1 def _walk_sb(self, dentry_param, parent): ret = [] if hasattr(dentry_param, "d_child"): walk_member = "d_child" else: walk_member = "d_u" for dentry in dentry_param.d_subdirs.list_of_type("dentry", walk_member): # corruption if dentry.v() == dentry_param.v(): continue if not dentry.d_name.name.is_valid(): continue inode = dentry.d_inode ivalid = False if inode and inode.is_valid(): if inode.i_ino == 0 or inode.i_ino > 100000000000: continue ivalid = True # do not use os.path.join # this allows us to have consistent paths from the user name = dentry.d_name.name.dereference_as("String", length = 255) new_file = parent + "/" + name ret.append((new_file, dentry)) if ivalid and inode.is_dir(): ret = ret + self._walk_sb(dentry, new_file) return ret def _get_sbs(self): ret = [] for (sb, _dev_name, path, fstype, _rr, _mnt_string) in linux_mount.linux_mount(self._config).calculate(): ret.append((sb, path)) return ret def walk_sbs(self, sbs = []): if sbs == []: linux_common.set_plugin_members(self) sbs = self._get_sbs() for (sb, sb_path) in sbs: if sb_path != "/": parent = sb_path else: parent = "" rname = sb.s_root.d_name.name.dereference_as("String", length = 255) if rname and len(rname) > 0: yield (sb, sb_path, sb_path, sb.s_root) for (file_path, file_dentry) in self._walk_sb(sb.s_root, parent): yield (sb, sb_path, file_path, file_dentry) def calculate(self): linux_common.set_plugin_members(self) find_file = self._config.FIND inode_addr = self._config.inode outfile = self._config.outfile listfiles = self._config.LISTFILES if listfiles: for (_, _, file_path, file_dentry) in self.walk_sbs(): yield 
(file_path, file_dentry.d_inode) elif find_file and len(find_file): for (_, _, file_path, file_dentry) in self.walk_sbs(): if file_path == find_file: yield (file_path, file_dentry.d_inode) break elif inode_addr and inode_addr > 0 and outfile and len(outfile) > 0: inode = obj.Object("inode", offset = inode_addr, vm = self.addr_space) try: f = open(outfile, "wb") except IOError, e: debug.error("Unable to open output file (%s): %s" % (outfile, str(e))) for page in self.get_file_contents(inode): f.write(page) f.close() else: debug.error("Incorrect command line parameters given.") def render_text(self, outfd, data): shown_header = 0 for (file_path, inode) in data: if not shown_header: self.table_header(outfd, [("Inode Number", "16"), ("Inode", "[addr]"), ("File Path", "")]) shown_header = 1 inode_num = inode.i_ino self.table_row(outfd, inode_num, inode, file_path) # from here down is code to walk the page cache and mem_map / mem_section page structs# def radix_tree_is_internal_node(self, ptr): if hasattr(ptr, "v"): ptr = ptr.v() return ptr & 3 == 1 def radix_tree_is_indirect_ptr(self, ptr): return ptr & 1 def radix_tree_indirect_to_ptr(self, ptr): return obj.Object("radix_tree_node", offset = ptr & ~1, vm = self.addr_space) def index_is_valid(self, root, index): node = root.rnode if self.radix_tree_is_internal_node(node): maxindex = (self.RADIX_TREE_MAP_SIZE << node.shift) - 1 else: maxindex = 0 if index > maxindex: node = None return node def is_sibling_entry(self, parent, node): parent_ptr = parent.slots.obj_offset node_ptr = node return (parent_ptr <= node_ptr) and \ (node_ptr < parent_ptr + (self.ptr_size * self.RADIX_TREE_MAP_SIZE)) def get_slot_offset(self, parent, slot): return (slot.v() - parent.slots.obj_offset) / self.ptr_size def radix_tree_descend(self, parent, node, index): offset = (index >> parent.shift) & self.RADIX_TREE_MAP_MASK ent_ptr = parent.slots.obj_offset + (self.ptr_size * offset) entry = obj.Object(theType="Pointer", targetType="unsigned long", offset = ent_ptr, vm = self.addr_space) if 1: # TODO - multi order if self.radix_tree_is_internal_node(entry): print "multi internal" if self.is_sibling_entry(parent, entry): print "sibling ptr" sibentry = self.radix_tree_indirect_to_ptr(entry) offset = self.get_slot_offset(parent, sibentry) entry = sibentry.v() node = entry return offset, node def find_slot_post_4_11(self, root, index): node = self.index_is_valid(root, index) if node == None: return None slot = root.rnode.v() while self.radix_tree_is_internal_node(node): if node == 1: return None else: parent = self.radix_tree_indirect_to_ptr(node) offset, node = self.radix_tree_descend(parent, node, index) slot_addr = parent.slots.obj_offset + (offset * self.ptr_size) slot = obj.Object(theType="Pointer", targetType="unsigned long", offset = slot_addr, vm = self.addr_space) slot = slot.v() return slot def radix_tree_lookup_slot(self, root, index): self.RADIX_TREE_MAP_SHIFT = 6 self.RADIX_TREE_MAP_SIZE = 1 << self.RADIX_TREE_MAP_SHIFT self.RADIX_TREE_MAP_MASK = self.RADIX_TREE_MAP_SIZE - 1 node = root.rnode if not node.is_valid(): return None post_4_11 = False if hasattr(node, "height"): height = node.height height = height & 0xfff # this check is needed as gcc seems to produce a 0 value when a shift value is negative # Python throws a backtrace in this situation though # setting to 0 will cause the later -1 to equal 0, and match the runtime behaviour of the kernel if height == 0: height = 1 elif hasattr(node, "path"): height = node.path height = height & 0xfff # this check is 
needed as gcc seems to produce a 0 value when a shift value is negative # Python throws a backtrace in this situation though # setting to 0 will cause the later -1 to equal 0, and match the runtime behaviour of the kernel if height == 0: height = 1 else: post_4_11 = True if post_4_11: slot = self.find_slot_post_4_11(root, index) else: if self.radix_tree_is_indirect_ptr(node) == 0: if index > 0: return None off = root.obj_offset + self.profile.get_obj_offset("radix_tree_root", "rnode") page = obj.Object("Pointer", offset = off, vm = self.addr_space) return page node = self.radix_tree_indirect_to_ptr(node) if hasattr(node, "shift"): shift = node.shift else: shift = (height - 1) * self.RADIX_TREE_MAP_SHIFT slot = -1 while 1: idx = (index >> shift) & self.RADIX_TREE_MAP_MASK slot = node.slots[idx] node = self.radix_tree_indirect_to_ptr(slot) shift = shift - self.RADIX_TREE_MAP_SHIFT height = height - 1 if height <= 0: break if slot == -1: return None return slot def SHMEM_I(self, inode): offset = self.profile.get_obj_offset("shmem_inode_info", "vfs_inode") return obj.Object("shmem_inode_info", offset = inode.obj_offset - offset, vm = self.addr_space) def find_get_page(self, inode, offset): page = self.radix_tree_lookup_slot(inode.i_mapping.page_tree, offset) #if not page: # FUTURE swapper_space support # print "no page" return page def get_page_contents(self, inode, idx): page_addr = self.find_get_page(inode, idx) if page_addr: page = obj.Object("page", offset = page_addr, vm = self.addr_space) phys_offset = page.to_paddr() if phys_offset > 0: phys_as = utils.load_as(self._config, astype = 'physical') data = phys_as.zread(phys_offset, 4096) else: data = "\x00" * 4096 else: data = "\x00" * 4096 return data # main function to be called, handles getting all the pages of an inode # and handles the last page not being page_size aligned def get_file_contents(self, inode): linux_common.set_plugin_members(self) if self.addr_space.profile.metadata.get('memory_model', '32bit') == "32bit": self.ptr_size = 4 else: self.ptr_size = 8 data = "" file_size = inode.i_size if not inode.is_valid() or file_size == None: raise StopIteration extra = file_size % 4096 idxs = file_size / 4096 if extra > 0: extra = 4096 - extra idxs = idxs + 1 if idxs > 1000000000: raise StopIteration for idx in range(0, idxs): data = self.get_page_contents(inode, idx) # this is to chop off any extra data on the last page if idx == idxs - 1: if extra > 0: extra = extra * -1 data = data[:extra] yield data
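# Illustrative only (not part of the Volatility plugin above): how a page-cache
# index is split into per-level slot offsets with RADIX_TREE_MAP_SHIFT = 6,
# which is the walk radix_tree_lookup_slot() performs level by level.
RADIX_TREE_MAP_SHIFT_EXAMPLE = 6
RADIX_TREE_MAP_MASK_EXAMPLE = (1 << RADIX_TREE_MAP_SHIFT_EXAMPLE) - 1


def example_slot_offsets(index, height):
    # Highest level first: each level consumes 6 bits of the index.
    return [(index >> ((level - 1) * RADIX_TREE_MAP_SHIFT_EXAMPLE)) & RADIX_TREE_MAP_MASK_EXAMPLE
            for level in range(height, 0, -1)]


# index 70000 in a height-3 tree -> offsets [17, 5, 48], since 70000 == 17*4096 + 5*64 + 48
assert example_slot_offsets(70000, 3) == [17, 5, 48]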
py
1a46e4ec8bb73af89cbfc50eba7bf46dd766bf40
from ..utils import Object


class PageBlockVerticalAlignmentTop(Object):
    """
    The content should be top-aligned

    Attributes:
        ID (:obj:`str`): ``PageBlockVerticalAlignmentTop``

    No parameters required.

    Returns:
        PageBlockVerticalAlignment

    Raises:
        :class:`telegram.Error`
    """
    ID = "pageBlockVerticalAlignmentTop"

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) -> "PageBlockVerticalAlignmentTop":
        return PageBlockVerticalAlignmentTop()
py
1a46e5825f722aed220773adf5a77aa7e082b27d
# As a developer, I want to use Python’s proper snake_case for variable names.
# As a developer, I want to create an AlarmClock class.
# As a developer, I want the AlarmClock class to have class instance variables to keep track
#   of the AlarmClock’s current time, whether the alarm is on or off, and the time the alarm
#   is set to. (You can use arbitrary strings to represent the time, it does not need to
#   accurately tell the current time or change over time).
# As a developer, I want the AlarmClock class to have a method to set (or change) the current
#   time and print to the console the current time.
# As a developer, I want the AlarmClock class to have a method to toggle the alarm on or off.
# As a developer, I want the AlarmClock class to have a method to set the current alarm time
#   and print to the console the current alarm time.
# As a developer, I want to import the AlarmClock class into main.py so I can instantiate it
#   as a new AlarmClock object and call methods on it.

from alarmclock import AlarmClock

alarm_clock = AlarmClock("5 PM", False, "")
print(alarm_clock.current_time)
alarm_clock.set_time()
alarm_clock.toggle_alarm()
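# A minimal sketch of what the imported alarmclock.AlarmClock might look like
# (hypothetical -- the real alarmclock module is not included in this file).
# It just follows the user stories above; the method name set_alarm_time and the
# default argument of set_time are assumptions, not taken from the original code.
class AlarmClockSketch:
    def __init__(self, current_time, alarm_on, alarm_time):
        self.current_time = current_time
        self.alarm_on = alarm_on
        self.alarm_time = alarm_time

    def set_time(self, new_time="6 PM"):
        # Set (or change) the current time and print it to the console.
        self.current_time = new_time
        print(f"The current time is {self.current_time}")

    def toggle_alarm(self):
        # Flip the alarm between on and off.
        self.alarm_on = not self.alarm_on
        print(f"The alarm is now {'on' if self.alarm_on else 'off'}")

    def set_alarm_time(self, new_alarm_time):
        # Set the alarm time and print it to the console.
        self.alarm_time = new_alarm_time
        print(f"The alarm is set for {self.alarm_time}")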
py
1a46e6191f72367f1297eeb153c15762deeeb1f0
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetMachineLearningComputeResult', 'AwaitableGetMachineLearningComputeResult', 'get_machine_learning_compute', ] @pulumi.output_type class GetMachineLearningComputeResult: """ Machine Learning compute object wrapped into ARM resource envelope. """ def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if identity and not isinstance(identity, dict): raise TypeError("Expected argument 'identity' to be a dict") pulumi.set(__self__, "identity", identity) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if sku and not isinstance(sku, dict): raise TypeError("Expected argument 'sku' to be a dict") pulumi.set(__self__, "sku", sku) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Specifies the resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def identity(self) -> Optional['outputs.IdentityResponse']: """ The identity of the resource. """ return pulumi.get(self, "identity") @property @pulumi.getter def location(self) -> Optional[str]: """ Specifies the location of the resource. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Specifies the name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> Any: """ Compute properties """ return pulumi.get(self, "properties") @property @pulumi.getter def sku(self) -> Optional['outputs.SkuResponse']: """ The sku of the workspace. """ return pulumi.get(self, "sku") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Contains resource tags defined as key/value pairs. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Specifies the type of the resource. 
""" return pulumi.get(self, "type") class AwaitableGetMachineLearningComputeResult(GetMachineLearningComputeResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetMachineLearningComputeResult( id=self.id, identity=self.identity, location=self.location, name=self.name, properties=self.properties, sku=self.sku, tags=self.tags, type=self.type) def get_machine_learning_compute(compute_name: Optional[str] = None, resource_group_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineLearningComputeResult: """ Machine Learning compute object wrapped into ARM resource envelope. :param str compute_name: Name of the Azure Machine Learning compute. :param str resource_group_name: Name of the resource group in which workspace is located. :param str workspace_name: Name of Azure Machine Learning workspace. """ __args__ = dict() __args__['computeName'] = compute_name __args__['resourceGroupName'] = resource_group_name __args__['workspaceName'] = workspace_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200501preview:getMachineLearningCompute', __args__, opts=opts, typ=GetMachineLearningComputeResult).value return AwaitableGetMachineLearningComputeResult( id=__ret__.id, identity=__ret__.identity, location=__ret__.location, name=__ret__.name, properties=__ret__.properties, sku=__ret__.sku, tags=__ret__.tags, type=__ret__.type)
py
1a46e7974691cca90d36d2ca1b0d845f05a3f847
""" Copyright 2017-present, Airbnb Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from botocore.exceptions import ClientError from mock import Mock, patch from nose.tools import assert_equal from stream_alert.classifier.clients.firehose import FirehoseClient class TestFirehoseClient(object): """Test class for FirehoseClient""" # pylint: disable=protected-access,no-self-use,attribute-defined-outside-init def setup(self): """Setup before each method""" with patch('boto3.client'): # patch to speed up unit tests slightly self._client = FirehoseClient() def teardown(self): """Teardown after each method""" FirehoseClient._ENABLED_LOGS.clear() @property def _sample_payloads(self): return [ Mock( log_schema_type='log_type_01_sub_type_01', parsed_records=[ { 'unit_key_01': 1, 'unit_key_02': 'test' }, { 'unit_key_01': 2, 'unit_key_02': 'test' } ] ), Mock( log_schema_type='log_type_02_sub_type_01', parsed_records=[ { 'date': 'January 01, 3005', 'unixtime': '32661446400', 'host': 'my-host.name.website.com', 'data': { 'super': 'secret' } } ] ) ] @classmethod def _sample_raw_records(cls, count=2): return [ {'key_{}'.format(i): 'value_{}'.format(i)} for i in range(count) ] def test_records_to_json_list(self): """FirehoseClient - Records JSON Lines""" records = self._sample_raw_records() expected_result = [ '{"key_0":"value_0"}\n', '{"key_1":"value_1"}\n' ] result = FirehoseClient._records_to_json_list(records) assert_equal(result, expected_result) def test_record_batches(self): """FirehoseClient - Record Batches""" records = self._sample_raw_records() expected_result = [ [ '{"key_0":"value_0"}\n', '{"key_1":"value_1"}\n' ] ] result = list(FirehoseClient._record_batches(records)) assert_equal(result, expected_result) @patch.object(FirehoseClient, '_log_failed') def test_record_batches_rec_too_large(self, failure_mock): """FirehoseClient - Record Batches, Record Too Large""" records = [ {'key': 'test' * 1000 * 1000} ] result = list(FirehoseClient._record_batches(records)) assert_equal(result, [[]]) failure_mock.assert_called_with(1) def test_record_batches_max_batch_count(self): """FirehoseClient - Record Batches, Max Batch Count""" records = self._sample_raw_records(count=501) result = list(FirehoseClient._record_batches(records)) assert_equal(len(result), 2) assert_equal(len(result[0]), 500) assert_equal(len(result[1]), 1) def test_record_batches_max_batch_size(self): """FirehoseClient - Record Batches, Max Batch Size""" records = [ {'key_{}'.format(i): 'test' * 100000} for i in range(10) ] result = list(FirehoseClient._record_batches(records)) assert_equal(len(result), 2) assert_equal(len(result[0]), 9) assert_equal(len(result[1]), 1) batch_size_01 = sum(len(rec) for rec in result[0]) batch_size_02 = sum(len(rec) for rec in result[1]) assert_equal(batch_size_01 < FirehoseClient.MAX_BATCH_SIZE, True) assert_equal(batch_size_02 < FirehoseClient.MAX_BATCH_SIZE, True) assert_equal(batch_size_01 + batch_size_02 > FirehoseClient.MAX_BATCH_SIZE, True) def test_sanitize_keys(self): """FirehoseClient - Sanitize Keys""" test_event = { 'date': 
'January 01, 3005', 'data': { 'super-duper': 'secret', 'do_not_sanitize_me': 1, 'example-key': 2, 'moar**data': 3, 'even.more': 4 } } expected_sanitized_event = { 'date': 'January 01, 3005', 'data': { 'super_duper': 'secret', 'do_not_sanitize_me': 1, 'example_key': 2, 'moar__data': 3, 'even_more': 4 } } sanitized_event = FirehoseClient.sanitize_keys(test_event) assert_equal(sanitized_event, expected_sanitized_event) def test_strip_successful_records(self): """FirehoseClient - Strip Successful Records""" batch = [{'test': 'success'}, {'other': 'failure'}, {'other': 'info'}] response = { 'FailedPutCount': 1, 'RequestResponses': [ {'RecordId': 'rec_id_01'}, {'ErrorCode': 10, 'ErrorMessage': 'foo'}, {'RecordId': 'rec_id_03'} ] } expected_batch = [{'other': 'failure'}] FirehoseClient._strip_successful_records(batch, response) assert_equal(batch, expected_batch) def test_categorize_records(self): """FirehoseClient - Categorize Records""" FirehoseClient._ENABLED_LOGS = { 'log_type_01_sub_type_01': 'log_type_01:sub_type_01', 'log_type_02_sub_type_01': 'log_type_02:sub_type_01' } payloads = self._sample_payloads result = self._client._categorize_records(payloads) expected_result = { 'log_type_01_sub_type_01': payloads[0].parsed_records, 'log_type_02_sub_type_01': payloads[1].parsed_records } assert_equal(dict(result), expected_result) def test_categorize_records_none_enabled(self): """FirehoseClient - Categorize Records, None Enabled""" payloads = self._sample_payloads result = self._client._categorize_records(payloads) assert_equal(dict(result), dict()) def test_categorize_records_subset_enabled(self): """FirehoseClient - Categorize Records, Subset Enabled""" FirehoseClient._ENABLED_LOGS = { 'log_type_01_sub_type_01': 'log_type_01:sub_type_01' } payloads = self._sample_payloads result = self._client._categorize_records(payloads) expected_result = { 'log_type_01_sub_type_01': payloads[0].parsed_records } assert_equal(dict(result), expected_result) @patch.object(FirehoseClient, '_log_failed') def test_finalize_failures(self, failure_mock): """FirehoseClient - Finalize, With Failures""" response = { 'FailedPutCount': 1, 'RequestResponses': [ {'RecordId': 'rec_id_01'}, {'ErrorCode': 10, 'ErrorMessage': 'foo'}, {'RecordId': 'rec_id_03'} ] } FirehoseClient._finalize(response, 'stream_name', 3) failure_mock.assert_called_with(1) @patch('logging.Logger.info') def test_finalize_success(self, log_mock): """FirehoseClient - Finalize, Success""" request_id = 'success_id' stream_name = 'stream_name' count = 3 response = { 'ResponseMetadata': { 'RequestId': request_id } } FirehoseClient._finalize(response, stream_name, count) log_mock.assert_called_with( 'Successfully sent %d message(s) to firehose %s with RequestId \'%s\'', count, stream_name, request_id ) def test_send_batch(self): """FirehoseClient - Send Batch""" records = [ '{"unit_key_02":"test","unit_key_01":1}\n', '{"unit_key_02":"test","unit_key_01":2}\n' ] stream_name = 'test_stream_name' expected_second_call = [ {'Data': records[1]} ] with patch.object(self._client, '_client') as boto_mock: boto_mock.put_record_batch.side_effect = [ { 'FailedPutCount': 1, 'RequestResponses': [ {'RecordId': 'rec_id_01'}, {'ErrorCode': 10, 'ErrorMessage': 'foo'} ] }, { 'FailedPutCount': 0, 'RequestResponses': [ {'RecordId': 'rec_id_02'}, ] } ] self._client._send_batch(stream_name, records) boto_mock.put_record_batch.assert_called_with( DeliveryStreamName=stream_name, Records=expected_second_call ) @patch('logging.Logger.exception') @patch.object(FirehoseClient, 
'MAX_BACKOFF_ATTEMPTS', 1) def test_send_batch_error(self, log_mock): """FirehoseClient - Send Batch, Error""" stream_name = 'test_stream_name' with patch.object(self._client, '_client') as boto_mock: error = ClientError({'Error': {'Code': 10}}, 'InvalidRequestException') boto_mock.put_record_batch.side_effect = error self._client._send_batch(stream_name, ['data']) log_mock.assert_called_with('Firehose request failed') def test_firehose_log_name(self): """FirehoseClient - Firehose Log Name""" expected_result = 'test_log_type_name' result = FirehoseClient.firehose_log_name('test*log.type-name') assert_equal(result, expected_result) def test_enabled_log_source(self): """FirehoseClient - Enabled Log Source""" log = 'enabled_log' FirehoseClient._ENABLED_LOGS = { log: 'enabled:log' } assert_equal(FirehoseClient.enabled_log_source(log), True) def test_enabled_log_source_false(self): """FirehoseClient - Enabled Log Source, False""" log = 'enabled_log' assert_equal(FirehoseClient.enabled_log_source(log), False) def test_load_enabled_sources(self): """FirehoseClient - Load Enabled Log Sources""" logs_config = { 'log_type_01:sub_type_01': {}, 'log_type_01:sub_type_02': {}, # This log type should is not enabled 'log_type_02:sub_type_01': {}, 'log_type_02:sub_type_02': {}, } firehose_config = { 'enabled_logs': [ 'log_type_01:sub_type_01', # One log for log_type_01 'log_type_02' # All of log_type_02 ] } expected_result = { 'log_type_01_sub_type_01': 'log_type_01:sub_type_01', 'log_type_02_sub_type_01': 'log_type_02:sub_type_01', 'log_type_02_sub_type_02': 'log_type_02:sub_type_02' } enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config) assert_equal(enabled_logs, expected_result) @patch('logging.Logger.error') def test_load_enabled_sources_invalid_log(self, log_mock): """FirehoseClient - Load Enabled Log Sources, Invalid Log Type""" logs_config = { 'log_type_01:sub_type_01': {}, 'log_type_01:sub_type_02': {} } log_type = 'log_type_03' firehose_config = { 'enabled_logs': [ log_type ] } enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config) assert_equal(enabled_logs, dict()) log_mock.assert_called_with( 'Enabled Firehose log %s not declared in logs.json', log_type ) @patch('logging.Logger.error') def test_load_enabled_sources_invalid_log_subtype(self, log_mock): """FirehoseClient - Load Enabled Log Sources, Invalid Log Sub-type""" logs_config = { 'log_type_01:sub_type_01': {} } log_type = 'log_type_01:sub_type_02' firehose_config = { 'enabled_logs': [ log_type ] } enabled_logs = FirehoseClient.load_enabled_log_sources(firehose_config, logs_config) assert_equal(enabled_logs, dict()) log_mock.assert_called_with( 'Enabled Firehose log %s not declared in logs.json', log_type ) def test_load_from_config(self): """FirehoseClient - Load From Config""" with patch('boto3.client'): # patch to speed up unit tests slightly client = FirehoseClient.load_from_config({'enabled': True}, None) assert_equal(isinstance(client, FirehoseClient), True) def test_load_from_config_disabled(self): """FirehoseClient - Load From Config, Disabled""" assert_equal(FirehoseClient.load_from_config({}, None), None) @patch.object(FirehoseClient, '_send_batch') def test_send(self, send_batch_mock): """FirehoseClient - Send""" FirehoseClient._ENABLED_LOGS = { 'log_type_01_sub_type_01': 'log_type_01:sub_type_01' } expected_batch = [ '{"unit_key_02":"test","unit_key_01":1}\n', '{"unit_key_02":"test","unit_key_01":2}\n' ] self._client.send(self._sample_payloads) 
send_batch_mock.assert_called_with( 'streamalert_data_log_type_01_sub_type_01', expected_batch )
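# Hedged sketch (an assumption, not StreamAlert's actual implementation): the
# expectations in test_records_to_json_list above imply that
# FirehoseClient._records_to_json_list serializes each record with compact JSON
# separators and appends a newline delimiter, roughly like this:
import json


def records_to_json_list_sketch(records):
    return [json.dumps(record, separators=(',', ':')) + '\n' for record in records]


assert records_to_json_list_sketch([{'key_0': 'value_0'}]) == ['{"key_0":"value_0"}\n']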
py
1a46e7dad7972b1a0ef3c107062a5be2326c668d
from numpy import nan
from pandas import DataFrame, concat, read_sql_table
from pandas._testing import assert_frame_equal

from df_to_azure import df_to_azure
from df_to_azure.db import auth_azure


# #############################
# #### APPEND METHOD TESTS ####
# #############################


def test_append():
    df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4.0, 5.0, nan]})

    # 1. we create a new dataframe
    df_to_azure(
        df=df,
        tablename="append",
        schema="test",
        method="create",
        wait_till_finished=True,
    )

    # 2. we append the same data
    df_to_azure(
        df=df,
        tablename="append",
        schema="test",
        method="append",
        wait_till_finished=True,
    )

    # 3. we test if the data is what we expect
    with auth_azure() as con:
        result = read_sql_table(table_name="append", con=con, schema="test")

    expected = concat([df, df], ignore_index=True)
    assert_frame_equal(result, expected)
py
1a46e81e264dd9e24532b0177329e335c1482ed0
from typing import Callable, List

from gaia_sdk.api.VariableRegistry import VariableRegistry
from gaia_sdk.graphql.request.enumeration.Order import Order
from gaia_sdk.graphql.request.enumeration.OrderByField import OrderByField
from gaia_sdk.graphql.request.enumeration.EdgeOrderByField import EdgeOrderByField
from gaia_sdk.graphql.request.enumeration.EdgeType import EdgeType


class Activation(list):

    def tmp(self):
        self.append(lambda x: "tmp")

    def render(self, registry: VariableRegistry):
        return " ".join(map(lambda e: e(registry), self))
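# Hedged usage sketch (not part of the original file): Activation is just a list
# of render callables, so render() joins whatever each callable returns with
# spaces. tmp()'s lambda ignores the registry argument, so passing None suffices.
example_activation = Activation()
example_activation.tmp()
assert example_activation.render(None) == "tmp"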
py
1a46e86dd7628102dbabccb63de3a9bb92741c0f
"""btre URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.conf import settings from django.conf.urls.static import static urlpatterns = [ path('', include('pages.urls', namespace='pages')), path('listings/', include('listings.urls', namespace='listings')), path('accounts/', include('accounts.urls', namespace='accounts')), path('contacts/', include('contacts.urls', namespace='contacts')), path('admin/', admin.site.urls), ] if settings.DEBUG: urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
py
1a46ea4771af887a50abec08714a9df00e7345fe
import itertools from typing import Any import torch from torch.autograd import DeviceType from torch.futures import Future from collections import defaultdict, namedtuple from operator import attrgetter from typing import Dict, List, Tuple, Optional import math try: # Available in Python >= 3.2 from contextlib import ContextDecorator except ImportError: import functools class ContextDecorator(object): # type: ignore[no-redef] def __enter__(self): raise NotImplementedError def __exit__(self, exc_type, exc_val, exc_tb): raise NotImplementedError def __call__(self, func): @functools.wraps(func) def wrapped(*args, **kwargs): with self: return func(*args, **kwargs) return wrapped class EventList(list): """A list of Events (for pretty printing)""" def __init__(self, *args, **kwargs): use_cuda = kwargs.pop('use_cuda', True) profile_memory = kwargs.pop('profile_memory', False) with_flops = kwargs.pop('with_flops', False) super(EventList, self).__init__(*args, **kwargs) self._use_cuda = use_cuda self._profile_memory = profile_memory self._tree_built = False self._with_flops = with_flops def _build_tree(self): self._populate_cpu_children() self._remove_dup_nodes() self._set_backward_stacktraces() self._tree_built = True def __str__(self): return self.table() def _remove_dup_nodes(self): while True: to_delete = [] for idx in range(len(self)): if (self[idx].cpu_parent is not None and self[idx].cpu_parent.name == self[idx].name and len(self[idx].cpu_parent.cpu_children) == 1): self[idx].cpu_parent.cpu_children = self[idx].cpu_children self[idx].cpu_parent.kernels = self[idx].kernels # lift kernels up for ch in self[idx].cpu_children: ch.cpu_parent = self[idx].cpu_parent to_delete.append(idx) if len(to_delete) == 0: break new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete] self.clear() self.extend(new_evts) def _populate_cpu_children(self): """Populates child events into each underlying FunctionEvent object. One event is a child of another if [s1, e1) is inside [s2, e2). Where s1 and e1 would be start and end of the child event's interval. And s2 and e2 start and end of the parent event's interval Example: In event list [[0, 10], [1, 3], [3, 4]] would have make [0, 10] be a parent of two other intervals. If for any reason two intervals intersect only partially, this function will not record a parent child relationship between then. """ # Some events can be async (i.e. start and end on different threads), # since it's generally undefined how to attribute children ranges to # async ranges, we do not use them when calculating nested ranges and stats sync_events = [evt for evt in self if not evt.is_async and evt.device_type == DeviceType.CPU] events = sorted( sync_events, key=attrgetter("thread"), ) # Group by both thread and node_id, so that events that happen to have # the same thread_id but are from different nodes aren't incorrectly # grouped together. threads = itertools.groupby( events, key=lambda event: (event.thread, event.node_id) ) # For each thread we keep a stack of current nested parents. # We maintain the invariant that each interval is a subset of all other # intervals lower in the stack. # # First we sort the intervals by their start time. Then we iterate over them. # Every time we see a new interval we remove several parents from # the top until we restore the invariant. Then parent child relationship # if recorded if the stack is not empty. 
# Finally we add new interval to the list # # Algorithm has O(N * log(N)) complexity where N is number of # intervals for thread_id, thread_events in threads: thread_events_ = sorted( thread_events, key=lambda event: [event.time_range.start, -event.time_range.end], ) current_events: List[FunctionEvent] = [] cur_end = 0 for event in thread_events_: while len(current_events) > 0: parent = current_events[-1] if event.time_range.start >= parent.time_range.end or \ event.time_range.end > parent.time_range.end: # this can't be a parent current_events.pop() else: parent.append_cpu_child(event) assert ( event.cpu_parent is None ), "There is already a CPU parent event for {}".format( event.key ) event.set_cpu_parent(parent) break current_events.append(event) def _set_backward_stacktraces(self): def bw_parent(evt): if evt is None: return None elif evt.scope == 1: # BACKWARD_FUNCTION return evt else: return bw_parent(evt.cpu_parent) fwd_stacks = {} for evt in self: if bw_parent(evt) is None and evt.stack is not None: t = (evt.sequence_nr, evt.thread) if t not in fwd_stacks: fwd_stacks[t] = evt.stack for evt in self: p = bw_parent(evt) if p is not None: assert p.fwd_thread is not None t = (p.sequence_nr, p.fwd_thread) if t in fwd_stacks: evt.stack = fwd_stacks[t] else: evt.stack = [] @property def self_cpu_time_total(self): return sum([event.self_cpu_time_total for event in self]) def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False): """Prints an EventList as a nicely formatted table. Args: sort_by (str, optional): Attribute used to sort entries. By default they are printed in the same order as they were registered. Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``, ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``, ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``. top_level_events_only(bool, optional): Boolean flag to determine the selection of events to display. If true, the profiler will only display events at top level like top-level invocation of python `lstm`, python `add` or other functions, nested events like low-level cpu/cuda ops events are omitted for profiler result readability. Returns: A string containing the table. """ return build_table( self, sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header, profile_memory=self._profile_memory, with_flops=self._with_flops, top_level_events_only=top_level_events_only) def export_chrome_trace(self, path): """Exports an EventList as a Chrome tracing tools file. The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL. Args: path (str): Path where the trace will be written. """ import os with open(path, 'w') as f: chrome_events = [] next_id = 0 # Use file IO over using json.dump since JSON dumping is very slow and # this technique is proven to give a 4x speedup. 
f.write("[") for evt in self: if evt.trace_name is None: continue f.write( '{"name": "%s", ' '"ph": "X", ' '"ts": %s, ' '"dur": %s, ' '"tid": %s, ' '"pid": "CPU functions", ' '"args": {}}, ' % ( evt.trace_name, evt.time_range.start, evt.time_range.elapsed_us(), evt.thread if not evt.is_remote else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "', ) ) for k in evt.kernels: # 's' and 'f' draw Flow arrows from # the CPU launch to the GPU kernel f.write('{"name": "%s", ' '"ph": "s", ' '"ts": %s, ' '"tid": %s, ' '"pid": "CPU functions", ' '"id": %s, ' '"cat": "cpu_to_cuda", ' '"args": {}}, ' % (evt.trace_name, evt.time_range.start, evt.thread, next_id)) # Note: use torch.profiler to get device kernel trace next_id += 1 # remove trailing whitespace and comma f.seek(f.tell() - 2, os.SEEK_SET) f.truncate() f.write("]") def supported_export_stacks_metrics(self): return ["self_cpu_time_total", "self_cuda_time_total"] def export_stacks(self, path: str, metric: str): if metric not in self.supported_export_stacks_metrics(): raise ValueError("metric should be one of: " + str(self.supported_export_stacks_metrics())) translate_table = str.maketrans(" ;\t\n", "____") with open(path, 'w') as f: for evt in self: if evt.stack and len(evt.stack) > 0: metric_value = getattr(evt, metric) if int(metric_value) > 0: stack_str = "" for entry in reversed(evt.stack): stack_str += entry.translate(translate_table) stack_str += ";" stack_str = stack_str[:-1] + " " + str(int(metric_value)) f.write(stack_str + "\n") def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0): """Averages all function events over their keys. Args: group_by_input_shapes: group entries by (event name, input shapes) rather than just event name. This is useful to see which input shapes contribute to the runtime the most and may help with size-specific optimizations or choosing the best candidates for quantization (aka fitting a roof line) group_by_stack_n: group by top n stack trace entries Returns: An EventList containing FunctionEventAvg objects. """ assert self._tree_built stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg) def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]: key = [str(event.key), str(event.node_id), str(event.device_type), str(event.is_legacy)] if group_by_input_shapes: key.append(str(event.input_shapes)) if group_by_stack_n > 0: key += event.stack[:group_by_stack_n] return tuple(key) for evt in self: stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt) avg_list = EventList( stats.values(), use_cuda=self._use_cuda, profile_memory=self._profile_memory, with_flops=self._with_flops) for evt in avg_list: evt.stack = evt.stack[:group_by_stack_n] if not group_by_input_shapes: evt.input_shapes = "" return avg_list def total_average(self): """Averages all events. Returns: A FunctionEventAvg object. """ total_stat = FunctionEventAvg() for evt in self: total_stat += evt total_stat.key = None total_stat.key = 'Total' return total_stat class profile(object): """Context manager that manages autograd profiler state and holds a summary of results. Under the hood it just records events of functions being executed in C++ and exposes those events to Python. You can wrap any code into it and it will only report runtime of PyTorch functions. Note: profiler is thread local and is automatically propagated into the async tasks Args: enabled (bool, optional): Setting this to False makes this context manager a no-op. 
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. Adds approximately 4us of overhead to each tensor operation. record_shapes (bool, optional): If shapes recording is set, information about input dimensions will be collected. This allows one to see which dimensions have been used under the hood and further group by them using prof.key_averages(group_by_input_shape=True). Please note that shape recording might skew your profiling data. It is recommended to use separate runs with and without shape recording to validate the timing. Most likely the skew will be negligible for bottom most events (in a case of nested function calls). But for higher level functions the total self cpu time might be artificially increased because of the shape collection. with_flops (bool, optional): If with_flops is set, the profiler will estimate the FLOPS (floating pointer operations per second) value using the operator's input shape and total time. This allows one to estimate the hardware performance. Currently, this option only works for the matrix multiplication and 2D convolution operators. profile_memory (bool, optional): track tensor memory allocation/deallocation. with_stack (bool, optional): record source information (file and line number) for the ops. use_kineto (bool, optional): experimental, enable profiling with Kineto profiler. use_cpu (bool, optional): profile CPU events; setting to ``False`` requires ``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling. .. warning: Enabling memory profiling or source attribution incurs additional profiler overhead .. warning: This context managers should not be called recursively, i.e. no nested instances are allowed .. warning: Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), one cannot use the profiler with ``use_cuda = True`` to benchmark DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, please use ``use_cuda = False`` or ``num_workers = 0``. Example: >>> x = torch.randn((1, 1), requires_grad=True) >>> with torch.autograd.profiler.profile() as prof: >>> for _ in range(100): # any normal python code, really! 
>>> y = x ** 2 >> y.backward() >>> # NOTE: some columns were removed for brevity >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) ----------------------------------- --------------- --------------- --------------- Name Self CPU total CPU time avg Number of Calls ----------------------------------- --------------- --------------- --------------- mul 32.048ms 32.048ms 200 pow 27.041ms 27.041ms 200 PowBackward0 9.727ms 55.483ms 100 torch::autograd::AccumulateGrad 9.148ms 9.148ms 100 torch::autograd::GraphRoot 691.816us 691.816us 100 ----------------------------------- --------------- --------------- --------------- """ def __init__( self, enabled=True, *, use_cuda=False, record_shapes=False, with_flops=False, profile_memory=False, with_stack=False, use_kineto=False, use_cpu=True): self.enabled: bool = enabled if not self.enabled: return self.use_cuda = use_cuda self.function_events = None self.entered = False self.record_shapes = record_shapes self.with_flops = with_flops self.record_shapes |= self.with_flops self.profile_memory = profile_memory self.with_stack = with_stack self.use_cpu = use_cpu self.kineto_results = None if not self.use_cpu: assert use_kineto, \ "Device-only events supported only with Kineto (use_kineto=True)" self.profiler_kind = None self.kineto_activities = set() if use_kineto: self.profiler_kind = torch.autograd.ProfilerState.KINETO if self.use_cpu: self.kineto_activities.add(torch.autograd.ProfilerActivity.CPU) if self.use_cuda: self.kineto_activities.add( # uses CUPTI torch.autograd.ProfilerActivity.CUDA) assert len(self.kineto_activities) > 0, \ "No activities specified for Kineto profiler" elif self.use_cuda: # legacy CUDA mode self.profiler_kind = torch.autograd.ProfilerState.CUDA else: self.profiler_kind = torch.autograd.ProfilerState.CPU if self.profiler_kind == torch.autograd.ProfilerState.KINETO: assert ( torch.autograd.kineto_available() ), """Requested Kineto profiling but Kineto is not available, make sure PyTorch is built with USE_KINETO=1""" def config(self): assert self.profiler_kind is not None return torch.autograd.ProfilerConfig( self.profiler_kind, self.record_shapes, self.profile_memory, self.with_stack, self.with_flops) def __enter__(self): if not self.enabled: return if self.entered: raise RuntimeError("profiler context manager is not reentrant") self.entered = True if self.kineto_activities: torch.autograd._prepare_profiler(self.config(), self.kineto_activities) torch.autograd._enable_profiler(self.config(), self.kineto_activities) else: torch.autograd._enable_profiler_legacy(self.config()) return self def _prepare_kineto_trace(self): assert self.kineto_activities self.entered = True torch.autograd._prepare_profiler(self.config(), self.kineto_activities) def _start_kineto_trace(self): assert self.kineto_activities torch.autograd._enable_profiler(self.config(), self.kineto_activities) def __exit__(self, exc_type, exc_val, exc_tb): if not self.enabled: return if self.kineto_activities: self.kineto_results = torch.autograd._disable_profiler() parsed_results = parse_kineto_results(self.kineto_results) else: records = torch.autograd._disable_profiler_legacy() parsed_results = parse_legacy_records(records) self.function_events = EventList( parsed_results, use_cuda=self.use_cuda, profile_memory=self.profile_memory, with_flops=self.with_flops) self.function_events._build_tree() return False def __repr__(self): if self.function_events is None: return '<unfinished torch.autograd.profile>' return repr(self.function_events) def 
__str__(self): if self.function_events is None: return '<unfinished torch.autograd.profile>' return str(self.function_events) def _check_finish(self): if self.function_events is None: raise RuntimeError("can't export a trace that didn't finish running") def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False): self._check_finish() assert self.function_events is not None return self.function_events.table( sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header, top_level_events_only=top_level_events_only ) table.__doc__ = EventList.table.__doc__ def export_chrome_trace(self, path): self._check_finish() if self.kineto_results is not None: self.kineto_results.save(path) else: assert self.function_events is not None return self.function_events.export_chrome_trace(path) export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__ def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): self._check_finish() assert self.function_events is not None, "Expected profiling results" assert self.with_stack, "export_stacks() requires with_stack=True" return self.function_events.export_stacks(path, metric) def key_averages(self, group_by_input_shape=False, group_by_stack_n=0): self._check_finish() assert self.function_events is not None, "Expected profiling results" return self.function_events.key_averages(group_by_input_shape, group_by_stack_n) key_averages.__doc__ = EventList.key_averages.__doc__ def total_average(self): self._check_finish() assert self.function_events is not None, "Expected profiling results" return self.function_events.total_average() total_average.__doc__ = EventList.total_average.__doc__ @property def self_cpu_time_total(self): """ Returns total time spent on CPU obtained as a sum of all self times across all the events. """ self._check_finish() assert self.function_events is not None return self.function_events.self_cpu_time_total class record_function(ContextDecorator): """Context manager/function decorator that adds a label to a block of Python code (or function) when running autograd profiler. It is useful when tracing the code profile. Args: name (str): Label assigned to the block of code. node_id (int): ID of node, for distributed profiling. Unset in non-distributed cases. Example: >>> x = torch.randn((1, 1), requires_grad=True) >>> with torch.autograd.profiler.profile() as prof: ... y = x ** 2 ... with torch.autograd.profiler.record_function("label-z"): # label the block ... z = y ** 3 ... y.backward() ... >>> # NOTE: some columns were removed for brevity >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) ----------------------------------- --------------- --------------- --------------- Name Self CPU total % CPU time avg Number of Calls ----------------------------------- --------------- --------------- --------------- pow 60.77% 47.470us 3 mul 21.73% 25.465us 2 PowBackward0 12.03% 121.891us 1 torch::autograd::AccumulateGrad 2.70% 6.324us 1 label-z 2.13% 12.421us 1 torch::autograd::GraphRoot 0.64% 1.503us 1 ----------------------------------- --------------- --------------- --------------- Self CPU time total: 234.344us CUDA time total: 0.000us """ def __init__(self, name: str): self.name: str = name # Whether or not we should run record function's end callbacks when exiting. self.run_callbacks_on_exit: bool = True # Stores underlying RecordFunction as a tensor. TODO: move to custom # class (https://github.com/pytorch/pytorch/issues/35026). 
self.handle: torch.Tensor = torch.zeros(1) def __enter__(self): self.handle = torch.ops.profiler._record_function_enter(self.name) return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any): if self.run_callbacks_on_exit: torch.ops.profiler._record_function_exit(self.handle) def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]: """ _call_end_callbacks_on_future is meant to be used for profiling async calls that return a future. Calling this function will extend recording beyond this scope, until the future is satisfied. It is useful for profiling the end to end time of asynchronous calls. This function should only be called once to attach the callback onto the future, and will throw if called multiple times. Args: fut: (torch._C.Future): future for which to schedule callback for. Returns: A future that completes with the value of the passed in future when the profiling callbacks have ran. """ # Throw if we have already attached a callback onto the future. if not self.run_callbacks_on_exit: raise RuntimeError("_call_end_callbacks_on_future can only be called once.") # We are scheduling to run this RecordFunction's end callbacks when the # passed in future completes, so don't run end callbacks on exit. self.run_callbacks_on_exit = False profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut) return profiled_future class emit_nvtx(object): """Context manager that makes every autograd operation emit an NVTX range. It is useful when running the program under nvprof:: nvprof --profile-from-start off -o trace_name.prof -- <regular command here> Unfortunately, there's no way to force nvprof to flush the data it collected to disk, so for CUDA profiling one has to use this context manager to annotate nvprof traces and wait for the process to exit before inspecting them. Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection e.g. in Python REPL. .. warning: This context manager should not be called recursively, i.e. at most one instance should be enabled at any given time. Args: enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op. Default: ``True``. record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping each autograd op will append information about the sizes of Tensor arguments received by that op, in the following format: ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]`` Non-tensor arguments will be represented by ``[]``. Arguments will be listed in the order they are received by the backend op. Please note that this order may not match the order in which those arguments were passed on the Python side. Also note that shape recording may increase the overhead of nvtx range creation. Example: >>> with torch.cuda.profiler.profile(): ... model(x) # Warmup CUDA memory allocator and profiler ... with torch.autograd.profiler.emit_nvtx(): ... model(x) **Forward-backward correlation** When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler, correlating each backward-pass op with the corresponding forward-pass op can be difficult. To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it generates. During the forward pass, each function range is decorated with ``seq=<N>``. 
``seq`` is a running counter, incremented each time a new backward Function object is created and stashed for backward. Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that if a backward Function object is created by this forward function, the backward object will receive sequence number N. During the backward pass, the top-level range wrapping each C++ backward Function's ``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq`` numbers in forward, you can track down which forward op created each backward Function. Any functions executed during the backward pass are also decorated with ``seq=<N>``. During default backward (with ``create_graph=False``) this information is irrelevant, and in fact, ``N`` may simply be 0 for all such functions. Only the top-level ranges associated with backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function objects with the earlier forward pass. **Double-backward** If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words, if you are setting up for a double-backward), each function's execution during backward is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects to be executed later during double-backward, just as the original functions in the forward pass did. The relationship between backward and double-backward is conceptually the same as the relationship between forward and backward: The functions still emit current-sequence-number-tagged ranges, the Function objects they create still stash those sequence numbers, and during the eventual double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq`` numbers, which can be compared to `seq` numbers from the backward pass. .. warning: The sequence number is thread-local, and some forward functions don't create an associated backward Function object (instead delegating that to sub-functions further down the call chain). For these reasons, the correspondence of stashed sequence numbers in backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully disambiguate which forward function created which backward Function object. You may need to make a judgment based on analytic knowledge of what the expected correspondence should be. """ def __init__(self, enabled=True, record_shapes=False): self.enabled = enabled self.entered = False self.record_shapes = record_shapes def __enter__(self): if not self.enabled: return if self.entered: raise RuntimeError("NVTX annotation context manager is not reentrant") self.entered = True torch.cuda.synchronize() torch.autograd._enable_profiler_legacy( torch.autograd.ProfilerConfig( torch.autograd.ProfilerState.NVTX, self.record_shapes, False, False, False) ) return self def __exit__(self, exc_type, exc_val, exc_tb): if not self.enabled: return torch.cuda.synchronize() torch.autograd._disable_profiler_legacy() return False def load_nvprof(path): """Opens an nvprof trace file and parses autograd annotations. 
Args: path (str): path to nvprof trace """ return EventList(parse_nvprof_trace(path)) ################################################################################ # FunctionEvent def format_time(time_us): """Defines how to format time in FunctionEvent""" US_IN_SECOND = 1000.0 * 1000.0 US_IN_MS = 1000.0 if time_us >= US_IN_SECOND: return '{:.3f}s'.format(time_us / US_IN_SECOND) if time_us >= US_IN_MS: return '{:.3f}ms'.format(time_us / US_IN_MS) return '{:.3f}us'.format(time_us) def format_time_share(time_us, total_time_us): """Defines how to format time in FunctionEvent""" if total_time_us == 0: assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us) return "NaN" return '{:.2f}%'.format(time_us * 100.0 / total_time_us) def format_memory(nbytes): """Returns a formatted memory size string""" KB = 1024 MB = 1024 * KB GB = 1024 * MB if (abs(nbytes) >= GB): return '{:.2f} Gb'.format(nbytes * 1.0 / GB) elif (abs(nbytes) >= MB): return '{:.2f} Mb'.format(nbytes * 1.0 / MB) elif (abs(nbytes) >= KB): return '{:.2f} Kb'.format(nbytes * 1.0 / KB) else: return str(nbytes) + ' b' def attr_formatter(name): return property(lambda self: format_time(getattr(self, name))) class FormattedTimesMixin(object): """Helpers for FunctionEvent and FunctionEventAvg. The subclass should define `*_time_total` and `count` attributes. """ cpu_time_str = attr_formatter('cpu_time') cuda_time_str = attr_formatter('cuda_time') cpu_time_total_str = attr_formatter('cpu_time_total') cuda_time_total_str = attr_formatter('cuda_time_total') self_cpu_time_total_str = attr_formatter('self_cpu_time_total') self_cuda_time_total_str = attr_formatter('self_cuda_time_total') @property def cpu_time(self): return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] @property def cuda_time(self): return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined] class Interval(object): def __init__(self, start, end): self.start = start self.end = end def elapsed_us(self): return self.end - self.start Kernel = namedtuple('Kernel', ['name', 'device', 'duration']) class FunctionEvent(FormattedTimesMixin): """Profiling information about a single function.""" def __init__( self, id, name, thread, start_us, end_us, fwd_thread=None, input_shapes=None, stack=None, scope=0, cpu_memory_usage=0, cuda_memory_usage=0, is_async=False, is_remote=False, sequence_nr=-1, node_id=-1, device_type=DeviceType.CPU, device_index=0, is_legacy=False, flops=None, trace_name=None): self.id: int = id self.node_id: int = node_id self.name: str = name self.trace_name: str = trace_name self.time_range: Interval = Interval(start_us, end_us) self.thread: int = thread self.fwd_thread: Optional[int] = fwd_thread self.kernels: List[Kernel] = [] self.count: int = 1 self.cpu_children: List[FunctionEvent] = [] self.cpu_parent: Optional[FunctionEvent] = None self.input_shapes: Tuple[int, ...] 
= input_shapes self.stack: List = stack self.scope: int = scope self.cpu_memory_usage: int = cpu_memory_usage self.cuda_memory_usage: int = cuda_memory_usage self.is_async: bool = is_async self.is_remote: bool = is_remote self.sequence_nr: int = sequence_nr self.device_type: DeviceType = device_type self.device_index: int = device_index self.is_legacy: bool = is_legacy self.flops: Optional[float] = flops def append_kernel(self, name, device, duration): assert self.device_type == DeviceType.CPU self.kernels.append(Kernel(name, device, duration)) def append_cpu_child(self, child): """Append a CPU child of type FunctionEvent. One is supposed to append only direct children to the event to have correct self cpu time being reported. """ assert(self.device_type == DeviceType.CPU) assert(isinstance(child, FunctionEvent)) assert(child.device_type == DeviceType.CPU) self.cpu_children.append(child) def set_cpu_parent(self, parent): """Set the immediate CPU parent of type FunctionEvent One profiling FunctionEvent should have only one CPU parent such that the child's range interval is completely inside the parent's. We use this connection to determine the event is from top-level op or not. """ assert(self.device_type == DeviceType.CPU) assert(isinstance(parent, FunctionEvent)) assert(parent.device_type == DeviceType.CPU) self.cpu_parent = parent # Note: async events don't have children, are not used when computing 'self' # metrics of other events, have only total cpu time @property def self_cpu_memory_usage(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 return self.cpu_memory_usage - sum( [child.cpu_memory_usage for child in self.cpu_children] ) @property def self_cuda_memory_usage(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 return self.cuda_memory_usage - sum( [child.cuda_memory_usage for child in self.cpu_children] ) @property def self_cpu_time_total(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 return self.cpu_time_total - sum( [child.cpu_time_total for child in self.cpu_children] ) @property def cuda_time_total(self): if self.is_async: return 0 if self.device_type == DeviceType.CPU: if not self.is_legacy: # account for the kernels in the children ops return (sum(kinfo.duration for kinfo in self.kernels) + sum(ch.cuda_time_total for ch in self.cpu_children)) else: # each legacy cpu events has a single (fake) kernel return sum(kinfo.duration for kinfo in self.kernels) else: assert self.device_type == DeviceType.CUDA return self.time_range.elapsed_us() @property def self_cuda_time_total(self): if self.is_async: return 0 if self.device_type == DeviceType.CPU: return self.cuda_time_total - \ sum([child.cuda_time_total for child in self.cpu_children]) else: assert(self.device_type == DeviceType.CUDA) return self.cuda_time_total @property def cpu_time_total(self): if self.device_type == DeviceType.CPU: return self.time_range.elapsed_us() else: return 0 @property def key(self): return self.name def __repr__(self): return ( '<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} ' 'cpu_children={} cuda_time={} name={} thread={} input_shapes={} ' 'cpu_memory_usage={} cuda_memory_usage={} is_async={} is_remote={} seq_nr={} is_legacy={}>'.format( self.id, self.name, self.device_type, self.node_id, self.cpu_time_str, self.time_range.start, self.time_range.end, str([child.id for child in self.cpu_children]), self.cuda_time_str, self.name, self.thread, str(self.input_shapes), self.cpu_memory_usage, 
self.cuda_memory_usage, self.is_async, self.is_remote, self.sequence_nr, self.is_legacy, ) ) class FunctionEventAvg(FormattedTimesMixin): """Used to average stats over multiple FunctionEvent objects.""" def __init__(self): self.key: Optional[str] = None self.count: int = 0 self.node_id: int = 0 self.is_async: bool = False self.is_remote: bool = False self.cpu_time_total: int = 0 self.cuda_time_total: int = 0 self.self_cpu_time_total: int = 0 self.self_cuda_time_total: int = 0 self.input_shapes: Optional[List[List[int]]] = None self.stack: Optional[List] = None self.scope: Optional[int] = None self.cpu_memory_usage: int = 0 self.cuda_memory_usage: int = 0 self.self_cpu_memory_usage: int = 0 self.self_cuda_memory_usage: int = 0 self.cpu_children: Optional[List[FunctionEvent]] = None self.cpu_parent: Optional[FunctionEvent] = None self.device_type: DeviceType = DeviceType.CPU self.is_legacy: bool = False self.flops: float = 0.0 def add(self, other): if self.key is None: # First function being recorded as part of FunctionEventAvg, propagate # fields. self.key = other.key self.node_id = other.node_id self.is_async = other.is_async self.is_remote = other.is_remote self.cpu_parent = other.cpu_parent self.cpu_children = other.cpu_children self.input_shapes = other.input_shapes self.stack = other.stack self.scope = other.scope self.device_type = other.device_type self.is_legacy = other.is_legacy assert isinstance(other, (FunctionEvent, FunctionEventAvg)) assert other.key == self.key self.cpu_time_total += other.cpu_time_total self.cuda_time_total += other.cuda_time_total self.self_cpu_time_total += other.self_cpu_time_total self.self_cuda_time_total += other.self_cuda_time_total self.cpu_memory_usage += other.cpu_memory_usage self.cuda_memory_usage += other.cuda_memory_usage self.self_cpu_memory_usage += other.self_cpu_memory_usage self.self_cuda_memory_usage += other.self_cuda_memory_usage self.count += other.count if self.flops is None: self.flops = other.flops elif other.flops is not None: self.flops += other.flops return self def __iadd__(self, other): return self.add(other) def __repr__(self): return ( '<FunctionEventAvg key={} self_cpu_time={} cpu_time={} ' ' self_cuda_time={} cuda_time={} input_shapes={} ' 'cpu_memory_usage={} cuda_memory_usage={}>'.format( self.key, self.self_cpu_time_total_str, self.cpu_time_str, self.self_cuda_time_total_str, self.cuda_time_str, str(self.input_shapes), self.cpu_memory_usage, self.cuda_memory_usage, ) ) ################################################################################ # Utilities class StringTable(defaultdict): def __missing__(self, key): # manage cases like 't' (demangled to 'unsigned short') separately, # for now simply check the length to avoid unexpected results for # the short sequences self[key] = torch._C._demangle(key) if len(key) > 1 else key return self[key] def filter_stack_entry(entry): filtered_entries = [ ("autograd/__init__", "_make_grads"), ("autograd/__init__", "backward"), ("torch/tensor", "backward"), ("_internal/common_utils", "prof_callable"), ("_internal/common_utils", "prof_func_call"), ("_internal/common_utils", "prof_meth_call"), ] return all([not (f[0] in entry and f[1] in entry) for f in filtered_entries]) def filter_name(name): # ignoring the following utility ops filtered_out_names = [ "profiler::_record_function_enter", "profiler::_record_function_exit", "aten::is_leaf", "aten::output_nr", "aten::_version", ] return name in filtered_out_names # Demangles and optionally rewrites the provided event name, # 
with_wildcard - whether to replace certain numbered event names # with a wildcard name to aggregate them together in the profiler table # output def rewrite_name(name, with_wildcard=False): string_table = StringTable() name = string_table[name] if with_wildcard: if name.startswith("ProfilerStep#"): name = "ProfilerStep*" return name # Parsing of kineto profiler events def parse_kineto_results(result): # result.events() has most of the events - PyTorch op-level and device-level events # result.legacy_events() has events not yet ported to kineto # (e.g. start/stop marks, tensor memory allocator events) # First, find __start_profile mark to get the absolute time of the start of the trace; # save memory allocation records start_record = None mem_records = [] for record in itertools.chain(*result.legacy_events()): if record.kind() == 'mark' and record.name() == '__start_profile': assert start_record is None start_record = record if record.kind() == 'memory_alloc': mem_records.append([record, False]) assert start_record is not None, "Invalid profiler output, __start_profile is missing" # Create and return FunctionEvent list function_events = [] cuda_corr_map: Dict[int, List[FunctionEvent]] = {} for kineto_event in result.events(): if filter_name(kineto_event.name()): continue rel_start_us = kineto_event.start_us() - start_record.start_us() rel_end_us = rel_start_us + kineto_event.duration_us() abs_end_us = kineto_event.start_us() + kineto_event.duration_us() cpu_memory_usage = 0 cuda_memory_usage = 0 if kineto_event.device_type() == DeviceType.CPU: # find the corresponding memory allocation events for mem_record in mem_records: if (mem_record[0].start_us() >= kineto_event.start_us() and mem_record[0].start_us() <= abs_end_us): cpu_memory_usage += mem_record[0].cpu_memory_usage() cuda_memory_usage += mem_record[0].cuda_memory_usage() mem_record[1] = True is_async = kineto_event.start_thread_id() != kineto_event.end_thread_id() fe = FunctionEvent( id=kineto_event.correlation_id(), name=rewrite_name(name=kineto_event.name(), with_wildcard=True), trace_name=rewrite_name(name=kineto_event.name(), with_wildcard=False), thread=kineto_event.start_thread_id(), start_us=rel_start_us, end_us=rel_end_us, fwd_thread=kineto_event.fwd_thread_id(), input_shapes=kineto_event.shapes(), stack=[entry for entry in kineto_event.stack() if filter_stack_entry(entry)], scope=kineto_event.scope(), cpu_memory_usage=cpu_memory_usage, cuda_memory_usage=cuda_memory_usage, is_async=is_async, sequence_nr=kineto_event.sequence_nr(), device_type=kineto_event.device_type(), device_index=kineto_event.device_index(), flops=kineto_event.flops(), ) function_events.append(fe) corr_id = kineto_event.linked_correlation_id() if corr_id > 0: if corr_id not in cuda_corr_map: cuda_corr_map[corr_id] = [] cuda_corr_map[corr_id].append(fe) # associate CUDA kernels and CUDA runtime (CPU) with CPU events for fe in function_events: if (fe.device_type == DeviceType.CPU and not fe.is_async and fe.id in cuda_corr_map): for f_evt in cuda_corr_map[fe.id]: if f_evt.device_type == DeviceType.CUDA: fe.append_kernel( f_evt.name, f_evt.device_index, f_evt.time_range.end - f_evt.time_range.start) elif f_evt.device_type == DeviceType.CPU: # make sure that 'thread' of a CPU Kineto (e.g. 
CUDA Runtime) event is associated # with the 'thread' of the corresponding linked PyTorch event to properly track # parents and children f_evt.thread = fe.thread # output top-level memory events for mem_record in mem_records: if not mem_record[1]: rel_start_us = mem_record[0].start_us() - start_record.start_us() fe = FunctionEvent( id=mem_record[0].handle(), name="[memory]", trace_name=None, # not outputting in the trace thread=mem_record[0].thread_id(), start_us=rel_start_us, end_us=rel_start_us, # no duration fwd_thread=mem_record[0].fwd_thread_id(), input_shapes=[], stack=[], scope=mem_record[0].scope(), cpu_memory_usage=mem_record[0].cpu_memory_usage(), cuda_memory_usage=mem_record[0].cuda_memory_usage(), is_async=False, sequence_nr=-1, device_type=DeviceType.CPU, device_index=0, ) function_events.append(fe) function_events.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end]) return function_events # Parsing of legacy profiler events def parse_legacy_records(thread_records): def get_record_key(record): """ Returns a tuple to be used by parse_legacy_records for correlating start and end records. """ return (record.handle(), record.node_id()) next_id = 0 start_record = None functions = [] record_stack = [] # '__start_profile' is not guaranteed to be first, so we must find it here for record in itertools.chain(*thread_records): name = record.name() if start_record is None and name == '__start_profile': start_record = record assert start_record is not None and not start_record.is_remote() for thread_record_list in thread_records: # accumulated memory allocations per handle cpu_memory_allocs = {} cuda_memory_allocs = {} # ranges per handle range_starts = {} filtered_handles = set() prev_record = None for record in thread_record_list: record_key = get_record_key(record) if (filter_name(record.name()) or record_key in filtered_handles): filtered_handles.add(record_key) continue if record.kind() == 'push': # workaround to reduce double logging from operator # wrappers and redispatch if prev_record is not None: duplicate = ( prev_record.name() == record.name() and prev_record.kind() == record.kind() and prev_record.node_id() == record.node_id() ) if duplicate: filtered_handles.add(record_key) continue range_starts[record_key] = record cpu_memory_allocs[record_key] = 0 cuda_memory_allocs[record_key] = 0 elif record.kind() == 'pop': assert ( record_key in range_starts ), """Expected record with key {} to exist in range_starts. 
This means that the pop event did not have a corresponding push.""".format( record_key ) start = range_starts[record_key] cpu_memory_usage = cpu_memory_allocs[record_key] cuda_memory_usage = cuda_memory_allocs[record_key] is_async = start.thread_id() != record.thread_id() is_remote_event = record.is_remote() start_flops = start.flops() fe = FunctionEvent( id=record.handle(), node_id=record.node_id(), name=rewrite_name(name=start.name(), with_wildcard=True), trace_name=rewrite_name(name=start.name(), with_wildcard=False), thread=start.thread_id(), start_us=start_record.cpu_elapsed_us(start), end_us=start_record.cpu_elapsed_us(record), fwd_thread=start.fwd_thread_id(), input_shapes=start.shapes(), stack=[entry for entry in start.stack() if filter_stack_entry(entry)], scope=start.scope(), cpu_memory_usage=cpu_memory_usage, cuda_memory_usage=cuda_memory_usage, is_async=is_async, is_remote=is_remote_event, sequence_nr=start.sequence_nr(), device_type=DeviceType.CPU, is_legacy=True, flops=start_flops, ) # note: async events have only cpu total time if not is_async and start.has_cuda(): duration = start.cuda_elapsed_us(record) if duration > 0: fe.append_kernel( start.name(), start.device(), duration) functions.append(fe) del range_starts[record_key] del cpu_memory_allocs[record_key] del cuda_memory_allocs[record_key] elif record.kind() == 'memory_alloc': num_open_handles_cpu = len(cpu_memory_allocs) num_open_handles_cuda = len(cuda_memory_allocs) assert num_open_handles_cpu == num_open_handles_cuda for handle in cpu_memory_allocs.keys(): cpu_memory_allocs[handle] += record.cpu_memory_usage() for handle in cuda_memory_allocs.keys(): cuda_memory_allocs[handle] += record.cuda_memory_usage() if num_open_handles_cpu == 0: # output event as a top-level memory event fe = FunctionEvent( id=0, name="[memory]", trace_name=None, thread=0, start_us=0, end_us=0, stack=[], cpu_memory_usage=record.cpu_memory_usage(), cuda_memory_usage=record.cuda_memory_usage(), is_legacy=True, ) functions.append(fe) prev_record = record # Sort functions by start time then by end time ascending. # This ensures that--in the case of nested events which # have the same start time (which may happen due to the # granularity of the given clock tick)--we always show # the outermost nested call first. 
This adds stability # in how FunctionEvents appear functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end]) return functions ################################################################################ # CUDA checkpoints class EnforceUnique(object): """Raises an error if a key is seen more than once.""" def __init__(self): self.seen = set() def see(self, *key): if key in self.seen: raise RuntimeError('duplicate key: ' + str(key)) self.seen.add(key) def parse_nvprof_trace(path): import sqlite3 conn = sqlite3.connect(path) conn.row_factory = sqlite3.Row # Parse strings table strings = {} for r in conn.execute("SELECT _id_ as id, value FROM StringTable"): strings[r["id"]] = torch._C._demangle(r["value"]) # First, find all functions and create FunctionEvents for them marker_query = """ SELECT start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time FROM CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end ON start.id = end.id WHERE start.name != 0 AND end.name = 0 """ functions = [] functions_map = {} unique = EnforceUnique() for row in conn.execute(marker_query): unique.see(row['marker_id']) evt = FunctionEvent(id=row['marker_id'], node_id=0, # missing a node_id when calling FunctionEvent. This is just to ensure # that pytorch doesn't crash when creating a FunctionEvent() object name=strings[row['name']], start_us=row['start_time'], end_us=row['end_time'], thread=0) # TODO: find in sqlite database functions.append(evt) functions_map[evt.id] = evt # Now, correlate all kernels with FunctionEvents kernel_query = """ SELECT start.id AS marker_id, start.name, start.timestamp, end.timestamp, runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end, kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name FROM CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end ON start.id = end.id INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime ON (start.timestamp < runtime.start AND runtime.end < end.timestamp) INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel ON kernel.correlationId = runtime.correlationId """ unique = EnforceUnique() for row in conn.execute(kernel_query): unique.see(row['marker_id'], row['runtime_id']) # 211 is cudaKernelLaunch for cuda >= 9.2; 13 is for older cuda versions assert (row['cbid'] == 211) or (row['cbid'] == 13) evt = functions_map[row['marker_id']] evt.append_kernel(row['kernel_name'], 0, row['kernel_end'] - row['kernel_start']) functions.sort(key=lambda evt: evt.time_range.start) return functions ################################################################################ # Pretty printer def build_table( events, sort_by=None, header=None, row_limit=100, max_src_column_width=75, with_flops=False, profile_memory=False, top_level_events_only=False): """Prints a summary of events (which can be a list of FunctionEvent or FunctionEventAvg).""" if len(events) == 0: return "" has_cuda_time = any([event.self_cuda_time_total > 0 for event in events]) has_cuda_mem = any([event.self_cuda_memory_usage > 0 for event in events]) has_input_shapes = any( [(event.input_shapes is not None and len(event.input_shapes) > 0) for event in events]) if sort_by is not None: events = EventList(sorted( events, key=lambda evt: getattr(evt, sort_by), reverse=True ), use_cuda=has_cuda_time, profile_memory=profile_memory, with_flops=with_flops) MAX_NAME_COLUMN_WIDTH = 55 name_column_width = max([len(evt.key) for evt 
in events]) + 4 name_column_width = min(name_column_width, MAX_NAME_COLUMN_WIDTH) DEFAULT_COLUMN_WIDTH = 12 shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4 shapes_column_width = min(shapes_column_width, 45) flops_column_width = DEFAULT_COLUMN_WIDTH src_column_width = None stacks = [] for evt in events: if evt.stack is not None and len(evt.stack) > 0: stacks.append(evt.stack) has_stack = len(stacks) > 0 if has_stack: src_column_width = max([max([len(entry) for entry in stack]) for stack in stacks]) + 4 src_column_width = min(src_column_width, max_src_column_width) headers = [ 'Name', 'Self CPU %', 'Self CPU', 'CPU total %', 'CPU total', 'CPU time avg', ] if has_cuda_time: headers.extend([ 'Self CUDA', 'Self CUDA %', 'CUDA total', 'CUDA time avg', ]) if profile_memory: headers.extend([ 'CPU Mem', 'Self CPU Mem', ]) if has_cuda_mem: headers.extend([ 'CUDA Mem', 'Self CUDA Mem', ]) headers.append( '# of Calls' ) # Only append Node ID if any event has a valid (>= 0) Node ID append_node_id = any([evt.node_id != -1 for evt in events]) if append_node_id: headers.append('Node ID') # Have to use a list because nonlocal is Py3 only... SPACING_SIZE = 2 row_format_lst = [""] header_sep_lst = [""] line_length_lst = [-SPACING_SIZE] MAX_STACK_ENTRY = 5 def add_column(padding, text_dir='>'): row_format_lst[0] += '{: ' + text_dir + str(padding) + '}' + (' ' * SPACING_SIZE) header_sep_lst[0] += '-' * padding + (' ' * SPACING_SIZE) line_length_lst[0] += padding + SPACING_SIZE def auto_scale_flops(flops): flop_headers = [ 'FLOPS', 'KFLOPS', 'MFLOPS', 'GFLOPS', 'TFLOPS', 'PFLOPS', ] assert flops > 0 log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1))) assert log_flops >= 0 and log_flops < len(flop_headers) return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)]) add_column(name_column_width) for _ in headers[1:]: add_column(DEFAULT_COLUMN_WIDTH) if has_input_shapes: headers.append('Input Shapes') add_column(shapes_column_width) if has_stack: headers.append('Source Location') add_column(src_column_width, text_dir='<') if with_flops: # Auto-scaling of flops header US_IN_SECOND = 1000.0 * 1000.0 # cpu_time_total is in us raw_flops = [] for evt in events: if evt.flops > 0: if evt.cuda_time_total != 0: evt.flops = float(evt.flops) / evt.cuda_time_total * US_IN_SECOND else: evt.flops = float(evt.flops) / evt.cpu_time_total * US_IN_SECOND raw_flops.append(evt.flops) if len(raw_flops) != 0: (flops_scale, flops_header) = auto_scale_flops(min(raw_flops)) headers.append(flops_header) add_column(flops_column_width) else: with_flops = False # can't find any valid flops row_format = row_format_lst[0] header_sep = header_sep_lst[0] line_length = line_length_lst[0] add_column = None # type: ignore[assignment] # Have to use a list because nonlocal is Py3 only... result = [] def append(s): result.append(s) result.append('\n') # Yes, newline after the end as well sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events]) sum_self_cuda_time_total = 0 for evt in events: if evt.device_type == DeviceType.CPU: # in legacy profiler, kernel info is stored in cpu events if evt.is_legacy: sum_self_cuda_time_total += evt.self_cuda_time_total elif evt.device_type == DeviceType.CUDA: # in kineto profiler, there're events with the correct device type (e.g. 
CUDA) sum_self_cuda_time_total += evt.self_cuda_time_total # Actual printing if header is not None: append('=' * line_length) append(header) if top_level_events_only: append('=' * line_length) append('This report only display top-level ops statistics') append(header_sep) append(row_format.format(*headers)) append(header_sep) def trim_path(path, src_column_width): if len(path) > src_column_width: offset = len(path) - src_column_width path = path[offset:] if len(path) > 3: path = "..." + path[3:] return path event_limit = 0 for evt in events: if event_limit == row_limit: break if top_level_events_only and evt.cpu_parent is not None: continue else: event_limit += 1 name = evt.key if len(name) >= MAX_NAME_COLUMN_WIDTH - 3: name = name[:(MAX_NAME_COLUMN_WIDTH - 3)] + "..." row_values = [ name, # Self CPU total %, 0 for async events. format_time_share(evt.self_cpu_time_total, sum_self_cpu_time_total), evt.self_cpu_time_total_str, # Self CPU total # CPU total %, 0 for async events. format_time_share(evt.cpu_time_total, sum_self_cpu_time_total) if not evt.is_async else 0, evt.cpu_time_total_str, # CPU total evt.cpu_time_str, # CPU time avg ] if has_cuda_time: row_values.extend([ evt.self_cuda_time_total_str, # CUDA time total % format_time_share(evt.self_cuda_time_total, sum_self_cuda_time_total), evt.cuda_time_total_str, evt.cuda_time_str, # Cuda time avg ]) if profile_memory: row_values.extend([ # CPU Mem Total format_memory(evt.cpu_memory_usage), # Self CPU Mem Total format_memory(evt.self_cpu_memory_usage), ]) if has_cuda_mem: row_values.extend([ # CUDA Mem Total format_memory(evt.cuda_memory_usage), # Self CUDA Mem Total format_memory(evt.self_cuda_memory_usage), ]) row_values.append( evt.count, # Number of calls ) if append_node_id: row_values.append(evt.node_id) if has_input_shapes: row_values.append(str(evt.input_shapes)[:shapes_column_width]) if with_flops: if evt.flops <= 0.0: row_values.append("--") else: row_values.append('{0:8.3f}'.format(evt.flops * flops_scale)) if has_stack: src_field = "" if len(evt.stack) > 0: src_field = trim_path(evt.stack[0], src_column_width) row_values.append(src_field) append(row_format.format(*row_values)) if has_stack: empty_headers = [""] * (len(headers) - 1) for entry in evt.stack[1:MAX_STACK_ENTRY]: append(row_format.format(*(empty_headers + [trim_path(entry, src_column_width)]))) empty_headers.append("") append(row_format.format(*empty_headers)) append(header_sep) append("Self CPU time total: {}".format(format_time(sum_self_cpu_time_total))) if has_cuda_time: append("Self CUDA time total: {}".format(format_time(sum_self_cuda_time_total))) return ''.join(result)
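The rows above appear to be the tail of PyTorch's torch/autograd/profiler.py. A minimal usage sketch of that API follows, assuming a standard PyTorch install: it drives profile(), record_function, key_averages().table() and export_chrome_trace(); the tensor size, the label string and the trace path are arbitrary choices, and the printed columns vary by build.

# Minimal sketch, assuming a standard PyTorch install; label/path/shape are arbitrary.
import torch
from torch.autograd import profiler

x = torch.randn(128, 128, requires_grad=True)

with profiler.profile(record_shapes=True) as prof:
    with profiler.record_function("my_block"):   # labels this region in the results
        y = (x @ x).sum()
    y.backward()

# Aggregate events sharing a key and print the summary table built by build_table().
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))

# Write a chrome://tracing-compatible trace via export_chrome_trace().
prof.export_chrome_trace("profiler_trace.json")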
py
1a46eba8fd47ad5e9a6646c7aefa94303114c30c
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import str import six import logging import filecmp import os import re import sys import uuid import json import time import tempfile from nose.tools import assert_raises, assert_equals, assert_true, assert_in import shutil from mock import patch import synapseclient import synapseclient.utils as utils import synapseclient.__main__ as cmdline from synapseclient.evaluation import Evaluation import integration from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC if six.PY2: from StringIO import StringIO else: from io import StringIO def setup_module(module): module.syn = integration.syn module.project = integration.project module.parser = cmdline.build_parser() # used for --description and --descriptionFile tests module.upload_filename = _create_temp_file_with_cleanup() module.description_text = "'some description text'" module.desc_filename = _create_temp_file_with_cleanup(module.description_text) module.update_description_text = \ "'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'" def run(*command, **kwargs): """ Sends the given command list to the command line client. :returns: The STDOUT output of the command. """ old_stdout = sys.stdout capturedSTDOUT = StringIO() syn_client = kwargs.get('syn', syn) stream_handler = logging.StreamHandler(capturedSTDOUT) try: sys.stdout = capturedSTDOUT syn_client.logger.addHandler(stream_handler) sys.argv = [item for item in command] args = parser.parse_args() args.debug = True cmdline.perform_main(args, syn_client) except SystemExit: pass # Prevent the test from quitting prematurely finally: sys.stdout = old_stdout syn_client.logger.handlers.remove(stream_handler) capturedSTDOUT = capturedSTDOUT.getvalue() return capturedSTDOUT def parse(regex, output): """Returns the first match.""" m = re.search(regex, output) if m: if len(m.groups()) > 0: return m.group(1).strip() else: raise Exception('ERROR parsing output: "' + str(output) + '"') def test_command_line_client(): # Create a Project output = run('synapse', '--skip-checks', 'create', '-name', str(uuid.uuid4()), '-description', 'test of command line client', 'Project') project_id = parse(r'Created entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusFileEntity', '-description', 'Bogus data to test file upload', '-parentid', project_id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle') # Get File from the command line output = run('synapse', '--skip-checks', 'get', file_entity_id) downloaded_filename = parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert_true(os.path.exists(downloaded_filename)) assert_true(filecmp.cmp(filename, downloaded_filename)) # Update the File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--id', file_entity_id, filename) # Get the File again output = run('synapse', '--skip-checks', 'get', file_entity_id) downloaded_filename = 
parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert_true(os.path.exists(downloaded_filename)) assert_true(filecmp.cmp(filename, downloaded_filename)) # Move the file to new folder folder = syn.store(synapseclient.Folder(parentId=project_id)) output = run('synapse', 'mv', '--id', file_entity_id, '--parentid', folder.id) movedFile = syn.get(file_entity_id, downloadFile=False) assert_equals(movedFile.parentId, folder.id) # Test Provenance repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' output = run('synapse', '--skip-checks', 'set-provenance', '-id', file_entity_id, '-name', 'TestActivity', '-description', 'A very excellent provenance', '-used', file_entity_id, '-executed', repo_url) output = run('synapse', '--skip-checks', 'get-provenance', '--id', file_entity_id) activity = json.loads(output) assert_equals(activity['name'], 'TestActivity') assert_equals(activity['description'], 'A very excellent provenance') used = utils._find_used(activity, lambda used: 'reference' in used) assert_equals(used['reference']['targetId'], file_entity_id) used = utils._find_used(activity, lambda used: 'url' in used) assert_equals(used['url'], repo_url) assert_true(used['wasExecuted']) # Note: Tests shouldn't have external dependencies # but this is a pretty picture of Singapore singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \ 'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \ '/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg' # Test external file handle output = run('synapse', '--skip-checks', 'add', '-name', 'Singapore', '-description', 'A nice picture of Singapore', '-parentid', project_id, singapore_url) exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we created an external file handle f2 = syn.get(exteral_entity_id) fh = syn._getFileHandle(f2.dataFileHandleId) assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.ExternalFileHandle') output = run('synapse', '--skip-checks', 'get', exteral_entity_id) downloaded_filename = parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert_true(os.path.exists(downloaded_filename)) # Delete the Project run('synapse', '--skip-checks', 'delete', project_id) def test_command_line_client_annotations(): # Create a Project output = run('synapse', '--skip-checks', 'create', '-name', str(uuid.uuid4()), '-description', 'test of command line client', 'Project') project_id = parse(r'Created entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusFileEntity', '-description', 'Bogus data to test file upload', '-parentid', project_id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test setting annotations run('synapse', '--skip-checks', 'set-annotations', '--id', file_entity_id, '--annotations', '{"foo": 1, "bar": "1", "baz": [1, 2, 3]}') # Test getting annotations # check that the three things set are correct # This test should be adjusted to check for equality of the # whole annotation dictionary once the issue of other # attributes (creationDate, eTag, id, uri) being returned is resolved # See: https://sagebionetworks.jira.com/browse/SYNPY-175 output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id) annotations = json.loads(output) assert_equals(annotations['foo'], [1]) 
assert_equals(annotations['bar'], [u"1"]) assert_equals(annotations['baz'], [1, 2, 3]) # Test setting annotations by replacing existing ones. output = run('synapse', '--skip-checks', 'set-annotations', '--id', file_entity_id, '--annotations', '{"foo": 2}', '--replace') # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id) annotations = json.loads(output) assert_equals(annotations['foo'], [2]) # Since this replaces the existing annotations, previous values # Should not be available. assert_raises(KeyError, lambda key: annotations[key], 'bar') assert_raises(KeyError, lambda key: annotations[key], 'baz') # Test running add command to set annotations on a new object filename2 = utils.make_bogus_data_file() schedule_for_cleanup(filename2) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusData2', '-description', 'Bogus data to test file upload with add and add annotations', '-parentid', project_id, '--annotations', '{"foo": 123}', filename2) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id) annotations = json.loads(output) assert_equals(annotations['foo'], [123]) # Test running store command to set annotations on a new object filename3 = utils.make_bogus_data_file() schedule_for_cleanup(filename3) output = run('synapse', '--skip-checks', 'store', '--name', 'BogusData3', '--description', '\"Bogus data to test file upload with store and add annotations\"', '--parentid', project_id, '--annotations', '{"foo": 456}', filename3) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id) annotations = json.loads(output) assert_equals(annotations['foo'], [456]) def test_command_line_store_and_submit(): # Create a Project output = run('synapse', '--skip-checks', 'store', '--name', str(uuid.uuid4()), '--description', 'test of store command', '--type', 'Project') project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create and upload a file filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--description', 'Bogus data to test file upload', '--parentid', project_id, '--file', filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle') # Test that entity is named after the file it contains assert_equals(f1.name, os.path.basename(filename)) # Create an Evaluation to submit to eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id) eval = syn.store(eval) schedule_for_cleanup(eval) # Submit a bogus file output = run('synapse', '--skip-checks', 'submit', '--evaluation', eval.id, '--name', 'Some random name', '--entity', file_entity_id) submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output) # testing different commmand line options for submitting to an evaluation # submitting to an evaluation by evaluationID output = run('synapse', '--skip-checks', 'submit', '--evalID', eval.id, '--name', 'Some random name', '--alias', 'My Team', '--entity', file_entity_id) submission_id = 
parse(r'Submitted \(id: (\d+)\) entity:\s+', output) # Update the file filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--id', file_entity_id, '--file', filename) updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output) schedule_for_cleanup(updated_entity_id) # Submit an updated bogus file and this time by evaluation name output = run('synapse', '--skip-checks', 'submit', '--evaluationName', eval.name, '--entity', file_entity_id) # Tests shouldn't have external dependencies, but here it's required ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif' # Test external file handle output = run('synapse', '--skip-checks', 'store', '--name', 'Rubber Ducky', '--description', 'I like rubber duckies', '--parentid', project_id, '--file', ducky_url) exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(exteral_entity_id) # Verify that we created an external file handle f2 = syn.get(exteral_entity_id) fh = syn._getFileHandle(f2.dataFileHandleId) assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.ExternalFileHandle') # submit an external file to an evaluation and use provenance filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' run('synapse', '--skip-checks', 'submit', '--evalID', eval.id, '--file', filename, '--parent', project_id, '--used', exteral_entity_id, '--executed', repo_url) # Delete project run('synapse', '--skip-checks', 'delete', project_id) def test_command_get_recursive_and_query(): """Tests the 'synapse get -r' and 'synapse get -q' functions""" project_entity = project # Create Folders in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) folder_entity2 = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=folder_entity)) # Create and upload two files in sub-Folder uploaded_paths = [] file_entities = [] for i in range(2): f = utils.make_bogus_data_file() uploaded_paths.append(f) schedule_for_cleanup(f) file_entity = synapseclient.File(f, parent=folder_entity2) file_entity = syn.store(file_entity) file_entities.append(file_entity) schedule_for_cleanup(f) # Add a file in the Folder as well f = utils.make_bogus_data_file() uploaded_paths.append(f) schedule_for_cleanup(f) file_entity = synapseclient.File(f, parent=folder_entity) file_entity = syn.store(file_entity) file_entities.append(file_entity) # get -r uses syncFromSynapse() which uses getChildren(), which is not immediately consistent, # but faster than chunked queries. 
time.sleep(2) # Test recursive get run('synapse', '--skip-checks', 'get', '-r', folder_entity.id) # Verify that we downloaded files: new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]] new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1]))) schedule_for_cleanup(folder_entity.name) for downloaded, uploaded in zip(new_paths, uploaded_paths): assert_true(os.path.exists(downloaded)) assert_true(filecmp.cmp(downloaded, uploaded)) schedule_for_cleanup(downloaded) # Test query get using a Table with an entity column # This should be replaced when Table File Views are implemented in the client cols = [synapseclient.Column(name='id', columnType='ENTITYID')] schema1 = syn.store(synapseclient.Schema(name='Foo Table', columns=cols, parent=project_entity)) schedule_for_cleanup(schema1.id) data1 = [[x.id] for x in file_entities] syn.store(synapseclient.RowSet(schema=schema1, rows=[synapseclient.Row(r) for r in data1])) time.sleep(3) # get -q are eventually consistent # Test Table/View query get output = run('synapse', '--skip-checks', 'get', '-q', "select id from %s" % schema1.id) # Verify that we downloaded files: new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]] new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1]))) schedule_for_cleanup(folder_entity.name) for downloaded, uploaded in zip(new_paths, uploaded_paths): assert_true(os.path.exists(downloaded)) assert_true(filecmp.cmp(downloaded, uploaded)) schedule_for_cleanup(downloaded) schedule_for_cleanup(new_paths[0]) def test_command_copy(): """Tests the 'synapse cp' function""" # Create a Project project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4()))) schedule_for_cleanup(project_entity.id) # Create a Folder in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) schedule_for_cleanup(folder_entity.id) # Create and upload a file in Folder repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' annots = {'test': ['hello_world']} # Create, upload, and set annotations on a file in Folder filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) file_entity = syn.store(synapseclient.File(filename, parent=folder_entity)) externalURL_entity = syn.store(synapseclient.File(repo_url, name='rand', parent=folder_entity, synapseStore=False)) syn.setAnnotations(file_entity, annots) syn.setAnnotations(externalURL_entity, annots) schedule_for_cleanup(file_entity.id) schedule_for_cleanup(externalURL_entity.id) # Test cp function output = run('synapse', '--skip-checks', 'cp', file_entity.id, '--destinationId', project_entity.id) output_URL = run('synapse', '--skip-checks', 'cp', externalURL_entity.id, '--destinationId', project_entity.id) copied_id = parse(r'Copied syn\d+ to (syn\d+)', output) copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)', output_URL) # Verify that our copied files are identical copied_ent = syn.get(copied_id) copied_URL_ent = syn.get(copied_URL_id, downloadFile=False) schedule_for_cleanup(copied_id) schedule_for_cleanup(copied_URL_id) copied_ent_annot = syn.getAnnotations(copied_id) copied_url_annot = syn.getAnnotations(copied_URL_id) copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId'] copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId'] # Make sure copied files are the same assert_equals(copied_prov, file_entity.id) assert_equals(copied_ent_annot, annots) 
assert_equals(copied_ent.properties.dataFileHandleId, file_entity.properties.dataFileHandleId) # Make sure copied URLs are the same assert_equals(copied_url_prov, externalURL_entity.id) assert_equals(copied_url_annot, annots) assert_equals(copied_URL_ent.externalURL, repo_url) assert_equals(copied_URL_ent.name, 'rand') assert_equals(copied_URL_ent.properties.dataFileHandleId, externalURL_entity.properties.dataFileHandleId) # Verify that errors are being thrown when a # file is copied to a folder/project that has a file with the same filename assert_raises(ValueError, run, 'synapse', '--debug', '--skip-checks', 'cp', file_entity.id, '--destinationId', project_entity.id) def test_command_line_using_paths(): # Create a Project project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4()))) schedule_for_cleanup(project_entity.id) # Create a Folder in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) # Create and upload a file in Folder filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) file_entity = syn.store(synapseclient.File(filename, parent=folder_entity)) # Verify that we can use show with a filename output = run('synapse', '--skip-checks', 'show', filename) id = parse(r'File: %s\s+\((syn\d+)\)\s+' % os.path.split(filename)[1], output) assert_equals(file_entity.id, id) # Verify that limitSearch works by making sure we get the file entity # that's inside the folder file_entity2 = syn.store(synapseclient.File(filename, parent=project_entity)) output = run('synapse', '--skip-checks', 'get', '--limitSearch', folder_entity.id, filename) id = parse(r'Associated file: .* with synapse ID (syn\d+)', output) name = parse(r'Associated file: (.*) with synapse ID syn\d+', output) assert_equals(file_entity.id, id) assert_true(utils.equal_paths(name, filename)) # Verify that set-provenance works with filepath repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' output = run('synapse', '--skip-checks', 'set-provenance', '-id', file_entity2.id, '-name', 'TestActivity', '-description', 'A very excellent provenance', '-used', filename, '-executed', repo_url, '-limitSearch', folder_entity.id) activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output) output = run('synapse', '--skip-checks', 'get-provenance', '-id', file_entity2.id) activity = json.loads(output) assert_equals(activity['name'], 'TestActivity') assert_equals(activity['description'], 'A very excellent provenance') # Verify that store works with provenance specified with filepath repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' filename2 = utils.make_bogus_data_file() schedule_for_cleanup(filename2) output = run('synapse', '--skip-checks', 'add', filename2, '-parentid', project_entity.id, '-used', filename, '-executed', '%s %s' % (repo_url, filename)) entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) output = run('synapse', '--skip-checks', 'get-provenance', '-id', entity_id) activity = json.loads(output) a = [a for a in activity['used'] if not a['wasExecuted']] assert_in(a[0]['reference']['targetId'], [file_entity.id, file_entity2.id]) # Test associate command # I have two files in Synapse filename and filename2 path = tempfile.mkdtemp() schedule_for_cleanup(path) shutil.copy(filename, path) shutil.copy(filename2, path) run('synapse', '--skip-checks', 'associate', path, '-r') run('synapse', '--skip-checks', 'show', filename) def test_table_query(): """Test command line ability to do table 
query.""" cols = [synapseclient.Column(name='name', columnType='STRING', maximumSize=1000), synapseclient.Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']), synapseclient.Column(name='x', columnType='DOUBLE'), synapseclient.Column(name='age', columnType='INTEGER'), synapseclient.Column(name='cartoon', columnType='BOOLEAN')] project_entity = project schema1 = syn.store(synapseclient.Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity)) schedule_for_cleanup(schema1.id) data1 = [['Chris', 'bar', 11.23, 45, False], ['Jen', 'bat', 14.56, 40, False], ['Jane', 'bat', 17.89, 6, False], ['Henry', 'bar', 10.12, 1, False]] syn.store(synapseclient.RowSet(schema=schema1, rows=[synapseclient.Row(r) for r in data1])) # Test query output = run('synapse', '--skip-checks', 'query', 'select * from %s' % schema1.id) output_rows = output.rstrip("\n").split("\n") # Check the length of the output assert_equals(len(output_rows), 5, "got %s rows" % (len(output_rows),)) # Check that headers are correct. # Should be column names in schema plus the ROW_ID and ROW_VERSION my_headers_set = output_rows[0].split("\t") expected_headers_set = ["ROW_ID", "ROW_VERSION"] + list(map(lambda x: x.name, cols)) assert_equals(my_headers_set, expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set)) def test_login(): alt_syn = synapseclient.Synapse() username = "username" password = "password" with patch.object(alt_syn, "login") as mock_login, \ patch.object(alt_syn, "getUserProfile", return_value={"userName": "test_user", "ownerId": "ownerId"})\ as mock_get_user_profile: run('synapse', '--skip-checks', 'login', '-u', username, '-p', password, '--rememberMe', syn=alt_syn) mock_login.assert_called_once_with(username, password, forced=True, rememberMe=True, silent=False) mock_get_user_profile.assert_called_once_with() def test_configPath(): """Test using a user-specified configPath for Synapse configuration file.""" tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False) shutil.copyfile(synapseclient.client.CONFIG_FILE, tmp_config_file.name) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', '--configPath', tmp_config_file.name, 'add', '-name', 'BogusFileEntityTwo', '-description', 'Bogus data to test file upload', '-parentid', project.id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert_equals(fh['concreteType'], 'org.sagebionetworks.repo.model.file.S3FileHandle') def _description_wiki_check(run_output, expected_description): entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output) wiki = syn.getWiki(entity_id) assert_equals(expected_description, wiki.markdown) def _create_temp_file_with_cleanup(specific_file_text=None): if specific_file_text: with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file: file.write(specific_file_text) filename = file.name else: filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) return filename def test_create__with_description(): output = run('synapse', 'create', 'Folder', '-name', str(uuid.uuid4()), '-parentid', project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_store__with_description(): output = run('synapse', 'store', upload_filename, '-name', str(uuid.uuid4()), '-parentid', 
project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_add__with_description(): output = run('synapse', 'add', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_create__with_descriptionFile(): output = run('synapse', 'create', 'Folder', '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_store__with_descriptionFile(): output = run('synapse', 'store', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_add__with_descriptionFile(): output = run('synapse', 'add', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_create__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'create', 'Folder', '-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'create', 'Folder', '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text) def test_store__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'store', upload_filename, '-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'store', upload_filename, '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text) def test_add__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'add', upload_filename, '-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'add', upload_filename, '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text)
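The tests above capture the CLI's STDOUT with run() and then scrape Synapse IDs out of that output with parse(). A minimal sketch of that regex contract, using an invented sample line:

# Minimal sketch; the sample output line is invented for illustration.
import re

sample_output = "Created entity:  syn1234567  My Project\n"

match = re.search(r'Created entity:\s+(syn\d+)\s+', sample_output)
assert match is not None
assert match.group(1).strip() == 'syn1234567'   # what parse() would return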
py
1a46ebb104cd3e28527a8df48e05d60fbb03a2e9
""" WSGI config for photos project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photos.settings') application = get_wsgi_application()
py
1a46ec3f5db89c55eb85e905e0d26e333e561d0a
from collections import OrderedDict from django.utils.functional import cached_property from six import iteritems from slyd.orm.exceptions import ValidationError __all__ = [ 'cached_property', 'cached_property_ignore_set', 'class_property', 'unspecified', 'validate_type', 'AttributeDict', ] class cached_property_ignore_set(cached_property): def __set__(self, instance, value): pass class class_property(object): """A read-only descriptor that works on the class too""" def __init__(self, fget=None): if fget is not None and not isinstance(fget, classmethod): fget = classmethod(fget) self.fget = fget def __get__(self, instance, instance_type=None): return self.fget.__get__(instance, instance_type)() unspecified = object() def validate_type(value, model): if not isinstance(value, model): raise ValidationError( "'{!r}' is not an instance of type '{}'".format( value, model.__name__)) def unwrap_envelopes(data, many, pk_field, remove_key): unwrapped = [] for pk, obj in iteritems(data): if not remove_key: try: if obj[pk_field] != pk: raise ValidationError( u"Envelope id does not match value of primary key " u"field") except KeyError: pass obj[pk_field] = pk unwrapped.append(obj) if not many and len(unwrapped) == 1: return unwrapped[0] return unwrapped def wrap_envelopes(data, many, pk_field, remove_key): if not many: data = [data] wrapped = OrderedDict() for obj in data: pk = obj[pk_field] if remove_key: del obj[pk_field] wrapped[pk] = obj return wrapped class AttributeDict(dict): def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError( u"'{}' object has no attribute '{}'".format( self.__class__.__name__, name))
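# Usage sketch based only on the definitions above (not part of the original
# module): a round trip through wrap_envelopes()/unwrap_envelopes() plus a quick
# look at AttributeDict.
if __name__ == '__main__':
    items = [{'id': 'a1', 'name': 'first'}, {'id': 'a2', 'name': 'second'}]

    # Key each object by its primary key; remove_key=True strips the pk field
    # from the enveloped bodies.
    wrapped = wrap_envelopes(items, many=True, pk_field='id', remove_key=True)
    assert list(wrapped.items()) == [('a1', {'name': 'first'}),
                                     ('a2', {'name': 'second'})]

    # unwrap_envelopes() restores the pk field from the envelope key.
    unwrapped = unwrap_envelopes(wrapped, many=True, pk_field='id',
                                 remove_key=True)
    assert unwrapped == [{'id': 'a1', 'name': 'first'},
                         {'id': 'a2', 'name': 'second'}]

    # AttributeDict exposes keys as attributes and raises AttributeError
    # (rather than KeyError) for missing names.
    record = AttributeDict(unwrapped[0])
    assert record.name == 'first'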
py
1a46ed9fea343607bef9b8f77936b5fb35d900a2
#!/usr/bin/env python3 # Copyright (c) 2016-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test segwit transactions and blocks on P2P network.""" from binascii import hexlify import math import random import struct import time from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER from test_framework.key import ECKey from test_framework.messages import ( BIP125_SEQUENCE_NUMBER, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, CTxWitness, MAX_BLOCK_BASE_SIZE, MSG_WITNESS_FLAG, NODE_NETWORK, NODE_WITNESS, msg_block, msg_getdata, msg_headers, msg_inv, msg_tx, msg_witness_block, msg_witness_tx, ser_uint256, ser_vector, sha256, uint256_from_str, FromHex, ) from test_framework.mininode import ( P2PInterface, mininode_lock, ) from test_framework.script import ( CScript, CScriptNum, CScriptOp, MAX_SCRIPT_ELEMENT_SIZE, OP_0, OP_1, OP_16, OP_2DROP, OP_CHECKMULTISIG, OP_CHECKSIG, OP_DROP, OP_DUP, OP_ELSE, OP_ENDIF, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_IF, OP_RETURN, OP_TRUE, SIGHASH_ALL, SIGHASH_ANYONECANPAY, SIGHASH_NONE, SIGHASH_SINGLE, SegwitVersion1SignatureHash, SignatureHash, hash160, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, bytes_to_hex_str, connect_nodes, disconnect_nodes, get_bip9_status, hex_str_to_bytes, sync_blocks, sync_mempools, assert_raises_rpc_error, ) # The versionbit bit used to signal activation of SegWit VB_WITNESS_BIT = 1 VB_PERIOD = 144 VB_TOP_BITS = 0x20000000 MAX_SIGOP_COST = 80000 class UTXO(): """Used to keep track of anyone-can-spend outputs that we can use in the tests.""" def __init__(self, sha256, n, value): self.sha256 = sha256 self.n = n self.nValue = value def get_p2pkh_script(pubkeyhash): """Get the script associated with a P2PKH.""" return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key): """Add signature for a P2PK witness program.""" tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value) signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1') tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script] tx_to.rehash() def get_virtual_size(witness_block): """Calculate the virtual size of a witness block. 
Virtual size is base + witness/4.""" base_size = len(witness_block.serialize(with_witness=False)) total_size = len(witness_block.serialize(with_witness=True)) # the "+3" is so we round up vsize = int((3 * base_size + total_size + 3) / 4) return vsize def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None): """Send a transaction to the node and check that it's accepted to the mempool - Submit the transaction over the p2p interface - use the getrawmempool rpc to check for acceptance.""" reason = [reason] if reason else [] with node.assert_debug_log(expected_msgs=reason): p2p.send_message(msg_witness_tx(tx) if with_witness else msg_tx(tx)) p2p.sync_with_ping() assert_equal(tx.hash in node.getrawmempool(), accepted) def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None): """Send a block to the node and check that it's accepted - Submit the block over the p2p interface - use the getbestblockhash rpc to check for acceptance.""" reason = [reason] if reason else [] with node.assert_debug_log(expected_msgs=reason): p2p.send_message(msg_witness_block(block) if with_witness else msg_block(block)) p2p.sync_with_ping() assert_equal(node.getbestblockhash() == block.hash, accepted) class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.getdataset = set() def on_getdata(self, message): for inv in message.inv: self.getdataset.add(inv.hash) def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True): with mininode_lock: self.last_message.pop("getdata", None) self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) if success: self.wait_for_getdata(timeout) else: time.sleep(timeout) assert not self.last_message.get("getdata") def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) self.last_message.pop("getheaders", None) msg = msg_headers() msg.headers = [CBlockHeader(block)] if use_header: self.send_message(msg) else: self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) self.wait_for_getheaders() self.send_message(msg) self.wait_for_getdata() def request_block(self, blockhash, inv_type, timeout=60): with mininode_lock: self.last_message.pop("block", None) self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) self.wait_for_block(blockhash, timeout) return self.last_message["block"].block class SegWitTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
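        # -vbparams=segwit:0:999999999999 keeps the BIP9 deployment window open on
        # nodes 0 and 1, so the test can walk segwit through
        # defined -> started -> locked_in -> active. Node 2 uses -vbparams=segwit:0:0
        # (the deployment expires immediately), so it never signals or activates
        # segwit and stands in for an unupgraded node.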
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999", "-mempoolreplacement=1"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0", "-mempoolreplacement=1"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) self.sync_all() # Helper functions def build_next_block(self, version=VB_TOP_BITS): """Build a block on top of node0's tip.""" tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() + 1 block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 block = create_block(int(tip, 16), create_coinbase(height), block_time) block.nVersion = version block.rehash() return block def update_witness_block_with_transactions(self, block, tx_list, nonce=0): """Add list of transactions to block, adds witness commitment, then solves.""" block.vtx.extend(tx_list) add_witness_commitment(block, nonce) block.solve() def run_test(self): # Setup the p2p connections # self.test_node sets NODE_WITNESS|NODE_NETWORK self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS) # self.old_node sets only NODE_NETWORK self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) # self.std_node is for testing node1 (fRequireStandard=true) self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS) assert self.test_node.nServices & NODE_WITNESS != 0 # Keep a place to store utxo's that can be used in later tests self.utxo = [] # Segwit status 'defined' self.segwit_status = 'defined' self.test_non_witness_transaction() self.test_unnecessary_witness_before_segwit_activation() self.test_v0_outputs_arent_spendable() self.test_block_relay() self.advance_to_segwit_started() # Segwit status 'started' self.test_getblocktemplate_before_lockin() self.advance_to_segwit_lockin() # Segwit status 'locked_in' self.test_unnecessary_witness_before_segwit_activation() self.test_witness_tx_relay_before_segwit_activation() self.test_block_relay() self.test_standardness_v0() self.advance_to_segwit_active() # Segwit status 'active' self.test_p2sh_witness() self.test_witness_commitments() self.test_block_malleability() self.test_witness_block_size() self.test_submit_block() self.test_extra_witness_data() self.test_max_witness_push_length() self.test_max_witness_program_length() self.test_witness_input_length() self.test_block_relay() self.test_tx_relay_after_segwit_activation() self.test_standardness_v0() self.test_segwit_versions() self.test_premature_coinbase_witness_spend() self.test_uncompressed_pubkey() self.test_signature_version_1() self.test_non_standard_witness_blinding() self.test_non_standard_witness() self.test_upgrade_after_activation() self.test_witness_sigops() self.test_superfluous_witness() # Individual tests def subtest(func): # noqa: N805 """Wraps the subtests for logging and state assertions.""" def func_wrapper(self, *args, **kwargs): self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status)) # Assert segwit status is as expected assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status) func(self, *args, **kwargs) # Each subtest should leave some utxos for the next subtest assert self.utxo sync_blocks(self.nodes) # Assert segwit status is as expected at end of subtest 
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status) return func_wrapper @subtest def test_non_witness_transaction(self): """See if sending a regular transaction works, and create a utxo to use in later tests.""" # Mine a block with an anyone-can-spend coinbase, # let it mature, then try to spend it. block = self.build_next_block(version=1) block.solve() self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() # make sure the block was processed txid = block.vtx[0].sha256 self.nodes[0].generate(99) # let the block mature # Create a transaction that spends the coinbase tx = CTransaction() tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx.calc_sha256() # Check that serializing it with or without witness is the same # This is a sanity check of our testing framework. assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) self.test_node.send_message(msg_witness_tx(tx)) self.test_node.sync_with_ping() # make sure the tx was processed assert(tx.hash in self.nodes[0].getrawmempool()) # Save this transaction for later self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000)) self.nodes[0].generate(1) @subtest def test_unnecessary_witness_before_segwit_activation(self): """Verify that blocks with witnesses are rejected before activation.""" tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] # Verify the hash with witness differs from the txid # (otherwise our testing framework must be broken!) tx.rehash() assert(tx.sha256 != tx.calc_sha256(with_witness=True)) # Construct a segwit-signaling block that includes the transaction. block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT))) self.update_witness_block_with_transactions(block, [tx]) # Sending witness data before activation is not allowed (anti-spam # rule). test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness') # But it should not be permanently marked bad... # Resend without witness information. self.test_node.send_message(msg_block(block)) self.test_node.sync_with_ping() assert_equal(self.nodes[0].getbestblockhash(), block.hash) # Update our utxo list; we spent the first entry. self.utxo.pop(0) self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) @subtest def test_block_relay(self): """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG. This is true regardless of segwit activation. Also test that we don't ask for blocks from unupgraded peers.""" blocktype = 2 | MSG_WITNESS_FLAG # test_node has set NODE_WITNESS, so all getdata requests should be for # witness blocks. 
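        # blocktype above is 2 | MSG_WITNESS_FLAG, i.e. the MSG_BLOCK inv type with
        # the witness bit set (MSG_WITNESS_BLOCK), which is what a NODE_WITNESS peer
        # is expected to request via getdata.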
# Test announcing a block via inv results in a getdata, and that # announcing a version 4 or random VB block with a header results in a getdata block1 = self.build_next_block() block1.solve() self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) test_witness_block(self.nodes[0], self.test_node, block1, True) block2 = self.build_next_block() block2.solve() self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) test_witness_block(self.nodes[0], self.test_node, block2, True) block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15))) block3.solve() self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) test_witness_block(self.nodes[0], self.test_node, block3, True) # Check that we can getdata for witness blocks or regular blocks, # and the right thing happens. if self.segwit_status != 'active': # Before activation, we should be able to request old blocks with # or without witness, and they should be the same. chain_height = self.nodes[0].getblockcount() # Pick 10 random blocks on main chain, and verify that getdata's # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal. all_heights = list(range(chain_height + 1)) random.shuffle(all_heights) all_heights = all_heights[0:10] for height in all_heights: block_hash = self.nodes[0].getblockhash(height) rpc_block = self.nodes[0].getblock(block_hash, False) block_hash = int(block_hash, 16) block = self.test_node.request_block(block_hash, 2) wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG) assert_equal(block.serialize(True), wit_block.serialize(True)) assert_equal(block.serialize(), hex_str_to_bytes(rpc_block)) else: # After activation, witness blocks and non-witness blocks should # be different. Verify rpc getblock() returns witness blocks, while # getdata respects the requested type. block = self.build_next_block() self.update_witness_block_with_transactions(block, []) # This gives us a witness commitment. assert(len(block.vtx[0].wit.vtxinwit) == 1) assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Now try to retrieve it... 
rpc_block = self.nodes[0].getblock(block.hash, False) non_wit_block = self.test_node.request_block(block.sha256, 2) wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG) assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block)) assert_equal(wit_block.serialize(False), non_wit_block.serialize()) assert_equal(wit_block.serialize(True), block.serialize(True)) # Test size, vsize, weight rpc_details = self.nodes[0].getblock(block.hash, True) assert_equal(rpc_details["size"], len(block.serialize(True))) assert_equal(rpc_details["strippedsize"], len(block.serialize(False))) weight = 3 * len(block.serialize(False)) + len(block.serialize(True)) assert_equal(rpc_details["weight"], weight) # Upgraded node should not ask for blocks from unupgraded block4 = self.build_next_block(version=4) block4.solve() self.old_node.getdataset = set() # Blocks can be requested via direct-fetch (immediately upon processing the announcement) # or via parallel download (with an indeterminate delay from processing the announcement) # so to test that a block is NOT requested, we could guess a time period to sleep for, # and then check. We can avoid the sleep() by taking advantage of transaction getdata's # being processed after block getdata's, and announce a transaction as well, # and then check to see if that particular getdata has been received. # Since 0.14, inv's will only be responded to with a getheaders, so send a header # to announce this block. msg = msg_headers() msg.headers = [CBlockHeader(block4)] self.old_node.send_message(msg) self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0]) assert(block4.sha256 not in self.old_node.getdataset) @subtest def test_v0_outputs_arent_spendable(self): """Test that v0 outputs aren't spendable before segwit activation. ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was backdated so that it applies to all blocks, going back to the genesis block. Consequently, version 0 witness outputs are never spendable without witness, and so can't be spent before segwit activation (the point at which blocks are permitted to contain witnesses).""" # node2 doesn't need to be connected for this test. # (If it's connected, node0 may propagate an invalid block to it over # compact blocks and the nodes would have inconsistent tips.) disconnect_nodes(self.nodes[0], 2) # Create two outputs, a p2wsh and p2sh-p2wsh witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) p2sh_pubkey = hash160(script_pubkey) p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) value = self.utxo[0].nValue // 3 tx = CTransaction() tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')] tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)] tx.vout.append(CTxOut(value, CScript([OP_TRUE]))) tx.rehash() txid = tx.sha256 # Add it to a block block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) # Verify that segwit isn't activated. A block serialized with witness # should be rejected prior to activation. test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness') # Now send the block without witness. It should be accepted test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False) # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled. 
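        # Two spends are attempted below: p2wsh_tx spends the native P2WSH output
        # (vout 0) and p2sh_p2wsh_tx spends the P2SH-wrapped P2WSH output (vout 1),
        # each supplying a witness stack even though witnesses are not yet allowed
        # in blocks.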
p2wsh_tx = CTransaction() p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')] p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))] p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] p2wsh_tx.rehash() p2sh_p2wsh_tx = CTransaction() p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))] p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))] p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] p2sh_p2wsh_tx.rehash() for tx in [p2wsh_tx, p2sh_p2wsh_tx]: block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) # When the block is serialized with a witness, the block will be rejected because witness # data isn't allowed in blocks that don't commit to witness data. test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness') # When the block is serialized without witness, validation fails because the transaction is # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction # without a witness is invalid). # Note: The reject reason for this failure could be # 'block-validation-failed' (if script check threads > 1) or # 'non-mandatory-script-verify-flag (Witness program was passed an # empty witness)' (otherwise). # TODO: support multiple acceptable reject reasons. # WinstarRxcoin: SCRIPT_VERIFY_WITNESS is enforced when segwit is activated test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False) connect_nodes(self.nodes[0], 2) self.utxo.pop(0) self.utxo.append(UTXO(txid, 2, value)) @subtest def advance_to_segwit_started(self): """Mine enough blocks for segwit's vb state to be 'started'.""" height = self.nodes[0].getblockcount() # Will need to rewrite the tests here if we are past the first period assert(height < VB_PERIOD - 1) # Advance to end of period, status should now be 'started' self.nodes[0].generate(VB_PERIOD - height - 1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') self.segwit_status = 'started' @subtest def test_getblocktemplate_before_lockin(self): txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) for node in [self.nodes[0], self.nodes[2]]: gbt_results = node.getblocktemplate({"rules": ["segwit"]}) block_version = gbt_results['version'] if node == self.nodes[2]: # If this is a non-segwit node, we should not get a witness # commitment, nor a version bit signalling segwit. assert_equal(block_version & (1 << VB_WITNESS_BIT), 0) assert('default_witness_commitment' not in gbt_results) else: # For segwit-aware nodes, check the version bit and the witness # commitment are correct. assert(block_version & (1 << VB_WITNESS_BIT) != 0) assert('default_witness_commitment' in gbt_results) witness_commitment = gbt_results['default_witness_commitment'] # Check that default_witness_commitment is present. 
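                # The expected commitment is rebuilt by hand below: the witness merkle
                # root is computed over the coinbase (whose wtxid is defined to be all
                # zeros) and the hash of the transaction just sent, then combined with
                # a zero witness nonce by get_witness_script() to form the OP_RETURN
                # commitment script that getblocktemplate should have returned.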
witness_root = CBlock.get_merkle_root([ser_uint256(0), ser_uint256(txid)]) script = get_witness_script(witness_root, 0) assert_equal(witness_commitment, bytes_to_hex_str(script)) @subtest def advance_to_segwit_lockin(self): """Mine enough blocks to lock in segwit, but don't activate.""" height = self.nodes[0].getblockcount() # Advance to end of period, and verify lock-in happens at the end self.nodes[0].generate(VB_PERIOD - 1) height = self.nodes[0].getblockcount() assert((height % VB_PERIOD) == VB_PERIOD - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') self.segwit_status = 'locked_in' @subtest def test_witness_tx_relay_before_segwit_activation(self): # Generate a transaction that doesn't require a witness, but send it # with a witness. Should be rejected for premature-witness, but should # not be added to recently rejected list. tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [b'a'] tx.rehash() tx_hash = tx.sha256 tx_value = tx.vout[0].nValue # Verify that if a peer doesn't set nServices to include NODE_WITNESS, # the getdata is just for the non-witness portion. self.old_node.announce_tx_and_wait_for_getdata(tx) assert(self.old_node.last_message["getdata"].inv[0].type == 1) # Since we haven't delivered the tx yet, inv'ing the same tx from # a witness transaction ought not result in a getdata. self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False) # Delivering this transaction with witness should fail (no matter who # its from) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) # But eliminating the witness should fix it test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) # Cleanup: mine the first transaction and update utxo self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) self.utxo.append(UTXO(tx_hash, 0, tx_value)) @subtest def test_standardness_v0(self): """Test V0 txout standardness. V0 segwit outputs and inputs are always standard. V0 segwit inputs may only be mined after activation, but not before.""" witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) p2sh_pubkey = hash160(witness_program) p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # First prepare a p2sh output (so that spending it will pass standardness) p2sh_tx = CTransaction() p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)] p2sh_tx.rehash() # Mine it on test_node to create the confirmed output. test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Now test standardness of v0 P2WSH outputs. # Start by creating a transaction with two outputs. 
tx = CTransaction() tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)] tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool tx.rehash() # This is always accepted, since the mempool policy is to consider segwit as always active # and thus allow segwit outputs test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True) # Now create something that looks like a P2PKH output. This won't be spendable. script_pubkey = CScript([OP_0, hash160(witness_hash)]) tx2 = CTransaction() # tx was accepted, so we spend the second output. tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] tx2.vout = [CTxOut(7000, script_pubkey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx2.rehash() test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True) # Now update self.utxo for later tests. tx3 = CTransaction() # tx and tx2 were both accepted. Don't bother trying to reclaim the # P2PKH output; just send tx's first output back to an anyone-can-spend. sync_mempools([self.nodes[0], self.nodes[1]]) tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))] tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx3.rehash() if self.segwit_status != 'active': # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed # in blocks and the tx is impossible to mine right now. assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}]) # Create the same output as tx3, but by replacing tx tx3_out = tx3.vout[0] tx3 = tx tx3.vout = [tx3_out] tx3.rehash() assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}]) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) assert_equal(len(self.nodes[1].getrawmempool()), 0) @subtest def advance_to_segwit_active(self): """Mine enough blocks to activate segwit.""" height = self.nodes[0].getblockcount() self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in') self.nodes[0].generate(1) assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active') self.segwit_status = 'active' @subtest def test_p2sh_witness(self): """Test P2SH wrapped witness programs.""" # Prepare the p2sh-wrapped witness output witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) p2wsh_pubkey = CScript([OP_0, witness_hash]) p2sh_witness_hash = hash160(p2wsh_pubkey) script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script # Fund the P2SH output tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) tx.rehash() # Verify mempool acceptance and block validity test_transaction_acceptance(self.nodes[0], 
self.test_node, tx, with_witness=False, accepted=True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True) sync_blocks(self.nodes) # Now test attempts to spend the output. spend_tx = CTransaction() spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig)) spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) spend_tx.rehash() # This transaction should not be accepted into the mempool pre- or # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which # will require a witness to spend a witness program regardless of # segwit activation. Note that older bitcoind's that are not # segwit-aware would also reject this for failing CLEANSTACK. with self.nodes[0].assert_debug_log( expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() with self.nodes[0].assert_debug_log( expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Now put the witness script in the witness, should succeed after # segwit activates. spend_tx.vin[0].scriptSig = script_sig spend_tx.rehash() spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program] # Verify mempool acceptance test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [spend_tx]) # If we're after activation, then sending this with witnesses should be valid. # This no longer works before activation, because SCRIPT_VERIFY_WITNESS # is always set. # TODO: rewrite this test to make clear that it only works after activation. test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Update self.utxo self.utxo.pop(0) self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue)) @subtest def test_witness_commitments(self): """Test witness commitments. This test can only be run after segwit has activated.""" # First try a correct witness commitment. block = self.build_next_block() add_witness_commitment(block) block.solve() # Test the test -- witness serialization should be different assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) # This empty block should be valid. test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Try to tweak the nonce block_2 = self.build_next_block() add_witness_commitment(block_2, nonce=28) block_2.solve() # The commitment should have changed! assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]) # This should also be valid. 
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True) # Now test commitments with actual transactions tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) # Let's construct a witness program witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) tx.rehash() # tx2 will spend tx1, and send back to a regular anyone-can-spend address tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program)) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) # Add an extra OP_RETURN output that matches the witness commitment template, # even though it has extra data after the incorrect commitment. # This block should fail. block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() block_3.solve() test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False) # Add a different commitment with different nonce, but in the # right location, and with some funds burned(!). # This should succeed (nValue shouldn't affect finding the # witness commitment). add_witness_commitment(block_3, nonce=0) block_3.vtx[0].vout[0].nValue -= 1 block_3.vtx[0].vout[-1].nValue += 1 block_3.vtx[0].rehash() block_3.hashMerkleRoot = block_3.calc_merkle_root() block_3.rehash() assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns block_3.solve() test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True) # Finally test that a block with no witness transactions can # omit the commitment. block_4 = self.build_next_block() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program)) tx3.rehash() block_4.vtx.append(tx3) block_4.hashMerkleRoot = block_4.calc_merkle_root() block_4.solve() test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True) # Update available utxo's for use in later test. self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) @subtest def test_block_malleability(self): # Make sure that a block that has too big a virtual size # because of a too-large coinbase witness is not permanently # marked bad. block = self.build_next_block() add_witness_commitment(block) block.solve() block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000) assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE) # We can't send over the p2p network, because this is too big to relay # TODO: repeat this test with a block that can be relayed self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE) self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() == block.hash) # Now make sure that malleating the witness reserved value doesn't # result in a block permanently marked bad. 
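        # The witness reserved value is the single item in the coinbase input's
        # witness stack. It feeds into the witness commitment but not into the block
        # header, so a peer can hand us a header-valid block whose reserved value is
        # wrong; that block hash must not be cached as permanently invalid, because a
        # corrected copy of the same block (same hash) is still acceptable.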
block = self.build_next_block() add_witness_commitment(block) block.solve() # Change the nonce -- should not cause the block to be permanently # failed block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)] test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Changing the witness reserved value doesn't change the block hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] test_witness_block(self.nodes[0], self.test_node, block, accepted=True) @subtest def test_witness_block_size(self): # TODO: Test that non-witness carrying blocks can't exceed 1MB # Skipping this test for now; this is covered in p2p-fullblocktest.py # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. block = self.build_next_block() assert(len(self.utxo) > 0) # Create a P2WSH transaction. # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE. # This should give us plenty of room to tweak the spending tx's # virtual size. NUM_DROPS = 200 # 201 max ops per script! NUM_OUTPUTS = 50 witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE]) witness_hash = uint256_from_str(sha256(witness_program)) script_pubkey = CScript([OP_0, ser_uint256(witness_hash)]) prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) value = self.utxo[0].nValue parent_tx = CTransaction() parent_tx.vin.append(CTxIn(prevout, b"")) child_value = int(value / NUM_OUTPUTS) for i in range(NUM_OUTPUTS): parent_tx.vout.append(CTxOut(child_value, script_pubkey)) parent_tx.vout[0].nValue -= 50000 assert(parent_tx.vout[0].nValue > 0) parent_tx.rehash() child_tx = CTransaction() for i in range(NUM_OUTPUTS): child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] for i in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program] child_tx.rehash() self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) vsize = get_virtual_size(block) additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4 i = 0 while additional_bytes > 0: # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 extra_bytes = min(additional_bytes + 1, 55) block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes) additional_bytes -= extra_bytes i += 1 block.vtx[0].vout.pop() # Remove old commitment add_witness_commitment(block) block.solve() vsize = get_virtual_size(block) assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) # Make sure that our test case would exceed the old max-network-message # limit assert(len(block.serialize(True)) > 2 * 1024 * 1024) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now resize the second transaction to make the block fit. cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1) block.vtx[0].vout.pop() add_witness_commitment(block) block.solve() assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Update available utxo's self.utxo.pop(0) self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) @subtest def test_submit_block(self): """Test that submitblock adds the nonce automatically when possible.""" block = self.build_next_block() # Try using a custom nonce and then don't supply it. 
# This shouldn't possibly work. add_witness_commitment(block, nonce=1) block.vtx[0].wit = CTxWitness() # drop the nonce block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert(self.nodes[0].getbestblockhash() != block.hash) # Now redo commitment with the standard nonce, but let bitcoind fill it in. add_witness_commitment(block, nonce=0) block.vtx[0].wit = CTxWitness() block.solve() self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert_equal(self.nodes[0].getbestblockhash(), block.hash) # This time, add a tx with non-empty witness, but don't supply # the commitment. block_2 = self.build_next_block() add_witness_commitment(block_2) block_2.solve() # Drop commitment and nonce -- submitblock should not fill in. block_2.vtx[0].vout.pop() block_2.vtx[0].wit = CTxWitness() self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True))) # Tip should not advance! assert(self.nodes[0].getbestblockhash() != block_2.hash) @subtest def test_extra_witness_data(self): """Test extra witness data in a transaction.""" block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) # First try extra witness data on a tx that doesn't require a witness tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey)) tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] tx.rehash() self.update_witness_block_with_transactions(block, [tx]) # Extra witness data should not be allowed. test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Try extra signature data. Ok if we're not spending a witness output. block.vtx[1].wit.vtxinwit = [] block.vtx[1].vin[0].scriptSig = CScript([OP_0]) block.vtx[1].rehash() add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Now try extra witness/signature data on an input that DOES require a # witness tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program] tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) # This has extra witness data, so it should fail. test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now get rid of the extra witness, but add extra scriptSig data tx2.vin[0].scriptSig = CScript([OP_TRUE]) tx2.vin[1].scriptSig = CScript([OP_TRUE]) tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) tx2.wit.vtxinwit[1].scriptWitness.stack = [] tx2.rehash() add_witness_commitment(block) block.solve() # This has extra signature data for a witness input, so it should fail. 
test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now get rid of the extra scriptsig on the witness input, and verify # success (even with extra scriptsig data in the non-witness input) tx2.vin[0].scriptSig = b"" tx2.rehash() add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Update utxo for later tests self.utxo.pop(0) self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) @subtest def test_max_witness_push_length(self): """Test that witness stack can only allow up to 520 byte pushes.""" block = self.build_next_block() witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) # First try a 521-byte stack element tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now reduce the length of the stack element tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE) add_witness_commitment(block) block.solve() test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Update the utxo for later tests self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) @subtest def test_max_witness_program_length(self): """Test that witness outputs greater than 10kB can't be spent.""" MAX_PROGRAM_LENGTH = 10000 # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
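        # Worked size check: each 520-byte push serializes as OP_PUSHDATA2 plus a
        # two-byte length plus 520 bytes of data (523 bytes), so 19 pushes take
        # 19 * 523 = 9937 bytes; adding 63 OP_DROPs and a final OP_TRUE (64 one-byte
        # opcodes) gives 10001 bytes, one byte over MAX_PROGRAM_LENGTH.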
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE]) assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1) long_witness_hash = sha256(long_witness_program) long_script_pubkey = CScript([OP_0, long_witness_hash]) block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey)) tx.rehash() tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Try again with one less byte in the witness program witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE]) assert(len(witness_program) == MAX_PROGRAM_LENGTH) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey) tx.rehash() tx2.vin[0].prevout.hash = tx.sha256 tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program] tx2.rehash() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) @subtest def test_witness_input_length(self): """Test that vin length must match vtxinwit length.""" witness_program = CScript([OP_DROP, OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) # Create a transaction that splits our utxo into many outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) value = self.utxo[0].nValue for i in range(10): tx.vout.append(CTxOut(int(value / 10), script_pubkey)) tx.vout[0].nValue -= 1000 assert(tx.vout[0].nValue >= 0) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Try various ways to spend tx that should all break. # This "broken" transaction serializer will not normalize # the length of vtxinwit. 
class BrokenCTransaction(CTransaction): def serialize_with_witness(self): flags = 0 if not self.wit.is_null(): flags |= 1 r = b"" r += struct.pack("<i", self.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(self.vin) r += ser_vector(self.vout) if flags & 1: r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) return r tx2 = BrokenCTransaction() for i in range(10): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE]))) # First try using a too long vtxinwit for i in range(11): tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now try using a too short vtxinwit tx2.wit.vtxinwit.pop() tx2.wit.vtxinwit.pop() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now make one of the intermediate witnesses be incorrect tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program] tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Fix the broken witness and the block should be accepted. tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) self.utxo.pop() self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) @subtest def test_tx_relay_after_segwit_activation(self): """Test transaction relay after segwit activation. After segwit activates, verify that mempool: - rejects transactions with unnecessary/extra witnesses - accepts transactions with valid witnesses and that witness transactions are relayed to non-upgraded peers.""" # Generate a transaction that doesn't require a witness, but send it # with a witness. Should be rejected because we can't use a witness # when spending a non-witness output. tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [b'a'] tx.rehash() tx_hash = tx.sha256 # Verify that unnecessary witnesses are rejected. self.test_node.announce_tx_and_wait_for_getdata(tx) assert_equal(len(self.nodes[0].getrawmempool()), 0) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) # Verify that removing the witness succeeds. self.test_node.announce_tx_and_wait_for_getdata(tx) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) # Now try to add extra witness data to a valid witness tx. 
witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) tx2.rehash() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) # Add too-large for IsStandard witness and check that it does not enter reject filter p2sh_program = CScript([OP_TRUE]) p2sh_pubkey = hash160(p2sh_program) witness_program2 = CScript([b'a' * 400000]) tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]))) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] tx3.rehash() # Node will not be blinded to the transaction self.std_node.announce_tx_and_wait_for_getdata(tx3) test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') self.std_node.announce_tx_and_wait_for_getdata(tx3) test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program] tx3.rehash() test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False) # Get rid of the extra witness, and verify acceptance. tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] # Also check that old_node gets a tx announcement, even though this is # a witness transaction. self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True) self.old_node.wait_for_inv([CInv(1, tx3.sha256)]) # Test that getrawtransaction returns correct witness information # hash, size, vsize raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness()) vsize = math.ceil(weight / 4) assert_equal(raw_tx["vsize"], vsize) assert_equal(raw_tx["weight"], weight) assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii')) assert(vsize != raw_tx["size"]) # Cleanup: mine the transactions and update utxo for next test self.nodes[0].generate(1) assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) @subtest def test_segwit_versions(self): """Test validity of future segwit version transactions. Future segwit version transactions are non-standard, but valid in blocks. 
Can run this before and after segwit activation.""" NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP1, ..., OP_16 if len(self.utxo) < NUM_SEGWIT_VERSIONS: tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS for i in range(NUM_SEGWIT_VERSIONS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) tx.rehash() block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) self.utxo.pop(0) for i in range(NUM_SEGWIT_VERSIONS): self.utxo.append(UTXO(tx.sha256, i, split_value)) sync_blocks(self.nodes) temp_utxo = [] tx = CTransaction() witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) assert_equal(len(self.nodes[1].getrawmempool()), 0) for version in list(range(OP_1, OP_16 + 1)) + [OP_0]: # First try to spend to a future version segwit script_pubkey. script_pubkey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)] tx.rehash() test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False) test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True) self.utxo.pop(0) temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) self.nodes[0].generate(1) # Mine all the transactions sync_blocks(self.nodes) assert(len(self.nodes[0].getrawmempool()) == 0) # Finally, verify that version 0 -> version 1 transactions # are non-standard script_pubkey = CScript([CScriptOp(OP_1), witness_hash]) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] tx2.rehash() # Gets accepted to test_node, because standardness of outputs isn't # checked with fRequireStandard test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False) temp_utxo.pop() # last entry in temp_utxo was the output we just spent temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) # Spend everything in temp_utxo back to an OP_TRUE output. tx3 = CTransaction() total_value = 0 for i in temp_utxo: tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE]))) tx3.rehash() # Spending a higher version witness output is not allowed by policy, # even with fRequireStandard=false. test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades") # Building a block with the transaction must be valid, however. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2, tx3]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) sync_blocks(self.nodes) # Add utxo to our list self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) @subtest def test_premature_coinbase_witness_spend(self): block = self.build_next_block() # Change the output of the block to be a witness output. 
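        # The coinbase output created here is still subject to coinbase maturity
        # (100 blocks): the test below mines 98 more blocks, shows the spend is still
        # rejected, then mines one more block, after which the spend is accepted.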
witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) block.vtx[0].vout[0].scriptPubKey = script_pubkey # This next line will rehash the coinbase and update the merkle # root, and solve. self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) spend_tx = CTransaction() spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program] spend_tx.rehash() # Now test a premature spend. self.nodes[0].generate(98) sync_blocks(self.nodes) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) test_witness_block(self.nodes[0], self.test_node, block2, accepted=False) # Advancing one more block should allow the spend. self.nodes[0].generate(1) block2 = self.build_next_block() self.update_witness_block_with_transactions(block2, [spend_tx]) test_witness_block(self.nodes[0], self.test_node, block2, accepted=True) sync_blocks(self.nodes) @subtest def test_uncompressed_pubkey(self): """Test uncompressed pubkey validity in segwit transactions. Uncompressed pubkeys are no longer supported in default relay policy, but (for now) are still valid in blocks.""" # Segwit transactions using uncompressed pubkeys are not accepted # under default policy, but should still pass consensus. key = ECKey() key.generate(False) pubkey = key.get_pubkey().get_bytes() assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey utxo = self.utxo.pop(0) # Test 1: P2WPKH # First create a P2WPKH output that uses an uncompressed pubkey pubkeyhash = hash160(pubkey) script_pkh = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh)) tx.rehash() # Confirm it in a block. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Now try to spend it. Send it to a P2WSH output, which we'll # use in the next test. witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) script_wsh = CScript([OP_0, witness_hash]) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh)) script = get_p2pkh_script(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] tx2.rehash() # Should fail policy test. test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Test 2: P2WSH # Try to spend the P2WSH output created in last test. # Send it to a P2SH(P2WSH) output, which we'll use in the next test. 
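        # P2SH-P2WSH recap: the scriptPubKey commits to HASH160 of the P2WSH
        # script (OP_0 <sha256(witnessScript)>), the scriptSig is a single
        # push of that P2WSH script, and the signature data lives in the
        # witness.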
p2sh_witness_hash = hash160(script_wsh) script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL]) script_sig = CScript([script_wsh]) tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh)) tx3.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx3]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Test 3: P2SH(P2WSH) # Try to spend the P2SH output created in the last test. # Send it to a P2PKH output, which we'll use in the next test. script_pubkey = get_p2pkh_script(pubkeyhash) tx4 = CTransaction() tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig)) tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey)) tx4.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx4]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Test 4: Uncompressed pubkeys should still be valid in non-segwit # transactions. tx5 = CTransaction() tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE]))) (sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL) signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx5.vin[0].scriptSig = CScript([signature, pubkey]) tx5.rehash() # Should pass policy and consensus. test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx5]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) @subtest def test_signature_version_1(self): key = ECKey() key.generate() pubkey = key.get_pubkey().get_bytes() witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) # First create a witness output for use in the tests. tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) tx.rehash() test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True) # Mine this transaction in preparation for following tests. 
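        # The signature-version-1 (BIP143) checks below rely on the sighash
        # committing to the spent output's amount, so signing with the wrong
        # nValue must produce an invalid signature.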
block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) sync_blocks(self.nodes) self.utxo.pop(0) # Test each hashtype prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) for sigflag in [0, SIGHASH_ANYONECANPAY]: for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: hashtype |= sigflag block = self.build_next_block() tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) # Too-large input value sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key) self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Too-small input value sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key) block.vtx.pop() # remove last tx self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now try correct value sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) block.vtx.pop() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) # Test combinations of signature hashes. # Split the utxo into a lot of outputs. # Randomly choose up to 10 to spend, sign with different hashtypes, and # output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times. # Ensure that we've tested a situation where we use SIGHASH_SINGLE with # an input index > number of outputs. NUM_SIGHASH_TESTS = 500 temp_utxos = [] tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS for i in range(NUM_SIGHASH_TESTS): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) for i in range(NUM_SIGHASH_TESTS): temp_utxos.append(UTXO(tx.sha256, i, split_value)) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) block = self.build_next_block() used_sighash_single_out_of_bounds = False for i in range(NUM_SIGHASH_TESTS): # Ping regularly to keep the connection alive if (not i % 100): self.test_node.sync_with_ping() # Choose random number of inputs to use. num_inputs = random.randint(1, 10) # Create a slight bias for producing more utxos num_outputs = random.randint(1, 11) random.shuffle(temp_utxos) assert(len(temp_utxos) > num_inputs) tx = CTransaction() total_value = 0 for i in range(num_inputs): tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) total_value += temp_utxos[i].nValue split_value = total_value // num_outputs for i in range(num_outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) for i in range(num_inputs): # Now try to sign each input, using a random hashtype. 
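                # hashtype ends up as SIGHASH_ALL/NONE/SINGLE (1/2/3),
                # optionally OR'd with SIGHASH_ANYONECANPAY.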
anyonecanpay = 0 if random.randint(0, 1): anyonecanpay = SIGHASH_ANYONECANPAY hashtype = random.randint(1, 3) | anyonecanpay sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) if (hashtype == SIGHASH_SINGLE and i >= num_outputs): used_sighash_single_out_of_bounds = True tx.rehash() for i in range(num_outputs): temp_utxos.append(UTXO(tx.sha256, i, split_value)) temp_utxos = temp_utxos[num_inputs:] block.vtx.append(tx) # Test the block periodically, if we're close to maxblocksize if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) block = self.build_next_block() if (not used_sighash_single_out_of_bounds): self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") # Test the transactions we've added to the block if (len(block.vtx) > 1): self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) # Now test witness version 0 P2PKH transactions pubkeyhash = hash160(pubkey) script_pkh = CScript([OP_0, pubkeyhash]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh)) tx.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) script = get_p2pkh_script(pubkeyhash) sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL # Check that we can't have a scriptSig tx2.vin[0].scriptSig = CScript([signature, pubkey]) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Move the signature to the witness. block.vtx.pop() tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] tx2.vin[0].scriptSig = b"" tx2.rehash() self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) temp_utxos.pop(0) # Update self.utxos for later tests by creating two outputs # that consolidate all the coins in temp_utxos. output_value = sum(i.nValue for i in temp_utxos) // 2 tx = CTransaction() index = 0 # Just spend to our usual anyone-can-spend output tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2 for i in temp_utxos: # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up # the signatures as we go. 
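            # With ANYONECANPAY each signature commits only to its own input,
            # so inputs can be appended one at a time without invalidating
            # the signatures made so far.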
            tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
            tx.wit.vtxinwit.append(CTxInWitness())
            sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
            index += 1
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

        for i in range(len(tx.vout)):
            self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))

    @subtest
    def test_non_standard_witness_blinding(self):
        """Test that unnecessary witnesses in a transaction do not blind the node to that transaction"""

        # Create a p2sh output -- this is so we can pass the standardness
        # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
        # in P2SH).
        p2sh_program = CScript([OP_TRUE])
        p2sh_pubkey = hash160(p2sh_program)
        script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])

        # Now check that unnecessary witnesses can't be used to blind a node
        # to a transaction, e.g. by violating standardness checks.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # We'll add an unnecessary witness to this transaction that would cause
        # it to be non-standard, to test that violating policy with a witness
        # doesn't blind a node to a transaction. Transactions
        # rejected for having a witness shouldn't be added
        # to the rejection cache.
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
        tx2.rehash()
        # This will be rejected due to a policy check:
        # No witness is allowed, since it is not a witness program but a p2sh program
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')

        # If we send without witness, it should be accepted.
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)

        # Now create a new anyone-can-spend utxo for the next test.
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx3.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # Update our utxo list; we spent the first entry.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))

    @subtest
    def test_non_standard_witness(self):
        """Test detection of non-standard P2WSH witness"""
        pad = chr(1).encode('latin-1')

        # Create scripts for tests
        scripts = []
        scripts.append(CScript([OP_DROP] * 100))
        scripts.append(CScript([OP_DROP] * 99))
        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))

        p2wsh_scripts = []

        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))

        # For each script, generate a pair of P2WSH and P2SH-P2WSH outputs.
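        # Outputs are interleaved: even vout indices are native P2WSH, odd
        # indices are the corresponding P2SH-P2WSH wrapper.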
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) for i in scripts: p2wsh = CScript([OP_0, sha256(i)]) p2sh = hash160(p2wsh) p2wsh_scripts.append(p2wsh) tx.vout.append(CTxOut(outputvalue, p2wsh)) tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) tx.rehash() txid = tx.sha256 test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) self.nodes[0].generate(1) sync_blocks(self.nodes) # Creating transactions for tests p2wsh_txs = [] p2sh_txs = [] for i in range(len(scripts)): p2wsh_tx = CTransaction() p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2))) p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) p2wsh_tx.rehash() p2wsh_txs.append(p2wsh_tx) p2sh_tx = CTransaction() p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]]))) p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))]))) p2sh_tx.wit.vtxinwit.append(CTxInWitness()) p2sh_tx.rehash() p2sh_txs.append(p2sh_tx) # Testing native P2WSH # Witness stack size, excluding witnessScript, over 100 is non-standard p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True) # Stack element size over 80 bytes is non-standard p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True) # Standard nodes should accept if element size is not over 80 bytes p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True) # witnessScript size at 3600 bytes is standard p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True) test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True) # witnessScript size at 3601 bytes is non-standard p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard') # Non-standard nodes should accept test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True) # Repeating the same tests with P2SH-P2WSH p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True) p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True) 
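        # The witnessScript size limits apply identically when wrapped in
        # P2SH-P2WSH: 3600 bytes is standard, 3601 bytes is not.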
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True) test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True) p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard') test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True) self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node # Valid but non-standard transactions in a block should be accepted by standard node sync_blocks(self.nodes) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) self.utxo.pop(0) @subtest def test_upgrade_after_activation(self): """Test the behavior of starting up a segwit-aware node after the softfork has activated.""" self.log.info("Testing rejection of block.nVersion < BIP9_TOP_BITS blocks") block = self.build_next_block(version=4) block.solve() resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True))) assert_equal(resp, 'bad-version(0x00000004)') # Restart with the new binary self.stop_node(2) self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"]) connect_nodes(self.nodes[0], 2) sync_blocks(self.nodes) # Make sure that this peer thinks segwit has activated. assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active") # Make sure this peer's blocks match those of node0. height = self.nodes[2].getblockcount() while height >= 0: block_hash = self.nodes[2].getblockhash(height) assert_equal(block_hash, self.nodes[0].getblockhash(height)) assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash)) height -= 1 @subtest def test_witness_sigops(self): """Test sigop counting is correct inside witnesses.""" # Keep this under MAX_OPS_PER_SCRIPT (201) witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF]) witness_hash = sha256(witness_program) script_pubkey = CScript([OP_0, witness_hash]) sigops_per_script = 20 * 5 + 193 * 1 # We'll produce 2 extra outputs, one with a program that would take us # over max sig ops, and one with a program that would exactly reach max # sig ops outputs = (MAX_SIGOP_COST // sigops_per_script) + 2 extra_sigops_available = MAX_SIGOP_COST % sigops_per_script # We chose the number of checkmultisigs/checksigs to make this work: assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT # This script, when spent with the first # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction, # would push us just over the block sigop limit. witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF]) witness_hash_toomany = sha256(witness_program_toomany) script_pubkey_toomany = CScript([OP_0, witness_hash_toomany]) # If we spend this script instead, we would exactly reach our sigop # limit (for witness sigops). 
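        # (MAX_SIGOP_COST // sigops_per_script) * sigops_per_script plus
        # extra_sigops_available sums to exactly MAX_SIGOP_COST.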
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF]) witness_hash_justright = sha256(witness_program_justright) script_pubkey_justright = CScript([OP_0, witness_hash_justright]) # First split our available utxo into a bunch of outputs split_value = self.utxo[0].nValue // outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) for i in range(outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.vout[-2].scriptPubKey = script_pubkey_toomany tx.vout[-1].scriptPubKey = script_pubkey_justright tx.rehash() block_1 = self.build_next_block() self.update_witness_block_with_transactions(block_1, [tx]) test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True) tx2 = CTransaction() # If we try to spend the first n-1 outputs from tx, that should be # too many sigops. total_value = 0 for i in range(outputs - 1): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] total_value += tx.vout[i].nValue tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany] tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) tx2.rehash() block_2 = self.build_next_block() self.update_witness_block_with_transactions(block_2, [tx2]) test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False) # Try dropping the last input in tx2, and add an output that has # too many sigops (contributing to legacy sigop count). checksig_count = (extra_sigops_available // 4) + 1 script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count) tx2.vout.append(CTxOut(0, script_pubkey_checksigs)) tx2.vin.pop() tx2.wit.vtxinwit.pop() tx2.vout[0].nValue -= tx.vout[-2].nValue tx2.rehash() block_3 = self.build_next_block() self.update_witness_block_with_transactions(block_3, [tx2]) test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False) # If we drop the last checksig in this output, the tx should succeed. 
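        # (One fewer legacy OP_CHECKSIG brings the combined legacy + witness
        # sigop cost back within MAX_SIGOP_COST, since legacy sigops are
        # counted at 4x in the sigop-cost metric.)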
block_4 = self.build_next_block() tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1)) tx2.rehash() self.update_witness_block_with_transactions(block_4, [tx2]) test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True) # Reset the tip back down for the next test sync_blocks(self.nodes) for x in self.nodes: x.invalidateblock(block_4.hash) # Try replacing the last input of tx2 to be spending the last # output of tx block_5 = self.build_next_block() tx2.vout.pop() tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright] tx2.rehash() self.update_witness_block_with_transactions(block_5, [tx2]) test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True) # TODO: test p2sh sigop counting def test_superfluous_witness(self): # Serialization of tx that puts witness flag to 3 always def serialize_with_bogus_witness(tx): flags = 3 r = b"" r += struct.pack("<i", tx.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(tx.vin) r += ser_vector(tx.vout) if flags & 1: if (len(tx.wit.vtxinwit) != len(tx.vin)): # vtxinwit must have the same length as vin tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)] for i in range(len(tx.wit.vtxinwit), len(tx.vin)): tx.wit.vtxinwit.append(CTxInWitness()) r += tx.wit.serialize() r += struct.pack("<I", tx.nLockTime) return r class msg_bogus_tx(msg_tx): def serialize(self): return serialize_with_bogus_witness(self.tx) self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5) self.nodes[0].generate(1) unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('rltc')) raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1}) tx = FromHex(CTransaction(), raw) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex()) with self.nodes[0].assert_debug_log(['Superfluous witness record']): self.nodes[0].p2p.send_message(msg_bogus_tx(tx)) self.nodes[0].p2p.sync_with_ping() raw = self.nodes[0].signrawtransactionwithwallet(raw) assert raw['complete'] raw = raw['hex'] tx = FromHex(CTransaction(), raw) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex()) with self.nodes[0].assert_debug_log(['Unknown transaction optional data']): self.nodes[0].p2p.send_message(msg_bogus_tx(tx)) self.nodes[0].p2p.sync_with_ping() if __name__ == '__main__': SegWitTest().main()