ext: stringclasses (9 values)
sha: stringlengths (40 to 40)
content: stringlengths (3 to 1.04M)
py
1a3db0d0d2ffee1f6ff8389f1ef924c449f07dea
#!/usr/bin/env python3

# Example of the uwsgi log line format this script parses:
# {address space usage: 359067648 bytes/342MB} {rss usage: 107823104 bytes/102MB} [pid: 11266|app: 0|req: 99163/885977] 64.208.17.170 () {48 vars in 1249 bytes} [Thu Feb 15 16:28:43 2018] GET /runestone/ajax/getnumonline => generated 16 bytes in 2553 msecs (HTTP/1.1 200) 8 headers in 381 bytes (1 switches on core 0)

import re
import sys

# Capture the timestamp, the ajax endpoint name, and the request time in msecs.
timepat = re.compile(r'.*\[((Mon|Tue|Wed|Thu|Fri|Sat|Sun) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).*\d\d\d\d)\].*/runestone/ajax/(\w+)(\s|\?).*\s(\d+)\s+msecs.*')

logfile = open(sys.argv[1], 'r')

for line in logfile:
    g = timepat.match(line)
    if g:
        print("{},{},{}".format(g.group(1), g.group(4), g.group(6)))
py
1a3db16b48d215109082009c8ef693cea2d21e05
import logging import uuid from django.contrib import auth from django.shortcuts import render from django.shortcuts import redirect from django.contrib import messages from django.conf import settings from apiclient import errors from smtplib import SMTPException from .models import User from mysite import const from mysite import helper logger = logging.getLogger(const.LOGGER_NAME) def login(request): if request.user.is_authenticated: return redirect(settings.LOGIN_REDIRECT_URL) if not request.POST: return render(request, 'login.html', {'type': 'login'}) username = request.POST.get('email', '') password = request.POST.get('password', '') user = auth.authenticate(username=username, password=password) has_user = User.objects.filter(username=username).count() > 0 if not has_user: error_msg = "Don't have this account." elif user is None: error_msg = "Your password was incorrect." elif not user.verified: error_msg = "The account not verified yet." elif not user.is_active: error_msg = "The account has been disabled." else: auth.login(request, user) logger.info("User name: {username} login successful".format(username=username)) return redirect(settings.LOGIN_REDIRECT_URL) messages.error(request, error_msg) return render(request, 'login.html', {'type': 'login'}) def signup(request): username = request.POST.get('email', '') password = request.POST.get('password', '') confirm_password = request.POST.get('confirm_password', '') has_user = User.objects.filter(username=username).count() > 0 if has_user: error_msg = "Already had this account." elif password != confirm_password: error_msg = "Password not matching." else: try: verify_uuid = str(uuid.uuid4()) verify_url = "{scheme}://{host}{verify_url}?verify_uuid={uuid}".format( scheme=request.scheme, host=request.get_host(), verify_url=settings.VERIFY_URL, uuid=verify_uuid ) mail_content = settings.EMAIL_CONTEXT.format( verify_url=verify_url ) gmail_service = helper.get_gmail_service() message = helper.create_message( sender=settings.EMAIL_HOST_USER, to=username, subject=settings.EMAIL_SUBJECT, msgplain=mail_content ) helper.send_message_internal( service=gmail_service, user_id="me", message=message ) messages.success(request, "Sent verify email success.") user = User.objects.create_user( username=username, email=username, password=password, verified=False, verify_uuid=verify_uuid ) user.save() except (SMTPException, errors.HttpError) as err: logger.error("Username: {username}, Error: {err}".format( username=username, err=err )) messages.error(request, "Sent verify email fail.") return render(request, 'login.html', {'type': 'login'}) messages.error(request, error_msg) return render(request, 'login.html', {'type': 'signup'}) def logout(request): auth.logout(request) return redirect(settings.LOGIN_REDIRECT_URL) def verify(request): verify_uuid = request.GET.get('verify_uuid', '') user = User.objects.filter(verify_uuid=verify_uuid).first() if not user: logger.warning("Don't have this account. verify_uuid: {verify_uuid}".format( verify_uuid=verify_uuid )) return redirect(settings.LOGIN_URL) elif user.verified: return redirect(settings.LOGIN_URL) user.verified = True user.save() user.backend = 'django.contrib.auth.backends.ModelBackend' auth.login(request, user) return redirect(settings.LOGIN_REDIRECT_URL) def success(request): return render(request, 'success.html')
py
1a3db2667a27023c96fb9dbcd029167d1ef0b7d6
from django.shortcuts import render
from django.contrib.auth.decorators import login_required

from data_capture.models import SubmittedPriceList


@login_required
def index(request):
    user = request.user
    MAX_RECENT = 5

    total_approved = SubmittedPriceList.objects.filter(
        submitter=user,
        status=SubmittedPriceList.STATUS_APPROVED).count()
    total_unreviewed = SubmittedPriceList.objects.filter(
        submitter=user,
        status=SubmittedPriceList.STATUS_UNREVIEWED).count()
    total_rejected = SubmittedPriceList.objects.filter(
        submitter=user,
        status=SubmittedPriceList.STATUS_REJECTED).count()
    total_submitted = SubmittedPriceList.objects.filter(submitter=user).count()

    recently_approved_price_lists = SubmittedPriceList.objects.filter(
        submitter=user,
        status=SubmittedPriceList.STATUS_APPROVED).order_by(
        '-status_changed_at')[:MAX_RECENT]

    # TODO: this list is actually both newly created and newly modified
    # Should there be a differentiation?
    recently_submitted_price_lists = SubmittedPriceList.objects.filter(
        submitter=user,
        status=SubmittedPriceList.STATUS_UNREVIEWED).order_by(
        '-status_changed_at')[:MAX_RECENT]

    return render(request, 'account.html', {
        'total_approved': total_approved,
        'total_unreviewed': total_unreviewed,
        'total_rejected': total_rejected,
        'total_submitted': total_submitted,
        'recently_approved_price_lists': recently_approved_price_lists,
        'recently_submitted_price_lists': recently_submitted_price_lists,
    })
py
1a3db2bb12577fca26dd27230eaaa24969a06dfa
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class DialignTx(MakefilePackage):
    """DIALIGN-TX: greedy and progressive approaches for segment-based
       multiple sequence alignment"""

    homepage = "http://dialign-tx.gobics.de/"
    url = "http://dialign-tx.gobics.de/DIALIGN-TX_1.0.2.tar.gz"

    version('1.0.2', '8ccfb1d91136157324d1e513f184ca29')

    build_directory = 'source'

    conflicts('%gcc@6:')

    def edit(self, spec, prefix):
        with working_dir(self.build_directory):
            makefile = FileFilter('Makefile')
            makefile.filter(' -march=i686 ', ' ')
            makefile.filter('CC=gcc', 'CC=%s' % spack_cc)

    def install(self, spec, prefix):
        mkdirp(prefix.bin)
        with working_dir(self.build_directory):
            install('dialign-tx', prefix.bin)
            # t-coffee recognizes as dialign-t
            install('dialign-tx', join_path(prefix.bin, 'dialign-t'))
py
1a3db2e8e96d80b5f46d67d0cbc3ad36694adeba
#!/usr/bin/env python # ****************************************************************************** # Copyright 2017-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** from __future__ import print_function import os import glob import json import numpy as np from PIL import Image import math from tqdm import tqdm from collections import OrderedDict import ingest_utils as util from neon.util.persist import get_data_cache_or_nothing def get_ssd_config(img_reshape, inference=False): ssd_config = OrderedDict() ssd_config['batch_size'] = 32 if inference: ssd_config['batch_size'] = 1 ssd_config['block_size'] = 50 ssd_config['cache_directory'] = get_data_cache_or_nothing(subdir='kitti_cache') ssd_config["etl"] = [{ "type": "localization_ssd", "height": img_reshape[0], "width": img_reshape[1], "max_gt_boxes": 500, "class_names": ['__background__', 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc', 'DontCare'] }, { "type": "image", "height": img_reshape[0], "width": img_reshape[1], "channels": 3 }] if not inference: ssd_config["augmentation"] = [{ "type": "image", "batch_samplers": [ { "max_sample": 1, "max_trials": 1 }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"min_jaccard_overlap": 0.1} }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"min_jaccard_overlap": 0.3} }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"min_jaccard_overlap": 0.5} }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"min_jaccard_overlap": 0.7} }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"min_jaccard_overlap": 0.9} }, { "max_sample": 1, "max_trials": 50, "sampler": {"scale": [0.3, 1.0], "aspect_ratio": [0.5, 2.0]}, "sample_constraint": {"max_jaccard_overlap": 1.0, "min_jaccard_overlap": 0.1} } ] }] ssd_config['ssd_config'] = OrderedDict( [('conv4_3', {'min_sizes': 30.0, 'max_sizes': 60.0, 'aspect_ratios': 2.0, 'step': 8, 'normalize': True}), ('fc7', {'min_sizes': 60.0, 'max_sizes': 111.0, 'aspect_ratios': (2.0, 3.0), 'step': 16}), ('conv6_2', {'min_sizes': 111.0, 'max_sizes': 162.0, 'aspect_ratios': (2.0, 3.0), 'step': 32}), ('conv7_2', {'min_sizes': 162.0, 'max_sizes': 213.0, 'aspect_ratios': (2.0, 3.0), 'step': 64}), ('conv8_2', {'min_sizes': 213.0, 'max_sizes': 264.0, 'aspect_ratios': 2.0, 'step': 100}), ('conv9_2', {'min_sizes': 264.0, 'max_sizes': 315.0, 'aspect_ratios': 2.0, 'step': {'step_h': 300, 'step_w': 100}})]) return ssd_config def convert_annot_to_json(path, im_path, out_path, difficult, img_reshape=None): """ Converts the KITTI annotations to json file. 
Uses the below reference for the KITTI dataset: OO representation of label format used in Kitti dataset. Description of fields from Kitti dataset dev kit: (link)[] The label files contain the following information, which can be read and written using the matlab tools (readLabels.m, writeLabels.m) provided within this devkit. All values (numerical or strings) are separated via spaces, each row corresponds to one object. The 15 columns represent: #Values Name Description ---------------------------------------------------------------------------- 1 type Describes the type of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare' 1 truncated Float from 0 (non-truncated) to 1 (truncated), where truncated refers to the object leaving image boundaries 1 occluded Integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded 2 = largely occluded, 3 = unknown 1 alpha Observation angle of object, ranging [-pi..pi] 4 bbox 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates 3 dimensions 3D object dimensions: height, width, length (in meters) 3 location 3D object location x,y,z in camera coordinates (in meters) 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] 1 score Only for results: Float, indicating confidence in detection, needed for p/r curves, higher is better. Arguments: path (string): path to KITTI annotation file im_path (string): path to image out_path (string): path to save the json file difficult (bool): include difficult objects img_reshape (tuple of int): if a tuple of H,W values is given, image will be reshaped """ with open(path) as f: labels = f.readlines() # start empty dictionary annot = {'object': []} # load image im = np.array(Image.open(im_path)) scale, (h, w) = util.get_image_scale(im.shape[:2], img_reshape) c = im.shape[2] annot['size'] = {'depth': c, 'height': h, 'width': w} for label in labels: vals = label.split() typeid = vals[0] truncated = float(vals[1]) occluded = int(vals[2]) bbox = [float(x) for x in vals[4:8]] bbox = util.scale_boxes(bbox, scale) bbox_int = tuple([int(math.floor(x)) for x in bbox]) if typeid == 'DontCare': assert truncated == -1 assert occluded == -1 else: assert occluded in (0, 1, 2, 3) diff = truncated > 0.5 or occluded == 2 # add object to annotation obj = {'bndbox': {'xmin': bbox_int[0], 'ymin': bbox_int[1], 'xmax': bbox_int[2], 'ymax': bbox_int[3]}, 'difficult': diff, 'name': typeid, 'truncated': truncated > 0.5, 'occluded': occluded } if not diff or difficult: annot['object'].append(obj) with open(out_path, 'w') as f: json.dump(annot, f, indent=4) def ingest_kitti(input_dir, out_dir, img_reshape=(300, 994), train_percent=90, overwrite=False, skip_unzip=False): """ Ingests the KITTI dataset. Peforms the following ops: 0. Unzips the files into output directory. 1. Reshapes image to lower resolution (default reshape of 300x994 maintains KITTI image AR) 1. Convert annotations to json format 2. Split the training data into train and validation sets 3. Write manifest file 4. Write configuration file Arguments: input_dir (string): path to folder with KITTI zip files. out_dir (string): path to unzip KITTI data img_reshape (tuple of int): size to reshape image (default = (300, 994)) train_percent (float): percent of data to use for training. overwrite (bool): overwrite existing files """ assert img_reshape is not None, "Target image reshape required." 
hw = '{}x{}'.format(img_reshape[0], img_reshape[1]) zip_files = ['data_object_image_2.zip', 'data_object_label_2.zip'] root_dir = os.path.join(out_dir, 'kitti') train_manifest = os.path.join(root_dir, 'train_{}.csv'.format(hw)) val_manifest = os.path.join(root_dir, 'val_{}.csv'.format(hw)) if os.path.exists(train_manifest) and os.path.exists(val_manifest) and not overwrite: print("Manifest files already found, skipping ingest.") print("Use --overwrite flag to force re-ingest.") return util.make_dir(root_dir) tags = {'trainval': [], 'test': []} if skip_unzip is False: util.unzip_files(zip_files, input_dir, root_dir) img_folder = os.path.join(root_dir, 'training', 'image_2') annot_folder = os.path.join(root_dir, 'training', 'label_2') target_img_folder = os.path.join(root_dir, 'training', 'image_2-converted') target_annot_folder = os.path.join(root_dir, 'training', 'label_2-json') tags = glob.glob(os.path.join(img_folder, '*.png')) tags = [os.path.basename(os.path.splitext(tag)[0]) for tag in tags] assert len(tags) > 0, "No images found in {}".format(img_folder) util.make_dir(target_img_folder) util.make_dir(target_annot_folder) manifest = [] for tag in tqdm(tags): image = os.path.join(img_folder, tag + '.png') annot = os.path.join(annot_folder, tag + '.txt') assert os.path.exists(image), "{} not found.".format(image) assert os.path.exists(annot), "{} not found.".format(annot) target_image = os.path.join(target_img_folder, tag + '.png') target_annot = os.path.join(target_annot_folder, tag + '.json') convert_annot_to_json(annot, image, target_annot, difficult=True, img_reshape=None) util.resize_image(image, target_image, img_reshape=None) manifest.append((target_image, target_annot)) # shuffle files and split into training and validation set. np.random.seed(0) np.random.shuffle(manifest) train_count = (len(manifest) * train_percent) // 100 train = manifest[:train_count] val = manifest[train_count:] util.create_manifest(train_manifest, train, root_dir) util.create_manifest(val_manifest, val, root_dir) # write SSD CONFIG ssd_config = get_ssd_config(img_reshape) ssd_config_path = os.path.join(root_dir, 'kitti_ssd_{}.cfg'.format(hw)) util.write_ssd_config(ssd_config, ssd_config_path, True) # write SSD VAL CONFIG ssd_config_val = get_ssd_config(img_reshape, True) ssd_config_path_val = os.path.join(root_dir, 'kitti_ssd_{}_val.cfg'.format(hw)) util.write_ssd_config(ssd_config_val, ssd_config_path_val, True) config_path = os.path.join(root_dir, 'kitti_{}.cfg'.format(hw)) config = {'manifest': '[train:{}, val:{}]'.format(train_manifest, val_manifest), 'manifest_root': root_dir, 'epochs': 100, 'height': img_reshape[0], 'width': img_reshape[0], 'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path, ssd_config_path_val) } util.write_config(config, config_path) if __name__ == '__main__': from configargparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--input_dir', required=True, help='path to dir with KITTI zip files.') parser.add_argument('--output_dir', required=True, help='path to unzip data.') parser.add_argument('--overwrite', action='store_true', help='overwrite files') parser.add_argument('--training_pct', default=90, help='fraction of data used for training.') parser.add_argument('--skip_unzip', action='store_true', help='skip unzip') args = parser.parse_args() ingest_kitti(args.input_dir, args.output_dir, train_percent=args.training_pct, overwrite=args.overwrite, skip_unzip=args.skip_unzip)
py
1a3db420f0b3562e9f94f29c014053ba9b4a9528
#!/usr/bin/python """Script to create LLVM trunk devel subvolume or snapshot. Creates BTRFS subvolume with trunk git-on-svn client, plus binutils, then runs cmake to set up ninja build. More details at: http://llvm.org/docs/GettingStarted.html http://llvm.org/docs/DeveloperPolicy.html This script has two modes: initial "-r" run to create master subvolume, then "-s" mode to create working snapshot (also kicks off cmake and build in snapshot). Todos: - add build.bootstrap.rel and build.bootstrap.opt build dirs that pick up compilers from build.opt and build.rel """ import getopt import multiprocessing import os import re import sys import script_utils as u # Name of root subvolume flag_subvol = None # Name of snapshot flag_snapshot = None # User flag_user = None # Whether to do binutils build in new snapshot flag_binutils_build = True # Whether configure in snapshot/subvol flag_configure = False # Whether to run ninja in new snapshot flag_run_ninja = True # Echo command before executing flag_echo = True # Dry run mode flag_dryrun = False # SCM flavor: git, svn or git-svn (default: git-svn) flag_scm_flavor = "git-svn" # Default CMake build type flag_cmake_type = "Debug" # Default CMake compiler selection flag_cmake_ccomp = "" # Update master subvolume before snapshotting flag_do_fetch = False # Whether to include clang tools in repo flag_include_tools = True # Whether to include llgo in repo flag_include_llgo = False # Whether to include polly in repo flag_include_polly = False # Whether to include libcxx in repo flag_include_libcxx = False # Run cmake cmds in parallel flag_parallel = True # Place from which to copy binutils flag_binutils_location = None # SSD root or root dir ssdroot = None # If false, no btrfs stuff flag_btrfs = True # Various repositories llvm_rw_svn = "https://[email protected]/svn/llvm-project" llvm_git_on_svn = "https://llvm.org/svn/llvm-project" llvm_ro_svn = "http://llvm.org/svn/llvm-project" binutils_git = "git://sourceware.org/git/binutils-gdb.git" llvm_git = "http://llvm.org/git/llvm.git" clang_git = "http://llvm.org/git/clang.git" clang_tools_git = "http://llvm.org/git/clang-tools-extra.git" llgo_git = "http://llvm.org/git/llgo.git" polly_git = "http://llvm.org/git/polly.git" polly_svn = "http://llvm.org/svn/llvm-project/polly" libcxx_svn = "https://llvm.org/svn/llvm-project/libcxx" libcxx_git = "http://llvm.org/git/libcxx.git" libcxxabi_svn = "https://llvm.org/svn/llvm-project/libcxxabi" libcxxabi_git = "http://llvm.org/git/libcxxabi.git" compiler_rt_svn = "https://llvm.org/svn/llvm-project/compiler-rt" compiler_rt_git = "http://llvm.org/git/compiler-rt.git" # Clang compilers. Must be full path. clang_c_compiler = "/usr/bin/clang-3.9" clang_cxx_compiler = "/usr/bin/clang++-3.9" # Gcc compilers. Must be full path. gcc_c_compiler = "/usr/bin/gcc-4.8" gcc_cxx_compiler = "/usr/bin/g++-4.8" # Coverage related stuff covopt = "--coverage" cov_cmake_ccomp = "" # Table with info on various cmake flavors. Key is build flavor, # value is a direct with various settings. Keys in this dict include: # # cmflav: cmake flavor (ex: Release, Debug, etc). Set to # None for the default. # # ccflav: C/C++ compiler to use for the build. Possible # values include "clang", "gcc", "def" (to skip # explicitly setting CMAKE_C/CXX_COMPILER) and # "bootstrap=%B" where %B is another build dir. # # extra: extra cmake arguments for this build. 
# # early: if present, run cmake for this build at snapshot # creation time # # Notes: # - coverage testing works better with installed "cc" # - release build is done with gcc # legal_tags = {"cmflav": 1, "ccflav": 1, "extra": 1, "early": 1} cmake_flavors = { "opt": {"cmflav": None, "early": 1, "ccflav": "clang", "extra": None}, "dbg": {"cmflav": None, "early": 1, "ccflav": "clang", "extra": ("-DCXX_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG=0 " "-DCMAKE_C_FLAGS=\'-g -O0\' " "-DCMAKE_CXX_FLAGS=\'-g -O0\'")}, "rel": {"cmflav": "Release", "early": 1, "ccflav": "gcc", "extra": None}, "cov": {"cmflav": None, "ccflav": "def", "extra": ("-DCMAKE_C_FLAGS=\'%s\' " "-DCMAKE_CXX_FLAGS=\'%s\'" % (covopt, covopt))}, "clbootstrap.rel": {"cmflav": "Release", "ccflav": "gcc", "extra": "-DCLANG_ENABLE_BOOTSTRAP=On"}, } def docmd(cmd): """Execute a command.""" if flag_echo: sys.stderr.write("executing: " + cmd + "\n") if flag_dryrun: return u.docmd(cmd) def doscmd(cmd): """Execute a command.""" if flag_echo: sys.stderr.write("executing: " + cmd + "\n") if flag_dryrun: return u.doscmd(cmd) def dochdir(thedir): """Switch to dir.""" if flag_echo or flag_dryrun: sys.stderr.write("cd " + thedir + "\n") if flag_dryrun: return try: os.chdir(thedir) except OSError as err: u.error("chdir failed: %s" % err) def do_llvmtool_create(top, tool, pdir, gitloc, svnloc): """Create new sub-repo within llvm/tools or llvm/projects.""" dochdir("%s/llvm/%s" % (top, pdir)) if flag_scm_flavor == "git": doscmd("svn co %s/%s/trunk %s" % (llvm_ro_svn, tool)) else: doscmd("git clone %s %s" % (gitloc, tool)) if flag_scm_flavor == "git-svn": dochdir("%s" % tool) doscmd("git svn init %s/trunk " "--username=%s" % (svnloc, flag_user)) doscmd("git config svn-remote.svn.fetch :refs/remotes/origin/master") doscmd("git svn rebase -l") def do_subvol_create(): """Create new LLVM trunk subvolume if needed.""" sv = "%s/%s" % (ssdroot, flag_subvol) if os.path.exists(sv): u.verbose(1, "subvolume %s already exists, skipping creation" % sv) return here = os.getcwd() if flag_btrfs: docmd("snapshotutil.py mkvol %s" % flag_subvol) else: docmd("mkdir %s" % flag_subvol) dochdir(ssdroot) dochdir(flag_subvol) top = "%s/%s" % (ssdroot, flag_subvol) # First llvm if flag_scm_flavor == "svn": doscmd("svn co %s/llvm/trunk llvm" % llvm_rw_svn) else: doscmd("git clone %s" % llvm_git) if flag_scm_flavor == "git-svn": dochdir("llvm") doscmd("git svn init %s/llvm/trunk " "--username=%s" % (llvm_git_on_svn, flag_user)) doscmd("git config svn-remote.svn.fetch :refs/remotes/origin/master") doscmd("git svn rebase -l") # Next clang dochdir("%s/llvm/tools" % top) if flag_scm_flavor == "svn": doscmd("svn co %s/cfe/trunk clang" % llvm_ro_svn) else: doscmd("git clone %s" % clang_git) if flag_scm_flavor == "git-svn": dochdir("clang") doscmd("git svn init %s/cfe/trunk " "--username=%s" % (llvm_git_on_svn, flag_user)) doscmd("git config svn-remote.svn.fetch :refs/remotes/origin/master") doscmd("git svn rebase -l") # Now clang tools if flag_include_tools: dochdir("%s/llvm/tools/clang/tools" % top) if flag_scm_flavor == "git": doscmd("svn co %s/clang-tools-extra/trunk extra" % llvm_ro_svn) else: doscmd("git clone %s extra" % clang_tools_git) if flag_scm_flavor == "git-svn": dochdir("extra") doscmd("git svn init %s/clang-tools-extra/trunk " "--username=%s" % (llvm_git_on_svn, flag_user)) doscmd("git config svn-remote.svn.fetch :refs/remotes/origin/master") doscmd("git svn rebase -l") # Now llgo if flag_include_llgo: do_llvmtool_create(top, "llgo", "tools", llgo_git, llgo_svn) # Now polly 
if flag_include_polly: do_llvmtool_create(top, "polly", "tools", polly_git, polly_svn) # Now libcxx if flag_include_libcxx: do_llvmtool_create(top, "libcxx", "projects", libcxx_git, libcxx_svn) do_llvmtool_create(top, "libcxxabi", "projects", libcxxabi_git, libcxxabi_svn) # Now compiler-rt do_llvmtool_create(top, "compiler-rt", "projects", compiler_rt_git, compiler_rt_svn) # Now binutils. NB: git clone can be incredibly slow sometimes. # Consider adding --depth 1 maybe? dochdir(top) if flag_binutils_location: doscmd("cp -r %s binutils" % flag_binutils_location) else: doscmd("git clone %s binutils" % binutils_git) dochdir(here) def do_fetch(flavor, where): """Update with svn or git.""" here = os.getcwd() dochdir(where) if flavor == "git": docmd("git fetch") elif flavor == "git-svn": docmd("git fetch") docmd("git svn rebase -l") else: docmd("svn update") dochdir(here) def fetch_in_volume(): """Update subvolume with svn or git.""" top = "%s/%s" % (ssdroot, flag_subvol) dochdir(top) # First binutils (which is only git) do_fetch("git", "binutils") dochdir("llvm") # Next llvm stuff tofind = ".git" if flag_scm_flavor == "svn": tofind = ".svn" lines = u.docmdlines("find . -depth -name %s -print" % tofind) for line in lines: do_fetch(flag_scm_flavor, line.strip()) dochdir(top) def bootstrap_tooldir(flav): """Return tool directory for bootstrap build.""" fd = cmake_flavors[flav] ccflav = fd["ccflav"] rx = re.compile(r"^bootstrap\.(\S+)$") m = rx.match(ccflav) if not m: return None tb = m.group(1) tbdir = "%s/%s/%s" % (ssdroot, flag_subvol, tb) return tbdir def select_cmake_type(flav): """Return cmake type for build.""" fd = cmake_flavors[flav] if "cmflav" not in fd: u.error("internal error: build flavor %s has no cmflav setting" % flav) cmflav = fd["cmflav"] if not cmflav: cmflav = flag_cmake_type return cmflav def select_cmake_extras(flav): """Return cmake extras for build.""" fd = cmake_flavors[flav] if "extra" not in fd: return "" cmflav = fd["extra"] if not cmflav: return "" return cmflav def select_dyld_library_path(flav): """Return DYLD_LIBRARY_PATH for cmake if needed.""" tbdir = bootstrap_tooldir(flav) if not tbdir: return "" return "env DYLD_LIBRARY_PATH=%s/lib" % tbdir def select_compiler_flavor(flav): """Returns string with cmake compiler setup.""" extrastuff = "" if flav not in cmake_flavors: u.error("internal error -- flavor %s not in cmake_flavors" % flav) fd = cmake_flavors[flav] if "ccflav" not in fd: u.error("internal error: build flavor %s has no ccflav setting" % flav) ccflav = fd["ccflav"] tbdir = bootstrap_tooldir(flav) if ccflav == "gcc": build_c_compiler = gcc_c_compiler build_cxx_compiler = gcc_cxx_compiler elif ccflav == "clang": build_c_compiler = clang_c_compiler build_cxx_compiler = clang_cxx_compiler elif ccflav == "def": return "" elif tbdir: build_c_compiler = "%s/bin/clang" % tbdir build_cxx_compiler = "%s/bin/clang++" % tbdir extrastuff = ("-DCMAKE_RANLIB=%s/bin/llvm-ranlib " "-DCMAKE_AR=%s/bin/llvm-ar " % (tbdir, tbdir)) else: u.error("internal error -- bad ccflav setting %s" % ccflav) return ("%s-DCMAKE_C_COMPILER=%s -DCMAKE_ASM_COMPILER=%s " "-DCMAKE_CXX_COMPILER=%s" % (extrastuff, build_c_compiler, build_c_compiler, build_cxx_compiler)) def emit_cmake_cmd_script(flav, targdir): """Emit/archive cmake cmds for flav.""" bpath = ("LLVM_BINUTILS_INCDIR=%s/%s" "/binutils/include" % (ssdroot, targdir)) u.verbose(0, "...kicking off cmake for %s in parallel..." 
% flav) dyldsetting = select_dyld_library_path(flav) ccomp = select_compiler_flavor(flav) cmake_type = select_cmake_type(flav) extra = select_cmake_extras(flav) limitlink = "LLVM_PARALLEL_LINK_JOBS=8" cmake_cmd = ("%s cmake -D%s -DCMAKE_BUILD_TYPE=%s -D%s %s %s -G Ninja " "../llvm" % (dyldsetting, limitlink, cmake_type, bpath, ccomp, extra)) if flag_dryrun: print "+++ archiving cmake cmd: %s" % cmake_cmd else: try: with open("./.cmake_cmd", "w") as wf: wf.write(cmake_cmd) wf.write("\n") except IOError: u.error("open/write failed for .cmake_cmd") return cmake_cmd def emit_rebuild_scripts(flav, targdir): """Emit top-level clean, rebuild scripts.""" bpath = "%s/%s/build.%s" % (ssdroot, targdir, flav) if flag_dryrun: print "+++ archiving clean + build cmds" return # Emit clean script try: with open("./.clean.sh", "w") as wf: wf.write("#!/bin/sh\n") wf.write("set -e\n") wf.write("cd %s || exit 9\n" % bpath) wf.write("cd ../binutils-build\n") wf.write("echo ... cleaning binutils-build\n") wf.write("make clean 1> ../build.%s/.clean.err 2>&1\n" % flav) wf.write("echo ... cleaning llvm\n") wf.write("cd ../build.%s\n" % flav) wf.write("ninja clean 1>> .clean.err 2>&1\n") wf.write("exit 0\n") except IOError: u.error("open/write failed for .clean.sh") # Emit build-all script try: with open("./.build-all.sh", "w") as wf: wf.write("#!/bin/sh\n") wf.write("set -e\n") wf.write("cd %s || exit 9\n" % bpath) wf.write("cd ../binutils-build\n") wf.write("echo ... running make in binutils-build\n") wf.write("NP=`nproc`\n") wf.write("make -j${NP} 1> ../build.%s/.binutils-build.err 2>&1\n" % flav) wf.write("make -j${NP} all-gold 1> " "../build.%s/.binutils-build.err 2>&1\n" % flav) wf.write("cd ../build.%s\n" % flav) wf.write("echo ... running ninja build\n") wf.write("ninja\n") wf.write("exit 0\n") except IOError: u.error("open/write failed for .build-all.sh") # Emit clean-and-build-all script try: with open("./.clean-and-build-all.sh", "w") as wf: wf.write("#!/bin/sh\n") wf.write("set -e\n") wf.write("cd %s || exit 9\n" % bpath) wf.write("sh ./.clean.sh\n") wf.write("sh ./.build-all.sh\n") wf.write("exit 0\n") except IOError: u.error("open/write failed for .cmake_cmd") def do_configure_binutils(targdir): """Create binutils bin dir and run configure.""" dochdir(ssdroot) dochdir(targdir) docmd("mkdir binutils-build") dochdir("binutils-build") doscmd("../binutils/configure --enable-gold " "--enable-plugins --disable-werror") dochdir("..") def run_cmake(builddir, cmake_cmd): """Cmake run helper.""" try: os.chdir(builddir) except OSError as err: u.warning("chdir failed: %s" % err) return 1 rv = u.doscmd(cmake_cmd, True) if not rv: u.warning("cmd command returned bad status: %s" % cmake_cmd) return 1 return 0 def do_setup_cmake(targdir): """Run cmake in each of the bin dirs.""" dochdir(ssdroot) dochdir(targdir) pool = None if flag_parallel: nworkers = len(cmake_flavors) pool = multiprocessing.Pool(processes=nworkers) results = [] for flav in cmake_flavors: docmd("mkdir build.%s" % flav) dochdir("build.%s" % flav) emit_rebuild_scripts(flav, targdir) cmake_cmd = emit_cmake_cmd_script(flav, targdir) if flag_parallel and not flag_dryrun: u.verbose(0, "...kicking off cmake for %s in parallel..." 
% flav) builddir = "%s/%s/build.%s" % (ssdroot, targdir, flav) r = pool.apply_async(run_cmake, [builddir, cmake_cmd]) results.append(r) else: doscmd(cmake_cmd) dochdir("..") nr = len(results) rc = 0 for idx in range(0, nr): r = results[idx] u.verbose(1, "waiting on result %d" % idx) res = r.get(timeout=600) if res != 0: rc = 1 if rc: u.error("one or more cmake cmds failed") def do_snapshot_create(): """Create new LLVM trunk snapshot.""" if flag_do_fetch: fetch_in_volume() if flag_btrfs: docmd("snapshotutil.py mksnap %s %s" % (flag_subvol, flag_snapshot)) def do_configure(): """Run configure/setup/cmake in snapshot or subvol.""" if flag_do_fetch: fetch_in_volume() dochdir(ssdroot) targdir = flag_subvol if flag_snapshot: targdir = flag_snapshot do_configure_binutils(targdir) do_setup_cmake(targdir) def do_build(): """Perform build in snapshot or subvol.""" dochdir(ssdroot) if flag_snapshot: dochdir(flag_snapshot) else: dochdir(flag_subvol) if flag_binutils_build: dochdir("binutils-build") nworkers = multiprocessing.cpu_count() doscmd("make -j%d" % nworkers) doscmd("make -j%d all-gold" % nworkers) dochdir("..") else: u.verbose(0, "... binutils build stubbed out") if flag_run_ninja: dochdir("build.opt") docmd("ninja") dochdir("..") else: u.verbose(0, "... ninja build stubbed out") def perform(): """Main driver routine.""" do_subvol_create() if flag_snapshot: do_snapshot_create() if flag_configure: do_configure() do_build() def usage(msgarg): """Print usage and exit.""" me = os.path.basename(sys.argv[0]) if msgarg: sys.stderr.write("error: %s\n" % msgarg) print """\ usage: %s [options] options: -d increase debug msg verbosity level -r R root subvolume is R -s S snapshot is S -c run configure in subvol, not snapshot -n stub out ninja build -N stub out binutils build -q quiet mode (do not echo commands before executing) -S X use SCM flavor X (either git, svn, or git-svn). Def: git-svn -B D copy binutils from dir D instead of performing 'git clone' -D dryrun mode (echo commands but do not execute) -X set default build type to RelWithDebInfo -T avoid setting up clang tools -J run cmake steps serially (default is in parallel) -G include llgo when setting up repo -P include polly when setting up repo -L include libcxx when setting up repo -F run 'git fetch' or 'svn update' in subvolume before creating snapshot -M disable BTRFS (assume regular dirs). Implies -c. 
Example 1: creates new subvolume 'llvm-trunk', no build or configure %s -r llvm-trunk Example 2: snapshot subvol 'llvm-trunk' creating 'llvm-trunk-snap' w/ builds %s -r llvm-trunk -c -s llvm-snap Example 3: snapshot subvol 'llvm-trunk' to create 'llvm-gronk', stubbing out ninja build %s -r llvm-trunk -c -n -s llvm-gronk Example 4: create new subvol, then configure and build there instead of later in snapshot %s -r llvm-trunk -c """ % (me, me, me, me, me) sys.exit(1) def parse_args(): """Command line argument parsing.""" global flag_subvol, flag_snapshot, flag_echo, flag_dryrun, flag_configure global flag_scm_flavor, flag_cmake_type, flag_include_llgo global flag_do_fetch, flag_include_tools, flag_include_polly, flag_parallel global flag_binutils_build, flag_run_ninja, llvm_rw_svn, flag_user global ssdroot, flag_binutils_location, flag_btrfs, flag_include_libcxx try: optlist, args = getopt.getopt(sys.argv[1:], "DPGJB:S:FTLMXqcdnNs:r:") except getopt.GetoptError as err: # unrecognized option usage(str(err)) for opt, arg in optlist: if opt == "-d": u.increment_verbosity() elif opt == "-N": flag_binutils_build = False elif opt == "-n": flag_run_ninja = False elif opt == "-c": flag_configure = True elif opt == "-M": flag_configure = True flag_btrfs = False elif opt == "-B": if os.path.exists(arg) and os.path.isdir(arg): u.verbose(1, "drawing binutils from %s" % arg) flag_binutils_location = arg else: usage("inaccessable/unknown binutils location %s" %arg) elif opt == "-S": if arg != "git" and arg != "svn" and arg != "git-svn": usage("illegal SCM flavor %s" % arg) flag_scm_flavor = arg elif opt == "-q": flag_echo = False elif opt == "-D": flag_dryrun = True elif opt == "-G": flag_include_llgo = True elif opt == "-P": flag_include_polly = True elif opt == "-L": flag_include_libcxx = True elif opt == "-F": flag_do_fetch = True elif opt == "-J": flag_parallel = True elif opt == "-X": flag_cmake_type = "RelWithDebInfo" elif opt == "-T": flag_include_tools = False elif opt == "-r": flag_subvol = arg elif opt == "-s": flag_snapshot = arg if args: usage("unknown extra args") if not flag_subvol: usage("specify subvol name with -r") if flag_snapshot and not flag_subvol: usage("specify subvol name with -r") if not flag_btrfs and flag_snapshot: usage("can't use -s with -M") lines = u.docmdlines("whoami") flag_user = lines[0] if flag_user == "root": u.error("please don't run this script as root") llvm_rw_svn = re.sub("REPLACE_WITH_USER", flag_user, llvm_rw_svn) u.verbose(2, "llvm_rw_svn is: %s" % llvm_rw_svn) # Validate cmake_flavors for tag, d in cmake_flavors.iteritems(): for subtag in d: if subtag not in legal_tags: u.error("internal error: cmake_flavors entry %s " "has unknown tag %s" % (tag, subtag)) # Set ssd root here = os.getcwd() if flag_btrfs: ssdroot = u.determine_btrfs_ssdroot(here) else: ssdroot = here # #...................................................................... # # Main portion of script # parse_args() u.setdeflanglocale() perform() exit(0)
py
1a3db4b2a09568a152e9379708b268bcb9dbb00f
#!/usr/bin/env python import argparse import re import sys from os.path import isfile, join from subprocess import PIPE, TimeoutExpired, run class BaseTask: TIME_LIMIT_SECONDS = 1 SPACES_RE = re.compile(r"\s+", re.M) def __init__(self, continue_on_error=True, only_matching=None): self.continue_on_error = continue_on_error self.only_matching = only_matching self.tested_cases = set() self.passed_cases = set() self.tested_units = set() self.passed_units = set() self.show_all_errors = False def strip_spaces(self, text): return self.SPACES_RE.sub(" ", text.strip()) def read_file_utf8(self, file_name): assert isfile(file_name), f"Não existe o arquivo {file_name}" try: with open(file_name, encoding="utf-8", errors="strict") as f: return f.read() except ValueError: assert False, f"Enconding inválido em {file_name}. Por favor, use UTF-8." except Exception as e: # noqa assert False, f"Falha ao ler arquivo {file_name}: {e}" def compare_stripped(self, left, right): return self.strip_spaces(left) == self.strip_spaces(right) def compare_files(self, out, res): left = self.read_file_utf8(out) right = self.read_file_utf8(res) return self.compare_stripped(left, right) def exists(self, file_name): assert isfile(file_name), f"você deve criar um arquivo {file_name}" def run_binary_inner(self, cmd, stdin, stdout, input): if input is None: p = run( cmd, stdin=stdin, stdout=stdout, encoding="utf8", errors="ignore", timeout=self.TIME_LIMIT_SECONDS, ) else: p = run( cmd, input=input, stdout=stdout, encoding="utf8", errors="ignore", timeout=self.TIME_LIMIT_SECONDS, ) assert p.returncode == 0, f"código de saída é {p.returncode}" return p def run_binary( self, cmd, stdin, stdout, input=None, in_filename=None, out_filename=None, ): cmd_str = " ".join([c if " " not in c and c != "" else f'"{c}"' for c in cmd]) if in_filename: cmd_str += f" < {in_filename}" if out_filename: cmd_str += f" > {out_filename}" if input: cmd_str += f' com entrada "{input}"' try: return self.run_binary_inner(cmd, stdin, stdout, input) except AssertionError as e: assert False, f"falha ao executar {cmd_str} : {e}" except TimeoutExpired: assert ( False ), f"falha ao executar {cmd_str} : tempo limite de {self.TIME_LIMIT_SECONDS}s excedido" def test_one_case(self, script, in_filename_name): out_filename_name = in_filename_name.replace(".in", ".out") res_file_name = in_filename_name.replace(".in", ".res") self.exists(script) with open(in_filename_name) as i, open(out_filename_name, "w") as o: self.run_binary( ["python3", script], i, o, in_filename=in_filename_name, out_filename=out_filename_name, ) assert self.compare_files( out_filename_name, res_file_name ), f'execute: diff "{out_filename_name}" "{res_file_name}"' def test_cases(self, script, in_filename_names, folder="testes"): assert type(in_filename_names) != str, "erro no caso de teste, deveria ser lista de strings" errors = [] for in_filename_name in in_filename_names: in_filename_name = join(folder, in_filename_name) try: self.tested_cases.add(in_filename_name) self.test_one_case(script, in_filename_name) self.passed_cases.add(in_filename_name) print(f" -> {in_filename_name} passou") except AssertionError as e: print(f" -> {in_filename_name} falhou") errors.append(f"{e}") if not self.continue_on_error: break if errors: assert False, "\n -> ".join(errors) def input_output(self, script, input_content, expected_output): self.exists(script) p = self.run_binary(["python3", script], None, PIPE, input=input_content) assert self.compare_stripped( p.stdout, expected_output ), f'para entrada 
"{input_content}", a saída é "{p.stdout.strip()}", mas era esperado "{expected_output}"' def should_test(self, name): if not name.startswith("teste_"): return False if not self.only_matching: return True for pattern in self.only_matching: if pattern in name: return True return False def test_units(self): for name in sorted(dir(self)): if not self.should_test(name): continue print() print(f"Executando {name}...") sys.stderr.flush() sys.stdout.flush() try: test = getattr(self, name) self.tested_units.add(name) test() self.passed_units.add(name) print(f"{name}: OK") except AssertionError as e: print(f"{name}: FALHOU") if "privado" not in name or self.show_all_errors: print(f" -> {e}\n") if not self.continue_on_error: break def case_range(self, input_template, start, end): input_files = [] for i in range(start, end + 1): input_files.append(input_template.format(i)) return input_files class Task(BaseTask): def teste_1_bomdia(self): script = "bomdia.py" self.input_output(script, "Antônio", "Bom dia, Antônio.") self.test_cases(script, ["bomdia.in"]) def teste_2_boanoite(self): script = "boanoite.py" self.test_cases(script, self.case_range("boanoite{}.in", 1, 2)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Testa tarefa.") parser.add_argument("only", nargs="*", help="apenas unidades contendo essa string") parser.add_argument("-c", action="store_true", help="continua mesmo que anteriores falhem") args = parser.parse_args() Task(args.c, args.only).test_units()
py
1a3db5f7e4a715506c034143a0dfb68b797124da
class BaseMapperBase:
    """
    Base class for mapping labels to zero indexed integers
    """

    def map(self, item) -> int:
        """
        Maps the raw label to corresponding zero indexed integer.
        E.g. if the raw labels are "Positive" & "Negative", then the corresponding integers would be 0, 1

        :param item: The raw label to map, e.g. "positive"
        :return: returns the corresponding zero indexed integer, e.g. 1
        """
        raise NotImplementedError

    def reverse_map(self, item: int):
        """
        Reverse maps the integer label to corresponding raw labels.
        E.g. if the integer labels are 0, 1, then the corresponding raw labels are "Positive" & "Negative"

        :param item: The int label to map, e.g. 1
        :return: returns the corresponding raw label, e.g. "Positive"
        """
        raise NotImplementedError

    @property
    def num_classes(self) -> int:
        """
        The total number of unique classes. E.g. if you are performing sentiment analysis for
        positive, negative & neutral, then you would return 3

        :return: The total number of unique classes
        """
        raise NotImplementedError

    @property
    def positive_label(self):
        """
        The raw positive label. Useful for unbalanced datasets when you want to use F-score as the measure.

        :return: The raw positive label, e.g. "positive"
        """
        raise NotImplementedError

    @property
    def positive_label_index(self) -> int:
        """
        The raw positive label index.

        :return: The integer index corresponding to the raw positive_label
        """
        raise NotImplementedError
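For reference, a minimal sketch of how this interface could be filled in follows. The BinarySentimentMapper class and its label set are hypothetical examples introduced here for illustration; they are not part of the original source file or its library.

# Hypothetical concrete mapper for a two-class sentiment task (illustrative only).
class BinarySentimentMapper(BaseMapperBase):

    def __init__(self):
        # Zero-indexed label order defines the integer mapping.
        self._labels = ["negative", "positive"]

    def map(self, item) -> int:
        return self._labels.index(item.lower())

    def reverse_map(self, item: int):
        return self._labels[item]

    @property
    def num_classes(self) -> int:
        return len(self._labels)

    @property
    def positive_label(self):
        return "positive"

    @property
    def positive_label_index(self) -> int:
        return self.map(self.positive_label)

# Usage: BinarySentimentMapper().map("Positive") returns 1; reverse_map(0) returns "negative".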
py
1a3db6d2033f66a1513a211ad097cd8106fca8ea
PATH_ROOT = "/" PATH_CHARTS = "/charts/" PATH_DISCOVER = "/discover/" PATH_PLAY = "/play/" PATH_SEARCH = "/search/" PATH_SEARCH_LEGACY = "/search/query/" PATH_SETTINGS_CACHE_CLEAR = "/settings/cache/clear/" PATH_USER = "/user/"
py
1a3db7490d44cf2b710168e12ddd280ed714df94
import argparse import bs4 import json import io import os import requests import zipfile class Scraper(): """A scraper with which to scrape Scratch projects. Typical usage example: from ccl_scratch_tools import Scraper scraper = Scraper() project = scraper.download_project(555555555) """ def __init__(self, studio_url = None, project_url = None, project_meta_url = None, comments_url = None, user_url = None, studio_meta_url = None): """Initializes scraper with studio and project URLs.""" if studio_url is None: self.STUDIO_URL = "https://api.scratch.mit.edu/studios/{0}/projects?limit=40&offset={1}" else: self.STUDIO_URL = studio_url if studio_meta_url is None: self.STUDIO_META_URL = "https://api.scratch.mit.edu/studios/{0}" else: self.STUDIO_META_URL = studio_meta_url if project_url is None: self.PROJECT_URL = "https://projects.scratch.mit.edu/{0}" else: self.PROJECT_URL = project_url if project_meta_url is None: self.PROJECT_META_URL = "https://api.scratch.mit.edu/projects/{0}" else: self.PROJECT_META_URL = project_meta_url if comments_url is None: self.COMMENTS_URL = "https://scratch.mit.edu/site-api/comments/project/{0}/?page={1}" else: self.COMMENTS_URL = comments_url if user_url is None: self.USER_URL = "https://api.scratch.mit.edu/users/{0}" else: self.USER_URL = user_url def download_project(self, id): """Downloads an individual project JSON and returns it as a Python object. Args: id: An integer Scratch project ID. Returns: A dictionary object representing the Scratch project JSON. Raises: RuntimeError: An error occurred accessing the Scratch API, or the project couldn't be downloaded in/converted to JSON format. """ url = self.PROJECT_URL.format(id) r = requests.get(url) if r.status_code != 200: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) project = "" try: project = r.json() except: # In some cases, a binary archive will download -- handle that if json.decoder.JSONDecodeError: try: f = io.BytesIO(r.content) archive = zipfile.ZipFile(f) if "project.json" in archive.namelist(): proj = archive.read("project.json") project = json.loads(proj.decode("utf-8")) except: raise RuntimeError("Cannot handle format of project {0}".format(id)) return project def download_projects(self, ids, projects_to_studio=dict(), output_directory=None, file_name=None): """Given project IDs, download the JSON files. Args: ids: array-like collection of Scratch project IDs. projects_to_studio: dictionary mapping project IDs to studio IDs. If set, creates subdirectories for each studio. output_directory (str): directory for output; if not set, defaults to current working directory. file_name (str): if set, combines projects into one JSON file with file_name; else creates a separate JSON file for each project. Returns: None. """ if output_directory is None: output_directory = os.getcwd() self.make_dir(output_directory) projects = list() for id in ids: project = self.download_project(id) if len(project) < 1: break if file_name is None: od = output_directory if len(projects_to_studio) > 0: od = "{0}/{1}".format(od, projects_to_studio[id]) self.make_dir(od) with open("{0}/{1}.json".format(od, id), "w") as f: json.dump(project, f) else: projects.append(project) # If projects has at least one item, we should write to a single file if len(projects) > 0 and file_name is not None: with open("{0}/{1}".format(output_directory, file_name), "w") as f: json.dump(projects, f) def get_id(self, url): """Returns the integer ID from a string that may be a URL or an ID. 
Args: url: The string representing the URL, or ID, to be extracted. Returns: An integer ID of a Scratch object, whether a studio or project. In case of error, returns None. """ url = url.rstrip() a = url.rstrip("/") try: return int(a.split("/")[-1]) except: return None def get_ids_from_file(self, filename): """Returns a list of IDs from a newline-separated file. Project/studio link agnostic. Works with links and IDs. Args: filename: String file name of a text file with line-separated URLs or IDs. Returns: A list of integer IDs. Empty if error reading file. """ ids = list() try: ids = list() with open(filename) as f: for l in f.readlines(): ids.append(self.get_id(l)) except: pass return ids def get_project_comments(self, id): """Returns the comments on a given Scratch project. Args: id (int): a Scratch project ID. Returns: A list of dictionaries, each with keys for author, comment, and timestamp. Raises: RuntimeError: An error occurred accessing the Scratch API, or the project doesn't exist. """ # This is all a remastered version of GSE-CCL/scratch-comments comments = list() page = 1 while True: # Call API url = self.COMMENTS_URL.format(id, page) r = requests.get(url) if r.status_code == 404 and page > 1: break elif r.status_code != 200: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) # Use Beautiful Soup to scrape the webpage for comments soup = bs4.BeautifulSoup(r.content, "html.parser") all_comments = soup.select(".comment") if len(all_comments) < 1: break # Go through each comment and clean for comment in all_comments: content = comment.select_one(".content").get_text().strip() if content != "[deleted]": cid = int(comment["data-comment-id"]) user = comment.select_one(".name").get_text().strip() time = comment.select_one(".time")["title"] comments.append({"id": cid, "username": user, "comment": content, "timestamp": time}) page += 1 return comments def get_project_meta(self, id): """Returns the publicly-available metadata about a given Scratch project. Args: id (int): a Scratch project ID. Returns: A dictionary with the entire API response from project meta API endpoint. None if the studio doesn't exist. Raises: RuntimeError: An error occurred accessing the Scratch API. """ url = self.PROJECT_META_URL.format(id) r = requests.get(url) if r.status_code != 200 and r.status_code != 404: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) project = r.json() if "code" in project and project["code"] == "NotFound": return None return project def get_projects_in_studio(self, id): """Returns the set of project IDs contained in a given Scratch studio. Args: id: An integer Scratch studio ID. Returns: A set of project IDs. Raises: RuntimeError: An error occurred accessing the Scratch API. """ offset = 0 project_ids = set() while True: url = self.STUDIO_URL.format(id, offset) r = requests.get(url) if r.status_code != 200: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) # No more projects projects = r.json() if len(projects) < 1: break else: for project in projects: project_ids.add(project["id"]) offset += 40 return project_ids def get_studio_meta(self, id): """Returns the metadata for a given Scratch studio. Args: id: An integer Scratch studio ID. Returns: A dictionary with the studio's metadata. None if the studio doesn't exist. Raises: RuntimeError: An error occurred accessing the Scratch API. 
""" url = self.STUDIO_META_URL.format(id) r = requests.get(url) if r.status_code != 200 and r.status_code != 404: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) studio_meta = r.json() if "code" in studio_meta and studio_meta["code"] == "NotFound": return None return studio_meta def get_user_info(self, username): """Gets a Scratch user's publicly-available information. Args: username (str): the username to look up. Returns: A dictionary with the results of the API call. Raises: RuntimeError: An error occurred accessing the Scratch API, or the user doesn't exist. """ url = self.USER_URL.format(username) r = requests.get(url) if r.status_code != 200: raise RuntimeError("GET {0} failed with status code {1}".format(url, r.status_code)) return r.json() def make_dir(self, path): """Creates a directory given path. Args: path (str): A file path on the current system. Returns: True, if directory was successfully created or already existed. Raises: RuntimeError: Failed to create the directory. """ try: os.mkdir(path) except OSError: if FileExistsError: return True else: raise RuntimeError("Creation of directory '{0}' failed".format(path)) else: return True
py
1a3db7776e41aec651d04f8affa955f3c3ed13fe
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) 2020 The Project U-Ray Authors. # # Use of this source code is governed by a ISC-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/ISC # # SPDX-License-Identifier: ISC import csv import numpy as np from utils import util from utils.clock_utils import MAX_GLOBAL_CLOCKS from prjuray.db import Database def ps8_bufg_pin_map_by_tile(): tiles = {} with open('../ps8_bufg_pin_map.csv') as f: for row in csv.DictReader(f): clock_tiles = row['clock_tiles'].split(' ') assert len(clock_tiles) == 1, (row['pin'], clock_tiles) tile = clock_tiles[0] if tile not in tiles: tiles[tile] = [] tiles[tile].append(row['pin'].split('/')[1]) return tiles def get_ps8_pin_map(): with open('../ps8_pin_map.csv') as f: for row in csv.DictReader(f): yield row['pin'] def print_top(seed): np.random.seed(seed) options_by_tile = {} with open('../permutations.csv') as f: for row in csv.DictReader(f): tile = row['tile'] opt = {} for bufg_idx in range(MAX_GLOBAL_CLOCKS): input_idx = row['bufg{}_input'.format(bufg_idx)] if input_idx == "": continue opt[bufg_idx] = int(input_idx) if tile not in options_by_tile: options_by_tile[tile] = [] options_by_tile[tile].append(opt) ps8_pins = sorted(get_ps8_pin_map()) bus_widths = {} for pin in ps8_pins: parts = pin.split('/')[-1].split('[') if len(parts) == 1: bus_widths[parts[0]] = 1 elif len(parts) == 2: if parts[0] not in bus_widths: bus_widths[parts[0]] = 0 width = int(parts[1][:-1]) + 1 if width > bus_widths[parts[0]]: bus_widths[parts[0]] = width else: assert False, pin tiles = ps8_bufg_pin_map_by_tile() for tile in tiles: tiles[tile].sort() db = Database(util.get_db_root(), util.get_part()) grid = db.grid() bufgs_by_tile = {} for tile in tiles: bufgs_by_tile[tile] = [] gridinfo = grid.gridinfo_at_tilename(tile) for site, site_type in gridinfo.sites.items(): if site_type == 'BUFG_PS': bufgs_by_tile[tile].append(site) for tile in tiles: bufgs_by_tile[tile].sort() assert len(bufgs_by_tile[tile]) == MAX_GLOBAL_CLOCKS, tile opins = [] sinks = [] print(''' module top(); ''') spec_num = util.specn() - 1 for tile in tiles: opts = options_by_tile[tile] if spec_num < len(opts): # Use permutation from permutations.csv opt = opts[spec_num] else: # Use a random permutation. opt = {} bufgs = set(range(MAX_GLOBAL_CLOCKS)) for input_idx in range(len(tiles[tile])): bufg_idx = np.random.choice(sorted(bufgs)) bufgs.remove(bufg_idx) opt[bufg_idx] = input_idx for bufg_idx, input_idx in opt.items(): bufg = bufgs_by_tile[tile][bufg_idx] input_pin = tiles[tile][input_idx] idx = len(opins) print(""" wire bufg_{idx}; (* LOC="{loc}", KEEP, DONT_TOUCH *) BUFG_PS bufg_{idx} ( .I(bufg_{idx}) ); """.format(loc=bufg, idx=idx)) sinks.append('bufg_{idx}'.format(idx=idx)) opins.append(input_pin) busses = set() for pin in opins: busses.add(pin.split('[')[0]) for bus in busses: print('wire [{width}-1:0] {bus};'.format( bus=bus, width=bus_widths[bus])) print('PS8 ps8 (') connections = [] for bus in busses: connections.append(' .{bus}({bus})'.format(bus=bus)) print(',\n'.join(connections)) print('\n);') for pin, sink in zip(opins, sinks): print('assign {sink} = {pin};'.format(pin=pin, sink=sink)) print('endmodule')
py
1a3db7f66c9465ac14454387101841d65706deb3
"""Unittests for W0404 (reimport)""" from __future__ import absolute_import, print_function import sys import xml.etree.ElementTree from xml.etree import ElementTree from email import encoders import email.encoders import sys #pylint: disable=ungrouped-imports,wrong-import-order __revision__ = 0 def no_reimport(): """docstring""" import os #pylint: disable=import-outside-toplevel print(os) def reimport(): """This function contains a reimport.""" import sys #pylint: disable=import-outside-toplevel del sys del sys, ElementTree, xml.etree.ElementTree, encoders, email.encoders
py
1a3db80bfcf336e18a9c5b5c286d5c9b6d8540d7
# -*- coding: utf-8 -*-
from pyramid_oereb.lib.sources import Base
from pyramid_oereb.lib.records.view_service import LegendEntryRecord


class LegendBaseSource(Base):
    """
    Base class for legend entry sources.

    Attributes:
        records (list of pyramid_oereb.lib.records.view_service.LegendEntryRecord): List of legend
            entry records.
    """
    _record_class_ = LegendEntryRecord

    def read(self, params, **kwargs):
        """
        Every legend entry source has to implement a read method. If you want to adapt to your own
        source for legend entries, this is the point where to hook in.

        Args:
            params (pyramid_oereb.views.webservice.Parameter): The parameters of the extract request.
            (kwargs): Arbitrary keyword arguments.
        """
        pass  # pragma: no cover
py
1a3db859fd91056a204192d9d32a7006a2b8a82c
from blazingsql import DataType
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
from pynvml import nvmlInit
from Runner import runTest
from Utils import Execution, gpuMemory, init_context, skip_test

queryType = "Full outer join"


def main(dask_client, drill, dir_data_file, bc, nRals):

    start_mem = gpuMemory.capture_gpu_memory_usage()

    def executionTest():
        tables = ["nation"]
        data_types = [
            DataType.DASK_CUDF,
            DataType.CUDF,
            DataType.CSV,
            DataType.ORC,
            DataType.PARQUET,
        ]  # TODO json

        # Create Tables -----------------------------------------------------
        for fileSchemaType in data_types:
            if skip_test(dask_client, nRals, fileSchemaType, queryType):
                continue
            cs.create_tables(bc, dir_data_file, fileSchemaType, tables=tables)

            # Run Query ------------------------------------------------------
            # Parameter to indicate if it's necessary to order
            # the result sets before comparing them
            worder = 1
            use_percentage = False
            acceptable_difference = 0

            print("==============================")
            print(queryType)
            print("==============================")

            queryId = "TEST_01"
            query = """select n1.n_nationkey as n1key,
                        n2.n_nationkey as n2key,
                        n1.n_nationkey + n2.n_nationkey
                    from nation as n1
                    full outer join nation as n2
                    on n1.n_nationkey = n2.n_nationkey + 6"""
            runTest.run_query(
                bc,
                drill,
                query,
                queryId,
                queryType,
                worder,
                "",
                acceptable_difference,
                use_percentage,
                fileSchemaType,
            )

            queryId = "TEST_02"
            query = """select n1.n_nationkey as n1key,
                        n2.n_nationkey as n2key,
                        n1.n_nationkey + n2.n_nationkey
                    from nation as n1
                    full outer join nation as n2
                    on n1.n_nationkey = n2.n_nationkey + 6
                    where n1.n_nationkey < 10"""
            runTest.run_query(
                bc,
                drill,
                query,
                queryId,
                queryType,
                worder,
                "",
                acceptable_difference,
                use_percentage,
                fileSchemaType,
            )

            queryId = "TEST_03"
            query = """select n1.n_nationkey as n1key,
                        n2.n_nationkey as n2key,
                        n1.n_nationkey + n2.n_nationkey
                    from nation as n1
                    full outer join nation as n2
                    on n1.n_nationkey = n2.n_nationkey + 6
                    where n1.n_nationkey < 10 and n1.n_nationkey > 5"""
            runTest.run_query(
                bc,
                drill,
                query,
                queryId,
                queryType,
                worder,
                "",
                acceptable_difference,
                use_percentage,
                fileSchemaType,
            )

            queryId = "TEST_04"
            query = """select n1.n_nationkey as n1key,
                        n2.n_nationkey as n2key,
                        n1.n_nationkey + n2.n_nationkey
                    from nation as n1
                    full outer join nation as n2
                    on n1.n_nationkey = n2.n_nationkey + 6
                    and n1.n_nationkey + 1 = n2.n_nationkey + 7
                    and n1.n_nationkey + 2 = n2.n_nationkey + 8"""
            runTest.run_query(
                bc,
                drill,
                query,
                queryId,
                queryType,
                worder,
                "",
                acceptable_difference,
                use_percentage,
                fileSchemaType,
            )

            if Settings.execution_mode == ExecutionMode.GENERATOR:
                print("==============================")
                break

    executionTest()

    end_mem = gpuMemory.capture_gpu_memory_usage()
    gpuMemory.log_memory_usage(queryType, start_mem, end_mem)


if __name__ == "__main__":

    Execution.getArgs()
    nvmlInit()
    drill = "drill"  # None

    compareResults = True
    if "compare_results" in Settings.data["RunSettings"]:
        compareResults = Settings.data["RunSettings"]["compare_results"]

    if ((Settings.execution_mode == ExecutionMode.FULL and
         compareResults == "true") or
            Settings.execution_mode == ExecutionMode.GENERATOR):
        # Create Table Drill ------------------------------------------------
        print("starting drill")
        from pydrill.client import PyDrill

        drill = PyDrill(host="localhost", port=8047)
        cs.init_drill_schema(drill,
                             Settings.data["TestSettings"]["dataDirectory"])

    # Create Context For BlazingSQL
    bc, dask_client = init_context()

    nRals = Settings.data["RunSettings"]["nRals"]

    main(dask_client, drill,
         Settings.data["TestSettings"]["dataDirectory"], bc, nRals)

    if Settings.execution_mode != ExecutionMode.GENERATOR:
        runTest.save_log()
        gpuMemory.print_log_gpu_memory()
py
1a3dba9da555fb454d7ed39db2abc74d82cf29fa
"""
This file offers the methods to automatically retrieve the graph G54.

The graph is automatically retrieved from the NetworkRepository repository.

References
---------------------
Please cite the following if you use the data:

```bib
@inproceedings{nr,
    title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
    author={Ryan A. Rossi and Nesreen K. Ahmed},
    booktitle = {AAAI},
    url={http://networkrepository.com},
    year={2015}
}
```
"""
from typing import Dict

from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph  # pylint: disable=import-error


def G54(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/networkrepository",
    version: str = "latest",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the G54 graph.

    The graph is automatically retrieved from the NetworkRepository repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "latest"
        The version of the graph to retrieve.
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of G54 graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @inproceedings{nr,
        title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
        author={Ryan A. Rossi and Nesreen K. Ahmed},
        booktitle = {AAAI},
        url={http://networkrepository.com},
        year={2015}
    }
    ```
    """
    return AutomaticallyRetrievedGraph(
        graph_name="G54",
        repository="networkrepository",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
py
1a3dbafcc18c4f8578be9d6a126a58b5114aea2f
# app server config

# MySQL Database
MASTER_MYSQL_HOST = 'mysql'
MASTER_MYSQL_USER = 'root'
MASTER_MYSQL_PASSWORD = 'password'
MASTER_MYSQL_DB_NAME = 'flask_vue_project_seed'

# Redis Cache
MASTER_REDIS_HOST = 'redis'
MASTER_REDIS_PORT = 6379
py
1a3dbb00cecbc34b01168f36693b2a947c5188c2
# Local imports
from gmprocess.metrics.imt.imt import IMT


class PGA(IMT):
    """Class defining steps and invalid imts, for peak ground acceleration."""

    # making invalid IMCs a class variable because
    # 1) it doesn't change with instances
    # 2) information can now be retrieved without
    #    instantiating first
    _invalid_imcs = []

    def __init__(self, imt, imc, period=None):
        """
        Args:
            imt (string):
                Intensity measurement type.
            imc (string):
                Intensity measurement component.
            period (float):
                Period for fourier amplitude spectra and spectral amplitudes.
                Default is None. Not used by PGA.
        """
        super().__init__(imt, imc, period=None)
        self._steps = {
            'Transform2': 'null_transform',
            'Transform3': 'null_transform',
            'Combination1': 'null_combination',
            'Reduction': 'max',
        }
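A short sketch of how the class above is typically used, assuming gmprocess is installed; the component string passed as imc is illustrative rather than taken from the original file.

print(PGA._invalid_imcs)          # [] -> PGA is defined for every component
pga = PGA('PGA', 'ROTD(50.0)')    # imc value is an assumption for illustration
print(pga._steps['Reduction'])    # 'max': PGA reduces a trace to its peak value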
pyw
1a3dbba4d4e20929a30717e4202086d36b5ffeaf
from Tkinter import *

root = Tk()
widget = Label(None, text="Hello, GUI World!")
widget.pack()
root.title("hello")
widget.mainloop()
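The script above uses the Python 2 module name (Tkinter); a Python 3 equivalent, shown here as a sketch, only needs the lowercase module name.

import tkinter as tk

root = tk.Tk()
root.title("hello")
widget = tk.Label(root, text="Hello, GUI World!")
widget.pack()
root.mainloop()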
py
1a3dbd0249f57a2c83b8faa19689e9dae372dac4
import frappe from frappe import _ def execute(filters=None): columns = get_columns(filters) if filters.summary_based_on_month: month_summary, chart = get_summary_based_on_month(filters) if month_summary: data = month_summary if not filters.summary_based_on_month: chart = {} fee_data = get_fees(filters) if fee_data: data = fee_data return columns, data, chart def get_filter_condtions(filters): conditions = "" if filters.get("company"): conditions += " AND fe.company = %(company)s " if filters.get("academic_year"): conditions += " AND fe.academic_year = %(academic_year)s " conditions += " AND p_en.academic_year = %(academic_year)s " conditions += " AND sg.academic_year = %(academic_year)s " if filters.get("program"): conditions += " AND fe.program = %(program)s " conditions += " AND p_en.program = %(program)s " conditions += " AND sg.program = %(program)s " return conditions def get_columns(filters): if filters.summary_based_on_month: columns = [ {"fieldname": "year", "fieldtype": "Data", "label": _("Year of Due Date")}, {"fieldname": "month", "fieldtype": "Data", "label": _("Month")}, {"fieldname": "program", "fieldtype": "Data", "label": _("Program")}, {"fieldname": "total_amount_to_be_paid", "fieldtype": "Currency", "label": _("Total Fee to be Collected")}, {"fieldname": "total_paid_amount", "fieldtype": "Currency", "label": _("Total Fee Collected")}, {"fieldname": "outstanding_amount", "fieldtype": "Currency", "label": _("Outstanding Amount")} ] else: columns = [ {"fieldname": "academic_year", "fieldtype": "Data", "label": _("Academic Year")}, {"fieldname": "student_name", "fieldtype": "Data", "label": _("Student Name")}, {"fieldname": "program", "fieldtype": "Data", "label": _("Program")}, {"fieldname": "class_name", "fieldtype": "Data", "label": _("Class Name")}, {"fieldname": "student_category", "fieldtype": "Data", "label": _("Fee Category")}, {"fieldname": "total_amount_to_be_paid", "fieldtype": "Currency", "label": _("Fee to be Collected")}, {"fieldname": "1st_installment_paid_amount", "fieldtype": "Currency", "label": _("1st Installment Paid")}, {"fieldname": "2nd_installment_paid_amount", "fieldtype": "Currency", "label": _("2nd Installment Paid")}, {"fieldname": "3rd_installment_paid_amount", "fieldtype": "Currency", "label":_("3rd Installment Paid")}, {"fieldname": "4th_installment_paid_amount", "fieldtype": "Currency", "label": _("4th Installment Paid")}, {"fieldname": "5th_installment_paid_amount", "fieldtype": "Currency", "label": _("5th Installment Paid")}, {"fieldname": "total_paid_amount", "fieldtype": "Currency", "label": _("Total Fee Collected")}, {"fieldname": "outstanding_amount", "fieldtype": "Currency", "label": _("Outstanding Amount")} ] return columns def get_fees(filters): fees_record = [] student_records = [] if not filters.summary_based_on_program: student_details, student_name_list = get_fee_details(filters) first_installment_list = [] second_installment_list = [] third_installment_list = [] fourth_installment_list = [] fifth_installment_list = [] for st_name in student_name_list: total_amount_to_be_paid = total_unpaid_amount = 0 for student_row in student_details: if ( st_name["student"] == student_row["student"] and st_name["program"] == student_row["program"] and st_name["class_name"] == student_row["class_name"] ): total_amount_to_be_paid += student_row["grand_total"] total_unpaid_amount += student_row["outstanding_amount"] if st_name not in first_installment_list: st_name.update({ "paid_amount1": student_row["grand_total"] - 
student_row["outstanding_amount"] }) first_installment_list.append(st_name) elif st_name not in second_installment_list: st_name["paid_amount2"] = student_row["grand_total"] - student_row["outstanding_amount"] second_installment_list.append(st_name) elif st_name not in third_installment_list: st_name.update({ "paid_amount3": student_row["grand_total"] - student_row["outstanding_amount"] }) third_installment_list.append(st_name) elif st_name not in fourth_installment_list: st_name.update({ "paid_amount4": student_row["grand_total"] - student_row["outstanding_amount"] }) fourth_installment_list.append(st_name) else: st_name.update({ "paid_amount5": student_row["grand_total"] - student_row["outstanding_amount"] }) fifth_installment_list.append(st_name) st_name.update({ "total_amount_to_be_paid": total_amount_to_be_paid, "outstanding_amount": total_unpaid_amount }) student_records.append(st_name) for record in student_records: paid_amount = 0 for first in first_installment_list: if (record["student"] == first["student"] and record["program"] == first["program"] and record["class_name"] == first["class_name"] ): record.update({ "1st_installment_paid_amount": first["paid_amount1"], }) paid_amount += first["paid_amount1"] for second in second_installment_list: if (record["student"] == second["student"] and record["program"] == second["program"] and record["class_name"] == second["class_name"] ): record.update({ "2nd_installment_paid_amount": second["paid_amount2"] }) paid_amount += second["paid_amount2"] for third in third_installment_list: if (record["student"] == third["student"] and record["program"] == third["program"] and record["class_name"] == third["class_name"] ): record.update({ "3rd_installment_paid_amount": third["paid_amount3"] }) paid_amount += third["paid_amount3"] for fourth in fourth_installment_list: if (record["student"] == fourth["student"] and record["program"] == fourth["program"] and record["class_name"] == fourth["class_name"] ): record.update({ "4th_installment_paid_amount": fourth["paid_amount4"] }) paid_amount += fourth["paid_amount4"] for fifth in fifth_installment_list: if (record["student"] == fifth["student"] and record["program"] == fifth["program"] and record["class_name"] == fifth["class_name"] ): record.update({ "5th_installment_paid_amount": fifth["paid_amount5"] }) paid_amount += fifth["paid_amount5"] record["total_paid_amount"] = paid_amount fees_record.append(record) return fees_record def get_fee_details(filters): name_list = [] student_list = [] student_details = [] conditions = get_filter_condtions(filters) fee_details = frappe.db.sql(""" SELECT fe.due_date, fe.student, fe.student_name, fe.program, fe.grand_total, fe.outstanding_amount, p_en.student_category, sg.academic_year, sgs.parent FROM `tabFees` fe INNER JOIN `tabProgram Enrollment` p_en ON fe.student = p_en.student LEFT JOIN `tabStudent Group Student` sgs ON sgs.student = fe.student AND sgs.active = 1 LEFT JOIN `tabStudent Group` sg ON sgs.parent = sg.name AND sg.disabled = 0 WHERE fe.docstatus = 1 AND p_en.docstatus = 1 {conditions} ORDER BY fe.due_date asc, fe.student asc """.format(conditions=conditions), filters, as_dict=1) for student in fee_details: txt = student.parent program_class = "" if (student.academic_year != 2020 and "FORM" in txt and "TODDLERS" not in txt): year, stream = txt.split("-") program_class += stream elif (student.academic_year != 2020 and "FORM" not in txt and "TODDLERS" in txt): year, stream = txt.split("-") program_class += stream elif (student.academic_year != 2020 
and "FORM" not in txt and "TODDLERS" not in txt): year, pro, stream = txt.split("-") program_class += pro +' - '+ stream else: program_class += txt student.update({ "class_name": program_class }) student_details.append(student) if student.student not in name_list: name_list.append(student.student) student_list.append(student) return student_details, student_list def get_summary_based_on_month(filters): if filters.summary_based_on_month: chart = {} summary_data = [] conditions = "" if filters.get("company"): conditions += " AND fe.company = %(company)s " if filters.get("academic_year"): conditions += " AND fe.academic_year = %(academic_year)s " conditions += " AND p_en.academic_year = %(academic_year)s " fee_details = frappe.db.sql(""" SELECT YEAR(fe.due_date) as year, MONTHNAME(fe.due_date) AS month, fe.program, SUM(fe.grand_total) AS grand_total, SUM(fe.outstanding_amount) AS outstanding_amount FROM `tabFees` fe INNER JOIN `tabProgram Enrollment` p_en ON fe.student = p_en.student AND fe.program = p_en.program WHERE fe.docstatus = 1 AND p_en.docstatus = 1 {conditions} GROUP BY MONTHNAME(fe.due_date), fe.program ORDER BY YEAR(fe.due_date), MONTHNAME(fe.due_date), fe.program """.format(conditions=conditions), filters, as_dict=1 ) for fee in fee_details: summary_data.append({ "year": fee.year, "month": fee.month, "program": fee.program, "total_paid_amount": fee.grand_total - fee.outstanding_amount, "outstanding_amount": fee.outstanding_amount, "total_amount_to_be_paid": fee.grand_total }) chart = get_chart_data(summary_data) return summary_data, chart def get_chart_data(summary_data): if not summary_data: return labels = [] fees_collected = [] outstanding_amount = [] fees_to_be_collected = [] for entry in summary_data: labels.append(entry.get('program')) fees_collected.append(entry.get('total_paid_amount')) outstanding_amount.append(entry.get('outstanding_amount')) fees_to_be_collected.append(entry.get('total_amount_to_be_paid')) return { 'data': { 'labels': labels, 'datasets': [ { 'name': _('Fee to be Collected'), 'values': fees_to_be_collected }, { 'name': _('Fees Collected'), 'values': fees_collected }, { 'name': _('Outstanding Amount'), 'values': outstanding_amount } ] }, 'type': 'bar' }
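A sketch of invoking the report entry point above directly from a bench console, assuming a site with submitted Fees and Program Enrollment records; all filter values here are illustrative.

filters = frappe._dict({
    "company": "Example School",
    "academic_year": "2021",
    "summary_based_on_month": 1,
})
columns, data, chart = execute(filters)   # month-wise summary rows plus a bar-chart payload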
py
1a3dbd3ef5c3936dc7e8c96d370bb9ccc0d47e75
import torch
import torch.nn as nn


class MNIST_Network(nn.Module):
    def __init__(self):
        super(MNIST_Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2, stride=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(2, stride=2)
        self.fc3 = nn.Linear(7 * 7 * 64, 1024)
        self.relu3 = nn.ReLU()
        self.fc4 = nn.Linear(1024, 10)
        self.softmax4 = nn.Softmax(dim=1)

    # forward pass
    def forward(self, input1):
        x = self.conv1(input1)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        x = x.view(x.size()[0], -1)
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        x = self.softmax4(x)
        return x
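A quick shape check for the network above, assuming standard 1x28x28 MNIST input; the batch size of 4 is arbitrary.

net = MNIST_Network()
dummy = torch.randn(4, 1, 28, 28)   # batch of 4 grayscale 28x28 images
probs = net(dummy)
print(probs.shape)                  # torch.Size([4, 10])
print(probs.sum(dim=1))             # softmax rows each sum to ~1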
py
1a3dbd68066a72384589ac24579e0540b5484a6e
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import math import sys import random import numpy as np import numbers import types import collections import warnings import traceback from paddle.utils import try_import from . import functional as F if sys.version_info < (3, 3): Sequence = collections.Sequence Iterable = collections.Iterable else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable __all__ = [] def _get_image_size(img): if F._is_pil_image(img): return img.size elif F._is_numpy_image(img): return img.shape[:2][::-1] elif F._is_tensor_image(img): return img.shape[1:][::-1] # chw else: raise TypeError("Unexpected type {}".format(type(img))) def _check_input(value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( "If {} is a single number, it must be non negative.".format( name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError("{} values should be between {}".format(name, bound)) else: raise TypeError( "{} should be a single number or a list/tuple with lenght 2.". format(name)) if value[0] == value[1] == center: value = None return value class Compose(object): """ Composes several transforms together use for composing list of transforms together for a dataset transform. Args: transforms (list|tuple): List/Tuple of transforms to compose. Returns: A compose object which is callable, __call__ for this Compose object will call each given :attr:`transforms` sequencely. Examples: .. code-block:: python from paddle.vision.datasets import Flowers from paddle.vision.transforms import Compose, ColorJitter, Resize transform = Compose([ColorJitter(), Resize(size=608)]) flowers = Flowers(mode='test', transform=transform) for i in range(10): sample = flowers[i] print(sample[0].size, sample[1]) """ def __init__(self, transforms): self.transforms = transforms def __call__(self, data): for f in self.transforms: try: data = f(data) except Exception as e: stack_info = traceback.format_exc() print("fail to perform transform [{}] with error: " "{} and stack:\n{}".format(f, e, str(stack_info))) raise e return data def __repr__(self): format_string = self.__class__.__name__ + '(' for t in self.transforms: format_string += '\n' format_string += ' {0}'.format(t) format_string += '\n)' return format_string class BaseTransform(object): """ Base class of all transforms used in computer vision. calling logic: if keys is None: _get_params -> _apply_image() else: _get_params -> _apply_*() for * in keys If you want to implement a self-defined transform method for image, rewrite _apply_* method in subclass. Args: keys (list[str]|tuple[str], optional): Input type. Input is a tuple contains different structures, key is used to specify the type of input. 
For example, if your input is image type, then the key can be None or ("image"). if your input is (image, image) type, then the keys should be ("image", "image"). if your input is (image, boxes), then the keys should be ("image", "boxes"). Current available strings & data type are describe below: - "image": input image, with shape of (H, W, C) - "coords": coordinates, with shape of (N, 2) - "boxes": bounding boxes, with shape of (N, 4), "xyxy" format, the 1st "xy" represents top left point of a box, the 2nd "xy" represents right bottom point. - "mask": map used for segmentation, with shape of (H, W, 1) You can also customize your data types only if you implement the corresponding _apply_*() methods, otherwise ``NotImplementedError`` will be raised. Examples: .. code-block:: python import numpy as np from PIL import Image import paddle.vision.transforms.functional as F from paddle.vision.transforms import BaseTransform def _get_image_size(img): if F._is_pil_image(img): return img.size elif F._is_numpy_image(img): return img.shape[:2][::-1] else: raise TypeError("Unexpected type {}".format(type(img))) class CustomRandomFlip(BaseTransform): def __init__(self, prob=0.5, keys=None): super(CustomRandomFlip, self).__init__(keys) self.prob = prob def _get_params(self, inputs): image = inputs[self.keys.index('image')] params = {} params['flip'] = np.random.random() < self.prob params['size'] = _get_image_size(image) return params def _apply_image(self, image): if self.params['flip']: return F.hflip(image) return image # if you only want to transform image, do not need to rewrite this function def _apply_coords(self, coords): if self.params['flip']: w = self.params['size'][0] coords[:, 0] = w - coords[:, 0] return coords # if you only want to transform image, do not need to rewrite this function def _apply_boxes(self, boxes): idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten() coords = np.asarray(boxes).reshape(-1, 4)[:, idxs].reshape(-1, 2) coords = self._apply_coords(coords).reshape((-1, 4, 2)) minxy = coords.min(axis=1) maxxy = coords.max(axis=1) trans_boxes = np.concatenate((minxy, maxxy), axis=1) return trans_boxes # if you only want to transform image, do not need to rewrite this function def _apply_mask(self, mask): if self.params['flip']: return F.hflip(mask) return mask # create fake inputs fake_img = Image.fromarray((np.random.rand(400, 500, 3) * 255.).astype('uint8')) fake_boxes = np.array([[2, 3, 200, 300], [50, 60, 80, 100]]) fake_mask = fake_img.convert('L') # only transform for image: flip_transform = CustomRandomFlip(1.0) converted_img = flip_transform(fake_img) # transform for image, boxes and mask flip_transform = CustomRandomFlip(1.0, keys=('image', 'boxes', 'mask')) (converted_img, converted_boxes, converted_mask) = flip_transform((fake_img, fake_boxes, fake_mask)) print('converted boxes', converted_boxes) """ def __init__(self, keys=None): if keys is None: keys = ("image", ) elif not isinstance(keys, Sequence): raise ValueError( "keys should be a sequence, but got keys={}".format(keys)) for k in keys: if self._get_apply(k) is None: raise NotImplementedError( "{} is unsupported data structure".format(k)) self.keys = keys # storage some params get from function get_params() self.params = None def _get_params(self, inputs): pass def __call__(self, inputs): """Apply transform on single input data""" if not isinstance(inputs, tuple): inputs = (inputs, ) self.params = self._get_params(inputs) outputs = [] for i in range(min(len(inputs), len(self.keys))): apply_func = 
self._get_apply(self.keys[i]) if apply_func is None: outputs.append(inputs[i]) else: outputs.append(apply_func(inputs[i])) if len(inputs) > len(self.keys): outputs.extend(inputs[len(self.keys):]) if len(outputs) == 1: outputs = outputs[0] else: outputs = tuple(outputs) return outputs def _get_apply(self, key): return getattr(self, "_apply_{}".format(key), None) def _apply_image(self, image): raise NotImplementedError def _apply_boxes(self, boxes): raise NotImplementedError def _apply_mask(self, mask): raise NotImplementedError class ToTensor(BaseTransform): """Convert a ``PIL.Image`` or ``numpy.ndarray`` to ``paddle.Tensor``. Converts a PIL.Image or numpy.ndarray (H x W x C) to a paddle.Tensor of shape (C x H x W). If input is a grayscale image (H x W), it will be converted to a image of shape (H x W x 1). And the shape of output tensor will be (1 x H x W). If you want to keep the shape of output tensor as (H x W x C), you can set data_format = ``HWC`` . Converts a PIL.Image or numpy.ndarray in the range [0, 255] to a paddle.Tensor in the range [0.0, 1.0] if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8. In the other cases, tensors are returned without scaling. Args: data_format (str, optional): Data format of output tensor, should be 'HWC' or 'CHW'. Default: 'CHW'. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray): The input image with shape (H x W x C). - output(np.ndarray): A tensor with shape (C x H x W) or (H x W x C) according option data_format. Returns: A callable object of ToTensor. Examples: .. code-block:: python import numpy as np from PIL import Image import paddle.vision.transforms as T import paddle.vision.transforms.functional as F fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) transform = T.ToTensor() tensor = transform(fake_img) """ def __init__(self, data_format='CHW', keys=None): super(ToTensor, self).__init__(keys) self.data_format = data_format def _apply_image(self, img): """ Args: img (PIL.Image|np.ndarray): Image to be converted to tensor. Returns: Tensor: Converted image. """ return F.to_tensor(img, self.data_format) class Resize(BaseTransform): """Resize the input Image to the given size. Args: size (int|list|tuple): Desired output size. If size is a sequence like (h, w), output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size) interpolation (int|str, optional): Interpolation method. Default: 'bilinear'. when use pil backend, support method are as following: - "nearest": Image.NEAREST, - "bilinear": Image.BILINEAR, - "bicubic": Image.BICUBIC, - "box": Image.BOX, - "lanczos": Image.LANCZOS, - "hamming": Image.HAMMING when use cv2 backend, support method are as following: - "nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, - "area": cv2.INTER_AREA, - "bicubic": cv2.INTER_CUBIC, - "lanczos": cv2.INTER_LANCZOS4 keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A resized image. Returns: A callable object of Resize. Examples: .. 
code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import Resize transform = Resize(size=224) fake_img = Image.fromarray((np.random.rand(100, 120, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, size, interpolation='bilinear', keys=None): super(Resize, self).__init__(keys) assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2) self.size = size self.interpolation = interpolation def _apply_image(self, img): return F.resize(img, self.size, self.interpolation) class RandomResizedCrop(BaseTransform): """Crop the input data to random size and aspect ratio. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 1.33) of the original aspect ratio is made. After applying crop transfrom, the input data will be resized to given size. Args: size (int|list|tuple): Target size of output image, with (height, width) shape. scale (list|tuple): Scale range of the cropped image before resizing, relatively to the origin image. Default: (0.08, 1.0) ratio (list|tuple): Range of aspect ratio of the origin aspect ratio cropped. Default: (0.75, 1.33) interpolation (int|str, optional): Interpolation method. Default: 'bilinear'. when use pil backend, support method are as following: - "nearest": Image.NEAREST, - "bilinear": Image.BILINEAR, - "bicubic": Image.BICUBIC, - "box": Image.BOX, - "lanczos": Image.LANCZOS, - "hamming": Image.HAMMING when use cv2 backend, support method are as following: - "nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, - "area": cv2.INTER_AREA, - "bicubic": cv2.INTER_CUBIC, - "lanczos": cv2.INTER_LANCZOS4 keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A cropped image. Returns: A callable object of RandomResizedCrop. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import RandomResizedCrop transform = RandomResizedCrop(224) fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4, 4. 
/ 3), interpolation='bilinear', keys=None): super(RandomResizedCrop, self).__init__(keys) if isinstance(size, int): self.size = (size, size) else: self.size = size assert (scale[0] <= scale[1]), "scale should be of kind (min, max)" assert (ratio[0] <= ratio[1]), "ratio should be of kind (min, max)" self.scale = scale self.ratio = ratio self.interpolation = interpolation def _get_param(self, image, attempts=10): width, height = _get_image_size(image) area = height * width for _ in range(attempts): target_area = np.random.uniform(*self.scale) * area log_ratio = tuple(math.log(x) for x in self.ratio) aspect_ratio = math.exp(np.random.uniform(*log_ratio)) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if 0 < w <= width and 0 < h <= height: i = random.randint(0, height - h) j = random.randint(0, width - w) return i, j, h, w # Fallback to central crop in_ratio = float(width) / float(height) if in_ratio < min(self.ratio): w = width h = int(round(w / min(self.ratio))) elif in_ratio > max(self.ratio): h = height w = int(round(h * max(self.ratio))) else: # return whole image w = width h = height i = (height - h) // 2 j = (width - w) // 2 return i, j, h, w def _apply_image(self, img): i, j, h, w = self._get_param(img) cropped_img = F.crop(img, i, j, h, w) return F.resize(cropped_img, self.size, self.interpolation) class CenterCrop(BaseTransform): """Crops the given the input data at the center. Args: size (int|list|tuple): Target size of output image, with (height, width) shape. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A cropped image. Returns: A callable object of CenterCrop. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import CenterCrop transform = CenterCrop(224) fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, size, keys=None): super(CenterCrop, self).__init__(keys) if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size def _apply_image(self, img): return F.center_crop(img, self.size) class RandomHorizontalFlip(BaseTransform): """Horizontally flip the input data randomly with a given probability. Args: prob (float, optional): Probability of the input data being flipped. Should be in [0, 1]. Default: 0.5 keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A horiziotal flipped image. Returns: A callable object of RandomHorizontalFlip. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import RandomHorizontalFlip transform = RandomHorizontalFlip(0.5) fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, prob=0.5, keys=None): super(RandomHorizontalFlip, self).__init__(keys) assert 0 <= prob <= 1, "probability must be between 0 and 1" self.prob = prob def _apply_image(self, img): if random.random() < self.prob: return F.hflip(img) return img class RandomVerticalFlip(BaseTransform): """Vertically flip the input data randomly with a given probability. 
Args: prob (float, optional): Probability of the input data being flipped. Default: 0.5 keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A vertical flipped image. Returns: A callable object of RandomVerticalFlip. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import RandomVerticalFlip transform = RandomVerticalFlip() fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, prob=0.5, keys=None): super(RandomVerticalFlip, self).__init__(keys) assert 0 <= prob <= 1, "probability must be between 0 and 1" self.prob = prob def _apply_image(self, img): if random.random() < self.prob: return F.vflip(img) return img class Normalize(BaseTransform): """Normalize the input data with mean and standard deviation. Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform will normalize each channel of the input data. ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` Args: mean (int|float|list|tuple): Sequence of means for each channel. std (int|float|list|tuple): Sequence of standard deviations for each channel. data_format (str, optional): Data format of img, should be 'HWC' or 'CHW'. Default: 'CHW'. to_rgb (bool, optional): Whether to convert to rgb. Default: False. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A normalized array or tensor. Returns: A callable object of Normalize. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import Normalize normalize = Normalize(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], data_format='HWC') fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = normalize(fake_img) print(fake_img.shape) print(fake_img.max, fake_img.max) """ def __init__(self, mean=0.0, std=1.0, data_format='CHW', to_rgb=False, keys=None): super(Normalize, self).__init__(keys) if isinstance(mean, numbers.Number): mean = [mean, mean, mean] if isinstance(std, numbers.Number): std = [std, std, std] self.mean = mean self.std = std self.data_format = data_format self.to_rgb = to_rgb def _apply_image(self, img): return F.normalize(img, self.mean, self.std, self.data_format, self.to_rgb) class Transpose(BaseTransform): """Transpose input data to a target format. For example, most transforms use HWC mode image, while the Neural Network might use CHW mode input tensor. output image will be an instance of numpy.ndarray. Args: order (list|tuple, optional): Target order of input data. Default: (2, 0, 1). keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(np.ndarray|Paddle.Tensor): A transposed array or tensor. If input is a PIL.Image, output will be converted to np.ndarray automatically. Returns: A callable object of Transpose. Examples: .. 
code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import Transpose transform = Transpose() fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.shape) """ def __init__(self, order=(2, 0, 1), keys=None): super(Transpose, self).__init__(keys) self.order = order def _apply_image(self, img): if F._is_tensor_image(img): return img.transpose(self.order) if F._is_pil_image(img): img = np.asarray(img) if len(img.shape) == 2: img = img[..., np.newaxis] return img.transpose(self.order) class BrightnessTransform(BaseTransform): """Adjust brightness of the image. Args: value (float): How much to adjust the brightness. Can be any non negative number. 0 gives the original image keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in brghtness. Returns: A callable object of BrightnessTransform. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import BrightnessTransform transform = BrightnessTransform(0.4) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) """ def __init__(self, value, keys=None): super(BrightnessTransform, self).__init__(keys) self.value = _check_input(value, 'brightness') def _apply_image(self, img): if self.value is None: return img brightness_factor = random.uniform(self.value[0], self.value[1]) return F.adjust_brightness(img, brightness_factor) class ContrastTransform(BaseTransform): """Adjust contrast of the image. Args: value (float): How much to adjust the contrast. Can be any non negative number. 0 gives the original image keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in contrast. Returns: A callable object of ContrastTransform. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import ContrastTransform transform = ContrastTransform(0.4) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) """ def __init__(self, value, keys=None): super(ContrastTransform, self).__init__(keys) if value < 0: raise ValueError("contrast value should be non-negative") self.value = _check_input(value, 'contrast') def _apply_image(self, img): if self.value is None: return img contrast_factor = random.uniform(self.value[0], self.value[1]) return F.adjust_contrast(img, contrast_factor) class SaturationTransform(BaseTransform): """Adjust saturation of the image. Args: value (float): How much to adjust the saturation. Can be any non negative number. 0 gives the original image keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in saturation. Returns: A callable object of SaturationTransform. Examples: .. 
code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import SaturationTransform transform = SaturationTransform(0.4) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) """ def __init__(self, value, keys=None): super(SaturationTransform, self).__init__(keys) self.value = _check_input(value, 'saturation') def _apply_image(self, img): if self.value is None: return img saturation_factor = random.uniform(self.value[0], self.value[1]) return F.adjust_saturation(img, saturation_factor) class HueTransform(BaseTransform): """Adjust hue of the image. Args: value (float): How much to adjust the hue. Can be any number between 0 and 0.5, 0 gives the original image keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with a transform in hue. Returns: A callable object of HueTransform. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import HueTransform transform = HueTransform(0.4) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) """ def __init__(self, value, keys=None): super(HueTransform, self).__init__(keys) self.value = _check_input( value, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False) def _apply_image(self, img): if self.value is None: return img hue_factor = random.uniform(self.value[0], self.value[1]) return F.adjust_hue(img, hue_factor) class ColorJitter(BaseTransform): """Randomly change the brightness, contrast, saturation and hue of an image. Args: brightness (float): How much to jitter brightness. Chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. Should be non negative numbers. contrast (float): How much to jitter contrast. Chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. Should be non negative numbers. saturation (float): How much to jitter saturation. Chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. Should be non negative numbers. hue (float): How much to jitter hue. Chosen uniformly from [-hue, hue]. Should have 0<= hue <= 0.5. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A color jittered image. Returns: A callable object of ColorJitter. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import ColorJitter transform = ColorJitter(0.4, 0.4, 0.4, 0.4) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) """ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, keys=None): super(ColorJitter, self).__init__(keys) self.brightness = brightness self.contrast = contrast self.saturation = saturation self.hue = hue def _get_param(self, brightness, contrast, saturation, hue): """Get a randomized transform to be applied on image. Arguments are same as that of __init__. Returns: Transform which randomly adjusts brightness, contrast and saturation in a random order. 
""" transforms = [] if brightness is not None: transforms.append(BrightnessTransform(brightness, self.keys)) if contrast is not None: transforms.append(ContrastTransform(contrast, self.keys)) if saturation is not None: transforms.append(SaturationTransform(saturation, self.keys)) if hue is not None: transforms.append(HueTransform(hue, self.keys)) random.shuffle(transforms) transform = Compose(transforms) return transform def _apply_image(self, img): """ Args: img (PIL Image): Input image. Returns: PIL Image: Color jittered image. """ transform = self._get_param(self.brightness, self.contrast, self.saturation, self.hue) return transform(img) class RandomCrop(BaseTransform): """Crops the given CV Image at a random location. Args: size (sequence|int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. padding (int|sequence|optional): Optional padding on each border of the image. If a sequence of length 4 is provided, it is used to pad left, top, right, bottom borders respectively. Default: 0. pad_if_needed (boolean|optional): It will pad the image if smaller than the desired size to avoid raising an exception. Default: False. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A random cropped image. Returns: A callable object of RandomCrop. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import RandomCrop transform = RandomCrop(224) fake_img = Image.fromarray((np.random.rand(324, 300, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant', keys=None): super(RandomCrop, self).__init__(keys) if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size self.padding = padding self.pad_if_needed = pad_if_needed self.fill = fill self.padding_mode = padding_mode def _get_param(self, img, output_size): """Get parameters for ``crop`` for a random crop. Args: img (PIL Image): Image to be cropped. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. """ w, h = _get_image_size(img) th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw def _apply_image(self, img): """ Args: img (PIL Image): Image to be cropped. Returns: PIL Image: Cropped image. """ if self.padding is not None: img = F.pad(img, self.padding, self.fill, self.padding_mode) w, h = _get_image_size(img) # pad the width if needed if self.pad_if_needed and w < self.size[1]: img = F.pad(img, (self.size[1] - w, 0), self.fill, self.padding_mode) # pad the height if needed if self.pad_if_needed and h < self.size[0]: img = F.pad(img, (0, self.size[0] - h), self.fill, self.padding_mode) i, j, h, w = self._get_param(img, self.size) return F.crop(img, i, j, h, w) class Pad(BaseTransform): """Pads the given CV Image on all sides with the given "pad" value. Args: padding (int|list|tuple): Padding on each border. If a single int is provided this is used to pad all borders. If list/tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. 
If a list/tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. fill (int|list|tuple): Pixel fill value for constant fill. Default is 0. If a list/tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. ``constant`` means pads with a constant value, this value is specified with fill. ``edge`` means pads with the last value at the edge of the image. ``reflect`` means pads with reflection of image (without repeating the last value on the edge) padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``. ``symmetric`` menas pads with reflection of image (repeating the last value on the edge) padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``. keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A paded image. Returns: A callable object of Pad. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import Pad transform = Pad(2) fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, padding, fill=0, padding_mode='constant', keys=None): assert isinstance(padding, (numbers.Number, list, tuple)) assert isinstance(fill, (numbers.Number, str, list, tuple)) assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] if isinstance(padding, list): padding = tuple(padding) if isinstance(fill, list): fill = tuple(fill) if isinstance(padding, Sequence) and len(padding) not in [2, 4]: raise ValueError( "Padding must be an int or a 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding))) super(Pad, self).__init__(keys) self.padding = padding self.fill = fill self.padding_mode = padding_mode def _apply_image(self, img): """ Args: img (PIL Image): Image to be padded. Returns: PIL Image: Padded image. """ return F.pad(img, self.padding, self.fill, self.padding_mode) class RandomRotation(BaseTransform): """Rotates the image by angle. Args: degrees (sequence or float or int): Range of degrees to select from. If degrees is a number instead of sequence like (min, max), the range of degrees will be (-degrees, +degrees) clockwise order. interpolation (str, optional): Interpolation method. If omitted, or if the image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST according the backend. when use pil backend, support method are as following: - "nearest": Image.NEAREST, - "bilinear": Image.BILINEAR, - "bicubic": Image.BICUBIC when use cv2 backend, support method are as following: - "nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, - "bicubic": cv2.INTER_CUBIC expand (bool|optional): Optional expansion flag. Default: False. If true, expands the output to make it large enough to hold the entire rotated image. If false or omitted, make the output image the same size as the input image. Note that the expand flag assumes rotation around the center and no translation. center (2-tuple|optional): Optional center of rotation. Origin is the upper left corner. Default is the center of the image. 
keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): A rotated image. Returns: A callable object of RandomRotation. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import RandomRotation transform = RandomRotation(90) fake_img = Image.fromarray((np.random.rand(200, 150, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(fake_img.size) """ def __init__(self, degrees, interpolation='nearest', expand=False, center=None, fill=0, keys=None): if isinstance(degrees, numbers.Number): if degrees < 0: raise ValueError( "If degrees is a single number, it must be positive.") self.degrees = (-degrees, degrees) else: if len(degrees) != 2: raise ValueError( "If degrees is a sequence, it must be of len 2.") self.degrees = degrees super(RandomRotation, self).__init__(keys) self.interpolation = interpolation self.expand = expand self.center = center self.fill = fill def _get_param(self, degrees): angle = random.uniform(degrees[0], degrees[1]) return angle def _apply_image(self, img): """ Args: img (PIL.Image|np.array): Image to be rotated. Returns: PIL.Image or np.array: Rotated image. """ angle = self._get_param(self.degrees) return F.rotate(img, angle, self.interpolation, self.expand, self.center, self.fill) class Grayscale(BaseTransform): """Converts image to grayscale. Args: num_output_channels (int): (1 or 3) number of channels desired for output image keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None. Shape: - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C). - output(PIL.Image|np.ndarray|Paddle.Tensor): Grayscale version of the input image. - If output_channels == 1 : returned image is single channel - If output_channels == 3 : returned image is 3 channel with r == g == b Returns: A callable object of Grayscale. Examples: .. code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import Grayscale transform = Grayscale() fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) print(np.array(fake_img).shape) """ def __init__(self, num_output_channels=1, keys=None): super(Grayscale, self).__init__(keys) self.num_output_channels = num_output_channels def _apply_image(self, img): """ Args: img (PIL Image): Image to be converted to grayscale. Returns: PIL Image: Randomly grayscaled image. """ return F.to_grayscale(img, self.num_output_channels)
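A short pipeline sketch combining several of the transforms defined above, following the usage pattern already shown in their docstrings.

import numpy as np
from PIL import Image
from paddle.vision.transforms import Compose, RandomHorizontalFlip, Resize, ToTensor

pipeline = Compose([
    RandomHorizontalFlip(0.5),
    Resize(size=224),
    ToTensor(),   # HWC uint8 image -> CHW float tensor in [0, 1]
])

fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
tensor = pipeline(fake_img)
print(tensor.shape)   # channel-first tensor, smaller edge resized to 224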
py
1a3dbdc61dd30b415a0a945f3618f5b721cb8f46
import numpy as np
import pandas as pd
try:
    import cudf.dataframe as gdf
except ImportError as e:
    print("Failed to import cuDF: " + str(e))
    print("Skipping this test")

from sklearn import datasets
import sys
import unittest
import xgboost as xgb

from regression_test_utilities import run_suite, parameter_combinations, \
    assert_results_non_increasing, Dataset


def get_gdf():
    rng = np.random.RandomState(199)
    n = 50000
    m = 20
    sparsity = 0.25
    X, y = datasets.make_regression(n, m, random_state=rng)
    Xy = (np.ascontiguousarray
          (np.transpose(np.concatenate((X, np.expand_dims(y, axis=1)), axis=1))))

    df = gdf.DataFrame(list(zip(['col%d' % i for i in range(m + 1)], Xy)))
    all_columns = list(df.columns)
    cols_X = all_columns[0:len(all_columns) - 1]
    cols_y = [all_columns[len(all_columns) - 1]]
    return df[cols_X], df[cols_y]


class TestGPU(unittest.TestCase):
    gdf_datasets = [Dataset("GDF", get_gdf, "reg:linear", "rmse")]

    def test_gdf(self):
        variable_param = {'n_gpus': [1], 'max_depth': [10], 'max_leaves': [255],
                          'max_bin': [255],
                          'grow_policy': ['lossguide']}
        for param in parameter_combinations(variable_param):
            param['tree_method'] = 'gpu_hist'
            gpu_results = run_suite(param, num_rounds=20,
                                    select_datasets=self.gdf_datasets)
            assert_results_non_increasing(gpu_results, 1e-2)
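A hedged training sketch using the helper above outside the test harness; converting the cuDF frames to pandas/NumPy first avoids depending on a particular cuDF-aware DMatrix build, and the parameters are illustrative.

X, y = get_gdf()
dtrain = xgb.DMatrix(X.to_pandas(), label=y.to_pandas().values.ravel())
params = {'tree_method': 'gpu_hist', 'max_depth': 10, 'objective': 'reg:linear'}
bst = xgb.train(params, dtrain, num_boost_round=20)
print(bst.eval(dtrain))   # reports train-rmse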
py
1a3dbdd98612670818664dd11d47311b8fd78f66
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
import csv
from io import BytesIO as IO
import xlsxwriter
import pandas as pd


def base(request):
    return render(request, 'login.html', {})


def home(request):
    return render(request, 'home.html', {})


@csrf_exempt
def skillset(request):
    return render(request, 'skillfinder.html', {})


def homepage(request):
    return render(request, 'base.html', {})


def skills(request):
    skillz = request.GET.get('q')
    print(skillz)
    return render(request, 'skills.html', {'skillz': skillz})
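A possible urls.py wiring for the views above; the paths and route names are assumptions, since the original URL configuration is not part of this file.

from django.urls import path
from . import views

urlpatterns = [
    path('', views.base, name='base'),
    path('home/', views.home, name='home'),
    path('skillset/', views.skillset, name='skillset'),
    path('skills/', views.skills, name='skills'),  # expects a ?q=<skill> query parameter
]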
py
1a3dbf3e5dad8e21a11731c58b93a2de2b63d9f7
from unittest import TestCase

from fastapi import FastAPI
from piccolo.table import Table
from piccolo.columns import Varchar, Integer
from piccolo.columns.readable import Readable
from starlette.testclient import TestClient

from piccolo_api.crud.endpoints import PiccoloCRUD
from piccolo_api.fastapi.endpoints import FastAPIWrapper


class Movie(Table):
    name = Varchar(length=100)
    rating = Integer()

    @classmethod
    def get_readable(cls) -> Readable:
        return Readable(template="%s", columns=[cls.name])


app = FastAPI()


FastAPIWrapper(
    root_url="/movies/",
    fastapi_app=app,
    piccolo_crud=PiccoloCRUD(
        table=Movie, read_only=False, allow_bulk_delete=True
    ),
)


class TestOpenAPI(TestCase):
    def setUp(self):
        Movie.create_table(if_not_exists=True).run_sync()

    def tearDown(self):
        Movie.alter().drop_table().run_sync()

    def test_200_response(self):
        client = TestClient(app)
        response = client.get("/openapi.json")
        self.assertEqual(response.status_code, 200)


class TestResponses(TestCase):
    def setUp(self):
        Movie.create_table(if_not_exists=True).run_sync()

    def tearDown(self):
        Movie.alter().drop_table().run_sync()

    def test_get_responses(self):
        Movie(name="Star Wars", rating=93).save().run_sync()

        client = TestClient(app)

        response = client.get("/movies/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {"rows": [{"id": 1, "name": "Star Wars", "rating": 93}]},
        )

        response = client.get("/movies/1/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {"id": 1, "name": "Star Wars", "rating": 93},
        )

        response = client.get("/movies/count/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {"count": 1, "page_size": 15},
        )

        response = client.get("/movies/schema/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {
                "title": "MovieIn",
                "type": "object",
                "properties": {
                    "name": {
                        "title": "Name",
                        "extra": {"help_text": None, "choices": None},
                        "maxLength": 100,
                        "nullable": False,
                        "type": "string",
                    },
                    "rating": {
                        "title": "Rating",
                        "extra": {"help_text": None, "choices": None},
                        "nullable": False,
                        "type": "integer",
                    },
                },
                "help_text": None,
            },
        )

        response = client.get("/movies/ids/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"1": "Star Wars"})

        response = client.get("/movies/new/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {"id": None, "name": "", "rating": 0},
        )

        response = client.get("/movies/references/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json(), {"references": []})
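Since the CRUD wrapper above is mounted with read_only=False, write requests should also be routable; a hedged sketch of a create call follows, where the status-code expectation is an assumption rather than something asserted in the original tests.

client = TestClient(app)
response = client.post("/movies/", json={"name": "Alien", "rating": 90})
print(response.status_code)   # expected to be 201 when the row is created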
py
1a3dbfb4615f21d4bccce24fa0ce578927c89332
from ..utils import Object


class TestUseUpdate(Object):
    """
    Does nothing and ensures that the Update object is used; for testing only. This is an offline method. Can be called before authorization

    Attributes:
        ID (:obj:`str`): ``TestUseUpdate``

    No parameters required.

    Returns:
        Update

    Raises:
        :class:`telegram.Error`
    """
    ID = "testUseUpdate"

    def __init__(self, extra=None, **kwargs):
        self.extra = extra
        pass

    @staticmethod
    def read(q: dict, *args) -> "TestUseUpdate":
        return TestUseUpdate()
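A small sketch exercising only what the class above defines: its request identifier and the read() factory.

req = TestUseUpdate()
print(req.ID)                              # "testUseUpdate"
rebuilt = TestUseUpdate.read({})           # read() ignores the payload and returns a new instance
print(isinstance(rebuilt, TestUseUpdate))  # True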
py
1a3dbfe564e530da0f67156c81e829be0e554307
#
#   Parse tree nodes
#

from __future__ import absolute_import

import cython
cython.declare(sys=object, os=object, copy=object,
               Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
               py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
               StructOrUnionScope=object, PyClassScope=object, CppClassScope=object,
               UtilityCode=object, EncodedString=object, absolute_path_length=cython.Py_ssize_t,
               error_type=object, _py_int_types=object)

import sys, os, copy
from itertools import chain

from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
                     StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from ..Utils import add_metaclass


absolute_path_length = 0

if sys.version_info[0] >= 3:
    _py_int_types = int
else:
    _py_int_types = (int, long)


def relative_position(pos):
    """
    We embed the relative filename in the generated C file, since we
    don't want to have to regenerate and compile all the source code
    whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries.)

    INPUT:
        a position tuple -- (absolute filename, line number, column position)

    OUTPUT:
        relative filename
        line number

    AUTHOR: William Stein
    """
    global absolute_path_length
    if absolute_path_length == 0:
        absolute_path_length = len(os.path.abspath(os.getcwd()))
    return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])


def embed_position(pos, docstring):
    if not Options.embed_pos_in_docstring:
        return docstring
    pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
    if docstring is None:
        # unicode string
        return EncodedString(pos_line)

    # make sure we can encode the filename in the docstring encoding
    # otherwise make the docstring a unicode string
    encoding = docstring.encoding
    if encoding is not None:
        try:
            pos_line.encode(encoding)
        except UnicodeEncodeError:
            encoding = None

    if not docstring:
        # reuse the string encoding of the original docstring
        doc = EncodedString(pos_line)
    else:
        doc = EncodedString(pos_line + u'\n' + docstring)
        doc.encoding = encoding
    return doc


def _analyse_signature_annotation(annotation, env):
    base_type = None
    explicit_pytype = explicit_ctype = False
    if annotation.is_dict_literal:
        for name, value in annotation.key_value_pairs:
            if not name.is_string_literal:
                continue
            if name.value in ('type', b'type'):
                explicit_pytype = True
                if not explicit_ctype:
                    annotation = value
            elif name.value in ('ctype', b'ctype'):
                explicit_ctype = True
                annotation = value
        if explicit_pytype and explicit_ctype:
            warning(annotation.pos, "Duplicate type declarations found in signature annotation")
    arg_type = annotation.analyse_as_type(env)
    if arg_type is not None:
        if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
            warning(annotation.pos,
                    "Python type declaration in signature annotation does not refer to a Python type")
        base_type = CAnalysedBaseTypeNode(
            annotation.pos, type=arg_type, is_arg=True)
    else:
        warning(annotation.pos, "Unknown type declaration found in signature annotation")
    return base_type, arg_type


def write_func_call(func, codewriter_class):
    def f(*args, **kwds):
        if len(args) > 1 and isinstance(args[1], codewriter_class):
            # here we annotate the code with this function call
            # but only if new code is generated
            node, code = args[:2]
            marker = ' /* %s -> %s.%s %s */' % (
                ' ' * code.call_level,
                node.__class__.__name__,
                func.__name__,
                node.pos[1:])
            pristine = code.buffer.stream.tell()
            code.putln(marker)
            start = code.buffer.stream.tell()
            code.call_level += 4
            res = func(*args, **kwds)
            code.call_level -= 4
            if start == code.buffer.stream.tell():
                # no code written => undo writing marker
                code.buffer.stream.truncate(pristine)
            else:
                marker = marker.replace('->', '<-', 1)
                code.putln(marker)
            return res
        else:
            return func(*args, **kwds)
    return f


class VerboseCodeWriter(type):
    # Set this as a metaclass to trace function calls in code.
    # This slows down code generation and makes much larger files.
    def __new__(cls, name, bases, attrs):
        from types import FunctionType
        from .Code import CCodeWriter
        attrs = dict(attrs)
        for mname, m in attrs.items():
            if isinstance(m, FunctionType):
                attrs[mname] = write_func_call(m, CCodeWriter)
        return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)


class CheckAnalysers(type):
    """Metaclass to check that type analysis functions return a node.
    """
    methods = set(['analyse_types',
                   'analyse_expressions',
                   'analyse_target_types'])

    def __new__(cls, name, bases, attrs):
        from types import FunctionType

        def check(name, func):
            def call(*args, **kwargs):
                retval = func(*args, **kwargs)
                if retval is None:
                    print('%s %s %s' % (name, args, kwargs))
                return retval
            return call

        attrs = dict(attrs)
        for mname, m in attrs.items():
            if isinstance(m, FunctionType) and mname in cls.methods:
                attrs[mname] = check(mname, m)
        return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)


def _with_metaclass(cls):
    if DebugFlags.debug_trace_code_generation:
        return add_metaclass(VerboseCodeWriter)(cls)
    #return add_metaclass(CheckAnalysers)(cls)
    return cls


@_with_metaclass
class Node(object):
    #  pos         (string, int, int)   Source file position
    #  is_name     boolean              Is a NameNode
    #  is_literal  boolean              Is a ConstNode

    is_name = 0
    is_none = 0
    is_nonecheck = 0
    is_literal = 0
    is_terminator = 0
    temps = None

    # All descendants should set child_attrs to a list of the attributes
    # containing nodes considered "children" in the tree. Each such attribute
    # can either contain a single node or a list of nodes. See Visitor.py.
    child_attrs = None

    cf_state = None

    # This may be an additional (or 'actual') type that will be checked when
    # this node is coerced to another type. This could be useful to set when
    # the actual type to which it can coerce is known, but you want to leave
    # the type a py_object_type
    coercion_type = None

    def __init__(self, pos, **kw):
        self.pos = pos
        self.__dict__.update(kw)

    gil_message = "Operation"

    nogil_check = None

    def gil_error(self, env=None):
        error(self.pos, "%s not allowed without gil" % self.gil_message)

    cpp_message = "Operation"

    def cpp_check(self, env):
        if not env.is_cpp():
            self.cpp_error()

    def cpp_error(self):
        error(self.pos, "%s only allowed in c++" % self.cpp_message)

    def clone_node(self):
        """Clone the node. This is defined as a shallow copy, except for member lists
        amongst the child attributes (from get_child_accessors) which are also copied.
        Lists containing child nodes are thus seen as a way for the node to hold
        multiple children directly; the list is not treated as a separate level
        in the tree."""
        result = copy.copy(self)
        for attrname in result.child_attrs:
            value = getattr(result, attrname)
            if isinstance(value, list):
                setattr(result, attrname, [x for x in value])
        return result

    #
    #  There are 3 phases of parse tree processing, applied in order to
    #  all the statements in a given scope-block:
    #
    #  (0) analyse_declarations
    #        Make symbol table entries for all declarations at the current
    #        level, both explicit (def, cdef, etc.) and implicit (assignment
    #        to an otherwise undeclared name).
    #
    #  (1) analyse_expressions
    #        Determine the result types of expressions and fill in the
    #        'type' attribute of each ExprNode. Insert coercion nodes into the
    #        tree where needed to convert to and from Python objects.
    #        Allocate temporary locals for intermediate results. Fill
    #        in the 'result_code' attribute of each ExprNode with a C code
    #        fragment.
    #
    #  (2) generate_code
    #        Emit C code for all declarations, statements and expressions.
    #        Recursively applies the 3 processing phases to the bodies of
    #        functions.
    #

    def analyse_declarations(self, env):
        pass

    def analyse_expressions(self, env):
        raise InternalError("analyse_expressions not implemented for %s" % \
            self.__class__.__name__)

    def generate_code(self, code):
        raise InternalError("generate_code not implemented for %s" % \
            self.__class__.__name__)

    def annotate(self, code):
        # mro does the wrong thing
        if isinstance(self, BlockNode):
            self.body.annotate(code)

    def end_pos(self):
        try:
            return self._end_pos
        except AttributeError:
            pos = self.pos
            if not self.child_attrs:
                self._end_pos = pos
                return pos
            for attr in self.child_attrs:
                child = getattr(self, attr)
                # Sometimes lists, sometimes nodes
                if child is None:
                    pass
                elif isinstance(child, list):
                    for c in child:
                        pos = max(pos, c.end_pos())
                else:
                    pos = max(pos, child.end_pos())
            self._end_pos = pos
            return pos

    def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
        """Debug helper method that returns a recursive string representation of this node.
        """
        if cutoff == 0:
            return "<...nesting level cutoff...>"
        if encountered is None:
            encountered = set()
        if id(self) in encountered:
            return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
        encountered.add(id(self))

        def dump_child(x, level):
            if isinstance(x, Node):
                return x.dump(level, filter_out, cutoff-1, encountered)
            elif isinstance(x, list):
                return "[%s]" % ", ".join([dump_child(item, level) for item in x])
            else:
                return repr(x)

        attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
        if len(attrs) == 0:
            return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
        else:
            indent = " " * level
            res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
            for key, value in attrs:
                res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
            res += "%s>" % indent
            return res

    def dump_pos(self, mark_column=False, marker='(#)'):
        """Debug helper method that returns the source code context of this node as a string.
""" if not self.pos: return u'' source_desc, line, col = self.pos contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore') # line numbers start at 1 lines = contents[max(0, line-3):line] current = lines[-1] if mark_column: current = current[:col] + marker + current[col:] lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n' lines += contents[line:line+2] return u'"%s":%d:%d\n%s\n' % ( source_desc.get_escaped_description(), line, col, u''.join(lines)) class CompilerDirectivesNode(Node): """ Sets compiler directives for the children nodes """ # directives {string:value} A dictionary holding the right value for # *all* possible directives. # body Node child_attrs = ["body"] def analyse_declarations(self, env): old = env.directives env.directives = self.directives self.body.analyse_declarations(env) env.directives = old def analyse_expressions(self, env): old = env.directives env.directives = self.directives self.body = self.body.analyse_expressions(env) env.directives = old return self def generate_function_definitions(self, env, code): env_old = env.directives code_old = code.globalstate.directives code.globalstate.directives = self.directives self.body.generate_function_definitions(env, code) env.directives = env_old code.globalstate.directives = code_old def generate_execution_code(self, code): old = code.globalstate.directives code.globalstate.directives = self.directives self.body.generate_execution_code(code) code.globalstate.directives = old def annotate(self, code): old = code.globalstate.directives code.globalstate.directives = self.directives self.body.annotate(code) code.globalstate.directives = old class BlockNode(object): # Mixin class for nodes representing a declaration block. def generate_cached_builtins_decls(self, env, code): entries = env.global_scope().undeclared_cached_builtins for entry in entries: code.globalstate.add_cached_builtin_decl(entry) del entries[:] def generate_lambda_definitions(self, env, code): for node in env.lambda_defs: node.generate_function_definitions(env, code) class StatListNode(Node): # stats a list of StatNode child_attrs = ["stats"] @staticmethod def create_analysed(pos, env, *args, **kw): node = StatListNode(pos, *args, **kw) return node # No node-specific analysis needed def analyse_declarations(self, env): #print "StatListNode.analyse_declarations" ### for stat in self.stats: stat.analyse_declarations(env) def analyse_expressions(self, env): #print "StatListNode.analyse_expressions" ### self.stats = [stat.analyse_expressions(env) for stat in self.stats] return self def generate_function_definitions(self, env, code): #print "StatListNode.generate_function_definitions" ### for stat in self.stats: stat.generate_function_definitions(env, code) def generate_execution_code(self, code): #print "StatListNode.generate_execution_code" ### for stat in self.stats: code.mark_pos(stat.pos) stat.generate_execution_code(code) def annotate(self, code): for stat in self.stats: stat.annotate(code) class StatNode(Node): # # Code generation for statements is split into the following subphases: # # (1) generate_function_definitions # Emit C code for the definitions of any structs, # unions, enums and functions defined in the current # scope-block. # # (2) generate_execution_code # Emit C code for executable statements. 
# def generate_function_definitions(self, env, code): pass def generate_execution_code(self, code): raise InternalError("generate_execution_code not implemented for %s" % \ self.__class__.__name__) class CDefExternNode(StatNode): # include_file string or None # body StatNode child_attrs = ["body"] def analyse_declarations(self, env): if self.include_file: env.add_include_file(self.include_file) old_cinclude_flag = env.in_cinclude env.in_cinclude = 1 self.body.analyse_declarations(env) env.in_cinclude = old_cinclude_flag def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass def annotate(self, code): self.body.annotate(code) class CDeclaratorNode(Node): # Part of a C declaration. # # Processing during analyse_declarations phase: # # analyse # Returns (name, type) pair where name is the # CNameDeclaratorNode of the name being declared # and type is the type it is being declared as. # # calling_convention string Calling convention of CFuncDeclaratorNode # for which this is a base child_attrs = [] calling_convention = "" def analyse_templates(self): # Only C++ functions have templates. return None class CNameDeclaratorNode(CDeclaratorNode): # name string The Cython name being declared # cname string or None C name, if specified # default ExprNode or None the value assigned on declaration child_attrs = ['default'] default = None def analyse(self, base_type, env, nonempty=0): if nonempty and self.name == '': # May have mistaken the name for the type. if base_type.is_ptr or base_type.is_array or base_type.is_buffer: error(self.pos, "Missing argument name") elif base_type.is_void: error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.") else: self.name = base_type.declaration_code("", for_display=1, pyrex=1) base_type = py_object_type if base_type.is_fused and env.fused_to_specific: base_type = base_type.specialize(env.fused_to_specific) self.type = base_type return self, base_type class CPtrDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode child_attrs = ["base"] def analyse(self, base_type, env, nonempty=0): if base_type.is_pyobject: error(self.pos, "Pointer base type cannot be a Python object") ptr_type = PyrexTypes.c_ptr_type(base_type) return self.base.analyse(ptr_type, env, nonempty=nonempty) class CReferenceDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode child_attrs = ["base"] def analyse(self, base_type, env, nonempty=0): if base_type.is_pyobject: error(self.pos, "Reference base type cannot be a Python object") ref_type = PyrexTypes.c_ref_type(base_type) return self.base.analyse(ref_type, env, nonempty=nonempty) class CArrayDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode # dimension ExprNode child_attrs = ["base", "dimension"] def analyse(self, base_type, env, nonempty=0): if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction: from .ExprNodes import TupleNode if isinstance(self.dimension, TupleNode): args = self.dimension.args else: args = self.dimension, values = [v.analyse_as_type(env) for v in args] if None in values: ix = values.index(None) error(args[ix].pos, "Template parameter not a type") base_type = error_type else: base_type = base_type.specialize_here(self.pos, values) return self.base.analyse(base_type, env, nonempty=nonempty) if self.dimension: self.dimension = self.dimension.analyse_const_expression(env) if not self.dimension.type.is_int: error(self.dimension.pos, "Array dimension not integer") size = self.dimension.get_constant_c_result_code() if 
size is not None: try: size = int(size) except ValueError: # runtime constant? pass else: size = None if not base_type.is_complete(): error(self.pos, "Array element type '%s' is incomplete" % base_type) if base_type.is_pyobject: error(self.pos, "Array element cannot be a Python object") if base_type.is_cfunction: error(self.pos, "Array element cannot be a function") array_type = PyrexTypes.c_array_type(base_type, size) return self.base.analyse(array_type, env, nonempty=nonempty) class CFuncDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode # args [CArgDeclNode] # templates [TemplatePlaceholderType] # has_varargs boolean # exception_value ConstNode # exception_check boolean True if PyErr_Occurred check needed # nogil boolean Can be called without gil # with_gil boolean Acquire gil around function body # is_const_method boolean Whether this is a const method child_attrs = ["base", "args", "exception_value"] overridable = 0 optional_arg_count = 0 is_const_method = 0 templates = None def analyse_templates(self): if isinstance(self.base, CArrayDeclaratorNode): from .ExprNodes import TupleNode, NameNode template_node = self.base.dimension if isinstance(template_node, TupleNode): template_nodes = template_node.args elif isinstance(template_node, NameNode): template_nodes = [template_node] else: error(template_node.pos, "Template arguments must be a list of names") return None self.templates = [] for template in template_nodes: if isinstance(template, NameNode): self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name)) else: error(template.pos, "Template arguments must be a list of names") self.base = self.base.base return self.templates else: return None def analyse(self, return_type, env, nonempty=0, directive_locals=None): if directive_locals is None: directive_locals = {} if nonempty: nonempty -= 1 func_type_args = [] for i, arg_node in enumerate(self.args): name_declarator, type = arg_node.analyse( env, nonempty=nonempty, is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives)) name = name_declarator.name if name in directive_locals: type_node = directive_locals[name] other_type = type_node.analyse_as_type(env) if other_type is None: error(type_node.pos, "Not a type") elif (type is not PyrexTypes.py_object_type and not type.same_as(other_type)): error(self.base.pos, "Signature does not agree with previous declaration") error(type_node.pos, "Previous declaration here") else: type = other_type if name_declarator.cname: error(self.pos, "Function argument cannot have C name specification") if i == 0 and env.is_c_class_scope and type.is_unspecified: # fix the type of self type = env.parent_type # Turn *[] argument into ** if type.is_array: type = PyrexTypes.c_ptr_type(type.base_type) # Catch attempted C-style func(void) decl if type.is_void: error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.") func_type_args.append( PyrexTypes.CFuncTypeArg(name, type, arg_node.pos)) if arg_node.default: self.optional_arg_count += 1 elif self.optional_arg_count: error(self.pos, "Non-default argument follows default argument") exc_val = None exc_check = 0 if self.exception_check == '+': env.add_include_file('ios') # for std::ios_base::failure env.add_include_file('new') # for std::bad_alloc env.add_include_file('stdexcept') env.add_include_file('typeinfo') # for std::bad_cast if (return_type.is_pyobject and (self.exception_value or self.exception_check) and self.exception_check != '+'): error(self.pos, "Exception clause 
not allowed for function returning Python object") else: if self.exception_value: self.exception_value = self.exception_value.analyse_const_expression(env) if self.exception_check == '+': exc_val_type = self.exception_value.type if (not exc_val_type.is_error and not exc_val_type.is_pyobject and not (exc_val_type.is_cfunction and not exc_val_type.return_type.is_pyobject and not exc_val_type.args)): error(self.exception_value.pos, "Exception value must be a Python exception or cdef function with no arguments.") exc_val = self.exception_value else: self.exception_value = self.exception_value.coerce_to( return_type, env).analyse_const_expression(env) exc_val = self.exception_value.get_constant_c_result_code() if exc_val is None: raise InternalError( "get_constant_c_result_code not implemented for %s" % self.exception_value.__class__.__name__) if not return_type.assignable_from(self.exception_value.type): error(self.exception_value.pos, "Exception value incompatible with function return type") exc_check = self.exception_check if return_type.is_cfunction: error(self.pos, "Function cannot return a function") func_type = PyrexTypes.CFuncType( return_type, func_type_args, self.has_varargs, optional_arg_count=self.optional_arg_count, exception_value=exc_val, exception_check=exc_check, calling_convention=self.base.calling_convention, nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable, is_const_method=self.is_const_method, templates=self.templates) if self.optional_arg_count: if func_type.is_fused: # This is a bit of a hack... When we need to create specialized CFuncTypes # on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg # struct def declare_opt_arg_struct(func_type, fused_cname): self.declare_optional_arg_struct(func_type, env, fused_cname) func_type.declare_opt_arg_struct = declare_opt_arg_struct else: self.declare_optional_arg_struct(func_type, env) callspec = env.directives['callspec'] if callspec: current = func_type.calling_convention if current and current != callspec: error(self.pos, "cannot have both '%s' and '%s' " "calling conventions" % (current, callspec)) func_type.calling_convention = callspec return self.base.analyse(func_type, env) def declare_optional_arg_struct(self, func_type, env, fused_cname=None): """ Declares the optional argument struct (the struct used to hold the values for optional arguments). For fused cdef functions, this is deferred as analyse_declarations is called only once (on the fused cdef function). 
""" scope = StructOrUnionScope() arg_count_member = '%sn' % Naming.pyrex_prefix scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos) for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]: scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=1) struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name) if fused_cname is not None: struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname) op_args_struct = env.global_scope().declare_struct_or_union( name=struct_cname, kind='struct', scope=scope, typedef_flag=0, pos=self.pos, cname=struct_cname) op_args_struct.defined_in_pxd = 1 op_args_struct.used = 1 func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type) class CConstDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode child_attrs = ["base"] def analyse(self, base_type, env, nonempty=0): if base_type.is_pyobject: error(self.pos, "Const base type cannot be a Python object") const = PyrexTypes.c_const_type(base_type) return self.base.analyse(const, env, nonempty=nonempty) class CArgDeclNode(Node): # Item in a function declaration argument list. # # base_type CBaseTypeNode # declarator CDeclaratorNode # not_none boolean Tagged with 'not None' # or_none boolean Tagged with 'or None' # accept_none boolean Resolved boolean for not_none/or_none # default ExprNode or None # default_value PyObjectConst constant for default value # annotation ExprNode or None Py3 function arg annotation # is_self_arg boolean Is the "self" arg of an extension type method # is_type_arg boolean Is the "class" arg of an extension type classmethod # is_kw_only boolean Is a keyword-only argument # is_dynamic boolean Non-literal arg stored inside CyFunction child_attrs = ["base_type", "declarator", "default", "annotation"] is_self_arg = 0 is_type_arg = 0 is_generic = 1 kw_only = 0 not_none = 0 or_none = 0 type = None name_declarator = None default_value = None annotation = None is_dynamic = 0 def analyse(self, env, nonempty=0, is_self_arg=False): if is_self_arg: self.base_type.is_self_arg = self.is_self_arg = True if self.type is None: # The parser may misinterpret names as types. We fix that here. if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '': if nonempty: if self.base_type.is_basic_c_type: # char, short, long called "int" type = self.base_type.analyse(env, could_be_name=True) arg_name = type.empty_declaration_code() else: arg_name = self.base_type.name self.declarator.name = EncodedString(arg_name) self.base_type.name = None self.base_type.is_basic_c_type = False could_be_name = True else: could_be_name = False self.base_type.is_arg = True base_type = self.base_type.analyse(env, could_be_name=could_be_name) if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name: self.declarator.name = self.base_type.arg_name # The parser is unable to resolve the ambiguity of [] as part of the # type (e.g. in buffers) or empty declarator (as with arrays). # This is only arises for empty multi-dimensional arrays. 
if (base_type.is_array and isinstance(self.base_type, TemplatedTypeNode) and isinstance(self.declarator, CArrayDeclaratorNode)): declarator = self.declarator while isinstance(declarator.base, CArrayDeclaratorNode): declarator = declarator.base declarator.base = self.base_type.array_declarator base_type = base_type.base_type # inject type declaration from annotations if self.annotation and env.directives['annotation_typing'] and self.base_type.name is None: arg_type = self.inject_type_from_annotations(env) if arg_type is not None: base_type = arg_type return self.declarator.analyse(base_type, env, nonempty=nonempty) else: return self.name_declarator, self.type def inject_type_from_annotations(self, env): annotation = self.annotation if not annotation: return None base_type, arg_type = _analyse_signature_annotation(annotation, env) if base_type is not None: self.base_type = base_type return arg_type def calculate_default_value_code(self, code): if self.default_value is None: if self.default: if self.default.is_literal: # will not output any code, just assign the result_code self.default.generate_evaluation_code(code) return self.type.cast_code(self.default.result()) self.default_value = code.get_argument_default_const(self.type) return self.default_value def annotate(self, code): if self.default: self.default.annotate(code) def generate_assignment_code(self, code, target=None, overloaded_assignment=False): default = self.default if default is None or default.is_literal: return if target is None: target = self.calculate_default_value_code(code) default.generate_evaluation_code(code) default.make_owned_reference(code) result = default.result() if overloaded_assignment else default.result_as(self.type) code.putln("%s = %s;" % (target, result)) if self.type.is_pyobject: code.put_giveref(default.result()) default.generate_post_assignment_code(code) default.free_temps(code) class CBaseTypeNode(Node): # Abstract base class for C base type nodes. # # Processing during analyse_declarations phase: # # analyse # Returns the type. def analyse_as_type(self, env): return self.analyse(env) class CAnalysedBaseTypeNode(Node): # type type child_attrs = [] def analyse(self, env, could_be_name=False): return self.type class CSimpleBaseTypeNode(CBaseTypeNode): # name string # module_path [string] Qualifying name components # is_basic_c_type boolean # signed boolean # longness integer # complex boolean # is_self_arg boolean Is self argument of C method # ##is_type_arg boolean Is type argument of class method child_attrs = [] arg_name = None # in case the argument name was interpreted as a type module_path = [] is_basic_c_type = False complex = False def analyse(self, env, could_be_name=False): # Return type descriptor. #print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ### type = None if self.is_basic_c_type: type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name) if not type: error(self.pos, "Unrecognised type modifier combination") elif self.name == "object" and not self.module_path: type = py_object_type elif self.name is None: if self.is_self_arg and env.is_c_class_scope: #print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ### type = env.parent_type ## elif self.is_type_arg and env.is_c_class_scope: ## type = Builtin.type_type else: type = py_object_type else: if self.module_path: # Maybe it's a nested C++ class. 
scope = env for item in self.module_path: entry = scope.lookup(item) if entry is not None and entry.is_cpp_class: scope = entry.type.scope else: scope = None break if scope is None: # Maybe it's a cimport. scope = env.find_imported_module(self.module_path, self.pos) if scope: scope.fused_to_specific = env.fused_to_specific else: scope = env if scope: if scope.is_c_class_scope: scope = scope.global_scope() type = scope.lookup_type(self.name) if type is not None: pass elif could_be_name: if self.is_self_arg and env.is_c_class_scope: type = env.parent_type ## elif self.is_type_arg and env.is_c_class_scope: ## type = Builtin.type_type else: type = py_object_type self.arg_name = EncodedString(self.name) else: if self.templates: if not self.name in self.templates: error(self.pos, "'%s' is not a type identifier" % self.name) type = PyrexTypes.TemplatePlaceholderType(self.name) else: error(self.pos, "'%s' is not a type identifier" % self.name) if self.complex: if not type.is_numeric or type.is_complex: error(self.pos, "can only complexify c numeric types") type = PyrexTypes.CComplexType(type) type.create_declaration_utility_code(env) elif type is Builtin.complex_type: # Special case: optimise builtin complex type into C's # double complex. The parser cannot do this (as for the # normal scalar types) as the user may have redeclared the # 'complex' type. Testing for the exact type here works. type = PyrexTypes.c_double_complex_type type.create_declaration_utility_code(env) self.complex = True if type: return type else: return PyrexTypes.error_type class MemoryViewSliceTypeNode(CBaseTypeNode): name = 'memoryview' child_attrs = ['base_type_node', 'axes'] def analyse(self, env, could_be_name=False): base_type = self.base_type_node.analyse(env) if base_type.is_error: return base_type from . import MemoryView try: axes_specs = MemoryView.get_axes_specs(env, self.axes) except CompileError as e: error(e.position, e.message_only) self.type = PyrexTypes.ErrorType() return self.type if not MemoryView.validate_axes(self.pos, axes_specs): self.type = error_type else: self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs) self.type.validate_memslice_dtype(self.pos) self.use_memview_utilities(env) return self.type def use_memview_utilities(self, env): from . import MemoryView env.use_utility_code(MemoryView.view_utility_code) class CNestedBaseTypeNode(CBaseTypeNode): # For C++ classes that live inside other C++ classes. 
# name string # base_type CBaseTypeNode child_attrs = ['base_type'] def analyse(self, env, could_be_name=None): base_type = self.base_type.analyse(env) if base_type is PyrexTypes.error_type: return PyrexTypes.error_type if not base_type.is_cpp_class: error(self.pos, "'%s' is not a valid type scope" % base_type) return PyrexTypes.error_type type_entry = base_type.scope.lookup_here(self.name) if not type_entry or not type_entry.is_type: error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name)) return PyrexTypes.error_type return type_entry.type class TemplatedTypeNode(CBaseTypeNode): # After parsing: # positional_args [ExprNode] List of positional arguments # keyword_args DictNode Keyword arguments # base_type_node CBaseTypeNode # After analysis: # type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options child_attrs = ["base_type_node", "positional_args", "keyword_args", "dtype_node"] dtype_node = None name = None def analyse(self, env, could_be_name=False, base_type=None): if base_type is None: base_type = self.base_type_node.analyse(env) if base_type.is_error: return base_type if base_type.is_cpp_class and base_type.is_template_type(): # Templated class if self.keyword_args and self.keyword_args.key_value_pairs: error(self.pos, "c++ templates cannot take keyword arguments") self.type = PyrexTypes.error_type else: template_types = [] for template_node in self.positional_args: type = template_node.analyse_as_type(env) if type is None: error(template_node.pos, "unknown type in template argument") return error_type template_types.append(type) self.type = base_type.specialize_here(self.pos, template_types) elif base_type.is_pyobject: # Buffer from . import Buffer options = Buffer.analyse_buffer_options( self.pos, env, self.positional_args, self.keyword_args, base_type.buffer_defaults) if sys.version_info[0] < 3: # Py 2.x enforces byte strings as keyword arguments ... options = dict([(name.encode('ASCII'), value) for name, value in options.items()]) self.type = PyrexTypes.BufferType(base_type, **options) else: # Array empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None) if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs: error(self.pos, "invalid array declaration") self.type = PyrexTypes.error_type else: # It would be nice to merge this class with CArrayDeclaratorNode, # but arrays are part of the declaration, not the type... 
if not self.positional_args: dimension = None else: dimension = self.positional_args[0] self.array_declarator = CArrayDeclaratorNode( self.pos, base=empty_declarator, dimension=dimension) self.type = self.array_declarator.analyse(base_type, env)[1] if self.type.is_fused and env.fused_to_specific: self.type = self.type.specialize(env.fused_to_specific) return self.type class CComplexBaseTypeNode(CBaseTypeNode): # base_type CBaseTypeNode # declarator CDeclaratorNode child_attrs = ["base_type", "declarator"] def analyse(self, env, could_be_name=False): base = self.base_type.analyse(env, could_be_name) _, type = self.declarator.analyse(base, env) return type class CTupleBaseTypeNode(CBaseTypeNode): # components [CBaseTypeNode] child_attrs = ["components"] def analyse(self, env, could_be_name=False): component_types = [] for c in self.components: type = c.analyse(env) if type.is_pyobject: error(c.pos, "Tuple types can't (yet) contain Python objects.") return error_type component_types.append(type) entry = env.declare_tuple_type(self.pos, component_types) entry.used = True return entry.type class FusedTypeNode(CBaseTypeNode): """ Represents a fused type in a ctypedef statement: ctypedef cython.fused_type(int, long, long long) integral name str name of this fused type types [CSimpleBaseTypeNode] is the list of types to be fused """ child_attrs = [] def analyse_declarations(self, env): type = self.analyse(env) entry = env.declare_typedef(self.name, type, self.pos) # Omit the typedef declaration that self.declarator would produce entry.in_cinclude = True def analyse(self, env, could_be_name=False): types = [] for type_node in self.types: type = type_node.analyse_as_type(env) if not type: error(type_node.pos, "Not a type") continue if type in types: error(type_node.pos, "Type specified multiple times") else: types.append(type) # if len(self.types) == 1: # return types[0] return PyrexTypes.FusedType(types, name=self.name) class CConstTypeNode(CBaseTypeNode): # base_type CBaseTypeNode child_attrs = ["base_type"] def analyse(self, env, could_be_name=False): base = self.base_type.analyse(env, could_be_name) if base.is_pyobject: error(self.pos, "Const base type cannot be a Python object") return PyrexTypes.c_const_type(base) class CVarDefNode(StatNode): # C variable definition or forward/extern function declaration. # # visibility 'private' or 'public' or 'extern' # base_type CBaseTypeNode # declarators [CDeclaratorNode] # in_pxd boolean # api boolean # overridable boolean whether it is a cpdef # modifiers ['inline'] # decorators [cython.locals(...)] or None # directive_locals { string : NameNode } locals defined by cython.locals(...) 
child_attrs = ["base_type", "declarators"] decorators = None directive_locals = None def analyse_declarations(self, env, dest_scope=None): if self.directive_locals is None: self.directive_locals = {} if not dest_scope: dest_scope = env self.dest_scope = dest_scope if self.declarators: templates = self.declarators[0].analyse_templates() else: templates = None if templates is not None: if self.visibility != 'extern': error(self.pos, "Only extern functions allowed") if len(self.declarators) > 1: error(self.declarators[1].pos, "Can't multiply declare template types") env = TemplateScope('func_template', env) env.directives = env.outer_scope.directives for template_param in templates: env.declare_type(template_param.name, template_param, self.pos) base_type = self.base_type.analyse(env) if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or env.is_module_scope): error(self.pos, "Fused types not allowed here") return error_type self.entry = None visibility = self.visibility for declarator in self.declarators: if (len(self.declarators) > 1 and not isinstance(declarator, CNameDeclaratorNode) and env.directives['warn.multiple_declarators']): warning( declarator.pos, "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). " "Each pointer declaration should be on its own line.", 1) create_extern_wrapper = (self.overridable and self.visibility == 'extern' and env.is_module_scope) if create_extern_wrapper: declarator.overridable = False if isinstance(declarator, CFuncDeclaratorNode): name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals) else: name_declarator, type = declarator.analyse(base_type, env) if not type.is_complete(): if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice): error(declarator.pos, "Variable type '%s' is incomplete" % type) if self.visibility == 'extern' and type.is_pyobject: error(declarator.pos, "Python object cannot be declared extern") name = name_declarator.name cname = name_declarator.cname if name == '': error(declarator.pos, "Missing name in declaration.") return if type.is_cfunction: if 'staticmethod' in env.directives: type.is_static_method = True self.entry = dest_scope.declare_cfunction( name, type, declarator.pos, cname=cname, visibility=self.visibility, in_pxd=self.in_pxd, api=self.api, modifiers=self.modifiers, overridable=self.overridable) if self.entry is not None: self.entry.directive_locals = copy.copy(self.directive_locals) if create_extern_wrapper: self.entry.type.create_to_py_utility_code(env) self.entry.create_wrapper = True else: if self.directive_locals: error(self.pos, "Decorators can only be followed by functions") self.entry = dest_scope.declare_var( name, type, declarator.pos, cname=cname, visibility=visibility, in_pxd=self.in_pxd, api=self.api, is_cdef=1) if Options.docstrings: self.entry.doc = embed_position(self.pos, self.doc) class CStructOrUnionDefNode(StatNode): # name string # cname string or None # kind "struct" or "union" # typedef_flag boolean # visibility "public" or "private" # api boolean # in_pxd boolean # attributes [CVarDefNode] or None # entry Entry # packed boolean child_attrs = ["attributes"] def declare(self, env, scope=None): self.entry = env.declare_struct_or_union( self.name, self.kind, scope, self.typedef_flag, self.pos, self.cname, visibility=self.visibility, api=self.api, packed=self.packed) def analyse_declarations(self, env): scope = None if self.attributes is not None: scope = StructOrUnionScope(self.name) 
self.declare(env, scope) if self.attributes is not None: if self.in_pxd and not env.in_cinclude: self.entry.defined_in_pxd = 1 for attr in self.attributes: attr.analyse_declarations(env, scope) if self.visibility != 'extern': for attr in scope.var_entries: type = attr.type while type.is_array: type = type.base_type if type == self.entry.type: error(attr.pos, "Struct cannot contain itself as a member.") def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class CppClassNode(CStructOrUnionDefNode, BlockNode): # name string # cname string or None # visibility "extern" # in_pxd boolean # attributes [CVarDefNode] or None # entry Entry # base_classes [CBaseTypeNode] # templates [(string, bool)] or None # decorators [DecoratorNode] or None decorators = None def declare(self, env): if self.templates is None: template_types = None else: template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) for template_name, required in self.templates] num_optional_templates = sum(not required for _, required in self.templates) if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]): error(self.pos, "Required template parameters must precede optional template parameters.") self.entry = env.declare_cpp_class( self.name, None, self.pos, self.cname, base_classes=[], visibility=self.visibility, templates=template_types) def analyse_declarations(self, env): if self.templates is None: template_types = template_names = None else: template_names = [template_name for template_name, _ in self.templates] template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) for template_name, required in self.templates] scope = None if self.attributes is not None: scope = CppClassScope(self.name, env, templates=template_names) def base_ok(base_class): if base_class.is_cpp_class or base_class.is_struct: return True else: error(self.pos, "Base class '%s' not a struct or class." 
% base_class) base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes]) self.entry = env.declare_cpp_class( self.name, scope, self.pos, self.cname, base_class_types, visibility=self.visibility, templates=template_types) if self.entry is None: return self.entry.is_cpp_class = 1 if scope is not None: scope.type = self.entry.type defined_funcs = [] def func_attributes(attributes): for attr in attributes: if isinstance(attr, CFuncDefNode): yield attr elif isinstance(attr, CompilerDirectivesNode): for sub_attr in func_attributes(attr.body.stats): yield sub_attr if self.attributes is not None: if self.in_pxd and not env.in_cinclude: self.entry.defined_in_pxd = 1 for attr in self.attributes: attr.analyse_declarations(scope) for func in func_attributes(self.attributes): defined_funcs.append(func) if self.templates is not None: func.template_declaration = "template <typename %s>" % ", typename ".join(template_names) self.body = StatListNode(self.pos, stats=defined_funcs) self.scope = scope def analyse_expressions(self, env): self.body = self.body.analyse_expressions(self.entry.type.scope) return self def generate_function_definitions(self, env, code): self.body.generate_function_definitions(self.entry.type.scope, code) def generate_execution_code(self, code): self.body.generate_execution_code(code) def annotate(self, code): self.body.annotate(code) class CEnumDefNode(StatNode): # name string or None # cname string or None # items [CEnumDefItemNode] # typedef_flag boolean # visibility "public" or "private" or "extern" # api boolean # in_pxd boolean # create_wrapper boolean # entry Entry child_attrs = ["items"] def declare(self, env): self.entry = env.declare_enum( self.name, self.pos, cname=self.cname, typedef_flag=self.typedef_flag, visibility=self.visibility, api=self.api, create_wrapper=self.create_wrapper and self.name is None) def analyse_declarations(self, env): if self.items is not None: if self.in_pxd and not env.in_cinclude: self.entry.defined_in_pxd = 1 for item in self.items: item.analyse_declarations(env, self.entry) if self.name is not None: self.entry.type.values = set(item.name for item in self.items) if self.create_wrapper and self.name is not None: from .UtilityCode import CythonUtilityCode env.use_utility_code(CythonUtilityCode.load( "EnumType", "CpdefEnums.pyx", context={"name": self.name, "items": tuple(item.name for item in self.items)}, outer_module_scope=env.global_scope())) def analyse_expressions(self, env): return self def generate_execution_code(self, code): if self.visibility == 'public' or self.api: code.mark_pos(self.pos) temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) for item in self.entry.enum_values: code.putln("%s = PyInt_FromLong(%s); %s" % ( temp, item.cname, code.error_goto_if_null(temp, item.pos))) code.put_gotref(temp) code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % ( Naming.moddict_cname, item.name, temp, code.error_goto(item.pos))) code.put_decref_clear(temp, PyrexTypes.py_object_type) code.funcstate.release_temp(temp) class CEnumDefItemNode(StatNode): # name string # cname string or None # value ExprNode or None child_attrs = ["value"] def analyse_declarations(self, env, enum_entry): if self.value: self.value = self.value.analyse_const_expression(env) if not self.value.type.is_int: self.value = self.value.coerce_to(PyrexTypes.c_int_type, env) self.value = self.value.analyse_const_expression(env) entry = env.declare_const( self.name, enum_entry.type, self.value, self.pos, 
cname=self.cname, visibility=enum_entry.visibility, api=enum_entry.api, create_wrapper=enum_entry.create_wrapper and enum_entry.name is None) enum_entry.enum_values.append(entry) if enum_entry.name: enum_entry.type.values.append(entry.cname) class CTypeDefNode(StatNode): # base_type CBaseTypeNode # declarator CDeclaratorNode # visibility "public" or "private" # api boolean # in_pxd boolean child_attrs = ["base_type", "declarator"] def analyse_declarations(self, env): base = self.base_type.analyse(env) name_declarator, type = self.declarator.analyse(base, env) name = name_declarator.name cname = name_declarator.cname entry = env.declare_typedef( name, type, self.pos, cname=cname, visibility=self.visibility, api=self.api) if type.is_fused: entry.in_cinclude = True if self.in_pxd and not env.in_cinclude: entry.defined_in_pxd = 1 def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class FuncDefNode(StatNode, BlockNode): # Base class for function definition nodes. # # return_type PyrexType # #filename string C name of filename string const # entry Symtab.Entry # needs_closure boolean Whether or not this function has inner functions/classes/yield # needs_outer_scope boolean Whether or not this function requires outer scope # pymethdef_required boolean Force Python method struct generation # directive_locals { string : ExprNode } locals defined by cython.locals(...) # directive_returns [ExprNode] type defined by cython.returns(...) # star_arg PyArgDeclNode or None * argument # starstar_arg PyArgDeclNode or None ** argument # # is_async_def boolean is a Coroutine function # # has_fused_arguments boolean # Whether this cdef function has fused parameters. This is needed # by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes # with fused argument types with a FusedCFuncDefNode py_func = None needs_closure = False needs_outer_scope = False pymethdef_required = False is_generator = False is_generator_body = False is_async_def = False modifiers = [] has_fused_arguments = False star_arg = None starstar_arg = None is_cyfunction = False code_object = None def analyse_default_values(self, env): default_seen = 0 for arg in self.args: if arg.default: default_seen = 1 if arg.is_generic: arg.default = arg.default.analyse_types(env) arg.default = arg.default.coerce_to(arg.type, env) else: error(arg.pos, "This argument cannot have a default value") arg.default = None elif arg.kw_only: default_seen = 1 elif default_seen: error(arg.pos, "Non-default argument following default argument") def analyse_annotations(self, env): for arg in self.args: if arg.annotation: arg.annotation = arg.annotation.analyse_types(env) def align_argument_type(self, env, arg): # @cython.locals() directive_locals = self.directive_locals orig_type = arg.type if arg.name in directive_locals: type_node = directive_locals[arg.name] other_type = type_node.analyse_as_type(env) elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']: type_node = arg.annotation other_type = arg.inject_type_from_annotations(env) if other_type is None: return arg else: return arg if other_type is None: error(type_node.pos, "Not a type") elif orig_type is not py_object_type and not orig_type.same_as(other_type): error(arg.base_type.pos, "Signature does not agree with previous declaration") error(type_node.pos, "Previous declaration here") else: arg.type = other_type return arg def need_gil_acquisition(self, lenv): return 0 def create_local_scope(self, env): genv = env while 
genv.is_py_class_scope or genv.is_c_class_scope: genv = genv.outer_scope if self.needs_closure: lenv = ClosureScope(name=self.entry.name, outer_scope=genv, parent_scope=env, scope_name=self.entry.cname) else: lenv = LocalScope(name=self.entry.name, outer_scope=genv, parent_scope=env) lenv.return_type = self.return_type type = self.entry.type if type.is_cfunction: lenv.nogil = type.nogil and not type.with_gil self.local_scope = lenv lenv.directives = env.directives return lenv def generate_function_body(self, env, code): self.body.generate_execution_code(code) def generate_function_definitions(self, env, code): from . import Buffer if self.return_type.is_memoryviewslice: from . import MemoryView lenv = self.local_scope if lenv.is_closure_scope and not lenv.is_passthrough: outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname, Naming.outer_scope_cname) else: outer_scope_cname = Naming.outer_scope_cname lenv.mangle_closure_cnames(outer_scope_cname) # Generate closure function definitions self.body.generate_function_definitions(lenv, code) # generate lambda function definitions self.generate_lambda_definitions(lenv, code) is_getbuffer_slot = (self.entry.name == "__getbuffer__" and self.entry.scope.is_c_class_scope) is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and self.entry.scope.is_c_class_scope) is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot if is_buffer_slot: if 'cython_unused' not in self.modifiers: self.modifiers = self.modifiers + ['cython_unused'] preprocessor_guard = self.get_preprocessor_guard() profile = code.globalstate.directives['profile'] linetrace = code.globalstate.directives['linetrace'] if profile or linetrace: code.globalstate.use_utility_code( UtilityCode.load_cached("Profile", "Profile.c")) # Generate C code for header and body of function code.enter_cfunc_scope() code.return_from_error_cleanup_label = code.new_label() code.funcstate.gil_owned = not lenv.nogil # ----- Top-level constants used by this function code.mark_pos(self.pos) self.generate_cached_builtins_decls(lenv, code) # ----- Function header code.putln("") if preprocessor_guard: code.putln(preprocessor_guard) with_pymethdef = (self.needs_assignment_synthesis(env, code) or self.pymethdef_required) if self.py_func: self.py_func.generate_function_header( code, with_pymethdef=with_pymethdef, proto_only=True) self.generate_function_header(code, with_pymethdef=with_pymethdef) # ----- Local variable declarations # Find function scope cenv = env while cenv.is_py_class_scope or cenv.is_c_class_scope: cenv = cenv.outer_scope if self.needs_closure: code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname)) code.putln(";") elif self.needs_outer_scope: if lenv.is_passthrough: code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname)) code.putln(";") code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname)) code.putln(";") self.generate_argument_declarations(lenv, code) for entry in lenv.var_entries: if not (entry.in_closure or entry.is_arg): code.put_var_declaration(entry) # Initialize the return variable __pyx_r init = "" if not self.return_type.is_void: if self.return_type.is_pyobject: init = " = NULL" elif self.return_type.is_memoryviewslice: init = ' = ' + MemoryView.memslice_entry_init code.putln("%s%s;" % ( self.return_type.declaration_code(Naming.retval_cname), init)) tempvardecl_code = code.insertion_point() self.generate_keyword_list(code) # ----- Extern library function declarations lenv.generate_library_function_declarations(code) # 
----- GIL acquisition acquire_gil = self.acquire_gil # See if we need to acquire the GIL for variable declarations, or for # refnanny only # Closures are not currently possible for cdef nogil functions, # but check them anyway have_object_args = self.needs_closure or self.needs_outer_scope for arg in lenv.arg_entries: if arg.type.is_pyobject: have_object_args = True break used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used] acquire_gil_for_var_decls_only = ( lenv.nogil and lenv.has_with_gil_block and (have_object_args or used_buffer_entries)) acquire_gil_for_refnanny_only = ( lenv.nogil and lenv.has_with_gil_block and not acquire_gil_for_var_decls_only) use_refnanny = not lenv.nogil or lenv.has_with_gil_block if acquire_gil or acquire_gil_for_var_decls_only: code.put_ensure_gil() code.funcstate.gil_owned = True elif lenv.nogil and lenv.has_with_gil_block: code.declare_gilstate() if profile or linetrace: tempvardecl_code.put_trace_declarations() code_object = self.code_object.calculate_result_code(code) if self.code_object else None code.put_trace_frame_init(code_object) # ----- set up refnanny if use_refnanny: tempvardecl_code.put_declare_refcount_context() code.put_setup_refcount_context( self.entry.name, acquire_gil=acquire_gil_for_refnanny_only) # ----- Automatic lead-ins for certain special functions if is_getbuffer_slot: self.getbuffer_init(code) # ----- Create closure scope object if self.needs_closure: tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__') slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot) if not slot_func_cname: slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname code.putln("%s = (%s)%s(%s, %s, NULL);" % ( Naming.cur_scope_cname, lenv.scope_class.type.empty_declaration_code(), slot_func_cname, lenv.scope_class.type.typeptr_cname, Naming.empty_tuple)) code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname) if is_getbuffer_slot: self.getbuffer_error_cleanup(code) if use_refnanny: code.put_finish_refcount_context() if acquire_gil or acquire_gil_for_var_decls_only: code.put_release_ensured_gil() # FIXME: what if the error return value is a Python value? err_val = self.error_value() if err_val is None: if not self.caller_will_check_exceptions(): warning(self.entry.pos, "Unraisable exception in function '%s'." % self.entry.qualified_name, 0) code.put_unraisable(self.entry.qualified_name, lenv.nogil) #if self.return_type.is_void: code.putln("return;") else: code.putln("return %s;" % err_val) code.putln("}") code.put_gotref(Naming.cur_scope_cname) # Note that it is unsafe to decref the scope at this point. 
if self.needs_outer_scope: if self.is_cyfunction: code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % ( outer_scope_cname, cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)) else: code.putln("%s = (%s) %s;" % ( outer_scope_cname, cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)) if lenv.is_passthrough: code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname)) elif self.needs_closure: # inner closures own a reference to their outer parent code.put_incref(outer_scope_cname, cenv.scope_class.type) code.put_giveref(outer_scope_cname) # ----- Trace function call if profile or linetrace: # this looks a bit late, but if we don't get here due to a # fatal error before hand, it's not really worth tracing if isinstance(self, DefNode) and self.is_wrapper: trace_name = self.entry.name + " (wrapper)" else: trace_name = self.entry.name code.put_trace_call( trace_name, self.pos, nogil=not code.funcstate.gil_owned) code.funcstate.can_trace = True # ----- Fetch arguments self.generate_argument_parsing_code(env, code) # If an argument is assigned to in the body, we must # incref it to properly keep track of refcounts. is_cdef = isinstance(self, CFuncDefNode) for entry in lenv.arg_entries: if entry.type.is_pyobject: if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: code.put_var_incref(entry) # Note: defaults are always incref-ed. For def functions, we # we aquire arguments from object converstion, so we have # new references. If we are a cdef function, we need to # incref our arguments elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1: code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned) for entry in lenv.var_entries: if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure: if entry.xdecref_cleanup: code.put_var_xincref(entry) else: code.put_var_incref(entry) # ----- Initialise local buffer auxiliary variables for entry in lenv.var_entries + lenv.arg_entries: if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used: Buffer.put_init_vars(entry, code) # ----- Check and convert arguments self.generate_argument_type_tests(code) # ----- Acquire buffer arguments for entry in lenv.arg_entries: if entry.type.is_buffer: Buffer.put_acquire_arg_buffer(entry, code, self.pos) if acquire_gil_for_var_decls_only: code.put_release_ensured_gil() code.funcstate.gil_owned = False # ------------------------- # ----- Function body ----- # ------------------------- self.generate_function_body(env, code) code.mark_pos(self.pos, trace=False) code.putln("") code.putln("/* function exit code */") # ----- Default return value if not self.body.is_terminator: if self.return_type.is_pyobject: #if self.return_type.is_extension_type: # lhs = "(PyObject *)%s" % Naming.retval_cname #else: lhs = Naming.retval_cname code.put_init_to_py_none(lhs, self.return_type) else: val = self.return_type.default_value if val: code.putln("%s = %s;" % (Naming.retval_cname, val)) # ----- Error cleanup if code.error_label in code.labels_used: if not self.body.is_terminator: code.put_goto(code.return_label) code.put_label(code.error_label) for cname, type in code.funcstate.all_managed_temps(): code.put_xdecref(cname, type, have_gil=not lenv.nogil) # Clean up buffers -- this calls a Python function # so need to save and restore error state buffers_present = len(used_buffer_entries) > 0 #memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice] if buffers_present: 
code.globalstate.use_utility_code(restore_exception_utility_code) code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;") code.putln("__Pyx_PyThreadState_declare") code.putln("__Pyx_PyThreadState_assign") code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);") for entry in used_buffer_entries: Buffer.put_release_buffer_code(code, entry) #code.putln("%s = 0;" % entry.cname) code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}") if self.return_type.is_memoryviewslice: MemoryView.put_init_entry(Naming.retval_cname, code) err_val = Naming.retval_cname else: err_val = self.error_value() exc_check = self.caller_will_check_exceptions() if err_val is not None or exc_check: # TODO: Fix exception tracing (though currently unused by cProfile). # code.globalstate.use_utility_code(get_exception_tuple_utility_code) # code.put_trace_exception() if lenv.nogil and not lenv.has_with_gil_block: code.putln("{") code.put_ensure_gil() code.put_add_traceback(self.entry.qualified_name) if lenv.nogil and not lenv.has_with_gil_block: code.put_release_ensured_gil() code.putln("}") else: warning(self.entry.pos, "Unraisable exception in function '%s'." % self.entry.qualified_name, 0) code.put_unraisable(self.entry.qualified_name, lenv.nogil) default_retval = self.return_type.default_value if err_val is None and default_retval: err_val = default_retval if err_val is not None: code.putln("%s = %s;" % (Naming.retval_cname, err_val)) if is_getbuffer_slot: self.getbuffer_error_cleanup(code) # If we are using the non-error cleanup section we should # jump past it if we have an error. The if-test below determine # whether this section is used. if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice: code.put_goto(code.return_from_error_cleanup_label) # ----- Non-error return cleanup code.put_label(code.return_label) for entry in used_buffer_entries: Buffer.put_release_buffer_code(code, entry) if is_getbuffer_slot: self.getbuffer_normal_cleanup(code) if self.return_type.is_memoryviewslice: # See if our return value is uninitialized on non-error return # from . import MemoryView # MemoryView.err_if_nogil_initialized_check(self.pos, env) cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname)) code.putln( 'if (%s) {' % cond) if env.nogil: code.put_ensure_gil() code.putln( 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");') if env.nogil: code.put_release_ensured_gil() code.putln( '}') # ----- Return cleanup for both error and no-error return code.put_label(code.return_from_error_cleanup_label) for entry in lenv.var_entries: if not entry.used or entry.in_closure: continue if entry.type.is_memoryviewslice: code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil) elif entry.type.is_pyobject: if not entry.is_arg or len(entry.cf_assignments) > 1: if entry.xdecref_cleanup: code.put_var_xdecref(entry) else: code.put_var_decref(entry) # Decref any increfed args for entry in lenv.arg_entries: if entry.type.is_pyobject: if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: code.put_var_decref(entry) elif (entry.type.is_memoryviewslice and (not is_cdef or len(entry.cf_assignments) > 1)): # decref slices of def functions and acquired slices from cdef # functions, but not borrowed slices from cdef functions. 
code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil) if self.needs_closure: code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type) # ----- Return # This code is duplicated in ModuleNode.generate_module_init_func if not lenv.nogil: default_retval = self.return_type.default_value err_val = self.error_value() if err_val is None and default_retval: err_val = default_retval # FIXME: why is err_val not used? if self.return_type.is_pyobject: code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname)) if self.entry.is_special and self.entry.name == "__hash__": # Returning -1 for __hash__ is supposed to signal an error # We do as Python instances and coerce -1 into -2. code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % ( Naming.retval_cname, Naming.retval_cname)) if profile or linetrace: code.funcstate.can_trace = False if self.return_type.is_pyobject: code.put_trace_return( Naming.retval_cname, nogil=not code.funcstate.gil_owned) else: code.put_trace_return( "Py_None", nogil=not code.funcstate.gil_owned) if not lenv.nogil: # GIL holding function code.put_finish_refcount_context() if acquire_gil or (lenv.nogil and lenv.has_with_gil_block): # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode) code.put_release_ensured_gil() code.funcstate.gil_owned = False if not self.return_type.is_void: code.putln("return %s;" % Naming.retval_cname) code.putln("}") if preprocessor_guard: code.putln("#endif /*!(%s)*/" % preprocessor_guard) # ----- Go back and insert temp variable declarations tempvardecl_code.put_temp_declarations(code.funcstate) # ----- Python version code.exit_cfunc_scope() if self.py_func: self.py_func.generate_function_definitions(env, code) self.generate_wrapper_functions(code) def declare_argument(self, env, arg): if arg.type.is_void: error(arg.pos, "Invalid use of 'void'") elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice): error(arg.pos, "Argument type '%s' is incomplete" % arg.type) return env.declare_arg(arg.name, arg.type, arg.pos) def generate_arg_type_test(self, arg, code): # Generate type test for one argument. if arg.type.typeobj_is_available(): code.globalstate.use_utility_code( UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c")) typeptr_cname = arg.type.typeptr_cname arg_code = "((PyObject *)%s)" % arg.entry.cname code.putln( 'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % ( arg_code, typeptr_cname, arg.accept_none, arg.name, arg.type.is_builtin_type, code.error_goto(arg.pos))) else: error(arg.pos, "Cannot test type of extern C class without type object name specification") def generate_arg_none_check(self, arg, code): # Generate None check for one argument. 
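        # The emitted C has roughly this shape (illustrative sketch only; the
        # argument name and error label are placeholders):
        #
        #     if (unlikely(((PyObject *)__pyx_v_arg) == Py_None)) {
        #         PyErr_Format(PyExc_TypeError,
        #             "Argument '%.200s' must not be None", "arg"); <goto error>
        #     }
        #
        # For memoryview slice arguments the check is applied to the slice's
        # underlying '.memview' object rather than to the argument itself.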
if arg.type.is_memoryviewslice: cname = "%s.memview" % arg.entry.cname else: cname = arg.entry.cname code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname) code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % ( max(200, len(arg.name)), arg.name, code.error_goto(arg.pos))) code.putln('}') def generate_wrapper_functions(self, code): pass def generate_execution_code(self, code): code.mark_pos(self.pos) # Evaluate and store argument default values for arg in self.args: if not arg.is_dynamic: arg.generate_assignment_code(code) # # Special code for the __getbuffer__ function # def getbuffer_init(self, code): info = self.local_scope.arg_entries[1].cname # Python 3.0 betas have a bug in memoryview which makes it call # getbuffer with a NULL parameter. For now we work around this; # the following block should be removed when this bug is fixed. code.putln("if (%s != NULL) {" % info) code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info) code.put_giveref("%s->obj" % info) # Do not refnanny object within structs code.putln("}") def getbuffer_error_cleanup(self, code): info = self.local_scope.arg_entries[1].cname code.putln("if (%s != NULL && %s->obj != NULL) {" % (info, info)) code.put_gotref("%s->obj" % info) code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;" % (info, info)) code.putln("}") def getbuffer_normal_cleanup(self, code): info = self.local_scope.arg_entries[1].cname code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info)) code.put_gotref("Py_None") code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info) code.putln("}") def get_preprocessor_guard(self): if not self.entry.is_special: return None name = self.entry.name slot = TypeSlots.method_name_to_slot.get(name) if not slot: return None if name == '__long__' and not self.entry.scope.lookup_here('__int__'): return None if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope: return None return slot.preprocessor_guard_code() class CFuncDefNode(FuncDefNode): # C function definition. 
# # modifiers ['inline'] # visibility 'private' or 'public' or 'extern' # base_type CBaseTypeNode # declarator CDeclaratorNode # cfunc_declarator the CFuncDeclarator of this function # (this is also available through declarator or a # base thereof) # body StatListNode # api boolean # decorators [DecoratorNode] list of decorators # # with_gil boolean Acquire GIL around body # type CFuncType # py_func wrapper for calling from Python # overridable whether or not this is a cpdef function # inline_in_pxd whether this is an inline function in a pxd file # template_declaration String or None Used for c++ class methods # is_const_method whether this is a const method # is_static_method whether this is a static method # is_c_class_method whether this is a cclass method child_attrs = ["base_type", "declarator", "body", "py_func_stat"] inline_in_pxd = False decorators = None directive_locals = None directive_returns = None override = None template_declaration = None is_const_method = False py_func_stat = None def unqualified_name(self): return self.entry.name def analyse_declarations(self, env): self.is_c_class_method = env.is_c_class_scope if self.directive_locals is None: self.directive_locals = {} self.directive_locals.update(env.directives['locals']) if self.directive_returns is not None: base_type = self.directive_returns.analyse_as_type(env) if base_type is None: error(self.directive_returns.pos, "Not a type") base_type = PyrexTypes.error_type else: base_type = self.base_type.analyse(env) self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod') # The 2 here is because we need both function and argument names. if isinstance(self.declarator, CFuncDeclaratorNode): name_declarator, type = self.declarator.analyse( base_type, env, nonempty=2 * (self.body is not None), directive_locals=self.directive_locals) else: name_declarator, type = self.declarator.analyse( base_type, env, nonempty=2 * (self.body is not None)) if not type.is_cfunction: error(self.pos, "Suite attached to non-function declaration") # Remember the actual type according to the function header # written here, because the type in the symbol table entry # may be different if we're overriding a C method inherited # from the base type of an extension type. self.type = type type.is_overridable = self.overridable declarator = self.declarator while not hasattr(declarator, 'args'): declarator = declarator.base self.cfunc_declarator = declarator self.args = declarator.args opt_arg_count = self.cfunc_declarator.optional_arg_count if (self.visibility == 'public' or self.api) and opt_arg_count: error(self.cfunc_declarator.pos, "Function with optional arguments may not be declared public or api") if type.exception_check == '+' and self.visibility != 'extern': warning(self.cfunc_declarator.pos, "Only extern functions can throw C++ exceptions.") for formal_arg, type_arg in zip(self.args, type.args): self.align_argument_type(env, type_arg) formal_arg.type = type_arg.type formal_arg.name = type_arg.name formal_arg.cname = type_arg.cname self._validate_type_visibility(type_arg.type, type_arg.pos, env) if type_arg.type.is_fused: self.has_fused_arguments = True if type_arg.type.is_buffer and 'inline' in self.modifiers: warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) if type_arg.type.is_buffer: if self.type.nogil: error(formal_arg.pos, "Buffer may not be acquired without the GIL. 
Consider using memoryview slices instead.") elif 'inline' in self.modifiers: warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) self._validate_type_visibility(type.return_type, self.pos, env) name = name_declarator.name cname = name_declarator.cname type.is_const_method = self.is_const_method type.is_static_method = self.is_static_method self.entry = env.declare_cfunction( name, type, self.pos, cname=cname, visibility=self.visibility, api=self.api, defining=self.body is not None, modifiers=self.modifiers, overridable=self.overridable) self.entry.inline_func_in_pxd = self.inline_in_pxd self.return_type = type.return_type if self.return_type.is_array and self.visibility != 'extern': error(self.pos, "Function cannot return an array") if self.return_type.is_cpp_class: self.return_type.check_nullary_constructor(self.pos, "used as a return value") if self.overridable and not env.is_module_scope and not self.is_static_method: if len(self.args) < 1 or not self.args[0].type.is_pyobject: # An error will be produced in the cdef function self.overridable = False self.declare_cpdef_wrapper(env) self.create_local_scope(env) def declare_cpdef_wrapper(self, env): if self.overridable: if self.is_static_method: # TODO(robertwb): Finish this up, perhaps via more function refactoring. error(self.pos, "static cpdef methods not yet supported") name = self.entry.name py_func_body = self.call_self_node(is_module_scope=env.is_module_scope) if self.is_static_method: from .ExprNodes import NameNode decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))] decorators[0].decorator.analyse_types(env) else: decorators = [] self.py_func = DefNode(pos=self.pos, name=self.entry.name, args=self.args, star_arg=None, starstar_arg=None, doc=self.doc, body=py_func_body, decorators=decorators, is_wrapper=1) self.py_func.is_module_scope = env.is_module_scope self.py_func.analyse_declarations(env) self.py_func_stat = StatListNode(self.pos, stats=[self.py_func]) self.py_func.type = PyrexTypes.py_object_type self.entry.as_variable = self.py_func.entry self.entry.used = self.entry.as_variable.used = True # Reset scope entry the above cfunction env.entries[name] = self.entry if (not self.entry.is_final_cmethod and (not env.is_module_scope or Options.lookup_module_cpdef)): self.override = OverrideCheckNode(self.pos, py_func=self.py_func) self.body = StatListNode(self.pos, stats=[self.override, self.body]) def _validate_type_visibility(self, type, pos, env): """ Ensure that types used in cdef functions are public or api, or defined in a C header. """ public_or_api = (self.visibility == 'public' or self.api) entry = getattr(type, 'entry', None) if public_or_api and entry and env.is_module_scope: if not (entry.visibility in ('public', 'extern') or entry.api or entry.in_cinclude): error(pos, "Function declared public or api may not have private types") def call_self_node(self, omit_optional_args=0, is_module_scope=0): from . 
import ExprNodes args = self.type.args if omit_optional_args: args = args[:len(args) - self.type.optional_arg_count] arg_names = [arg.name for arg in args] if is_module_scope: cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name) call_arg_names = arg_names skip_dispatch = Options.lookup_module_cpdef elif self.type.is_static_method: class_entry = self.entry.scope.parent_type.entry class_node = ExprNodes.NameNode(self.pos, name=class_entry.name) class_node.entry = class_entry cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name) # Calling static c(p)def methods on an instance disallowed. # TODO(robertwb): Support by passing self to check for override? skip_dispatch = True else: type_entry = self.type.args[0].type.entry type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name) type_arg.entry = type_entry cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name) skip_dispatch = not is_module_scope or Options.lookup_module_cpdef c_call = ExprNodes.SimpleCallNode( self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names], wrapper_call=skip_dispatch) return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call) def declare_arguments(self, env): for arg in self.type.args: if not arg.name: error(arg.pos, "Missing argument name") self.declare_argument(env, arg) def need_gil_acquisition(self, lenv): return self.type.with_gil def nogil_check(self, env): type = self.type with_gil = type.with_gil if type.nogil and not with_gil: if type.return_type.is_pyobject: error(self.pos, "Function with Python return type cannot be declared nogil") for entry in self.local_scope.var_entries: if entry.type.is_pyobject and not entry.in_with_gil_block: error(self.pos, "Function declared nogil has Python locals or temporaries") def analyse_expressions(self, env): self.local_scope.directives = env.directives if self.py_func is not None: # this will also analyse the default values self.py_func = self.py_func.analyse_expressions(env) else: self.analyse_default_values(env) self.analyse_annotations(env) self.acquire_gil = self.need_gil_acquisition(self.local_scope) return self def needs_assignment_synthesis(self, env, code=None): return False def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None): scope = self.local_scope arg_decls = [] type = self.type for arg in type.args[:len(type.args)-type.optional_arg_count]: arg_decl = arg.declaration_code() entry = scope.lookup(arg.name) if not entry.cf_used: arg_decl = 'CYTHON_UNUSED %s' % arg_decl arg_decls.append(arg_decl) if with_dispatch and self.overridable: dispatch_arg = PyrexTypes.c_int_type.declaration_code( Naming.skip_dispatch_cname) if self.override: arg_decls.append(dispatch_arg) else: arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg) if type.optional_arg_count and with_opt_args: arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname)) if type.has_varargs: arg_decls.append("...") if not arg_decls: arg_decls = ["void"] if cname is None: cname = self.entry.func_cname entity = type.function_header_code(cname, ', '.join(arg_decls)) if self.entry.visibility == 'private' and '::' not in cname: storage_class = "static " else: storage_class = "" dll_linkage = None modifiers = code.build_function_modifiers(self.entry.func_modifiers) header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage) #print (storage_class, modifiers, header) needs_proto = self.is_c_class_method 
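        # For cdef class methods a prototype is additionally written into the
        # module declarations section below, presumably so the definition can be
        # referenced from other generated code before it appears in the C file.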
if self.template_declaration: if needs_proto: code.globalstate.parts['module_declarations'].putln(self.template_declaration) code.putln(self.template_declaration) if needs_proto: code.globalstate.parts['module_declarations'].putln( "%s%s%s; /* proto*/" % (storage_class, modifiers, header)) code.putln("%s%s%s {" % (storage_class, modifiers, header)) def generate_argument_declarations(self, env, code): scope = self.local_scope for arg in self.args: if arg.default: entry = scope.lookup(arg.name) if self.override or entry.cf_used: result = arg.calculate_default_value_code(code) code.putln('%s = %s;' % ( arg.type.declaration_code(arg.cname), result)) def generate_keyword_list(self, code): pass def generate_argument_parsing_code(self, env, code): i = 0 used = 0 scope = self.local_scope if self.type.optional_arg_count: code.putln('if (%s) {' % Naming.optional_args_cname) for arg in self.args: if arg.default: entry = scope.lookup(arg.name) if self.override or entry.cf_used: code.putln('if (%s->%sn > %s) {' % (Naming.optional_args_cname, Naming.pyrex_prefix, i)) declarator = arg.declarator while not hasattr(declarator, 'name'): declarator = declarator.base code.putln('%s = %s->%s;' % (arg.cname, Naming.optional_args_cname, self.type.opt_arg_cname(declarator.name))) used += 1 i += 1 for _ in range(used): code.putln('}') code.putln('}') # Move arguments into closure if required def put_into_closure(entry): if entry.in_closure and not arg.default: code.putln('%s = %s;' % (entry.cname, entry.original_cname)) code.put_var_incref(entry) code.put_var_giveref(entry) for arg in self.args: put_into_closure(scope.lookup_here(arg.name)) def generate_argument_conversion_code(self, code): pass def generate_argument_type_tests(self, code): # Generate type tests for args whose type in a parent # class is a supertype of the declared type. for arg in self.type.args: if arg.needs_type_test: self.generate_arg_type_test(arg, code) elif arg.type.is_pyobject and not arg.accept_none: self.generate_arg_none_check(arg, code) def generate_execution_code(self, code): super(CFuncDefNode, self).generate_execution_code(code) if self.py_func_stat: self.py_func_stat.generate_execution_code(code) def error_value(self): if self.return_type.is_pyobject: return "0" else: #return None return self.entry.type.exception_value def caller_will_check_exceptions(self): return self.entry.type.exception_check def generate_wrapper_functions(self, code): # If the C signature of a function has changed, we need to generate # wrappers to put in the slots here. k = 0 entry = self.entry func_type = entry.type while entry.prev_entry is not None: k += 1 entry = entry.prev_entry entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k) code.putln() self.generate_function_header( code, 0, with_dispatch=entry.type.is_overridable, with_opt_args=entry.type.optional_arg_count, cname=entry.func_cname) if not self.return_type.is_void: code.put('return ') args = self.type.args arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]] if entry.type.is_overridable: arglist.append(Naming.skip_dispatch_cname) elif func_type.is_overridable: arglist.append('0') if entry.type.optional_arg_count: arglist.append(Naming.optional_args_cname) elif func_type.optional_arg_count: arglist.append('NULL') code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist))) code.putln('}') class PyArgDeclNode(Node): # Argument which must be a Python object (used # for * and ** arguments). 
# # name string # entry Symtab.Entry # annotation ExprNode or None Py3 argument annotation child_attrs = [] is_self_arg = False is_type_arg = False def generate_function_definitions(self, env, code): self.entry.generate_function_definitions(env, code) class DecoratorNode(Node): # A decorator # # decorator NameNode or CallNode or AttributeNode child_attrs = ['decorator'] class DefNode(FuncDefNode): # A Python function definition. # # name string the Python name of the function # lambda_name string the internal name of a lambda 'function' # decorators [DecoratorNode] list of decorators # args [CArgDeclNode] formal arguments # doc EncodedString or None # body StatListNode # return_type_annotation # ExprNode or None the Py3 return type annotation # # The following subnode is constructed internally # when the def statement is inside a Python class definition. # # fused_py_func DefNode The original fused cpdef DefNode # (in case this is a specialization) # specialized_cpdefs [DefNode] list of specialized cpdef DefNodes # py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign # # decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] lambda_name = None reqd_kw_flags_cname = "0" is_wrapper = 0 no_assignment_synthesis = 0 decorators = None return_type_annotation = None entry = None acquire_gil = 0 self_in_stararg = 0 py_cfunc_node = None requires_classobj = False defaults_struct = None # Dynamic kwrds structure name doc = None fused_py_func = False specialized_cpdefs = None py_wrapper = None py_wrapper_required = True func_cname = None defaults_getter = None def __init__(self, pos, **kwds): FuncDefNode.__init__(self, pos, **kwds) k = rk = r = 0 for arg in self.args: if arg.kw_only: k += 1 if not arg.default: rk += 1 if not arg.default: r += 1 self.num_kwonly_args = k self.num_required_kw_args = rk self.num_required_args = r def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, modifiers=None): if self.star_arg: error(self.star_arg.pos, "cdef function cannot have star argument") if self.starstar_arg: error(self.starstar_arg.pos, "cdef function cannot have starstar argument") if cfunc is None: cfunc_args = [] for formal_arg in self.args: name_declarator, type = formal_arg.analyse(scope, nonempty=1) cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name, cname=None, type=py_object_type, pos=formal_arg.pos)) cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type, args=cfunc_args, has_varargs=False, exception_value=None, exception_check=False, nogil=False, with_gil=False, is_overridable=overridable) cfunc = CVarDefNode(self.pos, type=cfunc_type) else: if scope is None: scope = cfunc.scope cfunc_type = cfunc.type if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs: error(self.pos, "wrong number of arguments") error(cfunc.pos, "previous declaration here") for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)): name_declarator, type = formal_arg.analyse(scope, nonempty=1, is_self_arg=(i == 0 and scope.is_c_class_scope)) if type is None or type is PyrexTypes.py_object_type: formal_arg.type = type_arg.type formal_arg.name_declarator = name_declarator from . 
import ExprNodes if cfunc_type.exception_value is None: exception_value = None else: exception_value = ExprNodes.ConstNode( self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type) declarator = CFuncDeclaratorNode(self.pos, base=CNameDeclaratorNode(self.pos, name=self.name, cname=None), args=self.args, has_varargs=False, exception_check=cfunc_type.exception_check, exception_value=exception_value, with_gil=cfunc_type.with_gil, nogil=cfunc_type.nogil) return CFuncDefNode(self.pos, modifiers=modifiers or [], base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type), declarator=declarator, body=self.body, doc=self.doc, overridable=cfunc_type.is_overridable, type=cfunc_type, with_gil=cfunc_type.with_gil, nogil=cfunc_type.nogil, visibility='private', api=False, directive_locals=getattr(cfunc, 'directive_locals', {}), directive_returns=returns) def is_cdef_func_compatible(self): """Determines if the function's signature is compatible with a cdef function. This can be used before calling .as_cfunction() to see if that will be successful. """ if self.needs_closure: return False if self.star_arg or self.starstar_arg: return False return True def analyse_declarations(self, env): self.is_classmethod = self.is_staticmethod = False if self.decorators: for decorator in self.decorators: func = decorator.decorator if func.is_name: self.is_classmethod |= func.name == 'classmethod' self.is_staticmethod |= func.name == 'staticmethod' if self.is_classmethod and env.lookup_here('classmethod'): # classmethod() was overridden - not much we can do here ... self.is_classmethod = False if self.is_staticmethod and env.lookup_here('staticmethod'): # staticmethod() was overridden - not much we can do here ... self.is_staticmethod = False if self.name == '__new__' and env.is_py_class_scope: self.is_staticmethod = 1 self.analyse_argument_types(env) if self.name == '<lambda>': self.declare_lambda_function(env) else: self.declare_pyfunction(env) self.analyse_signature(env) self.return_type = self.entry.signature.return_type() # if a signature annotation provides a more specific return object type, use it if self.return_type is py_object_type and self.return_type_annotation: if env.directives['annotation_typing'] and not self.entry.is_special: _, return_type = _analyse_signature_annotation(self.return_type_annotation, env) if return_type and return_type.is_pyobject: self.return_type = return_type self.create_local_scope(env) self.py_wrapper = DefNodeWrapper( self.pos, target=self, name=self.entry.name, args=self.args, star_arg=self.star_arg, starstar_arg=self.starstar_arg, return_type=self.return_type) self.py_wrapper.analyse_declarations(env) def analyse_argument_types(self, env): self.directive_locals = env.directives['locals'] allow_none_for_extension_args = env.directives['allow_none_for_extension_args'] f2s = env.fused_to_specific env.fused_to_specific = None for arg in self.args: if hasattr(arg, 'name'): name_declarator = None else: base_type = arg.base_type.analyse(env) name_declarator, type = \ arg.declarator.analyse(base_type, env) arg.name = name_declarator.name arg.type = type if type.is_fused: self.has_fused_arguments = True self.align_argument_type(env, arg) if name_declarator and name_declarator.cname: error(self.pos, "Python function argument cannot have C name specification") arg.type = arg.type.as_argument_type() arg.hdr_type = None arg.needs_conversion = 0 arg.needs_type_test = 0 arg.is_generic = 1 if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice: if 
arg.or_none: arg.accept_none = True elif arg.not_none: arg.accept_none = False elif (arg.type.is_extension_type or arg.type.is_builtin_type or arg.type.is_buffer or arg.type.is_memoryviewslice): if arg.default and arg.default.constant_result is None: # special case: def func(MyType obj = None) arg.accept_none = True else: # default depends on compiler directive arg.accept_none = allow_none_for_extension_args else: # probably just a plain 'object' arg.accept_none = True else: arg.accept_none = True # won't be used, but must be there if arg.not_none: error(arg.pos, "Only Python type arguments can have 'not None'") if arg.or_none: error(arg.pos, "Only Python type arguments can have 'or None'") env.fused_to_specific = f2s def analyse_signature(self, env): if self.entry.is_special: if self.decorators: error(self.pos, "special functions of cdef classes cannot have decorators") self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg) elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg): # Use the simpler calling signature for zero- and one-argument functions. if self.entry.signature is TypeSlots.pyfunction_signature: if len(self.args) == 0: self.entry.signature = TypeSlots.pyfunction_noargs elif len(self.args) == 1: if self.args[0].default is None and not self.args[0].kw_only: self.entry.signature = TypeSlots.pyfunction_onearg elif self.entry.signature is TypeSlots.pymethod_signature: if len(self.args) == 1: self.entry.signature = TypeSlots.unaryfunc elif len(self.args) == 2: if self.args[1].default is None and not self.args[1].kw_only: self.entry.signature = TypeSlots.ibinaryfunc sig = self.entry.signature nfixed = sig.num_fixed_args() if (sig is TypeSlots.pymethod_signature and nfixed == 1 and len(self.args) == 0 and self.star_arg): # this is the only case where a diverging number of # arguments is not an error - when we have no explicit # 'self' parameter as in method(*args) sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used self.self_in_stararg = 1 nfixed = 0 if self.is_staticmethod and env.is_c_class_scope: nfixed = 0 self.self_in_stararg = True # FIXME: why for staticmethods? 
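            # Illustrative example of the generic-signature case handled above
            # (user-level Cython code, not part of this module):
            #
            #     cdef class C:
            #         def method(*args): ...   # no explicit 'self' parameter
            #
            # 'self' then arrives inside the *args tuple; static methods of cdef
            # classes are switched to the same "*" argument format just below.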
self.entry.signature = sig = copy.copy(sig) sig.fixed_arg_format = "*" sig.is_staticmethod = True sig.has_generic_args = True if ((self.is_classmethod or self.is_staticmethod) and self.has_fused_arguments and env.is_c_class_scope): del self.decorator_indirection.stats[:] for i in range(min(nfixed, len(self.args))): arg = self.args[i] arg.is_generic = 0 if sig.is_self_arg(i) and not self.is_staticmethod: if self.is_classmethod: arg.is_type_arg = 1 arg.hdr_type = arg.type = Builtin.type_type else: arg.is_self_arg = 1 arg.hdr_type = arg.type = env.parent_type arg.needs_conversion = 0 else: arg.hdr_type = sig.fixed_arg_type(i) if not arg.type.same_as(arg.hdr_type): if arg.hdr_type.is_pyobject and arg.type.is_pyobject: arg.needs_type_test = 1 else: arg.needs_conversion = 1 if arg.needs_conversion: arg.hdr_cname = Naming.arg_prefix + arg.name else: arg.hdr_cname = Naming.var_prefix + arg.name if nfixed > len(self.args): self.bad_signature() return elif nfixed < len(self.args): if not sig.has_generic_args: self.bad_signature() for arg in self.args: if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type): arg.needs_type_test = 1 def bad_signature(self): sig = self.entry.signature expected_str = "%d" % sig.num_fixed_args() if sig.has_generic_args: expected_str += " or more" name = self.name if name.startswith("__") and name.endswith("__"): desc = "Special method" else: desc = "Method" error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % ( desc, self.name, len(self.args), expected_str)) def declare_pyfunction(self, env): #print "DefNode.declare_pyfunction:", self.name, "in", env ### name = self.name entry = env.lookup_here(name) if entry: if entry.is_final_cmethod and not env.parent_type.is_final_type: error(self.pos, "Only final types can have final Python (def/cpdef) methods") if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper: warning(self.pos, "Overriding cdef method with def method.", 5) entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper) self.entry = entry prefix = env.next_id(env.scope_prefix) self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name if Options.docstrings: entry.doc = embed_position(self.pos, self.doc) entry.doc_cname = Naming.funcdoc_prefix + prefix + name if entry.is_special: if entry.name in TypeSlots.invisible or not entry.doc or ( entry.name in '__getattr__' and env.directives['fast_getattr']): entry.wrapperbase_cname = None else: entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name else: entry.doc = None def declare_lambda_function(self, env): entry = env.declare_lambda_function(self.lambda_name, self.pos) entry.doc = None self.entry = entry self.entry.pyfunc_cname = entry.cname def declare_arguments(self, env): for arg in self.args: if not arg.name: error(arg.pos, "Missing argument name") if arg.needs_conversion: arg.entry = env.declare_var(arg.name, arg.type, arg.pos) if arg.type.is_pyobject: arg.entry.init = "0" else: arg.entry = self.declare_argument(env, arg) arg.entry.is_arg = 1 arg.entry.used = 1 arg.entry.is_self_arg = arg.is_self_arg self.declare_python_arg(env, self.star_arg) self.declare_python_arg(env, self.starstar_arg) def declare_python_arg(self, env, arg): if arg: if env.directives['infer_types'] != False: type = PyrexTypes.unspecified_type else: type = py_object_type entry = env.declare_var(arg.name, type, arg.pos) entry.is_arg = 1 entry.used = 1 entry.init = "0" entry.xdecref_cleanup = 1 arg.entry = entry def 
analyse_expressions(self, env): self.local_scope.directives = env.directives self.analyse_default_values(env) self.analyse_annotations(env) if self.return_type_annotation: self.return_type_annotation = self.return_type_annotation.analyse_types(env) if not self.needs_assignment_synthesis(env) and self.decorators: for decorator in self.decorators[::-1]: decorator.decorator = decorator.decorator.analyse_expressions(env) self.py_wrapper.prepare_argument_coercion(env) return self def needs_assignment_synthesis(self, env, code=None): if self.is_staticmethod: return True if self.is_wrapper or self.specialized_cpdefs or self.entry.is_fused_specialized: return False if self.no_assignment_synthesis: return False # Should enable for module level as well, that will require more testing... if self.entry.is_anonymous: return True if env.is_module_scope: if code is None: return env.directives['binding'] else: return code.globalstate.directives['binding'] return env.is_py_class_scope or env.is_closure_scope def error_value(self): return self.entry.signature.error_value def caller_will_check_exceptions(self): return self.entry.signature.exception_check def generate_function_definitions(self, env, code): if self.defaults_getter: self.defaults_getter.generate_function_definitions(env, code) # Before closure cnames are mangled if self.py_wrapper_required: # func_cname might be modified by @cname self.py_wrapper.func_cname = self.entry.func_cname self.py_wrapper.generate_function_definitions(env, code) FuncDefNode.generate_function_definitions(self, env, code) def generate_function_header(self, code, with_pymethdef, proto_only=0): if proto_only: if self.py_wrapper_required: self.py_wrapper.generate_function_header( code, with_pymethdef, True) return arg_code_list = [] if self.entry.signature.has_dummy_arg: self_arg = 'PyObject *%s' % Naming.self_cname if not self.needs_outer_scope: self_arg = 'CYTHON_UNUSED ' + self_arg arg_code_list.append(self_arg) def arg_decl_code(arg): entry = arg.entry if entry.in_closure: cname = entry.original_cname else: cname = entry.cname decl = entry.type.declaration_code(cname) if not entry.cf_used: decl = 'CYTHON_UNUSED ' + decl return decl for arg in self.args: arg_code_list.append(arg_decl_code(arg)) if self.star_arg: arg_code_list.append(arg_decl_code(self.star_arg)) if self.starstar_arg: arg_code_list.append(arg_decl_code(self.starstar_arg)) arg_code = ', '.join(arg_code_list) dc = self.return_type.declaration_code(self.entry.pyfunc_cname) decls_code = code.globalstate['decls'] preprocessor_guard = self.get_preprocessor_guard() if preprocessor_guard: decls_code.putln(preprocessor_guard) decls_code.putln( "static %s(%s); /* proto */" % (dc, arg_code)) if preprocessor_guard: decls_code.putln("#endif") code.putln("static %s(%s) {" % (dc, arg_code)) def generate_argument_declarations(self, env, code): pass def generate_keyword_list(self, code): pass def generate_argument_parsing_code(self, env, code): # Move arguments into closure if required def put_into_closure(entry): if entry.in_closure: code.putln('%s = %s;' % (entry.cname, entry.original_cname)) code.put_var_incref(entry) code.put_var_giveref(entry) for arg in self.args: put_into_closure(arg.entry) for arg in self.star_arg, self.starstar_arg: if arg: put_into_closure(arg.entry) def generate_argument_type_tests(self, code): pass class DefNodeWrapper(FuncDefNode): # DefNode python wrapper code generator defnode = None target = None # Target DefNode def __init__(self, *args, **kwargs): FuncDefNode.__init__(self, *args, 
**kwargs) self.num_kwonly_args = self.target.num_kwonly_args self.num_required_kw_args = self.target.num_required_kw_args self.num_required_args = self.target.num_required_args self.self_in_stararg = self.target.self_in_stararg self.signature = None def analyse_declarations(self, env): target_entry = self.target.entry name = self.name prefix = env.next_id(env.scope_prefix) target_entry.func_cname = Naming.pywrap_prefix + prefix + name target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name self.signature = target_entry.signature def prepare_argument_coercion(self, env): # This is only really required for Cython utility code at this time, # everything else can be done during code generation. But we expand # all utility code here, simply because we cannot easily distinguish # different code types. for arg in self.args: if not arg.type.is_pyobject: if not arg.type.create_from_py_utility_code(env): pass # will fail later elif arg.hdr_type and not arg.hdr_type.is_pyobject: if not arg.hdr_type.create_to_py_utility_code(env): pass # will fail later if self.starstar_arg and not self.starstar_arg.entry.cf_used: # we will set the kwargs argument to NULL instead of a new dict # and must therefore correct the control flow state entry = self.starstar_arg.entry entry.xdecref_cleanup = 1 for ass in entry.cf_assignments: if not ass.is_arg and ass.lhs.is_name: ass.lhs.cf_maybe_null = True def signature_has_nongeneric_args(self): argcount = len(self.args) if argcount == 0 or ( argcount == 1 and (self.args[0].is_self_arg or self.args[0].is_type_arg)): return 0 return 1 def signature_has_generic_args(self): return self.signature.has_generic_args def generate_function_body(self, code): args = [] if self.signature.has_dummy_arg: args.append(Naming.self_cname) for arg in self.args: if arg.hdr_type and not (arg.type.is_memoryviewslice or arg.type.is_struct or arg.type.is_complex): args.append(arg.type.cast_code(arg.entry.cname)) else: args.append(arg.entry.cname) if self.star_arg: args.append(self.star_arg.entry.cname) if self.starstar_arg: args.append(self.starstar_arg.entry.cname) args = ', '.join(args) if not self.return_type.is_void: code.put('%s = ' % Naming.retval_cname) code.putln('%s(%s);' % ( self.target.entry.pyfunc_cname, args)) def generate_function_definitions(self, env, code): lenv = self.target.local_scope # Generate C code for header and body of function code.mark_pos(self.pos) code.putln("") code.putln("/* Python wrapper */") preprocessor_guard = self.target.get_preprocessor_guard() if preprocessor_guard: code.putln(preprocessor_guard) code.enter_cfunc_scope() code.return_from_error_cleanup_label = code.new_label() with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or self.target.pymethdef_required) self.generate_function_header(code, with_pymethdef) self.generate_argument_declarations(lenv, code) tempvardecl_code = code.insertion_point() if self.return_type.is_pyobject: retval_init = ' = 0' else: retval_init = '' if not self.return_type.is_void: code.putln('%s%s;' % ( self.return_type.declaration_code(Naming.retval_cname), retval_init)) code.put_declare_refcount_context() code.put_setup_refcount_context('%s (wrapper)' % self.name) self.generate_argument_parsing_code(lenv, code) self.generate_argument_type_tests(code) self.generate_function_body(code) # ----- Go back and insert temp variable declarations tempvardecl_code.put_temp_declarations(code.funcstate) code.mark_pos(self.pos) code.putln("") code.putln("/* function exit code */") # ----- Error cleanup if 
code.error_label in code.labels_used: code.put_goto(code.return_label) code.put_label(code.error_label) for cname, type in code.funcstate.all_managed_temps(): code.put_xdecref(cname, type) err_val = self.error_value() if err_val is not None: code.putln("%s = %s;" % (Naming.retval_cname, err_val)) # ----- Non-error return cleanup code.put_label(code.return_label) for entry in lenv.var_entries: if entry.is_arg and entry.type.is_pyobject: code.put_var_decref(entry) code.put_finish_refcount_context() if not self.return_type.is_void: code.putln("return %s;" % Naming.retval_cname) code.putln('}') code.exit_cfunc_scope() if preprocessor_guard: code.putln("#endif /*!(%s)*/" % preprocessor_guard) def generate_function_header(self, code, with_pymethdef, proto_only=0): arg_code_list = [] sig = self.signature if sig.has_dummy_arg or self.self_in_stararg: arg_code = "PyObject *%s" % Naming.self_cname if not sig.has_dummy_arg: arg_code = 'CYTHON_UNUSED ' + arg_code arg_code_list.append(arg_code) for arg in self.args: if not arg.is_generic: if arg.is_self_arg or arg.is_type_arg: arg_code_list.append("PyObject *%s" % arg.hdr_cname) else: arg_code_list.append( arg.hdr_type.declaration_code(arg.hdr_cname)) entry = self.target.entry if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]: arg_code_list.append("CYTHON_UNUSED PyObject *unused") if entry.scope.is_c_class_scope and entry.name == "__ipow__": arg_code_list.append("CYTHON_UNUSED PyObject *unused") if sig.has_generic_args: arg_code_list.append( "PyObject *%s, PyObject *%s" % ( Naming.args_cname, Naming.kwds_cname)) arg_code = ", ".join(arg_code_list) # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__' mf = "" if (entry.name in ("__getbuffer__", "__releasebuffer__") and entry.scope.is_c_class_scope): mf = "CYTHON_UNUSED " with_pymethdef = False dc = self.return_type.declaration_code(entry.func_cname) header = "static %s%s(%s)" % (mf, dc, arg_code) code.putln("%s; /*proto*/" % header) if proto_only: if self.target.fused_py_func: # If we are the specialized version of the cpdef, we still # want the prototype for the "fused cpdef", in case we're # checking to see if our method was overridden in Python self.target.fused_py_func.generate_function_header( code, with_pymethdef, proto_only=True) return if (Options.docstrings and entry.doc and not self.target.fused_py_func and not entry.scope.is_property_scope and (not entry.is_special or entry.wrapperbase_cname)): # h_code = code.globalstate['h_code'] docstr = entry.doc if docstr.is_unicode: docstr = docstr.as_utf8_string() code.putln( 'static char %s[] = %s;' % ( entry.doc_cname, docstr.as_c_string_literal())) if entry.is_special: code.putln('#if CYTHON_COMPILING_IN_CPYTHON') code.putln( "struct wrapperbase %s;" % entry.wrapperbase_cname) code.putln('#endif') if with_pymethdef or self.target.fused_py_func: code.put( "static PyMethodDef %s = " % entry.pymethdef_cname) code.put_pymethoddef(self.target.entry, ";", allow_skip=False) code.putln("%s {" % header) def generate_argument_declarations(self, env, code): for arg in self.args: if arg.is_generic: if arg.needs_conversion: code.putln("PyObject *%s = 0;" % arg.hdr_cname) else: code.put_var_declaration(arg.entry) for entry in env.var_entries: if entry.is_arg: code.put_var_declaration(entry) def generate_argument_parsing_code(self, env, code): # Generate fast equivalent of PyArg_ParseTuple call for # generic arguments, if any, including args/kwargs old_error_label = code.new_error_label() our_error_label = 
code.error_label end_label = code.new_label("argument_unpacking_done") has_kwonly_args = self.num_kwonly_args > 0 has_star_or_kw_args = self.star_arg is not None \ or self.starstar_arg is not None or has_kwonly_args for arg in self.args: if not arg.type.is_pyobject: if not arg.type.create_from_py_utility_code(env): pass # will fail later if not self.signature_has_generic_args(): if has_star_or_kw_args: error(self.pos, "This method cannot have * or keyword arguments") self.generate_argument_conversion_code(code) elif not self.signature_has_nongeneric_args(): # func(*args) or func(**kw) or func(*args, **kw) self.generate_stararg_copy_code(code) else: self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code) code.error_label = old_error_label if code.label_used(our_error_label): if not code.label_used(end_label): code.put_goto(end_label) code.put_label(our_error_label) if has_star_or_kw_args: self.generate_arg_decref(self.star_arg, code) if self.starstar_arg: if self.starstar_arg.entry.xdecref_cleanup: code.put_var_xdecref_clear(self.starstar_arg.entry) else: code.put_var_decref_clear(self.starstar_arg.entry) code.put_add_traceback(self.target.entry.qualified_name) code.put_finish_refcount_context() code.putln("return %s;" % self.error_value()) if code.label_used(end_label): code.put_label(end_label) def generate_arg_xdecref(self, arg, code): if arg: code.put_var_xdecref_clear(arg.entry) def generate_arg_decref(self, arg, code): if arg: code.put_var_decref_clear(arg.entry) def generate_stararg_copy_code(self, code): if not self.star_arg: code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c")) code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" % Naming.args_cname) code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % ( self.name, Naming.args_cname, self.error_value())) code.putln("}") if self.starstar_arg: if self.star_arg or not self.starstar_arg.entry.cf_used: kwarg_check = "unlikely(%s)" % Naming.kwds_cname else: kwarg_check = "%s" % Naming.kwds_cname else: kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % ( Naming.kwds_cname, Naming.kwds_cname) code.globalstate.use_utility_code( UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c")) code.putln( "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % ( kwarg_check, Naming.kwds_cname, self.name, bool(self.starstar_arg), self.error_value())) if self.starstar_arg and self.starstar_arg.entry.cf_used: if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references): code.putln("if (%s) {" % kwarg_check) code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % ( self.starstar_arg.entry.cname, Naming.kwds_cname, self.starstar_arg.entry.cname, self.error_value())) code.put_gotref(self.starstar_arg.entry.cname) code.putln("} else {") code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,)) code.putln("}") self.starstar_arg.entry.xdecref_cleanup = 1 else: code.put("%s = (%s) ? 
PyDict_Copy(%s) : PyDict_New(); " % ( self.starstar_arg.entry.cname, Naming.kwds_cname, Naming.kwds_cname)) code.putln("if (unlikely(!%s)) return %s;" % ( self.starstar_arg.entry.cname, self.error_value())) self.starstar_arg.entry.xdecref_cleanup = 0 code.put_gotref(self.starstar_arg.entry.cname) if self.self_in_stararg and not self.target.is_staticmethod: # need to create a new tuple with 'self' inserted as first item code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % ( self.star_arg.entry.cname, Naming.args_cname, self.star_arg.entry.cname)) if self.starstar_arg and self.starstar_arg.entry.cf_used: code.putln("{") code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type) code.putln("return %s;" % self.error_value()) code.putln("}") else: code.putln("return %s;" % self.error_value()) code.put_gotref(self.star_arg.entry.cname) code.put_incref(Naming.self_cname, py_object_type) code.put_giveref(Naming.self_cname) code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % ( self.star_arg.entry.cname, Naming.self_cname)) temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False) code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % ( temp, temp, Naming.args_cname, temp)) code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % ( Naming.args_cname, temp)) code.put_incref("item", py_object_type) code.put_giveref("item") code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % ( self.star_arg.entry.cname, temp)) code.putln("}") code.funcstate.release_temp(temp) self.star_arg.entry.xdecref_cleanup = 0 elif self.star_arg: code.put_incref(Naming.args_cname, py_object_type) code.putln("%s = %s;" % ( self.star_arg.entry.cname, Naming.args_cname)) self.star_arg.entry.xdecref_cleanup = 0 def generate_tuple_and_keyword_parsing_code(self, args, success_label, code): argtuple_error_label = code.new_label("argtuple_error") positional_args = [] required_kw_only_args = [] optional_kw_only_args = [] for arg in args: if arg.is_generic: if arg.default: if not arg.is_self_arg and not arg.is_type_arg: if arg.kw_only: optional_kw_only_args.append(arg) else: positional_args.append(arg) elif arg.kw_only: required_kw_only_args.append(arg) elif not arg.is_self_arg and not arg.is_type_arg: positional_args.append(arg) # sort required kw-only args before optional ones to avoid special # cases in the unpacking code kw_only_args = required_kw_only_args + optional_kw_only_args min_positional_args = self.num_required_args - self.num_required_kw_args if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg): min_positional_args -= 1 max_positional_args = len(positional_args) has_fixed_positional_count = not self.star_arg and \ min_positional_args == max_positional_args has_kw_only_args = bool(kw_only_args) if self.num_required_kw_args: code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c")) if self.starstar_arg or self.star_arg: self.generate_stararg_init_code(max_positional_args, code) code.putln('{') all_args = tuple(positional_args) + tuple(kw_only_args) code.putln("static PyObject **%s[] = {%s,0};" % ( Naming.pykwdlist_cname, ','.join(['&%s' % code.intern_identifier(arg.name) for arg in all_args]))) # Before being converted and assigned to the target variables, # borrowed references to all unpacked argument values are # collected into a local PyObject* array called "values", # regardless if they were taken from default arguments, # positional arguments or keyword arguments. 
Note that # C-typed default arguments are handled at conversion time, # so their array value is NULL in the end if no argument # was passed for them. self.generate_argument_values_setup_code(all_args, code) # --- optimised code when we receive keyword arguments code.putln("if (%s(%s)) {" % ( (self.num_required_kw_args > 0) and "likely" or "unlikely", Naming.kwds_cname)) self.generate_keyword_unpacking_code( min_positional_args, max_positional_args, has_fixed_positional_count, has_kw_only_args, all_args, argtuple_error_label, code) # --- optimised code when we do not receive any keyword arguments if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args: # Python raises arg tuple related errors first, so we must # check the length here if min_positional_args == max_positional_args and not self.star_arg: compare = '!=' else: compare = '<' code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % ( Naming.args_cname, compare, min_positional_args)) code.put_goto(argtuple_error_label) if self.num_required_kw_args: # pure error case: keywords required but not passed if max_positional_args > min_positional_args and not self.star_arg: code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % ( Naming.args_cname, max_positional_args)) code.put_goto(argtuple_error_label) code.putln('} else {') for i, arg in enumerate(kw_only_args): if not arg.default: pystring_cname = code.intern_identifier(arg.name) # required keyword-only argument missing code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % ( self.name, pystring_cname)) code.putln(code.error_goto(self.pos)) break else: # optimised tuple unpacking code code.putln('} else {') if min_positional_args == max_positional_args: # parse the exact number of positional arguments from # the args tuple for i, arg in enumerate(positional_args): code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i)) else: # parse the positional arguments from the variable length # args tuple and reject illegal argument tuple sizes code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname) if self.star_arg: code.putln('default:') reversed_args = list(enumerate(positional_args))[::-1] for i, arg in reversed_args: if i >= min_positional_args-1: code.put('case %2d: ' % (i+1)) code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i)) if min_positional_args == 0: code.put('case 0: ') code.putln('break;') if self.star_arg: if min_positional_args: for i in range(min_positional_args-1, -1, -1): code.putln('case %2d:' % i) code.put_goto(argtuple_error_label) else: code.put('default: ') code.put_goto(argtuple_error_label) code.putln('}') code.putln('}') # end of the conditional unpacking blocks # Convert arg values to their final type and assign them. # Also inject non-Python default arguments, which do cannot # live in the values[] array. 
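        # Illustrative example (assumed user code, not part of this module):
        #
        #     def f(int n=3, obj=None): ...
        #
        # 'obj' has a Python default, so its values[] slot is pre-filled and may
        # be overwritten by a passed argument; 'n' has a C-typed default, so its
        # values[] slot stays NULL and the default is assigned during the
        # conversion step in the loop below.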
for i, arg in enumerate(all_args): self.generate_arg_assignment(arg, "values[%d]" % i, code) code.putln('}') # end of the whole argument unpacking block if code.label_used(argtuple_error_label): code.put_goto(success_label) code.put_label(argtuple_error_label) code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c")) code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % ( self.name, has_fixed_positional_count, min_positional_args, max_positional_args, Naming.args_cname)) code.putln(code.error_goto(self.pos)) def generate_arg_assignment(self, arg, item, code): if arg.type.is_pyobject: # Python default arguments were already stored in 'item' at the very beginning if arg.is_generic: item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item) entry = arg.entry code.putln("%s = %s;" % (entry.cname, item)) else: func = arg.type.from_py_function if func: if arg.default: # C-typed default arguments must be handled here code.putln('if (%s) {' % item) rhs = "%s(%s)" % (func, item) if arg.type.is_enum: rhs = arg.type.cast_code(rhs) code.putln("%s = %s; %s" % ( arg.entry.cname, rhs, code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos))) if arg.default: code.putln('} else {') code.putln("%s = %s;" % ( arg.entry.cname, arg.calculate_default_value_code(code))) if arg.type.is_memoryviewslice: code.put_incref_memoryviewslice(arg.entry.cname, have_gil=True) code.putln('}') else: error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type) def generate_stararg_init_code(self, max_positional_args, code): if self.starstar_arg: self.starstar_arg.entry.xdecref_cleanup = 0 code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % ( self.starstar_arg.entry.cname, self.starstar_arg.entry.cname, self.error_value())) code.put_gotref(self.starstar_arg.entry.cname) if self.star_arg: self.star_arg.entry.xdecref_cleanup = 0 code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % ( Naming.args_cname, max_positional_args)) code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % ( self.star_arg.entry.cname, Naming.args_cname, max_positional_args, Naming.args_cname)) code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname) if self.starstar_arg: code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type) code.put_finish_refcount_context() code.putln('return %s;' % self.error_value()) code.putln('}') code.put_gotref(self.star_arg.entry.cname) code.putln('} else {') code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple)) code.put_incref(Naming.empty_tuple, py_object_type) code.putln('}') def generate_argument_values_setup_code(self, args, code): max_args = len(args) # the 'values' array collects borrowed references to arguments # before doing any type coercion etc. 
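        # The generated C therefore begins with something like (illustrative):
        #
        #     PyObject* values[3] = {0,0,0};
        #
        # one slot per declared argument, later filled with borrowed references
        # from defaults, the args tuple, or the kwargs dict.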
code.putln("PyObject* values[%d] = {%s};" % ( max_args, ','.join('0'*max_args))) if self.target.defaults_struct: code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % ( self.target.defaults_struct, Naming.dynamic_args_cname, self.target.defaults_struct, Naming.self_cname)) # assign borrowed Python default values to the values array, # so that they can be overwritten by received arguments below for i, arg in enumerate(args): if arg.default and arg.type.is_pyobject: default_value = arg.calculate_default_value_code(code) code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value))) def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args, has_fixed_positional_count, has_kw_only_args, all_args, argtuple_error_label, code): code.putln('Py_ssize_t kw_args;') code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname) # copy the values from the args tuple and check that it's not too long code.putln('switch (pos_args) {') if self.star_arg: code.putln('default:') for i in range(max_positional_args-1, -1, -1): code.put('case %2d: ' % (i+1)) code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % ( i, Naming.args_cname, i)) code.putln('case 0: break;') if not self.star_arg: code.put('default: ') # more arguments than allowed code.put_goto(argtuple_error_label) code.putln('}') # The code above is very often (but not always) the same as # the optimised non-kwargs tuple unpacking code, so we keep # the code block above at the very top, before the following # 'external' PyDict_Size() call, to make it easy for the C # compiler to merge the two separate tuple unpacking # implementations into one when they turn out to be identical. # If we received kwargs, fill up the positional/required # arguments with values from the kw dict code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname) if self.num_required_args or max_positional_args > 0: last_required_arg = -1 for i, arg in enumerate(all_args): if not arg.default: last_required_arg = i if last_required_arg < max_positional_args: last_required_arg = max_positional_args-1 if max_positional_args > 0: code.putln('switch (pos_args) {') for i, arg in enumerate(all_args[:last_required_arg+1]): if max_positional_args > 0 and i <= max_positional_args: if self.star_arg and i == max_positional_args: code.putln('default:') else: code.putln('case %2d:' % i) pystring_cname = code.intern_identifier(arg.name) if arg.default: if arg.kw_only: # optional kw-only args are handled separately below continue code.putln('if (kw_args > 0) {') # don't overwrite default argument code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % ( Naming.kwds_cname, pystring_cname)) code.putln('if (value) { values[%d] = value; kw_args--; }' % i) code.putln('}') else: code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % ( i, Naming.kwds_cname, pystring_cname)) if i < min_positional_args: if i == 0: # special case: we know arg 0 is missing code.put('else ') code.put_goto(argtuple_error_label) else: # print the correct number of values (args or # kwargs) that were passed into positional # arguments up to this point code.putln('else {') code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c")) code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % ( self.name, has_fixed_positional_count, min_positional_args, max_positional_args, i)) code.putln(code.error_goto(self.pos)) code.putln('}') elif arg.kw_only: code.putln('else {') 
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % ( self.name, pystring_cname)) code.putln(code.error_goto(self.pos)) code.putln('}') if max_positional_args > 0: code.putln('}') if has_kw_only_args: # unpack optional keyword-only arguments separately because # checking for interned strings in a dict is faster than iterating self.generate_optional_kwonly_args_unpacking_code(all_args, code) code.putln('if (unlikely(kw_args > 0)) {') # non-positional/-required kw args left in dict: default args, # kw-only args, **kwargs or error # # This is sort of a catch-all: except for checking required # arguments, this will always do the right thing for unpacking # keyword arguments, so that we can concentrate on optimising # common cases above. if max_positional_args == 0: pos_arg_count = "0" elif self.star_arg: code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % ( max_positional_args, max_positional_args)) pos_arg_count = "used_pos_args" else: pos_arg_count = "pos_args" code.globalstate.use_utility_code( UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c")) code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % ( Naming.kwds_cname, Naming.pykwdlist_cname, self.starstar_arg and self.starstar_arg.entry.cname or '0', pos_arg_count, self.name, code.error_goto(self.pos))) code.putln('}') def generate_optional_kwonly_args_unpacking_code(self, all_args, code): optional_args = [] first_optional_arg = -1 for i, arg in enumerate(all_args): if not arg.kw_only or not arg.default: continue if not optional_args: first_optional_arg = i optional_args.append(arg.name) if optional_args: if len(optional_args) > 1: # if we receive more than the named kwargs, we either have **kwargs # (in which case we must iterate anyway) or it's an error (which we # also handle during iteration) => skip this part if there are more code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % ( not self.starstar_arg and 'likely' or '', len(optional_args))) code.putln('Py_ssize_t index;') # not unrolling the loop here reduces the C code overhead code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % ( first_optional_arg, first_optional_arg + len(optional_args))) else: code.putln('if (kw_args == 1) {') code.putln('const Py_ssize_t index = %d;' % first_optional_arg) code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % ( Naming.kwds_cname, Naming.pykwdlist_cname)) code.putln('if (value) { values[index] = value; kw_args--; }') if len(optional_args) > 1: code.putln('}') code.putln('}') def generate_argument_conversion_code(self, code): # Generate code to convert arguments from signature type to # declared type, if needed. Also copies signature arguments # into closure fields. for arg in self.args: if arg.needs_conversion: self.generate_arg_conversion(arg, code) def generate_arg_conversion(self, arg, code): # Generate conversion code for one argument. 
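# The argument arrives with its signature type (arg.hdr_type) and may need to
# be converted to its declared type (arg.type): Python objects are unpacked via
# the target type's from_py_function, C values are boxed via to_py_function,
# and assignment-compatible C types are copied directly (see the branches below).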
old_type = arg.hdr_type new_type = arg.type if old_type.is_pyobject: if arg.default: code.putln("if (%s) {" % arg.hdr_cname) else: code.putln("assert(%s); {" % arg.hdr_cname) self.generate_arg_conversion_from_pyobject(arg, code) code.putln("}") elif new_type.is_pyobject: self.generate_arg_conversion_to_pyobject(arg, code) else: if new_type.assignable_from(old_type): code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname)) else: error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type)) def generate_arg_conversion_from_pyobject(self, arg, code): new_type = arg.type func = new_type.from_py_function # copied from CoerceFromPyTypeNode if func: lhs = arg.entry.cname rhs = "%s(%s)" % (func, arg.hdr_cname) if new_type.is_enum: rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs) code.putln("%s = %s; %s" % ( lhs, rhs, code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos))) else: error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type) def generate_arg_conversion_to_pyobject(self, arg, code): old_type = arg.hdr_type func = old_type.to_py_function if func: code.putln("%s = %s(%s); %s" % ( arg.entry.cname, func, arg.hdr_cname, code.error_goto_if_null(arg.entry.cname, arg.pos))) code.put_var_gotref(arg.entry) else: error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type) def generate_argument_type_tests(self, code): # Generate type tests for args whose signature # type is PyObject * and whose declared type is # a subtype thereof. for arg in self.args: if arg.needs_type_test: self.generate_arg_type_test(arg, code) elif not arg.accept_none and (arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice): self.generate_arg_none_check(arg, code) def error_value(self): return self.signature.error_value class GeneratorDefNode(DefNode): # Generator function node that creates a new generator instance when called. 
    #
    #  gbody   GeneratorBodyDefNode   the function implementing the generator
    #

    is_generator = True
    is_coroutine = False
    needs_closure = True

    child_attrs = DefNode.child_attrs + ["gbody"]

    def __init__(self, pos, **kwargs):
        # XXX: doesn't actually need a body
        kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True)
        super(GeneratorDefNode, self).__init__(pos, **kwargs)

    def analyse_declarations(self, env):
        super(GeneratorDefNode, self).analyse_declarations(env)
        self.gbody.local_scope = self.local_scope
        self.gbody.analyse_declarations(env)

    def generate_function_body(self, env, code):
        body_cname = self.gbody.entry.func_cname
        name = code.intern_identifier(self.name)
        qualname = code.intern_identifier(self.qualname)

        code.putln('{')
        code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
                   '(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s); %s' % (
                       'Coroutine' if self.is_coroutine else 'Generator',
                       body_cname, Naming.cur_scope_cname, name, qualname,
                       code.error_goto_if_null('gen', self.pos)))
        code.put_decref(Naming.cur_scope_cname, py_object_type)
        if self.requires_classobj:
            classobj_cname = 'gen->classobj'
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                classobj_cname, Naming.self_cname))
            code.put_incref(classobj_cname, py_object_type)
            code.put_giveref(classobj_cname)
        code.put_finish_refcount_context()
        code.putln('return (PyObject *) gen;')
        code.putln('}')

    def generate_function_definitions(self, env, code):
        env.use_utility_code(UtilityCode.load_cached(
            'Coroutine' if self.is_coroutine else 'Generator', "Coroutine.c"))

        self.gbody.generate_function_header(code, proto=True)
        super(GeneratorDefNode, self).generate_function_definitions(env, code)
        self.gbody.generate_function_definitions(env, code)


class AsyncDefNode(GeneratorDefNode):
    is_coroutine = True


class GeneratorBodyDefNode(DefNode):
    # Main code body of a generator implemented as a DefNode.
# is_generator_body = True is_inlined = False inlined_comprehension_type = None # container type for inlined comprehensions def __init__(self, pos=None, name=None, body=None): super(GeneratorBodyDefNode, self).__init__( pos=pos, body=body, name=name, doc=None, args=[], star_arg=None, starstar_arg=None) def declare_generator_body(self, env): prefix = env.next_id(env.scope_prefix) name = env.next_id('generator') cname = Naming.genbody_prefix + prefix + name entry = env.declare_var(None, py_object_type, self.pos, cname=cname, visibility='private') entry.func_cname = cname entry.qualified_name = EncodedString(self.name) self.entry = entry def analyse_declarations(self, env): self.analyse_argument_types(env) self.declare_generator_body(env) def generate_function_header(self, code, proto=False): header = "static PyObject *%s(__pyx_CoroutineObject *%s, PyObject *%s)" % ( self.entry.func_cname, Naming.generator_cname, Naming.sent_value_cname) if proto: code.putln('%s; /* proto */' % header) else: code.putln('%s /* generator body */\n{' % header) def generate_function_definitions(self, env, code): lenv = self.local_scope # Generate closure function definitions self.body.generate_function_definitions(lenv, code) # Generate C code for header and body of function code.enter_cfunc_scope() code.return_from_error_cleanup_label = code.new_label() # ----- Top-level constants used by this function code.mark_pos(self.pos) self.generate_cached_builtins_decls(lenv, code) # ----- Function header code.putln("") self.generate_function_header(code) closure_init_code = code.insertion_point() # ----- Local variables code.putln("PyObject *%s = NULL;" % Naming.retval_cname) tempvardecl_code = code.insertion_point() code.put_declare_refcount_context() code.put_setup_refcount_context(self.entry.name) # ----- Resume switch point. 
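# The insertion point taken just below is filled in at the end of this method
# with the generator's resume jump table, which schematically looks like:
#     switch (gen->resume_label) {
#         case 0: goto first_run_label;
#         case 1: goto <label of the first yield>;
#         ...
#     }
# so that re-entering the generator body continues right after the last yield.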
        code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
        resume_code = code.insertion_point()
        first_run_label = code.new_label('first_run')
        code.use_label(first_run_label)
        code.put_label(first_run_label)
        code.putln('%s' % (code.error_goto_if_null(Naming.sent_value_cname, self.pos)))

        # ----- prepare target container for inlined comprehension
        if self.is_inlined and self.inlined_comprehension_type is not None:
            target_type = self.inlined_comprehension_type
            if target_type is Builtin.list_type:
                comp_init = 'PyList_New(0)'
            elif target_type is Builtin.set_type:
                comp_init = 'PySet_New(NULL)'
            elif target_type is Builtin.dict_type:
                comp_init = 'PyDict_New()'
            else:
                raise InternalError(
                    "invalid type of inlined comprehension: %s" % target_type)
            code.putln("%s = %s; %s" % (
                Naming.retval_cname, comp_init,
                code.error_goto_if_null(Naming.retval_cname, self.pos)))
            code.put_gotref(Naming.retval_cname)

        # ----- Function body
        self.generate_function_body(env, code)
        # ----- Closure initialization
        if lenv.scope_class.type.scope.entries:
            closure_init_code.putln('%s = %s;' % (
                lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
                lenv.scope_class.type.cast_code('%s->closure' % Naming.generator_cname)))

        code.mark_pos(self.pos)
        code.putln("")
        code.putln("/* function exit code */")

        # on normal generator termination, we do not take the exception propagation
        # path: no traceback info is required and not creating it is much faster
        if not self.is_inlined and not self.body.is_terminator:
            code.putln('PyErr_SetNone(PyExc_StopIteration);')

        # ----- Error cleanup
        if code.error_label in code.labels_used:
            if not self.body.is_terminator:
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            if self.is_inlined and self.inlined_comprehension_type is not None:
                code.put_xdecref_clear(Naming.retval_cname, py_object_type)
            if Future.generator_stop in env.global_scope().context.future_directives:
                # PEP 479: turn accidental StopIteration exceptions into a RuntimeError
                code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
                code.putln("if (unlikely(PyErr_ExceptionMatches(PyExc_StopIteration))) "
                           "__Pyx_Generator_Replace_StopIteration();")
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type)
            code.put_add_traceback(self.entry.qualified_name)

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        if self.is_inlined:
            code.put_xgiveref(Naming.retval_cname)
        else:
            code.put_xdecref_clear(Naming.retval_cname, py_object_type)
        code.putln('%s->resume_label = -1;' % Naming.generator_cname)
        # clean up as early as possible to help break any reference cycles
        code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
        code.put_finish_refcount_context()
        code.putln("return %s;" % Naming.retval_cname)
        code.putln("}")

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)
        # ----- Generator resume code
        resume_code.putln("switch (%s->resume_label) {" % (
            Naming.generator_cname))
        resume_code.putln("case 0: goto %s;" % first_run_label)
        for i, label in code.yield_labels:
            resume_code.putln("case %d: goto %s;" % (i, label))
        resume_code.putln("default: /* CPython raises the right error here */")
        resume_code.put_finish_refcount_context()
        resume_code.putln("return NULL;")
        resume_code.putln("}")
        code.exit_cfunc_scope()


class OverrideCheckNode(StatNode):
    # A Node for dispatching to the def method if it
    # is overridden.
# # py_func # # args # func_temp # body child_attrs = ['body'] body = None def analyse_expressions(self, env): self.args = env.arg_entries if self.py_func.is_module_scope: first_arg = 0 else: first_arg = 1 from . import ExprNodes self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type) call_node = ExprNodes.SimpleCallNode( self.pos, function=self.func_node, args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]]) if env.return_type.is_void or env.return_type.is_returncode: self.body = StatListNode(self.pos, stats=[ ExprStatNode(self.pos, expr=call_node), ReturnStatNode(self.pos, value=None)]) else: self.body = ReturnStatNode(self.pos, value=call_node) self.body = self.body.analyse_expressions(env) return self def generate_execution_code(self, code): interned_attr_cname = code.intern_identifier(self.py_func.entry.name) # Check to see if we are an extension type if self.py_func.is_module_scope: self_arg = "((PyObject *)%s)" % Naming.module_cname else: self_arg = "((PyObject *)%s)" % self.args[0].cname code.putln("/* Check if called by wrapper */") code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname) code.putln("/* Check if overridden in Python */") if self.py_func.is_module_scope: code.putln("else {") else: code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg) func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) self.func_node.set_cname(func_node_temp) # need to get attribute manually--scope would return cdef method code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c")) err = code.error_goto_if_null(func_node_temp, self.pos) code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % ( func_node_temp, self_arg, interned_attr_cname, err)) code.put_gotref(func_node_temp) is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % ( func_node_temp, self.py_func.entry.func_cname) code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden)) self.body.generate_execution_code(code) code.putln("}") code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type) code.funcstate.release_temp(func_node_temp) code.putln("}") class ClassDefNode(StatNode, BlockNode): pass class PyClassDefNode(ClassDefNode): # A Python class definition. # # name EncodedString Name of the class # doc string or None # body StatNode Attribute definition code # entry Symtab.Entry # scope PyClassScope # decorators [DecoratorNode] list of decorators or None # # The following subnodes are constructed internally: # # dict DictNode Class dictionary or Py3 namespace # classobj ClassNode Class object # target NameNode Variable to assign class object to child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result", "target", "class_cell", "decorators"] decorators = None class_result = None is_py3_style_class = False # Python3 style class (kwargs) metaclass = None mkw = None def __init__(self, pos, name, bases, doc, body, decorators=None, keyword_args=None, force_py3_semantics=False): StatNode.__init__(self, pos) self.name = name self.doc = doc self.body = body self.decorators = decorators self.bases = bases from . 
import ExprNodes if self.doc and Options.docstrings: doc = embed_position(self.pos, self.doc) doc_node = ExprNodes.StringNode(pos, value=doc) else: doc_node = None allow_py2_metaclass = not force_py3_semantics if keyword_args: allow_py2_metaclass = False self.is_py3_style_class = True if keyword_args.is_dict_literal: if keyword_args.key_value_pairs: for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]: if item.key.value == 'metaclass': if self.metaclass is not None: error(item.pos, "keyword argument 'metaclass' passed multiple times") # special case: we already know the metaclass, # so we don't need to do the "build kwargs, # find metaclass" dance at runtime self.metaclass = item.value del keyword_args.key_value_pairs[i] self.mkw = keyword_args else: assert self.metaclass is not None else: # MergedDictNode self.mkw = ExprNodes.ProxyNode(keyword_args) if force_py3_semantics or self.bases or self.mkw or self.metaclass: if self.metaclass is None: if keyword_args and not keyword_args.is_dict_literal: # **kwargs may contain 'metaclass' arg mkdict = self.mkw else: mkdict = None if (not mkdict and self.bases.is_sequence_constructor and not self.bases.args): pass # no base classes => no inherited metaclass else: self.metaclass = ExprNodes.PyClassMetaclassNode( pos, mkw=mkdict, bases=self.bases) needs_metaclass_calculation = False else: needs_metaclass_calculation = True self.dict = ExprNodes.PyClassNamespaceNode( pos, name=name, doc=doc_node, metaclass=self.metaclass, bases=self.bases, mkw=self.mkw) self.classobj = ExprNodes.Py3ClassNode( pos, name=name, bases=self.bases, dict=self.dict, doc=doc_node, metaclass=self.metaclass, mkw=self.mkw, calculate_metaclass=needs_metaclass_calculation, allow_py2_metaclass=allow_py2_metaclass) else: # no bases, no metaclass => old style class creation self.dict = ExprNodes.DictNode(pos, key_value_pairs=[]) self.classobj = ExprNodes.ClassNode( pos, name=name, bases=bases, dict=self.dict, doc=doc_node) self.target = ExprNodes.NameNode(pos, name=name) self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos) def as_cclass(self): """ Return this node as if it were declared as an extension class """ if self.is_py3_style_class: error(self.classobj.pos, "Python3 style class could not be represented as C class") return bases = self.classobj.bases.args if len(bases) == 0: base_class_name = None base_class_module = None elif len(bases) == 1: base = bases[0] path = [] from .ExprNodes import AttributeNode, NameNode while isinstance(base, AttributeNode): path.insert(0, base.attribute) base = base.obj if isinstance(base, NameNode): path.insert(0, base.name) base_class_name = path[-1] if len(path) > 1: base_class_module = u'.'.join(path[:-1]) else: base_class_module = None else: error(self.classobj.bases.args.pos, "Invalid base class") else: error(self.classobj.bases.args.pos, "C class may only have one base class") return None return CClassDefNode(self.pos, visibility='private', module_name=None, class_name=self.name, base_class_module=base_class_module, base_class_name=base_class_name, decorators=self.decorators, body=self.body, in_pxd=False, doc=self.doc) def create_scope(self, env): genv = env while genv.is_py_class_scope or genv.is_c_class_scope: genv = genv.outer_scope cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv) return cenv def analyse_declarations(self, env): class_result = self.classobj if self.decorators: from .ExprNodes import SimpleCallNode for decorator in self.decorators[::-1]: class_result = SimpleCallNode( decorator.pos, 
function=decorator.decorator, args=[class_result]) self.decorators = None self.class_result = class_result self.class_result.analyse_declarations(env) self.target.analyse_target_declaration(env) cenv = self.create_scope(env) cenv.directives = env.directives cenv.class_obj_cname = self.target.entry.cname self.body.analyse_declarations(cenv) def analyse_expressions(self, env): if self.bases: self.bases = self.bases.analyse_expressions(env) if self.metaclass: self.metaclass = self.metaclass.analyse_expressions(env) if self.mkw: self.mkw = self.mkw.analyse_expressions(env) self.dict = self.dict.analyse_expressions(env) self.class_result = self.class_result.analyse_expressions(env) cenv = self.scope self.body = self.body.analyse_expressions(cenv) self.target.analyse_target_expression(env, self.classobj) self.class_cell = self.class_cell.analyse_expressions(cenv) return self def generate_function_definitions(self, env, code): self.generate_lambda_definitions(self.scope, code) self.body.generate_function_definitions(self.scope, code) def generate_execution_code(self, code): code.mark_pos(self.pos) code.pyclass_stack.append(self) cenv = self.scope if self.bases: self.bases.generate_evaluation_code(code) if self.mkw: self.mkw.generate_evaluation_code(code) if self.metaclass: self.metaclass.generate_evaluation_code(code) self.dict.generate_evaluation_code(code) cenv.namespace_cname = cenv.class_obj_cname = self.dict.result() self.class_cell.generate_evaluation_code(code) self.body.generate_execution_code(code) self.class_result.generate_evaluation_code(code) self.class_cell.generate_injection_code( code, self.class_result.result()) self.class_cell.generate_disposal_code(code) cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result() self.target.generate_assignment_code(self.class_result, code) self.dict.generate_disposal_code(code) self.dict.free_temps(code) if self.metaclass: self.metaclass.generate_disposal_code(code) self.metaclass.free_temps(code) if self.mkw: self.mkw.generate_disposal_code(code) self.mkw.free_temps(code) if self.bases: self.bases.generate_disposal_code(code) self.bases.free_temps(code) code.pyclass_stack.pop() class CClassDefNode(ClassDefNode): # An extension type definition. # # visibility 'private' or 'public' or 'extern' # typedef_flag boolean # api boolean # module_name string or None For import of extern type objects # class_name string Unqualified name of class # as_name string or None Name to declare as in this scope # base_class_module string or None Module containing the base class # base_class_name string or None Name of the base class # objstruct_name string or None Specified C name of object struct # typeobj_name string or None Specified C name of type object # in_pxd boolean Is in a .pxd file # decorators [DecoratorNode] list of decorators or None # doc string or None # body StatNode or None # entry Symtab.Entry # base_type PyExtensionType or None # buffer_defaults_node DictNode or None Declares defaults for a buffer # buffer_defaults_pos child_attrs = ["body"] buffer_defaults_node = None buffer_defaults_pos = None typedef_flag = False api = False objstruct_name = None typeobj_name = None decorators = None shadow = False def buffer_defaults(self, env): if not hasattr(self, '_buffer_defaults'): from . 
import Buffer if self.buffer_defaults_node: self._buffer_defaults = Buffer.analyse_buffer_options( self.buffer_defaults_pos, env, [], self.buffer_defaults_node, need_complete=False) else: self._buffer_defaults = None return self._buffer_defaults def declare(self, env): if self.module_name and self.visibility != 'extern': module_path = self.module_name.split(".") home_scope = env.find_imported_module(module_path, self.pos) if not home_scope: return None else: home_scope = env self.entry = home_scope.declare_c_class( name=self.class_name, pos=self.pos, defining=0, implementing=0, module_name=self.module_name, base_type=None, objstruct_cname=self.objstruct_name, typeobj_cname=self.typeobj_name, visibility=self.visibility, typedef_flag=self.typedef_flag, api=self.api, buffer_defaults=self.buffer_defaults(env), shadow=self.shadow) def analyse_declarations(self, env): #print "CClassDefNode.analyse_declarations:", self.class_name #print "...visibility =", self.visibility #print "...module_name =", self.module_name if env.in_cinclude and not self.objstruct_name: error(self.pos, "Object struct name specification required for C class defined in 'extern from' block") if self.decorators: error(self.pos, "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name) self.base_type = None # Now that module imports are cached, we need to # import the modules for extern classes. if self.module_name: self.module = None for module in env.cimported_modules: if module.name == self.module_name: self.module = module if self.module is None: self.module = ModuleScope(self.module_name, None, env.context) self.module.has_extern_class = 1 env.add_imported_module(self.module) if self.base_class_name: if self.base_class_module: base_class_scope = env.find_module(self.base_class_module, self.pos) else: base_class_scope = env if self.base_class_name == 'object': # extension classes are special and don't need to inherit from object if base_class_scope is None or base_class_scope.lookup('object') is None: self.base_class_name = None self.base_class_module = None base_class_scope = None if base_class_scope: base_class_entry = base_class_scope.find(self.base_class_name, self.pos) if base_class_entry: if not base_class_entry.is_type: error(self.pos, "'%s' is not a type name" % self.base_class_name) elif not base_class_entry.type.is_extension_type and \ not (base_class_entry.type.is_builtin_type and base_class_entry.type.objstruct_cname): error(self.pos, "'%s' is not an extension type" % self.base_class_name) elif not base_class_entry.type.is_complete(): error(self.pos, "Base class '%s' of type '%s' is incomplete" % ( self.base_class_name, self.class_name)) elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \ base_class_entry.type.is_final_type: error(self.pos, "Base class '%s' of type '%s' is final" % ( self.base_class_name, self.class_name)) elif base_class_entry.type.is_builtin_type and \ base_class_entry.type.name in ('tuple', 'str', 'bytes'): error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported" % base_class_entry.type.name) else: self.base_type = base_class_entry.type if env.directives.get('freelist', 0) > 0: warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1) has_body = self.body is not None if has_body and self.base_type and not self.base_type.scope: # To properly initialize inherited attributes, the base type must # be analysed before this type. 
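# The lambda appended below simply re-runs analyse_declarations() for this
# node once the base type's own declarations (and thus its scope) have been
# analysed; the base class runs these deferred thunks when it finishes.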
self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env)) return if self.module_name and self.visibility != 'extern': module_path = self.module_name.split(".") home_scope = env.find_imported_module(module_path, self.pos) if not home_scope: return else: home_scope = env if self.visibility == 'extern': if (self.module_name == '__builtin__' and self.class_name in Builtin.builtin_types and env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1) self.entry = home_scope.declare_c_class( name=self.class_name, pos=self.pos, defining=has_body and self.in_pxd, implementing=has_body and not self.in_pxd, module_name=self.module_name, base_type=self.base_type, objstruct_cname=self.objstruct_name, typeobj_cname=self.typeobj_name, visibility=self.visibility, typedef_flag=self.typedef_flag, api=self.api, buffer_defaults=self.buffer_defaults(env), shadow=self.shadow) if self.shadow: home_scope.lookup(self.class_name).as_variable = self.entry if home_scope is not env and self.visibility == 'extern': env.add_imported_entry(self.class_name, self.entry, self.pos) self.scope = scope = self.entry.type.scope if scope is not None: scope.directives = env.directives if self.doc and Options.docstrings: scope.doc = embed_position(self.pos, self.doc) if has_body: self.body.analyse_declarations(scope) if self.in_pxd: scope.defined = 1 else: scope.implemented = 1 env.allocate_vtable_names(self.entry) for thunk in self.entry.type.defered_declarations: thunk() def analyse_expressions(self, env): if self.body: scope = self.entry.type.scope self.body = self.body.analyse_expressions(scope) return self def generate_function_definitions(self, env, code): if self.body: self.generate_lambda_definitions(self.scope, code) self.body.generate_function_definitions(self.scope, code) def generate_execution_code(self, code): # This is needed to generate evaluation code for # default values of method arguments. code.mark_pos(self.pos) if self.body: self.body.generate_execution_code(code) def annotate(self, code): if self.body: self.body.annotate(code) class PropertyNode(StatNode): # Definition of a property in an extension type. # # name string # doc EncodedString or None Doc string # entry Symtab.Entry # body StatListNode child_attrs = ["body"] def analyse_declarations(self, env): self.entry = env.declare_property(self.name, self.doc, self.pos) self.entry.scope.directives = env.directives self.body.analyse_declarations(self.entry.scope) def analyse_expressions(self, env): self.body = self.body.analyse_expressions(env) return self def generate_function_definitions(self, env, code): self.body.generate_function_definitions(env, code) def generate_execution_code(self, code): pass def annotate(self, code): self.body.annotate(code) class GlobalNode(StatNode): # Global variable declaration. # # names [string] child_attrs = [] def analyse_declarations(self, env): for name in self.names: env.declare_global(name, self.pos) def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class NonlocalNode(StatNode): # Nonlocal variable declaration via the 'nonlocal' keyword. # # names [string] child_attrs = [] def analyse_declarations(self, env): for name in self.names: env.declare_nonlocal(name, self.pos) def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class ExprStatNode(StatNode): # Expression used as a statement. 
# # expr ExprNode child_attrs = ["expr"] def analyse_declarations(self, env): from . import ExprNodes if isinstance(self.expr, ExprNodes.GeneralCallNode): func = self.expr.function.as_cython_attribute() if func == u'declare': args, kwds = self.expr.explicit_args_kwds() if len(args): error(self.expr.pos, "Variable names must be specified.") for var, type_node in kwds.key_value_pairs: type = type_node.analyse_as_type(env) if type is None: error(type_node.pos, "Unknown type") else: env.declare_var(var.value, type, var.pos, is_cdef=True) self.__class__ = PassStatNode def analyse_expressions(self, env): self.expr.result_is_used = False # hint that .result() may safely be left empty self.expr = self.expr.analyse_expressions(env) return self def nogil_check(self, env): if self.expr.type.is_pyobject and self.expr.is_temp: self.gil_error() gil_message = "Discarding owned Python object" def generate_execution_code(self, code): code.mark_pos(self.pos) self.expr.generate_evaluation_code(code) if not self.expr.is_temp and self.expr.result(): code.putln("%s;" % self.expr.result()) self.expr.generate_disposal_code(code) self.expr.free_temps(code) def generate_function_definitions(self, env, code): self.expr.generate_function_definitions(env, code) def annotate(self, code): self.expr.annotate(code) class AssignmentNode(StatNode): # Abstract base class for assignment nodes. # # The analyse_expressions and generate_execution_code # phases of assignments are split into two sub-phases # each, to enable all the right hand sides of a # parallel assignment to be evaluated before assigning # to any of the left hand sides. def analyse_expressions(self, env): node = self.analyse_types(env) if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode): if node.rhs.type.is_ptr and node.rhs.is_ephemeral(): error(self.pos, "Storing unsafe C derivative of temporary Python reference") return node # def analyse_expressions(self, env): # self.analyse_expressions_1(env) # self.analyse_expressions_2(env) def generate_execution_code(self, code): code.mark_pos(self.pos) self.generate_rhs_evaluation_code(code) self.generate_assignment_code(code) class SingleAssignmentNode(AssignmentNode): # The simplest case: # # a = b # # lhs ExprNode Left hand side # rhs ExprNode Right hand side # first bool Is this guaranteed the first assignment to lhs? # is_overloaded_assignment bool Is this assignment done via an overloaded operator= # exception_check # exception_value child_attrs = ["lhs", "rhs"] first = False is_overloaded_assignment = False declaration_only = False def analyse_declarations(self, env): from . 
import ExprNodes # handle declarations of the form x = cython.foo() if isinstance(self.rhs, ExprNodes.CallNode): func_name = self.rhs.function.as_cython_attribute() if func_name: args, kwds = self.rhs.explicit_args_kwds() if func_name in ['declare', 'typedef']: if len(args) > 2: error(args[2].pos, "Invalid positional argument.") return if kwds is not None: kwdict = kwds.compile_time_value(None) if func_name == 'typedef' or 'visibility' not in kwdict: error(kwds.pos, "Invalid keyword argument.") return visibility = kwdict['visibility'] else: visibility = 'private' type = args[0].analyse_as_type(env) if type is None: error(args[0].pos, "Unknown type") return lhs = self.lhs if func_name == 'declare': if isinstance(lhs, ExprNodes.NameNode): vars = [(lhs.name, lhs.pos)] elif isinstance(lhs, ExprNodes.TupleNode): vars = [(var.name, var.pos) for var in lhs.args] else: error(lhs.pos, "Invalid declaration") return for var, pos in vars: env.declare_var(var, type, pos, is_cdef=True, visibility=visibility) if len(args) == 2: # we have a value self.rhs = args[1] else: self.declaration_only = True else: self.declaration_only = True if not isinstance(lhs, ExprNodes.NameNode): error(lhs.pos, "Invalid declaration.") env.declare_typedef(lhs.name, type, self.pos, visibility='private') elif func_name in ['struct', 'union']: self.declaration_only = True if len(args) > 0 or kwds is None: error(self.rhs.pos, "Struct or union members must be given by name.") return members = [] for member, type_node in kwds.key_value_pairs: type = type_node.analyse_as_type(env) if type is None: error(type_node.pos, "Unknown type") else: members.append((member.value, type, member.pos)) if len(members) < len(kwds.key_value_pairs): return if not isinstance(self.lhs, ExprNodes.NameNode): error(self.lhs.pos, "Invalid declaration.") name = self.lhs.name scope = StructOrUnionScope(name) env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos) for member, type, pos in members: scope.declare_var(member, type, pos) elif func_name == 'fused_type': # dtype = cython.fused_type(...) self.declaration_only = True if kwds: error(self.rhs.function.pos, "fused_type does not take keyword arguments") fusednode = FusedTypeNode(self.rhs.pos, name=self.lhs.name, types=args) fusednode.analyse_declarations(env) if self.declaration_only: return else: self.lhs.analyse_target_declaration(env) def analyse_types(self, env, use_temp=0): from . 
import ExprNodes self.rhs = self.rhs.analyse_types(env) unrolled_assignment = self.unroll_rhs(env) if unrolled_assignment: return unrolled_assignment self.lhs = self.lhs.analyse_target_types(env) self.lhs.gil_assignment_check(env) unrolled_assignment = self.unroll_lhs(env) if unrolled_assignment: return unrolled_assignment if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode): self.lhs.analyse_broadcast_operation(self.rhs) self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs) elif self.lhs.type.is_array: if not isinstance(self.lhs, ExprNodes.SliceIndexNode): # cannot assign to C array, only to its full slice self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None) self.lhs = self.lhs.analyse_target_types(env) if self.lhs.type.is_cpp_class: op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type]) if op: rhs = self.rhs self.is_overloaded_assignment = True self.exception_check = op.type.exception_check self.exception_value = op.type.exception_value if self.exception_check == '+' and self.exception_value is None: env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) else: rhs = self.rhs.coerce_to(self.lhs.type, env) else: rhs = self.rhs.coerce_to(self.lhs.type, env) if use_temp or rhs.is_attribute or ( not rhs.is_name and not rhs.is_literal and rhs.type.is_pyobject): # things like (cdef) attribute access are not safe (traverses pointers) rhs = rhs.coerce_to_temp(env) elif rhs.type.is_pyobject: rhs = rhs.coerce_to_simple(env) self.rhs = rhs return self def unroll(self, node, target_size, env): from . import ExprNodes, UtilNodes base = node start_node = stop_node = step_node = check_node = None if node.type.is_ctuple: slice_size = node.type.size elif node.type.is_ptr or node.type.is_array: while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop): base = node = node.base if isinstance(node, ExprNodes.SliceIndexNode): base = node.base start_node = node.start if start_node: start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) stop_node = node.stop if stop_node: stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) else: if node.type.is_array and node.type.size: stop_node = ExprNodes.IntNode( self.pos, value=str(node.type.size), constant_result=(node.type.size if isinstance(node.type.size, _py_int_types) else ExprNodes.constant_value_not_set)) else: error(self.pos, "C array iteration requires known end index") return step_node = None #node.step if step_node: step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) # TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here. 
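# When all slice bounds are compile-time constants, the unrolled length is
# computed right here; e.g. a C array slice a[2:8:2] yields (8 - 2) / 2 == 3
# items.  A non-constant bound raises ValueError in get_const(), which is
# reported below as requiring known endpoints.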
def get_const(node, none_value): if node is None: return none_value elif node.has_constant_result(): return node.constant_result else: raise ValueError("Not a constant.") try: slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1) except ValueError: error(self.pos, "C array assignment currently requires known endpoints") return elif node.type.is_array: slice_size = node.type.size if not isinstance(slice_size, _py_int_types): return # might still work when coercing to Python else: return else: return if slice_size != target_size: error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % ( slice_size, target_size)) return items = [] base = UtilNodes.LetRefNode(base) refs = [base] if start_node and not start_node.is_literal: start_node = UtilNodes.LetRefNode(start_node) refs.append(start_node) if stop_node and not stop_node.is_literal: stop_node = UtilNodes.LetRefNode(stop_node) refs.append(stop_node) if step_node and not step_node.is_literal: step_node = UtilNodes.LetRefNode(step_node) refs.append(step_node) for ix in range(target_size): ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type) if step_node is not None: if step_node.has_constant_result(): step_value = ix_node.constant_result * step_node.constant_result ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value) else: ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node) if start_node is not None: if start_node.has_constant_result() and ix_node.has_constant_result(): index_value = ix_node.constant_result + start_node.constant_result ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value) else: ix_node = ExprNodes.AddNode( self.pos, operator='+', operand1=start_node, operand2=ix_node) items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env))) return check_node, refs, items def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env): from . import UtilNodes assignments = [] for lhs, rhs in zip(lhs_list, rhs_list): assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first)) node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env) if check_node: node = StatListNode(pos=self.pos, stats=[check_node, node]) for ref in refs[::-1]: node = UtilNodes.LetNode(ref, node) return node def unroll_rhs(self, env): from . import ExprNodes if not isinstance(self.lhs, ExprNodes.TupleNode): return if any(arg.is_starred for arg in self.lhs.args): return unrolled = self.unroll(self.rhs, len(self.lhs.args), env) if not unrolled: return check_node, refs, rhs = unrolled return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env) def unroll_lhs(self, env): if self.lhs.type.is_ctuple: # Handled directly. return from . 
import ExprNodes if not isinstance(self.rhs, ExprNodes.TupleNode): return unrolled = self.unroll(self.lhs, len(self.rhs.args), env) if not unrolled: return check_node, refs, lhs = unrolled return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env) def generate_rhs_evaluation_code(self, code): self.rhs.generate_evaluation_code(code) def generate_assignment_code(self, code, overloaded_assignment=False): if self.is_overloaded_assignment: self.lhs.generate_assignment_code( self.rhs, code, overloaded_assignment=self.is_overloaded_assignment, exception_check=self.exception_check, exception_value=self.exception_value) else: self.lhs.generate_assignment_code(self.rhs, code) def generate_function_definitions(self, env, code): self.rhs.generate_function_definitions(env, code) def annotate(self, code): self.lhs.annotate(code) self.rhs.annotate(code) class CascadedAssignmentNode(AssignmentNode): # An assignment with multiple left hand sides: # # a = b = c # # lhs_list [ExprNode] Left hand sides # rhs ExprNode Right hand sides # # Used internally: # # coerced_values [ExprNode] RHS coerced to all distinct LHS types # cloned_values [ExprNode] cloned RHS value for each LHS # assignment_overloads [Bool] If each assignment uses a C++ operator= child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"] cloned_values = None coerced_values = None assignment_overloads = None def analyse_declarations(self, env): for lhs in self.lhs_list: lhs.analyse_target_declaration(env) def analyse_types(self, env, use_temp=0): from .ExprNodes import CloneNode, ProxyNode # collect distinct types used on the LHS lhs_types = set() for i, lhs in enumerate(self.lhs_list): lhs = self.lhs_list[i] = lhs.analyse_target_types(env) lhs.gil_assignment_check(env) lhs_types.add(lhs.type) rhs = self.rhs.analyse_types(env) # common special case: only one type needed on the LHS => coerce only once if len(lhs_types) == 1: # Avoid coercion for overloaded assignment operators. 
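# (If the single LHS type is a C++ class with a matching operator=, the RHS is
# deliberately left unconverted here so that the user-defined overload is
# called with the original RHS type.)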
if next(iter(lhs_types)).is_cpp_class: op = env.lookup_operator('=', [lhs, self.rhs]) if not op: rhs = rhs.coerce_to(lhs_types.pop(), env) else: rhs = rhs.coerce_to(lhs_types.pop(), env) if not rhs.is_name and not rhs.is_literal and ( use_temp or rhs.is_attribute or rhs.type.is_pyobject): rhs = rhs.coerce_to_temp(env) else: rhs = rhs.coerce_to_simple(env) self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs # clone RHS and coerce it to all distinct LHS types self.coerced_values = [] coerced_values = {} self.assignment_overloads = [] for lhs in self.lhs_list: overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs]) self.assignment_overloads.append(overloaded) if lhs.type not in coerced_values and lhs.type != rhs.type: rhs = CloneNode(self.rhs) if not overloaded: rhs = rhs.coerce_to(lhs.type, env) self.coerced_values.append(rhs) coerced_values[lhs.type] = rhs # clone coerced values for all LHS assignments self.cloned_values = [] for lhs in self.lhs_list: rhs = coerced_values.get(lhs.type, self.rhs) self.cloned_values.append(CloneNode(rhs)) return self def generate_rhs_evaluation_code(self, code): self.rhs.generate_evaluation_code(code) def generate_assignment_code(self, code, overloaded_assignment=False): # prepare all coercions for rhs in self.coerced_values: rhs.generate_evaluation_code(code) # assign clones to LHS for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads): rhs.generate_evaluation_code(code) lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload) # dispose of coerced values and original RHS for rhs_value in self.coerced_values: rhs_value.generate_disposal_code(code) rhs_value.free_temps(code) self.rhs.generate_disposal_code(code) self.rhs.free_temps(code) def generate_function_definitions(self, env, code): self.rhs.generate_function_definitions(env, code) def annotate(self, code): for rhs in self.coerced_values: rhs.annotate(code) for lhs, rhs in zip(self.lhs_list, self.cloned_values): lhs.annotate(code) rhs.annotate(code) self.rhs.annotate(code) class ParallelAssignmentNode(AssignmentNode): # A combined packing/unpacking assignment: # # a, b, c = d, e, f # # This has been rearranged by the parser into # # a = d ; b = e ; c = f # # but we must evaluate all the right hand sides # before assigning to any of the left hand sides. # # stats [AssignmentNode] The constituent assignments child_attrs = ["stats"] def analyse_declarations(self, env): for stat in self.stats: stat.analyse_declarations(env) def analyse_expressions(self, env): self.stats = [stat.analyse_types(env, use_temp=1) for stat in self.stats] return self # def analyse_expressions(self, env): # for stat in self.stats: # stat.analyse_expressions_1(env, use_temp=1) # for stat in self.stats: # stat.analyse_expressions_2(env) def generate_execution_code(self, code): code.mark_pos(self.pos) for stat in self.stats: stat.generate_rhs_evaluation_code(code) for stat in self.stats: stat.generate_assignment_code(code) def generate_function_definitions(self, env, code): for stat in self.stats: stat.generate_function_definitions(env, code) def annotate(self, code): for stat in self.stats: stat.annotate(code) class InPlaceAssignmentNode(AssignmentNode): # An in place arithmetic operand: # # a += b # a -= b # ... # # lhs ExprNode Left hand side # rhs ExprNode Right hand side # operator char one of "+-*/%^&|" # # This code is a bit tricky because in order to obey Python # semantics the sub-expressions (e.g. indices) of the lhs must # not be evaluated twice. 
So we must re-use the values calculated # in evaluation phase for the assignment phase as well. # Fortunately, the type of the lhs node is fairly constrained # (it must be a NameNode, AttributeNode, or IndexNode). child_attrs = ["lhs", "rhs"] def analyse_declarations(self, env): self.lhs.analyse_target_declaration(env) def analyse_types(self, env): self.rhs = self.rhs.analyse_types(env) self.lhs = self.lhs.analyse_target_types(env) # When assigning to a fully indexed buffer or memoryview, coerce the rhs if self.lhs.is_memview_index or self.lhs.is_buffer_access: self.rhs = self.rhs.coerce_to(self.lhs.type, env) elif self.lhs.type.is_string and self.operator in '+-': # use pointer arithmetic for char* LHS instead of string concat self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env) return self def generate_execution_code(self, code): code.mark_pos(self.pos) lhs, rhs = self.lhs, self.rhs rhs.generate_evaluation_code(code) lhs.generate_subexpr_evaluation_code(code) c_op = self.operator if c_op == "//": c_op = "/" elif c_op == "**": error(self.pos, "No C inplace power operator") if lhs.is_buffer_access or lhs.is_memview_index: if lhs.type.is_pyobject: error(self.pos, "In-place operators not allowed on object buffers in this release.") if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']: error(self.pos, "In-place non-c divide operators not allowed on int buffers.") lhs.generate_buffer_setitem_code(rhs, code, c_op) elif lhs.is_memview_slice: error(self.pos, "Inplace operators not supported on memoryview slices") else: # C++ # TODO: make sure overload is declared code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result())) lhs.generate_subexpr_disposal_code(code) lhs.free_subexpr_temps(code) rhs.generate_disposal_code(code) rhs.free_temps(code) def annotate(self, code): self.lhs.annotate(code) self.rhs.annotate(code) def create_binop_node(self): from . 
import ExprNodes return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs) class PrintStatNode(StatNode): # print statement # # arg_tuple TupleNode # stream ExprNode or None (stdout) # append_newline boolean child_attrs = ["arg_tuple", "stream"] def analyse_expressions(self, env): if self.stream: stream = self.stream.analyse_expressions(env) self.stream = stream.coerce_to_pyobject(env) arg_tuple = self.arg_tuple.analyse_expressions(env) self.arg_tuple = arg_tuple.coerce_to_pyobject(env) env.use_utility_code(printing_utility_code) if len(self.arg_tuple.args) == 1 and self.append_newline: env.use_utility_code(printing_one_utility_code) return self nogil_check = Node.gil_error gil_message = "Python print statement" def generate_execution_code(self, code): code.mark_pos(self.pos) if self.stream: self.stream.generate_evaluation_code(code) stream_result = self.stream.py_result() else: stream_result = '0' if len(self.arg_tuple.args) == 1 and self.append_newline: arg = self.arg_tuple.args[0] arg.generate_evaluation_code(code) code.putln( "if (__Pyx_PrintOne(%s, %s) < 0) %s" % ( stream_result, arg.py_result(), code.error_goto(self.pos))) arg.generate_disposal_code(code) arg.free_temps(code) else: self.arg_tuple.generate_evaluation_code(code) code.putln( "if (__Pyx_Print(%s, %s, %d) < 0) %s" % ( stream_result, self.arg_tuple.py_result(), self.append_newline, code.error_goto(self.pos))) self.arg_tuple.generate_disposal_code(code) self.arg_tuple.free_temps(code) if self.stream: self.stream.generate_disposal_code(code) self.stream.free_temps(code) def generate_function_definitions(self, env, code): if self.stream: self.stream.generate_function_definitions(env, code) self.arg_tuple.generate_function_definitions(env, code) def annotate(self, code): if self.stream: self.stream.annotate(code) self.arg_tuple.annotate(code) class ExecStatNode(StatNode): # exec statement # # args [ExprNode] child_attrs = ["args"] def analyse_expressions(self, env): for i, arg in enumerate(self.args): arg = arg.analyse_expressions(env) arg = arg.coerce_to_pyobject(env) self.args[i] = arg env.use_utility_code(Builtin.pyexec_utility_code) return self nogil_check = Node.gil_error gil_message = "Python exec statement" def generate_execution_code(self, code): code.mark_pos(self.pos) args = [] for arg in self.args: arg.generate_evaluation_code(code) args.append(arg.py_result()) args = tuple(args + ['0', '0'][:3-len(args)]) temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args)) for arg in self.args: arg.generate_disposal_code(code) arg.free_temps(code) code.putln( code.error_goto_if_null(temp_result, self.pos)) code.put_gotref(temp_result) code.put_decref_clear(temp_result, py_object_type) code.funcstate.release_temp(temp_result) def annotate(self, code): for arg in self.args: arg.annotate(code) class DelStatNode(StatNode): # del statement # # args [ExprNode] child_attrs = ["args"] ignore_nonexisting = False def analyse_declarations(self, env): for arg in self.args: arg.analyse_target_declaration(env) def analyse_expressions(self, env): for i, arg in enumerate(self.args): arg = self.args[i] = arg.analyse_target_expression(env, None) if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice): if arg.is_name and arg.entry.is_cglobal: error(arg.pos, "Deletion of global C variable") elif arg.type.is_ptr and arg.type.base_type.is_cpp_class: self.cpp_check(env) elif arg.type.is_cpp_class: error(arg.pos, 
"Deletion of non-heap C++ object") elif arg.is_subscript and arg.base.type is Builtin.bytearray_type: pass # del ba[i] else: error(arg.pos, "Deletion of non-Python, non-C++ object") #arg.release_target_temp(env) return self def nogil_check(self, env): for arg in self.args: if arg.type.is_pyobject: self.gil_error() gil_message = "Deleting Python object" def generate_execution_code(self, code): code.mark_pos(self.pos) for arg in self.args: if (arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.is_subscript and arg.base.type is Builtin.bytearray_type): arg.generate_deletion_code( code, ignore_nonexisting=self.ignore_nonexisting) elif arg.type.is_ptr and arg.type.base_type.is_cpp_class: arg.generate_result_code(code) code.putln("delete %s;" % arg.result()) # else error reported earlier def annotate(self, code): for arg in self.args: arg.annotate(code) class PassStatNode(StatNode): # pass statement child_attrs = [] def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class IndirectionNode(StatListNode): """ This adds an indirection so that the node can be shared and a subtree can be removed at any time by clearing self.stats. """ def __init__(self, stats): super(IndirectionNode, self).__init__(stats[0].pos, stats=stats) class BreakStatNode(StatNode): child_attrs = [] is_terminator = True def analyse_expressions(self, env): return self def generate_execution_code(self, code): code.mark_pos(self.pos) if not code.break_label: error(self.pos, "break statement not inside loop") else: code.put_goto(code.break_label) class ContinueStatNode(StatNode): child_attrs = [] is_terminator = True def analyse_expressions(self, env): return self def generate_execution_code(self, code): if not code.continue_label: error(self.pos, "continue statement not inside loop") return code.mark_pos(self.pos) code.put_goto(code.continue_label) class ReturnStatNode(StatNode): # return statement # # value ExprNode or None # return_type PyrexType # in_generator return inside of generator => raise StopIteration child_attrs = ["value"] is_terminator = True in_generator = False # Whether we are in a parallel section in_parallel = False def analyse_expressions(self, env): return_type = env.return_type self.return_type = return_type if not return_type: error(self.pos, "Return not inside a function body") return self if self.value: self.value = self.value.analyse_types(env) if return_type.is_void or return_type.is_returncode: error(self.value.pos, "Return with value in void function") else: self.value = self.value.coerce_to(env.return_type, env) else: if (not return_type.is_void and not return_type.is_pyobject and not return_type.is_returncode): error(self.pos, "Return value required") return self def nogil_check(self, env): if self.return_type.is_pyobject: self.gil_error() gil_message = "Returning Python object" def generate_execution_code(self, code): code.mark_pos(self.pos) if not self.return_type: # error reported earlier return if self.return_type.is_pyobject: code.put_xdecref(Naming.retval_cname, self.return_type) if self.value: self.value.generate_evaluation_code(code) if self.return_type.is_memoryviewslice: from . 
import MemoryView MemoryView.put_acquire_memoryviewslice( lhs_cname=Naming.retval_cname, lhs_type=self.return_type, lhs_pos=self.value.pos, rhs=self.value, code=code, have_gil=self.in_nogil_context) elif self.in_generator: # return value == raise StopIteration(value), but uncatchable code.globalstate.use_utility_code( UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c")) code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % ( Naming.retval_cname, self.value.py_result())) self.value.generate_disposal_code(code) else: self.value.make_owned_reference(code) code.putln("%s = %s;" % ( Naming.retval_cname, self.value.result_as(self.return_type))) self.value.generate_post_assignment_code(code) self.value.free_temps(code) else: if self.return_type.is_pyobject: if self.in_generator: code.putln("%s = NULL;" % Naming.retval_cname) else: code.put_init_to_py_none(Naming.retval_cname, self.return_type) elif self.return_type.is_returncode: self.put_return(code, self.return_type.default_value) for cname, type in code.funcstate.temps_holding_reference(): code.put_decref_clear(cname, type) code.put_goto(code.return_label) def put_return(self, code, value): if self.in_parallel: code.putln_openmp("#pragma omp critical(__pyx_returning)") code.putln("%s = %s;" % (Naming.retval_cname, value)) def generate_function_definitions(self, env, code): if self.value is not None: self.value.generate_function_definitions(env, code) def annotate(self, code): if self.value: self.value.annotate(code) class RaiseStatNode(StatNode): # raise statement # # exc_type ExprNode or None # exc_value ExprNode or None # exc_tb ExprNode or None # cause ExprNode or None child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"] is_terminator = True def analyse_expressions(self, env): if self.exc_type: exc_type = self.exc_type.analyse_types(env) self.exc_type = exc_type.coerce_to_pyobject(env) if self.exc_value: exc_value = self.exc_value.analyse_types(env) self.exc_value = exc_value.coerce_to_pyobject(env) if self.exc_tb: exc_tb = self.exc_tb.analyse_types(env) self.exc_tb = exc_tb.coerce_to_pyobject(env) if self.cause: cause = self.cause.analyse_types(env) self.cause = cause.coerce_to_pyobject(env) # special cases for builtin exceptions self.builtin_exc_name = None if self.exc_type and not self.exc_value and not self.exc_tb: exc = self.exc_type from . 
import ExprNodes if (isinstance(exc, ExprNodes.SimpleCallNode) and not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))): exc = exc.function # extract the exception type if exc.is_name and exc.entry.is_builtin: self.builtin_exc_name = exc.name if self.builtin_exc_name == 'MemoryError': self.exc_type = None # has a separate implementation return self nogil_check = Node.gil_error gil_message = "Raising exception" def generate_execution_code(self, code): code.mark_pos(self.pos) if self.builtin_exc_name == 'MemoryError': code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos)) return if self.exc_type: self.exc_type.generate_evaluation_code(code) type_code = self.exc_type.py_result() else: type_code = "0" if self.exc_value: self.exc_value.generate_evaluation_code(code) value_code = self.exc_value.py_result() else: value_code = "0" if self.exc_tb: self.exc_tb.generate_evaluation_code(code) tb_code = self.exc_tb.py_result() else: tb_code = "0" if self.cause: self.cause.generate_evaluation_code(code) cause_code = self.cause.py_result() else: cause_code = "0" code.globalstate.use_utility_code(raise_utility_code) code.putln( "__Pyx_Raise(%s, %s, %s, %s);" % ( type_code, value_code, tb_code, cause_code)) for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause): if obj: obj.generate_disposal_code(code) obj.free_temps(code) code.putln( code.error_goto(self.pos)) def generate_function_definitions(self, env, code): if self.exc_type is not None: self.exc_type.generate_function_definitions(env, code) if self.exc_value is not None: self.exc_value.generate_function_definitions(env, code) if self.exc_tb is not None: self.exc_tb.generate_function_definitions(env, code) if self.cause is not None: self.cause.generate_function_definitions(env, code) def annotate(self, code): if self.exc_type: self.exc_type.annotate(code) if self.exc_value: self.exc_value.annotate(code) if self.exc_tb: self.exc_tb.annotate(code) if self.cause: self.cause.annotate(code) class ReraiseStatNode(StatNode): child_attrs = [] is_terminator = True def analyse_expressions(self, env): return self nogil_check = Node.gil_error gil_message = "Raising exception" def generate_execution_code(self, code): code.mark_pos(self.pos) vars = code.funcstate.exc_vars if vars: code.globalstate.use_utility_code(restore_exception_utility_code) code.put_giveref(vars[0]) code.put_giveref(vars[1]) # fresh exceptions may not have a traceback yet (-> finally!) 
code.put_xgiveref(vars[2]) code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars)) for varname in vars: code.put("%s = 0; " % varname) code.putln() code.putln(code.error_goto(self.pos)) else: code.globalstate.use_utility_code( UtilityCode.load_cached("ReRaiseException", "Exceptions.c")) code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos)) class AssertStatNode(StatNode): # assert statement # # cond ExprNode # value ExprNode or None child_attrs = ["cond", "value"] def analyse_expressions(self, env): self.cond = self.cond.analyse_boolean_expression(env) if self.value: value = self.value.analyse_types(env) if value.type is Builtin.tuple_type or not value.type.is_builtin_type: # prevent tuple values from being interpreted as argument value tuples from .ExprNodes import TupleNode value = TupleNode(value.pos, args=[value], slow=True) self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env) else: self.value = value.coerce_to_pyobject(env) return self nogil_check = Node.gil_error gil_message = "Raising exception" def generate_execution_code(self, code): code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS") code.putln("if (unlikely(!Py_OptimizeFlag)) {") code.mark_pos(self.pos) self.cond.generate_evaluation_code(code) code.putln( "if (unlikely(!%s)) {" % self.cond.result()) if self.value: self.value.generate_evaluation_code(code) code.putln( "PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result()) self.value.generate_disposal_code(code) self.value.free_temps(code) else: code.putln( "PyErr_SetNone(PyExc_AssertionError);") code.putln( code.error_goto(self.pos)) code.putln( "}") self.cond.generate_disposal_code(code) self.cond.free_temps(code) code.putln( "}") code.putln("#endif") def generate_function_definitions(self, env, code): self.cond.generate_function_definitions(env, code) if self.value is not None: self.value.generate_function_definitions(env, code) def annotate(self, code): self.cond.annotate(code) if self.value: self.value.annotate(code) class IfStatNode(StatNode): # if statement # # if_clauses [IfClauseNode] # else_clause StatNode or None child_attrs = ["if_clauses", "else_clause"] def analyse_declarations(self, env): for if_clause in self.if_clauses: if_clause.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) def analyse_expressions(self, env): self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses] if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self def generate_execution_code(self, code): code.mark_pos(self.pos) end_label = code.new_label() last = len(self.if_clauses) if not self.else_clause: last -= 1 # avoid redundant goto at end of last if-clause for i, if_clause in enumerate(self.if_clauses): if_clause.generate_execution_code(code, end_label, is_last=i == last) if self.else_clause: code.mark_pos(self.else_clause.pos) code.putln("/*else*/ {") self.else_clause.generate_execution_code(code) code.putln("}") code.put_label(end_label) def generate_function_definitions(self, env, code): for clause in self.if_clauses: clause.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): for if_clause in self.if_clauses: if_clause.annotate(code) if self.else_clause: self.else_clause.annotate(code) class IfClauseNode(Node): # if or elif clause in an if statement # # condition ExprNode # body StatNode child_attrs = 
["condition", "body"] def analyse_declarations(self, env): self.body.analyse_declarations(env) def analyse_expressions(self, env): self.condition = self.condition.analyse_temp_boolean_expression(env) self.body = self.body.analyse_expressions(env) return self def generate_execution_code(self, code, end_label, is_last): self.condition.generate_evaluation_code(code) code.mark_pos(self.pos) code.putln("if (%s) {" % self.condition.result()) self.condition.generate_disposal_code(code) self.condition.free_temps(code) self.body.generate_execution_code(code) code.mark_pos(self.pos, trace=False) if not (is_last or self.body.is_terminator): code.put_goto(end_label) code.putln("}") def generate_function_definitions(self, env, code): self.condition.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) def annotate(self, code): self.condition.annotate(code) self.body.annotate(code) class SwitchCaseNode(StatNode): # Generated in the optimization of an if-elif-else node # # conditions [ExprNode] # body StatNode child_attrs = ['conditions', 'body'] def generate_execution_code(self, code): for cond in self.conditions: code.mark_pos(cond.pos) cond.generate_evaluation_code(code) code.putln("case %s:" % cond.result()) self.body.generate_execution_code(code) code.mark_pos(self.pos, trace=False) code.putln("break;") def generate_function_definitions(self, env, code): for cond in self.conditions: cond.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) def annotate(self, code): for cond in self.conditions: cond.annotate(code) self.body.annotate(code) class SwitchStatNode(StatNode): # Generated in the optimization of an if-elif-else node # # test ExprNode # cases [SwitchCaseNode] # else_clause StatNode or None child_attrs = ['test', 'cases', 'else_clause'] def generate_execution_code(self, code): self.test.generate_evaluation_code(code) code.mark_pos(self.pos) code.putln("switch (%s) {" % self.test.result()) for case in self.cases: case.generate_execution_code(code) if self.else_clause is not None: code.putln("default:") self.else_clause.generate_execution_code(code) code.putln("break;") else: # Always generate a default clause to prevent C compiler warnings # about unmatched enum values (it was not the user who decided to # generate the switch statement, so shouldn't be bothered). 
code.putln("default: break;") code.putln("}") def generate_function_definitions(self, env, code): self.test.generate_function_definitions(env, code) for case in self.cases: case.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): self.test.annotate(code) for case in self.cases: case.annotate(code) if self.else_clause is not None: self.else_clause.annotate(code) class LoopNode(object): pass class WhileStatNode(LoopNode, StatNode): # while statement # # condition ExprNode # body StatNode # else_clause StatNode child_attrs = ["condition", "body", "else_clause"] def analyse_declarations(self, env): self.body.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) def analyse_expressions(self, env): if self.condition: self.condition = self.condition.analyse_temp_boolean_expression(env) self.body = self.body.analyse_expressions(env) if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self def generate_execution_code(self, code): code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() code.putln( "while (1) {") if self.condition: self.condition.generate_evaluation_code(code) self.condition.generate_disposal_code(code) code.putln( "if (!%s) break;" % self.condition.result()) self.condition.free_temps(code) self.body.generate_execution_code(code) code.put_label(code.continue_label) code.putln("}") break_label = code.break_label code.set_loop_labels(old_loop_labels) if self.else_clause: code.mark_pos(self.else_clause.pos) code.putln("/*else*/ {") self.else_clause.generate_execution_code(code) code.putln("}") code.put_label(break_label) def generate_function_definitions(self, env, code): if self.condition: self.condition.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): if self.condition: self.condition.annotate(code) self.body.annotate(code) if self.else_clause: self.else_clause.annotate(code) class DictIterationNextNode(Node): # Helper node for calling PyDict_Next() inside of a WhileStatNode # and checking the dictionary size for changes. Created in # Optimize.py. child_attrs = ['dict_obj', 'expected_size', 'pos_index_var', 'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var', 'key_target', 'value_target', 'tuple_target', 'is_dict_flag'] coerced_key_var = key_ref = None coerced_value_var = value_ref = None coerced_tuple_var = tuple_ref = None def __init__(self, dict_obj, expected_size, pos_index_var, key_target, value_target, tuple_target, is_dict_flag): Node.__init__( self, dict_obj.pos, dict_obj=dict_obj, expected_size=expected_size, pos_index_var=pos_index_var, key_target=key_target, value_target=value_target, tuple_target=tuple_target, is_dict_flag=is_dict_flag, is_temp=True, type=PyrexTypes.c_bint_type) def analyse_expressions(self, env): from . 
import ExprNodes self.dict_obj = self.dict_obj.analyse_types(env) self.expected_size = self.expected_size.analyse_types(env) if self.pos_index_var: self.pos_index_var = self.pos_index_var.analyse_types(env) if self.key_target: self.key_target = self.key_target.analyse_target_types(env) self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type) self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env) if self.value_target: self.value_target = self.value_target.analyse_target_types(env) self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type) self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env) if self.tuple_target: self.tuple_target = self.tuple_target.analyse_target_types(env) self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type) self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env) self.is_dict_flag = self.is_dict_flag.analyse_types(env) return self def generate_function_definitions(self, env, code): self.dict_obj.generate_function_definitions(env, code) def generate_execution_code(self, code): code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c")) self.dict_obj.generate_evaluation_code(code) assignments = [] temp_addresses = [] for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target), (self.value_ref, self.coerced_value_var, self.value_target), (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]: if target is None: addr = 'NULL' else: assignments.append((var, result, target)) var.allocate(code) addr = '&%s' % var.result() temp_addresses.append(addr) result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False) code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % ( result_temp, self.dict_obj.py_result(), self.expected_size.result(), self.pos_index_var.result(), temp_addresses[0], temp_addresses[1], temp_addresses[2], self.is_dict_flag.result() )) code.putln("if (unlikely(%s == 0)) break;" % result_temp) code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos)) code.funcstate.release_temp(result_temp) # evaluate all coercions before the assignments for var, result, target in assignments: code.put_gotref(var.result()) for var, result, target in assignments: result.generate_evaluation_code(code) for var, result, target in assignments: target.generate_assignment_code(result, code) var.release(code) def ForStatNode(pos, **kw): if 'iterator' in kw: if kw['iterator'].is_async: return AsyncForStatNode(pos, **kw) else: return ForInStatNode(pos, **kw) else: return ForFromStatNode(pos, **kw) class _ForInStatNode(LoopNode, StatNode): # Base class of 'for-in' statements. 
# # target ExprNode # iterator IteratorNode | AwaitExprNode(AsyncIteratorNode) # body StatNode # else_clause StatNode # item NextNode | AwaitExprNode(AsyncNextNode) # is_async boolean true for 'async for' statements child_attrs = ["target", "item", "iterator", "body", "else_clause"] item = None is_async = False def _create_item_node(self): raise NotImplementedError("must be implemented by subclasses") def analyse_declarations(self, env): self.target.analyse_target_declaration(env) self.body.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) self._create_item_node() def analyse_expressions(self, env): self.target = self.target.analyse_target_types(env) self.iterator = self.iterator.analyse_expressions(env) self._create_item_node() # must rewrap self.item after analysis self.item = self.item.analyse_expressions(env) if (not self.is_async and (self.iterator.type.is_ptr or self.iterator.type.is_array) and self.target.type.assignable_from(self.iterator.type)): # C array slice optimization. pass else: self.item = self.item.coerce_to(self.target.type, env) self.body = self.body.analyse_expressions(env) if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self def generate_execution_code(self, code): code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() self.iterator.generate_evaluation_code(code) code.putln("for (;;) {") self.item.generate_evaluation_code(code) self.target.generate_assignment_code(self.item, code) self.body.generate_execution_code(code) code.mark_pos(self.pos) code.put_label(code.continue_label) code.putln("}") break_label = code.break_label code.set_loop_labels(old_loop_labels) if self.else_clause: # in nested loops, the 'else' block can contain a # 'continue' statement for the outer loop, but we may need # to generate cleanup code before taking that path, so we # intercept it here orig_continue_label = code.continue_label code.continue_label = code.new_label('outer_continue') code.putln("/*else*/ {") self.else_clause.generate_execution_code(code) code.putln("}") if code.label_used(code.continue_label): code.put_goto(break_label) code.mark_pos(self.pos) code.put_label(code.continue_label) self.iterator.generate_disposal_code(code) code.put_goto(orig_continue_label) code.set_loop_labels(old_loop_labels) code.mark_pos(self.pos) if code.label_used(break_label): code.put_label(break_label) self.iterator.generate_disposal_code(code) self.iterator.free_temps(code) def generate_function_definitions(self, env, code): self.target.generate_function_definitions(env, code) self.iterator.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): self.target.annotate(code) self.iterator.annotate(code) self.body.annotate(code) if self.else_clause: self.else_clause.annotate(code) self.item.annotate(code) class ForInStatNode(_ForInStatNode): # 'for' statement is_async = False def _create_item_node(self): from .ExprNodes import NextNode self.item = NextNode(self.iterator) class AsyncForStatNode(_ForInStatNode): # 'async for' statement # # iterator AwaitExprNode(AsyncIteratorNode) # item AwaitIterNextExprNode(AsyncIteratorNode) is_async = True def __init__(self, pos, iterator, **kw): assert 'item' not in kw from . 
import ExprNodes # AwaitExprNodes must appear before running MarkClosureVisitor kw['iterator'] = ExprNodes.AwaitExprNode(iterator.pos, arg=iterator) kw['item'] = ExprNodes.AwaitIterNextExprNode(iterator.pos, arg=None) _ForInStatNode.__init__(self, pos, **kw) def _create_item_node(self): from . import ExprNodes self.item.arg = ExprNodes.AsyncNextNode(self.iterator) class ForFromStatNode(LoopNode, StatNode): # for name from expr rel name rel expr # # target NameNode # bound1 ExprNode # relation1 string # relation2 string # bound2 ExprNode # step ExprNode or None # body StatNode # else_clause StatNode or None # # Used internally: # # from_range bool # is_py_target bool # loopvar_node ExprNode (usually a NameNode or temp node) # py_loopvar_node PyTempNode or None child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"] is_py_target = False loopvar_node = None py_loopvar_node = None from_range = False gil_message = "For-loop using object bounds or target" def nogil_check(self, env): for x in (self.target, self.bound1, self.bound2): if x.type.is_pyobject: self.gil_error() def analyse_declarations(self, env): self.target.analyse_target_declaration(env) self.body.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) def analyse_expressions(self, env): from . import ExprNodes self.target = self.target.analyse_target_types(env) self.bound1 = self.bound1.analyse_types(env) self.bound2 = self.bound2.analyse_types(env) if self.step is not None: if isinstance(self.step, ExprNodes.UnaryMinusNode): warning(self.step.pos, "Probable infinite loop in for-from-by statement. " "Consider switching the directions of the relations.", 2) self.step = self.step.analyse_types(env) if self.target.type.is_numeric: loop_type = self.target.type else: loop_type = PyrexTypes.c_int_type if not self.bound1.type.is_pyobject: loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type) if not self.bound2.type.is_pyobject: loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type) if self.step is not None and not self.step.type.is_pyobject: loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type) self.bound1 = self.bound1.coerce_to(loop_type, env) self.bound2 = self.bound2.coerce_to(loop_type, env) if not self.bound2.is_literal: self.bound2 = self.bound2.coerce_to_temp(env) if self.step is not None: self.step = self.step.coerce_to(loop_type, env) if not self.step.is_literal: self.step = self.step.coerce_to_temp(env) target_type = self.target.type if not (target_type.is_pyobject or target_type.is_numeric): error(self.target.pos, "for-from loop variable must be c numeric type or Python object") if target_type.is_numeric: self.is_py_target = False if isinstance(self.target, ExprNodes.BufferIndexNode): raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.") self.loopvar_node = self.target self.py_loopvar_node = None else: self.is_py_target = True c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env) self.loopvar_node = c_loopvar_node self.py_loopvar_node = \ ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env) self.body = self.body.analyse_expressions(env) if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self def generate_execution_code(self, code): code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() from_range = self.from_range self.bound1.generate_evaluation_code(code) self.bound2.generate_evaluation_code(code) offset, incop = 
self.relation_table[self.relation1] if self.step is not None: self.step.generate_evaluation_code(code) step = self.step.result() incop = "%s=%s" % (incop[0], step) from . import ExprNodes if isinstance(self.loopvar_node, ExprNodes.TempNode): self.loopvar_node.allocate(code) if isinstance(self.py_loopvar_node, ExprNodes.TempNode): self.py_loopvar_node.allocate(code) if from_range: loopvar_name = code.funcstate.allocate_temp(self.target.type, False) else: loopvar_name = self.loopvar_node.result() if self.target.type.is_int and not self.target.type.signed and self.relation2[0] == '>': # Handle the case where the endpoint of an unsigned int iteration # is within step of 0. if not self.step: step = 1 code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % ( loopvar_name, self.bound1.result(), offset, step, loopvar_name, self.relation2, self.bound2.result(), step, loopvar_name, incop)) else: code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % ( loopvar_name, self.bound1.result(), offset, loopvar_name, self.relation2, self.bound2.result(), loopvar_name, incop)) if self.py_loopvar_node: self.py_loopvar_node.generate_evaluation_code(code) self.target.generate_assignment_code(self.py_loopvar_node, code) elif from_range: code.putln("%s = %s;" % ( self.target.result(), loopvar_name)) self.body.generate_execution_code(code) code.put_label(code.continue_label) if self.py_loopvar_node: # This mess is to make for..from loops with python targets behave # exactly like those with C targets with regards to re-assignment # of the loop variable. if self.target.entry.is_pyglobal: # We know target is a NameNode, this is the only ugly case. target_node = ExprNodes.PyTempNode(self.target.pos, None) target_node.allocate(code) interned_cname = code.intern_identifier(self.target.entry.name) if self.target.entry.scope.is_module_scope: code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) lookup_func = '__Pyx_GetModuleGlobalName(%s)' else: code.globalstate.use_utility_code( UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c")) lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % ( self.target.entry.scope.namespace_cname) code.putln("%s = %s; %s" % ( target_node.result(), lookup_func % interned_cname, code.error_goto_if_null(target_node.result(), self.target.pos))) code.put_gotref(target_node.result()) else: target_node = self.target from_py_node = ExprNodes.CoerceFromPyTypeNode( self.loopvar_node.type, target_node, self.target.entry.scope) from_py_node.temp_code = loopvar_name from_py_node.generate_result_code(code) if self.target.entry.is_pyglobal: code.put_decref(target_node.result(), target_node.type) target_node.release(code) code.putln("}") if self.py_loopvar_node: # This is potentially wasteful, but we don't want the semantics to # depend on whether or not the loop is a python type. 
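            # (This re-binds the Python-level target from the C loop variable one
            # final time after the loop, so its final value matches what a C target
            # would hold.)
            #
            # Rough sketch, not literal generated output, of the C shape emitted
            # above for `for i from 0 <= i < n:` with a C integer target
            # (offset "" and incop "++" come from relation_table below):
            #
            #     for (i = 0; i < n; i++) {
            #         /* loop body */
            #     }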
self.py_loopvar_node.generate_evaluation_code(code) self.target.generate_assignment_code(self.py_loopvar_node, code) if from_range: code.funcstate.release_temp(loopvar_name) break_label = code.break_label code.set_loop_labels(old_loop_labels) if self.else_clause: code.putln("/*else*/ {") self.else_clause.generate_execution_code(code) code.putln("}") code.put_label(break_label) self.bound1.generate_disposal_code(code) self.bound1.free_temps(code) self.bound2.generate_disposal_code(code) self.bound2.free_temps(code) if isinstance(self.loopvar_node, ExprNodes.TempNode): self.loopvar_node.release(code) if isinstance(self.py_loopvar_node, ExprNodes.TempNode): self.py_loopvar_node.release(code) if self.step is not None: self.step.generate_disposal_code(code) self.step.free_temps(code) relation_table = { # {relop : (initial offset, increment op)} '<=': ("", "++"), '<' : ("+1", "++"), '>=': ("", "--"), '>' : ("-1", "--"), } def generate_function_definitions(self, env, code): self.target.generate_function_definitions(env, code) self.bound1.generate_function_definitions(env, code) self.bound2.generate_function_definitions(env, code) if self.step is not None: self.step.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): self.target.annotate(code) self.bound1.annotate(code) self.bound2.annotate(code) if self.step: self.step.annotate(code) self.body.annotate(code) if self.else_clause: self.else_clause.annotate(code) class WithStatNode(StatNode): """ Represents a Python with statement. Implemented by the WithTransform as follows: MGR = EXPR EXIT = MGR.__exit__ VALUE = MGR.__enter__() EXC = True try: try: TARGET = VALUE # optional BODY except: EXC = False if not EXIT(*EXCINFO): raise finally: if EXC: EXIT(None, None, None) MGR = EXIT = VALUE = None """ # manager The with statement manager object # target ExprNode the target lhs of the __enter__() call # body StatNode # enter_call ExprNode the call to the __enter__() method # exit_var String the cname of the __exit__() method reference child_attrs = ["manager", "enter_call", "target", "body"] enter_call = None target_temp = None def analyse_declarations(self, env): self.manager.analyse_declarations(env) self.enter_call.analyse_declarations(env) self.body.analyse_declarations(env) def analyse_expressions(self, env): self.manager = self.manager.analyse_types(env) self.enter_call = self.enter_call.analyse_types(env) if self.target: # set up target_temp before descending into body (which uses it) from .ExprNodes import TempNode self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type) self.body = self.body.analyse_expressions(env) return self def generate_function_definitions(self, env, code): self.manager.generate_function_definitions(env, code) self.enter_call.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) def generate_execution_code(self, code): code.mark_pos(self.pos) code.putln("/*with:*/ {") self.manager.generate_evaluation_code(code) self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False) code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c")) code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % ( self.exit_var, self.manager.py_result(), code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')), code.error_goto_if_null(self.exit_var, 
self.pos), )) code.put_gotref(self.exit_var) # need to free exit_var in the face of exceptions during setup old_error_label = code.new_error_label() intermediate_error_label = code.error_label self.enter_call.generate_evaluation_code(code) if self.target: # The temp result will be cleaned up by the WithTargetAssignmentStatNode # after assigning its result to the target of the 'with' statement. self.target_temp.allocate(code) self.enter_call.make_owned_reference(code) code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result())) self.enter_call.generate_post_assignment_code(code) else: self.enter_call.generate_disposal_code(code) self.enter_call.free_temps(code) self.manager.generate_disposal_code(code) self.manager.free_temps(code) code.error_label = old_error_label self.body.generate_execution_code(code) if code.label_used(intermediate_error_label): step_over_label = code.new_label() code.put_goto(step_over_label) code.put_label(intermediate_error_label) code.put_decref_clear(self.exit_var, py_object_type) code.put_goto(old_error_label) code.put_label(step_over_label) code.funcstate.release_temp(self.exit_var) code.putln('}') class WithTargetAssignmentStatNode(AssignmentNode): # The target assignment of the 'with' statement value (return # value of the __enter__() call). # # This is a special cased assignment that properly cleans up the RHS. # # lhs ExprNode the assignment target # rhs ExprNode a (coerced) TempNode for the rhs (from WithStatNode) # with_node WithStatNode the surrounding with-statement child_attrs = ["rhs", "lhs"] with_node = None rhs = None def analyse_declarations(self, env): self.lhs.analyse_target_declaration(env) def analyse_expressions(self, env): self.lhs = self.lhs.analyse_target_types(env) self.lhs.gil_assignment_check(env) self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env) return self def generate_execution_code(self, code): self.rhs.generate_evaluation_code(code) self.lhs.generate_assignment_code(self.rhs, code) self.with_node.target_temp.release(code) def annotate(self, code): self.lhs.annotate(code) self.rhs.annotate(code) class TryExceptStatNode(StatNode): # try .. 
except statement # # body StatNode # except_clauses [ExceptClauseNode] # else_clause StatNode or None child_attrs = ["body", "except_clauses", "else_clause"] def analyse_declarations(self, env): self.body.analyse_declarations(env) for except_clause in self.except_clauses: except_clause.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) def analyse_expressions(self, env): self.body = self.body.analyse_expressions(env) default_clause_seen = 0 for i, except_clause in enumerate(self.except_clauses): except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env) if default_clause_seen: error(except_clause.pos, "default 'except:' must be last") if not except_clause.pattern: default_clause_seen = 1 self.has_default_clause = default_clause_seen if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self nogil_check = Node.gil_error gil_message = "Try-except statement" def generate_execution_code(self, code): old_return_label = code.return_label old_break_label = code.break_label old_continue_label = code.continue_label old_error_label = code.new_error_label() our_error_label = code.error_label except_end_label = code.new_label('exception_handled') except_error_label = code.new_label('except_error') except_return_label = code.new_label('except_return') try_return_label = code.new_label('try_return') try_break_label = code.new_label('try_break') try_continue_label = code.new_label('try_continue') try_end_label = code.new_label('try_end') exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False) for _ in range(3)] code.mark_pos(self.pos) code.putln("{") save_exc = code.insertion_point() code.putln( "/*try:*/ {") code.return_label = try_return_label code.break_label = try_break_label code.continue_label = try_continue_label self.body.generate_execution_code(code) code.mark_pos(self.pos, trace=False) code.putln( "}") temps_to_clean_up = code.funcstate.all_free_managed_temps() can_raise = code.label_used(our_error_label) if can_raise: # inject code before the try block to save away the exception state code.globalstate.use_utility_code(reset_exception_utility_code) save_exc.putln("__Pyx_PyThreadState_declare") save_exc.putln("__Pyx_PyThreadState_assign") save_exc.putln("__Pyx_ExceptionSave(%s);" % ( ', '.join(['&%s' % var for var in exc_save_vars]))) for var in exc_save_vars: save_exc.put_xgotref(var) def restore_saved_exception(): for name in exc_save_vars: code.put_xgiveref(name) code.putln("__Pyx_ExceptionReset(%s);" % ', '.join(exc_save_vars)) else: # try block cannot raise exceptions, but we had to allocate the temps above, # so just keep the C compiler from complaining about them being unused save_exc.putln("if (%s); else {/*mark used*/}" % '||'.join(exc_save_vars)) def restore_saved_exception(): pass code.error_label = except_error_label code.return_label = except_return_label normal_case_terminates = self.body.is_terminator if self.else_clause: code.mark_pos(self.else_clause.pos) code.putln( "/*else:*/ {") self.else_clause.generate_execution_code(code) code.putln( "}") if not normal_case_terminates: normal_case_terminates = self.else_clause.is_terminator if can_raise: if not normal_case_terminates: for var in exc_save_vars: code.put_xdecref_clear(var, py_object_type) code.put_goto(try_end_label) code.put_label(our_error_label) code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded for temp_name, temp_type in temps_to_clean_up: code.put_xdecref_clear(temp_name, temp_type) 
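        # Each except clause now contributes its own match-and-handle block; if no
        # bare `except:` clause is present, the chain falls through to the
        # except_error path. Minimal sketch of the Cython shape handled here
        # (all names hypothetical):
        #
        #     try:
        #         risky()                   # body generated above
        #     except ValueError as exc:     # ExceptClauseNode with a pattern
        #         handle(exc)
        #     except:                       # default clause, must come last
        #         cleanup()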
for except_clause in self.except_clauses: except_clause.generate_handling_code(code, except_end_label) if not self.has_default_clause: code.put_goto(except_error_label) for exit_label, old_label in [(except_error_label, old_error_label), (try_break_label, old_break_label), (try_continue_label, old_continue_label), (try_return_label, old_return_label), (except_return_label, old_return_label)]: if code.label_used(exit_label): if not normal_case_terminates and not code.label_used(try_end_label): code.put_goto(try_end_label) code.put_label(exit_label) code.mark_pos(self.pos, trace=False) if can_raise: code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded restore_saved_exception() code.put_goto(old_label) if code.label_used(except_end_label): if not normal_case_terminates and not code.label_used(try_end_label): code.put_goto(try_end_label) code.put_label(except_end_label) if can_raise: code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded restore_saved_exception() if code.label_used(try_end_label): code.put_label(try_end_label) code.putln("}") for cname in exc_save_vars: code.funcstate.release_temp(cname) code.return_label = old_return_label code.break_label = old_break_label code.continue_label = old_continue_label code.error_label = old_error_label def generate_function_definitions(self, env, code): self.body.generate_function_definitions(env, code) for except_clause in self.except_clauses: except_clause.generate_function_definitions(env, code) if self.else_clause is not None: self.else_clause.generate_function_definitions(env, code) def annotate(self, code): self.body.annotate(code) for except_node in self.except_clauses: except_node.annotate(code) if self.else_clause: self.else_clause.annotate(code) class ExceptClauseNode(Node): # Part of try ... except statement. # # pattern [ExprNode] # target ExprNode or None # body StatNode # excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!) # match_flag string result of exception match # exc_value ExcValueNode used internally # function_name string qualified name of enclosing function # exc_vars (string * 3) local exception variables # is_except_as bool Py3-style "except ... as xyz" # excinfo_target is never set by the parser, but can be set by a transform # in order to extract more extensive information about the exception as a # sys.exc_info()-style tuple into a target variable child_attrs = ["pattern", "target", "body", "exc_value"] exc_value = None excinfo_target = None is_except_as = False def analyse_declarations(self, env): if self.target: self.target.analyse_target_declaration(env) self.body.analyse_declarations(env) def analyse_expressions(self, env): self.function_name = env.qualified_name if self.pattern: # normalise/unpack self.pattern into a list for i, pattern in enumerate(self.pattern): pattern = pattern.analyse_expressions(env) self.pattern[i] = pattern.coerce_to_pyobject(env) if self.target: from . 
import ExprNodes self.exc_value = ExprNodes.ExcValueNode(self.pos) self.target = self.target.analyse_target_expression(env, self.exc_value) self.body = self.body.analyse_expressions(env) return self def generate_handling_code(self, code, end_label): code.mark_pos(self.pos) if self.pattern: code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c")) exc_tests = [] for pattern in self.pattern: pattern.generate_evaluation_code(code) exc_tests.append("__Pyx_PyErr_ExceptionMatches(%s)" % pattern.py_result()) match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False) code.putln( "%s = %s;" % (match_flag, ' || '.join(exc_tests))) for pattern in self.pattern: pattern.generate_disposal_code(code) pattern.free_temps(code) code.putln( "if (%s) {" % match_flag) code.funcstate.release_temp(match_flag) else: code.putln("/*except:*/ {") if (not getattr(self.body, 'stats', True) and self.excinfo_target is None and self.target is None): # most simple case: no exception variable, empty body (pass) # => reset the exception state, done code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")) code.putln("__Pyx_ErrRestore(0,0,0);") code.put_goto(end_label) code.putln("}") return exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True) for _ in range(3)] code.put_add_traceback(self.function_name) # We always have to fetch the exception value even if # there is no target, because this also normalises the # exception and stores it in the thread state. code.globalstate.use_utility_code(get_exception_utility_code) exc_args = "&%s, &%s, &%s" % tuple(exc_vars) code.putln("if (__Pyx_GetException(%s) < 0) %s" % ( exc_args, code.error_goto(self.pos))) for x in exc_vars: code.put_gotref(x) if self.target: self.exc_value.set_var(exc_vars[1]) self.exc_value.generate_evaluation_code(code) self.target.generate_assignment_code(self.exc_value, code) if self.excinfo_target is not None: for tempvar, node in zip(exc_vars, self.excinfo_target.args): node.set_var(tempvar) old_break_label, old_continue_label = code.break_label, code.continue_label code.break_label = code.new_label('except_break') code.continue_label = code.new_label('except_continue') old_exc_vars = code.funcstate.exc_vars code.funcstate.exc_vars = exc_vars self.body.generate_execution_code(code) code.funcstate.exc_vars = old_exc_vars if not self.body.is_terminator: for var in exc_vars: code.put_decref_clear(var, py_object_type) code.put_goto(end_label) for new_label, old_label in [(code.break_label, old_break_label), (code.continue_label, old_continue_label)]: if code.label_used(new_label): code.put_label(new_label) for var in exc_vars: code.put_decref_clear(var, py_object_type) code.put_goto(old_label) code.break_label = old_break_label code.continue_label = old_continue_label for temp in exc_vars: code.funcstate.release_temp(temp) code.putln( "}") def generate_function_definitions(self, env, code): if self.target is not None: self.target.generate_function_definitions(env, code) self.body.generate_function_definitions(env, code) def annotate(self, code): if self.pattern: for pattern in self.pattern: pattern.annotate(code) if self.target: self.target.annotate(code) self.body.annotate(code) class TryFinallyStatNode(StatNode): # try ... 
finally statement # # body StatNode # finally_clause StatNode # finally_except_clause deep-copy of finally_clause for exception case # # Each of the continue, break, return and error gotos runs # into its own deep-copy of the finally block code. # In addition, if we're doing an error, we save the # exception on entry to the finally block and restore # it on exit. child_attrs = ["body", "finally_clause", "finally_except_clause"] preserve_exception = 1 # handle exception case, in addition to return/break/continue handle_error_case = True func_return_type = None finally_except_clause = None is_try_finally_in_nogil = False @staticmethod def create_analysed(pos, env, body, finally_clause): node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause) return node def analyse_declarations(self, env): self.body.analyse_declarations(env) self.finally_except_clause = copy.deepcopy(self.finally_clause) self.finally_except_clause.analyse_declarations(env) self.finally_clause.analyse_declarations(env) def analyse_expressions(self, env): self.body = self.body.analyse_expressions(env) self.finally_clause = self.finally_clause.analyse_expressions(env) self.finally_except_clause = self.finally_except_clause.analyse_expressions(env) if env.return_type and not env.return_type.is_void: self.func_return_type = env.return_type return self nogil_check = Node.gil_error gil_message = "Try-finally statement" def generate_execution_code(self, code): code.mark_pos(self.pos) old_error_label = code.error_label old_labels = code.all_new_labels() new_labels = code.get_all_labels() new_error_label = code.error_label if not self.handle_error_case: code.error_label = old_error_label catch_label = code.new_label() code.putln("/*try:*/ {") was_in_try_finally = code.funcstate.in_try_finally code.funcstate.in_try_finally = 1 self.body.generate_execution_code(code) code.funcstate.in_try_finally = was_in_try_finally code.putln("}") code.set_all_labels(old_labels) temps_to_clean_up = code.funcstate.all_free_managed_temps() code.mark_pos(self.finally_clause.pos) code.putln("/*finally:*/ {") def fresh_finally_clause(_next=[self.finally_clause]): # generate the original subtree once and always keep a fresh copy node = _next[0] node_copy = copy.deepcopy(node) if node is self.finally_clause: _next[0] = node_copy else: node = node_copy return node preserve_error = self.preserve_exception and code.label_used(new_error_label) needs_success_cleanup = not self.finally_clause.is_terminator if not self.body.is_terminator: code.putln('/*normal exit:*/{') fresh_finally_clause().generate_execution_code(code) if not self.finally_clause.is_terminator: code.put_goto(catch_label) code.putln('}') if preserve_error: code.putln('/*exception exit:*/{') code.putln("__Pyx_PyThreadState_declare") if self.is_try_finally_in_nogil: code.declare_gilstate() if needs_success_cleanup: exc_lineno_cnames = tuple([ code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) for _ in range(2)]) exc_filename_cname = code.funcstate.allocate_temp( PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)), manage_ref=False) else: exc_lineno_cnames = exc_filename_cname = None exc_vars = tuple([ code.funcstate.allocate_temp(py_object_type, manage_ref=False) for _ in range(6)]) code.put_label(new_error_label) self.put_error_catcher( code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname) finally_old_labels = code.all_new_labels() code.putln('{') old_exc_vars = code.funcstate.exc_vars code.funcstate.exc_vars = exc_vars[:3] 
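            # While this deep-copied, exception-path copy of the finally body runs,
            # the freshly fetched exception triple is exposed via funcstate.exc_vars
            # (mirroring what an except clause does). Minimal sketch of the source
            # shape this path serves (hypothetical names):
            #
            #     try:
            #         work()        # raises -> lands on new_error_label above
            #     finally:
            #         release()     # this copy handles the exception exit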
self.finally_except_clause.generate_execution_code(code) code.funcstate.exc_vars = old_exc_vars code.putln('}') if needs_success_cleanup: self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname) if exc_lineno_cnames: for cname in exc_lineno_cnames: code.funcstate.release_temp(cname) if exc_filename_cname: code.funcstate.release_temp(exc_filename_cname) code.put_goto(old_error_label) for new_label, old_label in zip(code.get_all_labels(), finally_old_labels): if not code.label_used(new_label): continue code.put_label(new_label) self.put_error_cleaner(code, exc_vars) code.put_goto(old_label) for cname in exc_vars: code.funcstate.release_temp(cname) code.putln('}') code.set_all_labels(old_labels) return_label = code.return_label for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)): if not code.label_used(new_label): continue if new_label == new_error_label and preserve_error: continue # handled above code.put('%s: ' % new_label) code.putln('{') ret_temp = None if old_label == return_label and not self.finally_clause.is_terminator: # store away return value for later reuse if (self.func_return_type and not self.is_try_finally_in_nogil and not isinstance(self.finally_clause, GILExitNode)): ret_temp = code.funcstate.allocate_temp( self.func_return_type, manage_ref=False) code.putln("%s = %s;" % (ret_temp, Naming.retval_cname)) if self.func_return_type.is_pyobject: code.putln("%s = 0;" % Naming.retval_cname) fresh_finally_clause().generate_execution_code(code) if ret_temp: code.putln("%s = %s;" % (Naming.retval_cname, ret_temp)) if self.func_return_type.is_pyobject: code.putln("%s = 0;" % ret_temp) code.funcstate.release_temp(ret_temp) ret_temp = None if not self.finally_clause.is_terminator: code.put_goto(old_label) code.putln('}') # End finally code.put_label(catch_label) code.putln( "}") def generate_function_definitions(self, env, code): self.body.generate_function_definitions(env, code) self.finally_clause.generate_function_definitions(env, code) def put_error_catcher(self, code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname): code.globalstate.use_utility_code(restore_exception_utility_code) code.globalstate.use_utility_code(get_exception_utility_code) code.globalstate.use_utility_code(swap_exception_utility_code) code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars) if self.is_try_finally_in_nogil: code.put_ensure_gil(declare_gilstate=False) code.putln("__Pyx_PyThreadState_assign") for temp_name, type in temps_to_clean_up: code.put_xdecref_clear(temp_name, type) # not using preprocessor here to avoid warnings about # unused utility functions and/or temps code.putln("if (PY_MAJOR_VERSION >= 3)" " __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:]) code.putln("if ((PY_MAJOR_VERSION < 3) ||" # if __Pyx_GetException() fails in Py3, # store the newly raised exception instead " unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) " "__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2)) for var in exc_vars: code.put_xgotref(var) if exc_lineno_cnames: code.putln("%s = %s; %s = %s; %s = %s;" % ( exc_lineno_cnames[0], Naming.lineno_cname, exc_lineno_cnames[1], Naming.clineno_cname, exc_filename_cname, Naming.filename_cname)) if self.is_try_finally_in_nogil: code.put_release_ensured_gil() def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname): code.globalstate.use_utility_code(restore_exception_utility_code) code.globalstate.use_utility_code(reset_exception_utility_code) if self.is_try_finally_in_nogil: 
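            # Restoring/resetting the exception state below goes through CPython
            # C-API calls, so a try/finally inside a nogil section has to briefly
            # re-acquire the GIL around them.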
code.put_ensure_gil(declare_gilstate=False) code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded # not using preprocessor here to avoid warnings about # unused utility functions and/or temps code.putln("if (PY_MAJOR_VERSION >= 3) {") for var in exc_vars[3:]: code.put_xgiveref(var) code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:]) code.putln("}") for var in exc_vars[:3]: code.put_xgiveref(var) code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3]) if self.is_try_finally_in_nogil: code.put_release_ensured_gil() code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars) if exc_lineno_cnames: code.putln("%s = %s; %s = %s; %s = %s;" % ( Naming.lineno_cname, exc_lineno_cnames[0], Naming.clineno_cname, exc_lineno_cnames[1], Naming.filename_cname, exc_filename_cname)) def put_error_cleaner(self, code, exc_vars): code.globalstate.use_utility_code(reset_exception_utility_code) if self.is_try_finally_in_nogil: code.put_ensure_gil(declare_gilstate=False) code.putln("__Pyx_PyThreadState_assign") # re-assign in case a generator yielded # not using preprocessor here to avoid warnings about # unused utility functions and/or temps code.putln("if (PY_MAJOR_VERSION >= 3) {") for var in exc_vars[3:]: code.put_xgiveref(var) code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:]) code.putln("}") for var in exc_vars[:3]: code.put_xdecref_clear(var, py_object_type) if self.is_try_finally_in_nogil: code.put_release_ensured_gil() code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:]) def annotate(self, code): self.body.annotate(code) self.finally_clause.annotate(code) class NogilTryFinallyStatNode(TryFinallyStatNode): """ A try/finally statement that may be used in nogil code sections. """ preserve_exception = False nogil_check = None class GILStatNode(NogilTryFinallyStatNode): # 'with gil' or 'with nogil' statement # # state string 'gil' or 'nogil' state_temp = None def __init__(self, pos, state, body): self.state = state self.create_state_temp_if_needed(pos, state, body) TryFinallyStatNode.__init__( self, pos, body=body, finally_clause=GILExitNode( pos, state=state, state_temp=self.state_temp)) def create_state_temp_if_needed(self, pos, state, body): from .ParseTreeTransforms import YieldNodeCollector collector = YieldNodeCollector() collector.visitchildren(body) if not collector.yields and not collector.awaits: return if state == 'gil': temp_type = PyrexTypes.c_gilstate_type else: temp_type = PyrexTypes.c_threadstate_ptr_type from . 
import ExprNodes self.state_temp = ExprNodes.TempNode(pos, temp_type) def analyse_declarations(self, env): env._in_with_gil_block = (self.state == 'gil') if self.state == 'gil': env.has_with_gil_block = True return super(GILStatNode, self).analyse_declarations(env) def analyse_expressions(self, env): env.use_utility_code( UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c")) was_nogil = env.nogil env.nogil = self.state == 'nogil' node = TryFinallyStatNode.analyse_expressions(self, env) env.nogil = was_nogil return node def generate_execution_code(self, code): code.mark_pos(self.pos) code.begin_block() if self.state_temp: self.state_temp.allocate(code) variable = self.state_temp.result() else: variable = None old_gil_config = code.funcstate.gil_owned if self.state == 'gil': code.put_ensure_gil(variable=variable) code.funcstate.gil_owned = True else: code.put_release_gil(variable=variable) code.funcstate.gil_owned = False TryFinallyStatNode.generate_execution_code(self, code) if self.state_temp: self.state_temp.release(code) code.funcstate.gil_owned = old_gil_config code.end_block() class GILExitNode(StatNode): """ Used as the 'finally' block in a GILStatNode state string 'gil' or 'nogil' """ child_attrs = [] state_temp = None def analyse_expressions(self, env): return self def generate_execution_code(self, code): if self.state_temp: variable = self.state_temp.result() else: variable = None if self.state == 'gil': code.put_release_ensured_gil(variable) else: code.put_acquire_gil(variable) class EnsureGILNode(GILExitNode): """ Ensure the GIL in nogil functions for cleanup before returning. """ def generate_execution_code(self, code): code.put_ensure_gil(declare_gilstate=False) utility_code_for_cimports = { # utility code (or inlining c) in a pxd (or pyx) file. # TODO: Consider a generic user-level mechanism for importing 'cpython.array' : ("ArrayAPI", "arrayarray.h"), 'cpython.array.array' : ("ArrayAPI", "arrayarray.h"), } utility_code_for_imports = { # utility code used when special modules are imported. # TODO: Consider a generic user-level mechanism for importing 'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"), 'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"), } class CImportStatNode(StatNode): # cimport statement # # module_name string Qualified name of module being imported # as_name string or None Name specified in "as" clause, if any # is_absolute bool True for absolute imports, False otherwise child_attrs = [] is_absolute = False def analyse_declarations(self, env): if not env.is_module_scope: error(self.pos, "cimport only allowed at module level") return module_scope = env.find_module( self.module_name, self.pos, relative_level=0 if self.is_absolute else -1) if "." 
in self.module_name: names = [EncodedString(name) for name in self.module_name.split(".")] top_name = names[0] top_module_scope = env.context.find_submodule(top_name) module_scope = top_module_scope for name in names[1:]: submodule_scope = module_scope.find_submodule(name) module_scope.declare_module(name, submodule_scope, self.pos) module_scope = submodule_scope if self.as_name: env.declare_module(self.as_name, module_scope, self.pos) else: env.add_imported_module(module_scope) env.declare_module(top_name, top_module_scope, self.pos) else: name = self.as_name or self.module_name env.declare_module(name, module_scope, self.pos) if self.module_name in utility_code_for_cimports: env.use_utility_code(UtilityCode.load_cached( *utility_code_for_cimports[self.module_name])) def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class FromCImportStatNode(StatNode): # from ... cimport statement # # module_name string Qualified name of module # relative_level int or None Relative import: number of dots before module_name # imported_names [(pos, name, as_name, kind)] Names to be imported child_attrs = [] module_name = None relative_level = None imported_names = None def analyse_declarations(self, env): if not env.is_module_scope: error(self.pos, "cimport only allowed at module level") return if self.relative_level and self.relative_level > env.qualified_name.count('.'): error(self.pos, "relative cimport beyond main package is not allowed") return module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level) module_name = module_scope.qualified_name env.add_imported_module(module_scope) for pos, name, as_name, kind in self.imported_names: if name == "*": for local_name, entry in list(module_scope.entries.items()): env.add_imported_entry(local_name, entry, pos) else: entry = module_scope.lookup(name) if entry: if kind and not self.declaration_matches(entry, kind): entry.redeclared(pos) entry.used = 1 else: if kind == 'struct' or kind == 'union': entry = module_scope.declare_struct_or_union( name, kind=kind, scope=None, typedef_flag=0, pos=pos) elif kind == 'class': entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name) else: submodule_scope = env.context.find_module( name, relative_to=module_scope, pos=self.pos, absolute_fallback=False) if submodule_scope.parent_module is module_scope: env.declare_module(as_name or name, submodule_scope, self.pos) else: error(pos, "Name '%s' not declared in module '%s'" % (name, module_name)) if entry: local_name = as_name or name env.add_imported_entry(local_name, entry, pos) if module_name.startswith('cpython'): # enough for now if module_name in utility_code_for_cimports: env.use_utility_code(UtilityCode.load_cached( *utility_code_for_cimports[module_name])) for _, name, _, _ in self.imported_names: fqname = '%s.%s' % (module_name, name) if fqname in utility_code_for_cimports: env.use_utility_code(UtilityCode.load_cached( *utility_code_for_cimports[fqname])) def declaration_matches(self, entry, kind): if not entry.is_type: return 0 type = entry.type if kind == 'class': if not type.is_extension_type: return 0 else: if not type.is_struct_or_union: return 0 if kind != type.kind: return 0 return 1 def analyse_expressions(self, env): return self def generate_execution_code(self, code): pass class FromImportStatNode(StatNode): # from ... 
import statement # # module ImportNode # items [(string, NameNode)] # interned_items [(string, NameNode, ExprNode)] # item PyTempNode used internally # import_star boolean used internally child_attrs = ["module"] import_star = 0 def analyse_declarations(self, env): for name, target in self.items: if name == "*": if not env.is_module_scope: error(self.pos, "import * only allowed at module level") return env.has_import_star = 1 self.import_star = 1 else: target.analyse_target_declaration(env) def analyse_expressions(self, env): from . import ExprNodes self.module = self.module.analyse_expressions(env) self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type) self.interned_items = [] for name, target in self.items: if name == '*': for _, entry in env.entries.items(): if not entry.is_type and entry.type.is_extension_type: env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c")) break else: entry = env.lookup(target.name) # check whether or not entry is already cimported if (entry.is_type and entry.type.name == name and hasattr(entry.type, 'module_name')): if entry.type.module_name == self.module.module_name.value: # cimported with absolute name continue try: # cimported with relative name module = env.find_module(self.module.module_name.value, pos=self.pos, relative_level=self.module.level) if entry.type.module_name == module.qualified_name: continue except AttributeError: pass target = target.analyse_target_expression(env, None) # FIXME? if target.type is py_object_type: coerced_item = None else: coerced_item = self.item.coerce_to(target.type, env) self.interned_items.append((name, target, coerced_item)) return self def generate_execution_code(self, code): code.mark_pos(self.pos) self.module.generate_evaluation_code(code) if self.import_star: code.putln( 'if (%s(%s) < 0) %s;' % ( Naming.import_star, self.module.py_result(), code.error_goto(self.pos))) item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) self.item.set_cname(item_temp) if self.interned_items: code.globalstate.use_utility_code( UtilityCode.load_cached("ImportFrom", "ImportExport.c")) for name, target, coerced_item in self.interned_items: code.putln( '%s = __Pyx_ImportFrom(%s, %s); %s' % ( item_temp, self.module.py_result(), code.intern_identifier(name), code.error_goto_if_null(item_temp, self.pos))) code.put_gotref(item_temp) if coerced_item is None: target.generate_assignment_code(self.item, code) else: coerced_item.allocate_temp_result(code) coerced_item.generate_result_code(code) target.generate_assignment_code(coerced_item, code) code.put_decref_clear(item_temp, py_object_type) code.funcstate.release_temp(item_temp) self.module.generate_disposal_code(code) self.module.free_temps(code) class ParallelNode(Node): """ Base class for cython.parallel constructs. """ nogil_check = None class ParallelStatNode(StatNode, ParallelNode): """ Base class for 'with cython.parallel.parallel():' and 'for i in prange():'. assignments { Entry(var) : (var.pos, inplace_operator_or_None) } assignments to variables in this parallel section parent parent ParallelStatNode or None is_parallel indicates whether this node is OpenMP parallel (true for #pragma omp parallel for and #pragma omp parallel) is_parallel is true for: #pragma omp parallel #pragma omp parallel for sections, but NOT for #pragma omp for We need this to determine the sharing attributes. privatization_insertion_point a code insertion point used to make temps private (esp. 
the "nsteps" temp) args tuple the arguments passed to the parallel construct kwargs DictNode the keyword arguments passed to the parallel construct (replaced by its compile time value) """ child_attrs = ['body', 'num_threads'] body = None is_prange = False is_nested_prange = False error_label_used = False num_threads = None chunksize = None parallel_exc = ( Naming.parallel_exc_type, Naming.parallel_exc_value, Naming.parallel_exc_tb, ) parallel_pos_info = ( Naming.parallel_filename, Naming.parallel_lineno, Naming.parallel_clineno, ) pos_info = ( Naming.filename_cname, Naming.lineno_cname, Naming.clineno_cname, ) critical_section_counter = 0 def __init__(self, pos, **kwargs): super(ParallelStatNode, self).__init__(pos, **kwargs) # All assignments in this scope self.assignments = kwargs.get('assignments') or {} # All seen closure cnames and their temporary cnames self.seen_closure_vars = set() # Dict of variables that should be declared (first|last|)private or # reduction { Entry: (op, lastprivate) }. # If op is not None, it's a reduction. self.privates = {} # [NameNode] self.assigned_nodes = [] def analyse_declarations(self, env): self.body.analyse_declarations(env) self.num_threads = None if self.kwargs: # Try to find num_threads and chunksize keyword arguments pairs = [] for dictitem in self.kwargs.key_value_pairs: if dictitem.key.value == 'num_threads': self.num_threads = dictitem.value elif self.is_prange and dictitem.key.value == 'chunksize': self.chunksize = dictitem.value else: pairs.append(dictitem) self.kwargs.key_value_pairs = pairs try: self.kwargs = self.kwargs.compile_time_value(env) except Exception as e: error(self.kwargs.pos, "Only compile-time values may be " "supplied as keyword arguments") else: self.kwargs = {} for kw, val in self.kwargs.items(): if kw not in self.valid_keyword_arguments: error(self.pos, "Invalid keyword argument: %s" % kw) else: setattr(self, kw, val) def analyse_expressions(self, env): if self.num_threads: self.num_threads = self.num_threads.analyse_expressions(env) if self.chunksize: self.chunksize = self.chunksize.analyse_expressions(env) self.body = self.body.analyse_expressions(env) self.analyse_sharing_attributes(env) if self.num_threads is not None: if self.parent and self.parent.num_threads is not None and not self.parent.is_prange: error(self.pos, "num_threads already declared in outer section") elif self.parent and not self.parent.is_prange: error(self.pos, "num_threads must be declared in the parent parallel section") elif (self.num_threads.type.is_int and self.num_threads.is_literal and self.num_threads.compile_time_value(env) <= 0): error(self.pos, "argument to num_threads must be greater than 0") if not self.num_threads.is_simple(): self.num_threads = self.num_threads.coerce_to( PyrexTypes.c_int_type, env).coerce_to_temp(env) return self def analyse_sharing_attributes(self, env): """ Analyse the privates for this block and set them in self.privates. 
This should be called in a post-order fashion during the analyse_expressions phase """ for entry, (pos, op) in self.assignments.items(): if self.is_prange and not self.is_parallel: # closely nested prange in a with parallel block, disallow # assigning to privates in the with parallel block (we # consider it too implicit and magicky for users) if entry in self.parent.assignments: error(pos, "Cannot assign to private of outer parallel block") continue if not self.is_prange and op: # Again possible, but considered to magicky error(pos, "Reductions not allowed for parallel blocks") continue # By default all variables should have the same values as if # executed sequentially lastprivate = True self.propagate_var_privatization(entry, pos, op, lastprivate) def propagate_var_privatization(self, entry, pos, op, lastprivate): """ Propagate the sharing attributes of a variable. If the privatization is determined by a parent scope, done propagate further. If we are a prange, we propagate our sharing attributes outwards to other pranges. If we are a prange in parallel block and the parallel block does not determine the variable private, we propagate to the parent of the parent. Recursion stops at parallel blocks, as they have no concept of lastprivate or reduction. So the following cases propagate: sum is a reduction for all loops: for i in prange(n): for j in prange(n): for k in prange(n): sum += i * j * k sum is a reduction for both loops, local_var is private to the parallel with block: for i in prange(n): with parallel: local_var = ... # private to the parallel for j in prange(n): sum += i * j Nested with parallel blocks are disallowed, because they wouldn't allow you to propagate lastprivates or reductions: #pragma omp parallel for lastprivate(i) for i in prange(n): sum = 0 #pragma omp parallel private(j, sum) with parallel: #pragma omp parallel with parallel: #pragma omp for lastprivate(j) reduction(+:sum) for j in prange(n): sum += i # sum and j are well-defined here # sum and j are undefined here # sum and j are undefined here """ self.privates[entry] = (op, lastprivate) if entry.type.is_memoryviewslice: error(pos, "Memoryview slices can only be shared in parallel sections") return if self.is_prange: if not self.is_parallel and entry not in self.parent.assignments: # Parent is a parallel with block parent = self.parent.parent else: parent = self.parent # We don't need to propagate privates, only reductions and # lastprivates if parent and (op or lastprivate): parent.propagate_var_privatization(entry, pos, op, lastprivate) def _allocate_closure_temp(self, code, entry): """ Helper function that allocate a temporary for a closure variable that is assigned to. 
""" if self.parent: return self.parent._allocate_closure_temp(code, entry) if entry.cname in self.seen_closure_vars: return entry.cname cname = code.funcstate.allocate_temp(entry.type, True) # Add both the actual cname and the temp cname, as the actual cname # will be replaced with the temp cname on the entry self.seen_closure_vars.add(entry.cname) self.seen_closure_vars.add(cname) self.modified_entries.append((entry, entry.cname)) code.putln("%s = %s;" % (cname, entry.cname)) entry.cname = cname def initialize_privates_to_nan(self, code, exclude=None): first = True for entry, (op, lastprivate) in sorted(self.privates.items()): if not op and (not exclude or entry != exclude): invalid_value = entry.type.invalid_value() if invalid_value: if first: code.putln("/* Initialize private variables to " "invalid values */") first = False code.putln("%s = %s;" % (entry.cname, entry.type.cast_code(invalid_value))) def evaluate_before_block(self, code, expr): c = self.begin_of_parallel_control_block_point_after_decls # we need to set the owner to ourselves temporarily, as # allocate_temp may generate a comment in the middle of our pragma # otherwise when DebugFlags.debug_temp_code_comments is in effect owner = c.funcstate.owner c.funcstate.owner = c expr.generate_evaluation_code(c) c.funcstate.owner = owner return expr.result() def put_num_threads(self, code): """ Write self.num_threads if set as the num_threads OpenMP directive """ if self.num_threads is not None: code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads)) def declare_closure_privates(self, code): """ If a variable is in a scope object, we need to allocate a temp and assign the value from the temp to the variable in the scope object after the parallel section. This kind of copying should be done only in the outermost parallel section. """ self.modified_entries = [] for entry in sorted(self.assignments): if entry.from_closure or entry.in_closure: self._allocate_closure_temp(code, entry) def release_closure_privates(self, code): """ Release any temps used for variables in scope objects. As this is the outermost parallel block, we don't need to delete the cnames from self.seen_closure_vars. """ for entry, original_cname in self.modified_entries: code.putln("%s = %s;" % (original_cname, entry.cname)) code.funcstate.release_temp(entry.cname) entry.cname = original_cname def privatize_temps(self, code, exclude_temps=()): """ Make any used temporaries private. Before the relevant code block code.start_collecting_temps() should have been called. 
""" if self.is_parallel: c = self.privatization_insertion_point self.temps = temps = code.funcstate.stop_collecting_temps() privates, firstprivates = [], [] for temp, type in sorted(temps): if type.is_pyobject or type.is_memoryviewslice: firstprivates.append(temp) else: privates.append(temp) if privates: c.put(" private(%s)" % ", ".join(privates)) if firstprivates: c.put(" firstprivate(%s)" % ", ".join(firstprivates)) if self.breaking_label_used: shared_vars = [Naming.parallel_why] if self.error_label_used: shared_vars.extend(self.parallel_exc) c.put(" private(%s, %s, %s)" % self.pos_info) c.put(" shared(%s)" % ', '.join(shared_vars)) def cleanup_temps(self, code): # Now clean up any memoryview slice and object temporaries if self.is_parallel and not self.is_nested_prange: code.putln("/* Clean up any temporaries */") for temp, type in sorted(self.temps): if type.is_memoryviewslice: code.put_xdecref_memoryviewslice(temp, have_gil=False) elif type.is_pyobject: code.put_xdecref(temp, type) code.putln("%s = NULL;" % temp) def setup_parallel_control_flow_block(self, code): """ Sets up a block that surrounds the parallel block to determine how the parallel section was exited. Any kind of return is trapped (break, continue, return, exceptions). This is the idea: { int why = 0; #pragma omp parallel { return # -> goto new_return_label; goto end_parallel; new_return_label: why = 3; goto end_parallel; end_parallel:; #pragma omp flush(why) # we need to flush for every iteration } if (why == 3) goto old_return_label; } """ self.old_loop_labels = code.new_loop_labels() self.old_error_label = code.new_error_label() self.old_return_label = code.return_label code.return_label = code.new_label(name="return") code.begin_block() # parallel control flow block self.begin_of_parallel_control_block_point = code.insertion_point() self.begin_of_parallel_control_block_point_after_decls = code.insertion_point() self.undef_builtin_expect_apple_gcc_bug(code) def begin_parallel_block(self, code): """ Each OpenMP thread in a parallel section that contains a with gil block must have the thread-state initialized. The call to PyGILState_Release() then deallocates our threadstate. If we wouldn't do this, each with gil block would allocate and deallocate one, thereby losing exception information before it can be saved before leaving the parallel section. """ self.begin_of_parallel_block = code.insertion_point() def end_parallel_block(self, code): """ To ensure all OpenMP threads have thread states, we ensure the GIL in each thread (which creates a thread state if it doesn't exist), after which we release the GIL. On exit, reacquire the GIL and release the thread state. If compiled without OpenMP support (at the C level), then we still have to acquire the GIL to decref any object temporaries. """ if self.error_label_used: begin_code = self.begin_of_parallel_block end_code = code begin_code.putln("#ifdef _OPENMP") begin_code.put_ensure_gil(declare_gilstate=True) begin_code.putln("Py_BEGIN_ALLOW_THREADS") begin_code.putln("#endif /* _OPENMP */") end_code.putln("#ifdef _OPENMP") end_code.putln("Py_END_ALLOW_THREADS") end_code.putln("#else") end_code.put_safe("{\n") end_code.put_ensure_gil() end_code.putln("#endif /* _OPENMP */") self.cleanup_temps(end_code) end_code.put_release_ensured_gil() end_code.putln("#ifndef _OPENMP") end_code.put_safe("}\n") end_code.putln("#endif /* _OPENMP */") def trap_parallel_exit(self, code, should_flush=False): """ Trap any kind of return inside a parallel construct. 
'should_flush' indicates whether the variable should be flushed, which is needed by prange to skip the loop. It also indicates whether we need to register a continue (we need this for parallel blocks, but not for prange loops, as it is a direct jump there). It uses the same mechanism as try/finally: 1 continue 2 break 3 return 4 error """ save_lastprivates_label = code.new_label() dont_return_label = code.new_label() self.any_label_used = False self.breaking_label_used = False self.error_label_used = False self.parallel_private_temps = [] all_labels = code.get_all_labels() # Figure this out before starting to generate any code for label in all_labels: if code.label_used(label): self.breaking_label_used = (self.breaking_label_used or label != code.continue_label) self.any_label_used = True if self.any_label_used: code.put_goto(dont_return_label) for i, label in enumerate(all_labels): if not code.label_used(label): continue is_continue_label = label == code.continue_label code.put_label(label) if not (should_flush and is_continue_label): if label == code.error_label: self.error_label_used = True self.fetch_parallel_exception(code) code.putln("%s = %d;" % (Naming.parallel_why, i + 1)) if (self.breaking_label_used and self.is_prange and not is_continue_label): code.put_goto(save_lastprivates_label) else: code.put_goto(dont_return_label) if self.any_label_used: if self.is_prange and self.breaking_label_used: # Don't rely on lastprivate, save our lastprivates code.put_label(save_lastprivates_label) self.save_parallel_vars(code) code.put_label(dont_return_label) if should_flush and self.breaking_label_used: code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why) def save_parallel_vars(self, code): """ The following shenanigans are instated when we break, return or propagate errors from a prange. In this case we cannot rely on lastprivate() to do its job, as no iterations may have executed yet in the last thread, leaving the values undefined. It is most likely that the breaking thread has well-defined values of the lastprivate variables, so we keep those values. """ section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter code.putln_openmp("#pragma omp critical(%s)" % section_name) ParallelStatNode.critical_section_counter += 1 code.begin_block() # begin critical section c = self.begin_of_parallel_control_block_point temp_count = 0 for entry, (op, lastprivate) in sorted(self.privates.items()): if not lastprivate or entry.type.is_pyobject: continue type_decl = entry.type.empty_declaration_code() temp_cname = "__pyx_parallel_temp%d" % temp_count private_cname = entry.cname temp_count += 1 invalid_value = entry.type.invalid_value() if invalid_value: init = ' = ' + invalid_value else: init = '' # Declare the parallel private in the outer block c.putln("%s %s%s;" % (type_decl, temp_cname, init)) # Initialize before escaping code.putln("%s = %s;" % (temp_cname, private_cname)) self.parallel_private_temps.append((temp_cname, private_cname)) code.end_block() # end critical section def fetch_parallel_exception(self, code): """ As each OpenMP thread may raise an exception, we need to fetch that exception from the threadstate and save it for after the parallel section where it can be re-raised in the master thread. 
Although it would seem that __pyx_filename, __pyx_lineno and __pyx_clineno are only assigned to under exception conditions (i.e., when we have the GIL), and thus should be allowed to be shared without any race condition, they are in fact subject to the same race conditions that they were previously when they were global variables and functions were allowed to release the GIL: thread A thread B acquire set lineno release acquire set lineno release acquire fetch exception release skip the fetch deallocate threadstate deallocate threadstate """ code.begin_block() code.put_ensure_gil(declare_gilstate=True) code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type) code.putln( "if (!%s) {" % Naming.parallel_exc_type) code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc) pos_info = chain(*zip(self.parallel_pos_info, self.pos_info)) code.funcstate.uses_error_indicator = True code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) code.put_gotref(Naming.parallel_exc_type) code.putln( "}") code.put_release_ensured_gil() code.end_block() def restore_parallel_exception(self, code): "Re-raise a parallel exception" code.begin_block() code.put_ensure_gil(declare_gilstate=True) code.put_giveref(Naming.parallel_exc_type) code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc) pos_info = chain(*zip(self.pos_info, self.parallel_pos_info)) code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) code.put_release_ensured_gil() code.end_block() def restore_labels(self, code): """ Restore all old labels. Call this before the 'else' clause to for loops and always before ending the parallel control flow block. """ code.set_all_labels(self.old_loop_labels + (self.old_return_label, self.old_error_label)) def end_parallel_control_flow_block( self, code, break_=False, continue_=False, return_=False): """ This ends the parallel control flow block and based on how the parallel section was exited, takes the corresponding action. The break_ and continue_ parameters indicate whether these should be propagated outwards: for i in prange(...): with cython.parallel.parallel(): continue Here break should be trapped in the parallel block, and propagated to the for loop. """ c = self.begin_of_parallel_control_block_point # Firstly, always prefer errors over returning, continue or break if self.error_label_used: c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info) c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc) code.putln( "if (%s) {" % Naming.parallel_exc_type) code.putln("/* This may have been overridden by a continue, " "break or return in another thread. Prefer the error. 
*/") code.putln("%s = 4;" % Naming.parallel_why) code.putln( "}") if continue_: any_label_used = self.any_label_used else: any_label_used = self.breaking_label_used if any_label_used: # __pyx_parallel_why is used, declare and initialize c.putln("int %s;" % Naming.parallel_why) c.putln("%s = 0;" % Naming.parallel_why) code.putln( "if (%s) {" % Naming.parallel_why) for temp_cname, private_cname in self.parallel_private_temps: code.putln("%s = %s;" % (private_cname, temp_cname)) code.putln("switch (%s) {" % Naming.parallel_why) if continue_: code.put(" case 1: ") code.put_goto(code.continue_label) if break_: code.put(" case 2: ") code.put_goto(code.break_label) if return_: code.put(" case 3: ") code.put_goto(code.return_label) if self.error_label_used: code.globalstate.use_utility_code(restore_exception_utility_code) code.putln(" case 4:") self.restore_parallel_exception(code) code.put_goto(code.error_label) code.putln("}") # end switch code.putln( "}") # end if code.end_block() # end parallel control flow block self.redef_builtin_expect_apple_gcc_bug(code) # FIXME: improve with version number for OS X Lion buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))" have_expect_condition = "(defined(__GNUC__) && " \ "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))" redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition) def undef_builtin_expect_apple_gcc_bug(self, code): """ A bug on OS X Lion disallows __builtin_expect macros. This code avoids them """ if not self.parent: code.undef_builtin_expect(self.redef_condition) def redef_builtin_expect_apple_gcc_bug(self, code): if not self.parent: code.redef_builtin_expect(self.redef_condition) class ParallelWithBlockNode(ParallelStatNode): """ This node represents a 'with cython.parallel.parallel():' block """ valid_keyword_arguments = ['num_threads'] num_threads = None def analyse_declarations(self, env): super(ParallelWithBlockNode, self).analyse_declarations(env) if self.args: error(self.pos, "cython.parallel.parallel() does not take " "positional arguments") def generate_execution_code(self, code): self.declare_closure_privates(code) self.setup_parallel_control_flow_block(code) code.putln("#ifdef _OPENMP") code.put("#pragma omp parallel ") if self.privates: privates = [e.cname for e in self.privates if not e.type.is_pyobject] code.put('private(%s)' % ', '.join(sorted(privates))) self.privatization_insertion_point = code.insertion_point() self.put_num_threads(code) code.putln("") code.putln("#endif /* _OPENMP */") code.begin_block() # parallel block self.begin_parallel_block(code) self.initialize_privates_to_nan(code) code.funcstate.start_collecting_temps() self.body.generate_execution_code(code) self.trap_parallel_exit(code) self.privatize_temps(code) self.end_parallel_block(code) code.end_block() # end parallel block continue_ = code.label_used(code.continue_label) break_ = code.label_used(code.break_label) return_ = code.label_used(code.return_label) self.restore_labels(code) self.end_parallel_control_flow_block(code, break_=break_, continue_=continue_, return_=return_) self.release_closure_privates(code) class ParallelRangeNode(ParallelStatNode): """ This node represents a 'for i in cython.parallel.prange():' construct. 
target NameNode the target iteration variable else_clause Node or None the else clause of this loop """ child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads', 'chunksize'] body = target = else_clause = args = None start = stop = step = None is_prange = True nogil = None schedule = None valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize'] def __init__(self, pos, **kwds): super(ParallelRangeNode, self).__init__(pos, **kwds) # Pretend to be a ForInStatNode for control flow analysis self.iterator = PassStatNode(pos) def analyse_declarations(self, env): super(ParallelRangeNode, self).analyse_declarations(env) self.target.analyse_target_declaration(env) if self.else_clause is not None: self.else_clause.analyse_declarations(env) if not self.args or len(self.args) > 3: error(self.pos, "Invalid number of positional arguments to prange") return if len(self.args) == 1: self.stop, = self.args elif len(self.args) == 2: self.start, self.stop = self.args else: self.start, self.stop, self.step = self.args if hasattr(self.schedule, 'decode'): self.schedule = self.schedule.decode('ascii') if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'): error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,)) def analyse_expressions(self, env): was_nogil = env.nogil if self.nogil: env.nogil = True if self.target is None: error(self.pos, "prange() can only be used as part of a for loop") return self self.target = self.target.analyse_target_types(env) if not self.target.type.is_numeric: # Not a valid type, assume one for now anyway if not self.target.type.is_pyobject: # nogil_check will catch the is_pyobject case error(self.target.pos, "Must be of numeric type, not %s" % self.target.type) self.index_type = PyrexTypes.c_py_ssize_t_type else: self.index_type = self.target.type if not self.index_type.signed: warning(self.target.pos, "Unsigned index type not allowed before OpenMP 3.0", level=2) # Setup start, stop and step, allocating temps if needed self.names = 'start', 'stop', 'step' start_stop_step = self.start, self.stop, self.step for node, name in zip(start_stop_step, self.names): if node is not None: node.analyse_types(env) if not node.type.is_numeric: error(node.pos, "%s argument must be numeric" % name) continue if not node.is_literal: node = node.coerce_to_temp(env) setattr(self, name, node) # As we range from 0 to nsteps, computing the index along the # way, we need a fitting type for 'i' and 'nsteps' self.index_type = PyrexTypes.widest_numeric_type( self.index_type, node.type) if self.else_clause is not None: self.else_clause = self.else_clause.analyse_expressions(env) # Although not actually an assignment in this scope, it should be # treated as such to ensure it is unpacked if a closure temp, and to # ensure lastprivate behaviour and propagation. 
If the target index is # not a NameNode, it won't have an entry, and an error was issued by # ParallelRangeTransform if hasattr(self.target, 'entry'): self.assignments[self.target.entry] = self.target.pos, None node = super(ParallelRangeNode, self).analyse_expressions(env) if node.chunksize: if not node.schedule: error(node.chunksize.pos, "Must provide schedule with chunksize") elif node.schedule == 'runtime': error(node.chunksize.pos, "Chunksize not valid for the schedule runtime") elif (node.chunksize.type.is_int and node.chunksize.is_literal and node.chunksize.compile_time_value(env) <= 0): error(node.chunksize.pos, "Chunksize must not be negative") node.chunksize = node.chunksize.coerce_to( PyrexTypes.c_int_type, env).coerce_to_temp(env) if node.nogil: env.nogil = was_nogil node.is_nested_prange = node.parent and node.parent.is_prange if node.is_nested_prange: parent = node while parent.parent and parent.parent.is_prange: parent = parent.parent parent.assignments.update(node.assignments) parent.privates.update(node.privates) parent.assigned_nodes.extend(node.assigned_nodes) return node def nogil_check(self, env): names = 'start', 'stop', 'step', 'target' nodes = self.start, self.stop, self.step, self.target for name, node in zip(names, nodes): if node is not None and node.type.is_pyobject: error(node.pos, "%s may not be a Python object " "as we don't have the GIL" % name) def generate_execution_code(self, code): """ Generate code in the following steps 1) copy any closure variables determined thread-private into temporaries 2) allocate temps for start, stop and step 3) generate a loop that calculates the total number of steps, which then computes the target iteration variable for every step: for i in prange(start, stop, step): ... becomes nsteps = (stop - start) / step; i = start; #pragma omp parallel for lastprivate(i) for (temp = 0; temp < nsteps; temp++) { i = start + step * temp; ... } Note that accumulation of 'i' would have a data dependency between iterations. Also, you can't do this for (i = start; i < stop; i += step) ... as the '<' operator should become '>' for descending loops. 'for i from x < i < y:' does not suffer from this problem as the relational operator is known at compile time! 4) release our temps and write back any private closure variables """ self.declare_closure_privates(code) # This can only be a NameNode target_index_cname = self.target.entry.cname # This will be used as the dict to format our code strings, holding # the start, stop , step, temps and target cnames fmt_dict = { 'target': target_index_cname, 'target_type': self.target.type.empty_declaration_code() } # Setup start, stop and step, allocating temps if needed start_stop_step = self.start, self.stop, self.step defaults = '0', '0', '1' for node, name, default in zip(start_stop_step, self.names, defaults): if node is None: result = default elif node.is_literal: result = node.get_constant_c_result_code() else: node.generate_evaluation_code(code) result = node.result() fmt_dict[name] = result fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False) fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False) # TODO: check if the step is 0 and if so, raise an exception in a # 'with gil' block. 
For now, just abort code.putln("if (%(step)s == 0) abort();" % fmt_dict) self.setup_parallel_control_flow_block(code) # parallel control flow block self.control_flow_var_code_point = code.insertion_point() # Note: nsteps is private in an outer scope if present code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict) # The target iteration variable might not be initialized, do it only if # we are executing at least 1 iteration, otherwise we should leave the # target unaffected. The target iteration variable is firstprivate to # shut up compiler warnings caused by lastprivate, as the compiler # erroneously believes that nsteps may be <= 0, leaving the private # target index uninitialized code.putln("if (%(nsteps)s > 0)" % fmt_dict) code.begin_block() # if block self.generate_loop(code, fmt_dict) code.end_block() # end if block self.restore_labels(code) if self.else_clause: if self.breaking_label_used: code.put("if (%s < 2)" % Naming.parallel_why) code.begin_block() # else block code.putln("/* else */") self.else_clause.generate_execution_code(code) code.end_block() # end else block # ------ cleanup ------ self.end_parallel_control_flow_block(code) # end parallel control flow block # And finally, release our privates and write back any closure # variables for temp in start_stop_step + (self.chunksize, self.num_threads): if temp is not None: temp.generate_disposal_code(code) temp.free_temps(code) code.funcstate.release_temp(fmt_dict['i']) code.funcstate.release_temp(fmt_dict['nsteps']) self.release_closure_privates(code) def generate_loop(self, code, fmt_dict): if self.is_nested_prange: code.putln("#if 0") else: code.putln("#ifdef _OPENMP") if not self.is_parallel: code.put("#pragma omp for") self.privatization_insertion_point = code.insertion_point() reduction_codepoint = self.parent.privatization_insertion_point else: code.put("#pragma omp parallel") self.privatization_insertion_point = code.insertion_point() reduction_codepoint = self.privatization_insertion_point code.putln("") code.putln("#endif /* _OPENMP */") code.begin_block() # pragma omp parallel begin block # Initialize the GIL if needed for this thread self.begin_parallel_block(code) if self.is_nested_prange: code.putln("#if 0") else: code.putln("#ifdef _OPENMP") code.put("#pragma omp for") for entry, (op, lastprivate) in sorted(self.privates.items()): # Don't declare the index variable as a reduction if op and op in "+*-&^|" and entry != self.target.entry: if entry.type.is_pyobject: error(self.pos, "Python objects cannot be reductions") else: #code.put(" reduction(%s:%s)" % (op, entry.cname)) # This is the only way reductions + nesting works in gcc4.5 reduction_codepoint.put( " reduction(%s:%s)" % (op, entry.cname)) else: if entry == self.target.entry: code.put(" firstprivate(%s)" % entry.cname) code.put(" lastprivate(%s)" % entry.cname) continue if not entry.type.is_pyobject: if lastprivate: private = 'lastprivate' else: private = 'private' code.put(" %s(%s)" % (private, entry.cname)) if self.schedule: if self.chunksize: chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize) else: chunksize = "" code.put(" schedule(%s%s)" % (self.schedule, chunksize)) self.put_num_threads(reduction_codepoint) code.putln("") code.putln("#endif /* _OPENMP */") code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict) code.begin_block() # for loop block guard_around_body_codepoint = code.insertion_point() # Start if guard block around the body. 
This may be unnecessary, but # at least it doesn't spoil indentation code.begin_block() code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict) self.initialize_privates_to_nan(code, exclude=self.target.entry) if self.is_parallel: code.funcstate.start_collecting_temps() self.body.generate_execution_code(code) self.trap_parallel_exit(code, should_flush=True) self.privatize_temps(code) if self.breaking_label_used: # Put a guard around the loop body in case return, break or # exceptions might be used guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why) code.end_block() # end guard around loop body code.end_block() # end for loop block if self.is_parallel: # Release the GIL and deallocate the thread state self.end_parallel_block(code) code.end_block() # pragma omp parallel end block class CnameDecoratorNode(StatNode): """ This node is for the cname decorator in CythonUtilityCode: @cname('the_cname') cdef func(...): ... In case of a cdef class the cname specifies the objstruct_cname. node the node to which the cname decorator is applied cname the cname the node should get """ child_attrs = ['node'] def analyse_declarations(self, env): self.node.analyse_declarations(env) node = self.node if isinstance(node, CompilerDirectivesNode): node = node.body.stats[0] self.is_function = isinstance(node, FuncDefNode) is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode)) e = node.entry if self.is_function: e.cname = self.cname e.func_cname = self.cname e.used = True if e.pyfunc_cname and '.' in e.pyfunc_cname: e.pyfunc_cname = self.mangle(e.pyfunc_cname) elif is_struct_or_enum: e.cname = e.type.cname = self.cname else: scope = node.scope e.cname = self.cname e.type.objstruct_cname = self.cname + '_obj' e.type.typeobj_cname = Naming.typeobj_prefix + self.cname e.type.typeptr_cname = self.cname + '_type' e.type.scope.namespace_cname = e.type.typeptr_cname e.as_variable.cname = e.type.typeptr_cname scope.scope_prefix = self.cname + "_" for name, entry in scope.entries.items(): if entry.func_cname: entry.func_cname = self.mangle(entry.cname) if entry.pyfunc_cname: entry.pyfunc_cname = self.mangle(entry.pyfunc_cname) def mangle(self, cname): if '.' in cname: # remove __pyx_base from func_cname cname = cname.split('.')[-1] return '%s_%s' % (self.cname, cname) def analyse_expressions(self, env): self.node = self.node.analyse_expressions(env) return self def generate_function_definitions(self, env, code): "Ensure a prototype for every @cname method in the right place" if self.is_function and env.is_c_class_scope: # method in cdef class, generate a prototype in the header h_code = code.globalstate['utility_code_proto'] if isinstance(self.node, DefNode): self.node.generate_function_header( h_code, with_pymethdef=False, proto_only=True) else: from . 
import ModuleNode entry = self.node.entry cname = entry.cname entry.cname = entry.func_cname ModuleNode.generate_cfunction_declaration( entry, env.global_scope(), h_code, definition=True) entry.cname = cname self.node.generate_function_definitions(env, code) def generate_execution_code(self, code): self.node.generate_execution_code(code) #------------------------------------------------------------------------------------ # # Runtime support code # #------------------------------------------------------------------------------------ if Options.gcc_branch_hints: branch_prediction_macros = """ /* Test for GCC > 2.95 */ #if defined(__GNUC__) \ && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ """ else: branch_prediction_macros = """ #define likely(x) (x) #define unlikely(x) (x) """ #------------------------------------------------------------------------------------ printing_utility_code = UtilityCode.load_cached("Print", "Printing.c") printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c") #------------------------------------------------------------------------------------ # Exception raising code # # Exceptions are raised by __Pyx_Raise() and stored as plain # type/value/tb in PyThreadState->curexc_*. When being caught by an # 'except' statement, curexc_* is moved over to exc_* by # __Pyx_GetException() restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c") raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c") get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c") swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c") reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c") traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c") #------------------------------------------------------------------------------------ get_exception_tuple_utility_code = UtilityCode( proto=""" static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/ """, # I doubt that calling __Pyx_GetException() here is correct as it moves # the exception from tstate->curexc_* to tstate->exc_*, which prevents # exception handlers later on from receiving it. # NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro impl = """ static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) { PyObject *type = NULL, *value = NULL, *tb = NULL; if (__Pyx_GetException(&type, &value, &tb) == 0) { PyObject* exc_info = PyTuple_New(3); if (exc_info) { Py_INCREF(type); Py_INCREF(value); Py_INCREF(tb); PyTuple_SET_ITEM(exc_info, 0, type); PyTuple_SET_ITEM(exc_info, 1, value); PyTuple_SET_ITEM(exc_info, 2, tb); return exc_info; } } return NULL; } """, requires=[get_exception_utility_code])
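# A rough sequential sketch of the loop rewriting described in the docstring of
# ParallelRangeNode.generate_execution_code above: "for i in prange(start, stop,
# step)" is flattened into a loop over a step counter so that computing the index
# has no cross-iteration data dependency and OpenMP can parallelise it.  This is
# plain Python for illustration only; the real node emits C, where the division
# truncates toward zero and an "if (nsteps > 0)" guard skips empty loops.
def _prange_index_model(start, stop, step):
    """Yield the indices a prange(start, stop, step) loop would visit."""
    nsteps = (stop - start + step - step // abs(step)) // step
    for temp in range(max(nsteps, 0)):
        yield start + step * temp

assert list(_prange_index_model(0, 10, 3)) == [0, 3, 6, 9]
assert list(_prange_index_model(10, 0, -3)) == [10, 7, 4, 1]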
py
1a3dc19581202b6aa10b2bd8c27180e52b5e3902
import mxnet as mx import numpy as np class SEC_expand_loss(mx.metric.EvalMetric): def __init__(self): super(SEC_expand_loss, self).__init__("SEC_expand_loss") def update(self, labels, preds): self.num_inst += 1 self.sum_metric += preds[2].asnumpy()[0] class SEC_seed_loss(mx.metric.EvalMetric): def __init__(self): super(SEC_seed_loss, self).__init__("SEC_seed_loss") def update(self, labels, preds): self.num_inst += 1 self.sum_metric += preds[0].asnumpy()[0] class SEC_constrain_loss(mx.metric.EvalMetric): def __init__(self): super(SEC_constrain_loss, self).__init__("SEC_constrain_loss") def update(self, labels, preds): self.num_inst += 1 self.sum_metric += preds[1].asnumpy()[0] class L2Loss(mx.metric.EvalMetric): def __init__(self): super(L2Loss, self).__init__('L2Loss') def update(self, labels, preds): labels = labels[0].asnumpy() preds = preds[0].asnumpy() labels = labels.reshape(-1) preds = preds.reshape(-1) self.num_inst += labels.shape[0] res = np.sum((labels - preds) * (labels - preds)) self.sum_metric += res class MultiLogisticLoss(mx.metric.EvalMetric): def __init__(self, l_index=0, p_index=0): self.epsilon = 1e-20 self.l_index = l_index self.p_index = p_index super(MultiLogisticLoss, self).__init__('MultiLogisticLoss') def update(self, labels, preds): labels = labels[self.l_index].asnumpy() preds = preds[self.p_index].asnumpy() labels = labels.reshape(-1) preds = preds.reshape(-1) self.num_inst += labels.shape[0] res = 0 pred_l1 = preds[labels == 1] pred_l1[pred_l1 <= self.epsilon] = self.epsilon pred_l2 = 1 - preds[labels == 0] pred_l2[pred_l2 <= self.epsilon] = self.epsilon res += -np.log(pred_l1).sum() res += -np.log(pred_l2).sum() self.sum_metric += res class Loss(mx.metric.EvalMetric): """Calculate loss""" def __init__(self): super(Loss, self).__init__('loss') def update(self, labels, preds): label = labels[0].asnumpy() pred = preds[0].asnumpy() pred = pred.reshape(pred.shape[0],pred.shape[1], -1) label = label.astype(np.int32) valid_index = label != 255 prob = np.swapaxes(pred, 0, 1) prob = prob[:, valid_index] label = label[valid_index] loss = np.sum(-np.log(prob[label, np.arange(len(label))])) self.sum_metric += loss self.num_inst += valid_index.sum() class Accuracy(mx.metric.EvalMetric): """Calculate accuracy""" def __init__(self): super(Accuracy, self).__init__('accuracy') def update(self, labels, preds): label = labels[0].asnumpy() pred = preds[0].asnumpy() pred = pred.argmax(1) pred = pred.astype(np.int32).reshape(pred.shape[0], -1) label = label.astype(np.int32) valid_index = label != 255 self.sum_metric += (label[valid_index] == pred[valid_index]).sum() self.num_inst += valid_index.sum() class IOU(object): def __init__(self, class_num, class_names, ignored_label=255): self.ignored_label = ignored_label self.class_num = class_num self.class_names = class_names assert len(class_names) == class_num self.conf_mat = None self.reset() def reset(self): self.conf_mat = np.zeros((self.class_num, self.class_num), dtype=np.ulonglong) def update(self, label, pred_label): label = label.reshape(1, -1) pred_label = pred_label.reshape(1, -1) self.__eval_pair(pred_label, label) def __eval_pair(self, pred_label, label): valid_index = label.flat != self.ignored_label gt = np.extract(valid_index, label.flat) p = np.extract(valid_index, pred_label.flat) temp = np.ravel_multi_index(np.array([gt, p]), (self.conf_mat.shape)) temp_mat = np.bincount(temp, minlength=np.prod(self.conf_mat.shape)).reshape(self.conf_mat.shape) self.conf_mat[:]=self.conf_mat+temp_mat def get(self): return "iou", 
np.mean(self.get_scores()) def get_scores(self): scores = [] for i in range(self.class_num): tp = np.longlong(self.conf_mat[i, i]) gti = np.longlong(self.conf_mat[i, :].sum()) resi = np.longlong(self.conf_mat[:, i].sum()) denom = gti+resi-tp try: res = float(tp)/denom except ZeroDivisionError: res = 0 scores.append(res) return scores def get_class_values(self): return zip(self.class_names, self.get_scores())
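# A minimal sketch of driving the IOU metric defined above.  The module name
# "seg_metrics" is hypothetical -- substitute whatever this file is saved as.
import numpy as np
from seg_metrics import IOU  # hypothetical import path for the file above

metric = IOU(class_num=3, class_names=["background", "cat", "dog"])

label = np.array([[0, 0, 1, 1, 2, 255]])     # pixels labelled 255 are ignored
pred = np.array([[0, 1, 1, 1, 2, 0]])        # predicted class per pixel

metric.update(label, pred)
name, mean_iou = metric.get()                # ("iou", mean of per-class IoU)
per_class = list(metric.get_class_values())  # [(class_name, IoU), ...]
print(name, mean_iou, per_class)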
py
1a3dc1fe81b1e2aeffb17bb72388de1c68d7a1cf
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from matplotlib import pyplot as plt data = np.load("blur_experiment.npz") # Make each column a set of data blurs = data['blurs'] detections = data['detections'].T weighted = data['weighted'].T # Which blur configuration had the best results best_detections = np.argmax(detections, axis=1) best_weighted = np.argmax(weighted, axis=1) best_weighted_blurs = blurs[best_weighted] cumsum = np.cumsum(best_weighted_blurs) average = cumsum / np.arange(1, len(cumsum) + 1) print(best_weighted_blurs) print(len(best_weighted_blurs)) max_counts = np.array([np.argmax(np.bincount(best_weighted_blurs[:i+1])) for i in range(len(best_weighted_blurs))]) # plt.plot(best_detections.T) plt.plot(best_weighted_blurs) plt.plot(average) plt.plot(max_counts) plt.ylabel("Blur Amount") plt.xlabel("Frame Number") plt.title("Weighted detection best performance") plt.legend(["Highest Weighted Confidence Blur", "Best Average Blur", "Best Overall Blur"]) # plt.plot(data['weighted'].T) plt.show()
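# The "max_counts" comprehension above is a running mode: for every frame it
# picks the blur value that has been best most often so far.  A tiny standalone
# check of that idiom on toy data (not taken from the experiment):
import numpy as np

wins = np.array([3, 3, 5, 5, 5, 1])          # toy "best blur per frame" values
running_mode = np.array([np.argmax(np.bincount(wins[:i + 1]))
                         for i in range(len(wins))])
print(running_mode)                          # [3 3 3 3 5 5]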
py
1a3dc260afb6f15d087606bde164bbc1687dd85a
# Copyright 2021 The MediaPipe Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requi_RED by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MediaPipe solution drawing styles.""" from typing import Mapping, Tuple from mediapipe.python.solutions.drawing_utils import DrawingSpec from mediapipe.python.solutions.hands import HandLandmark _RADIUS = 5 _RED = (54, 67, 244) _GREEN = (118, 230, 0) _BLUE = (192, 101, 21) _YELLOW = (0, 204, 255) _GRAY = (174, 164, 144) _PURPLE = (128, 64, 128) _PEACH = (180, 229, 255) # Hands _THICKNESS_WRIST_MCP = 3 _THICKNESS_FINGER = 2 _THICKNESS_DOT = -1 # Hand landmarks _PALM_LANMARKS = (HandLandmark.WRIST, HandLandmark.THUMB_CMC, HandLandmark.INDEX_FINGER_MCP, HandLandmark.MIDDLE_FINGER_MCP, HandLandmark.RING_FINGER_MCP, HandLandmark.PINKY_MCP) _THUMP_LANDMARKS = (HandLandmark.THUMB_MCP, HandLandmark.THUMB_IP, HandLandmark.THUMB_TIP) _INDEX_FINGER_LANDMARKS = (HandLandmark.INDEX_FINGER_PIP, HandLandmark.INDEX_FINGER_DIP, HandLandmark.INDEX_FINGER_TIP) _MIDDLE_FINGER_LANDMARKS = (HandLandmark.MIDDLE_FINGER_PIP, HandLandmark.MIDDLE_FINGER_DIP, HandLandmark.MIDDLE_FINGER_TIP) _RING_FINGER_LANDMARKS = (HandLandmark.RING_FINGER_PIP, HandLandmark.RING_FINGER_DIP, HandLandmark.RING_FINGER_TIP) _PINKY_FINGER_LANDMARKS = (HandLandmark.PINKY_PIP, HandLandmark.PINKY_DIP, HandLandmark.PINKY_TIP) _HAND_LANDMARK_STYLE = { _PALM_LANMARKS: DrawingSpec( color=_RED, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), _THUMP_LANDMARKS: DrawingSpec( color=_PEACH, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), _INDEX_FINGER_LANDMARKS: DrawingSpec( color=_PURPLE, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), _MIDDLE_FINGER_LANDMARKS: DrawingSpec( color=_YELLOW, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), _RING_FINGER_LANDMARKS: DrawingSpec( color=_GREEN, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), _PINKY_FINGER_LANDMARKS: DrawingSpec( color=_BLUE, thickness=_THICKNESS_DOT, circle_radius=_RADIUS), } # Hand connections _PALM_CONNECTIONS = ((HandLandmark.WRIST, HandLandmark.THUMB_CMC), (HandLandmark.WRIST, HandLandmark.INDEX_FINGER_MCP), (HandLandmark.MIDDLE_FINGER_MCP, HandLandmark.RING_FINGER_MCP), (HandLandmark.RING_FINGER_MCP, HandLandmark.PINKY_MCP), (HandLandmark.INDEX_FINGER_MCP, HandLandmark.MIDDLE_FINGER_MCP), (HandLandmark.WRIST, HandLandmark.PINKY_MCP)) _THUMB_CONNECTIONS = ((HandLandmark.THUMB_CMC, HandLandmark.THUMB_MCP), (HandLandmark.THUMB_MCP, HandLandmark.THUMB_IP), (HandLandmark.THUMB_IP, HandLandmark.THUMB_TIP)) _INDEX_FINGER_CONNECTIONS = ((HandLandmark.INDEX_FINGER_MCP, HandLandmark.INDEX_FINGER_PIP), (HandLandmark.INDEX_FINGER_PIP, HandLandmark.INDEX_FINGER_DIP), (HandLandmark.INDEX_FINGER_DIP, HandLandmark.INDEX_FINGER_TIP)) _MIDDLE_FINGER_CONNECTIONS = ((HandLandmark.MIDDLE_FINGER_MCP, HandLandmark.MIDDLE_FINGER_PIP), (HandLandmark.MIDDLE_FINGER_PIP, HandLandmark.MIDDLE_FINGER_DIP), (HandLandmark.MIDDLE_FINGER_DIP, HandLandmark.MIDDLE_FINGER_TIP)) _RING_FINGER_CONNECTIONS = ((HandLandmark.RING_FINGER_MCP, HandLandmark.RING_FINGER_PIP), (HandLandmark.RING_FINGER_PIP, HandLandmark.RING_FINGER_DIP), 
(HandLandmark.RING_FINGER_DIP, HandLandmark.RING_FINGER_TIP)) _PINKY_FINGER_CONNECTIONS = ((HandLandmark.PINKY_MCP, HandLandmark.PINKY_PIP), (HandLandmark.PINKY_PIP, HandLandmark.PINKY_DIP), (HandLandmark.PINKY_DIP, HandLandmark.PINKY_TIP)) _HAND_CONNECTION_STYLE = { _PALM_CONNECTIONS: DrawingSpec(color=_GRAY, thickness=_THICKNESS_WRIST_MCP), _THUMB_CONNECTIONS: DrawingSpec(color=_PEACH, thickness=_THICKNESS_FINGER), _INDEX_FINGER_CONNECTIONS: DrawingSpec(color=_PURPLE, thickness=_THICKNESS_FINGER), _MIDDLE_FINGER_CONNECTIONS: DrawingSpec(color=_YELLOW, thickness=_THICKNESS_FINGER), _RING_FINGER_CONNECTIONS: DrawingSpec(color=_GREEN, thickness=_THICKNESS_FINGER), _PINKY_FINGER_CONNECTIONS: DrawingSpec(color=_BLUE, thickness=_THICKNESS_FINGER) } def get_default_hand_landmark_style() -> Mapping[int, DrawingSpec]: """Returns the default hand landmark drawing style. Returns: A mapping from each hand landmark to the default drawing spec. """ hand_landmark_style = {} for k, v in _HAND_LANDMARK_STYLE.items(): for landmark in k: hand_landmark_style[landmark] = v return hand_landmark_style def get_default_hand_connection_style( ) -> Mapping[Tuple[int, int], DrawingSpec]: """Returns the default hand connection drawing style. Returns: A mapping from each hand connection to the default drawing spec. """ hand_connection_style = {} for k, v in _HAND_CONNECTION_STYLE.items(): for connection in k: hand_connection_style[connection] = v return hand_connection_style
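# A hedged usage sketch for the style helpers above.  The import path
# "mediapipe.python.solutions.drawing_styles" is an assumption about where this
# file lives; drawing_utils.draw_landmarks and the hands solution come from the
# same package the file itself imports from.
import numpy as np
from mediapipe.python.solutions import drawing_utils, hands
from mediapipe.python.solutions import drawing_styles  # assumed module name

frame_rgb = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a camera frame

with hands.Hands(static_image_mode=True) as detector:
    results = detector.process(frame_rgb)

# On a blank frame nothing is detected; with a real image this draws each hand
# using the per-landmark and per-connection specs defined above.
if results.multi_hand_landmarks:
    for hand_landmarks in results.multi_hand_landmarks:
        drawing_utils.draw_landmarks(
            frame_rgb,
            hand_landmarks,
            hands.HAND_CONNECTIONS,
            landmark_drawing_spec=drawing_styles.get_default_hand_landmark_style(),
            connection_drawing_spec=drawing_styles.get_default_hand_connection_style())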
py
1a3dc26e666eabe0e4fabdd747830cb101c4fd1b
from sympy.concrete import Sum from sympy.concrete.delta import deltaproduct, deltasummation from sympy.core import Eq, S, symbols, oo from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold from sympy.logic import And i, j, k, l, m = symbols("i j k l m", integer=True) x, y = symbols("x y", commutative=False) KD = KroneckerDelta def test_deltaproduct(): dp = deltaproduct assert dp(x, (j, 1, 0)) == 1 assert dp(x, (j, 1, 3)) == x**3 assert dp(x + y, (j, 1, 3)) == (x + y)**3 assert dp(x*y, (j, 1, 3)) == (x*y)**3 assert dp(KD(i, j), (k, 1, 3)) == KD(i, j) assert dp(x*KD(i, j), (k, 1, 3)) == x**3*KD(i, j) assert dp(x*y*KD(i, j), (k, 1, 3)) == (x*y)**3*KD(i, j) assert dp(KD(i, j), (j, 1, 3)) == 0 assert dp(KD(i, j), (j, 1, 1)) == KD(i, 1) assert dp(KD(i, j), (j, 2, 2)) == KD(i, 2) assert dp(KD(i, j), (j, 3, 3)) == KD(i, 3) assert dp(KD(i, j), (j, 1, k)) == KD(i, 1)*KD(k, 1) + KD(k, 0) assert dp(KD(i, j), (j, k, 3)) == KD(i, 3)*KD(k, 3) + KD(k, 4) assert dp(KD(i, j), (j, k, l)) == KD(i, l)*KD(k, l) + KD(k, l + 1) assert dp(x*KD(i, j), (j, 1, 3)) == 0 assert dp(x*KD(i, j), (j, 1, 1)) == x*KD(i, 1) assert dp(x*KD(i, j), (j, 2, 2)) == x*KD(i, 2) assert dp(x*KD(i, j), (j, 3, 3)) == x*KD(i, 3) assert dp(x*KD(i, j), (j, 1, k)) == x*KD(i, 1)*KD(k, 1) + KD(k, 0) assert dp(x*KD(i, j), (j, k, 3)) == x*KD(i, 3)*KD(k, 3) + KD(k, 4) assert dp(x*KD(i, j), (j, k, l)) == x*KD(i, l)*KD(k, l) + KD(k, l + 1) assert dp((x + y)*KD(i, j), (j, 1, 3)) == 0 assert dp((x + y)*KD(i, j), (j, 1, 1)) == (x + y)*KD(i, 1) assert dp((x + y)*KD(i, j), (j, 2, 2)) == (x + y)*KD(i, 2) assert dp((x + y)*KD(i, j), (j, 3, 3)) == (x + y)*KD(i, 3) assert dp((x + y)*KD(i, j), (j, 1, k)) == \ (x + y)*KD(i, 1)*KD(k, 1) + KD(k, 0) assert dp((x + y)*KD(i, j), (j, k, 3)) == \ (x + y)*KD(i, 3)*KD(k, 3) + KD(k, 4) assert dp((x + y)*KD(i, j), (j, k, l)) == \ (x + y)*KD(i, l)*KD(k, l) + KD(k, l + 1) assert dp(KD(i, k) + KD(j, k), (k, 1, 3)) == 0 assert dp(KD(i, k) + KD(j, k), (k, 1, 1)) == KD(i, 1) + KD(j, 1) assert dp(KD(i, k) + KD(j, k), (k, 2, 2)) == KD(i, 2) + KD(j, 2) assert dp(KD(i, k) + KD(j, k), (k, 3, 3)) == KD(i, 3) + KD(j, 3) assert dp(KD(i, k) + KD(j, k), (k, 1, l)) == KD(l, 0) + \ KD(i, 1)*KD(l, 1) + KD(j, 1)*KD(l, 1) + \ KD(i, 1)*KD(j, 2)*KD(l, 2) + KD(j, 1)*KD(i, 2)*KD(l, 2) assert dp(KD(i, k) + KD(j, k), (k, l, 3)) == KD(l, 4) + \ KD(i, 3)*KD(l, 3) + KD(j, 3)*KD(l, 3) + \ KD(i, 2)*KD(j, 3)*KD(l, 2) + KD(i, 3)*KD(j, 2)*KD(l, 2) assert dp(KD(i, k) + KD(j, k), (k, l, m)) == KD(l, m + 1) + \ KD(i, m)*KD(l, m) + KD(j, m)*KD(l, m) + \ KD(i, m)*KD(j, m - 1)*KD(l, m - 1) + KD(i, m - 1)*KD(j, m)*KD(l, m - 1) assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0 assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == x*(KD(i, 1) + KD(j, 1)) assert dp(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == x*(KD(i, 2) + KD(j, 2)) assert dp(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == x*(KD(i, 3) + KD(j, 3)) assert dp(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \ x*KD(i, 1)*KD(l, 1) + x*KD(j, 1)*KD(l, 1) + \ x**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + x**2*KD(j, 1)*KD(i, 2)*KD(l, 2) assert dp(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \ x*KD(i, 3)*KD(l, 3) + x*KD(j, 3)*KD(l, 3) + \ x**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + x**2*KD(i, 3)*KD(j, 2)*KD(l, 2) assert dp(x*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \ x*KD(i, m)*KD(l, m) + x*KD(j, m)*KD(l, m) + \ x**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \ x**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == 0 assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == \ (x + 
y)*(KD(i, 1) + KD(j, 1)) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == \ (x + y)*(KD(i, 2) + KD(j, 2)) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == \ (x + y)*(KD(i, 3) + KD(j, 3)) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == KD(l, 0) + \ (x + y)*KD(i, 1)*KD(l, 1) + (x + y)*KD(j, 1)*KD(l, 1) + \ (x + y)**2*KD(i, 1)*KD(j, 2)*KD(l, 2) + \ (x + y)**2*KD(j, 1)*KD(i, 2)*KD(l, 2) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == KD(l, 4) + \ (x + y)*KD(i, 3)*KD(l, 3) + (x + y)*KD(j, 3)*KD(l, 3) + \ (x + y)**2*KD(i, 2)*KD(j, 3)*KD(l, 2) + \ (x + y)**2*KD(i, 3)*KD(j, 2)*KD(l, 2) assert dp((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == KD(l, m + 1) + \ (x + y)*KD(i, m)*KD(l, m) + (x + y)*KD(j, m)*KD(l, m) + \ (x + y)**2*KD(i, m - 1)*KD(j, m)*KD(l, m - 1) + \ (x + y)**2*KD(i, m)*KD(j, m - 1)*KD(l, m - 1) assert dp(x*y + x*KD(i, j), (j, 1, 3)) == (x*y)**3 + \ x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3) assert dp(x*y + x*KD(i, j), (j, 1, 1)) == x*y + x*KD(i, 1) assert dp(x*y + x*KD(i, j), (j, 2, 2)) == x*y + x*KD(i, 2) assert dp(x*y + x*KD(i, j), (j, 3, 3)) == x*y + x*KD(i, 3) assert dp(x*y + x*KD(i, j), (j, 1, k)) == \ (x*y)**k + (x*y)**(i - 1)*x*(x*y)**(k - i) assert dp(x*y + x*KD(i, j), (j, k, 3)) == \ (x*y)**(-k + 4) + (x*y)**(i - k)*x*(x*y)**(3 - i) assert dp(x*y + x*KD(i, j), (j, k, l)) == \ (x*y)**(-k + l + 1) + (x*y)**(i - k)*x*(x*y)**(l - i) assert dp(x*(y + KD(i, j)), (j, 1, 3)) == (x*y)**3 + \ x*(x*y)**2*KD(i, 1) + (x*y)*x*(x*y)*KD(i, 2) + (x*y)**2*x*KD(i, 3) assert dp(x*(y + KD(i, j)), (j, 1, 1)) == x*(y + KD(i, 1)) assert dp(x*(y + KD(i, j)), (j, 2, 2)) == x*(y + KD(i, 2)) assert dp(x*(y + KD(i, j)), (j, 3, 3)) == x*(y + KD(i, 3)) assert dp(x*(y + KD(i, j)), (j, 1, k)) == \ (x*y)**k + (x*y)**(i - 1)*x*(x*y)**(k - i) assert dp(x*(y + KD(i, j)), (j, k, 3)) == \ (x*y)**(-k + 4) + (x*y)**(i - k)*x*(x*y)**(3 - i) assert dp(x*(y + KD(i, j)), (j, k, l)) == \ (x*y)**(-k + l + 1) + (x*y)**(i - k)*x*(x*y)**(l - i) assert dp(x*(y + 2*KD(i, j)), (j, 1, 3)) == (x*y)**3 + \ 2*x*(x*y)**2*KD(i, 1) + 2*x*y*x*x*y*KD(i, 2) + 2*(x*y)**2*x*KD(i, 3) assert dp(x*(y + 2*KD(i, j)), (j, 1, 1)) == x*(y + 2*KD(i, 1)) assert dp(x*(y + 2*KD(i, j)), (j, 2, 2)) == x*(y + 2*KD(i, 2)) assert dp(x*(y + 2*KD(i, j)), (j, 3, 3)) == x*(y + 2*KD(i, 3)) assert dp(x*(y + 2*KD(i, j)), (j, 1, k)) == \ (x*y)**k + 2*(x*y)**(i - 1)*x*(x*y)**(k - i) assert dp(x*(y + 2*KD(i, j)), (j, k, 3)) == \ (x*y)**(-k + 4) + 2*(x*y)**(i - k)*x*(x*y)**(3 - i) assert dp(x*(y + 2*KD(i, j)), (j, k, l)) == \ (x*y)**(-k + l + 1) + 2*(x*y)**(i - k)*x*(x*y)**(l - i) assert dp((x + y)*(y + KD(i, j)), (j, 1, 3)) == ((x + y)*y)**3 + \ (x + y)*((x + y)*y)**2*KD(i, 1) + \ (x + y)*y*(x + y)**2*y*KD(i, 2) + \ ((x + y)*y)**2*(x + y)*KD(i, 3) assert dp((x + y)*(y + KD(i, j)), (j, 1, 1)) == (x + y)*(y + KD(i, 1)) assert dp((x + y)*(y + KD(i, j)), (j, 2, 2)) == (x + y)*(y + KD(i, 2)) assert dp((x + y)*(y + KD(i, j)), (j, 3, 3)) == (x + y)*(y + KD(i, 3)) assert dp((x + y)*(y + KD(i, j)), (j, 1, k)) == ((x + y)*y)**k + \ ((x + y)*y)**(i - 1)*(x + y)*((x + y)*y)**(k - i) assert dp((x + y)*(y + KD(i, j)), (j, k, 3)) == ((x + y)*y)**(-k + 4) + \ ((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(3 - i) assert dp((x + y)*(y + KD(i, j)), (j, k, l)) == \ ((x + y)*y)**(-k + l + 1) + \ ((x + y)*y)**(i - k)*(x + y)*((x + y)*y)**(l - i) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == \ KD(i, 1)*(KD(i, k) + x)*((KD(i, k) + x)*y)**2 + \ KD(i, 2)*(KD(i, k) + x)*y*(KD(i, k) + x)**2*y + \ KD(i, 3)*((KD(i, k) + x)*y)**2*(KD(i, k) + 
x) + \ ((KD(i, k) + x)*y)**3 assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == \ (x + KD(i, k))*(y + KD(i, 1)) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == \ (x + KD(i, k))*(y + KD(i, 2)) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == \ (x + KD(i, k))*(y + KD(i, 3)) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == \ ((x + KD(i, k))*y)**k + \ ((x + KD(i, k))*y)**(i - 1)*(x + KD(i, k))*((x + KD(i, k))*y)**(-i + k) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == \ ((x + KD(i, k))*y)**(4 - k) + \ ((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*((x + KD(i, k))*y)**(-i + 3) assert dp((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == \ ((x + KD(i, k))*y)**(-k + l + 1) + \ ((x + KD(i, k))*y)**(i - k)*(x + KD(i, k))*((x + KD(i, k))*y)**(-i + l) def test_deltasummation(): ds = deltasummation assert ds(x, (j, 1, 0)) == 0 assert ds(x, (j, 1, 3)) == 3*x assert ds(x + y, (j, 1, 3)) == 3*(x + y) assert ds(x*y, (j, 1, 3)) == 3*x*y assert ds(KD(i, j), (k, 1, 3)) == 3*KD(i, j) assert ds(x*KD(i, j), (k, 1, 3)) == 3*x*KD(i, j) assert ds(x*y*KD(i, j), (k, 1, 3)) == 3*x*y*KD(i, j) n = symbols('n', integer=True, nonzero=True) assert ds(KD(n, 0), (n, 1, 3)) == 0 # return unevaluated, until it gets implemented assert ds(KD(i**2, j**2), (j, -oo, oo)) == \ Sum(KD(i**2, j**2), (j, -oo, oo)) assert Piecewise((KD(i, k), And(S(1) <= i, i <= 3)), (0, True)) == \ ds(KD(i, j)*KD(j, k), (j, 1, 3)) == \ ds(KD(j, k)*KD(i, j), (j, 1, 3)) assert ds(KD(i, k), (k, -oo, oo)) == 1 assert ds(KD(i, k), (k, 0, oo)) == Piecewise((1, i >= 0), (0, True)) assert ds(KD(i, k), (k, 1, 3)) == \ Piecewise((1, And(S(1) <= i, i <= 3)), (0, True)) assert ds(k*KD(i, j)*KD(j, k), (k, -oo, oo)) == j*KD(i, j) assert ds(j*KD(i, j), (j, -oo, oo)) == i assert ds(i*KD(i, j), (i, -oo, oo)) == j assert ds(x, (i, 1, 3)) == 3*x assert ds((i + j)*KD(i, j), (j, -oo, oo)) == 2*i assert ds(KD(i, j), (j, 1, 3)) == \ Piecewise((1, And(S(1) <= i, i <= 3)), (0, True)) assert ds(KD(i, j), (j, 1, 1)) == Piecewise((1, Eq(i, 1)), (0, True)) assert ds(KD(i, j), (j, 2, 2)) == Piecewise((1, Eq(i, 2)), (0, True)) assert ds(KD(i, j), (j, 3, 3)) == Piecewise((1, Eq(i, 3)), (0, True)) assert ds(KD(i, j), (j, 1, k)) == \ Piecewise((1, And(S(1) <= i, i <= k)), (0, True)) assert ds(KD(i, j), (j, k, 3)) == \ Piecewise((1, And(k <= i, i <= 3)), (0, True)) assert ds(KD(i, j), (j, k, l)) == \ Piecewise((1, And(k <= i, i <= l)), (0, True)) assert ds(x*KD(i, j), (j, 1, 3)) == \ Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) assert ds(x*KD(i, j), (j, 1, 1)) == Piecewise((x, Eq(i, 1)), (0, True)) assert ds(x*KD(i, j), (j, 2, 2)) == Piecewise((x, Eq(i, 2)), (0, True)) assert ds(x*KD(i, j), (j, 3, 3)) == Piecewise((x, Eq(i, 3)), (0, True)) assert ds(x*KD(i, j), (j, 1, k)) == \ Piecewise((x, And(S(1) <= i, i <= k)), (0, True)) assert ds(x*KD(i, j), (j, k, 3)) == \ Piecewise((x, And(k <= i, i <= 3)), (0, True)) assert ds(x*KD(i, j), (j, k, l)) == \ Piecewise((x, And(k <= i, i <= l)), (0, True)) assert ds((x + y)*KD(i, j), (j, 1, 3)) == \ Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True)) assert ds((x + y)*KD(i, j), (j, 1, 1)) == \ Piecewise((x + y, Eq(i, 1)), (0, True)) assert ds((x + y)*KD(i, j), (j, 2, 2)) == \ Piecewise((x + y, Eq(i, 2)), (0, True)) assert ds((x + y)*KD(i, j), (j, 3, 3)) == \ Piecewise((x + y, Eq(i, 3)), (0, True)) assert ds((x + y)*KD(i, j), (j, 1, k)) == \ Piecewise((x + y, And(S(1) <= i, i <= k)), (0, True)) assert ds((x + y)*KD(i, j), (j, k, 3)) == \ Piecewise((x + y, And(k <= i, i <= 3)), (0, True)) assert ds((x + y)*KD(i, j), (j, k, 
l)) == \ Piecewise((x + y, And(k <= i, i <= l)), (0, True)) assert ds(KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold( Piecewise((1, And(S(1) <= i, i <= 3)), (0, True)) + Piecewise((1, And(S(1) <= j, j <= 3)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold( Piecewise((1, Eq(i, 1)), (0, True)) + Piecewise((1, Eq(j, 1)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold( Piecewise((1, Eq(i, 2)), (0, True)) + Piecewise((1, Eq(j, 2)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold( Piecewise((1, Eq(i, 3)), (0, True)) + Piecewise((1, Eq(j, 3)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold( Piecewise((1, And(S(1) <= i, i <= l)), (0, True)) + Piecewise((1, And(S(1) <= j, j <= l)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold( Piecewise((1, And(l <= i, i <= 3)), (0, True)) + Piecewise((1, And(l <= j, j <= 3)), (0, True))) assert ds(KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold( Piecewise((1, And(l <= i, i <= m)), (0, True)) + Piecewise((1, And(l <= j, j <= m)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, 1, 3)) == piecewise_fold( Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) + Piecewise((1, And(S(1) <= j, j <= 3)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, 1, 1)) == piecewise_fold( Piecewise((x, Eq(i, 1)), (0, True)) + Piecewise((1, Eq(j, 1)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, 2, 2)) == piecewise_fold( Piecewise((x, Eq(i, 2)), (0, True)) + Piecewise((1, Eq(j, 2)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, 3, 3)) == piecewise_fold( Piecewise((x, Eq(i, 3)), (0, True)) + Piecewise((1, Eq(j, 3)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, 1, l)) == piecewise_fold( Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) + Piecewise((1, And(S(1) <= j, j <= l)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, l, 3)) == piecewise_fold( Piecewise((x, And(l <= i, i <= 3)), (0, True)) + Piecewise((1, And(l <= j, j <= 3)), (0, True))) assert ds(x*KD(i, k) + KD(j, k), (k, l, m)) == piecewise_fold( Piecewise((x, And(l <= i, i <= m)), (0, True)) + Piecewise((1, And(l <= j, j <= m)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold( Piecewise((x, And(S(1) <= i, i <= 3)), (0, True)) + Piecewise((x, And(S(1) <= j, j <= 3)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold( Piecewise((x, Eq(i, 1)), (0, True)) + Piecewise((x, Eq(j, 1)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold( Piecewise((x, Eq(i, 2)), (0, True)) + Piecewise((x, Eq(j, 2)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold( Piecewise((x, Eq(i, 3)), (0, True)) + Piecewise((x, Eq(j, 3)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold( Piecewise((x, And(S(1) <= i, i <= l)), (0, True)) + Piecewise((x, And(S(1) <= j, j <= l)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold( Piecewise((x, And(l <= i, i <= 3)), (0, True)) + Piecewise((x, And(l <= j, j <= 3)), (0, True))) assert ds(x*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold( Piecewise((x, And(l <= i, i <= m)), (0, True)) + Piecewise((x, And(l <= j, j <= m)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 3)) == piecewise_fold( Piecewise((x + y, And(S(1) <= i, i <= 3)), (0, True)) + Piecewise((x + y, And(S(1) <= j, j <= 3)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, 1)) == piecewise_fold( Piecewise((x + y, Eq(i, 1)), (0, True)) + 
Piecewise((x + y, Eq(j, 1)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 2, 2)) == piecewise_fold( Piecewise((x + y, Eq(i, 2)), (0, True)) + Piecewise((x + y, Eq(j, 2)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 3, 3)) == piecewise_fold( Piecewise((x + y, Eq(i, 3)), (0, True)) + Piecewise((x + y, Eq(j, 3)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, 1, l)) == piecewise_fold( Piecewise((x + y, And(S(1) <= i, i <= l)), (0, True)) + Piecewise((x + y, And(S(1) <= j, j <= l)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, 3)) == piecewise_fold( Piecewise((x + y, And(l <= i, i <= 3)), (0, True)) + Piecewise((x + y, And(l <= j, j <= 3)), (0, True))) assert ds((x + y)*(KD(i, k) + KD(j, k)), (k, l, m)) == piecewise_fold( Piecewise((x + y, And(l <= i, i <= m)), (0, True)) + Piecewise((x + y, And(l <= j, j <= m)), (0, True))) assert ds(x*y + x*KD(i, j), (j, 1, 3)) == \ Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True)) assert ds(x*y + x*KD(i, j), (j, 1, 1)) == \ Piecewise((x*y + x, Eq(i, 1)), (x*y, True)) assert ds(x*y + x*KD(i, j), (j, 2, 2)) == \ Piecewise((x*y + x, Eq(i, 2)), (x*y, True)) assert ds(x*y + x*KD(i, j), (j, 3, 3)) == \ Piecewise((x*y + x, Eq(i, 3)), (x*y, True)) assert ds(x*y + x*KD(i, j), (j, 1, k)) == \ Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True)) assert ds(x*y + x*KD(i, j), (j, k, 3)) == \ Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True)) assert ds(x*y + x*KD(i, j), (j, k, l)) == Piecewise( ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True)) assert ds(x*(y + KD(i, j)), (j, 1, 3)) == \ Piecewise((3*x*y + x, And(S(1) <= i, i <= 3)), (3*x*y, True)) assert ds(x*(y + KD(i, j)), (j, 1, 1)) == \ Piecewise((x*y + x, Eq(i, 1)), (x*y, True)) assert ds(x*(y + KD(i, j)), (j, 2, 2)) == \ Piecewise((x*y + x, Eq(i, 2)), (x*y, True)) assert ds(x*(y + KD(i, j)), (j, 3, 3)) == \ Piecewise((x*y + x, Eq(i, 3)), (x*y, True)) assert ds(x*(y + KD(i, j)), (j, 1, k)) == \ Piecewise((k*x*y + x, And(S(1) <= i, i <= k)), (k*x*y, True)) assert ds(x*(y + KD(i, j)), (j, k, 3)) == \ Piecewise(((4 - k)*x*y + x, And(k <= i, i <= 3)), ((4 - k)*x*y, True)) assert ds(x*(y + KD(i, j)), (j, k, l)) == Piecewise( ((l - k + 1)*x*y + x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, 1, 3)) == \ Piecewise((3*x*y + 2*x, And(S(1) <= i, i <= 3)), (3*x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, 1, 1)) == \ Piecewise((x*y + 2*x, Eq(i, 1)), (x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, 2, 2)) == \ Piecewise((x*y + 2*x, Eq(i, 2)), (x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, 3, 3)) == \ Piecewise((x*y + 2*x, Eq(i, 3)), (x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, 1, k)) == \ Piecewise((k*x*y + 2*x, And(S(1) <= i, i <= k)), (k*x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, k, 3)) == Piecewise( ((4 - k)*x*y + 2*x, And(k <= i, i <= 3)), ((4 - k)*x*y, True)) assert ds(x*(y + 2*KD(i, j)), (j, k, l)) == Piecewise( ((l - k + 1)*x*y + 2*x, And(k <= i, i <= l)), ((l - k + 1)*x*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, 1, 3)) == Piecewise( (3*(x + y)*y + x + y, And(S(1) <= i, i <= 3)), (3*(x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, 1, 1)) == \ Piecewise(((x + y)*y + x + y, Eq(i, 1)), ((x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, 2, 2)) == \ Piecewise(((x + y)*y + x + y, Eq(i, 2)), ((x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, 3, 3)) == \ Piecewise(((x + y)*y + x + y, Eq(i, 3)), ((x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, 1, k)) == 
Piecewise( (k*(x + y)*y + x + y, And(S(1) <= i, i <= k)), (k*(x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, k, 3)) == Piecewise( ((4 - k)*(x + y)*y + x + y, And(k <= i, i <= 3)), ((4 - k)*(x + y)*y, True)) assert ds((x + y)*(y + KD(i, j)), (j, k, l)) == Piecewise( ((l - k + 1)*(x + y)*y + x + y, And(k <= i, i <= l)), ((l - k + 1)*(x + y)*y, True)) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 3)) == piecewise_fold( Piecewise((KD(i, k) + x, And(S(1) <= i, i <= 3)), (0, True)) + 3*(KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, 1)) == piecewise_fold( Piecewise((KD(i, k) + x, Eq(i, 1)), (0, True)) + (KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 2, 2)) == piecewise_fold( Piecewise((KD(i, k) + x, Eq(i, 2)), (0, True)) + (KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 3, 3)) == piecewise_fold( Piecewise((KD(i, k) + x, Eq(i, 3)), (0, True)) + (KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, 1, k)) == piecewise_fold( Piecewise((KD(i, k) + x, And(S(1) <= i, i <= k)), (0, True)) + k*(KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, 3)) == piecewise_fold( Piecewise((KD(i, k) + x, And(k <= i, i <= 3)), (0, True)) + (4 - k)*(KD(i, k) + x)*y) assert ds((x + KD(i, k))*(y + KD(i, j)), (j, k, l)) == piecewise_fold( Piecewise((KD(i, k) + x, And(k <= i, i <= l)), (0, True)) + (l - k + 1)*(KD(i, k) + x)*y)
py
1a3dc3347b79323af5e29d08d5150b99d2f3b68c
class Articles:
    """
    class to define Article objects
    """
    def __init__(self, source: dict, author: str, title: str, description: str,
                 url: str, url_to_image: str, published_at: str):
        """
        method to define Article object properties
        :param source:
        :param author:
        :param title:
        :param description:
        :param url:
        :param url_to_image:
        :param published_at:
        """
        self.source = source
        self.author = author
        self.title = title
        self.description = description
        self.url = url
        self.url_to_image = url_to_image
        self.published_at = published_at


class NewsSources:
    """
    class to model News Sources objects
    """
    def __init__(self, id: str, name: str, description: str, url: str,
                 category: str, language: str, country: str):
        """
        method to define News Sources properties
        :param id:
        :param name:
        :param description:
        :param url:
        :param category:
        :param language:
        :param country:
        """
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category
        self.language = language
        self.country = country
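
# Hedged usage sketch, not part of the original module: it assumes these model
# classes are filled from a News API-style response; all field values below are
# illustrative placeholders.
if __name__ == '__main__':
    example_article = Articles(
        source={'id': 'example-source', 'name': 'Example Source'},
        author='Jane Doe',
        title='Sample headline',
        description='A short description of the story',
        url='https://example.com/story',
        url_to_image='https://example.com/story.jpg',
        published_at='2021-01-01T00:00:00Z',
    )
    example_source = NewsSources(
        id='example-source',
        name='Example Source',
        description='An illustrative news source',
        url='https://example.com',
        category='general',
        language='en',
        country='us',
    )
    print(example_article.title, '-', example_source.name)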
py
1a3dc3a557136fb890a82c68fb56460ded7de3a1
"""Setup script for gristmill.""" from setuptools import setup, find_packages with open('README.rst', 'r') as readme: DESCRIPTION = readme.read() CLASSIFIERS = [ 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Scientific/Engineering :: Mathematics' ] setup( name='gristmill', version='0.5.0dev', description=DESCRIPTION.splitlines()[0], long_description=DESCRIPTION, url='https://github.com/tschijnmo/gristmill', author='Jinmo Zhao and Gustavo E Scuseria', author_email='[email protected]', license='MIT', classifiers=CLASSIFIERS, packages=find_packages(), package_data={'gristmill': ['templates/*']}, install_requires=['drudge', 'Jinja2', 'sympy'] )
py
1a3dc443bea15e750284e358c9524ce53342abac
from ops import * from utils import * from glob import glob import time from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch class DRIT(object) : def __init__(self, sess, args): self.model_name = 'DRIT' self.sess = sess self.checkpoint_dir = args.checkpoint_dir self.result_dir = args.result_dir self.log_dir = args.log_dir self.sample_dir = args.sample_dir self.dataset_name = args.dataset self.augment_flag = args.augment_flag self.epoch = args.epoch self.iteration = args.iteration self.decay_flag = args.decay_flag self.decay_epoch = args.decay_epoch self.gan_type = args.gan_type self.batch_size = args.batch_size self.print_freq = args.print_freq self.save_freq = args.save_freq self.num_attribute = args.num_attribute # for test self.guide_img = args.guide_img self.direction = args.direction self.img_size = args.img_size self.img_ch = args.img_ch self.init_lr = args.lr self.d_content_init_lr = args.lr / 2.5 self.ch = args.ch """ Weight """ self.content_adv_w = args.content_adv_w self.domain_adv_w = args.domain_adv_w self.cycle_w = args.cycle_w self.recon_w = args.recon_w self.latent_w = args.latent_w self.kl_w = args.kl_w """ Generator """ self.n_layer = args.n_layer self.n_z = args.n_z self.concat = args.concat """ Discriminator """ self.n_dis = args.n_dis self.n_scale = args.n_scale self.n_d_con = args.n_d_con self.multi = True if args.n_scale > 1 else False self.sn = args.sn self.sample_dir = os.path.join(args.sample_dir, self.model_dir) check_folder(self.sample_dir) self.trainA_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainA')) self.trainB_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainB')) self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset)) print("##### Information #####") print("# gan type : ", self.gan_type) print("# dataset : ", self.dataset_name) print("# max dataset number : ", self.dataset_num) print("# batch_size : ", self.batch_size) print("# decay_flag : ", self.decay_flag) print("# epoch : ", self.epoch) print("# decay_epoch : ", self.decay_epoch) print("# iteration per epoch : ", self.iteration) print("# attribute in test phase : ", self.num_attribute) print() print("##### Generator #####") print("# layer : ", self.n_layer) print("# z dimension : ", self.n_z) print("# concat : ", self.concat) print() print("##### Discriminator #####") print("# discriminator layer : ", self.n_dis) print("# multi-scale Dis : ", self.n_scale) print("# updating iteration of con_dis : ", self.n_d_con) print("# spectral_norm : ", self.sn) print() print("##### Weight #####") print("# domain_adv_weight : ", self.domain_adv_w) print("# content_adv_weight : ", self.content_adv_w) print("# cycle_weight : ", self.cycle_w) print("# recon_weight : ", self.recon_w) print("# latent_weight : ", self.latent_w) print("# kl_weight : ", self.kl_w) ################################################################################## # Encoder and Decoders ################################################################################## def content_encoder(self, x, is_training=True, reuse=False, scope='content_encoder'): channel = self.ch with tf.variable_scope(scope, reuse=reuse) : x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv') x = lrelu(x, 0.01) for i in range(2) : x = conv(x, channel * 2, kernel=3, stride=2, pad=1, pad_type='reflect', scope='conv_' + str(i)) x = instance_norm(x, scope='ins_norm_' + str(i)) x = relu(x) channel = channel * 2 for i in range(1, self.n_layer) : x = 
resblock(x, channel, scope='resblock_'+str(i)) with tf.variable_scope('content_encoder_share', reuse=tf.AUTO_REUSE) : x = resblock(x, channel, scope='resblock_share') x = gaussian_noise_layer(x, is_training) return x def attribute_encoder(self, x, reuse=False, scope='attribute_encder'): channel = self.ch with tf.variable_scope(scope, reuse=reuse) : x = conv(x, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv') x = relu(x) channel = channel * 2 x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_0') x = relu(x) channel = channel * 2 for i in range(1, self.n_layer) : x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv_' + str(i)) x = relu(x) x = global_avg_pooling(x) x = conv(x, channels=self.n_z, kernel=1, stride=1, scope='attribute_logit') return x def attribute_encoder_concat(self, x, reuse=False, scope='attribute_encoder_concat'): channel = self.ch with tf.variable_scope(scope, reuse=reuse) : x = conv(x, channel, kernel=4, stride=2, pad=1, pad_type='reflect', scope='conv') for i in range(1, self.n_layer) : channel = channel * (i+1) x = basic_block(x, channel, scope='basic_block_' + str(i)) x = lrelu(x, 0.2) x = global_avg_pooling(x) mean = fully_conneted(x, channels=self.n_z, scope='z_mean') logvar = fully_conneted(x, channels=self.n_z, scope='z_logvar') return mean, logvar def MLP(self, z, reuse=False, scope='MLP'): channel = self.ch * self.n_layer with tf.variable_scope(scope, reuse=reuse): for i in range(2): z = fully_conneted(z, channel, scope='fully_' + str(i)) z = relu(z) z = fully_conneted(z, channel * self.n_layer, scope='fully_logit') return z def generator(self, x, z, reuse=False, scope="generator"): channel = self.ch * self.n_layer with tf.variable_scope(scope, reuse=reuse) : z = self.MLP(z, reuse=reuse) z = tf.split(z, num_or_size_splits=self.n_layer, axis=-1) for i in range(self.n_layer) : x = mis_resblock(x, z[i], channel, scope='mis_resblock_' + str(i)) for i in range(2) : x = deconv(x, channel // 2, kernel=3, stride=2, scope='deconv_' + str(i)) x = layer_norm(x, scope='layer_norm_' + str(i)) x = relu(x) channel = channel // 2 x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit') x = tanh(x) return x def generator_concat(self, x, z, reuse=False, scope='generator_concat'): channel = self.ch * self.n_layer with tf.variable_scope('generator_concat_share', reuse=tf.AUTO_REUSE) : x = resblock(x, channel, scope='resblock') with tf.variable_scope(scope, reuse=reuse) : channel = channel + self.n_z x = expand_concat(x, z) for i in range(1, self.n_layer) : x = resblock(x, channel, scope='resblock_' + str(i)) for i in range(2) : channel = channel + self.n_z x = expand_concat(x, z) x = deconv(x, channel // 2, kernel=3, stride=2, scope='deconv_' + str(i)) x = layer_norm(x, scope='layer_norm_' + str(i)) x = relu(x) channel = channel // 2 x = expand_concat(x, z) x = deconv(x, channels=self.img_ch, kernel=1, stride=1, scope='G_logit') x = tanh(x) return x ################################################################################## # Discriminator ################################################################################## def content_discriminator(self, x, reuse=False, scope='content_discriminator'): D_logit = [] with tf.variable_scope(scope, reuse=reuse) : channel = self.ch * self.n_layer for i in range(3) : x = conv(x, channel, kernel=7, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i)) x = instance_norm(x, scope='ins_norm_' + str(i)) x = lrelu(x, 0.01) x = 
conv(x, channel, kernel=4, stride=1, scope='conv_3') x = lrelu(x, 0.01) x = conv(x, channels=1, kernel=1, stride=1, scope='D_content_logit') D_logit.append(x) return D_logit def multi_discriminator(self, x_init, reuse=False, scope="multi_discriminator"): D_logit = [] with tf.variable_scope(scope, reuse=reuse) : for scale in range(self.n_scale) : channel = self.ch x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) + 'conv_0') x = lrelu(x, 0.01) for i in range(1, self.n_dis): x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='ms_' + str(scale) +'conv_' + str(i)) x = lrelu(x, 0.01) channel = channel * 2 x = conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='ms_' + str(scale) + 'D_logit') D_logit.append(x) x_init = down_sample(x_init) return D_logit def discriminator(self, x, reuse=False, scope="discriminator"): D_logit = [] with tf.variable_scope(scope, reuse=reuse) : channel = self.ch x = conv(x, channel, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv') x = lrelu(x, 0.01) for i in range(1, self.n_dis) : x = conv(x, channel * 2, kernel=3, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i)) x = lrelu(x, 0.01) channel = channel * 2 x = conv(x, channels=1, kernel=1, stride=1, sn=self.sn, scope='D_logit') D_logit.append(x) return D_logit ################################################################################## # Model ################################################################################## def Encoder_A(self, x_A, is_training=True, reuse=False): mean = None logvar = None content_A = self.content_encoder(x_A, is_training=is_training, reuse=reuse, scope='content_encoder_A') if self.concat : mean, logvar = self.attribute_encoder_concat(x_A, reuse=reuse, scope='attribute_encoder_concat_A') attribute_A = z_sample(mean, logvar) else : attribute_A = self.attribute_encoder(x_A, reuse=reuse, scope='attribute_encoder_A') return content_A, attribute_A, mean, logvar def Encoder_B(self, x_B, is_training=True, reuse=False): mean = None logvar = None content_B = self.content_encoder(x_B, is_training=is_training, reuse=reuse, scope='content_encoder_B') if self.concat: mean, logvar = self.attribute_encoder_concat(x_B, reuse=reuse, scope='attribute_encoder_concat_B') attribute_B = z_sample(mean, logvar) else: attribute_B = self.attribute_encoder(x_B, reuse=reuse, scope='attribute_encoder_B') return content_B, attribute_B, mean, logvar def Decoder_A(self, content_B, attribute_A, reuse=False): # x = fake_A, identity_A, random_fake_A # x = (B, A), (A, A), (B, z) if self.concat : x = self.generator_concat(x=content_B, z=attribute_A, reuse=reuse, scope='generator_concat_A') else : x = self.generator(x=content_B, z=attribute_A, reuse=reuse, scope='generator_A') return x def Decoder_B(self, content_A, attribute_B, reuse=False): # x = fake_B, identity_B, random_fake_B # x = (A, B), (B, B), (A, z) if self.concat : x = self.generator_concat(x=content_A, z=attribute_B, reuse=reuse, scope='generator_concat_B') else : x = self.generator(x=content_A, z=attribute_B, reuse=reuse, scope='generator_B') return x def discriminate_real(self, x_A, x_B): if self.multi : real_A_logit = self.multi_discriminator(x_A, scope='multi_discriminator_A') real_B_logit = self.multi_discriminator(x_B, scope='multi_discriminator_B') else : real_A_logit = self.discriminator(x_A, scope="discriminator_A") real_B_logit = self.discriminator(x_B, scope="discriminator_B") return 
real_A_logit, real_B_logit def discriminate_fake(self, x_ba, x_ab): if self.multi : fake_A_logit = self.multi_discriminator(x_ba, reuse=True, scope='multi_discriminator_A') fake_B_logit = self.multi_discriminator(x_ab, reuse=True, scope='multi_discriminator_B') else : fake_A_logit = self.discriminator(x_ba, reuse=True, scope="discriminator_A") fake_B_logit = self.discriminator(x_ab, reuse=True, scope="discriminator_B") return fake_A_logit, fake_B_logit def discriminate_content(self, content_A, content_B, reuse=False): content_A_logit = self.content_discriminator(content_A, reuse=reuse, scope='content_discriminator') content_B_logit = self.content_discriminator(content_B, reuse=True, scope='content_discriminator') return content_A_logit, content_B_logit def build_model(self): self.lr = tf.placeholder(tf.float32, name='lr') """ Input Image""" Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag) trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset) trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset) gpu_device = '/gpu:0' trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size)) trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size)) trainA_iterator = trainA.make_one_shot_iterator() trainB_iterator = trainB.make_one_shot_iterator() self.domain_A = trainA_iterator.get_next() self.domain_B = trainB_iterator.get_next() """ Define Encoder, Generator, Discriminator """ random_z = tf.random_normal(shape=[self.batch_size, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32) # encode content_a, attribute_a, mean_a, logvar_a = self.Encoder_A(self.domain_A) content_b, attribute_b, mean_b, logvar_b = self.Encoder_B(self.domain_B) # decode (fake, identity, random) fake_a = self.Decoder_A(content_B=content_b, attribute_A=attribute_a) fake_b = self.Decoder_B(content_A=content_a, attribute_B=attribute_b) recon_a = self.Decoder_A(content_B=content_a, attribute_A=attribute_a, reuse=True) recon_b = self.Decoder_B(content_A=content_b, attribute_B=attribute_b, reuse=True) random_fake_a = self.Decoder_A(content_B=content_b, attribute_A=random_z, reuse=True) random_fake_b = self.Decoder_B(content_A=content_a, attribute_B=random_z, reuse=True) # encode & decode again for cycle-consistency content_fake_a, attribute_fake_a, _, _ = self.Encoder_A(fake_a, reuse=True) content_fake_b, attribute_fake_b, _, _ = self.Encoder_B(fake_b, reuse=True) cycle_a = self.Decoder_A(content_B=content_fake_b, attribute_A=attribute_fake_a, reuse=True) cycle_b = self.Decoder_B(content_A=content_fake_a, attribute_B=attribute_fake_b, reuse=True) # for latent regression _, attribute_fake_random_a, _, _ = self.Encoder_A(random_fake_a, reuse=True) _, attribute_fake_random_b, _, _ = self.Encoder_B(random_fake_b, reuse=True) # discriminate real_A_logit, real_B_logit = self.discriminate_real(self.domain_A, self.domain_B) fake_A_logit, fake_B_logit = self.discriminate_fake(fake_a, fake_b) random_fake_A_logit, random_fake_B_logit = self.discriminate_fake(random_fake_a, random_fake_b) content_A_logit, content_B_logit = self.discriminate_content(content_a, content_b) """ Define Loss """ g_adv_loss_a = generator_loss(self.gan_type, fake_A_logit) + 
generator_loss(self.gan_type, random_fake_A_logit) g_adv_loss_b = generator_loss(self.gan_type, fake_B_logit) + generator_loss(self.gan_type, random_fake_B_logit) g_con_loss_a = generator_loss(self.gan_type, content_A_logit, content=True) g_con_loss_b = generator_loss(self.gan_type, content_B_logit, content=True) g_cyc_loss_a = L1_loss(cycle_a, self.domain_A) g_cyc_loss_b = L1_loss(cycle_b, self.domain_B) g_rec_loss_a = L1_loss(recon_a, self.domain_A) g_rec_loss_b = L1_loss(recon_b, self.domain_B) g_latent_loss_a = L1_loss(attribute_fake_random_a, random_z) g_latent_loss_b = L1_loss(attribute_fake_random_b, random_z) if self.concat : g_kl_loss_a = kl_loss_concat(mean_a, logvar_a) g_kl_loss_b = kl_loss_concat(mean_b, logvar_b) else : g_kl_loss_a = kl_loss(attribute_a) g_kl_loss_b = kl_loss(attribute_b) d_adv_loss_a = discriminator_loss(self.gan_type, real_A_logit, fake_A_logit) d_adv_loss_b = discriminator_loss(self.gan_type, real_B_logit, fake_B_logit) d_con_loss = discriminator_loss(self.gan_type, content_A_logit, content_B_logit) Generator_A_domain_loss = self.domain_adv_w * g_adv_loss_a Generator_A_content_loss = self.content_adv_w * g_con_loss_a Generator_A_cycle_loss = self.cycle_w * g_cyc_loss_b Generator_A_recon_loss = self.recon_w * g_rec_loss_a Generator_A_latent_loss = self.latent_w * g_latent_loss_a Generator_A_kl_loss = self.kl_w * g_kl_loss_a Generator_A_loss = Generator_A_domain_loss + \ Generator_A_content_loss + \ Generator_A_cycle_loss + \ Generator_A_recon_loss + \ Generator_A_latent_loss + \ Generator_A_kl_loss Generator_B_domain_loss = self.domain_adv_w * g_adv_loss_b Generator_B_content_loss = self.content_adv_w * g_con_loss_b Generator_B_cycle_loss = self.cycle_w * g_cyc_loss_a Generator_B_recon_loss = self.recon_w * g_rec_loss_b Generator_B_latent_loss = self.latent_w * g_latent_loss_b Generator_B_kl_loss = self.kl_w * g_kl_loss_b Generator_B_loss = Generator_B_domain_loss + \ Generator_B_content_loss + \ Generator_B_cycle_loss + \ Generator_B_recon_loss + \ Generator_B_latent_loss + \ Generator_B_kl_loss Discriminator_A_loss = self.domain_adv_w * d_adv_loss_a Discriminator_B_loss = self.domain_adv_w * d_adv_loss_b Discriminator_content_loss = self.content_adv_w * d_con_loss self.Generator_loss = Generator_A_loss + Generator_B_loss self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss self.Discriminator_content_loss = Discriminator_content_loss """ Training """ t_vars = tf.trainable_variables() G_vars = [var for var in t_vars if 'endoer' in var.name or 'generator' in var.name] D_vars = [var for var in t_vars if 'discriminator' in var.name and 'content' not in var.name] D_content_vars = [var for var in t_vars if 'content_discriminator' in var.name] grads, _ = tf.clip_by_global_norm(tf.gradients(self.Discriminator_content_loss, D_content_vars), clip_norm=5) self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars) self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars) self.D_content_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).apply_gradients(zip(grads, D_content_vars)) """" Summary """ self.lr_write = tf.summary.scalar("learning_rate", self.lr) self.all_G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss) self.all_D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss) self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss) self.G_A_domain_loss = 
tf.summary.scalar("G_A_domain_loss", Generator_A_domain_loss) self.G_A_content_loss = tf.summary.scalar("G_A_content_loss", Generator_A_content_loss) self.G_A_cycle_loss = tf.summary.scalar("G_A_cycle_loss", Generator_A_cycle_loss) self.G_A_recon_loss = tf.summary.scalar("G_A_recon_loss", Generator_A_recon_loss) self.G_A_latent_loss = tf.summary.scalar("G_A_latent_loss", Generator_A_latent_loss) self.G_A_kl_loss = tf.summary.scalar("G_A_kl_loss", Generator_A_kl_loss) self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss) self.G_B_domain_loss = tf.summary.scalar("G_B_domain_loss", Generator_B_domain_loss) self.G_B_content_loss = tf.summary.scalar("G_B_content_loss", Generator_B_content_loss) self.G_B_cycle_loss = tf.summary.scalar("G_B_cycle_loss", Generator_B_cycle_loss) self.G_B_recon_loss = tf.summary.scalar("G_B_recon_loss", Generator_B_recon_loss) self.G_B_latent_loss = tf.summary.scalar("G_B_latent_loss", Generator_B_latent_loss) self.G_B_kl_loss = tf.summary.scalar("G_B_kl_loss", Generator_B_kl_loss) self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss) self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss) self.G_loss = tf.summary.merge([self.G_A_loss, self.G_A_domain_loss, self.G_A_content_loss, self.G_A_cycle_loss, self.G_A_recon_loss, self.G_A_latent_loss, self.G_A_kl_loss, self.G_B_loss, self.G_B_domain_loss, self.G_B_content_loss, self.G_B_cycle_loss, self.G_B_recon_loss, self.G_B_latent_loss, self.G_B_kl_loss, self.all_G_loss]) self.D_loss = tf.summary.merge([self.D_A_loss, self.D_B_loss, self.all_D_loss]) self.D_content_loss = tf.summary.scalar("Discriminator_content_loss", self.Discriminator_content_loss) """ Image """ self.fake_A = fake_a self.fake_B = fake_b self.real_A = self.domain_A self.real_B = self.domain_B """ Test """ self.test_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_image') self.test_random_z = tf.random_normal(shape=[1, self.n_z], mean=0.0, stddev=1.0, dtype=tf.float32) test_content_a, test_attribute_a, _, _ = self.Encoder_A(self.test_image, is_training=False, reuse=True) test_content_b, test_attribute_b, _, _ = self.Encoder_B(self.test_image, is_training=False, reuse=True) self.test_fake_A = self.Decoder_A(content_B=test_content_b, attribute_A=self.test_random_z, reuse=True) self.test_fake_B = self.Decoder_B(content_A=test_content_a, attribute_B=self.test_random_z, reuse=True) """ Guided Image Translation """ self.content_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='content_image') self.attribute_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='guide_attribute_image') guide_content_A, guide_attribute_A, _, _ = self.Encoder_A(self.content_image, is_training=False, reuse=True) guide_content_B, guide_attribute_B, _, _ = self.Encoder_B(self.attribute_image, is_training=False, reuse=True) self.guide_fake_A = self.Decoder_A(content_B=guide_content_B, attribute_A=guide_attribute_A, reuse=True) self.guide_fake_B = self.Decoder_B(content_A=guide_content_A, attribute_B=guide_attribute_B, reuse=True) def train(self): # initialize all variables tf.global_variables_initializer().run() # saver to save model self.saver = tf.train.Saver() # summary writer self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph) # restore check-point if it exits could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: start_epoch = (int)(checkpoint_counter / 
self.iteration) start_batch_id = checkpoint_counter - start_epoch * self.iteration counter = checkpoint_counter print(" [*] Load SUCCESS") else: start_epoch = 0 start_batch_id = 0 counter = 1 print(" [!] Load failed...") # loop for epoch start_time = time.time() lr = self.init_lr for epoch in range(start_epoch, self.epoch): if self.decay_flag: lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch) # linear decay for idx in range(start_batch_id, self.iteration): train_feed_dict = { self.lr : lr } summary_str = self.sess.run(self.lr_write, feed_dict=train_feed_dict) self.writer.add_summary(summary_str, counter) # Update content D _, d_con_loss, summary_str = self.sess.run([self.D_content_optim, self.Discriminator_content_loss, self.D_content_loss], feed_dict=train_feed_dict) self.writer.add_summary(summary_str, counter) if (counter - 1) % self.n_d_con == 0: # Update D _, d_loss, summary_str = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss], feed_dict=train_feed_dict) self.writer.add_summary(summary_str, counter) # Update G batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run([self.real_A, self.real_B, self.fake_A, self.fake_B, self.G_optim, self.Generator_loss, self.G_loss], feed_dict=train_feed_dict) self.writer.add_summary(summary_str, counter) print("Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f, d_loss: %.8f, g_loss: %.8f" \ % (epoch, idx, self.iteration, time.time() - start_time, d_con_loss, d_loss, g_loss)) else: print("Epoch: [%2d] [%6d/%6d] time: %4.4f d_con_loss: %.8f" % ( epoch, idx, self.iteration, time.time() - start_time, d_con_loss)) if np.mod(idx + 1, self.print_freq) == 0: save_images(batch_A_images, [self.batch_size, 1], './{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1)) # save_images(batch_B_images, [self.batch_size, 1], # './{}/real_B_{}_{:03d}_{:05d}.png'.format(self.sample_dir, gpu_id, epoch, idx+1)) # save_images(fake_A, [self.batch_size, 1], # './{}/fake_A_{}_{:03d}_{:05d}.png'.format(self.sample_dir, gpu_id, epoch, idx+1)) save_images(fake_B, [self.batch_size, 1], './{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1)) # display training status counter += 1 if np.mod(idx+1, self.save_freq) == 0 : self.save(self.checkpoint_dir, counter) # After an epoch, start_batch_id is set to zero # non-zero value is only for the first epoch after loading pre-trained model start_batch_id = 0 # save model for final step self.save(self.checkpoint_dir, counter) @property def model_dir(self): if self.concat : concat = "_concat" else : concat = "" if self.sn : sn = "_sn" else : sn = "" return "{}{}_{}_{}_{}layer_{}dis_{}scale_{}con{}".format(self.model_name, concat, self.dataset_name, self.gan_type, self.n_layer, self.n_dis, self.n_scale, self.n_d_con, sn) def save(self, checkpoint_dir, step): checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step) def load(self, checkpoint_dir): print(" [*] Reading checkpoints...") checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: ckpt_name = os.path.basename(ckpt.model_checkpoint_path) self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) counter = int(ckpt_name.split('-')[-1]) print(" [*] Success 
to read {}".format(ckpt_name)) return True, counter else: print(" [*] Failed to find a checkpoint") return False, 0 def test(self): tf.global_variables_initializer().run() test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA')) test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB')) self.saver = tf.train.Saver() could_load, checkpoint_counter = self.load(self.checkpoint_dir) self.result_dir = os.path.join(self.result_dir, self.model_dir) check_folder(self.result_dir) if could_load : print(" [*] Load SUCCESS") else : print(" [!] Load failed...") # write html for visual comparison index_path = os.path.join(self.result_dir, 'index.html') index = open(index_path, 'w') index.write("<html><body><table><tr>") index.write("<th>name</th><th>input</th><th>output</th></tr>") for sample_file in test_A_files : # A -> B print('Processing A image: ' + sample_file) sample_image = np.asarray(load_test_data(sample_file, size=self.img_size)) file_name = os.path.basename(sample_file).split(".")[0] file_extension = os.path.basename(sample_file).split(".")[1] for i in range(self.num_attribute) : image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension)) fake_img = self.sess.run(self.test_fake_B, feed_dict = {self.test_image : sample_image}) save_images(fake_img, [1, 1], image_path) index.write("<td>%s</td>" % os.path.basename(image_path)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else ( '../..' + os.path.sep + sample_file), self.img_size, self.img_size)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else ( '../..' + os.path.sep + image_path), self.img_size, self.img_size)) index.write("</tr>") for sample_file in test_B_files : # B -> A print('Processing B image: ' + sample_file) sample_image = np.asarray(load_test_data(sample_file, size=self.img_size)) file_name = os.path.basename(sample_file).split(".")[0] file_extension = os.path.basename(sample_file).split(".")[1] for i in range(self.num_attribute): image_path = os.path.join(self.result_dir, '{}_attribute{}.{}'.format(file_name, i, file_extension)) fake_img = self.sess.run(self.test_fake_A, feed_dict={self.test_image: sample_image}) save_images(fake_img, [1, 1], image_path) index.write("<td>%s</td>" % os.path.basename(image_path)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else ( '../..' + os.path.sep + sample_file), self.img_size, self.img_size)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else ( '../..' + os.path.sep + image_path), self.img_size, self.img_size)) index.write("</tr>") index.close() def guide_test(self): tf.global_variables_initializer().run() test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA')) test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB')) attribute_file = np.asarray(load_test_data(self.guide_img, size=self.img_size)) self.saver = tf.train.Saver() could_load, checkpoint_counter = self.load(self.checkpoint_dir) self.result_dir = os.path.join(self.result_dir, self.model_dir, 'guide') check_folder(self.result_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] 
Load failed...") # write html for visual comparison index_path = os.path.join(self.result_dir, 'index.html') index = open(index_path, 'w') index.write("<html><body><table><tr>") index.write("<th>name</th><th>input</th><th>output</th></tr>") if self.direction == 'a2b' : for sample_file in test_A_files: # A -> B print('Processing A image: ' + sample_file) sample_image = np.asarray(load_test_data(sample_file, size=self.img_size)) image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file))) fake_img = self.sess.run(self.guide_fake_B, feed_dict={self.content_image: sample_image, self.attribute_image : attribute_file}) save_images(fake_img, [1, 1], image_path) index.write("<td>%s</td>" % os.path.basename(image_path)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else ( '../../..' + os.path.sep + sample_file), self.img_size, self.img_size)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else ( '../../..' + os.path.sep + image_path), self.img_size, self.img_size)) index.write("</tr>") else : for sample_file in test_B_files: # B -> A print('Processing B image: ' + sample_file) sample_image = np.asarray(load_test_data(sample_file, size=self.img_size)) image_path = os.path.join(self.result_dir, '{}'.format(os.path.basename(sample_file))) fake_img = self.sess.run(self.guide_fake_A, feed_dict={self.content_image: sample_image, self.attribute_image : attribute_file}) save_images(fake_img, [1, 1], image_path) index.write("<td>%s</td>" % os.path.basename(image_path)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (sample_file if os.path.isabs(sample_file) else ( '../../..' + os.path.sep + sample_file), self.img_size, self.img_size)) index.write("<td><img src='%s' width='%d' height='%d'></td>" % (image_path if os.path.isabs(image_path) else ( '../../..' + os.path.sep + image_path), self.img_size, self.img_size)) index.write("</tr>") index.close()
py
1a3dc51af4df60cf892fdb7b400dfec3a1bafc9f
import discord
from discord.ext import commands


class Example(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print('Bot is online')

    @commands.command()
    async def loadtest(self, ctx):
        await ctx.send('yes non1')


def setup(client):
    client.add_cog(Example(client))
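
# Hedged usage sketch, not part of the original cog: a discord.py (1.x) bot would
# normally load this file as an extension from a separate launcher script; the
# module path 'cogs.example' and the token placeholder are assumptions, not values
# taken from the project.
if __name__ == '__main__':
    from discord.ext import commands

    bot = commands.Bot(command_prefix='.')
    bot.load_extension('cogs.example')  # imports the cog module and calls its setup(bot)
    bot.run('YOUR_BOT_TOKEN')           # placeholder token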
py
1a3dc5971d698c5a6a8f5fcd2b6a7b40d4acb153
#! /usr/bin/env python
import sys

try:
    import unittest2 as unittest
except ImportError:
    import unittest

from os import path
from random import Random, randint

import coverage


class RandomOrderTestSuite(unittest.TestSuite):
    """
    Test Suite that will randomize the order of tests.  This avoids the tests
    becoming dependent on some overlooked state.

    USE WITH CAUTION.
    """
    def __init__(self, seed, *args, **kwargs):
        if seed:
            self.__seed = seed
        else:
            self.__seed = randint(0, 9999)
        super(RandomOrderTestSuite, self).__init__(*args, **kwargs)

    def __get_all_tests(self, test_case):
        result = []
        for item in test_case:
            if hasattr(item, "_tests") and len(item._tests) > 0:
                result += self.__get_all_tests(item)
            else:
                result.append(item)
        return result

    def run(self, result):
        cases = self.__get_all_tests(self)
        r = Random(self.__seed)
        r.shuffle(cases)
        for test in cases:
            if result.shouldStop:
                break
            test(result)
        print
        print
        print '>>> python runtests.py --seed={0}'.format(self.__seed)
        return result


if __name__ == "__main__":
    cov = coverage.coverage(source=[path.join(path.dirname(__file__), 'datatree')])
    cov.start()

    seed = None
    for arg in sys.argv:
        if arg.startswith('--seed='):
            seed = int(arg.split('=')[1])

    current_folder = path.dirname(__file__)
    base_folder = path.join(current_folder, "datatree")
    sys.path.insert(0, current_folder)

    suite = RandomOrderTestSuite(seed)
    loader = unittest.loader.defaultTestLoader
    suite.addTest(loader.discover(base_folder, pattern="test*.py"))

    runner = unittest.TextTestRunner()
    runner.verbosity = 2
    runner.run(suite.run)

    cov.stop()

    # Output the coverage
    cov.html_report(directory='htmlcov')
    print 'Coverage report written to: {0}'.format(path.join(path.dirname(__file__), 'htmlcov', 'index.html'))
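
# Hedged usage note, not part of the original script: the seed hint printed by the
# suite suggests the runner is invoked from the project root roughly like this.
#   python runtests.py               # shuffle tests with a fresh random seed
#   python runtests.py --seed=1234   # reproduce the ordering of a previous run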
py
1a3dc7a89a4c50f717d42b364abecc0105af1b59
import os
import subprocess

try:
    from StringIO import StringIO
except ImportError:  # Py3
    from io import StringIO

from django.test import TestCase
from django.core import mail
from django.conf import settings
from mock import patch

from dbbackup import utils

GPG_PUBLIC_PATH = os.path.join(settings.BASE_DIR, 'tests/gpg/pubring.gpg')
DEV_NULL = open(os.devnull, 'w')


class Bytes_To_StrTest(TestCase):
    def test_get_gb(self):
        value = utils.bytes_to_str(byteVal=2**31)
        self.assertEqual(value, "2.0 GB")

    def test_0_decimal(self):
        value = utils.bytes_to_str(byteVal=1.01, decimals=0)
        self.assertEqual(value, "1 B")

    def test_2_decimal(self):
        value = utils.bytes_to_str(byteVal=1.01, decimals=2)
        self.assertEqual(value, "1.01 B")


class Handle_SizeTest(TestCase):
    def test_func(self):
        filehandle = StringIO('Test string')
        value = utils.handle_size(filehandle=filehandle)
        self.assertEqual(value, '11.0 B')


class Email_Uncaught_ExceptionTest(TestCase):
    def test_success(self):
        def func():
            pass
        utils.email_uncaught_exception(func)

    @patch('dbbackup.settings.SEND_EMAIL', False)
    def test_raise(self):
        def func():
            raise Exception('Foo')
        with self.assertRaises(Exception):
            utils.email_uncaught_exception(func)()
        self.assertEqual(len(mail.outbox), 0)

    @patch('dbbackup.settings.SEND_EMAIL', True)
    @patch('dbbackup.settings.FAILURE_RECIPIENTS', ['foo@bar'])
    def test_raise_with_mail(self):
        def func():
            raise Exception('Foo')
        with self.assertRaises(Exception):
            utils.email_uncaught_exception(func)()
        self.assertEqual(len(mail.outbox), 1)


class Encrypt_FileTest(TestCase):
    def setUp(self):
        self.path = '/tmp/foo'
        with open(self.path, 'a') as fd:
            fd.write('foo')
        cmd = ('gpg --import %s' % GPG_PUBLIC_PATH).split()
        subprocess.call(cmd, stdout=DEV_NULL, stderr=DEV_NULL)

    def tearDown(self):
        os.remove(self.path)
        subprocess.call('gpg --batch --yes --delete-key "test@test"'.split(),
                        stdout=DEV_NULL, stderr=DEV_NULL)

    def test_func(self, *args):
        with open(self.path) as fd:
            encrypted_file = utils.encrypt_file(inputfile=fd)
        encrypted_file.seek(0)
        self.assertTrue(encrypted_file.read())


class Create_Spooled_Temporary_FileTest(TestCase):
    def setUp(self):
        self.path = '/tmp/foo'
        with open(self.path, 'a') as fd:
            fd.write('foo')

    def tearDown(self):
        os.remove(self.path)

    def test_func(self, *args):
        utils.create_spooled_temporary_file(filepath=self.path)
py
1a3dc7c13fa07710d89a9724d654c3e5c1278e71
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class QuotestoscrapePipeline:
    def process_item(self, item, spider):
        return item
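
# Hedged illustration, not part of the original file: the note above about the
# ITEM_PIPELINES setting refers to the project's settings.py; assuming the Scrapy
# project package is named quotestoscrape, the entry would look roughly like this.
#
# ITEM_PIPELINES = {
#     'quotestoscrape.pipelines.QuotestoscrapePipeline': 300,
# }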
py
1a3dca47d1d6d2a23e3bded5ab5071174265e3ce
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os

import click
import dataloader as torcharrow_dataloader
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch.distributed.elastic.multiprocessing.errors import record
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, INT_FEATURE_COUNT
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.models.dlrm import DLRM
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.optim.keyed import KeyedOptimizerWrapper


@record
@click.command()
@click.option("--batch_size", default=256)
@click.option("--num_embeddings", default=2048)
@click.option("--sigrid_hash_salt", default=0)
@click.option("--parquet_directory", default="/data/criteo_preproc")
def main(
    batch_size,
    num_embeddings,
    sigrid_hash_salt,
    parquet_directory,
) -> None:
    rank = int(os.environ["LOCAL_RANK"])
    if torch.cuda.is_available():
        device = torch.device(f"cuda:{rank}")
        backend = "nccl"
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")
        backend = "gloo"
        print(
            "\033[92m"
            + f"WARNING: Running in CPU mode. cuda availability {torch.cuda.is_available()}."
        )

    dist.init_process_group(backend=backend)

    world_size = dist.get_world_size()

    dataloader = torcharrow_dataloader.get_dataloader(
        parquet_directory,
        world_size,
        rank,
        batch_size=batch_size,
        num_embeddings=num_embeddings,
        salt=sigrid_hash_salt,
    )
    it = iter(dataloader)

    model = DLRM(
        embedding_bag_collection=EmbeddingBagCollection(
            tables=[
                EmbeddingBagConfig(
                    name=f"table_{cat_name}",
                    embedding_dim=64,
                    num_embeddings=num_embeddings,
                    feature_names=[cat_name],
                )
                for cat_name in DEFAULT_CAT_NAMES + ["bucketize_int_0"]
            ],
            device=torch.device("meta"),
        ),
        dense_in_features=INT_FEATURE_COUNT,
        dense_arch_layer_sizes=[64],
        over_arch_layer_sizes=[32, 1],
        dense_device=device,
    )
    fused_params = {
        "learning_rate": 0.02,
        "optimizer": EmbOptimType.EXACT_ROWWISE_ADAGRAD,
    }
    sharded_model = DistributedModelParallel(
        module=model,
        device=device,
        sharders=[
            EmbeddingBagCollectionSharder(fused_params=fused_params),
        ],
    )
    optimizer = KeyedOptimizerWrapper(
        dict(model.named_parameters()),
        lambda params: torch.optim.SGD(params, lr=0.01),
    )
    loss_fn = torch.nn.BCEWithLogitsLoss()

    print_example = dist.get_rank() == 0
    for (dense_features, kjt, labels) in it:
        if print_example:
            print("Example dense_features", dense_features)
            print("Example KJT input", kjt)
            print_example = False
        dense_features = dense_features.to(device)
        kjt = kjt.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()

        preds = sharded_model(dense_features, kjt)
        loss = loss_fn(preds.squeeze(), labels.squeeze())
        loss.sum().backward()

        optimizer.step()

    print("\033[92m" + "DLRM run with torcharrow last-mile preprocessing finished!")


if __name__ == "__main__":
    main()
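
# Hedged launch note, not part of the original file: main() reads LOCAL_RANK from the
# environment and is wrapped in @record, so it expects an elastic launcher such as
# torchrun; the script file name and process count below are assumptions.
#
#   torchrun --nproc_per_node=2 dlrm_main.py --parquet_directory /data/criteo_preproc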
py
1a3dca6484c907911f2d3ab68c51799771f22ceb
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=wildcard-import
import os

import pytest

from tools.test_data_yum import bash_yum_info, \
    conntrack_tools_yum_info, \
    pacemaker_yum_info
from tools.yum import YumInfoParser
from tools.yum_test_data import bash_expected, pacemaker_expected, \
    yum_info_installed_header, \
    yum_info_available_header, \
    yum_info_available_header2
from tools.yum_test_data import conntrack_tools_expected


@pytest.mark.parametrize('yum_info, expected_output', [
    (bash_yum_info, bash_expected),
    (conntrack_tools_yum_info, conntrack_tools_expected),
    (pacemaker_yum_info, pacemaker_expected)
])
def test_parse_package(yum_info, expected_output):
    parsed = YumInfoParser().parse_package(yum_info)
    expected = expected_output
    assert parsed == expected


def test_parse_installed():
    fake_out = '\n'.join([yum_info_installed_header,
                          bash_yum_info,
                          conntrack_tools_yum_info])
    parsed = YumInfoParser().parse_installed(fake_out)
    expected = [bash_expected, conntrack_tools_expected]
    assert parsed == expected


@pytest.mark.parametrize('available_header', [
    yum_info_available_header,
    yum_info_available_header2
])
def test_parse_available(available_header):
    fake_out = '\n'.join([available_header,
                          bash_yum_info,
                          conntrack_tools_yum_info])
    parsed = YumInfoParser().parse_available(fake_out)
    expected = [bash_expected, conntrack_tools_expected]
    assert parsed == expected


def test_parse_file():
    test_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'yum_info_installed.sample')
    parsed = YumInfoParser().parse_file(test_file)
    assert len(parsed) == 14
py
1a3dcbb8f423ac46b657f1cf9b7b1707f8d93bec
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Entry point for training AttGAN network""" import argparse import datetime import json import math import os from os.path import join import numpy as np import mindspore.common.dtype as mstype from mindspore import Tensor, context from mindspore import nn from mindspore.common import set_seed from mindspore.communication.management import init, get_rank from mindspore.context import ParallelMode from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, _InternalCallbackParam, RunContext from mindspore.train.serialization import load_param_into_net from src.attgan import Gen, Dis from src.cell import TrainOneStepCellGen, TrainOneStepCellDis, init_weights from src.data import data_loader from src.helpers import Progressbar from src.loss import GenLoss, DisLoss from src.utils import resume_generator, resume_discriminator attrs_default = [ 'Bald', 'Bangs', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows', 'Eyeglasses', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'No_Beard', 'Pale_Skin', 'Young' ] def parse(arg=None): """Define configuration of Model""" parser = argparse.ArgumentParser() parser.add_argument('--attrs', dest='attrs', default=attrs_default, nargs='+', help='attributes to learn') parser.add_argument('--data', dest='data', type=str, choices=['CelebA'], default='CelebA') parser.add_argument('--data_path', dest='data_path', type=str, default='./data/img_align_celeba') parser.add_argument('--attr_path', dest='attr_path', type=str, default='./data/list_attr_celeba.txt') parser.add_argument('--img_size', dest='img_size', type=int, default=128) parser.add_argument('--shortcut_layers', dest='shortcut_layers', type=int, default=1) parser.add_argument('--inject_layers', dest='inject_layers', type=int, default=1) parser.add_argument('--enc_dim', dest='enc_dim', type=int, default=64) parser.add_argument('--dec_dim', dest='dec_dim', type=int, default=64) parser.add_argument('--dis_dim', dest='dis_dim', type=int, default=64) parser.add_argument('--dis_fc_dim', dest='dis_fc_dim', type=int, default=1024) parser.add_argument('--enc_layers', dest='enc_layers', type=int, default=5) parser.add_argument('--dec_layers', dest='dec_layers', type=int, default=5) parser.add_argument('--dis_layers', dest='dis_layers', type=int, default=5) parser.add_argument('--enc_norm', dest='enc_norm', type=str, default='batchnorm') parser.add_argument('--dec_norm', dest='dec_norm', type=str, default='batchnorm') parser.add_argument('--dis_norm', dest='dis_norm', type=str, default='instancenorm') parser.add_argument('--dis_fc_norm', dest='dis_fc_norm', type=str, default='none') parser.add_argument('--enc_acti', dest='enc_acti', type=str, default='lrelu') parser.add_argument('--dec_acti', dest='dec_acti', type=str, default='relu') parser.add_argument('--dis_acti', dest='dis_acti', type=str, default='lrelu') parser.add_argument('--dis_fc_acti', 
dest='dis_fc_acti', type=str, default='relu') parser.add_argument('--lambda_1', dest='lambda_1', type=float, default=100.0) parser.add_argument('--lambda_2', dest='lambda_2', type=float, default=10.0) parser.add_argument('--lambda_3', dest='lambda_3', type=float, default=1.0) parser.add_argument('--lambda_gp', dest='lambda_gp', type=float, default=10.0) parser.add_argument('--epochs', dest='epochs', type=int, default=200, help='# of epochs') parser.add_argument('--batch_size', dest='batch_size', type=int, default=32) parser.add_argument('--num_workers', dest='num_workers', type=int, default=16) parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='learning rate') parser.add_argument('--beta1', dest='beta1', type=float, default=0.5) parser.add_argument('--beta2', dest='beta2', type=float, default=0.999) parser.add_argument('--n_d', dest='n_d', type=int, default=5, help='# of d updates per g update') parser.add_argument('--split_point', dest='split_point', type=int, default=182000, help='# of dataset split point') parser.add_argument('--thres_int', dest='thres_int', type=float, default=0.5) parser.add_argument('--test_int', dest='test_int', type=float, default=1.0) parser.add_argument('--save_interval', dest='save_interval', type=int, default=500) parser.add_argument('--experiment_name', dest='experiment_name', default=datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")) parser.add_argument("--run_distribute", type=int, default=0, help="Run distribute, default: false.") parser.add_argument('--resume_model', action='store_true') parser.add_argument('--gen_ckpt_name', type=str, default='') parser.add_argument('--dis_ckpt_name', type=str, default='') return parser.parse_args(arg) args = parse() print(args) args.lr_base = args.lr args.n_attrs = len(args.attrs) # initialize environment set_seed(1) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) if args.run_distribute: if os.getenv("DEVICE_ID", "not_set").isdigit(): context.set_context(device_id=int(os.getenv("DEVICE_ID"))) device_num = int(os.getenv('RANK_SIZE')) print(device_num) context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True, device_num=device_num) init() rank = get_rank() else: if os.getenv("DEVICE_ID", "not_set").isdigit(): context.set_context(device_id=int(os.getenv("DEVICE_ID"))) device_num = int(os.getenv('RANK_SIZE')) rank = 0 print("Initialize successful!") os.makedirs(join('output', args.experiment_name), exist_ok=True) os.makedirs(join('output', args.experiment_name, 'checkpoint'), exist_ok=True) with open(join('output', args.experiment_name, 'setting.txt'), 'w') as f: f.write(json.dumps(vars(args), indent=4, separators=(',', ':'))) if __name__ == '__main__': # Define dataloader train_dataset, train_length = data_loader(img_path=args.data_path, attr_path=args.attr_path, selected_attrs=args.attrs, mode="train", batch_size=args.batch_size, device_num=device_num, shuffle=True, split_point=args.split_point) train_loader = train_dataset.create_dict_iterator() print('Training images:', train_length) # Define network gen = Gen(args.enc_dim, args.enc_layers, args.enc_norm, args.enc_acti, args.dec_dim, args.dec_layers, args.dec_norm, args.dec_acti, args.n_attrs, args.shortcut_layers, args.inject_layers, args.img_size, mode='train') dis = Dis(args.dis_dim, args.dis_norm, args.dis_acti, args.dis_fc_dim, args.dis_fc_norm, args.dis_fc_acti, args.dis_layers, args.img_size, mode='train') # Initialize 
network init_weights(gen, 'KaimingUniform', math.sqrt(5)) init_weights(dis, 'KaimingUniform', math.sqrt(5)) # Resume from checkpoint if args.resume_model: para_gen = resume_generator(args, gen, args.gen_ckpt_name) para_dis = resume_discriminator(args, dis, args.dis_ckpt_name) load_param_into_net(gen, para_gen) load_param_into_net(dis, para_dis) # Define network with loss G_loss_cell = GenLoss(args, gen, dis) D_loss_cell = DisLoss(args, gen, dis) # Define Optimizer optimizer_G = nn.Adam(params=gen.trainable_params(), learning_rate=args.lr, beta1=args.beta1, beta2=args.beta2) optimizer_D = nn.Adam(params=dis.trainable_params(), learning_rate=args.lr, beta1=args.beta1, beta2=args.beta2) # Define One Step Train G_trainOneStep = TrainOneStepCellGen(G_loss_cell, optimizer_G) D_trainOneStep = TrainOneStepCellDis(D_loss_cell, optimizer_D) # Train G_trainOneStep.set_train(True) D_trainOneStep.set_train(True) print("Start Training") train_iter = train_length // args.batch_size ckpt_config = CheckpointConfig(save_checkpoint_steps=args.save_interval) if rank == 0: local_train_url = os.path.join('output', args.experiment_name, 'checkpoint/rank{}'.format(rank)) ckpt_cb_gen = ModelCheckpoint(config=ckpt_config, directory=local_train_url, prefix='generator') ckpt_cb_dis = ModelCheckpoint(config=ckpt_config, directory=local_train_url, prefix='discriminator') cb_params_gen = _InternalCallbackParam() cb_params_gen.train_network = gen cb_params_gen.cur_epoch_num = 0 gen_run_context = RunContext(cb_params_gen) ckpt_cb_gen.begin(gen_run_context) cb_params_dis = _InternalCallbackParam() cb_params_dis.train_network = dis cb_params_dis.cur_epoch_num = 0 dis_run_context = RunContext(cb_params_dis) ckpt_cb_dis.begin(dis_run_context) # Initialize Progressbar progressbar = Progressbar() it = 0 for epoch in range(args.epochs): for data in progressbar(train_loader, train_iter): img_a = data["image"] att_a = data["attr"] att_a = att_a.asnumpy() att_b = np.random.permutation(att_a) att_a_ = (att_a * 2 - 1) * args.thres_int att_b_ = (att_b * 2 - 1) * args.thres_int att_a = Tensor(att_a, mstype.float32) att_a_ = Tensor(att_a_, mstype.float32) att_b = Tensor(att_b, mstype.float32) att_b_ = Tensor(att_b_, mstype.float32) if (it + 1) % (args.n_d + 1) != 0: d_out, d_real_loss, d_fake_loss, dc_loss, df_gp = D_trainOneStep(img_a, att_a, att_a_, att_b, att_b_) else: g_out, gf_loss, gc_loss, gr_loss = G_trainOneStep(img_a, att_a, att_a_, att_b, att_b_) progressbar.say(epoch=epoch, iter=it + 1, d_loss=d_out, g_loss=g_out, gf_loss=gf_loss, gc_loss=gc_loss, gr_loss=gr_loss, dc_loss=dc_loss, df_gp=df_gp) if (epoch + 1) % 5 == 0 and (it + 1) % args.save_interval == 0 and rank == 0: cb_params_gen.cur_epoch_num = epoch + 1 cb_params_dis.cur_epoch_num = epoch + 1 cb_params_gen.cur_step_num = it + 1 cb_params_dis.cur_step_num = it + 1 cb_params_gen.batch_num = it + 2 cb_params_dis.batch_num = it + 2 ckpt_cb_gen.step_end(gen_run_context) ckpt_cb_dis.step_end(dis_run_context) it += 1
py
1a3dcc36b7a091686704124a6211cd687f36a026
# coding=utf-8 from OTLMOW.OEFModel.EMObject import EMObject # Generated with OEFClassCreator. To modify: extend, do not edit class Brug(EMObject): """Beweegbare brug""" typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#Brug' label = 'Beweegbare brug' def __init__(self): super().__init__()
py
1a3dcc931e4f76a0162374fdae8590641999ff69
#!/usr/bin/env python # Copyright (c) 2014 Wladimir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' A script to check that the (Linux) executables produced by gitian only contain allowed gcc, glibc and libstdc++ version symbols. This makes sure they are still compatible with the minimum supported Linux distribution versions. Example usage: find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py ''' from __future__ import division, print_function, unicode_literals import subprocess import re import sys import os # Debian 6.0.9 (Squeeze) has: # # - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B) # - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6) # - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6) # # Ubuntu 10.04.4 (Lucid Lynx) has: # # - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all) # - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all) # - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names) # # Taking the minimum of these as our target. # # According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to: # GCC 4.4.0: GCC_4.4.0 # GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3 # (glibc) GLIBC_2_11 # MAX_VERSIONS = { 'GCC': (4,4,0), 'CXXABI': (1,3,3), 'GLIBCXX': (3,4,13), 'GLIBC': (2,11) } # See here for a description of _IO_stdin_used: # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used' } READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') # Allowed NEEDED libraries ALLOWED_LIBRARIES = { # digibyted and digibyte-qt b'libgcc_s.so.1', # GCC base support b'libc.so.6', # C library b'libpthread.so.0', # threading b'libanl.so.1', # DNS resolve b'libm.so.6', # math library b'librt.so.1', # real-time (clock) b'ld-linux-x86-64.so.2', # 64-bit dynamic linker b'ld-linux.so.2', # 32-bit dynamic linker # digibyte-qt only b'libX11-xcb.so.1', # part of X11 b'libX11.so.6', # part of X11 b'libxcb.so.1', # part of X11 b'libfontconfig.so.1', # font support b'libfreetype.so.6', # font parsing b'libdl.so.2' # programming interface to dynamic linker } class CPPFilt(object): ''' Demangle C++ symbol names. Use a pipe to the 'c++filt' command. ''' def __init__(self): self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE) def __call__(self, mangled): self.proc.stdin.write(mangled + b'\n') self.proc.stdin.flush() return self.proc.stdout.readline().rstrip() def close(self): self.proc.stdin.close() self.proc.stdout.close() self.proc.wait() def read_symbols(executable, imports=True): ''' Parse an ELF executable and return a list of (symbol,version) tuples for dynamic, imported symbols. 
''' p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) syms = [] for line in stdout.split(b'\n'): line = line.split() if len(line)>7 and re.match(b'[0-9]+:$', line[0]): (sym, _, version) = line[7].partition(b'@') is_import = line[6] == b'UND' if version.startswith(b'@'): version = version[1:] if is_import == imports: syms.append((sym, version)) return syms def check_version(max_versions, version): if b'_' in version: (lib, _, ver) = version.rpartition(b'_') else: lib = version ver = '0' ver = tuple([int(x) for x in ver.split(b'.')]) if not lib in max_versions: return False return ver <= max_versions[lib] def read_libraries(filename): p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') libraries = [] for line in stdout.split(b'\n'): tokens = line.split() if len(tokens)>2 and tokens[1] == b'(NEEDED)': match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:])) if match: libraries.append(match.group(1)) else: raise ValueError('Unparseable (NEEDED) specification') return libraries if __name__ == '__main__': cppfilt = CPPFilt() retval = 0 for filename in sys.argv[1:]: # Check imported symbols for sym,version in read_symbols(filename, True): if version and not check_version(MAX_VERSIONS, version): print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8'))) retval = 1 # Check exported symbols for sym,version in read_symbols(filename, False): if sym in IGNORE_EXPORTS: continue print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8'))) retval = 1 # Check dependency libraries for library_name in read_libraries(filename): if library_name not in ALLOWED_LIBRARIES: print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8'))) retval = 1 sys.exit(retval)
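check_version() above splits a symbol version such as GLIBCXX_3.4.13 on the last underscore and compares the dotted part as an integer tuple against MAX_VERSIONS. A str-based Python 3 paraphrase to illustrate the comparison (the script itself operates on bytes under Python 2):

```python
MAX_VERSIONS = {'GCC': (4, 4, 0), 'CXXABI': (1, 3, 3), 'GLIBCXX': (3, 4, 13), 'GLIBC': (2, 11)}

def version_ok(version):
    lib, _, ver = version.rpartition('_')          # 'GLIBCXX_3.4.13' -> ('GLIBCXX', '3.4.13')
    ver = tuple(int(x) for x in ver.split('.'))
    return lib in MAX_VERSIONS and ver <= MAX_VERSIONS[lib]

print(version_ok('GLIBC_2.11'))      # True  - exactly the allowed maximum
print(version_ok('GLIBCXX_3.4.14'))  # False - newer than the targeted distros ship
print(version_ok('GLIBC_2.4'))       # True  - older symbol versions are fine
```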
py
1a3dcc9edb3f29eb79580e52b032b1cfec045813
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals from ..tracking import DiffusionTensorStreamlineTrack def test_DiffusionTensorStreamlineTrack_inputs(): input_map = dict(args=dict(argstr='%s', ), cutoff_value=dict(argstr='-cutoff %s', units='NA', ), desired_number_of_tracks=dict(argstr='-number %d', ), do_not_precompute=dict(argstr='-noprecomputed', ), environ=dict(nohash=True, usedefault=True, ), exclude_file=dict(argstr='-exclude %s', xor=['exclude_file', 'exclude_spec'], ), exclude_spec=dict(argstr='-exclude %s', position=2, sep=',', units='mm', xor=['exclude_file', 'exclude_spec'], ), gradient_encoding_file=dict(argstr='-grad %s', mandatory=True, position=-2, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', mandatory=True, position=-2, ), include_file=dict(argstr='-include %s', xor=['include_file', 'include_spec'], ), include_spec=dict(argstr='-include %s', position=2, sep=',', units='mm', xor=['include_file', 'include_spec'], ), initial_cutoff_value=dict(argstr='-initcutoff %s', units='NA', ), initial_direction=dict(argstr='-initdirection %s', units='voxels', ), inputmodel=dict(argstr='%s', position=-3, usedefault=True, ), mask_file=dict(argstr='-mask %s', xor=['mask_file', 'mask_spec'], ), mask_spec=dict(argstr='-mask %s', position=2, sep=',', units='mm', xor=['mask_file', 'mask_spec'], ), maximum_number_of_tracks=dict(argstr='-maxnum %d', ), maximum_tract_length=dict(argstr='-length %s', units='mm', ), minimum_radius_of_curvature=dict(argstr='-curvature %s', units='mm', ), minimum_tract_length=dict(argstr='-minlength %s', units='mm', ), no_mask_interpolation=dict(argstr='-nomaskinterp', ), out_file=dict(argstr='%s', name_source=['in_file'], name_template='%s_tracked.tck', output_name='tracked', position=-1, ), seed_file=dict(argstr='-seed %s', xor=['seed_file', 'seed_spec'], ), seed_spec=dict(argstr='-seed %s', position=2, sep=',', units='mm', xor=['seed_file', 'seed_spec'], ), step_size=dict(argstr='-step %s', units='mm', ), stop=dict(argstr='-stop', ), terminal_output=dict(nohash=True, ), unidirectional=dict(argstr='-unidirectional', ), ) inputs = DiffusionTensorStreamlineTrack.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_DiffusionTensorStreamlineTrack_outputs(): output_map = dict(tracked=dict(), ) outputs = DiffusionTensorStreamlineTrack.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
py
1a3dcd0e46e4964f48998996cb77ed0a961d1144
# sqlalchemy/pool.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php """Base constructs for connection pools. """ from collections import deque import time import weakref from .. import event from .. import exc from .. import log from .. import util reset_rollback = util.symbol("reset_rollback") reset_commit = util.symbol("reset_commit") reset_none = util.symbol("reset_none") class _ConnDialect: """partial implementation of :class:`.Dialect` which provides DBAPI connection methods. When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`, the :class:`_engine.Engine` replaces this with its own :class:`.Dialect`. """ is_async = False def do_rollback(self, dbapi_connection): dbapi_connection.rollback() def do_commit(self, dbapi_connection): dbapi_connection.commit() def do_close(self, dbapi_connection): dbapi_connection.close() def do_ping(self, dbapi_connection): raise NotImplementedError( "The ping feature requires that a dialect is " "passed to the connection pool." ) def get_driver_connection(self, connection): return connection class _AsyncConnDialect(_ConnDialect): is_async = True class Pool(log.Identified): """Abstract base class for connection pools.""" _dialect = _ConnDialect() def __init__( self, creator, recycle=-1, echo=None, logging_name=None, reset_on_return=True, events=None, dialect=None, pre_ping=False, _dispatch=None, ): """ Construct a Pool. :param creator: a callable function that returns a DB-API connection object. The function will be called with parameters. :param recycle: If set to a value other than -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1. :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.pool" logger. Defaults to a hexstring of the object's id. :param echo: if True, the connection pool will log informational output such as when connections are invalidated as well as when connections are recycled to the default log handler, which defaults to ``sys.stdout`` for output.. If set to the string ``"debug"``, the logging will include pool checkouts and checkins. The :paramref:`_pool.Pool.echo` parameter can also be set from the :func:`_sa.create_engine` call by using the :paramref:`_sa.create_engine.echo_pool` parameter. .. seealso:: :ref:`dbengine_logging` - further detail on how to configure logging. :param reset_on_return: Determine steps to take on connections as they are returned to the pool, which were not otherwise handled by a :class:`_engine.Connection`. reset_on_return can have any of these values: * ``"rollback"`` - call rollback() on the connection, to release locks and transaction resources. This is the default value. The vast majority of use cases should leave this value set. * ``True`` - same as 'rollback', this is here for backwards compatibility. * ``"commit"`` - call commit() on the connection, to release locks and transaction resources. A commit here may be desirable for databases that cache query plans if a commit is emitted, such as Microsoft SQL Server. However, this value is more dangerous than 'rollback' because any data changes present on the transaction are committed unconditionally. 
* ``None`` - don't do anything on the connection. This setting may be appropriate if the database / DBAPI works in pure "autocommit" mode at all times, or if the application uses the :class:`_engine.Engine` with consistent connectivity patterns. See the section :ref:`pool_reset_on_return` for more details. * ``False`` - same as None, this is here for backwards compatibility. .. seealso:: :ref:`pool_reset_on_return` :param events: a list of 2-tuples, each of the form ``(callable, target)`` which will be passed to :func:`.event.listen` upon construction. Provided here so that event listeners can be assigned via :func:`_sa.create_engine` before dialect-level listeners are applied. :param dialect: a :class:`.Dialect` that will handle the job of calling rollback(), close(), or commit() on DBAPI connections. If omitted, a built-in "stub" dialect is used. Applications that make use of :func:`_sa.create_engine` should not use this parameter as it is handled by the engine creation strategy. .. versionadded:: 1.1 - ``dialect`` is now a public parameter to the :class:`_pool.Pool`. :param pre_ping: if True, the pool will emit a "ping" (typically "SELECT 1", but is dialect-specific) on the connection upon checkout, to test if the connection is alive or not. If not, the connection is transparently re-connected and upon success, all other pooled connections established prior to that timestamp are invalidated. Requires that a dialect is passed as well to interpret the disconnection error. .. versionadded:: 1.2 """ if logging_name: self.logging_name = self._orig_logging_name = logging_name else: self._orig_logging_name = None log.instance_logger(self, echoflag=echo) self._creator = creator self._recycle = recycle self._invalidate_time = 0 self._pre_ping = pre_ping self._reset_on_return = util.symbol.parse_user_argument( reset_on_return, { reset_rollback: ["rollback", True], reset_none: ["none", None, False], reset_commit: ["commit"], }, "reset_on_return", resolve_symbol_names=False, ) self.echo = echo if _dispatch: self.dispatch._update(_dispatch, only_propagate=False) if dialect: self._dialect = dialect if events: for fn, target in events: event.listen(self, target, fn) @util.hybridproperty def _is_asyncio(self): return self._dialect.is_async @property def _creator(self): return self.__dict__["_creator"] @_creator.setter def _creator(self, creator): self.__dict__["_creator"] = creator self._invoke_creator = self._should_wrap_creator(creator) def _should_wrap_creator(self, creator): """Detect if creator accepts a single argument, or is sent as a legacy style no-arg function. 
""" try: argspec = util.get_callable_argspec(self._creator, no_self=True) except TypeError: return lambda crec: creator() defaulted = argspec[3] is not None and len(argspec[3]) or 0 positionals = len(argspec[0]) - defaulted # look for the exact arg signature that DefaultStrategy # sends us if (argspec[0], argspec[3]) == (["connection_record"], (None,)): return creator # or just a single positional elif positionals == 1: return creator # all other cases, just wrap and assume legacy "creator" callable # thing else: return lambda crec: creator() def _close_connection(self, connection): self.logger.debug("Closing connection %r", connection) try: self._dialect.do_close(connection) except Exception: self.logger.error( "Exception closing connection %r", connection, exc_info=True ) def _create_connection(self): """Called by subclasses to create a new ConnectionRecord.""" return _ConnectionRecord(self) def _invalidate(self, connection, exception=None, _checkin=True): """Mark all connections established within the generation of the given connection as invalidated. If this pool's last invalidate time is before when the given connection was created, update the timestamp til now. Otherwise, no action is performed. Connections with a start time prior to this pool's invalidation time will be recycled upon next checkout. """ rec = getattr(connection, "_connection_record", None) if not rec or self._invalidate_time < rec.starttime: self._invalidate_time = time.time() if _checkin and getattr(connection, "is_valid", False): connection.invalidate(exception) def recreate(self): """Return a new :class:`_pool.Pool`, of the same class as this one and configured with identical creation arguments. This method is used in conjunction with :meth:`dispose` to close out an entire :class:`_pool.Pool` and create a new one in its place. """ raise NotImplementedError() def dispose(self): """Dispose of this pool. This method leaves the possibility of checked-out connections remaining open, as it only affects connections that are idle in the pool. .. seealso:: :meth:`Pool.recreate` """ raise NotImplementedError() def connect(self): """Return a DBAPI connection from the pool. The connection is instrumented such that when its ``close()`` method is called, the connection will be returned to the pool. """ return _ConnectionFairy._checkout(self) def _return_conn(self, record): """Given a _ConnectionRecord, return it to the :class:`_pool.Pool`. This method is called when an instrumented DBAPI connection has its ``close()`` method called. """ self._do_return_conn(record) def _do_get(self): """Implementation for :meth:`get`, supplied by subclasses.""" raise NotImplementedError() def _do_return_conn(self, conn): """Implementation for :meth:`return_conn`, supplied by subclasses.""" raise NotImplementedError() def status(self): raise NotImplementedError() class _ConnectionRecord: """Internal object which maintains an individual DBAPI connection referenced by a :class:`_pool.Pool`. The :class:`._ConnectionRecord` object always exists for any particular DBAPI connection whether or not that DBAPI connection has been "checked out". This is in contrast to the :class:`._ConnectionFairy` which is only a public facade to the DBAPI connection while it is checked out. A :class:`._ConnectionRecord` may exist for a span longer than that of a single DBAPI connection. 
For example, if the :meth:`._ConnectionRecord.invalidate` method is called, the DBAPI connection associated with this :class:`._ConnectionRecord` will be discarded, but the :class:`._ConnectionRecord` may be used again, in which case a new DBAPI connection is produced when the :class:`_pool.Pool` next uses this record. The :class:`._ConnectionRecord` is delivered along with connection pool events, including :meth:`_events.PoolEvents.connect` and :meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord` still remains an internal object whose API and internals may change. .. seealso:: :class:`._ConnectionFairy` """ def __init__(self, pool, connect=True): self.__pool = pool if connect: self.__connect() self.finalize_callback = deque() fresh = False fairy_ref = None starttime = None dbapi_connection = None """A reference to the actual DBAPI connection being tracked. May be ``None`` if this :class:`._ConnectionRecord` has been marked as invalidated; a new DBAPI connection may replace it if the owning pool calls upon this :class:`._ConnectionRecord` to reconnect. For adapted drivers, like the Asyncio implementations, this is a :class:`.AdaptedConnection` that adapts the driver connection to the DBAPI protocol. Use :attr:`._ConnectionRecord.driver_connection` to obtain the connection objected returned by the driver. .. versionadded:: 1.4.24 """ @property def driver_connection(self): """The connection object as returned by the driver after a connect. For normal sync drivers that support the DBAPI protocol, this object is the same as the one referenced by :attr:`._ConnectionRecord.dbapi_connection`. For adapted drivers, like the Asyncio ones, this is the actual object that was returned by the driver ``connect`` call. As :attr:`._ConnectionRecord.dbapi_connection` it may be ``None`` if this :class:`._ConnectionRecord` has been marked as invalidated. .. versionadded:: 1.4.24 """ if self.dbapi_connection is None: return None else: return self.__pool._dialect.get_driver_connection( self.dbapi_connection ) @property def connection(self): """An alias to :attr:`._ConnectionRecord.dbapi_connection`. This alias is deprecated, please use the new name. .. deprecated:: 1.4.24 """ return self.dbapi_connection @connection.setter def connection(self, value): self.dbapi_connection = value _soft_invalidate_time = 0 @util.memoized_property def info(self): """The ``.info`` dictionary associated with the DBAPI connection. This dictionary is shared among the :attr:`._ConnectionFairy.info` and :attr:`_engine.Connection.info` accessors. .. note:: The lifespan of this dictionary is linked to the DBAPI connection itself, meaning that it is **discarded** each time the DBAPI connection is closed and/or invalidated. The :attr:`._ConnectionRecord.record_info` dictionary remains persistent throughout the lifespan of the :class:`._ConnectionRecord` container. """ return {} @util.memoized_property def record_info(self): """An "info' dictionary associated with the connection record itself. Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked to the lifespan of the DBAPI connection, this dictionary is linked to the lifespan of the :class:`._ConnectionRecord` container itself and will remain persistent throughout the life of the :class:`._ConnectionRecord`. .. 
versionadded:: 1.1 """ return {} @classmethod def checkout(cls, pool): rec = pool._do_get() try: dbapi_connection = rec.get_connection() except Exception as err: with util.safe_reraise(): rec._checkin_failed(err, _fairy_was_created=False) echo = pool._should_log_debug() fairy = _ConnectionFairy(dbapi_connection, rec, echo) rec.fairy_ref = ref = weakref.ref( fairy, lambda ref: _finalize_fairy and _finalize_fairy(None, rec, pool, ref, echo, True), ) _strong_ref_connection_records[ref] = rec if echo: pool.logger.debug( "Connection %r checked out from pool", dbapi_connection ) return fairy def _checkin_failed(self, err, _fairy_was_created=True): self.invalidate(e=err) self.checkin( _fairy_was_created=_fairy_was_created, ) def checkin(self, _fairy_was_created=True): if self.fairy_ref is None and _fairy_was_created: # _fairy_was_created is False for the initial get connection phase; # meaning there was no _ConnectionFairy and we must unconditionally # do a checkin. # # otherwise, if fairy_was_created==True, if fairy_ref is None here # that means we were checked in already, so this looks like # a double checkin. util.warn("Double checkin attempted on %s" % self) return self.fairy_ref = None connection = self.dbapi_connection pool = self.__pool while self.finalize_callback: finalizer = self.finalize_callback.pop() finalizer(connection) if pool.dispatch.checkin: pool.dispatch.checkin(connection, self) pool._return_conn(self) @property def in_use(self): return self.fairy_ref is not None @property def last_connect_time(self): return self.starttime def close(self): if self.dbapi_connection is not None: self.__close() def invalidate(self, e=None, soft=False): """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`. This method is called for all connection invalidations, including when the :meth:`._ConnectionFairy.invalidate` or :meth:`_engine.Connection.invalidate` methods are called, as well as when any so-called "automatic invalidation" condition occurs. :param e: an exception object indicating a reason for the invalidation. :param soft: if True, the connection isn't closed; instead, this connection will be recycled on next checkout. .. versionadded:: 1.0.3 .. seealso:: :ref:`pool_connection_invalidation` """ # already invalidated if self.dbapi_connection is None: return if soft: self.__pool.dispatch.soft_invalidate( self.dbapi_connection, self, e ) else: self.__pool.dispatch.invalidate(self.dbapi_connection, self, e) if e is not None: self.__pool.logger.info( "%sInvalidate connection %r (reason: %s:%s)", "Soft " if soft else "", self.dbapi_connection, e.__class__.__name__, e, ) else: self.__pool.logger.info( "%sInvalidate connection %r", "Soft " if soft else "", self.dbapi_connection, ) if soft: self._soft_invalidate_time = time.time() else: self.__close() self.dbapi_connection = None def get_connection(self): recycle = False # NOTE: the various comparisons here are assuming that measurable time # passes between these state changes. however, time.time() is not # guaranteed to have sub-second precision. comparisons of # "invalidation time" to "starttime" should perhaps use >= so that the # state change can take place assuming no measurable time has passed, # however this does not guarantee correct behavior here as if time # continues to not pass, it will try to reconnect repeatedly until # these timestamps diverge, so in that sense using > is safer. 
Per # https://stackoverflow.com/a/1938096/34549, Windows time.time() may be # within 16 milliseconds accuracy, so unit tests for connection # invalidation need a sleep of at least this long between initial start # time and invalidation for the logic below to work reliably. if self.dbapi_connection is None: self.info.clear() self.__connect() elif ( self.__pool._recycle > -1 and time.time() - self.starttime > self.__pool._recycle ): self.__pool.logger.info( "Connection %r exceeded timeout; recycling", self.dbapi_connection, ) recycle = True elif self.__pool._invalidate_time > self.starttime: self.__pool.logger.info( "Connection %r invalidated due to pool invalidation; " + "recycling", self.dbapi_connection, ) recycle = True elif self._soft_invalidate_time > self.starttime: self.__pool.logger.info( "Connection %r invalidated due to local soft invalidation; " + "recycling", self.dbapi_connection, ) recycle = True if recycle: self.__close() self.info.clear() self.__connect() return self.dbapi_connection def _is_hard_or_soft_invalidated(self): return ( self.dbapi_connection is None or self.__pool._invalidate_time > self.starttime or (self._soft_invalidate_time > self.starttime) ) def __close(self): self.finalize_callback.clear() if self.__pool.dispatch.close: self.__pool.dispatch.close(self.dbapi_connection, self) self.__pool._close_connection(self.dbapi_connection) self.dbapi_connection = None def __connect(self): pool = self.__pool # ensure any existing connection is removed, so that if # creator fails, this attribute stays None self.dbapi_connection = None try: self.starttime = time.time() self.dbapi_connection = connection = pool._invoke_creator(self) pool.logger.debug("Created new connection %r", connection) self.fresh = True except Exception as e: with util.safe_reraise(): pool.logger.debug("Error on connect(): %s", e) else: # in SQLAlchemy 1.4 the first_connect event is not used by # the engine, so this will usually not be set if pool.dispatch.first_connect: pool.dispatch.first_connect.for_modify( pool.dispatch ).exec_once_unless_exception(self.dbapi_connection, self) # init of the dialect now takes place within the connect # event, so ensure a mutex is used on the first run pool.dispatch.connect.for_modify( pool.dispatch )._exec_w_sync_on_first_run(self.dbapi_connection, self) def _finalize_fairy( dbapi_connection, connection_record, pool, ref, # this is None when called directly, not by the gc echo, reset=True, fairy=None, ): """Cleanup for a :class:`._ConnectionFairy` whether or not it's already been garbage collected. When using an async dialect no IO can happen here (without using a dedicated thread), since this is called outside the greenlet context and with an already running loop. In this case function will only log a message and raise a warning. 
""" if ref: _strong_ref_connection_records.pop(ref, None) elif fairy: _strong_ref_connection_records.pop(weakref.ref(fairy), None) if ref is not None: if connection_record.fairy_ref is not ref: return assert dbapi_connection is None dbapi_connection = connection_record.dbapi_connection # null pool is not _is_asyncio but can be used also with async dialects dont_restore_gced = pool._dialect.is_async if dont_restore_gced: detach = not connection_record or ref can_manipulate_connection = not ref else: detach = not connection_record can_manipulate_connection = True if dbapi_connection is not None: if connection_record and echo: pool.logger.debug( "Connection %r being returned to pool%s", dbapi_connection, ", transaction state was already reset by caller" if not reset else "", ) try: fairy = fairy or _ConnectionFairy( dbapi_connection, connection_record, echo, ) assert fairy.dbapi_connection is dbapi_connection if reset and can_manipulate_connection: fairy._reset(pool) if detach: if connection_record: fairy._pool = pool fairy.detach() if can_manipulate_connection: if pool.dispatch.close_detached: pool.dispatch.close_detached(dbapi_connection) pool._close_connection(dbapi_connection) else: message = ( "The garbage collector is trying to clean up " "connection %r. This feature is unsupported on async " "dbapi, since no IO can be performed at this stage to " "reset the connection. Please close out all " "connections when they are no longer used, calling " "``close()`` or using a context manager to " "manage their lifetime." ) % dbapi_connection pool.logger.error(message) util.warn(message) except BaseException as e: pool.logger.error( "Exception during reset or similar", exc_info=True ) if connection_record: connection_record.invalidate(e=e) if not isinstance(e, Exception): raise if connection_record and connection_record.fairy_ref is not None: connection_record.checkin() # a dictionary of the _ConnectionFairy weakrefs to _ConnectionRecord, so that # GC under pypy will call ConnectionFairy finalizers. linked directly to the # weakref that will empty itself when collected so that it should not create # any unmanaged memory references. _strong_ref_connection_records = {} class _ConnectionFairy: """Proxies a DBAPI connection and provides return-on-dereference support. This is an internal object used by the :class:`_pool.Pool` implementation to provide context management to a DBAPI connection delivered by that :class:`_pool.Pool`. The name "fairy" is inspired by the fact that the :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts only for the length of a specific DBAPI connection being checked out from the pool, and additionally that as a transparent proxy, it is mostly invisible. .. seealso:: :class:`._ConnectionRecord` """ def __init__(self, dbapi_connection, connection_record, echo): self.dbapi_connection = dbapi_connection self._connection_record = connection_record self._echo = echo dbapi_connection = None """A reference to the actual DBAPI connection being tracked. .. versionadded:: 1.4.24 .. seealso:: :attr:`._ConnectionFairy.driver_connection` :attr:`._ConnectionRecord.dbapi_connection` :ref:`faq_dbapi_connection` """ _connection_record = None """A reference to the :class:`._ConnectionRecord` object associated with the DBAPI connection. This is currently an internal accessor which is subject to change. """ @property def driver_connection(self): """The connection object as returned by the driver after a connect. .. versionadded:: 1.4.24 .. 
seealso:: :attr:`._ConnectionFairy.dbapi_connection` :attr:`._ConnectionRecord.driver_connection` :ref:`faq_dbapi_connection` """ return self._connection_record.driver_connection @property def connection(self): """An alias to :attr:`._ConnectionFairy.dbapi_connection`. This alias is deprecated, please use the new name. .. deprecated:: 1.4.24 """ return self.dbapi_connection @connection.setter def connection(self, value): self.dbapi_connection = value @classmethod def _checkout(cls, pool, threadconns=None, fairy=None): if not fairy: fairy = _ConnectionRecord.checkout(pool) fairy._pool = pool fairy._counter = 0 if threadconns is not None: threadconns.current = weakref.ref(fairy) if fairy.dbapi_connection is None: raise exc.InvalidRequestError("This connection is closed") fairy._counter += 1 if ( not pool.dispatch.checkout and not pool._pre_ping ) or fairy._counter != 1: return fairy # Pool listeners can trigger a reconnection on checkout, as well # as the pre-pinger. # there are three attempts made here, but note that if the database # is not accessible from a connection standpoint, those won't proceed # here. attempts = 2 while attempts > 0: connection_is_fresh = fairy._connection_record.fresh fairy._connection_record.fresh = False try: if pool._pre_ping: if not connection_is_fresh: if fairy._echo: pool.logger.debug( "Pool pre-ping on connection %s", fairy.dbapi_connection, ) result = pool._dialect.do_ping(fairy.dbapi_connection) if not result: if fairy._echo: pool.logger.debug( "Pool pre-ping on connection %s failed, " "will invalidate pool", fairy.dbapi_connection, ) raise exc.InvalidatePoolError() elif fairy._echo: pool.logger.debug( "Connection %s is fresh, skipping pre-ping", fairy.dbapi_connection, ) pool.dispatch.checkout( fairy.dbapi_connection, fairy._connection_record, fairy ) return fairy except exc.DisconnectionError as e: if e.invalidate_pool: pool.logger.info( "Disconnection detected on checkout, " "invalidating all pooled connections prior to " "current timestamp (reason: %r)", e, ) fairy._connection_record.invalidate(e) pool._invalidate(fairy, e, _checkin=False) else: pool.logger.info( "Disconnection detected on checkout, " "invalidating individual connection %s (reason: %r)", fairy.dbapi_connection, e, ) fairy._connection_record.invalidate(e) try: fairy.dbapi_connection = ( fairy._connection_record.get_connection() ) except Exception as err: with util.safe_reraise(): fairy._connection_record._checkin_failed( err, _fairy_was_created=True, ) # prevent _ConnectionFairy from being carried # in the stack trace. Do this after the # connection record has been checked in, so that # if the del triggers a finalize fairy, it won't # try to checkin a second time. 
del fairy attempts -= 1 pool.logger.info("Reconnection attempts exhausted on checkout") fairy.invalidate() raise exc.InvalidRequestError("This connection is closed") def _checkout_existing(self): return _ConnectionFairy._checkout(self._pool, fairy=self) def _checkin(self, reset=True): _finalize_fairy( self.dbapi_connection, self._connection_record, self._pool, None, self._echo, reset=reset, fairy=self, ) self.dbapi_connection = None self._connection_record = None _close = _checkin def _reset(self, pool): if pool.dispatch.reset: pool.dispatch.reset(self, self._connection_record) if pool._reset_on_return is reset_rollback: if self._echo: pool.logger.debug( "Connection %s rollback-on-return", self.dbapi_connection ) pool._dialect.do_rollback(self) elif pool._reset_on_return is reset_commit: if self._echo: pool.logger.debug( "Connection %s commit-on-return", self.dbapi_connection, ) pool._dialect.do_commit(self) @property def _logger(self): return self._pool.logger @property def is_valid(self): """Return True if this :class:`._ConnectionFairy` still refers to an active DBAPI connection.""" return self.dbapi_connection is not None @util.memoized_property def info(self): """Info dictionary associated with the underlying DBAPI connection referred to by this :class:`.ConnectionFairy`, allowing user-defined data to be associated with the connection. The data here will follow along with the DBAPI connection including after it is returned to the connection pool and used again in subsequent instances of :class:`._ConnectionFairy`. It is shared with the :attr:`._ConnectionRecord.info` and :attr:`_engine.Connection.info` accessors. The dictionary associated with a particular DBAPI connection is discarded when the connection itself is discarded. """ return self._connection_record.info @property def record_info(self): """Info dictionary associated with the :class:`._ConnectionRecord container referred to by this :class:`.ConnectionFairy`. Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan of this dictionary is persistent across connections that are disconnected and/or invalidated within the lifespan of a :class:`._ConnectionRecord`. .. versionadded:: 1.1 """ if self._connection_record: return self._connection_record.record_info else: return None def invalidate(self, e=None, soft=False): """Mark this connection as invalidated. This method can be called directly, and is also called as a result of the :meth:`_engine.Connection.invalidate` method. When invoked, the DBAPI connection is immediately closed and discarded from further use by the pool. The invalidation mechanism proceeds via the :meth:`._ConnectionRecord.invalidate` internal method. :param e: an exception object indicating a reason for the invalidation. :param soft: if True, the connection isn't closed; instead, this connection will be recycled on next checkout. .. versionadded:: 1.0.3 .. seealso:: :ref:`pool_connection_invalidation` """ if self.dbapi_connection is None: util.warn("Can't invalidate an already-closed connection.") return if self._connection_record: self._connection_record.invalidate(e=e, soft=soft) if not soft: self.dbapi_connection = None self._checkin() def cursor(self, *args, **kwargs): """Return a new DBAPI cursor for the underlying connection. This method is a proxy for the ``connection.cursor()`` DBAPI method. """ return self.dbapi_connection.cursor(*args, **kwargs) def __getattr__(self, key): return getattr(self.dbapi_connection, key) def detach(self): """Separate this connection from its Pool. 
This means that the connection will no longer be returned to the pool when closed, and will instead be literally closed. The containing ConnectionRecord is separated from the DB-API connection, and will create a new connection when next used. Note that any overall connection limiting constraints imposed by a Pool implementation may be violated after a detach, as the detached connection is removed from the pool's knowledge and control. """ if self._connection_record is not None: rec = self._connection_record rec.fairy_ref = None rec.dbapi_connection = None # TODO: should this be _return_conn? self._pool._do_return_conn(self._connection_record) self.info = self.info.copy() self._connection_record = None if self._pool.dispatch.detach: self._pool.dispatch.detach(self.dbapi_connection, rec) def close(self): self._counter -= 1 if self._counter == 0: self._checkin() def _close_no_reset(self): self._counter -= 1 if self._counter == 0: self._checkin(reset=False)
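As the docstrings above describe, a Pool is built around a no-arg (or single-arg) creator callable, and reset_on_return/recycle control what happens when connections are checked in or grow stale. A hedged usage sketch with a QueuePool over sqlite3 follows; pre_ping is omitted because, per _ConnDialect.do_ping above, it requires a real dialect, which an Engine normally supplies:

```python
import sqlite3
from sqlalchemy.pool import QueuePool

def creator():                       # the "legacy style no-arg function" the pool detects
    return sqlite3.connect(":memory:")

pool = QueuePool(
    creator,
    pool_size=5,
    recycle=3600,                    # replace connections older than one hour on checkout
    reset_on_return="rollback",      # default behaviour on checkin
)

conn = pool.connect()                # returns a _ConnectionFairy proxy
cur = conn.cursor()
cur.execute("select 1")
print(cur.fetchone())
conn.close()                         # checks the DBAPI connection back into the pool
```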
py
1a3dcd1e2aab0bae671f0c639a8b9fdf710b082a
# Copyright (c) 2012-2016 Seafile Ltd. # encoding: utf-8 from django.core.management.base import BaseCommand from seaserv import seafile_api from seahub.wiki.models import GroupWiki, Wiki, DuplicateWikiNameError class Command(BaseCommand): help = 'Migrate records in wiki_group_wiki table to wiki_wiki table.' label = "wiki_migrate_group_wiki" def handle(self, *args, **options): print('Start to migrate...') for r in GroupWiki.objects.all(): repo = seafile_api.get_repo(r.repo_id) if not repo: print(('Repo %s not found. Skip.' % r.repo_id)) continue owner = seafile_api.get_repo_owner(r.repo_id) if not owner: print(('Owner of repo %s not found. Skip.' % r.repo_id)) continue wiki_name = 'Group%s-%s' % (r.group_id, repo.name) try: Wiki.objects.add(wiki_name=wiki_name, username=owner, repo_id=r.repo_id) print(('Successfully migrated GroupWiki(%s-%s) to Wiki(%s-%s-%s)' % (r.group_id, r.repo_id, owner, wiki_name, r.repo_id))) except DuplicateWikiNameError: print('Multiple group wiki records found, group: %s, repo_id: %s. Skip.' % (r.group_id, r.repo_id)) continue except Exception as e: print(e) continue print('Done.')
py
1a3dcda23d56690f725f6213a6a72d1ece6d16a7
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Checks that gyp does not fail on executable targets which have several files with the same basename. """ import TestGyp # While MSVS supports building executables that contain several files with the # same name, the msvs gyp generator does not. test = TestGyp.TestGyp(formats=['!msvs']) test.run_gyp('double-executable.gyp', chdir='src') test.relocate('src', 'relocate/src') test.build('double-executable.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello from prog3.c Hello prog3 from func.c Hello prog3 from subdir1/func.c Hello prog3 from subdir2/func.c """ test.run_built_executable('prog3', chdir='relocate/src', stdout=expect) test.pass_test()
py
1a3dcf7281703f179d38d40bd7d138b5afd82c90
import compas_rrc as rrc if __name__ == '__main__': # Create Ros Client ros = rrc.RosClient() ros.run() # Create ABB Client abb = rrc.AbbClient(ros, '/rob1') print('Connected.') # No operation done = abb.send_and_wait(rrc.Noop()) # Print feedback print('Feedback = ', done) # End of Code print('Finished') # Close client ros.close() ros.terminate()
py
1a3dcf8fc6681274afaa8b6d817d3f001b4c0051
from datetime import datetime, timedelta from odoo import fields from odoo.tests.common import TransactionCase class TestCase(TransactionCase): at_install = True post_install = True def setUp(self): super(TestCase, self).setUp() self.event = self.env["event.event"].create( { "name": "TestEvent", "attendee_signup": True, "create_partner": True, "date_begin": fields.Datetime.to_string( datetime.today() + timedelta(days=1) ), "date_end": fields.Datetime.to_string( datetime.today() + timedelta(days=15) ), } ) self.event.write( { "attendee_field_ids": [ ( 6, 0, [ self.env.ref( "website_event_attendee_fields.attendee_field_name" ).id, self.env.ref( "website_event_attendee_fields.attendee_field_email" ).id, self.env.ref( "website_event_attendee_fields.attendee_field_phone" ).id, self.env.ref( "website_event_attendee_fields.attendee_field_country_id" ).id, ], ) ] } )
py
1a3dd090644c2a572df22add8c8d03280d3e36ac
# coding: utf-8 # imports import visao # functions def main(): programa = visao.Visao() programa.principal() if __name__ == '__main__': main()
py
1a3dd0ca6ddc9fc9d2b5a4db6d5d64d6d05f215b
# Copyright (c) 2017, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # from itertools import groupby __metaclass__ = type def make_range_string(data): numlist = [] for item in data: for part in str(item).split(','): if '-' in part: first, last = part.split('-') first, last = int(first), int(last) numlist.extend(range(first, last+1)) else: first = int(part) numlist.append(first) numlist = sorted(set(numlist)) range_list = [] for _, grp in groupby(enumerate(numlist), lambda (i, x): i-x): subset = [ x[1] for x in list(grp) ] first = subset[0] last = subset[-1] if first == last: range_list.append(str(first)) else: substr = "{}-{}".format(first, last) range_list.append(substr) return ','.join(range_list) class FilterModule(object): def filters(self): return { 'make_range_string': make_range_string, }
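make_range_string() collapses a mixed list of numbers and range strings into a sorted, comma-separated range expression; the groupby key i - x puts consecutive integers into one run. The filter itself is written for Python 2 (tuple-parameter unpacking in its lambda); a Python 3 re-expression for illustration:

```python
# Python 3 re-expression of the grouping trick used by make_range_string().
from itertools import groupby

def make_range_string(data):
    nums = []
    for item in data:
        for part in str(item).split(','):
            if '-' in part:
                first, last = (int(x) for x in part.split('-'))
                nums.extend(range(first, last + 1))
            else:
                nums.append(int(part))
    nums = sorted(set(nums))
    out = []
    # consecutive numbers share the same value of index - value,
    # so groupby splits the sorted list into runs
    for _, grp in groupby(enumerate(nums), lambda ix: ix[0] - ix[1]):
        run = [x[1] for x in grp]
        out.append(str(run[0]) if run[0] == run[-1] else "{}-{}".format(run[0], run[-1]))
    return ','.join(out)

print(make_range_string(['1-3', 5, '7,8']))   # -> 1-3,5,7-8
```

In a playbook the original would be applied as a Jinja2 filter, e.g. {{ some_vlan_list | make_range_string }} (the variable name here is hypothetical).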
py
1a3dd0e6fe08b2ef76a9ce4e9e5b5d69a0e9f0f6
import traceback import json from pathlib import Path import time try: print(str(Path().resolve())) commands_dict = { "commands": { "!rng": "You have boosted RNG NAME", "!test": "Test response" } } with open(str(Path().resolve()) + r'\core\commands.json', 'w+') as file: json.dump(commands_dict, file, indent=4) except: traceback.print_exc() time.sleep(10000)
py
1a3dd2e562ff36ff4023f2139dc5bf2378d49bb0
from typing import List from pymongo.database import Database from mongo.schema import Movie def get_movies(db: Database, page: int, size: int) -> List[Movie]: movies = list() for mv in db["movies"].find().skip(((page - 1) * size)).limit(size): movies.append(Movie(**mv)) return movies
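get_movies() implements simple skip/limit pagination: page 3 with size 20 skips the first 40 documents. A hedged usage sketch against a local MongoDB (connection string, database and field names are assumptions; the Movie model from mongo.schema is left out so the snippet stays self-contained):

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")   # assumed local instance
db = client["moviedb"]                              # assumed database name

page, size = 3, 20
cursor = db["movies"].find().skip((page - 1) * size).limit(size)
for doc in cursor:
    print(doc.get("title"))                         # assumed field name
```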
py
1a3dd48e7739e7e2c3c57885a90c7db992b6c6ca
import unittest from pathlib import PurePosixPath from foreman import Label class LabelTest(unittest.TestCase): def assertLabel(self, path, name, label): self.assertEqual(path, str(label.path)) self.assertEqual(name, str(label.name)) def test_label(self): self.assertLabel('x/y/z', 'a/b/c', Label.parse('//x/y/z:a/b/c')) with self.assertRaises(ValueError): Label.parse('//x/y/z') self.assertLabel( 'x/y/z', 'a/b/c', Label.parse(':a/b/c', implicit_path=PurePosixPath('x/y/z'))) with self.assertRaises(ValueError): Label.parse('a/b/c') # Test Label.__eq__ and __hash__. self.assertEqual(Label.parse('//x:y'), Label.parse('//x:y')) self.assertNotEqual(Label.parse('//x:y'), Label.parse('//x:z')) self.assertNotEqual(Label.parse('//w:y'), Label.parse('//x:y')) self.assertEqual( hash(Label.parse('//x:y')), hash(Label.parse('//x:y'))) self.assertNotEqual( hash(Label.parse('//x:y')), hash(Label.parse('//x:z'))) self.assertNotEqual( hash(Label.parse('//w:y')), hash(Label.parse('//x:y'))) if __name__ == '__main__': unittest.main()
py
1a3dd51023cdeca21a52e77446a038a4e4481e58
"""wwfh URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls import include from django.conf.urls.static import static from django.contrib import admin from django.urls import path urlpatterns = [ path("admin/", admin.site.urls), path( "", include("jobs.urls", namespace="jobs"), ), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
py
1a3dd73cd87a3a65eeaafe2911b11407e8cb44f4
#!/usr/bin/env python # -*- coding:utf-8 -*- from bs4 import BeautifulSoup from BaseExtractor import BaseExtractor import re class YoukuExtractor(BaseExtractor): def __init__(self, zhubo): super(YoukuExtractor, self).__init__(zhubo) self.host_url = 'http://i.youku.com' self.api_client_id = '5af6a7d8274a36e8' # 根据优酷的视频url地址,解析该视频的vid # From : def _extract_vid_from_url(self, url): res = None if not url or url == '': return res matchObj = re.search(r'id_(.*)\.html', url) if matchObj: res = matchObj.group(1) return res def _extract_videos_by_4_col(self, url, items_list, is_all=False): v_list = [] if is_all: # 由于4列视频列表是动态加载,需要通过phantomjs重新抓取全部页面内容 html = self.get_html_by_phantomjs(url) if html.strip() != '': soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8') items_list = soup.find_all(class_='yk-col4') for item in items_list: info = {} try: # 提取视频封面 info['avatar'] = item.find(class_='v-thumb').img['src'] v_link = item.find(class_='v-link').a # 提取视频地址 info['link'] = v_link['href'] # 提取视频title info['title'] = v_link['title'] # 提取视频时长 info['time'] = item.find(class_='v-time').string # 提取视频发布时间 info['publish_time'] = item['c_time'] # 提取视频观看量 info['num'] = item.find(class_='v-num').string self._logger.info('{0}:{1}'.format(info['publish_time'], self._convert_publish_time(info['publish_time']))) except Exception, e: # 去除class为yk-col4但不包含视频信息的节点 self._logger.error(e) continue v_list.append(info) print "4 col list size :" + str(len(v_list)) return v_list def _extract_videos_by_5_col(self, url, items_list): v_list = [] for item in items_list: info = {} try: # 提取视频封面 info['avatar'] = item.find(class_='v-thumb').img['src'] v_link = item.find(class_='v-link').a # 提取视频地址 info['link'] = v_link['href'] # 提取视频title info['title'] = v_link['title'] # 提取视频时长 info['time'] = item.find(class_='v-time').string # 提取视频发布时间 info['publish_time'] = item.find(class_='v-publishtime').string # 提取视频观看量 info['num'] = item.find(class_='v-num').string self._logger.info('{0}:{1}'.format(info['publish_time'], self._convert_publish_time(info['publish_time']))) except Exception, e: # 去除class为yk-col4但不包含视频信息的节点 self._logger.error(e) continue v_list.append(info) return v_list # 优酷列表单页面处理 def get_single_list(self, channel_url, is_all=False): self._logger.info('url:{0}'.format(channel_url)) v_list = None # 下一页地址 next_page_url = None html = self.get_html(channel_url) if html.strip() != '': soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8') # 获取下一页地址 next_page = soup.find(class_='next') if not next_page is None and not next_page.a is None: next_page_url = self.host_url + next_page.a['href'] # 判断页面所用模板 if not soup.find(class_='yk-col4 new') is None: items_list = soup.find_all(class_='yk-col4') v_list = self._extract_videos_by_4_col(channel_url, items_list, is_all) else: items_list = soup.find_all(class_='v va') v_list = self._extract_videos_by_5_col(channel_url, items_list) # 对v_list数据进行组装 for video in v_list: try: video['vid'] = self._extract_vid_from_url(video['link']) video['time'] = self._convert_video_time(video['time']) video['publish_time'] = self._convert_publish_time(video['publish_time']) video['num'] = self._convert_video_num(video['num']) # TODO 提取视频标题中关键词 video['keyword'] = '' except Exception, e: self._logger.error(e) continue return v_list, next_page_url def _pro_video_by_show_api(self, vid): title, avatar, v_time, link, description, uid, tags, publishtime, mark = [None] * 9 OPEN_API = 'https://openapi.youku.com/v2/videos/show.json?client_id={0}&video_id={1}' url = 
OPEN_API.format(self.api_client_id, vid) response = self.get_json(url) if response and response.get('id', 0) == vid: title = response['title'] avatar = response['bigThumbnail'] v_time = response['duration'] link = response['link'] description = response['description'] uid = response['user']['id'] tags = response['tags'] publishtime = response['published'] # 将视频发布时间转化为Unix时间戳 publishtime = self._str_to_timestamp(publishtime) mark = str(response['view_count']) + '#' + str(response['comment_count']) + "#" + str(response['up_count']) return title, avatar, v_time, link, description, uid, tags, publishtime, mark, vid # 获取视频详情页面信息 def get_single_page(self, vid): pass # 通过Youku API调取列表信息 def get_single_list_by_api(self, zhubo): videos_api = 'https://openapi.youku.com/v2/videos/by_user.json?client_id={0}&user_id={1}&page=1&count=50'.format( self.api_client_id, zhubo['uid']) response = self.get_json(videos_api) v_list = [] # print "response[id] :" + response['id'] if response and response.get('count', 0) == 50: videos = response['videos'] print 'videos length : {0}'.format(len(videos)) for video_item in videos: info = {} new_updatetime = self._str_to_timestamp(video_item['published']) if new_updatetime < zhubo['v_updatetime']: continue info['link'] = video_item['link'] v_list.append(info) return v_list, videos_api if __name__ == '__main__': pass
py
1a3dd79c05315bf2150c483ffe1ce892cc828e63
#!/usr/bin/python # Copyright (c) 2013 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys from buildbot_lib import ( BuildContext, BuildStatus, Command, ParseStandardCommandLine, RemoveSconsBuildDirectories, RunBuild, SetupLinuxEnvironment, SetupWindowsEnvironment, SCons, Step ) sys.path.append(os.path.join(os.path.dirname(__file__), '..')) import pynacl.platform def RunSconsTests(status, context): # Clean out build directories, unless we have built elsewhere. if not context['skip_build']: with Step('clobber scons', status): RemoveSconsBuildDirectories() # Run checkdeps script to vet #includes. with Step('checkdeps', status): Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py']) arch = context['default_scons_platform'] flags_subzero = ['use_sz=1'] flags_build = ['do_not_run_tests=1'] flags_run = [] # This file is run 3 different ways for ARM builds. The qemu-only trybot does # a normal build-and-run with the emulator just like the x86 bots. The panda # build side runs on an x86 machines with skip_run, and then packs up the # result and triggers an ARM hardware tester that run with skip_build if arch != 'arm': # Unlike their arm counterparts we do not run trusted tests on x86 bots. # Trusted tests get plenty of coverage by other bots, e.g. nacl-gcc bots. # We make the assumption here that there are no "exotic tests" which # are trusted in nature but are somehow depedent on the untrusted TC. flags_build.append('skip_trusted_tests=1') flags_run.append('skip_trusted_tests=1') if context['skip_run']: flags_run.append('do_not_run_tests=1') if arch == 'arm': # For ARM hardware bots, force_emulator= disables use of QEMU, which # enables building tests which don't work under QEMU. flags_build.append('force_emulator=') flags_run.append('force_emulator=') if context['skip_build']: flags_run.extend(['naclsdk_validate=0', 'built_elsewhere=1']) if not context['skip_build']: # For ARM builders which will trigger hardware testers, run the hello world # test with the emulator as a basic sanity check before doing anything else. if arch == 'arm' and context['skip_run']: with Step('hello_world ' + arch, status): SCons(context, parallel=True, args=['run_hello_world_test']) with Step('build_all ' + arch, status): SCons(context, parallel=True, args=flags_build) if arch in ('arm', 'x86-32', 'x86-64'): with Step('build_all subzero ' + arch, status): SCons(context, parallel=True, args=flags_build + flags_subzero) smoke_tests = ['small_tests', 'medium_tests'] # Normal pexe-mode tests with Step('smoke_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, args=flags_run + smoke_tests) # Large tests cannot be run in parallel with Step('large_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=False, args=flags_run + ['large_tests']) # Run small_tests, medium_tests, and large_tests with Subzero. # TODO(stichnot): Move this to the sandboxed translator section # along with the translate_fast flag once pnacl-sz.nexe is ready. 
if arch in ('arm', 'x86-32', 'x86-64'): # Normal pexe-mode tests with Step('smoke_tests subzero ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, args=flags_run + flags_subzero + smoke_tests) # Large tests cannot be run in parallel with Step('large_tests subzero ' + arch, status, halt_on_fail=False): SCons(context, parallel=False, args=flags_run + flags_subzero + ['large_tests']) with Step('nonpexe_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, args=flags_run + ['pnacl_generate_pexe=0', 'nonpexe_tests']) irt_mode = context['default_scons_mode'] + ['nacl_irt_test'] # Build all the tests with the IRT if not context['skip_build']: with Step('build_all_irt ' + arch, status): SCons(context, parallel=True, mode=irt_mode, args=flags_build) smoke_tests_irt = ['small_tests_irt', 'medium_tests_irt'] # Run tests with the IRT. with Step('smoke_tests_irt ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, mode=irt_mode, args=flags_run + smoke_tests_irt) with Step('large_tests_irt ' + arch, status, halt_on_fail=False): SCons(context, parallel=False, mode=irt_mode, args=flags_run + ['large_tests_irt']) # Run some nacl_clang tests. Eventually we will have bots that just run # buildbot_standard with nacl_clang and this can be split out. context['pnacl'] = False context['nacl_clang'] = True if not context['skip_build']: with Step('build_nacl_clang ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, args=flags_build) with Step('smoke_tests_nacl_clang ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, args=flags_run + ['small_tests', 'medium_tests']) with Step('large_tests_nacl_clang ' + arch, status, halt_on_fail=False): SCons(context, parallel=False, args=flags_run + ['large_tests']) context['pnacl'] = True context['nacl_clang'] = False # Test sandboxed translation # TODO(dschuff): The standalone sandboxed translator driver does not have # the batch script wrappers, so it can't run on Windows. Either add them to # the translator package or make SCons use the pnacl_newlib drivers except # on the ARM bots where we don't have the pnacl_newlib drivers. # TODO(sbc): Enable these tests for mips once we build the version of the # translator nexe if not context.Windows() and arch != 'mips32': flags_run_sbtc = ['use_sandboxed_translator=1'] sbtc_tests = ['toolchain_tests_irt'] if arch == 'arm': # When splitting the build from the run, translate_in_build_step forces # the translation to run on the run side (it usually runs on the build # side because that runs with more parallelism) if context['skip_build'] or context['skip_run']: flags_run_sbtc.append('translate_in_build_step=0') else: # The ARM sandboxed translator is flaky under qemu, so run a very small # set of tests on the qemu-only trybot. sbtc_tests = ['run_hello_world_test_irt'] else: sbtc_tests.append('large_code') with Step('sandboxed_translator_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, mode=irt_mode, args=flags_run + flags_run_sbtc + sbtc_tests) with Step('sandboxed_translator_fast_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, mode=irt_mode, args=flags_run + flags_run_sbtc + ['translate_fast=1'] + sbtc_tests) # Test Non-SFI Mode. # The only architectures that the PNaCl toolchain supports Non-SFI # versions of are currently x86-32 and ARM. 
# The x86-64 toolchain bot currently also runs these tests from # buildbot_pnacl.sh if context.Linux() and (arch == 'x86-32' or arch == 'arm'): with Step('nonsfi_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, mode=irt_mode, args=flags_run + ['nonsfi_nacl=1', 'nonsfi_tests', 'nonsfi_tests_irt']) # Build with pnacl_generate_pexe=0 to allow using pnacl-clang with # direct-to-native mode. This allows assembly to be used in tests. with Step('nonsfi_tests_nopnacl_generate_pexe ' + arch, status, halt_on_fail=False): extra_args = ['nonsfi_nacl=1', 'pnacl_generate_pexe=0', 'nonsfi_tests', 'nonsfi_tests_irt'] # nonsfi_tests_irt with pnacl_generate_pexe=0 does not pass on x86-32. # https://code.google.com/p/nativeclient/issues/detail?id=4093 if arch == 'x86-32': extra_args.remove('nonsfi_tests_irt') SCons(context, parallel=True, mode=irt_mode, args=flags_run + extra_args) # Test nonsfi_loader linked against host's libc. with Step('nonsfi_tests_host_libc ' + arch, status, halt_on_fail=False): # Using skip_nonstable_bitcode=1 here disables the tests for # zero-cost C++ exception handling, which don't pass for Non-SFI # mode yet because we don't build libgcc_eh for Non-SFI mode. SCons(context, parallel=True, mode=irt_mode, args=flags_run + ['nonsfi_nacl=1', 'use_newlib_nonsfi_loader=0', 'nonsfi_tests', 'nonsfi_tests_irt', 'toolchain_tests_irt', 'skip_nonstable_bitcode=1']) # Test unsandboxed mode. if (context.Linux() or context.Mac()) and arch == 'x86-32': if context.Linux(): tests = ['run_' + test + '_test_irt' for test in ['hello_world', 'irt_futex', 'thread', 'float', 'malloc_realloc_calloc_free', 'dup', 'cond_timedwait', 'getpid']] else: # TODO(mseaborn): Use the same test list as on Linux when the threading # tests pass for Mac. tests = ['run_hello_world_test_irt'] with Step('unsandboxed_tests ' + arch, status, halt_on_fail=False): SCons(context, parallel=True, mode=irt_mode, args=flags_run + ['pnacl_unsandboxed=1'] + tests) def Main(): context = BuildContext() status = BuildStatus(context) ParseStandardCommandLine(context) if context.Linux(): SetupLinuxEnvironment(context) elif context.Windows(): SetupWindowsEnvironment(context) elif context.Mac(): # No setup to do for Mac. pass else: raise Exception('Unsupported platform') # Panda bots only have 2 cores. if pynacl.platform.GetArch() == 'arm': context['max_jobs'] = 2 RunBuild(RunSconsTests, status) if __name__ == '__main__': Main()
py
1a3dd7f6146f1bcad67e75279cd45e5d1f5b123c
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    path('blog/', include('blog.urls')),
]
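# Hedged sketch (hypothetical, not part of this repository): the include()
# above expects a blog/urls.py module that defines its own urlpatterns. A
# minimal version might look like the commented lines below; the view name
# 'index' is an illustrative assumption.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
# ]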
py
1a3dd9287a507ebed0dc24f641dea285ce008465
class SelectionSequentialTransform(object): def __init__(self, tokenizer, max_len): self.tokenizer = tokenizer self.max_len = max_len def __call__(self, texts): input_ids_list, segment_ids_list, input_masks_list, contexts_masks_list = [], [], [], [] for text in texts: tokenized_dict = self.tokenizer.encode_plus(text, max_length=self.max_len, pad_to_max_length=True) input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask'] assert len(input_ids) == self.max_len assert len(input_masks) == self.max_len input_ids_list.append(input_ids) input_masks_list.append(input_masks) return input_ids_list, input_masks_list def __str__(self) -> str: return 'maxlen{}'.format(self.max_len) class SelectionJoinTransform(object): def __init__(self, tokenizer, max_len): self.tokenizer = tokenizer self.max_len = max_len self.cls_id = self.tokenizer.convert_tokens_to_ids('[CLS]') self.sep_id = self.tokenizer.convert_tokens_to_ids('[SEP]') self.tokenizer.add_tokens(['\n'], special_tokens=True) self.pad_id = 0 def __call__(self, texts): # another option is to use [SEP], but here we follow the discussion at: # https://github.com/facebookresearch/ParlAI/issues/2306#issuecomment-599180186 context = '\n'.join(texts) tokenized_dict = self.tokenizer.encode_plus(context) input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask'] input_ids = input_ids[-self.max_len:] input_ids[0] = self.cls_id input_masks = input_masks[-self.max_len:] input_ids += [self.pad_id] * (self.max_len - len(input_ids)) input_masks += [0] * (self.max_len - len(input_masks)) assert len(input_ids) == self.max_len assert len(input_masks) == self.max_len return input_ids, input_masks def __str__(self) -> str: return '[join_str]maxlen{}'.format(self.max_len)
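# Hedged usage sketch (not part of the original module): one way these
# transforms might be wired together with a HuggingFace tokenizer. The model
# name and the example utterances are illustrative assumptions only.
if __name__ == '__main__':
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    context_transform = SelectionJoinTransform(tokenizer, max_len=256)
    response_transform = SelectionSequentialTransform(tokenizer, max_len=64)

    # The dialogue history is joined with '\n' and truncated from the left.
    context_ids, context_masks = context_transform(['hi there', 'how are you?'])
    # Each candidate response is encoded and padded independently.
    cand_ids_list, cand_masks_list = response_transform(['fine, thanks', 'not sure'])
    print(len(context_ids), len(cand_ids_list))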
py
1a3dd954499a217df6136e66cff2a5ca18bd889c
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) Philipp Wagner. All rights reserved. # Licensed under the BSD license. See LICENSE file in the project root for full license information. import sys, os sys.path.append("../..") # Import Matplotlib: import matplotlib matplotlib.use('Agg') import matplotlib.cm as cm # For Python2 backward comability: from builtins import range # import facerec modules from facerec.feature import Fisherfaces, SpatialHistogram, Identity from facerec.distance import EuclideanDistance, ChiSquareDistance from facerec.classifier import NearestNeighbor, SVM from facerec.model import PredictableModel from facerec.validation import KFoldCrossValidation from facerec.visual import subplot from facerec.util import minmax_normalize from facerec.serialization import save_model, load_model from facerec.svm import grid_search import numpy as np # try to import the PIL Image module try: from PIL import Image except ImportError: import Image import logging import matplotlib.pyplot as plt from facerec.lbp import LPQ, ExtendedLBP def read_images(path, sz=None): """Reads the images in a given folder, resizes images on the fly if size is given. Args: path: Path to a folder with subfolders representing the subjects (persons). sz: A tuple with the size Resizes Returns: A list [X,y] X: The images, which is a Python list of numpy arrays. y: The corresponding labels (the unique number of the subject, person) in a Python list. """ c = 0 X,y = [], [] for dirname, dirnames, filenames in os.walk(path): for subdirname in dirnames: subject_path = os.path.join(dirname, subdirname) for filename in os.listdir(subject_path): try: im = Image.open(os.path.join(subject_path, filename)) im = im.convert("L") # resize to given size (if given) if (sz is not None): im = im.resize(sz, Image.ANTIALIAS) X.append(np.asarray(im, dtype=np.uint8)) y.append(c) except IOError as e: print("I/O error: {0}".format(e)) raise e except: print("Unexpected error: {0}".format(sys.exc_info()[0])) raise c = c+1 return [X,y] if __name__ == "__main__": # This is where we write the images, if an output_dir is given # in command line: out_dir = None # You'll need at least a path to your image data, please see # the tutorial coming with this source code on how to prepare # your image data: if len(sys.argv) < 2: print("USAGE: facerec_demo.py </path/to/images>") sys.exit() # Now read in the image data. This must be a valid path! 
    [X,y] = read_images(sys.argv[1])
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define an SVM classifier (note: this demo uses an SVM, not the 1-NN classifier imported above):
    classifier = SVM()
    # Define the model as the combination of feature extraction and classifier:
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute a model:
    model.compute(X, y)
    # Save the model to disk:
    save_model('model.pkl', model)
    # Perform a Grid Search for the Set of Parameters:
    tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]},
                        {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
    # Find a good set of parameters:
    grid_search(model, X, y, tuned_parameters)
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
py
1a3dd9b4db460b455d18eca2bfc1d927868d137a
# -*- coding: utf-8 -*-
# @Time : 09/07/2021 02:56
# @Author : Rodolfo Londero
# @Email : [email protected]
# @File : test_text.py
# @Software : VSCode

import pytest


class TestText13Bus:

    @pytest.fixture(scope='function')
    def dss(self, solve_snap_13bus):
        dss = solve_snap_13bus
        dss.solution_solve()
        return dss

    # ===================================================================
    # String methods
    # ===================================================================
    def test_text(self, dss):
        expected = "2000"
        actual = dss.text('? Line.650632.Length')
        assert actual == expected
py
1a3ddab66ce499aea9015c019d46950061614938
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import time
import math
from air_drone_vertical.edge_detection_canny_multi_rects import *

if __name__ == '__main__':
    cnt = 0
    while cnt < 600:
        file_name = 'C:\\Users\\18056\\PycharmProjects\\untitled\\air_drone_vertical\\pic1\\' + str(cnt) + '.jpg'
        print(file_name)
        img = cv2.imread(file_name)
        if img is None:
            cnt += 1
            continue
        # cross_detect time compute
        img1 = np.copy(img)
        mark = process_pictue2(img)
        print("Result code: " + str(mark[0]))
        if mark[0] == 12:
            print("Center point not found")
            cnt += 1
            continue
        else:
            print("Successfully found a reliable center point")
            image = mark[1]
            percent = mark[4]
            print("percent: " + str(mark[4]))
            print("centerX: " + str(mark[2]))
            print("centerY: " + str(mark[3]))
        # compute time
        cv2.imshow('origin', img1)
        cv2.imshow('process', image)
        if cv2.waitKey(0) == 27:
            break
        cv2.destroyWindow('origin')
        cv2.destroyWindow('process')
        cnt += 1
    cv2.destroyAllWindows()
py
1a3ddba0e3b7d14410832aa10a7f83ea79a15b7e
'''
Models
------

This is currently required for:

$> python manage.py test rpc4django

to run.

see: http://code.djangoproject.com/ticket/7198
'''
py
1a3ddbba59b9193316dd74a640979d7c3fc43083
from typing import Optional, Tuple from flask import url_for from app.questionnaire.location import Location from app.questionnaire.path_finder import PathFinder from app.questionnaire.rules import evaluate_when_rules class Router: def __init__(self, schema, answer_store, list_store, progress_store, metadata): self._schema = schema self._answer_store = answer_store self._list_store = list_store self._progress_store = progress_store self._metadata = metadata self._path_finder = PathFinder( self._schema, self._answer_store, self._list_store, self._progress_store, self._metadata, ) @property def enabled_section_ids(self): return [ section["id"] for section in self._schema.get_sections() if self._is_section_enabled(section=section) ] @property def is_questionnaire_complete(self) -> bool: first_incomplete_section_key = self._get_first_incomplete_section_key() return not first_incomplete_section_key def get_first_incomplete_location_in_questionnaire_url(self) -> str: first_incomplete_section_key = self._get_first_incomplete_section_key() if first_incomplete_section_key: section_id, list_item_id = first_incomplete_section_key section_routing_path = self._path_finder.routing_path( section_id=section_id, list_item_id=list_item_id ) return self.get_section_resume_url(section_routing_path) return self.get_next_location_url_for_end_of_section() def get_last_location_in_questionnaire_url(self) -> str: routing_path = self.routing_path(*self._get_last_complete_section_key()) return self.get_last_location_in_section(routing_path).url() def is_list_item_in_list_store(self, list_item_id, list_name): return list_item_id in self._list_store[list_name] def can_access_location(self, location: Location, routing_path): """ Checks whether the location is valid and accessible. :return: boolean """ if location.section_id not in self.enabled_section_ids: return False if location.list_item_id and not self.is_list_item_in_list_store( location.list_item_id, location.list_name ): return False return location.block_id in self._get_allowable_path(routing_path) def can_access_hub(self): return self._schema.is_flow_hub and all( self._progress_store.is_section_complete(section_id) for section_id in self._schema.get_section_ids_required_for_hub() if section_id in self.enabled_section_ids ) def can_display_section_summary(self, section_id, list_item_id=None): return self._schema.get_summary_for_section( section_id ) and self._progress_store.is_section_complete(section_id, list_item_id) def routing_path(self, section_id, list_item_id=None): return self._path_finder.routing_path(section_id, list_item_id) def get_next_location_url(self, location, routing_path, return_to=None): """ Get the next location in the section. If the section is complete, determine where to go next, whether it be a summary, the hub or the next incomplete location. 
""" is_last_block_in_section = routing_path[-1] == location.block_id if self._progress_store.is_section_complete( location.section_id, location.list_item_id ): if return_to == "section-summary": return self._get_section_url(location) if return_to == "final-summary" and self.is_questionnaire_complete: return url_for("questionnaire.submit_questionnaire") if is_last_block_in_section: return self._get_next_location_url_for_last_block_in_section(location) # Due to backwards routing, you can be on the last block without the section being complete if is_last_block_in_section: return self._get_first_incomplete_location_in_section(routing_path).url() return self.get_next_block_url(location, routing_path) def _get_next_location_url_for_last_block_in_section(self, location): if self._schema.show_summary_on_completion_for_section(location.section_id): return self._get_section_url(location) return self.get_next_location_url_for_end_of_section() def get_previous_location_url(self, location, routing_path): """ Returns the previous 'location' to visit given a set of user answers """ block_id_index = routing_path.index(location.block_id) if block_id_index != 0: previous_block_id = routing_path[block_id_index - 1] previous_block = self._schema.get_block(previous_block_id) if previous_block["type"] == "RelationshipCollector": return url_for( "questionnaire.relationships", last=True, ) return url_for( "questionnaire.block", block_id=previous_block_id, list_name=routing_path.list_name, list_item_id=routing_path.list_item_id, ) if self.can_access_hub(): return url_for("questionnaire.get_questionnaire") return None def get_next_location_url_for_end_of_section(self) -> str: if self._schema.is_flow_hub and self.can_access_hub(): return url_for("questionnaire.get_questionnaire") if self._schema.is_flow_linear and self.is_questionnaire_complete: return url_for("questionnaire.submit_questionnaire") return self.get_first_incomplete_location_in_questionnaire_url() def get_section_resume_url(self, routing_path): section_key = (routing_path.section_id, routing_path.list_item_id) if section_key in self._progress_store: location = self._get_first_incomplete_location_in_section(routing_path) if location: return location.url(resume=True) return self.get_first_location_in_section(routing_path).url() def is_path_complete(self, routing_path): return not bool(self._get_first_incomplete_location_in_section(routing_path)) @staticmethod def get_first_location_in_section(routing_path) -> Location: return Location( block_id=routing_path[0], section_id=routing_path.section_id, list_name=routing_path.list_name, list_item_id=routing_path.list_item_id, ) @staticmethod def get_last_location_in_section(routing_path) -> Location: return Location( block_id=routing_path[-1], section_id=routing_path.section_id, list_name=routing_path.list_name, list_item_id=routing_path.list_item_id, ) def full_routing_path(self): full_routing_path = [] for section_id in self.enabled_section_ids: repeating_list = self._schema.get_repeating_list_for_section(section_id) if repeating_list: for list_item_id in self._list_store[repeating_list]: full_routing_path.append( self._path_finder.routing_path( section_id=section_id, list_item_id=list_item_id ) ) else: full_routing_path.append( self._path_finder.routing_path(section_id=section_id) ) return full_routing_path def _is_block_complete(self, block_id, section_id, list_item_id): return block_id in self._progress_store.get_completed_block_ids( section_id, list_item_id ) def 
_get_first_incomplete_location_in_section(self, routing_path): for block_id in routing_path: if not self._is_block_complete( block_id, routing_path.section_id, routing_path.list_item_id ): return Location( block_id=block_id, section_id=routing_path.section_id, list_item_id=routing_path.list_item_id, list_name=routing_path.list_name, ) def _get_allowable_path(self, routing_path): """ The allowable path is the completed path plus the next location """ allowable_path = [] if routing_path: for block_id in routing_path: allowable_path.append(block_id) if not self._is_block_complete( block_id, routing_path.section_id, routing_path.list_item_id ): return allowable_path return allowable_path def get_enabled_section_keys(self): for section_id in self.enabled_section_ids: repeating_list = self._schema.get_repeating_list_for_section(section_id) if repeating_list: for list_item_id in self._list_store[repeating_list]: section_key = (section_id, list_item_id) yield section_key else: section_key = (section_id, None) yield section_key def _get_first_incomplete_section_key(self): for section_id, list_item_id in self.get_enabled_section_keys(): if not self._progress_store.is_section_complete(section_id, list_item_id): return section_id, list_item_id def _get_last_complete_section_key(self) -> Tuple[str, Optional[str]]: for section_id, list_item_id in list(self.get_enabled_section_keys())[::-1]: if self._progress_store.is_section_complete(section_id, list_item_id): return section_id, list_item_id def _is_section_enabled(self, section): if "enabled" not in section: return True for condition in section["enabled"]: if evaluate_when_rules( condition["when"], self._schema, self._metadata, self._answer_store, self._list_store, ): return True return False @staticmethod def get_next_block_url(location, routing_path): next_block_id = routing_path[routing_path.index(location.block_id) + 1] return url_for( "questionnaire.block", block_id=next_block_id, list_name=routing_path.list_name, list_item_id=routing_path.list_item_id, ) @staticmethod def _get_section_url(location): return url_for( "questionnaire.get_section", section_id=location.section_id, list_item_id=location.list_item_id, )
py
1a3ddc83ff3d4dae51a45e5aedac96383f9a2d58
import argparse import getpass import logging import os import sys import cartography.config import cartography.sync import cartography.util from cartography.intel.aws.util.common import parse_and_validate_aws_requested_syncs logger = logging.getLogger(__name__) class CLI: """ :type sync: cartography.sync.Sync :param sync: A sync task for the command line program to execute. :type prog: string :param prog: The name of the command line program. This will be displayed in usage and help output. """ def __init__(self, sync, prog=None): self.prog = prog self.sync = sync self.parser = self._build_parser() def _build_parser(self): """ :rtype: argparse.ArgumentParser :return: A cartography argument parser. Calling parse_args on the argument parser will return an object which implements the cartography.config.Config interface. """ parser = argparse.ArgumentParser( prog=self.prog, description=( "cartography consolidates infrastructure assets and the relationships between them in an intuitive " "graph view. This application can be used to pull configuration data from multiple sources, load it " "in to Neo4j, and run arbitrary enrichment and analysis on that data. Please make sure you have Neo4j " "running and have configured AWS credentials with the SecurityAudit IAM policy before getting started. " "Running cartography with no parameters will execute a simple sync against a Neo4j instance running " "locally. It will use your default AWS credentials and will not execute and post-sync analysis jobs. " "Please see the per-parameter documentation below for information on how to connect to different Neo4j " "instances, use auth when communicating with Neo4j, sync data from multiple AWS accounts, and execute " "arbitrary analysis jobs after the conclusion of the sync." ), epilog='For more documentation please visit: https://github.com/lyft/cartography', ) parser.add_argument( '-v', '--verbose', action='store_true', help='Enable verbose logging for cartography.', ) parser.add_argument( '-q', '--quiet', action='store_true', help='Restrict cartography logging to warnings and errors only.', ) parser.add_argument( '--neo4j-uri', type=str, default='bolt://localhost:7687', help=( 'A valid Neo4j URI to sync against. See ' 'https://neo4j.com/docs/api/python-driver/current/driver.html#uri for complete documentation on the ' 'structure of a Neo4j URI.' ), ) parser.add_argument( '--neo4j-user', type=str, default=None, help='A username with which to authenticate to Neo4j.', ) parser.add_argument( '--neo4j-password-env-var', type=str, default=None, help='The name of an environment variable containing a password with which to authenticate to Neo4j.', ) parser.add_argument( '--neo4j-password-prompt', action='store_true', help=( 'Present an interactive prompt for a password with which to authenticate to Neo4j. This parameter ' 'supersedes other methods of supplying a Neo4j password.' ), ) parser.add_argument( '--neo4j-max-connection-lifetime', type=int, default=3600, help=( 'Time in seconds for the Neo4j driver to consider a TCP connection alive. cartography default = 3600, ' 'which is the same as the Neo4j driver default. See ' 'https://neo4j.com/docs/driver-manual/1.7/client-applications/#driver-config-connection-pool-management' '.' ), ) # TODO add the below parameters to a 'sync' subparser parser.add_argument( '--update-tag', type=int, default=None, help=( 'A unique tag to apply to all Neo4j nodes and relationships created or updated during the sync run. 
' 'This tag is used by cleanup jobs to identify nodes and relationships that are stale and need to be ' 'removed from the graph. By default, cartography will use a UNIX timestamp as the update tag.' ), ) parser.add_argument( '--aws-sync-all-profiles', action='store_true', help=( 'Enable AWS sync for all discovered named profiles. When this parameter is supplied cartography will ' 'discover all configured AWS named profiles (see ' 'https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) and run the AWS sync ' 'job for each profile not named "default". If this parameter is not supplied, cartography will use the ' 'default AWS credentials available in your environment to run the AWS sync once. When using this ' 'parameter it is suggested that you create an AWS config file containing a named profile for each AWS ' 'account you want to sync and use the AWS_CONFIG_FILE environment variable to point to that config ' 'file (see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html). cartography ' 'respects the AWS CLI/SDK environment variables and does not override them.' ), ) parser.add_argument( '--azure-sync-all-subscriptions', action='store_true', help=( 'Enable Azure sync for all discovered subscriptions. When this parameter is supplied cartography will ' 'discover all configured Azure subscriptions.' ), ) parser.add_argument( '--azure-sp-auth', action='store_true', help=( 'Use Service Principal authentication for Azure sync.' ), ) parser.add_argument( '--azure-tenant-id', type=str, default=None, help=( 'Azure Tenant Id for Service Principal Authentication.' ), ) parser.add_argument( '--azure-client-id', type=str, default=None, help=( 'Azure Client Id for Service Principal Authentication.' ), ) parser.add_argument( '--azure-client-secret-env-var', type=str, default=None, help=( 'The name of environment variable containing Azure Client Secret for Service Principal Authentication.' ), ) parser.add_argument( '--aws-requested-syncs', type=str, default=None, help=( 'Comma-separated list of AWS resources to sync. Example 1: "ecr,s3,ec2:instance" for ECR, S3, and all ' 'EC2 instance resources. See the full list available in source code at cartography.intel.aws.resources.' ' If not specified, cartography by default will run all AWS sync modules available.' ), ) parser.add_argument( '--crxcavator-api-base-uri', type=str, default='https://api.crxcavator.io/v1', help=( 'Base URI for the CRXcavator API. Defaults to public API endpoint.' ), ) parser.add_argument( '--crxcavator-api-key-env-var', type=str, default=None, help=( 'The name of an environment variable containing a key with which to auth to the CRXcavator API. ' 'Required if you are using the CRXcavator intel module. Ignored otherwise.' ), ) parser.add_argument( '--analysis-job-directory', type=str, default=None, help=( 'A path to a directory containing analysis jobs to run at the conclusion of the sync. cartography will ' 'discover all JSON files in the given directory (and its subdirectories) and pass them to the GraphJob ' 'API to execute against the graph. This allows you to apply data transformation and augmentation at ' 'the end of a sync run without writing code. cartography does not guarantee the order in which the ' 'jobs are executed.' ), ) parser.add_argument( '--okta-org-id', type=str, default=None, help=( 'Okta organizational id to sync. Required if you are using the Okta intel module. Ignored otherwise.' 
), ) parser.add_argument( '--okta-api-key-env-var', type=str, default=None, help=( 'The name of an environment variable containing a key with which to auth to the Okta API.' 'Required if you are using the Okta intel module. Ignored otherwise.' ), ) parser.add_argument( '--okta-saml-role-regex', type=str, default=r"^aws\#\S+\#(?{{role}}[\w\-]+)\#(?{{accountid}}\d+)$", help=( 'The regex used to map Okta groups to AWS roles when using okta as a SAML provider.' 'The regex is the one entered in Step 5: Enabling Group Based Role Mapping in Okta' 'https://saml-doc.okta.com/SAML_Docs/How-to-Configure-SAML-2.0-for-Amazon-Web-Service#c-step5' 'The regex must contain the {{role}} and {{accountid}} tags' ), ) parser.add_argument( '--github-config-env-var', type=str, default=None, help=( 'The name of an environment variable containing a Base64 encoded GitHub config object.' 'Required if you are using the GitHub intel module. Ignored otherwise.' ), ) parser.add_argument( '--digitalocean-token-env-var', type=str, default=None, help=( 'The name of an environment variable containing a DigitalOcean access token.' 'Required if you are using the DigitalOcean intel module. Ignored otherwise.' ), ) parser.add_argument( '--permission-relationships-file', type=str, default="cartography/data/permission_relationships.yaml", help=( 'The path to the permission relationships mapping file.' 'If omitted the default permission relationships will be created' ), ) parser.add_argument( '--jamf-base-uri', type=str, default=None, help=( 'Your Jamf base URI, e.g. https://hostname.com/JSSResource.' 'Required if you are using the Jamf intel module. Ignored otherwise.' ), ) parser.add_argument( '--jamf-user', type=str, default=None, help='A username with which to authenticate to Jamf.', ) parser.add_argument( '--jamf-password-env-var', type=str, default=None, help='The name of an environment variable containing a password with which to authenticate to Jamf.', ) parser.add_argument( '--statsd-enabled', action='store_true', help=( 'If set, enables sending metrics using statsd to a server of your choice.' ), ) parser.add_argument( '--statsd-prefix', type=str, default='', help=( 'The string to prefix statsd metrics with. Only used if --statsd-enabled is on. Default = empty string.' ), ) parser.add_argument( '--statsd-host', type=str, default='127.0.0.1', help=( 'The IP address of your statsd server. Only used if --statsd-enabled is on. Default = 127.0.0.1.' ), ) parser.add_argument( '--statsd-port', type=int, default=8125, help=( 'The port of your statsd server. Only used if --statsd-enabled is on. Default = UDP 8125.' ), ) return parser def main(self, argv): """ Entrypoint for the command line interface. :type argv: string :param argv: The parameters supplied to the command line program. 
""" # TODO support parameter lookup in environment variables if not present on command line config: cartography.config.Config = self.parser.parse_args(argv) # Logging config if config.verbose: logging.getLogger('cartography').setLevel(logging.DEBUG) elif config.quiet: logging.getLogger('cartography').setLevel(logging.WARNING) else: logging.getLogger('cartography').setLevel(logging.INFO) logger.debug("Launching cartography with CLI configuration: %r", vars(config)) # Neo4j config if config.neo4j_user: config.neo4j_password = None if config.neo4j_password_prompt: logger.info("Reading password for Neo4j user '%s' interactively.", config.neo4j_user) config.neo4j_password = getpass.getpass() elif config.neo4j_password_env_var: logger.debug( "Reading password for Neo4j user '%s' from environment variable '%s'.", config.neo4j_user, config.neo4j_password_env_var, ) config.neo4j_password = os.environ.get(config.neo4j_password_env_var) if not config.neo4j_password: logger.warning("Neo4j username was provided but a password could not be found.") else: config.neo4j_password = None # AWS config if config.aws_requested_syncs: # No need to store the returned value; we're using this for input validation. parse_and_validate_aws_requested_syncs(config.aws_requested_syncs) # Azure config if config.azure_sp_auth and config.azure_client_secret_env_var: logger.debug( "Reading Client Secret for Azure Service Principal Authentication from environment variable %s", config.azure_client_secret_env_var, ) config.azure_client_secret = os.environ.get(config.azure_client_secret_env_var) else: config.azure_client_secret = None # Okta config if config.okta_org_id and config.okta_api_key_env_var: logger.debug(f"Reading API key for Okta from environment variable {config.okta_api_key_env_var}") config.okta_api_key = os.environ.get(config.okta_api_key_env_var) else: config.okta_api_key = None # CRXcavator config if config.crxcavator_api_base_uri and config.crxcavator_api_key_env_var: logger.debug(f"Reading API key for CRXcavator from env variable {config.crxcavator_api_key_env_var}.") config.crxcavator_api_key = os.environ.get(config.crxcavator_api_key_env_var) else: config.crxcavator_api_key = None # GitHub config if config.github_config_env_var: logger.debug(f"Reading config string for GitHub from environment variable {config.github_config_env_var}") config.github_config = os.environ.get(config.github_config_env_var) else: config.github_config = None # DigitalOcean config if config.digitalocean_token_env_var: logger.debug(f"Reading token for DigitalOcean from env variable {config.digitalocean_token_env_var}") config.digitalocean_token = os.environ.get(config.digitalocean_token_env_var) else: config.digitalocean_token = None # Jamf config if config.jamf_base_uri: if config.jamf_user: config.jamf_password = None if config.jamf_password_env_var: logger.debug( "Reading password for Jamf user '%s' from environment variable '%s'.", config.jamf_user, config.jamf_password_env_var, ) config.jamf_password = os.environ.get(config.jamf_password_env_var) if not config.jamf_user: logger.warning("A Jamf base URI was provided but a user was not.") if not config.jamf_password: logger.warning("A Jamf password could not be found.") else: config.jamf_user = None config.jamf_password = None if config.statsd_enabled: logger.debug( f'statsd enabled. Sending metrics to server {config.statsd_host}:{config.statsd_port}. 
' f'Metrics have prefix "{config.statsd_prefix}".',
            )

        # Run cartography
        try:
            return cartography.sync.run_with_config(self.sync, config)
        except KeyboardInterrupt:
            return 130


def main(argv=None):
    """
    Entrypoint for the default cartography command line interface.

    This entrypoint builds and executes the default cartography sync. See cartography.sync.build_default_sync.

    :rtype: int
    :return: The return code.
    """
    logging.basicConfig(level=logging.INFO)
    logging.getLogger('botocore').setLevel(logging.WARNING)
    logging.getLogger('googleapiclient').setLevel(logging.WARNING)
    logging.getLogger('neo4j.bolt').setLevel(logging.WARNING)
    argv = argv if argv is not None else sys.argv[1:]
    default_sync = cartography.sync.build_default_sync()
    return CLI(default_sync, prog='cartography').main(argv)
py
1a3dddd093c5b202f03b2cacb78158aae99a5223
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. from datadog_api_client.v2.model_utils import ( ModelNormal, cached_property, ) def lazy_import(): from datadog_api_client.v2.model.dashboard_list_item_request import DashboardListItemRequest globals()["DashboardListItemRequest"] = DashboardListItemRequest class DashboardListDeleteItemsRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ validations = {} @cached_property def openapi_types(): lazy_import() return { "dashboards": ([DashboardListItemRequest],), } attribute_map = { "dashboards": "dashboards", } read_only_vars = {} def __init__(self, *args, **kwargs): """DashboardListDeleteItemsRequest - a model defined in OpenAPI Keyword Args: dashboards ([DashboardListItemRequest]): [optional] List of dashboards to delete from the dashboard list. """ super().__init__(kwargs) self._check_pos_args(args) @classmethod def _from_openapi_data(cls, *args, **kwargs): """Helper creating a new instance from a response.""" self = super(DashboardListDeleteItemsRequest, cls)._from_openapi_data(kwargs) self._check_pos_args(args) return self
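# Hedged usage sketch (not part of the generated file): constructing a request
# body for deleting dashboards from a dashboard list. The dashboard id and the
# dashboard type value are illustrative assumptions only.
#
# from datadog_api_client.v2.model.dashboard_list_item_request import DashboardListItemRequest
# from datadog_api_client.v2.model.dashboard_type import DashboardType
#
# body = DashboardListDeleteItemsRequest(
#     dashboards=[
#         DashboardListItemRequest(id="q5j-nti-fv6", type=DashboardType("custom_timeboard")),
#     ],
# )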
py
1a3dde9bd74b355a0953e9c83739f05396fae6a0
from peewee import *

from .base_model import BaseModel


class User(BaseModel):
    telegram_id = BigIntegerField(unique=True)
    first_name = TextField(null=True)
    last_name = TextField(null=True)
    username = TextField(null=True)
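# Hedged usage sketch (assumes BaseModel binds a configured database and that
# the table is created elsewhere); the telegram_id and username values are
# made up for illustration.
#
# User.create_table(safe=True)
# user, created = User.get_or_create(
#     telegram_id=123456789,
#     defaults={'username': 'example_user'},
# )
# print(user.telegram_id, created)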
py
1a3ddf21666b0e33c4302a83b8c2d2ff354cac2b
from __future__ import division import keras import six from keras.models import Model from keras.layers import ( Input, Activation, Dense, Flatten ) from keras.layers.convolutional import ( Conv2D, MaxPooling2D, AveragePooling2D ) from keras.layers.merge import add from keras.layers.normalization import BatchNormalization from keras.regularizers import l2 from keras import backend as K def _bn_relu(input): """Helper to build a BN -> relu block """ norm = BatchNormalization(axis=CHANNEL_AXIS)(input) return Activation("relu")(norm) def _conv_bn_relu(**conv_params): """Helper to build a conv -> BN -> relu block """ filters = conv_params["filters"] kernel_size = conv_params["kernel_size"] strides = conv_params.setdefault("strides", (1, 1)) kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal") padding = conv_params.setdefault("padding", "same") kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4)) def f(input): conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)(input) return _bn_relu(conv) return f def _bn_relu_conv(**conv_params): """Helper to build a BN -> relu -> conv block. This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf """ filters = conv_params["filters"] kernel_size = conv_params["kernel_size"] strides = conv_params.setdefault("strides", (1, 1)) kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal") padding = conv_params.setdefault("padding", "same") kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4)) def f(input): activation = _bn_relu(input) return Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer)(activation) return f def _shortcut(input, residual): """Adds a shortcut between input and residual block and merges them with "sum" """ # Expand channels of shortcut to match residual. # Stride appropriately to match residual (width, height) # Should be int if network architecture is correctly configured. input_shape = K.int_shape(input) residual_shape = K.int_shape(residual) stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS])) stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS])) equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS] shortcut = input # 1 X 1 conv if shape is different. Else identity. if stride_width > 1 or stride_height > 1 or not equal_channels: shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS], kernel_size=(1, 1), strides=(stride_width, stride_height), padding="valid", kernel_initializer="he_normal", kernel_regularizer=l2(0.0001))(input) return add([shortcut, residual]) def _residual_block(block_function, filters, repetitions, is_first_layer=False): """Builds a residual block with repeating bottleneck blocks. """ def f(input): for i in range(repetitions): init_strides = (1, 1) if i == 0 and not is_first_layer: init_strides = (2, 2) input = block_function(filters=filters, init_strides=init_strides, is_first_block_of_first_layer=(is_first_layer and i == 0))(input) return input return f def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False): """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34. 
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf """ def f(input): if is_first_block_of_first_layer: # don't repeat bn->relu since we just did bn->relu->maxpool conv1 = Conv2D(filters=filters, kernel_size=(3, 3), strides=init_strides, padding="same", kernel_initializer="he_normal", kernel_regularizer=l2(1e-4))(input) else: conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3), strides=init_strides)(input) residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1) return _shortcut(input, residual) return f def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False): """Bottleneck architecture for > 34 layer resnet. Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf Returns: A final conv layer of filters * 4 """ def f(input): if is_first_block_of_first_layer: # don't repeat bn->relu since we just did bn->relu->maxpool conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1), strides=init_strides, padding="same", kernel_initializer="he_normal", kernel_regularizer=l2(5e-4))(input) else: conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1), strides=init_strides)(input) conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1) residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3) return _shortcut(input, residual) return f def _handle_dim_ordering(): global ROW_AXIS global COL_AXIS global CHANNEL_AXIS if K.image_dim_ordering() == 'tf': ROW_AXIS = 1 COL_AXIS = 2 CHANNEL_AXIS = 3 else: CHANNEL_AXIS = 1 ROW_AXIS = 2 COL_AXIS = 3 def _get_block(identifier): if isinstance(identifier, six.string_types): res = globals().get(identifier) if not res: raise ValueError('Invalid {}'.format(identifier)) return res return identifier class ResnetBuilder(object): @staticmethod def build(input_shape, num_outputs, block_fn, repetitions): """Builds a custom ResNet like architecture. Args: input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols) num_outputs: The number of outputs at final softmax layer block_fn: The block function to use. This is either `basic_block` or `bottleneck`. The original paper used basic_block for layers < 50 repetitions: Number of repetitions of various block units. At each block unit, the number of filters are doubled and the input size is halved Returns: The keras `Model`. """ _handle_dim_ordering() if len(input_shape) != 3: raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)") # Permute dimension order if necessary if K.image_dim_ordering() == 'tf': input_shape = (input_shape[1], input_shape[2], input_shape[0]) # Load function from str if needed. 
block_fn = _get_block(block_fn) input = Input(shape=input_shape) conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input) pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1) block = pool1 filters = 64 for i, r in enumerate(repetitions): block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block) filters *= 2 # Last activation block = _bn_relu(block) # Classifier block block_shape = K.int_shape(block) pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]), strides=(1, 1))(block) flatten1 = Flatten()(pool2) dense = Dense(units=num_outputs, kernel_initializer=keras.initializers.RandomNormal(stddev=0.001), bias_initializer=keras.initializers.Constant(0.), activation="softmax")(flatten1) model = Model(inputs=input, outputs=dense) return model @staticmethod def build_resnet_18(input_shape, num_outputs): return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2]) @staticmethod def build_resnet_34(input_shape, num_outputs): return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3]) @staticmethod def build_resnet_50(input_shape, num_outputs): return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3]) @staticmethod def build_resnet_101(input_shape, num_outputs): return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3]) @staticmethod def build_resnet_152(input_shape, num_outputs): return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])
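# Hedged usage sketch (not part of the original module): building and compiling
# a ResNet-18 for CIFAR-10-sized inputs. The (channels, rows, cols) input shape
# follows the build() docstring above; the optimizer and loss are illustrative
# assumptions.
if __name__ == '__main__':
    model = ResnetBuilder.build_resnet_18((3, 32, 32), 10)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()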
py
1a3ddf266ff93e74eb99f5922e802f702467c0f1
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT-2 model. """ from __future__ import absolute_import, division, print_function, unicode_literals import collections import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings, TFSequenceSummary, shape_list, get_initializer) from .configuration_gpt2 import GPT2Config from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-tf_model.h5", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-tf_model.h5", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-tf_model.h5", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-tf_model.h5",} def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf class TFAttention(tf.keras.layers.Layer): def __init__(self, nx, n_ctx, config, scale=False, **kwargs): super(TFAttention, self).__init__(**kwargs) self.output_attentions = config.output_attentions n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.n_ctx = n_ctx self.n_head = config.n_head self.split_size = n_state self.scale = scale self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn') self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj') self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. """ i = tf.range(nd)[:,None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, inputs, training=False): q, k, v, attention_mask, head_mask = inputs # q, k, v have shape [batch, heads, sequence, features] print("MatMul") w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
_, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask w = w + attention_mask w = tf.nn.softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if self.output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, inputs, training=False): x, layer_past, attention_mask, head_mask = inputs x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=1) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) present = tf.stack([key, value], axis=1) attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super(TFMLP, self).__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc') self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj') self.act = gelu self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, n_ctx, config, scale=False, **kwargs): super(TFBlock, self).__init__(**kwargs) nx = config.n_embd self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.attn = TFAttention(nx, n_ctx, config, scale, name='attn') self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2') self.mlp = TFMLP(4 * nx, config, name='mlp') def call(self, inputs, training=False): x, layer_past, attention_mask, head_mask = inputs a = self.ln_1(x) output_attn = self.attn([a, layer_past, attention_mask, head_mask], training=training) a = output_attn[0] # output_attn: a, present, (attentions) x = x + a m = self.ln_2(x) m = self.mlp(m, training=training) x = x + m outputs = [x] + output_attn[1:] return outputs # x, present, (attentions) class TFGPT2MainLayer(tf.keras.layers.Layer): def __init__(self, config, *inputs, **kwargs): super(TFGPT2MainLayer, self).__init__(config, *inputs, **kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.num_hidden_layers = config.n_layer self.vocab_size = config.vocab_size self.n_embd = config.n_embd self.wte = TFSharedEmbeddings(config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name='wte') self.wpe = tf.keras.layers.Embedding(config.n_positions, 
config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name='wpe') self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config.n_ctx, config, scale=True, name='h_._{}'.format(i)) for i in range(config.n_layer)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f') def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past attention_mask = inputs[2] if len(inputs) > 2 else attention_mask token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) assert len(inputs) <= 6, "Too many inputs." else: input_ids = inputs if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, shape_list(input_ids)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 else: attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode='embedding') else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, layer_past, attention_mask, head_mask[i]], training=training) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs # last hidden state, presents, (all hidden_states), (attentions) class TFGPT2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = GPT2Config pretrained_model_archive_map = TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in `Language Models are Unsupervised Multitask Learners`_ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~40 GB of text data. This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`Language Models are Unsupervised Multitask Learners`: https://openai.com/blog/better-language-models/ .. 
_`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: `model(inputs_ids) - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associaed to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.BPT2Tokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **past**: list of ``Numpy array`` or ``tf.Tensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past` output below). Can be used to speed up sequential decoding. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). **position_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
""" @add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class TFGPT2Model(TFGPT2PreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, *inputs, **kwargs): super(TFGPT2Model, self).__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: `tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of `tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2LMHeadModel.from_pretrained('gpt2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) logits = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFGPT2LMHeadModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] lm_logits = self.transformer.wte(hidden_states, mode="linear") outputs = (lm_logits,) + transformer_outputs[1:] return outputs # lm_logits, presents, (all hidden_states), (attentions) @add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): r""" **mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``: Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) - 1[``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **lm_prediction_scores**: `tf.Tensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **mc_prediction_scores**: `tf.Tensor`` of shape ``(batch_size, num_choices)`` Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax). **past**: list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of `tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2') # Add a [CLS] to the vocabulary (we should train it also!) 
# This option is currently not implemented in TF 2.0 raise NotImplementedError tokenizer.add_special_tokens({'cls_token': '[CLS]'}) model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] encoded_choices = [tokenizer.encode(s) for s in choices] cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_prediction_scores, mc_prediction_scores = outputs[:2] """ def __init__(self, config, *inputs, **kwargs): super(TFGPT2DoubleHeadsModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head') def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past attention_mask = inputs[2] if len(inputs) > 2 else attention_mask token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) mc_token_ids = inputs.get('mc_token_ids', mc_token_ids) assert len(inputs) <= 7, "Too many inputs." else: input_ids = inputs input_shapes = shape_list(input_ids) seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs = [flat_input_ids, past, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask] transformer_outputs = self.transformer(flat_inputs, training=training) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) lm_logits = self.transformer.wte(hidden_states, mode="linear") mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) outputs = (lm_logits, mc_logits) + transformer_outputs[1:] return outputs # lm logits, mc logits, presents, (all hidden_states), (attentions)
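The docstrings above repeatedly note that the returned `past` list can be fed back into the model to speed up sequential decoding. A minimal sketch of that pattern, assuming the TFGPT2LMHeadModel API defined above (the prompt text and the greedy next-token step are illustrative, not from the original file):

import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2')

input_ids = tf.constant(tokenizer.encode("Hello, my dog"))[None, :]  # Batch size 1
lm_logits, past = model(input_ids)[:2]               # first pass: full prompt, cache returned as `past`
next_token = tf.argmax(lm_logits[:, -1, :], axis=-1)[:, None]
lm_logits, past = model(next_token, past=past)[:2]   # later passes: feed only the new token plus the cache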
py
1a3ddff7cdd1f20f46281bcfd8757aef6a2d7cfa
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .common import typing as typing
from .parametrization import parameter as p
from .optimization import optimizerlib as optimizers  # busy namespace, likely to be simplified
from .optimization import families as families
from .optimization import callbacks as callbacks
from .common import errors as errors
from . import ops as ops

__all__ = ["optimizers", "families", "callbacks", "p", "typing", "errors", "ops"]
__version__ = "0.4.3.post4"
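Not part of the package file above, but a minimal sketch of how the re-exported `optimizers` and `p` namespaces are typically used together (the objective function is an arbitrary example):

import nevergrad as ng

def square(x):
    # simple quadratic objective with its minimum at [0.5, 0.5]
    return sum((xi - 0.5) ** 2 for xi in x)

optimizer = ng.optimizers.OnePlusOne(parametrization=ng.p.Array(shape=(2,)), budget=100)
recommendation = optimizer.minimize(square)
print(recommendation.value)  # should end up near [0.5, 0.5]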
py
1a3de0652f0b09f2b409f6a278f4e61db6e59d4a
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import argparse import functools import paddle import paddle.fluid as fluid import models import reader from utility import add_arguments, print_arguments, check_cuda parser = argparse.ArgumentParser(description=__doc__) add_arg = functools.partial(add_arguments, argparser=parser) # yapf: disable add_arg('model', str, "MobileNetV3_large_x1_25", "Set the network to use.") add_arg('embedding_size', int, 128, "Embedding size.") add_arg('batch_size', int, 1, "Minibatch size.") add_arg('image_shape', str, "3,128,128", "Input image size.") add_arg('use_gpu', bool, True, "Whether to use GPU or not.") add_arg('pretrained_model', str, None, "Whether to use pretrained model.") # yapf: enable model_list = [m for m in dir(models) if "__" not in m] def infer(args): # parameters from arguments model_name = args.model pretrained_model = args.pretrained_model image_shape = [int(m) for m in args.image_shape.split(",")] assert model_name in model_list, "{} is not in lists: {}".format(args.model, model_list) image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32') infer_loader = fluid.io.DataLoader.from_generator( feed_list=[image], capacity=64, use_double_buffer=True, iterable=True) # model definition model = models.__dict__[model_name]() out = model.net(input=image, embedding_size=args.embedding_size) test_program = fluid.default_main_program().clone(for_test=True) place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) if pretrained_model: def if_exist(var): return os.path.exists(os.path.join(pretrained_model, var.name)) fluid.load(model_path=pretrained_model, program=test_program, executor=exe) infer_loader.set_sample_generator( reader.test(args), batch_size=args.batch_size, drop_last=False, places=place) fetch_list = [out.name] for batch_id, data in enumerate(infer_loader()): result = exe.run(test_program, fetch_list=fetch_list, feed=data) result = result[0][0].reshape(-1) print("Test-{0}-feature: {1}".format(batch_id, result[:5])) sys.stdout.flush() def main(): paddle.enable_static() args = parser.parse_args() print_arguments(args) check_cuda(args.use_gpu) infer(args) if __name__ == '__main__': main()
py
1a3de0bf050a715562a34a33d9e7a486d9529dfa
""" Instruction for the candidate. 1) Given a string compres the repeating letters following each letter with number of repetition in the output string 2) Example: 'a' -> 'a1' 'aaa' -> 'a3' 'aabb' -> 'a2b2' '' -> '' """ def rle(test_string): result = '' if not test_string: return result current = test_string[0] count = 1 for c in test_string[1:]: if c == current: count += 1 else: result += current result += str(count) current = c count = 1 result += current result += str(count) return result def eq(exp, res): assert exp == res, f'expected: {exp} - result: {res}' def main(): input = ['', 'a', 'aaabbc', 'aaabbbccc', 'abcdefg'] expect= ['', 'a1', 'a3b2c1', 'a3b3c3', 'a1b1c1d1e1f1g1'] for i, o in zip(input, expect): eq(o, rle(i)) print('success') if __name__ == '__main__': main()
py
1a3de1301e7daee8ae86675da5300d2be9fc828e
import pytest from cfripper.config.config import Config from cfripper.config.filter import Filter from cfripper.model.enums import RuleGranularity, RuleMode, RuleRisk from cfripper.model.result import Failure from cfripper.rule_processor import RuleProcessor from cfripper.rules import DEFAULT_RULES, EC2SecurityGroupOpenToWorldRule from tests.utils import compare_lists_of_failures, get_cfmodel_from @pytest.fixture() def security_group_type_slash0(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/security_group_type_slash0.json").resolve() @pytest.fixture() def valid_security_group_not_slash0(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/valid_security_group_not_slash0.json").resolve() @pytest.fixture() def valid_security_group_port80(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/valid_security_group_port80.json").resolve() @pytest.fixture() def valid_security_group_port443(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/valid_security_group_port443.json").resolve() @pytest.fixture() def invalid_security_group_cidripv6(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/invalid_security_group_cidripv6.json").resolve() @pytest.fixture() def invalid_security_group_range(): return get_cfmodel_from("rules/EC2SecurityGroupOpenToWorldRule/invalid_security_group_range.json").resolve() @pytest.fixture() def invalid_security_group_multiple_statements(): return get_cfmodel_from( "rules/EC2SecurityGroupOpenToWorldRule/invalid_security_group_multiple_statements.json" ).resolve() def test_security_group_type_slash0(security_group_type_slash0): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(security_group_type_slash0) assert not result.valid assert compare_lists_of_failures( result.failures, [ Failure( granularity=RuleGranularity.RESOURCE, reason="Port(s) 22 open to public IPs: (0.0.0.0/0) in security group 'SecurityGroup'", risk_value=RuleRisk.MEDIUM, rule="EC2SecurityGroupOpenToWorldRule", rule_mode=RuleMode.BLOCKING, actions=None, resource_ids={"SecurityGroup"}, ) ], ) def test_valid_security_group_not_slash0(valid_security_group_not_slash0): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(valid_security_group_not_slash0) assert result.valid assert compare_lists_of_failures(result.failures, []) def test_valid_security_group_port80(valid_security_group_port80): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(valid_security_group_port80) assert result.valid assert compare_lists_of_failures(result.failures, []) def test_valid_security_group_port443(valid_security_group_port443): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(valid_security_group_port443) assert result.valid assert compare_lists_of_failures(result.failures, []) def test_invalid_security_group_cidripv6(invalid_security_group_cidripv6): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(invalid_security_group_cidripv6) assert not result.valid assert compare_lists_of_failures( result.failures, [ Failure( granularity=RuleGranularity.RESOURCE, reason="Port(s) 22 open to public IPs: (::/0) in security group 'SecurityGroup'", risk_value=RuleRisk.MEDIUM, rule="EC2SecurityGroupOpenToWorldRule", rule_mode=RuleMode.BLOCKING, actions=None, resource_ids={"SecurityGroup"}, ) ], ) def test_invalid_security_group_range(invalid_security_group_range): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(invalid_security_group_range) assert not result.valid assert 
compare_lists_of_failures( result.failures, [ Failure( granularity=RuleGranularity.RESOURCE, reason="Port(s) 0-79, 81-100 open to public IPs: (11.0.0.0/8) in security group 'SecurityGroup'", risk_value=RuleRisk.MEDIUM, rule="EC2SecurityGroupOpenToWorldRule", rule_mode=RuleMode.BLOCKING, actions=None, resource_ids={"SecurityGroup"}, ) ], ) def test_invalid_security_group_multiple_statements(invalid_security_group_multiple_statements): rule = EC2SecurityGroupOpenToWorldRule(None) result = rule.invoke(invalid_security_group_multiple_statements) assert not result.valid assert compare_lists_of_failures( result.failures, [ Failure( granularity=RuleGranularity.RESOURCE, reason="Port(s) 9090 open to public IPs: (172.0.0.0/8) in security group 'SecurityGroup'", risk_value=RuleRisk.MEDIUM, rule="EC2SecurityGroupOpenToWorldRule", rule_mode=RuleMode.BLOCKING, actions=None, resource_ids={"SecurityGroup"}, ) ], ) def test_filter_do_not_report_anything(invalid_security_group_range): mock_config = Config( rules=["EC2SecurityGroupOpenToWorldRule"], aws_account_id="123456789", stack_name="mockstack", rules_filters=[ Filter( rule_mode=RuleMode.ALLOWED, eval={ "and": [ {"eq": [{"ref": "config.stack_name"}, "mockstack"]}, {"eq": [{"ref": "open_ports"}, list(range(0, 101))]}, ] }, rules={"EC2SecurityGroupOpenToWorldRule"}, ) ], ) rules = [DEFAULT_RULES.get(rule)(mock_config) for rule in mock_config.rules] processor = RuleProcessor(*rules) result = processor.process_cf_template(invalid_security_group_range, mock_config) assert result.valid assert compare_lists_of_failures(result.failures, []) def test_non_matching_filters_are_reported_normally(invalid_security_group_range): mock_config = Config( rules=["EC2SecurityGroupOpenToWorldRule"], aws_account_id="123456789", stack_name="mockstack", rules_filters=[ Filter( rule_mode=RuleMode.ALLOWED, eval={"eq": [{"ref": "config.stack_name"}, "anotherstack"]}, rules={"EC2SecurityGroupOpenToWorldRule"}, ) ], ) rules = [DEFAULT_RULES.get(rule)(mock_config) for rule in mock_config.rules] processor = RuleProcessor(*rules) result = processor.process_cf_template(invalid_security_group_range, mock_config) assert not result.valid assert compare_lists_of_failures( result.failures, [ Failure( granularity=RuleGranularity.RESOURCE, reason="Port(s) 0-79, 81-100 open to public IPs: (11.0.0.0/8) in security group 'SecurityGroup'", risk_value=RuleRisk.MEDIUM, rule="EC2SecurityGroupOpenToWorldRule", rule_mode=RuleMode.BLOCKING, actions=None, resource_ids={"SecurityGroup"}, ) ], )
py
1a3de2d9866c2255cd7a517f3c7f7cf4189f6e23
import gym
import os

from floatenv import FloatEnv


def get_user_action(env):
    env.render(show_position_numbers=True)
    print("What action would you like to take? Enter a location and an increment value:")
    str_action = input().strip(" ")
    locations = str_action.split(" ")
    if len(locations) != 2:
        return None
    return (int(locations[0]), float(locations[1]))


if __name__ == '__main__':
    path_to_config = os.getcwd() + "/config.yaml"
    env = FloatEnv(path_to_config)
    observation = env.reset()
    total_reward = 0
    actions_taken = 0
    print("Current board score: ", env._current_score())

    while True:
        action = get_user_action(env)
        if action is None:
            print("That is not a valid action. Please retry:")
            continue
        print("Action taken: ", action)
        state, reward, done, other = env.step(action)
        total_reward += reward
        actions_taken += 1
        print("Reward received: ", reward)
        print("Average reward: ", total_reward / actions_taken)
        print("Total Reward: ", total_reward)
        print("Number of actions: ", actions_taken)
        if done:
            break

    print("You reached the maximum number of actions, the game has ended.\n")
    # print final board state
    print("Final ", end="")
    env.render()
py
1a3de39e26971c69d450e57c9da5313586750659
from __future__ import unicode_literals

import frappe
from erpnext.controllers.website_list_for_contact import get_customers_suppliers
from frappe import _


def get_context(context):
    # do your magic here
    pass


def set_supplier():
    if hasattr(frappe.local, "cookie_manager"):
        customers, suppliers = get_customers_suppliers('Supplier Product Info', frappe.session.user)
        if suppliers:
            frappe.local.cookie_manager.set_cookie("supplier", suppliers[0])
py
1a3de3e9d2d6450f5746df255597e58dbcced4b1
# This file was automatically created by FeynRules 2.3.32 # Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018) # Date: Sat 21 Apr 2018 20:43:27 from object_library import all_parameters, Parameter from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot # This is a default parameter object representing 0. ZERO = Parameter(name = 'ZERO', nature = 'internal', type = 'real', value = '0.0', texname = '0') # User-defined parameters. cabi = Parameter(name = 'cabi', nature = 'external', type = 'real', value = 0.227736, texname = '\\theta _c', lhablock = 'CKMBLOCK', lhacode = [ 1 ]) aEWM1 = Parameter(name = 'aEWM1', nature = 'external', type = 'real', value = 127.9, texname = '\\text{aEWM1}', lhablock = 'SMINPUTS', lhacode = [ 1 ]) Gf = Parameter(name = 'Gf', nature = 'external', type = 'real', value = 0.0000116637, texname = 'G_f', lhablock = 'SMINPUTS', lhacode = [ 2 ]) aS = Parameter(name = 'aS', nature = 'external', type = 'real', value = 0.1184, texname = '\\alpha _s', lhablock = 'SMINPUTS', lhacode = [ 3 ]) ymdo = Parameter(name = 'ymdo', nature = 'external', type = 'real', value = 0.00504, texname = '\\text{ymdo}', lhablock = 'YUKAWA', lhacode = [ 1 ]) ymup = Parameter(name = 'ymup', nature = 'external', type = 'real', value = 0.00255, texname = '\\text{ymup}', lhablock = 'YUKAWA', lhacode = [ 2 ]) yms = Parameter(name = 'yms', nature = 'external', type = 'real', value = 0.101, texname = '\\text{yms}', lhablock = 'YUKAWA', lhacode = [ 3 ]) ymc = Parameter(name = 'ymc', nature = 'external', type = 'real', value = 1.27, texname = '\\text{ymc}', lhablock = 'YUKAWA', lhacode = [ 4 ]) ymb = Parameter(name = 'ymb', nature = 'external', type = 'real', value = 4.7, texname = '\\text{ymb}', lhablock = 'YUKAWA', lhacode = [ 5 ]) ymt = Parameter(name = 'ymt', nature = 'external', type = 'real', value = 172, texname = '\\text{ymt}', lhablock = 'YUKAWA', lhacode = [ 6 ]) yme = Parameter(name = 'yme', nature = 'external', type = 'real', value = 0.000511, texname = '\\text{yme}', lhablock = 'YUKAWA', lhacode = [ 11 ]) ymm = Parameter(name = 'ymm', nature = 'external', type = 'real', value = 0.10566, texname = '\\text{ymm}', lhablock = 'YUKAWA', lhacode = [ 13 ]) ymtau = Parameter(name = 'ymtau', nature = 'external', type = 'real', value = 1.777, texname = '\\text{ymtau}', lhablock = 'YUKAWA', lhacode = [ 15 ]) kq = Parameter(name = 'kq', nature = 'external', type = 'real', value = 0.001, texname = 'k_q', lhablock = 'FRBlock', lhacode = [ 1 ]) lamf = Parameter(name = 'lamf', nature = 'external', type = 'real', value = 0.1, texname = 'l_{\\text{fi}}', lhablock = 'FRBlock', lhacode = [ 2 ]) yf1x1 = Parameter(name = 'yf1x1', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf1x1}', lhablock = 'FRBlock6', lhacode = [ 1, 1 ]) yf1x2 = Parameter(name = 'yf1x2', nature = 'external', type = 'complex', value = 1.e-6, texname = '\\text{yf1x2}', lhablock = 'FRBlock6', lhacode = [ 1, 2 ]) yf1x3 = Parameter(name = 'yf1x3', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf1x3}', lhablock = 'FRBlock6', lhacode = [ 1, 3 ]) yf2x1 = Parameter(name = 'yf2x1', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf2x1}', lhablock = 'FRBlock6', lhacode = [ 2, 1 ]) yf2x2 = Parameter(name = 'yf2x2', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf2x2}', lhablock = 'FRBlock6', lhacode = [ 2, 2 ]) yf2x3 = Parameter(name = 'yf2x3', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf2x3}', 
lhablock = 'FRBlock6', lhacode = [ 2, 3 ]) yf3x1 = Parameter(name = 'yf3x1', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf3x1}', lhablock = 'FRBlock6', lhacode = [ 3, 1 ]) yf3x2 = Parameter(name = 'yf3x2', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf3x2}', lhablock = 'FRBlock6', lhacode = [ 3, 2 ]) yf3x3 = Parameter(name = 'yf3x3', nature = 'external', type = 'complex', value = 0, texname = '\\text{yf3x3}', lhablock = 'FRBlock6', lhacode = [ 3, 3 ]) MZ = Parameter(name = 'MZ', nature = 'external', type = 'real', value = 91.1876, texname = '\\text{MZ}', lhablock = 'MASS', lhacode = [ 23 ]) Me = Parameter(name = 'Me', nature = 'external', type = 'real', value = 0.000511, texname = '\\text{Me}', lhablock = 'MASS', lhacode = [ 11 ]) MMU = Parameter(name = 'MMU', nature = 'external', type = 'real', value = 0.10566, texname = '\\text{MMU}', lhablock = 'MASS', lhacode = [ 13 ]) MTA = Parameter(name = 'MTA', nature = 'external', type = 'real', value = 1.777, texname = '\\text{MTA}', lhablock = 'MASS', lhacode = [ 15 ]) MU = Parameter(name = 'MU', nature = 'external', type = 'real', value = 0.00255, texname = 'M', lhablock = 'MASS', lhacode = [ 2 ]) MC = Parameter(name = 'MC', nature = 'external', type = 'real', value = 1.27, texname = '\\text{MC}', lhablock = 'MASS', lhacode = [ 4 ]) MT = Parameter(name = 'MT', nature = 'external', type = 'real', value = 172, texname = '\\text{MT}', lhablock = 'MASS', lhacode = [ 6 ]) MD = Parameter(name = 'MD', nature = 'external', type = 'real', value = 0.00504, texname = '\\text{MD}', lhablock = 'MASS', lhacode = [ 1 ]) MS = Parameter(name = 'MS', nature = 'external', type = 'real', value = 0.101, texname = '\\text{MS}', lhablock = 'MASS', lhacode = [ 3 ]) MB = Parameter(name = 'MB', nature = 'external', type = 'real', value = 4.7, texname = '\\text{MB}', lhablock = 'MASS', lhacode = [ 5 ]) MH = Parameter(name = 'MH', nature = 'external', type = 'real', value = 125, texname = '\\text{MH}', lhablock = 'MASS', lhacode = [ 25 ]) MP = Parameter(name = 'MP', nature = 'external', type = 'real', value = 120, texname = '\\text{MP}', lhablock = 'MASS', lhacode = [ 9000005 ]) Mfi = Parameter(name = 'Mfi', nature = 'external', type = 'real', value = 10, texname = '\\text{Mfi}', lhablock = 'MASS', lhacode = [ 9000006 ]) WZ = Parameter(name = 'WZ', nature = 'external', type = 'real', value = 2.4952, texname = '\\text{WZ}', lhablock = 'DECAY', lhacode = [ 23 ]) WW = Parameter(name = 'WW', nature = 'external', type = 'real', value = 2.085, texname = '\\text{WW}', lhablock = 'DECAY', lhacode = [ 24 ]) WT = Parameter(name = 'WT', nature = 'external', type = 'real', value = 1.50833649, texname = '\\text{WT}', lhablock = 'DECAY', lhacode = [ 6 ]) WH = Parameter(name = 'WH', nature = 'external', type = 'real', value = 0.00589569, texname = '\\text{WH}', lhablock = 'DECAY', lhacode = [ 25 ]) WH1 = Parameter(name = 'WH1', nature = 'external', type = 'real', value = 0.00575308848, texname = '\\text{WH1}', lhablock = 'DECAY', lhacode = [ 9000005 ]) Wfi = Parameter(name = 'Wfi', nature = 'external', type = 'real', value = 6.03044e-9, texname = '\\text{Wfi}', lhablock = 'DECAY', lhacode = [ 9000006 ]) aEW = Parameter(name = 'aEW', nature = 'internal', type = 'real', value = '1/aEWM1', texname = '\\alpha _{\\text{EW}}') G = Parameter(name = 'G', nature = 'internal', type = 'real', value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)', texname = 'G') CKM1x1 = Parameter(name = 'CKM1x1', nature = 'internal', type = 'complex', value = 
'cmath.cos(cabi)', texname = '\\text{CKM1x1}') CKM1x2 = Parameter(name = 'CKM1x2', nature = 'internal', type = 'complex', value = 'cmath.sin(cabi)', texname = '\\text{CKM1x2}') CKM1x3 = Parameter(name = 'CKM1x3', nature = 'internal', type = 'complex', value = '0', texname = '\\text{CKM1x3}') CKM2x1 = Parameter(name = 'CKM2x1', nature = 'internal', type = 'complex', value = '-cmath.sin(cabi)', texname = '\\text{CKM2x1}') CKM2x2 = Parameter(name = 'CKM2x2', nature = 'internal', type = 'complex', value = 'cmath.cos(cabi)', texname = '\\text{CKM2x2}') CKM2x3 = Parameter(name = 'CKM2x3', nature = 'internal', type = 'complex', value = '0', texname = '\\text{CKM2x3}') CKM3x1 = Parameter(name = 'CKM3x1', nature = 'internal', type = 'complex', value = '0', texname = '\\text{CKM3x1}') CKM3x2 = Parameter(name = 'CKM3x2', nature = 'internal', type = 'complex', value = '0', texname = '\\text{CKM3x2}') CKM3x3 = Parameter(name = 'CKM3x3', nature = 'internal', type = 'complex', value = '1', texname = '\\text{CKM3x3}') MW = Parameter(name = 'MW', nature = 'internal', type = 'real', value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))', texname = 'M_W') ee = Parameter(name = 'ee', nature = 'internal', type = 'real', value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)', texname = 'e') sw2 = Parameter(name = 'sw2', nature = 'internal', type = 'real', value = '1 - MW**2/MZ**2', texname = '\\text{sw2}') cw = Parameter(name = 'cw', nature = 'internal', type = 'real', value = 'cmath.sqrt(1 - sw2)', texname = 'c_w') sw = Parameter(name = 'sw', nature = 'internal', type = 'real', value = 'cmath.sqrt(sw2)', texname = 's_w') g1 = Parameter(name = 'g1', nature = 'internal', type = 'real', value = 'ee/cw', texname = 'g_1') gw = Parameter(name = 'gw', nature = 'internal', type = 'real', value = 'ee/sw', texname = 'g_w') vev = Parameter(name = 'vev', nature = 'internal', type = 'real', value = '(2*MW*sw)/ee', texname = '\\text{vev}') mfi = Parameter(name = 'mfi', nature = 'internal', type = 'real', value = 'cmath.sqrt(100 - (kq*vev**2)/2.)', texname = 'M_{\\text{fi}}') AH = Parameter(name = 'AH', nature = 'internal', type = 'real', value = '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)', texname = 'A_H') GH = Parameter(name = 'GH', nature = 'internal', type = 'real', value = '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)', texname = 'G_H') Gphi = Parameter(name = 'Gphi', nature = 'internal', type = 'real', value = '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)', texname = 'G_h') lam = Parameter(name = 'lam', nature = 'internal', type = 'real', value = 'MH**2/(2.*vev**2)', texname = '\\text{lam}') yb = Parameter(name = 'yb', nature = 'internal', type = 'real', value = '(ymb*cmath.sqrt(2))/vev', texname = '\\text{yb}') yc = Parameter(name = 'yc', nature = 'internal', type = 'real', value = '(ymc*cmath.sqrt(2))/vev', texname = '\\text{yc}') ydo = Parameter(name = 'ydo', nature = 'internal', type = 'real', value = '(ymdo*cmath.sqrt(2))/vev', texname = '\\text{ydo}') ye = Parameter(name = 'ye', nature = 'internal', type = 'real', value = '(yme*cmath.sqrt(2))/vev', texname = '\\text{ye}') ym = Parameter(name = 'ym', nature = 'internal', type = 'real', value = 
'(ymm*cmath.sqrt(2))/vev', texname = '\\text{ym}') ys = Parameter(name = 'ys', nature = 'internal', type = 'real', value = '(yms*cmath.sqrt(2))/vev', texname = '\\text{ys}') yt = Parameter(name = 'yt', nature = 'internal', type = 'real', value = '(ymt*cmath.sqrt(2))/vev', texname = '\\text{yt}') ytau = Parameter(name = 'ytau', nature = 'internal', type = 'real', value = '(ymtau*cmath.sqrt(2))/vev', texname = '\\text{ytau}') yup = Parameter(name = 'yup', nature = 'internal', type = 'real', value = '(ymup*cmath.sqrt(2))/vev', texname = '\\text{yup}') muH = Parameter(name = 'muH', nature = 'internal', type = 'real', value = 'cmath.sqrt(lam*vev**2)', texname = '\\mu')
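The internal parameters above encode the usual tree-level electroweak relations. As a quick numerical sanity check (not part of the generated model file), the same formulas can be evaluated directly from the SMINPUTS values:

import cmath

aEWM1, Gf, MZ = 127.9, 0.0000116637, 91.1876   # external values from the blocks above
aEW = 1 / aEWM1
ee = 2 * cmath.sqrt(aEW) * cmath.sqrt(cmath.pi)
MW = cmath.sqrt(MZ**2 / 2. + cmath.sqrt(MZ**4 / 4. - (aEW * cmath.pi * MZ**2) / (Gf * cmath.sqrt(2))))
sw2 = 1 - MW**2 / MZ**2
vev = (2 * MW * cmath.sqrt(sw2)) / ee
print(abs(MW), abs(sw2), abs(vev))   # roughly 79.82, 0.234, 246.2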
py
1a3de460921a53462f10c36f243e715843a79a57
import rich.repr


@rich.repr.auto
class Bird:
    def __init__(self, name, eats=None, fly=True, extinct=False):
        self.name = name
        self.eats = list(eats) if eats else []
        self.fly = fly
        self.extinct = extinct


# Note that the repr is still generated without Rich
# Try commenting out the following line
from rich import print

BIRDS = {
    "gull": Bird("gull", eats=["fish", "chips", "ice cream", "sausage rolls"]),
    "penguin": Bird("penguin", eats=["fish"], fly=False),
    "dodo": Bird("dodo", eats=["fruit"], fly=False, extinct=True),
}
print(BIRDS)
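A small usage note (not in the original snippet): rich.repr.auto appears to skip arguments that still equal their defaults, so the generated repr stays compact even without Rich's print.

# Illustrative only -- the exact string depends on the installed rich version.
print(repr(BIRDS["penguin"]))   # e.g. Bird('penguin', eats=['fish'], fly=False)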
py
1a3de4ab1e2a104cbe69854cb5a4301d13e360d9
from .base import * from .mgr import CoreManager as Mgr class CreationPhaseManager: _id_generator = id_generator() def __init__(self, obj_type, has_color=False, add_to_hist=False): self._obj = None self._obj_type = obj_type self._has_color = has_color self._add_to_hist = add_to_hist self._custom_obj_name = "" self._origin_pos = Point3() self._creation_start_func = None self._creation_handlers = [] self._current_creation_phase = 0 if has_color: self.set_next_object_color() else: GD[f"next_{obj_type}_color"] = None Mgr.expose(f"custom_{obj_type}_name", lambda: self._custom_obj_name) Mgr.accept(f"set_custom_{obj_type}_name", self.__set_custom_object_name) def setup(self, creation_phases, status_text): creation_status = {} mode_text = f"Create {status_text['obj_type']}" info_text = "LMB-drag to start creation" creation_status["idle"] = {"mode": mode_text, "info": info_text} info_text = "[SNAP] LMB-drag to start creation" creation_status["snap_idle"] = {"mode": mode_text, "info": info_text} add_state = Mgr.add_state bind = Mgr.bind_state state_persistence = -12 phase_finishers = [] for i, phase_data in enumerate(creation_phases): if len(phase_data) == 3: main_starter, main_handler, finisher = phase_data else: main_starter, main_handler = phase_data finisher = lambda: None phase_finishers.append(finisher) def complete_creation(index): for finisher in phase_finishers[index:]: finisher() self.__end_creation(cancel=False) if i == 0: self._creation_start_func = main_starter Mgr.accept(f"start_{self._obj_type}_creation", self.__start_creation) on_enter_state = self.__enter_creation_start_phase else: on_enter_state = lambda p, a, m=main_starter: self.__start_creation_phase(m, p, a) on_exit_state = self.__exit_creation_phase state_id = f"{self._obj_type}_creation_phase_{i + 1}" add_state(state_id, state_persistence, on_enter_state, on_exit_state) self._creation_handlers.append(self.__get_creation_phase_handler(main_handler)) binding_id = f"{self._obj_type} creation -> navigate" bind(state_id, binding_id, "space", lambda: Mgr.enter_state("navigation_mode")) binding_id = f"quit {self._obj_type} creation" bind(state_id, binding_id, "escape", self.__end_creation) binding_id = f"abort {self._obj_type} creation" bind(state_id, binding_id, "focus_loss", self.__end_creation) binding_id = f"cancel {self._obj_type} creation" bind(state_id, binding_id, "mouse3", self.__end_creation) binding_id = f"complete {self._obj_type} creation {i}" bind(state_id, binding_id, "enter", lambda index=i: complete_creation(index)) def finish_phase(finisher, state_id=None): finisher() Mgr.enter_state(state_id) if state_id else self.__end_creation(cancel=False) info_text = f"move mouse to {status_text[f'phase{i + 1}']};" \ + " <Tab> to skip phase; <Enter> to complete;" if i == len(creation_phases) - 1: binding_id = f"skip {self._obj_type} creation phase {i}" finish_command = lambda f=finisher: finish_phase(f) bind(state_id, binding_id, "tab", finish_command) binding_id = f"finalize {self._obj_type} creation" bind(state_id, binding_id, "mouse1-up", lambda: self.__end_creation(cancel=False)) info_text += " release LMB to finalize;" else: next_state_id = f"{self._obj_type}_creation_phase_{i + 2}" binding_id = f"skip {self._obj_type} creation phase {i}" finish_command = lambda f=finisher, i=next_state_id: finish_phase(f, i) bind(state_id, binding_id, "tab", finish_command) binding_id = f"start {self._obj_type} creation phase {i + 2}" command = lambda state_id=next_state_id: Mgr.enter_state(state_id) bind(state_id, binding_id, 
"mouse1-up", command) info_text += " release LMB to set;" info_text += " RMB to cancel; <Space> to navigate" creation_status[f"phase{i + 1}"] = {"mode": mode_text, "info": info_text} status_data = GD["status"]["create"] status_data[self._obj_type] = creation_status return True def __enter_creation_start_phase(self, prev_state_id, active): if active: Mgr.do("enable_view_gizmo", False) Mgr.do("set_view_gizmo_mouse_region_sort", 0) Mgr.update_remotely("interactive_creation", "resumed") snap_settings = GD["snap"] if snap_settings["on"]["creation"]: snap_type = "creation_phase_1" snap_on = snap_settings["on"][snap_type] snap_tgt_type = snap_settings["tgt_type"][snap_type] if snap_on and snap_tgt_type != "increment": snap_settings["type"] = snap_type Mgr.do("init_snap_target_checking", "create") self._creation_start_func() Mgr.add_task(self._creation_handlers[0], "draw_object", sort=3) Mgr.update_app("status", ["create", self._obj_type, "phase1"]) def __exit_creation_phase(self, next_state_id, active): if active: Mgr.remove_task("draw_object") Mgr.do("enable_view_gizmo", True) Mgr.do("set_view_gizmo_mouse_region_sort", 210) self.__disable_snap() def __start_creation(self, origin_pos): self._origin_pos = origin_pos self._current_creation_phase = 1 Mgr.enter_state(f"{self._obj_type}_creation_phase_1") def __start_creation_phase(self, main_start_func, prev_state_id, active): phase_id = self._current_creation_phase if active: Mgr.do("enable_view_gizmo", False) Mgr.do("set_view_gizmo_mouse_region_sort", 0) Mgr.update_remotely("interactive_creation", "resumed") else: phase_id += 1 self._current_creation_phase = phase_id snap_settings = GD["snap"] if snap_settings["on"]["creation"]: snap_type = f"creation_phase_{phase_id - 1}" snap_on = snap_settings["on"][snap_type] snap_tgt_type = snap_settings["tgt_type"][snap_type] if snap_on: if snap_tgt_type != "increment": Mgr.do("end_snap_target_checking") Mgr.set_cursor("create") if snap_tgt_type == "grid_point" and not active: Mgr.update_app("active_grid_plane", GD["active_grid_plane"]) snap_type = f"creation_phase_{phase_id}" snap_on = snap_settings["on"][snap_type] snap_tgt_type = snap_settings["tgt_type"][snap_type] if snap_on and snap_tgt_type != "increment": snap_settings["type"] = snap_type Mgr.do("init_snap_target_checking", "create") Mgr.remove_task("draw_object") main_start_func() creation_handler = self._creation_handlers[phase_id - 1] Mgr.add_task(creation_handler, "draw_object", sort=3) Mgr.update_app("status", ["create", self._obj_type, f"phase{phase_id}"]) def __get_creation_phase_handler(self, main_handler_func): def handle_creation_phase(task): main_handler_func() return task.cont return handle_creation_phase def __set_custom_object_name(self, custom_name): self._custom_obj_name = custom_name def init_object(self, obj): self._obj = obj def get_object(self): return self._obj def get_object_type(self): return self._obj_type def generate_object_id(self): obj_id = (self._obj_type,) + next(self._id_generator) return obj_id def set_next_object_color(self): color_values = tuple(random.random() * .4 + .5 for i in range(3)) GD[f"next_{self._obj_type}_color"] = color_values def get_next_object_color(self): r, g, b = GD[f"next_{self._obj_type}_color"] color = VBase4(r, g, b, 1.) 
return color def get_origin_pos(self): return self._origin_pos def add_history(self, toplevel_obj): Mgr.do("update_history_time") event_descr = f'Create "{toplevel_obj.name}"' obj_id = toplevel_obj.id obj_data = {obj_id: toplevel_obj.get_data_to_store("creation")} event_data = {"objects": obj_data} event_data["object_ids"] = set(Mgr.get("object_ids")) Mgr.do("add_history", event_descr, event_data, update_time_id=False) def __disable_snap(self, reset_grid=False): snap_settings = GD["snap"] if snap_settings["on"]["creation"]: snap_type = f"creation_phase_{self._current_creation_phase}" snap_on = snap_settings["on"][snap_type] snap_tgt_type = snap_settings["tgt_type"][snap_type] if snap_on: if snap_tgt_type != "increment": Mgr.do("end_snap_target_checking") Mgr.set_cursor("create") if snap_tgt_type == "grid_point" and reset_grid: Mgr.update_app("active_grid_plane", GD["active_grid_plane"]) snap_settings["type"] = "creation" def __end_creation(self, cancel=True): self.__disable_snap(reset_grid=True) Mgr.remove_task("draw_object") if cancel or not self._obj.is_valid(): self._obj.destroy() else: self._obj.finalize() name = Mgr.get("next_obj_name", self._obj_type) Mgr.update_remotely("next_obj_name", name) if self._has_color: self.set_next_object_color() if self._add_to_hist: self.add_history(self._obj.toplevel_obj) Mgr.notify("creation_ended") Mgr.enter_state("creation_mode") self._obj = None self._current_creation_phase = 0
py
1a3de56964c6f45d273bbb1befe0c4a18647a876
"""The tests for the sun automation.""" from datetime import datetime import pytest from homeassistant.components import sun import homeassistant.components.automation as automation from homeassistant.const import ( ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF, SERVICE_TURN_ON, SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET, ) from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.async_mock import patch from tests.common import async_fire_time_changed, async_mock_service, mock_component from tests.components.blueprint.conftest import stub_blueprint_populate # noqa ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") @pytest.fixture(autouse=True) def setup_comp(hass): """Initialize components.""" mock_component(hass, "group") dt_util.set_default_time_zone(hass.config.time_zone) hass.loop.run_until_complete( async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}) ) def teardown(): """Restore.""" dt_util.set_default_time_zone(ORIG_TIME_ZONE) async def test_sunset_trigger(hass, calls, legacy_patchable_time): """Test the sunset trigger.""" now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET}, "action": {"service": "test.automation"}, } }, ) await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 0 with patch("homeassistant.util.dt.utcnow", return_value=now): await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 async def test_sunrise_trigger(hass, calls, legacy_patchable_time): """Test the sunrise trigger.""" now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time): """Test the sunset trigger with offset.""" now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": { "platform": "sun", "event": SUN_EVENT_SUNSET, "offset": "0:30:00", }, "action": { "service": "test.automation", "data_template": { "some": "{{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event", "offset")) }, }, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "sun - sunset - 0:30:00" async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time): """Test the sunrise trigger 
with offset.""" now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": { "platform": "sun", "event": SUN_EVENT_SUNRISE, "offset": "-0:30:00", }, "action": {"service": "test.automation"}, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 async def test_if_action_before_sunrise_no_offset(hass, calls): """ Test if action was before sunrise. Before sunrise is true from midnight until sunset, local time. """ await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = sunrise + 1s -> 'before sunrise' not true now = datetime(2015, 9, 16, 13, 32, 44, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise -> 'before sunrise' true now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'before sunrise' true now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = local midnight - 1s -> 'before sunrise' not true now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_after_sunrise_no_offset(hass, calls): """ Test if action was after sunrise. After sunrise is true from sunrise until midnight, local time. 
""" await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = sunrise - 1s -> 'after sunrise' not true now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise + 1s -> 'after sunrise' true now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'after sunrise' not true now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight - 1s -> 'after sunrise' true now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_before_sunrise_with_offset(hass, calls): """ Test if action was before sunrise with offset. Before sunrise is true from midnight until sunset, local time. """ await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": { "condition": "sun", "before": SUN_EVENT_SUNRISE, "before_offset": "+1:00:00", }, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = sunrise + 1s + 1h -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 16, 14, 32, 44, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise + 1h -> 'before sunrise' with offset +1h true now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = UTC midnight -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = UTC midnight - 1s -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'before sunrise' with offset +1h true now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = local midnight - 1s -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 17, 6, 
59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = sunset -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 17, 1, 56, 48, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = sunset -1s -> 'before sunrise' with offset +1h not true now = datetime(2015, 9, 17, 1, 56, 45, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_before_sunset_with_offset(hass, calls): """ Test if action was before sunset with offset. Before sunset is true from midnight until sunset, local time. """ await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": { "condition": "sun", "before": "sunset", "before_offset": "+1:00:00", }, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = local midnight -> 'before sunset' with offset +1h true now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = sunset + 1s + 1h -> 'before sunset' with offset +1h not true now = datetime(2015, 9, 17, 2, 55, 25, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = sunset + 1h -> 'before sunset' with offset +1h true now = datetime(2015, 9, 17, 2, 55, 24, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = UTC midnight -> 'before sunset' with offset +1h true now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 3 # now = UTC midnight - 1s -> 'before sunset' with offset +1h true now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 4 # now = sunrise -> 'before sunset' with offset +1h true now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 5 # now = sunrise -1s -> 'before sunset' with offset +1h true now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 6 # now = local midnight-1s -> 'after sunrise' with offset +1h not true now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 6 async def 
test_if_action_after_sunrise_with_offset(hass, calls): """ Test if action was after sunrise with offset. After sunrise is true from sunrise until midnight, local time. """ await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": { "condition": "sun", "after": SUN_EVENT_SUNRISE, "after_offset": "+1:00:00", }, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = sunrise - 1s + 1h -> 'after sunrise' with offset +1h not true now = datetime(2015, 9, 16, 14, 32, 42, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise + 1h -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = UTC noon -> 'after sunrise' with offset +1h not true now = datetime(2015, 9, 16, 12, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = UTC noon - 1s -> 'after sunrise' with offset +1h not true now = datetime(2015, 9, 16, 11, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local noon -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 16, 19, 1, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = local noon - 1s -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 16, 18, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 3 # now = sunset -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 4 # now = sunset + 1s -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 5 # now = local midnight-1s -> 'after sunrise' with offset +1h true now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 6 # now = local midnight -> 'after sunrise' with offset +1h not true now = datetime(2015, 9, 17, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 6 async def test_if_action_after_sunset_with_offset(hass, calls): """ Test if action was after sunset with offset. After sunset is true from sunset until midnight, local time. 
""" await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": { "condition": "sun", "after": "sunset", "after_offset": "+1:00:00", }, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-15 06:32:05 local, sunset: 2015-09-15 18:56:46 local # sunrise: 2015-09-15 13:32:05 UTC, sunset: 2015-09-16 01:56:46 UTC # now = sunset - 1s + 1h -> 'after sunset' with offset +1h not true now = datetime(2015, 9, 16, 2, 56, 45, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunset + 1h -> 'after sunset' with offset +1h true now = datetime(2015, 9, 16, 2, 56, 46, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = midnight-1s -> 'after sunset' with offset +1h true now = datetime(2015, 9, 16, 6, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = midnight -> 'after sunset' with offset +1h not true now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_before_and_after_during(hass, calls): """ Test if action was after sunset and before sunrise. This is true from sunrise until sunset. """ await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": { "condition": "sun", "after": SUN_EVENT_SUNRISE, "before": SUN_EVENT_SUNSET, }, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC # now = sunrise - 1s -> 'after sunrise' + 'before sunset' not true now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunset + 1s -> 'after sunrise' + 'before sunset' not true now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise -> 'after sunrise' + 'before sunset' true now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = sunset -> 'after sunrise' + 'before sunset' true now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = 9AM local -> 'after sunrise' + 'before sunset' true now = datetime(2015, 9, 16, 16, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 3 async def test_if_action_before_sunrise_no_offset_kotzebue(hass, calls): """ Test if 
action was before sunrise. Local timezone: Alaska time Location: Kotzebue, which has a very skewed local timezone with sunrise at 7 AM and sunset at 3AM during summer After sunrise is true from sunrise until midnight, local time. """ tz = dt_util.get_time_zone("America/Anchorage") dt_util.set_default_time_zone(tz) hass.config.latitude = 66.5 hass.config.longitude = 162.4 await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC # now = sunrise + 1s -> 'before sunrise' not true now = datetime(2015, 7, 24, 15, 17, 25, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise -> 'before sunrise' true now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'before sunrise' true now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = local midnight - 1s -> 'before sunrise' not true now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_after_sunrise_no_offset_kotzebue(hass, calls): """ Test if action was after sunrise. Local timezone: Alaska time Location: Kotzebue, which has a very skewed local timezone with sunrise at 7 AM and sunset at 3AM during summer Before sunrise is true from midnight until sunrise, local time. 
""" tz = dt_util.get_time_zone("America/Anchorage") dt_util.set_default_time_zone(tz) hass.config.latitude = 66.5 hass.config.longitude = 162.4 await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC # now = sunrise -> 'after sunrise' true now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = sunrise - 1s -> 'after sunrise' not true now = datetime(2015, 7, 24, 15, 17, 23, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'after sunrise' not true now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight - 1s -> 'after sunrise' true now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_before_sunset_no_offset_kotzebue(hass, calls): """ Test if action was before sunrise. Local timezone: Alaska time Location: Kotzebue, which has a very skewed local timezone with sunrise at 7 AM and sunset at 3AM during summer Before sunset is true from midnight until sunset, local time. 
""" tz = dt_util.get_time_zone("America/Anchorage") dt_util.set_default_time_zone(tz) hass.config.latitude = 66.5 hass.config.longitude = 162.4 await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "before": SUN_EVENT_SUNSET}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC # now = sunrise + 1s -> 'before sunrise' not true now = datetime(2015, 7, 25, 11, 16, 28, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 0 # now = sunrise -> 'before sunrise' true now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'before sunrise' true now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 # now = local midnight - 1s -> 'before sunrise' not true now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2 async def test_if_action_after_sunset_no_offset_kotzebue(hass, calls): """ Test if action was after sunrise. Local timezone: Alaska time Location: Kotzebue, which has a very skewed local timezone with sunrise at 7 AM and sunset at 3AM during summer After sunset is true from sunset until midnight, local time. """ tz = dt_util.get_time_zone("America/Anchorage") dt_util.set_default_time_zone(tz) hass.config.latitude = 66.5 hass.config.longitude = 162.4 await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "event", "event_type": "test_event"}, "condition": {"condition": "sun", "after": SUN_EVENT_SUNSET}, "action": {"service": "test.automation"}, } }, ) # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC # now = sunset -> 'after sunset' true now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = sunset - 1s -> 'after sunset' not true now = datetime(2015, 7, 25, 11, 16, 26, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight -> 'after sunset' not true now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 1 # now = local midnight - 1s -> 'after sunset' true now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() assert len(calls) == 2
py
1a3de6a14bcae793d9492d078291a67546cbe41e
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto

IN = helper.make_tensor_value_info('in', TensorProto.FLOAT, [7])
OUT = helper.make_tensor_value_info('out', TensorProto.INT8, [7])

nodes = [
    helper.make_node(
        'Cast',
        ['in'],
        ['out'],
        to=getattr(TensorProto, 'INT8'),
    ),
]

graph_def = helper.make_graph(
    nodes,
    'float_to_int8',
    [IN],
    [OUT],
)

model_def = helper.make_model(graph_def,
                              producer_name='float_to_int8.py',
                              opset_imports=[onnx.OperatorSetIdProto(version=13)])

onnx.save(model_def, 'float_to_int8.onnx')
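The script above only builds and saves the Cast graph; it never runs it. A minimal sketch (not part of the original file, and assuming onnxruntime is installed) of how the saved model could be sanity-checked:

# Hypothetical verification snippet; not part of the original float_to_int8.py.
import numpy as np
import onnx
import onnxruntime as ort

model = onnx.load('float_to_int8.onnx')
onnx.checker.check_model(model)  # structural validation of the graph

session = ort.InferenceSession('float_to_int8.onnx')
result = session.run(['out'], {'in': np.linspace(-3.0, 3.0, 7, dtype=np.float32)})[0]
print(result.dtype, result)  # expect int8 values produced by the Cast node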
py
1a3de83c986d53776ee8b204e4d463c95f30dd96
from django.test import TestCase from django.urls import reverse from catalog.models import Author class AuthorListViewTest(TestCase): @classmethod def setUpTestData(cls): # Create 13 authors for pagination tests number_of_authors = 13 for author_id in range(number_of_authors): Author.objects.create( first_name=f'Christian {author_id}', last_name=f'Surname {author_id}', ) def test_view_url_exists_at_desired_location(self): response = self.client.get('/catalog/authors/') self.assertEqual(response.status_code, 200) def test_view_url_accessible_by_name(self): response = self.client.get(reverse('authors')) self.assertEqual(response.status_code, 200) def test_view_uses_correct_template(self): response = self.client.get(reverse('authors')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'catalog/author_list.html') def test_pagination_is_ten(self): response = self.client.get(reverse('authors')) self.assertEqual(response.status_code, 200) self.assertTrue('is_paginated' in response.context) self.assertTrue(response.context['is_paginated'] == True) self.assertTrue(len(response.context['author_list']) == 10) def test_lists_all_authors(self): # Get second page and confirm it has (exactly) remaining 3 items response = self.client.get(reverse('authors')+'?page=2') self.assertEqual(response.status_code, 200) self.assertTrue('is_paginated' in response.context) self.assertTrue(response.context['is_paginated'] == True) self.assertTrue(len(response.context['author_list']) == 3) import datetime from django.utils import timezone from django.contrib.auth.models import User # Required to assign User as a borrower from catalog.models import BookInstance, Book, Genre, Language class LoanedBookInstancesByUserListViewTest(TestCase): def setUp(self): # Create two users test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK') test_user2 = User.objects.create_user(username='testuser2', password='2HJ1vRV0Z&3iD') test_user1.save() test_user2.save() # Create a book test_author = Author.objects.create(first_name='John', last_name='Smith') test_genre = Genre.objects.create(name='Fantasy') test_language = Language.objects.create(name='English') test_book = Book.objects.create( title='Book Title', summary='My book summary', isbn='ABCDEFG', author=test_author, language=test_language, ) # Create genre as a post-step genre_objects_for_book = Genre.objects.all() test_book.genre.set(genre_objects_for_book) # Direct assignment of many-to-many types not allowed. 
test_book.save() # Create 30 BookInstance objects number_of_book_copies = 30 for book_copy in range(number_of_book_copies): return_date = timezone.localtime() + datetime.timedelta(days=book_copy%5) the_borrower = test_user1 if book_copy % 2 else test_user2 status = 'm' BookInstance.objects.create( book=test_book, imprint='Unlikely Imprint, 2016', due_back=return_date, borrower=the_borrower, status=status, ) def test_redirect_if_not_logged_in(self): response = self.client.get(reverse('my-borrowed')) self.assertRedirects(response, '/accounts/login/?next=/catalog/mybooks/') def test_logged_in_uses_correct_template(self): login = self.client.login(username='testuser1', password='1X<ISRUkw+tuK') response = self.client.get(reverse('my-borrowed')) # Check our user is logged in self.assertEqual(str(response.context['user']), 'testuser1') # Check that we got a response "success" self.assertEqual(response.status_code, 200) # Check we used correct template self.assertTemplateUsed(response, 'catalog/bookinstance_list_borrowed_user.html') def test_only_borrowed_books_in_list(self): login = self.client.login(username='testuser1', password='1X<ISRUkw+tuK') response = self.client.get(reverse('my-borrowed')) # Check our user is logged in self.assertEqual(str(response.context['user']), 'testuser1') # Check that we got a response "success" self.assertEqual(response.status_code, 200) # Check that initially we don't have any books in list (none on loan) self.assertTrue('bookinstance_list' in response.context) self.assertEqual(len(response.context['bookinstance_list']), 0) # Now change all books to be on loan books = BookInstance.objects.all()[:10] for book in books: book.status = 'o' book.save() # Check that now we have borrowed books in the list response = self.client.get(reverse('my-borrowed')) # Check our user is logged in self.assertEqual(str(response.context['user']), 'testuser1') # Check that we got a response "success" self.assertEqual(response.status_code, 200) self.assertTrue('bookinstance_list' in response.context) # Confirm all books belong to testuser1 and are on loan for bookitem in response.context['bookinstance_list']: self.assertEqual(response.context['user'], bookitem.borrower) self.assertEqual('o', bookitem.status) def test_pages_ordered_by_due_date(self): # Change all books to be on loan for book in BookInstance.objects.all(): book.status='o' book.save() login = self.client.login(username='testuser1', password='1X<ISRUkw+tuK') response = self.client.get(reverse('my-borrowed')) # Check our user is logged in self.assertEqual(str(response.context['user']), 'testuser1') # Check that we got a response "success" self.assertEqual(response.status_code, 200) # Confirm that of the items, only 10 are displayed due to pagination. self.assertEqual(len(response.context['bookinstance_list']), 10) last_date = 0 for book in response.context['bookinstance_list']: if last_date == 0: last_date = book.due_back else: self.assertTrue(last_date <= book.due_back) last_date = book.due_back import uuid from django.contrib.auth.models import Permission # Required to grant the permission needed to set a book as returned. 
class RenewBookInstancesViewTest(TestCase): def setUp(self): # Create a user test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK') test_user2 = User.objects.create_user(username='testuser2', password='2HJ1vRV0Z&3iD') test_user1.save() test_user2.save() permission = Permission.objects.get(name='Set book as returned') test_user2.user_permissions.add(permission) test_user2.save() # Create a book test_author = Author.objects.create(first_name='John', last_name='Smith') test_genre = Genre.objects.create(name='Fantasy') test_language = Language.objects.create(name='English') test_book = Book.objects.create( title='Book Title', summary='My book summary', isbn='ABCDEFG', author=test_author, language=test_language, ) # Create genre as a post-step genre_objects_for_book = Genre.objects.all() test_book.genre.set(genre_objects_for_book) # Direct assignment of many-to-many types not allowed. test_book.save() # Create a BookInstance object for test_user1 return_date = datetime.date.today() + datetime.timedelta(days=5) self.test_bookinstance1 = BookInstance.objects.create( book=test_book, imprint='Unlikely Imprint, 2016', due_back=return_date, borrower=test_user1, status='o', ) # Create a BookInstance object for test_user2 return_date = datetime.date.today() + datetime.timedelta(days=5) self.test_bookinstance2 = BookInstance.objects.create( book=test_book, imprint='Unlikely Imprint, 2016', due_back=return_date, borrower=test_user2, status='o', ) def test_redirect_if_not_logged_in(self): response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk})) # Manually check redirect (Can't use assertRedirect, because the redirect URL is unpredictable) self.assertEqual(response.status_code, 302) self.assertTrue(response.url.startswith('/accounts/login/')) def test_forbidden_if_logged_in_but_not_correct_permission(self): login = self.client.login(username='testuser1', password='1X<ISRUkw+tuK') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk})) self.assertEqual(response.status_code, 403) def test_logged_in_with_permission_borrowed_book(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance2.pk})) # Check that it lets us login - this is our book and we have the right permissions. self.assertEqual(response.status_code, 200) def test_logged_in_with_permission_another_users_borrowed_book(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk})) # Check that it lets us login. We're a librarian, so we can view any users book self.assertEqual(response.status_code, 200) def test_HTTP404_for_invalid_book_if_logged_in(self): # unlikely UID to match our bookinstance! 
test_uid = uuid.uuid4() login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk':test_uid})) self.assertEqual(response.status_code, 404) def test_uses_correct_template(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk})) self.assertEqual(response.status_code, 200) # Check we used correct template self.assertTemplateUsed(response, 'catalog/book_renew_librarian.html') def test_form_renewal_date_initially_has_date_three_weeks_in_future(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') response = self.client.get(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk})) self.assertEqual(response.status_code, 200) date_3_weeks_in_future = datetime.date.today() + datetime.timedelta(weeks=3) self.assertEqual(response.context['form'].initial['renewal_date'], date_3_weeks_in_future) def test_redirects_to_all_borrowed_book_list_on_success(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') valid_date_in_future = datetime.date.today() + datetime.timedelta(weeks=2) response = self.client.post(reverse('renew-book-librarian', kwargs={'pk':self.test_bookinstance1.pk,}), {'renewal_date':valid_date_in_future}) self.assertRedirects(response, reverse('all-borrowed')) def test_form_invalid_renewal_date_past(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') date_in_past = datetime.date.today() - datetime.timedelta(weeks=1) response = self.client.post(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk}), {'renewal_date': date_in_past}) self.assertEqual(response.status_code, 200) self.assertFormError(response, 'form', 'renewal_date', 'Invalid date - renewal in past') def test_form_invalid_renewal_date_future(self): login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD') invalid_date_in_future = datetime.date.today() + datetime.timedelta(weeks=5) response = self.client.post(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk}), {'renewal_date': invalid_date_in_future}) self.assertEqual(response.status_code, 200) self.assertFormError(response, 'form', 'renewal_date', 'Invalid date - renewal more than 4 weeks ahead')
py
1a3de984dae9dc3efb071f8f3d0e61f40ed291ba
from setuptools import setup

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Web Environment",
    "Framework :: Django",
    "Framework :: Django :: 1.11",
    "Framework :: Django :: 2.0",
    "Intended Audience :: Developers",
    # "License :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Topic :: Software Development",
    "Topic :: Software Development :: Libraries :: Application Frameworks",
]

setup(
    name="bitrix",
    version='0.0.3',
    author="Noors Ergesh",
    author_email="[email protected]",
    description="Bitrix24 python library",
    license='MIT',
    # long_description=LONG_DESCRIPTION,
    url="https://github.com/NursErgesh/bitrix.git",
    packages=("bitrix",),
    include_package_data=True,
    install_requires=open('requirements/requirements.txt').read().splitlines(),
    tests_require=open('requirements/test.txt').read().splitlines(),
    classifiers=CLASSIFIERS,
    zip_safe=False,
)
py
1a3dea4b338819a91a30dbe9928b9d4aeab11519
# coding: utf-8 import re import six from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization class UpdateClusterRequest: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'cluster_id': 'str', 'body': 'ClusterInformation' } attribute_map = { 'cluster_id': 'cluster_id', 'body': 'body' } def __init__(self, cluster_id=None, body=None): """UpdateClusterRequest - a model defined in huaweicloud sdk""" self._cluster_id = None self._body = None self.discriminator = None self.cluster_id = cluster_id if body is not None: self.body = body @property def cluster_id(self): """Gets the cluster_id of this UpdateClusterRequest. 集群 ID,获取方式请参见[[如何获取接口URI中参数](https://support.huaweicloud.com/api-cce/cce_02_0271.html)](tag:hws)[[如何获取接口URI中参数](https://support.huaweicloud.com/intl/zh-cn/api-cce/cce_02_0271.html)](tag:hws_hk) :return: The cluster_id of this UpdateClusterRequest. :rtype: str """ return self._cluster_id @cluster_id.setter def cluster_id(self, cluster_id): """Sets the cluster_id of this UpdateClusterRequest. 集群 ID,获取方式请参见[[如何获取接口URI中参数](https://support.huaweicloud.com/api-cce/cce_02_0271.html)](tag:hws)[[如何获取接口URI中参数](https://support.huaweicloud.com/intl/zh-cn/api-cce/cce_02_0271.html)](tag:hws_hk) :param cluster_id: The cluster_id of this UpdateClusterRequest. :type: str """ self._cluster_id = cluster_id @property def body(self): """Gets the body of this UpdateClusterRequest. :return: The body of this UpdateClusterRequest. :rtype: ClusterInformation """ return self._body @body.setter def body(self, body): """Sets the body of this UpdateClusterRequest. :param body: The body of this UpdateClusterRequest. :type: ClusterInformation """ self._body = body def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) def __repr__(self): """For `print`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UpdateClusterRequest): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
py
1a3deaf2afe89e28cc0b70b019fe4ef799a12fc0
from flask import Blueprint, request, abort, send_file, Response, make_response
from app.helpers.google_maps import get_static_map
from app.helpers.slack import verify_slack_request
from flask_jwt_extended import jwt_required
from app.model import db, UserResponse
from flask import current_app as app
from datetime import datetime
import json

# Add blueprints
api = Blueprint('api', __name__, url_prefix="/askjeeves", template_folder='templates')


@api.route('/GoogleMaps', methods=['GET'])
def get_google_map():
    """
    Input: Takes in a location via the query string in the URL
    Output: Returns a Google Map static image (PNG) to client
    """
    # Get query string
    query_string = request.args
    image = get_static_map(query_string['location'])
    return send_file(image, mimetype='image/png')


@api.route('/', methods=['POST'])
@api.route('/UserResponse', methods=['POST'])
def user_response():
    if request.headers.get("X-Slack-Signature") and request.headers.get("X-Slack-Request-Timestamp") and request.headers["Content-Type"] == "application/x-www-form-urlencoded":
        request_body = request.get_data()
        slack_signature = request.headers.get('X-Slack-Signature', None)
        slack_request_timestamp = request.headers.get('X-Slack-Request-Timestamp', None)

        if verify_slack_request(slack_signature, slack_request_timestamp, request_body):
            # Get URL encoded form data
            payload = json.loads(request.form['payload'])

            # Unpack values from fields
            temp_dict = dict()
            for field in payload['message']['blocks'][3]['fields']:
                temp_dict[field['text'].split("*\n")[0][1:]] = field['text'].split("*\n")[1]
            temp_dict['Username'] = payload['user']['username']
            temp_dict['user_selection'] = payload['actions'][0]['value']

            # Create DB entry
            userResponse = UserResponse(
                EventID=temp_dict['EventID'],
                Username=temp_dict['Username'],
                Timestamp=temp_dict['Timestamp'],
                Location=temp_dict['Location'],
                IPaddress=temp_dict['IPaddress'],
                VPNHash=temp_dict['VPNhash'],
                Device=temp_dict['Device'],
                Hostname=temp_dict['Hostname'],
                Selection=temp_dict['user_selection']
            )

            # Commit DB entry
            db.session.add(userResponse)
            db.session.commit()

            # remove blocks
            del payload['message']['blocks']

            selection = payload['actions'][0]['value']
            msg_text = str()
            if selection == "legitimate_login":
                msg_text = ":partyparrot:"
            else:
                msg_text = ":rotating-light-red: :rotating-light-red: :rotating-light-red: Alerting security team :rotating-light-red: :rotating-light-red: :rotating-light-red: "

            response = app.slack_client.chat_update(
                channel=payload["channel"]["id"],
                ts=payload['container']["message_ts"],
                text=msg_text,
                blocks=[],
                attachments=[]
            )

            return make_response("", 200)

    return abort(404)


@api.route('/GetUserResponse', methods=['GET'])
@jwt_required
def get_user_responses():
    """
    Input: Request to get all the user responses in MySQL database
    Output: Return JSON list of all user responses
    """
    # Request all user responses from DB
    userResponses = db.session.query(UserResponse).all()

    # Delete all entries
    for userResponse in userResponses:
        db.session.delete(userResponse)
    db.session.commit()

    # Create list of dicts of each DB entry
    userResponseLst = list()
    for userResponse in userResponses:
        temp = userResponse.__dict__
        del temp['_sa_instance_state']
        userResponseLst.append(temp)

    # return user responses as JSON
    return json.dumps(userResponseLst)
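The user_response view above delegates request authentication to verify_slack_request from app.helpers.slack, whose implementation is not included in this file. As an illustrative sketch only, Slack's documented v0 signing scheme (HMAC-SHA256 over "v0:<timestamp>:<raw body>" with the app's signing secret) is the kind of check such a helper typically performs; the SLACK_SIGNING_SECRET environment variable name here is an assumption, not taken from this project.

# Illustrative sketch of a Slack v0 signature check; not the project's actual helper.
import hashlib
import hmac
import os
import time


def verify_slack_request(slack_signature, slack_request_timestamp, request_body):
    # Reject requests older than five minutes to limit replay attacks.
    if abs(time.time() - int(slack_request_timestamp)) > 60 * 5:
        return False
    basestring = "v0:{}:{}".format(slack_request_timestamp, request_body.decode("utf-8"))
    secret = os.environ["SLACK_SIGNING_SECRET"].encode("utf-8")  # assumed env var name
    computed = "v0=" + hmac.new(secret, basestring.encode("utf-8"), hashlib.sha256).hexdigest()
    return hmac.compare_digest(computed, slack_signature)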
py
1a3debb35cf1fe6e6d685cc3341c96ff0a5ca826
#!/usr/bin/env python from __future__ import with_statement # ============================================================================== # MetaPhlAn v2.x: METAgenomic PHyLogenetic ANalysis for taxonomic classification # of metagenomic data # # Authors: Nicola Segata ([email protected]), # Duy Tin Truong, # Francesco Asnicar ([email protected]) # # Please type "./metaphlan2.py -h" for usage help # # ============================================================================== __author__ = ('Nicola Segata ([email protected]), ' 'Duy Tin Truong, ' 'Francesco Asnicar ([email protected])') __version__ = '2.8' __date__ = '31 May 2018' import sys import os import stat import re import time import tarfile # from binascii import b2a_uu try: import numpy as np except ImportError: sys.stderr.write("Error! numpy python library not detected!!\n") sys.exit(1) import tempfile as tf import argparse as ap import subprocess as subp try: from subprocess import DEVNULL # py3k except ImportError: DEVNULL = open(os.devnull, 'wb') # import multiprocessing as mp from collections import defaultdict as defdict import bz2 import itertools from distutils.version import LooseVersion try: import cPickle as pickle except ImportError: import pickle # try to import urllib.request.urlretrieve for python3 try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve from glob import glob import hashlib # set the location of the database download url DATABASE_DOWNLOAD = "https://www.dropbox.com/sh/7qze7m7g9fe2xjg/AADHWzATSQcI0CNFD0sk7MAga" FILE_LIST= "https://www.dropbox.com/sh/7qze7m7g9fe2xjg/AAA4XDP85WHon_eHvztxkamTa/file_list.txt?dl=1" # get the directory that contains this script metaphlan2_script_install_folder = os.path.dirname(os.path.abspath(__file__)) # get the default database folder DEFAULT_DB_FOLDER = os.path.join(metaphlan2_script_install_folder, "metaphlan_databases") #********************************************************************************************** # Modification of Code : * # Modified the code so instead of using the current clade IDs, which are numbers, we will * # use the clade_names * # Users reported the biom output is invalid and also the IDs were changing from run to * # run. * # George Weingart 05/22/2017 [email protected] * #********************************************************************************************** #************************************************************* #* Imports related to biom file generation * #************************************************************* try: import biom import biom.table # import numpy as np # numpy already imported above except ImportError: sys.stderr.write("Warning! Biom python library not detected!" "\n Exporting to biom format will not work!\n") try: import json except ImportError: sys.stderr.write("Warning! json python library not detected!" 
"\n Exporting to biom format will not work!\n") # This set contains the markers that after careful validation are found to have low precision or recall # We esclude the markers here to avoid generating a new marker DB when changing just few markers markers_to_exclude = set(['NC_001782.1', 'GeneID:17099689', 'gi|419819595|ref|NZ_AJRE01000517.1|:1-118', 'GeneID:10498696', 'GeneID:10498710', 'GeneID:10498726', 'GeneID:10498735', 'GeneID:10498757', 'GeneID:10498760', 'GeneID:10498761', 'GeneID:10498763', 'GeneID:11294465', 'GeneID:14181982', 'GeneID:14182132', 'GeneID:14182146', 'GeneID:14182148', 'GeneID:14182328', 'GeneID:14182639', 'GeneID:14182647', 'GeneID:14182650', 'GeneID:14182663', 'GeneID:14182683', 'GeneID:14182684', 'GeneID:14182691', 'GeneID:14182803', 'GeneID:14296322', 'GeneID:1489077', 'GeneID:1489080', 'GeneID:1489081', 'GeneID:1489084', 'GeneID:1489085', 'GeneID:1489088', 'GeneID:1489089', 'GeneID:1489090', 'GeneID:1489528', 'GeneID:1489530', 'GeneID:1489531', 'GeneID:1489735', 'GeneID:1491873', 'GeneID:1491889', 'GeneID:1491962', 'GeneID:1491963', 'GeneID:1491964', 'GeneID:1491965', 'GeneID:17099689', 'GeneID:1724732', 'GeneID:17494231', 'GeneID:2546403', 'GeneID:2703374', 'GeneID:2703375', 'GeneID:2703498', 'GeneID:2703531', 'GeneID:2772983', 'GeneID:2772989', 'GeneID:2772991', 'GeneID:2772993', 'GeneID:2772995', 'GeneID:2773037', 'GeneID:2777387', 'GeneID:2777399', 'GeneID:2777400', 'GeneID:2777439', 'GeneID:2777493', 'GeneID:2777494', 'GeneID:3077424', 'GeneID:3160801', 'GeneID:3197323', 'GeneID:3197355', 'GeneID:3197400', 'GeneID:3197428', 'GeneID:3783722', 'GeneID:3783750', 'GeneID:3953004', 'GeneID:3959334', 'GeneID:3964368', 'GeneID:3964370', 'GeneID:4961452', 'GeneID:5075645', 'GeneID:5075646', 'GeneID:5075647', 'GeneID:5075648', 'GeneID:5075649', 'GeneID:5075650', 'GeneID:5075651', 'GeneID:5075652', 'GeneID:5075653', 'GeneID:5075654', 'GeneID:5075655', 'GeneID:5075656', 'GeneID:5075657', 'GeneID:5075658', 'GeneID:5075659', 'GeneID:5075660', 'GeneID:5075661', 'GeneID:5075662', 'GeneID:5075663', 'GeneID:5075664', 'GeneID:5075665', 'GeneID:5075667', 'GeneID:5075668', 'GeneID:5075669', 'GeneID:5075670', 'GeneID:5075671', 'GeneID:5075672', 'GeneID:5075673', 'GeneID:5075674', 'GeneID:5075675', 'GeneID:5075676', 'GeneID:5075677', 'GeneID:5075678', 'GeneID:5075679', 'GeneID:5075680', 'GeneID:5075681', 'GeneID:5075682', 'GeneID:5075683', 'GeneID:5075684', 'GeneID:5075685', 'GeneID:5075686', 'GeneID:5075687', 'GeneID:5075688', 'GeneID:5075689', 'GeneID:5075690', 'GeneID:5075691', 'GeneID:5075692', 'GeneID:5075693', 'GeneID:5075694', 'GeneID:5075695', 'GeneID:5075696', 'GeneID:5075697', 'GeneID:5075698', 'GeneID:5075700', 'GeneID:5075701', 'GeneID:5075702', 'GeneID:5075703', 'GeneID:5075704', 'GeneID:5075705', 'GeneID:5075707', 'GeneID:5075708', 'GeneID:5075709', 'GeneID:5075710', 'GeneID:5075711', 'GeneID:5075712', 'GeneID:5075713', 'GeneID:5075714', 'GeneID:5075715', 'GeneID:5075716', 'GeneID:5176189', 'GeneID:6803896', 'GeneID:6803915', 'GeneID:7944151', 'GeneID:927334', 'GeneID:927335', 'GeneID:927337', 'GeneID:940263', 'GeneID:9538324', 'NC_003977.1', 'gi|103485498|ref|NC_008048.1|:1941166-1942314', 'gi|108802856|ref|NC_008148.1|:1230231-1230875', 'gi|124806686|ref|XM_001350760.1|', 'gi|126661648|ref|NZ_AAXW01000149.1|:c1513-1341', 'gi|149172845|ref|NZ_ABBW01000029.1|:970-1270', 'gi|153883242|ref|NZ_ABDQ01000074.1|:79-541', 'gi|167031021|ref|NC_010322.1|:1834668-1835168', 'gi|171344510|ref|NZ_ABJO01001391.1|:1-116', 'gi|171346813|ref|NZ_ABJO01001728.1|:c109-1', 
'gi|190640924|ref|NZ_ABRC01000948.1|:c226-44', 'gi|223045343|ref|NZ_ACEN01000042.1|:1-336', 'gi|224580998|ref|NZ_GG657387.1|:c114607-114002', 'gi|224993759|ref|NZ_ACFY01000068.1|:c357-1', 'gi|237784637|ref|NC_012704.1|:141000-142970', 'gi|237784637|ref|NC_012704.1|:c2048315-2047083', 'gi|240136783|ref|NC_012808.1|:1928224-1928961', 'gi|255319020|ref|NZ_ACVR01000025.1|:28698-29132', 'gi|260590341|ref|NZ_ACEO02000062.1|:c387-151', 'gi|262368201|ref|NZ_GG704964.1|:733100-733978', 'gi|262369811|ref|NZ_GG704966.1|:c264858-264520', 'gi|288559258|ref|NC_013790.1|:448046-451354', 'gi|288559258|ref|NC_013790.1|:532047-533942', 'gi|294794157|ref|NZ_GG770200.1|:245344-245619', 'gi|304372805|ref|NC_014448.1|:444677-445120', 'gi|304372805|ref|NC_014448.1|:707516-708268', 'gi|304372805|ref|NC_014448.1|:790263-792257', 'gi|304372805|ref|NC_014448.1|:c367313-364470', 'gi|304372805|ref|NC_014448.1|:c659144-658272', 'gi|304372805|ref|NC_014448.1|:c772578-770410', 'gi|304372805|ref|NC_014448.1|:c777901-777470', 'gi|306477407|ref|NZ_GG770409.1|:c1643877-1643338', 'gi|317120849|ref|NC_014831.1|:c891121-890144', 'gi|323356441|ref|NZ_GL698442.1|:560-682', 'gi|324996766|ref|NZ_BABV01000451.1|:10656-11579', 'gi|326579405|ref|NZ_AEGQ01000006.1|:2997-3791', 'gi|326579407|ref|NZ_AEGQ01000008.1|:c45210-44497', 'gi|326579433|ref|NZ_AEGQ01000034.1|:346-3699', 'gi|329889017|ref|NZ_GL883086.1|:586124-586804', 'gi|330822653|ref|NC_015422.1|:2024431-2025018', 'gi|335053104|ref|NZ_AFIL01000010.1|:c33862-32210', 'gi|339304121|ref|NZ_AEOR01000258.1|:c294-1', 'gi|339304277|ref|NZ_AEOR01000414.1|:1-812', 'gi|342211239|ref|NZ_AFUK01000001.1|:790086-790835', 'gi|342211239|ref|NZ_AFUK01000001.1|:c1579497-1578787', 'gi|342213707|ref|NZ_AFUJ01000005.1|:48315-48908', 'gi|355707189|ref|NZ_JH376566.1|:326756-326986', 'gi|355707384|ref|NZ_JH376567.1|:90374-91453', 'gi|355707384|ref|NZ_JH376567.1|:c388018-387605', 'gi|355708440|ref|NZ_JH376569.1|:c80380-79448', 'gi|358051729|ref|NZ_AEUN01000100.1|:c120-1', 'gi|365983217|ref|XM_003668394.1|', 'gi|377571722|ref|NZ_BAFD01000110.1|:c1267-29', 'gi|377684864|ref|NZ_CM001194.1|:c1159954-1159619', 'gi|377684864|ref|NZ_CM001194.1|:c4966-4196', 'gi|378759497|ref|NZ_AFXE01000152.1|:1628-2215', 'gi|378835506|ref|NC_016829.1|:112560-113342', 'gi|378835506|ref|NC_016829.1|:114945-115193', 'gi|378835506|ref|NC_016829.1|:126414-127151', 'gi|378835506|ref|NC_016829.1|:272056-272403', 'gi|378835506|ref|NC_016829.1|:272493-272786', 'gi|378835506|ref|NC_016829.1|:358647-360863', 'gi|378835506|ref|NC_016829.1|:37637-38185', 'gi|378835506|ref|NC_016829.1|:60012-60497', 'gi|378835506|ref|NC_016829.1|:606819-607427', 'gi|378835506|ref|NC_016829.1|:607458-607760', 'gi|378835506|ref|NC_016829.1|:826192-826821', 'gi|378835506|ref|NC_016829.1|:c451932-451336', 'gi|378835506|ref|NC_016829.1|:c460520-459951', 'gi|378835506|ref|NC_016829.1|:c483843-482842', 'gi|378835506|ref|NC_016829.1|:c544660-543638', 'gi|378835506|ref|NC_016829.1|:c556383-555496', 'gi|378835506|ref|NC_016829.1|:c632166-631228', 'gi|378835506|ref|NC_016829.1|:c805066-802691', 'gi|384124469|ref|NC_017160.1|:c2157447-2156863', 'gi|385263288|ref|NZ_AJST01000001.1|:594143-594940', 'gi|385858114|ref|NC_017519.1|:10252-10746', 'gi|385858114|ref|NC_017519.1|:104630-104902', 'gi|385858114|ref|NC_017519.1|:154292-156016', 'gi|385858114|ref|NC_017519.1|:205158-206462', 'gi|385858114|ref|NC_017519.1|:507239-507703', 'gi|385858114|ref|NC_017519.1|:518924-519772', 'gi|385858114|ref|NC_017519.1|:524712-525545', 'gi|385858114|ref|NC_017519.1|:528387-528785', 
'gi|385858114|ref|NC_017519.1|:532275-533429', 'gi|385858114|ref|NC_017519.1|:586402-586824', 'gi|385858114|ref|NC_017519.1|:621696-622226', 'gi|385858114|ref|NC_017519.1|:673673-676105', 'gi|385858114|ref|NC_017519.1|:706602-708218', 'gi|385858114|ref|NC_017519.1|:710627-711997', 'gi|385858114|ref|NC_017519.1|:744974-745456', 'gi|385858114|ref|NC_017519.1|:791055-791801', 'gi|385858114|ref|NC_017519.1|:805643-807430', 'gi|385858114|ref|NC_017519.1|:c172050-170809', 'gi|385858114|ref|NC_017519.1|:c334545-333268', 'gi|385858114|ref|NC_017519.1|:c383474-383202', 'gi|385858114|ref|NC_017519.1|:c450880-450389', 'gi|385858114|ref|NC_017519.1|:c451975-451001', 'gi|385858114|ref|NC_017519.1|:c470488-470036', 'gi|385858114|ref|NC_017519.1|:c485596-484598', 'gi|385858114|ref|NC_017519.1|:c58658-58065', 'gi|385858114|ref|NC_017519.1|:c592754-591081', 'gi|385858114|ref|NC_017519.1|:c59590-58820', 'gi|385858114|ref|NC_017519.1|:c601339-600575', 'gi|385858114|ref|NC_017519.1|:c76080-75160', 'gi|385858114|ref|NC_017519.1|:c97777-96302', 'gi|391227518|ref|NZ_CM001514.1|:c1442504-1440237', 'gi|391227518|ref|NZ_CM001514.1|:c3053472-3053023', 'gi|394749766|ref|NZ_AHHC01000069.1|:3978-6176', 'gi|398899615|ref|NZ_AKJK01000021.1|:28532-29209', 'gi|406580057|ref|NZ_AJRD01000017.1|:c17130-15766', 'gi|406584668|ref|NZ_AJQZ01000017.1|:c1397-771', 'gi|408543458|ref|NZ_AJLO01000024.1|:67702-68304', 'gi|410936685|ref|NZ_AJRF02000012.1|:21785-22696', 'gi|41406098|ref|NC_002944.2|:c4468304-4467864', 'gi|416998679|ref|NZ_AEXI01000003.1|:c562937-562176', 'gi|417017738|ref|NZ_AEYL01000489.1|:c111-1', 'gi|417018375|ref|NZ_AEYL01000508.1|:100-238', 'gi|418576506|ref|NZ_AHKB01000025.1|:c7989-7669', 'gi|419819595|ref|NZ_AJRE01000517.1|:1-118', 'gi|421806549|ref|NZ_AMTB01000006.1|:c181247-180489', 'gi|422320815|ref|NZ_GL636045.1|:28704-29048', 'gi|422320874|ref|NZ_GL636046.1|:4984-5742', 'gi|422323244|ref|NZ_GL636061.1|:479975-480520', 'gi|422443048|ref|NZ_GL383112.1|:663738-664823', 'gi|422552858|ref|NZ_GL383469.1|:c216727-215501', 'gi|422859491|ref|NZ_GL878548.1|:c271832-271695', 'gi|423012810|ref|NZ_GL982453.1|:3888672-3888935', 'gi|423012810|ref|NZ_GL982453.1|:4541873-4542328', 'gi|423012810|ref|NZ_GL982453.1|:c2189976-2188582', 'gi|423012810|ref|NZ_GL982453.1|:c5471232-5470300', 'gi|423262555|ref|NC_019552.1|:24703-25212', 'gi|423262555|ref|NC_019552.1|:28306-30696', 'gi|423262555|ref|NC_019552.1|:284252-284581', 'gi|423262555|ref|NC_019552.1|:311161-311373', 'gi|423262555|ref|NC_019552.1|:32707-34497', 'gi|423262555|ref|NC_019552.1|:34497-35237', 'gi|423262555|ref|NC_019552.1|:53691-56813', 'gi|423262555|ref|NC_019552.1|:c388986-386611', 'gi|423262555|ref|NC_019552.1|:c523106-522528', 'gi|423689090|ref|NZ_CM001513.1|:c1700632-1699448', 'gi|423689090|ref|NZ_CM001513.1|:c1701670-1700651', 'gi|423689090|ref|NZ_CM001513.1|:c5739118-5738390', 'gi|427395956|ref|NZ_JH992914.1|:c592682-591900', 'gi|427407324|ref|NZ_JH992904.1|:c2681223-2679463', 'gi|451952303|ref|NZ_AJRB03000021.1|:1041-1574', 'gi|452231579|ref|NZ_AEKA01000123.1|:c18076-16676', 'gi|459791914|ref|NZ_CM001824.1|:c899379-899239', 'gi|471265562|ref|NC_020815.1|:3155799-3156695', 'gi|472279780|ref|NZ_ALPV02000001.1|:33911-36751', 'gi|482733945|ref|NZ_AHGZ01000071.1|:10408-11154', 'gi|483051300|ref|NZ_ALYK02000034.1|:c37582-36650', 'gi|483051300|ref|NZ_ALYK02000034.1|:c38037-37582', 'gi|483993347|ref|NZ_AMXG01000045.1|:251724-253082', 'gi|484100856|ref|NZ_JH670250.1|:600643-602949', 'gi|484115941|ref|NZ_AJXG01000093.1|:567-947', 
'gi|484228609|ref|NZ_JH730929.1|:c103784-99021', 'gi|484228797|ref|NZ_JH730960.1|:c16193-12429', 'gi|484228814|ref|NZ_JH730962.1|:c29706-29260', 'gi|484228929|ref|NZ_JH730981.1|:18645-22060', 'gi|484228939|ref|NZ_JH730983.1|:42943-43860', 'gi|484266598|ref|NZ_AKGC01000024.1|:118869-119636', 'gi|484327375|ref|NZ_AKVP01000093.1|:1-1281', 'gi|484328234|ref|NZ_AKVP01000127.1|:c325-110', 'gi|487376144|ref|NZ_KB911257.1|:600445-601482', 'gi|487376194|ref|NZ_KB911260.1|:146228-146533', 'gi|487381776|ref|NZ_KB911485.1|:101242-103083', 'gi|487381776|ref|NZ_KB911485.1|:c32472-31627', 'gi|487381800|ref|NZ_KB911486.1|:39414-39872', 'gi|487381828|ref|NZ_KB911487.1|:15689-17026', 'gi|487381846|ref|NZ_KB911488.1|:13678-13821', 'gi|487382089|ref|NZ_KB911497.1|:23810-26641', 'gi|487382176|ref|NZ_KB911501.1|:c497-381', 'gi|487382213|ref|NZ_KB911502.1|:12706-13119', 'gi|487382247|ref|NZ_KB911505.1|:c7595-6663', 'gi|490551798|ref|NZ_AORG01000011.1|:40110-41390', 'gi|491099398|ref|NZ_KB849654.1|:c720460-719912', 'gi|491124812|ref|NZ_KB849705.1|:1946500-1946937', 'gi|491155563|ref|NZ_KB849732.1|:46469-46843', 'gi|491155563|ref|NZ_KB849732.1|:46840-47181', 'gi|491155563|ref|NZ_KB849732.1|:47165-48616', 'gi|491155563|ref|NZ_KB849732.1|:55055-56662', 'gi|491155563|ref|NZ_KB849732.1|:56662-57351', 'gi|491155563|ref|NZ_KB849732.1|:6101-7588', 'gi|491155563|ref|NZ_KB849732.1|:7657-8073', 'gi|491349766|ref|NZ_KB850082.1|:441-941', 'gi|491395079|ref|NZ_KB850142.1|:1461751-1462554', 'gi|512608407|ref|NZ_KE150401.1|:c156891-156016', 'gi|518653462|ref|NZ_ATLM01000004.1|:c89669-89247', 'gi|520818261|ref|NZ_ATLQ01000015.1|:480744-481463', 'gi|520822538|ref|NZ_ATLQ01000063.1|:103173-103283', 'gi|520826510|ref|NZ_ATLQ01000092.1|:c13892-13563', 'gi|544644736|ref|NZ_KE747865.1|:68388-69722', 'gi|545347918|ref|NZ_KE952096.1|:c83873-81831', 'gi|550735774|gb|AXMM01000002.1|:c743886-743575', 'gi|552875787|ref|NZ_KI515684.1|:c584270-583890', 'gi|552876418|ref|NZ_KI515685.1|:36713-37258', 'gi|552876418|ref|NZ_KI515685.1|:432422-433465', 'gi|552876418|ref|NZ_KI515685.1|:c1014617-1014117', 'gi|552876418|ref|NZ_KI515685.1|:c931935-931327', 'gi|552876815|ref|NZ_KI515686.1|:613740-614315', 'gi|552879811|ref|NZ_AXME01000001.1|:1146402-1146932', 'gi|552879811|ref|NZ_AXME01000001.1|:40840-41742', 'gi|552879811|ref|NZ_AXME01000001.1|:49241-49654', 'gi|552891898|ref|NZ_AXMG01000001.1|:99114-99290', 'gi|552891898|ref|NZ_AXMG01000001.1|:c1460921-1460529', 'gi|552895565|ref|NZ_AXMI01000001.1|:619555-620031', 'gi|552895565|ref|NZ_AXMI01000001.1|:c14352-13837', 'gi|552896371|ref|NZ_AXMI01000002.1|:c148595-146280', 'gi|552897201|ref|NZ_AXMI01000004.1|:c231437-230883', 'gi|552902020|ref|NZ_AXMK01000001.1|:c1625038-1624022', 'gi|556346902|ref|NZ_KI535485.1|:c828278-827901', 'gi|556478613|ref|NZ_KI535633.1|:3529392-3530162', 'gi|560534311|ref|NZ_AYSF01000111.1|:26758-29049', 'gi|564165687|gb|AYLX01000355.1|:10906-11166', 'gi|564169776|gb|AYLX01000156.1|:1-185', 'gi|564938696|gb|AWYH01000018.1|:c75674-75039', 'gi|67993724|ref|XM_664440.1|', 'gi|68059117|ref|XM_666447.1|', 'gi|68062389|ref|XM_668109.1|', 'gi|71730848|gb|AAAM03000019.1|:c14289-12877', 'gi|82753723|ref|XM_722699.1|', 'gi|82775382|ref|NC_007606.1|:2249487-2250014', 'gi|82793634|ref|XM_723027.1|', 'GeneID:1489527']) tax_units = "kpcofgst" if float(sys.version_info[0]) < 3.0: def read_and_split(ofn): return (l.strip().split('\t') for l in ofn) def read_and_split_line(line): return line.strip().split('\t') else: def read_and_split(ofn): return (l.decode('utf-8').strip().split('\t') for l in 
ofn) def read_and_split_line(line): return line.decode('utf-8').strip().split('\t') def plain_read_and_split(ofn): return (l.strip().split('\t') for l in ofn) def plain_read_and_split_line(l): return l.strip().split('\t') if float(sys.version_info[0]) < 3.0: def mybytes(val): return val else: def mybytes(val): return bytes(val, encoding='utf-8') def read_params(args): p = ap.ArgumentParser( description= "DESCRIPTION\n" " MetaPhlAn version "+__version__+" ("+__date__+"): \n" " METAgenomic PHyLogenetic ANalysis for metagenomic taxonomic profiling.\n\n" "AUTHORS: "+__author__+"\n\n" "COMMON COMMANDS\n\n" " We assume here that metaphlan2.py is in the system path and that mpa_dir bash variable contains the\n" " main MetaPhlAn folder. Also BowTie2 should be in the system path with execution and read\n" " permissions, and Perl should be installed)\n\n" "\n========== MetaPhlAn 2 clade-abundance estimation ================= \n\n" "The basic usage of MetaPhlAn 2 consists in the identification of the clades (from phyla to species and \n" "strains in particular cases) present in the metagenome obtained from a microbiome sample and their \n" "relative abundance. This correspond to the default analysis type (--analysis_type rel_ab).\n\n" "* Profiling a metagenome from raw reads:\n" "$ metaphlan2.py metagenome.fastq --input_type fastq\n\n" "* You can take advantage of multiple CPUs and save the intermediate BowTie2 output for re-running\n" " MetaPhlAn extremely quickly:\n" "$ metaphlan2.py metagenome.fastq --bowtie2out metagenome.bowtie2.bz2 --nproc 5 --input_type fastq\n\n" "* If you already mapped your metagenome against the marker DB (using a previous MetaPhlAn run), you\n" " can obtain the results in few seconds by using the previously saved --bowtie2out file and \n" " specifying the input (--input_type bowtie2out):\n" "$ metaphlan2.py metagenome.bowtie2.bz2 --nproc 5 --input_type bowtie2out\n\n" "* You can also provide an externally BowTie2-mapped SAM if you specify this format with \n" " --input_type. Two steps: first apply BowTie2 and then feed MetaPhlAn2 with the obtained sam:\n" "$ bowtie2 --sam-no-hd --sam-no-sq --no-unal --very-sensitive -S metagenome.sam -x ${mpa_dir}/db_v20/mpa_v20_m200 -U metagenome.fastq\n" "$ metaphlan2.py metagenome.sam --input_type sam > profiled_metagenome.txt\n\n" # "* Multiple alternative ways to pass the input are also available:\n" # "$ cat metagenome.fastq | metaphlan2.py --input_type fastq \n" # "$ tar xjf metagenome.tar.bz2 --to-stdout | metaphlan2.py --input_type fastq \n" # "$ metaphlan2.py --input_type fastq < metagenome.fastq\n" # "$ metaphlan2.py --input_type fastq <(bzcat metagenome.fastq.bz2)\n" # "$ metaphlan2.py --input_type fastq <(zcat metagenome_1.fastq.gz metagenome_2.fastq.gz)\n\n" "* We can also natively handle paired-end metagenomes, and, more generally, metagenomes stored in \n" " multiple files (but you need to specify the --bowtie2out parameter):\n" "$ metaphlan2.py metagenome_1.fastq,metagenome_2.fastq --bowtie2out metagenome.bowtie2.bz2 --nproc 5 --input_type fastq\n\n" "\n------------------------------------------------------------------- \n \n\n" "\n========== Marker level analysis ============================ \n\n" "MetaPhlAn 2 introduces the capability of charachterizing organisms at the strain level using non\n" "aggregated marker information. 
Such capability comes with several slightly different flavours and \n" "are a way to perform strain tracking and comparison across multiple samples.\n" "Usually, MetaPhlAn 2 is first ran with the default --analysis_type to profile the species present in\n" "the community, and then a strain-level profiling can be performed to zoom-in into specific species\n" "of interest. This operation can be performed quickly as it exploits the --bowtie2out intermediate \n" "file saved during the execution of the default analysis type.\n\n" "* The following command will output the abundance of each marker with a RPK (reads per kil-base) \n" " higher 0.0. (we are assuming that metagenome_outfmt.bz2 has been generated before as \n" " shown above).\n" "$ metaphlan2.py -t marker_ab_table metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n" " The obtained RPK can be optionally normalized by the total number of reads in the metagenome \n" " to guarantee fair comparisons of abundances across samples. The number of reads in the metagenome\n" " needs to be passed with the '--nreads' argument\n\n" "* The list of markers present in the sample can be obtained with '-t marker_pres_table'\n" "$ metaphlan2.py -t marker_pres_table metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n" " The --pres_th argument (default 1.0) set the minimum RPK value to consider a marker present\n\n" "* The list '-t clade_profiles' analysis type reports the same information of '-t marker_ab_table'\n" " but the markers are reported on a clade-by-clade basis.\n" "$ metaphlan2.py -t clade_profiles metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n\n" "* Finally, to obtain all markers present for a specific clade and all its subclades, the \n" " '-t clade_specific_strain_tracker' should be used. For example, the following command\n" " is reporting the presence/absence of the markers for the B. fragulis species and its strains\n" " the optional argument --min_ab specifies the minimum clade abundance for reporting the markers\n\n" "$ metaphlan2.py -t clade_specific_strain_tracker --clade s__Bacteroides_fragilis metagenome_outfmt.bz2 --input_type bowtie2out > marker_abundance_table.txt\n" "\n------------------------------------------------------------------- \n\n" "", formatter_class=ap.RawTextHelpFormatter, add_help=False ) arg = p.add_argument arg( 'inp', metavar='INPUT_FILE', type=str, nargs='?', default=None, help= "the input file can be:\n" "* a fastq file containing metagenomic reads\n" "OR\n" "* a BowTie2 produced SAM file. \n" "OR\n" "* an intermediary mapping file of the metagenome generated by a previous MetaPhlAn run \n" "If the input file is missing, the script assumes that the input is provided using the standard \n" "input, or named pipes.\n" "IMPORTANT: the type of input needs to be specified with --input_type" ) arg( 'output', metavar='OUTPUT_FILE', type=str, nargs='?', default=None, help= "the tab-separated output file of the predicted taxon relative abundances \n" "[stdout if not present]") g = p.add_argument_group('Required arguments') arg = g.add_argument input_type_choices = ['fastq','fasta','multifasta','multifastq','bowtie2out','sam'] arg( '--input_type', choices=input_type_choices, required = '--install' not in args, help = "set whether the input is the multifasta file of metagenomic reads or \n" "the SAM file of the mapping of the reads against the MetaPhlAn db.\n" "[default 'automatic', i.e. 
the script will try to guess the input format]\n" ) g = p.add_argument_group('Mapping arguments') arg = g.add_argument arg('--mpa_pkl', type=str, default=None, help="The metadata pickled MetaPhlAn file [deprecated]") arg('--bowtie2db', metavar="METAPHLAN_BOWTIE2_DB", type=str, default=DEFAULT_DB_FOLDER, help=("The BowTie2 database file of the MetaPhlAn database. Used if " "--input_type is fastq, fasta, multifasta, or multifastq [default "+DEFAULT_DB_FOLDER+"]\n")) INDEX = 'v20_m200' arg('-x', '--index', type=str, default='v20_m200', help=("Specify the id of the database version to use. If the database\n" "files are not found on the local MetaPhlAn2 installation they\n" "will be automatically downloaded [default "+INDEX+"]\n")) bt2ps = ['sensitive', 'very-sensitive', 'sensitive-local', 'very-sensitive-local'] arg('--bt2_ps', metavar="BowTie2 presets", default='very-sensitive', choices=bt2ps, help="Presets options for BowTie2 (applied only when a " "multifasta file is provided)\n" "The choices enabled in MetaPhlAn are:\n" " * sensitive\n" " * very-sensitive\n" " * sensitive-local\n" " * very-sensitive-local\n" "[default very-sensitive]\n") arg('--bowtie2_exe', type=str, default=None, help='Full path and name of the BowTie2 executable. This option allows' 'MetaPhlAn to reach the executable even when it is not in the ' 'system PATH or the system PATH is unreachable') arg('--bowtie2_build', type=str, default='bowtie2-build', help="Full path to the bowtie2-build command to use, deafult assumes " "that 'bowtie2-build is present in the system path") arg('--bowtie2out', metavar="FILE_NAME", type=str, default=None, help="The file for saving the output of BowTie2") arg('--no_map', action='store_true', help="Avoid storing the --bowtie2out map file") arg('--tmp_dir', metavar="", default=None, type=str, help="The folder used to store temporary files [default is the OS " "dependent tmp dir]") g = p.add_argument_group('Post-mapping arguments') arg = g.add_argument stat_choices = ['avg_g','avg_l','tavg_g','tavg_l','wavg_g','wavg_l','med'] arg( '--tax_lev', metavar='TAXONOMIC_LEVEL', type=str, choices='a'+tax_units, default='a', help = "The taxonomic level for the relative abundance output:\n" "'a' : all taxonomic levels\n" "'k' : kingdoms\n" "'p' : phyla only\n" "'c' : classes only\n" "'o' : orders only\n" "'f' : families only\n" "'g' : genera only\n" "'s' : species only\n" "[default 'a']" ) arg( '--min_cu_len', metavar="", default="2000", type=int, help = "minimum total nucleotide length for the markers in a clade for\n" "estimating the abundance without considering sub-clade abundances\n" "[default 2000]\n" ) arg( '--min_alignment_len', metavar="", default=None, type=int, help = "The sam records for aligned reads with the longest subalignment\n" "length smaller than this threshold will be discarded.\n" "[default None]\n" ) arg( '--ignore_viruses', action='store_true', help= "Do not profile viral organisms" ) arg( '--ignore_eukaryotes', action='store_true', help= "Do not profile eukaryotic organisms" ) arg( '--ignore_bacteria', action='store_true', help= "Do not profile bacterial organisms" ) arg( '--ignore_archaea', action='store_true', help= "Do not profile archeal organisms" ) arg( '--stat_q', metavar="", type = float, default=0.1, help = "Quantile value for the robust average\n" "[default 0.1]" ) arg( '--ignore_markers', type=str, default = None, help = "File containing a list of markers to ignore. 
\n") arg( '--avoid_disqm', action="store_true", help = "Deactivate the procedure of disambiguating the quasi-markers based on the \n" "marker abundance pattern found in the sample. It is generally recommended \n" "too keep the disambiguation procedure in order to minimize false positives\n") arg( '--stat', metavar="", choices=stat_choices, default="tavg_g", type=str, help = "EXPERIMENTAL! Statistical approach for converting marker abundances into clade abundances\n" "'avg_g' : clade global (i.e. normalizing all markers together) average\n" "'avg_l' : average of length-normalized marker counts\n" "'tavg_g' : truncated clade global average at --stat_q quantile\n" "'tavg_l' : trunated average of length-normalized marker counts (at --stat_q)\n" "'wavg_g' : winsorized clade global average (at --stat_q)\n" "'wavg_l' : winsorized average of length-normalized marker counts (at --stat_q)\n" "'med' : median of length-normalized marker counts\n" "[default tavg_g]" ) arg = p.add_argument g = p.add_argument_group('Additional analysis types and arguments') arg = g.add_argument analysis_types = ['rel_ab', 'rel_ab_w_read_stats', 'reads_map', 'clade_profiles', 'marker_ab_table', 'marker_counts', 'marker_pres_table', 'clade_specific_strain_tracker'] arg( '-t', metavar='ANALYSIS TYPE', type=str, choices = analysis_types, default='rel_ab', help = "Type of analysis to perform: \n" " * rel_ab: profiling a metagenomes in terms of relative abundances\n" " * rel_ab_w_read_stats: profiling a metagenomes in terms of relative abundances and estimate the number of reads comming from each clade.\n" " * reads_map: mapping from reads to clades (only reads hitting a marker)\n" " * clade_profiles: normalized marker counts for clades with at least a non-null marker\n" " * marker_ab_table: normalized marker counts (only when > 0.0 and normalized by metagenome size if --nreads is specified)\n" " * marker_counts: non-normalized marker counts [use with extreme caution]\n" " * marker_pres_table: list of markers present in the sample (threshold at 1.0 if not differently specified with --pres_th\n" "[default 'rel_ab']" ) arg( '--nreads', metavar="NUMBER_OF_READS", type=int, default = None, help = "The total number of reads in the original metagenome. It is used only when \n" "-t marker_table is specified for normalizing the length-normalized counts \n" "with the metagenome size as well. No normalization applied if --nreads is not \n" "specified" ) arg( '--pres_th', metavar="PRESENCE_THRESHOLD", type=int, default = 1.0, help = 'Threshold for calling a marker present by the -t marker_pres_table option' ) arg( '--clade', metavar="", default=None, type=str, help = "The clade for clade_specific_strain_tracker analysis\n" ) arg( '--min_ab', metavar="", default=0.1, type=float, help = "The minimum percentage abundace for the clade in the clade_specific_strain_tracker analysis\n" ) g = p.add_argument_group('Output arguments') arg = g.add_argument arg( '-o', '--output_file', metavar="output file", type=str, default=None, help = "The output file (if not specified as positional argument)\n") arg('--sample_id_key', metavar="name", type=str, default="#SampleID", help =("Specify the sample ID key for this analysis." " Defaults to '#SampleID'.")) arg('--sample_id', metavar="value", type=str, default="Metaphlan2_Analysis", help =("Specify the sample ID for this analysis." 
" Defaults to 'Metaphlan2_Analysis'.")) arg( '-s', '--samout', metavar="sam_output_file", type=str, default=None, help="The sam output file\n") #************************************************************* #* Parameters related to biom file generation * #************************************************************* arg( '--biom', '--biom_output_file', metavar="biom_output", type=str, default=None, help = "If requesting biom file output: The name of the output file in biom format \n") arg( '--mdelim', '--metadata_delimiter_char', metavar="mdelim", type=str, default="|", help = "Delimiter for bug metadata: - defaults to pipe. e.g. the pipe in k__Bacteria|p__Proteobacteria \n") #************************************************************* #* End parameters related to biom file generation * #************************************************************* g = p.add_argument_group('Other arguments') arg = g.add_argument arg('--nproc', metavar="N", type=int, default=4, help="The number of CPUs to use for parallelizing the mapping [default 4]") arg('--install', action='store_true', help="Only checks if the MetaPhlAn2 DB is installed and installs it if not. All other parameters are ignored.") arg('--read_min_len', type=int, default=70, help="Specify the minimum length of the reads to be considered when parsing the input file with " "'read_fastx.py' script, default value is 70") arg('-v', '--version', action='version', version="MetaPhlAn version {} ({})".format(__version__, __date__), help="Prints the current MetaPhlAn version and exit") arg("-h", "--help", action="help", help="show this help message and exit") return vars(p.parse_args()) def byte_to_megabyte(byte): """ Convert byte value to megabyte """ return byte / (1024.0**2) class ReportHook(): def __init__(self): self.start_time = time.time() def report(self, blocknum, block_size, total_size): """ Print download progress message """ if blocknum == 0: self.start_time = time.time() if total_size > 0: sys.stderr.write("Downloading file of size: {:.2f} MB\n" .format(byte_to_megabyte(total_size))) else: total_downloaded = blocknum * block_size status = "{:3.2f} MB ".format(byte_to_megabyte(total_downloaded)) if total_size > 0: percent_downloaded = total_downloaded * 100.0 / total_size # use carriage return plus sys.stderr to overwrite stderr download_rate = total_downloaded / (time.time() - self.start_time) estimated_time = (total_size - total_downloaded) / download_rate estimated_minutes = int(estimated_time / 60.0) estimated_seconds = estimated_time - estimated_minutes * 60.0 status += ("{:3.2f} % {:5.2f} MB/sec {:2.0f} min {:2.0f} sec " .format(percent_downloaded, byte_to_megabyte(download_rate), estimated_minutes, estimated_seconds)) status += " \r" sys.stderr.write(status) def download(url, download_file): """ Download a file from a url """ if not os.path.isfile(download_file): try: sys.stderr.write("\nDownloading " + url + "\n") file, headers = urlretrieve(url, download_file, reporthook=ReportHook().report) except EnvironmentError: sys.stderr.write("\nWarning: Unable to download " + url + "\n") else: sys.stderr.write("\nFile {} already present!\n".format(download_file)) def download_unpack_tar(FILE_LIST, download_file_name, folder, bowtie2_build, nproc): """ Download the url to the file and decompress into the folder """ # Create the folder if it does not already exist if not os.path.isdir(folder): try: os.makedirs(folder) except EnvironmentError: sys.exit("ERROR: Unable to create folder for database install: " + folder) # Check the directory 
permissions if not os.access(folder, os.W_OK): sys.exit("ERROR: The directory is not writeable: " + folder + ". " "Please modify the permissions.") tar_file = os.path.join(folder, "mpa_" + download_file_name + ".tar") md5_file = os.path.join(folder, "mpa_" + download_file_name + ".md5") if not os.path.isfile(md5_file) or not os.path.isfile(tar_file): #Download the list of all the files in the Dropbox folder list_file_path = os.path.join(folder, "file_list.txt") download(FILE_LIST, list_file_path) if os.path.isfile(list_file_path): with open(list_file_path) as f: ls_f = dict( [row.strip().split() for row in f]) url_tar_file = ls_f["mpa_" + download_file_name + ".tar"] download(url_tar_file, tar_file) # download MD5 checksum url_md5_file = ls_f["mpa_" + download_file_name + ".md5"] download(url_md5_file, md5_file) md5_md5 = None md5_tar = None if os.path.isfile(md5_file): with open(md5_file) as f: for row in f: md5_md5 = row.strip().split(' ')[0] else: sys.stderr.write('File "{}" not found!\n'.format(md5_file)) # compute MD5 of .tar.bz2 if os.path.isfile(tar_file): hash_md5 = hashlib.md5() with open(tar_file, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) md5_tar = hash_md5.hexdigest()[:32] else: sys.stderr.write('File "{}" not found!\n'.format(tar_file)) if (md5_tar is None) or (md5_md5 is None): sys.exit("MD5 checksums not found, something went wrong!") # compare checksums if md5_tar != md5_md5: sys.exit("MD5 checksums do not correspond! If this happens again, you should remove the database files and " "rerun MetaPhlAn2 so they are re-downloaded") # untar try: tarfile_handle = tarfile.open(tar_file) tarfile_handle.extractall(path=folder) tarfile_handle.close() except EnvironmentError: sys.stderr.write("Warning: Unable to extract {}.\n".format(tar_file)) # uncompress sequences bz2_file = os.path.join(folder, "mpa_" + download_file_name + ".fna.bz2") fna_file = os.path.join(folder, "mpa_" + download_file_name + ".fna") if not os.path.isfile(fna_file): sys.stderr.write('\n\nDecompressing {} into {}\n'.format(bz2_file, fna_file)) with open(fna_file, 'wb') as fna_h, bz2.BZ2File(bz2_file, 'rb') as bz2_h: for data in iter(lambda: bz2_h.read(100 * 1024), b''): fna_h.write(data) # build bowtie2 indexes if not glob(os.path.join(folder, "mpa_" + download_file_name + "*.bt2")): bt2_base = os.path.join(folder, "mpa_" + download_file_name) bt2_cmd = [bowtie2_build, '--quiet'] if nproc > 1: bt2_build_output = subp.check_output([bowtie2_build, '--usage'], stderr=subp.STDOUT) if 'threads' in str(bt2_build_output): bt2_cmd += ['--threads', str(nproc)] bt2_cmd += ['-f', fna_file, bt2_base] sys.stderr.write('\nBuilding Bowtie2 indexes\n') try: subp.check_call(bt2_cmd) except Exception as e: sys.stderr.write("Fatal error running '{}'\nError message: '{}'\n\n".format(' '.join(bt2_cmd), e)) sys.exit(1) sys.stderr.write('Removing uncompress database {}\n'.format(fna_file)) os.remove(fna_file) def check_and_install_database(index, bowtie2_db, bowtie2_build, nproc): """ Check if the database is installed, if not download and install """ if len(glob(os.path.join(bowtie2_db, "mpa_{}*".format(index)))) >= 7: return # download the tar archive and decompress sys.stderr.write("\nDownloading MetaPhlAn2 database\nPlease note due to " "the size this might take a few minutes\n") download_unpack_tar(FILE_LIST, index, bowtie2_db, bowtie2_build, nproc) sys.stderr.write("\nDownload complete\n") def set_mapping_arguments(index, bowtie2_db): mpa_pkl = 'mpa_pkl' bowtie2db = 'bowtie2db' if 
os.path.isfile(os.path.join(bowtie2_db, "mpa_{}.pkl".format(index))): mpa_pkl = os.path.join(bowtie2_db, "mpa_{}.pkl".format(index)) if glob(os.path.join(bowtie2_db, "mpa_{}*.bt2".format(index))): bowtie2db = os.path.join(bowtie2_db, "mpa_{}".format(index)) return (mpa_pkl, bowtie2db) def run_bowtie2(fna_in, outfmt6_out, bowtie2_db, preset, nproc, file_format="multifasta", exe=None, samout=None, min_alignment_len=None, read_min_len=0): # checking read_fastx.py read_fastx = "read_fastx.py" try: subp.check_call([read_fastx, "-h"], stdout=DEVNULL) except Exception as e: try: read_fastx = os.path.join(os.path.join(os.path.dirname(__file__), "utils"), read_fastx) subp.check_call([read_fastx, "-h"], stdout=DEVNULL) except Exception as e: sys.stderr.write("OSError: fatal error running '{}'. Is it in the system path?\n".format(read_fastx)) sys.exit(1) # checking bowtie2 try: subp.check_call([exe if exe else 'bowtie2', "-h"], stdout=DEVNULL) except Exception as e: sys.stderr.write('OSError: "{}"\nFatal error running BowTie2. Is BowTie2 in the system path?\n'.format(e)) sys.exit(1) try: if fna_in: readin = subp.Popen([read_fastx, '-l', str(read_min_len), fna_in], stdout=subp.PIPE) else: readin = subp.Popen([read_fastx, '-l', str(read_min_len)], stdin=sys.stdin, stdout=subp.PIPE) bowtie2_cmd = [exe if exe else 'bowtie2', "--quiet", "--no-unal", "--{}".format(preset), "-S", "-", "-x", bowtie2_db] if int(nproc) > 1: bowtie2_cmd += ["-p", str(nproc)] bowtie2_cmd += ["-U", "-"] # if not stat.S_ISFIFO(os.stat(fna_in).st_mode) else [] if file_format == "multifasta": bowtie2_cmd += ["-f"] p = subp.Popen(bowtie2_cmd, stdout=subp.PIPE, stdin=readin.stdout) readin.stdout.close() lmybytes, outf = (mybytes, bz2.BZ2File(outfmt6_out, "w")) if outfmt6_out.endswith(".bz2") else (str, open(outfmt6_out, "w")) try: if samout: if samout[-4:] == '.bz2': sam_file = bz2.BZ2File(samout, 'w') else: sam_file = open(samout, 'wb') except IOError as e: sys.stderr.write('IOError: "{}"\nUnable to open sam output file.\n'.format(e)) sys.exit(1) for line in p.stdout: if samout: sam_file.write(line) o = read_and_split_line(line) if not o[0].startswith('@'): if not o[2].endswith('*'): if ((min_alignment_len is None) or (max([int(x.strip('M')) for x in re.findall(r'(\d*M)', o[5]) if x]) >= min_alignment_len)): outf.write(lmybytes("\t".join([o[0], o[2]]) + "\n")) outf.close() if samout: sam_file.close() p.communicate() except OSError as e: sys.stderr.write('OSError: "{}"\nFatal error running BowTie2.\n'.format(e)) sys.exit(1) except ValueError as e: sys.stderr.write('ValueError: "{}"\nFatal error running BowTie2.\n'.format(e)) sys.exit(1) except IOError as e: sys.stderr.write('IOError: "{}"\nFatal error running BowTie2.\n'.format(e)) sys.exit(1) if p.returncode == 13: sys.stderr.write("Permission Denied Error: fatal error running BowTie2." "Is the BowTie2 file in the path with execution and read permissions?\n") sys.exit(1) elif p.returncode != 0: sys.stderr.write("Error while running bowtie2.\n") sys.exit(1) #def guess_input_format( inp_file ): # if "," in inp_file: # sys.stderr.write( "Sorry, I cannot guess the format of the input, when " # "more than one file is specified. 
Please set the --input_type parameter \n" ) # sys.exit(1) # # with open( inp_file ) as inpf: # for i,l in enumerate(inpf): # line = l.strip() # if line[0] == '#': continue # if line[0] == '>': return 'multifasta' # if line[0] == '@': return 'multifastq' # if len(l.split('\t')) == 2: return 'bowtie2out' # if i > 20: break # return None class TaxClade: min_cu_len = -1 markers2lens = None stat = None quantile = None avoid_disqm = False def __init__( self, name, uncl = False, id_int = 0 ): self.children, self.markers2nreads = {}, {} self.name, self.father = name, None self.uncl, self.subcl_uncl = uncl, False self.abundance, self.uncl_abundance = None, 0 self.id = id_int def add_child( self, name, id_int ): new_clade = TaxClade( name, id_int=id_int ) self.children[name] = new_clade new_clade.father = self return new_clade def get_terminals( self ): terms = [] if not self.children: return [self] for c in self.children.values(): terms += c.get_terminals() return terms def get_full_name( self ): fullname = [self.name] cl = self.father while cl: fullname = [cl.name] + fullname cl = cl.father return "|".join(fullname[1:]) def get_normalized_counts( self ): return [(m,float(n)*1000.0/self.markers2lens[m]) for m,n in self.markers2nreads.items()] def compute_abundance( self ): if self.abundance is not None: return self.abundance sum_ab = sum([c.compute_abundance() for c in self.children.values()]) rat_nreads = sorted([(self.markers2lens[m],n) for m,n in self.markers2nreads.items()], key = lambda x: x[1]) rat_nreads, removed = [], [] for m,n in sorted(self.markers2nreads.items(),key=lambda pars:pars[0]): misidentified = False if not self.avoid_disqm: for e in self.markers2exts[m]: toclade = self.taxa2clades[e] m2nr = toclade.markers2nreads tocladetmp = toclade while len(tocladetmp.children) == 1: tocladetmp = list(tocladetmp.children.values())[0] m2nr = tocladetmp.markers2nreads nonzeros = sum([v>0 for v in m2nr.values()]) if len(m2nr): if float(nonzeros) / len(m2nr) > 0.33: misidentified = True removed.append( (self.markers2lens[m],n) ) break if not misidentified: rat_nreads.append( (self.markers2lens[m],n) ) if not self.avoid_disqm and len(removed): n_rat_nreads = float(len(rat_nreads)) n_removed = float(len(removed)) n_tot = n_rat_nreads + n_removed n_ripr = 10 if len(self.get_terminals()) < 2: n_ripr = 0 if "k__Viruses" in self.get_full_name(): n_ripr = 0 if n_rat_nreads < n_ripr and n_tot > n_rat_nreads: rat_nreads += removed[:n_ripr-int(n_rat_nreads)] rat_nreads = sorted(rat_nreads, key = lambda x: x[1]) rat_v,nreads_v = zip(*rat_nreads) if rat_nreads else ([],[]) rat, nrawreads, loc_ab = float(sum(rat_v)) or -1.0, sum(nreads_v), 0.0 quant = int(self.quantile*len(rat_nreads)) ql,qr,qn = (quant,-quant,quant) if quant else (None,None,0) if self.name[0] == 't' and (len(self.father.children) > 1 or "_sp" in self.father.name or "k__Viruses" in self.get_full_name()): non_zeros = float(len([n for r,n in rat_nreads if n > 0])) nreads = float(len(rat_nreads)) if nreads == 0.0 or non_zeros / nreads < 0.7: self.abundance = 0.0 return 0.0 if rat < 0.0: pass elif self.stat == 'avg_g' or (not qn and self.stat in ['wavg_g','tavg_g']): loc_ab = nrawreads / rat if rat >= 0 else 0.0 elif self.stat == 'avg_l' or (not qn and self.stat in ['wavg_l','tavg_l']): loc_ab = np.mean([float(n)/r for r,n in rat_nreads]) elif self.stat == 'tavg_g': wnreads = sorted([(float(n)/r,r,n) for r,n in rat_nreads], key=lambda x:x[0]) den,num = zip(*[v[1:] for v in wnreads[ql:qr]]) loc_ab = float(sum(num))/float(sum(den)) if any(den) 
else 0.0 elif self.stat == 'tavg_l': loc_ab = np.mean(sorted([float(n)/r for r,n in rat_nreads])[ql:qr]) elif self.stat == 'wavg_g': vmin, vmax = nreads_v[ql], nreads_v[qr] wnreads = [vmin]*qn+list(nreads_v[ql:qr])+[vmax]*qn loc_ab = float(sum(wnreads)) / rat elif self.stat == 'wavg_l': wnreads = sorted([float(n)/r for r,n in rat_nreads]) vmin, vmax = wnreads[ql], wnreads[qr] wnreads = [vmin]*qn+list(wnreads[ql:qr])+[vmax]*qn loc_ab = np.mean(wnreads) elif self.stat == 'med': loc_ab = np.median(sorted([float(n)/r for r,n in rat_nreads])[ql:qr]) self.abundance = loc_ab if rat < self.min_cu_len and self.children: self.abundance = sum_ab elif loc_ab < sum_ab: self.abundance = sum_ab if self.abundance > sum_ab and self.children: # *1.1?? self.uncl_abundance = self.abundance - sum_ab self.subcl_uncl = not self.children and self.name[0] not in tax_units[-2:] return self.abundance def get_all_abundances( self ): ret = [(self.name,self.abundance)] if self.uncl_abundance > 0.0: lchild = list(self.children.values())[0].name[:3] ret += [(lchild+self.name[3:]+"_unclassified",self.uncl_abundance)] if self.subcl_uncl and self.name[0] != tax_units[-2]: cind = tax_units.index( self.name[0] ) ret += [( tax_units[cind+1]+self.name[1:]+"_unclassified", self.abundance)] for c in self.children.values(): ret += c.get_all_abundances() return ret class TaxTree: def __init__( self, mpa, markers_to_ignore = None ): #, min_cu_len ): self.root = TaxClade( "root" ) self.all_clades, self.markers2lens, self.markers2clades, self.taxa2clades, self.markers2exts = {}, {}, {}, {}, {} TaxClade.markers2lens = self.markers2lens TaxClade.markers2exts = self.markers2exts TaxClade.taxa2clades = self.taxa2clades self.id_gen = itertools.count(1) # clades_txt = ((l.strip().split("|"),n) for l,n in mpa_pkl['taxonomy'].items()) clades_txt = ((l.strip().split("|"), n) for l, n in mpa['taxonomy'].items()) for clade,lenc in clades_txt: father = self.root for clade_lev in clade: # !!!!! 
[:-1]: if not clade_lev in father.children: father.add_child( clade_lev, id_int=next(self.id_gen) ) self.all_clades[clade_lev] = father.children[clade_lev] if clade_lev[0] == "t": self.taxa2clades[clade_lev[3:]] = father father = father.children[clade_lev] if clade_lev[0] == "t": father.glen = lenc def add_lens( node ): if not node.children: return node.glen lens = [] for c in node.children.values(): lens.append( add_lens( c ) ) node.glen = sum(lens) / len(lens) return node.glen add_lens( self.root ) # for k,p in mpa_pkl['markers'].items(): for k, p in mpa['markers'].items(): if k in markers_to_exclude: continue if k in markers_to_ignore: continue self.markers2lens[k] = p['len'] self.markers2clades[k] = p['clade'] self.add_reads(k, 0) self.markers2exts[k] = p['ext'] def set_min_cu_len( self, min_cu_len ): TaxClade.min_cu_len = min_cu_len def set_stat( self, stat, quantile, avoid_disqm = False ): TaxClade.stat = stat TaxClade.quantile = quantile TaxClade.avoid_disqm = avoid_disqm def add_reads( self, marker, n, ignore_viruses = False, ignore_eukaryotes = False, ignore_bacteria = False, ignore_archaea = False ): clade = self.markers2clades[marker] cl = self.all_clades[clade] if ignore_viruses or ignore_eukaryotes or ignore_bacteria or ignore_archaea: cn = cl.get_full_name() if ignore_viruses and cn.startswith("k__Viruses"): return "" if ignore_eukaryotes and cn.startswith("k__Eukaryota"): return "" if ignore_archaea and cn.startswith("k__Archaea"): return "" if ignore_bacteria and cn.startswith("k__Bacteria"): return "" while len(cl.children) == 1: cl = list(cl.children.values())[0] cl.markers2nreads[marker] = n return cl.get_full_name() def markers2counts( self ): m2c = {} for k,v in self.all_clades.items(): for m,c in v.markers2nreads.items(): m2c[m] = c return m2c def clade_profiles( self, tax_lev, get_all = False ): cl2pr = {} for k,v in self.all_clades.items(): if tax_lev and not k.startswith(tax_lev): continue prof = v.get_normalized_counts() if not get_all and ( len(prof) < 1 or not sum([p[1] for p in prof]) > 0.0 ): continue cl2pr[v.get_full_name()] = prof return cl2pr def relative_abundances( self, tax_lev ): cl2ab_n = dict([(k,v) for k,v in self.all_clades.items() if k.startswith("k__") and not v.uncl]) cl2ab, cl2glen, tot_ab = {}, {}, 0.0 for k,v in cl2ab_n.items(): tot_ab += v.compute_abundance() for k,v in cl2ab_n.items(): for cl,ab in sorted(v.get_all_abundances(),key=lambda pars:pars[0]): if not tax_lev: if cl not in self.all_clades: to = tax_units.index(cl[0]) t = tax_units[to-1] cl = t + cl.split("_unclassified")[0][1:] cl = self.all_clades[cl].get_full_name() spl = cl.split("|") cl = "|".join(spl+[tax_units[to]+spl[-1][1:]+"_unclassified"]) glen = self.all_clades[spl[-1]].glen else: glen = self.all_clades[cl].glen cl = self.all_clades[cl].get_full_name() elif not cl.startswith(tax_lev): if cl in self.all_clades: glen = self.all_clades[cl].glen else: glen = 1.0 continue cl2ab[cl] = ab cl2glen[cl] = glen ret_d = dict([( k, float(v) / tot_ab if tot_ab else 0.0) for k,v in cl2ab.items()]) ret_r = dict([( k, (v,cl2glen[k],float(v)*cl2glen[k])) for k,v in cl2ab.items()]) #ret_r = dict([( k, float(v) / tot_ab if tot_ab else 0.0) for k,v in cl2ab.items()]) if tax_lev: ret_d[tax_lev+"unclassified"] = 1.0 - sum(ret_d.values()) return ret_d, ret_r def map2bbh(mapping_f, input_type='bowtie2out', min_alignment_len=None): if not mapping_f: ras, ras_line, inpf = plain_read_and_split, plain_read_and_split_line, sys.stdin else: if mapping_f.endswith(".bz2"): ras, ras_line, inpf = 
read_and_split, read_and_split_line, bz2.BZ2File(mapping_f, "r") else: ras, ras_line, inpf = plain_read_and_split, plain_read_and_split_line, open(mapping_f) reads2markers = {} if input_type == 'bowtie2out': for r, c in ras(inpf): reads2markers[r] = c elif input_type == 'sam': for line in inpf: o = ras_line(line) if ((o[0][0] != '@') and (o[2][-1] != '*') and ((min_alignment_len is None) or (max([int(x.strip('M')) for x in re.findall(r'(\d*M)', o[5]) if x]) >= min_alignment_len))): reads2markers[o[0]] = o[2] inpf.close() markers2reads = defdict(set) for r, m in reads2markers.items(): markers2reads[m].add(r) return markers2reads def maybe_generate_biom_file(tree, pars, abundance_predictions): json_key = "MetaPhlAn2" if not pars['biom']: return None if not abundance_predictions: biom_table = biom.Table([], [], []) # create empty BIOM table with open(pars['biom'], 'w') as outfile: biom_table.to_json(json_key, direct_io=outfile) return True delimiter = "|" if len(pars['mdelim']) > 1 else pars['mdelim'] def istip(clade_name): end_name = clade_name.split(delimiter)[-1] return end_name.startswith("t__") or end_name.endswith("_unclassified") def findclade(clade_name): if clade_name.endswith('_unclassified'): name = clade_name.split(delimiter)[-2] else: name = clade_name.split(delimiter)[-1] return tree.all_clades[name] def to_biomformat(clade_name): return {'taxonomy': clade_name.split(delimiter)} clades = iter((abundance, findclade(name)) for (name, abundance) in abundance_predictions if istip(name)) packed = iter(([abundance], clade.get_full_name(), clade.id) for (abundance, clade) in clades) # unpack that tuple here to stay under 80 chars on a line data, clade_names, clade_ids = zip(*packed) # biom likes column vectors, so we give it an array like this: # np.array([a],[b],[c]) data = np.array(data) sample_ids = [pars['sample_id']] table_id = 'MetaPhlAn2_Analysis' #********************************************************************************************** # Modification of Code : * # Modified the code so instead of using the current clade IDs, which are numbers, we will * # use the clade_names * # Users reported the biom output is invalid and also the IDs were changing from run to * # run. 
* # George Weingart 05/22/2017 [email protected] * #********************************************************************************************** if LooseVersion(biom.__version__) < LooseVersion("2.0.0"): biom_table = biom.table.table_factory( data, sample_ids, ######## clade_ids, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names clade_names, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names sample_metadata = None, observation_metadata = map(to_biomformat, clade_names), table_id = table_id, constructor = biom.table.DenseOTUTable ) with open(pars['biom'], 'w') as outfile: json.dump( biom_table.getBiomFormatObject(json_key), outfile ) else: # Below is the biom2 compatible code biom_table = biom.table.Table( data, #clade_ids, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names clade_names, #Modified by George Weingart 5/22/2017 - We will use instead the clade_names sample_ids, sample_metadata = None, observation_metadata = map(to_biomformat, clade_names), table_id = table_id, input_is_dense = True ) with open(pars['biom'], 'w') as outfile: biom_table.to_json( json_key, direct_io = outfile ) return True def metaphlan2(): pars = read_params(sys.argv) # check if the database is installed, if not then install check_and_install_database(pars['index'], pars['bowtie2db'], pars['bowtie2_build'], pars['nproc']) if pars['install']: sys.stderr.write('The database is installed\n') return # set correct map_pkl and bowtie2db variables pars['mpa_pkl'], pars['bowtie2db'] = set_mapping_arguments(pars['index'], pars['bowtie2db']) #if pars['inp'] is None and ( pars['input_type'] is None or pars['input_type'] == 'automatic'): # sys.stderr.write( "The --input_type parameter need top be specified when the " # "input is provided from the standard input.\n" # "Type metaphlan.py -h for more info\n") # sys.exit(0) if (pars['bt2_ps'] in ["sensitive-local", "very-sensitive-local"]) and (pars['min_alignment_len'] is None): pars['min_alignment_len'] = 100 sys.stderr.write('Warning! bt2_ps is set to local mode, and min_alignment_len is None, I automatically ' 'set min_alignment_len to 100! 
If you do not like, rerun the command and set ' 'min_alignment_len to a specific value.\n') if pars['input_type'] == 'fastq': pars['input_type'] = 'multifastq' if pars['input_type'] == 'fasta': pars['input_type'] = 'multifasta' #if pars['input_type'] == 'automatic': # pars['input_type'] = guess_input_format( pars['inp'] ) # if not pars['input_type']: # sys.stderr.write( "Sorry, I cannot guess the format of the input file, please " # "specify the --input_type parameter \n" ) # sys.exit(1) # check for the mpa_pkl file if not os.path.isfile(pars['mpa_pkl']): sys.stderr.write("Error: Unable to find the mpa_pkl file at: " + pars['mpa_pkl'] + "\nExpecting location ${mpa_dir}/db_v20/map_v20_m200.pkl " "\nSelect the file location with the option --mpa_pkl.\n" "Exiting...\n\n") sys.exit(1) if pars['ignore_markers']: with open(pars['ignore_markers']) as ignv: ignore_markers = set([l.strip() for l in ignv]) else: ignore_markers = set() no_map = False if pars['input_type'] == 'multifasta' or pars['input_type'] == 'multifastq': bow = pars['bowtie2db'] is not None if not bow: sys.stderr.write( "No MetaPhlAn BowTie2 database provided\n " "[--bowtie2db options]!\n" "Exiting...\n\n" ) sys.exit(1) if pars['no_map']: pars['bowtie2out'] = tf.NamedTemporaryFile(dir=pars['tmp_dir']).name no_map = True else: if bow and not pars['bowtie2out']: if pars['inp'] and "," in pars['inp']: sys.stderr.write("Error! --bowtie2out needs to be specified when multiple " "fastq or fasta files (comma separated) are provided\n") sys.exit(1) fname = pars['inp'] if fname is None: fname = "stdin_map" elif stat.S_ISFIFO(os.stat(fname).st_mode): fname = "fifo_map" pars['bowtie2out'] = fname + ".bowtie2out.txt" if os.path.exists( pars['bowtie2out'] ): sys.stderr.write( "BowTie2 output file detected: " + pars['bowtie2out'] + "\n" "Please use it as input or remove it if you want to " "re-perform the BowTie2 run.\n" "Exiting...\n\n" ) sys.exit(1) if bow and not all([os.path.exists(".".join([str(pars['bowtie2db']), p])) for p in ["1.bt2", "2.bt2", "3.bt2", "4.bt2", "rev.1.bt2", "rev.2.bt2"]]): sys.stderr.write("No MetaPhlAn BowTie2 database found (--index " "option)!\nExpecting location {}\nExiting..." .format(pars['bowtie2db'])) sys.exit(1) if bow: run_bowtie2(pars['inp'], pars['bowtie2out'], pars['bowtie2db'], pars['bt2_ps'], pars['nproc'], file_format=pars['input_type'], exe=pars['bowtie2_exe'], samout=pars['samout'], min_alignment_len=pars['min_alignment_len'], read_min_len=pars['read_min_len']) pars['input_type'] = 'bowtie2out' pars['inp'] = pars['bowtie2out'] # !!! 
with open( pars['mpa_pkl'], 'rb' ) as a: mpa_pkl = pickle.loads( bz2.decompress( a.read() ) ) tree = TaxTree( mpa_pkl, ignore_markers ) tree.set_min_cu_len( pars['min_cu_len'] ) tree.set_stat( pars['stat'], pars['stat_q'], pars['avoid_disqm'] ) markers2reads = map2bbh(pars['inp'], pars['input_type'], pars['min_alignment_len']) if no_map: os.remove( pars['inp'] ) map_out = [] for marker,reads in sorted(markers2reads.items(), key=lambda pars: pars[0]): if marker not in tree.markers2lens: continue tax_seq = tree.add_reads( marker, len(reads), ignore_viruses = pars['ignore_viruses'], ignore_eukaryotes = pars['ignore_eukaryotes'], ignore_bacteria = pars['ignore_bacteria'], ignore_archaea = pars['ignore_archaea'], ) if tax_seq: map_out +=["\t".join([r,tax_seq]) for r in sorted(reads)] if pars['output'] is None and pars['output_file'] is not None: pars['output'] = pars['output_file'] with (open(pars['output'],"w") if pars['output'] else sys.stdout) as outf: outf.write('\t'.join((pars["sample_id_key"], pars["sample_id"])) + '\n') if pars['t'] == 'reads_map': outf.write( "\n".join( map_out ) + "\n" ) elif pars['t'] == 'rel_ab': cl2ab, _ = tree.relative_abundances( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None ) outpred = [(k,round(v*100.0,5)) for k,v in cl2ab.items() if v > 0.0] if outpred: for k,v in sorted( outpred, reverse=True, key=lambda x:x[1]+(100.0*(8-x[0].count("|"))) ): outf.write( "\t".join( [k,str(v)] ) + "\n" ) else: outf.write( "unclassified\t100.0\n" ) maybe_generate_biom_file(tree, pars, outpred) elif pars['t'] == 'rel_ab_w_read_stats': cl2ab, rr = tree.relative_abundances( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None ) outpred = [(k,round(v*100.0,5)) for k,v in cl2ab.items() if v > 0.0] totl = 0 if outpred: outf.write( "\t".join( [ "#clade_name", "relative_abundance", "coverage", "average_genome_length_in_the_clade", "estimated_number_of_reads_from_the_clade" ]) +"\n" ) for k,v in sorted( outpred, reverse=True, key=lambda x:x[1]+(100.0*(8-x[0].count("|"))) ): outf.write( "\t".join( [ k, str(v), str(rr[k][0]) if k in rr else "-", str(rr[k][1]) if k in rr else "-", str(int(round(rr[k][2],0)) if k in rr else "-") ] ) + "\n" ) if "|" not in k: totl += (int(round(rr[k][2],0)) if k in rr else 0) outf.write( "#estimated total number of reads from known clades: " + str(totl)+"\n") else: outf.write( "unclassified\t100.0\n" ) maybe_generate_biom_file(tree, pars, outpred) elif pars['t'] == 'clade_profiles': cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None ) for c,p in cl2pr.items(): mn,n = zip(*p) outf.write( "\t".join( [""]+[str(s) for s in mn] ) + "\n" ) outf.write( "\t".join( [c]+[str(s) for s in n] ) + "\n" ) elif pars['t'] == 'marker_ab_table': cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None ) for v in cl2pr.values(): outf.write( "\n".join(["\t".join([str(a),str(b/float(pars['nreads'])) if pars['nreads'] else str(b)]) for a,b in v if b > 0.0]) + "\n" ) elif pars['t'] == 'marker_pres_table': cl2pr = tree.clade_profiles( pars['tax_lev']+"__" if pars['tax_lev'] != 'a' else None ) for v in cl2pr.values(): strout = ["\t".join([str(a),"1"]) for a,b in v if b > pars['pres_th']] if strout: outf.write( "\n".join(strout) + "\n" ) elif pars['t'] == 'marker_counts': outf.write( "\n".join( ["\t".join([m,str(c)]) for m,c in tree.markers2counts().items() ]) +"\n" ) elif pars['t'] == 'clade_specific_strain_tracker': cl2pr = tree.clade_profiles( None, get_all = True ) cl2ab, _ = tree.relative_abundances( 
None ) strout = [] for cl,v in cl2pr.items(): if cl.endswith(pars['clade']) and cl2ab[cl]*100.0 < pars['min_ab']: strout = [] break if pars['clade'] in cl: strout += ["\t".join([str(a),str(int(b > pars['pres_th']))]) for a,b in v] if strout: strout = sorted(strout,key=lambda x:x[0]) outf.write( "\n".join(strout) + "\n" ) else: sys.stderr.write("Clade "+pars['clade']+" not present at an abundance >"+str(round(pars['min_ab'],2))+"%, " "so no clade specific markers are reported\n") if __name__ == '__main__': metaphlan2()
py
1a3dec02419ba514987c47216e9b45c999eb9582
import os from os.path import splitext, basename, exists, join import sys import datetime import configparser from pigeon.utils.parseconfig import ParseConfig from pigeon.utils.runpipe import RunPipe class Pipe(): """Docstring for seqPipe. """ def __init__(self, pipeline_config, dryrun=False, verbose=False, read_from='file'): """TODO: to be defined1. """ pipe_conf = ParseConfig(pipeline_config, read_from=read_from) self.project_parameters = pipe_conf.project_parameters self.task_parameters = pipe_conf.task_parameters self.task_list = pipe_conf.task_list self.runtime_task_list = pipe_conf.task_list self.include = pipe_conf.include self.number_included = 0 # Use current working dir if not output_dir given if 'output_dir' in self.project_parameters: self.project_output_dir = os.path.abspath(self.project_parameters['output_dir']) else: self.project_output_dir = os.path.abspath(os.getcwd()) if 'project_name' in self.project_parameters: self.project_name = self.project_parameters['project_name'] else: self.project_name = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') self.cmd_feed = {} self.input_feed = { 'input_files': self.project_parameters['input_files'].split()} if 'input_names' in self.project_parameters: self.input_names = { 'input_names': self.project_parameters['input_names'].split()} self.dryrun = dryrun self.verbose = verbose self.create_pipeline() def in_out_feed(self, task, input_files): # Use task specific output_dir if given if 'output_dir' in self.task_parameters[task]: output_dir = self.task_parameters[task]['output_dir'] else: output_dir = self.project_output_dir if 'dump_dir' in self.task_parameters[task]: dump_dir = self.task_parameters[task]['dump_dir'] if self.dryrun is not True: mkdir(join(output_dir, dump_dir)) else: dump_dir = '' if 'ext' in self.task_parameters[task]: ext = self.task_parameters[task]['ext'] else: # keep it same if type(input_files) == list: ext = splitext(basename(input_files[0]))[1][1:] if type(input_files) == str: ext = splitext(basename(input_files))[1][1:] if 'suffix' in self.task_parameters[task]: suffix = self.task_parameters[task]['suffix'] else: suffix = '' if 'input_multi' in self.task_parameters[task] and (self.task_parameters[task]['input_multi'] == 'paired' or self.task_parameters[task]['input_multi'] == 'all'): in_out = join(output_dir, dump_dir, splitext( basename(input_files[0]))[0] + suffix + '.' + ext) else: in_out = join(output_dir, dump_dir, splitext( basename(input_files))[0] + suffix + '.' 
+ ext) # add newly created output files to input feed to be used by next task if 'paired_output' in self.task_parameters[task]: if task not in self.input_feed: self.input_feed[task] = [input_files, in_out] # if it exists append the output else: self.input_feed[task].extend([input_files, in_out]) else: if task not in self.input_feed: self.input_feed[task] = [in_out] # if it exists append the output else: self.input_feed[task].append(in_out) return in_out def create_cmd(self, task, input_files, name=None): cmd = '' # if container task add container parameters if 'container' in self.task_parameters[task]: cmd = cmd + self.task_parameters[task]['container'] + ' ' # otherwise just add path to task cmd = cmd + self.task_parameters[task]['tool'] # if running sub command add the sub command if 'sub_tool' in self.task_parameters[task]: cmd = cmd + ' ' + self.task_parameters[task]['sub_tool'] # add the parameters cmd = cmd + ' ' + self.task_parameters[task]['args'] # If names true add names if 'named' in self.task_parameters[task]: cmd = cmd.replace('name_placeholder', name) # replace inputplaceholder with paired input files if 'input_multi' in self.task_parameters[task]: # replace input_file directory with task input_dir if 'input_dir' in self.task_parameters[task]: input_files = [join(self.task_parameters[task]['input_dir'], basename(input_file)) for input_file in input_files] if self.task_parameters[task]['input_multi'] == 'paired': # replace input_file directory with task input_dir if 'secondary_in_dir' in self.task_parameters[task]: input_files[1] = join(self.task_parameters[task]['secondary_in_dir'], basename(input_files[1])) if 'secondary_input' in self.task_parameters[task]: cmd = cmd.replace('input_placeholder', input_files[0]) cmd = cmd.replace( 'secondary_in_placeholder', input_files[1]) else: cmd = cmd.replace('input_placeholder', ' '.join(input_files)) elif self.task_parameters[task]['input_multi'] == 'all': if 'input_flag_repeat' in self.task_parameters[task]: flag_string = ' {} '.format( self.task_parameters[task]['input_flag_repeat']) else: flag_string = ' ' cmd = cmd.replace('input_placeholder', flag_string.join(input_files)) else: # replace input_file directory with task input_dir if 'input_dir' in self.task_parameters[task]: input_files = join(self.task_parameters[task]['input_dir'], basename(input_files)) cmd = cmd.replace('input_placeholder', input_files) # replace output_placeholder with first file in_out = self.in_out_feed(task, input_files) cmd = cmd.replace('output_placeholder', in_out) if 'secondary_output' in self.task_parameters[task]: cmd = cmd.replace('secondary_out_placeholder', self.secondary_output(task, input_files)) return cmd def include_pipeline(self, include_task, i): task = include_task.split(':')[1] include_config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation()) include_config.read(self.include[task]) for parameter in self.task_parameters[include_task]: include_config['PROJECT'][parameter] = self.task_parameters[include_task][parameter] include_config['PROJECT']['input_files'] = ' '.join(self.input_feed[self.task_parameters[include_task]['input_files']]) pipeline = Pipe(include_config, self.dryrun, self.verbose, read_from='configparser') # add task parameters self.task_parameters.update(pipeline.task_parameters) # add commands to cmd_feed self.cmd_feed.update(pipeline.cmd_feed) # add pipeline output files to input_feed self.input_feed.update(pipeline.input_feed) # This binds pipes self.input_feed[include_task] = 
pipeline.input_feed[include_config['PROJECT']['output_files']] # add task cmds to list self.number_included += i self.runtime_task_list.pop(self.number_included) self.runtime_task_list[self.number_included:self.number_included] = pipeline.runtime_task_list def create_pipeline(self): for i, task in enumerate(self.task_list): if task.startswith('INCLUDE:'): self.include_pipeline(task, i) continue # For multiple input if 'input_multi' in self.task_parameters[task]: # For paired if self.task_parameters[task]['input_multi'] == 'paired': # For paired and named if 'named' in self.task_parameters[task]: for name, input_files in zip(self.input_names['input_names'], chunks(self.input_feed[self.task_parameters[task]['input_from']], 2)): if task not in self.cmd_feed: self.cmd_feed[task] = [ self.create_cmd(task, input_files, name)] # if it exists append the output else: self.cmd_feed[task].append( self.create_cmd(task, input_files, name)) # Paired and not named else: for input_files in chunks(self.input_feed[self.task_parameters[task]['input_from']], 2): if task not in self.cmd_feed: self.cmd_feed[task] = [ self.create_cmd(task, input_files)] # if it exists append the output else: self.cmd_feed[task].append( self.create_cmd(task, input_files)) # Can't name if input is all elif self.task_parameters[task]['input_multi'] == 'all': self.cmd_feed[task] = [ self.create_cmd(task, self.input_feed[self.task_parameters[task]['input_from']])] else: # Single named input if 'named' in self.task_parameters[task]: for name, input_file in zip(self.input_names['input_names'], self.input_feed[self.task_parameters[task]['input_from']]): if task not in self.cmd_feed: self.cmd_feed[task] = [ self.create_cmd(task, input_file, name)] # if it exists append the output else: self.cmd_feed[task].append( self.create_cmd(task, input_file, name)) # Single Unnamed input else: for input_file in self.input_feed[self.task_parameters[task]['input_from']]: if task not in self.cmd_feed: self.cmd_feed[task] = [ self.create_cmd(task, input_file)] # if it exists append the output else: self.cmd_feed[task].append( self.create_cmd(task, input_file)) def run_pipeline(self): for task in self.runtime_task_list: for cmd in self.cmd_feed[task]: if self.dryrun is not True: if 'pass' not in self.task_parameters[task]: task_instance = RunPipe( cmd, task, self.project_output_dir, self.project_name, self.verbose) task_instance.run_task() else: pass else: if 'pass' in self.task_parameters[task]: sys.stdout.write('Passed:\n' + cmd + '\n') else: sys.stdout.write('\n' + cmd + '\n') self.remove_files() def remove_files(self): for task in self.runtime_task_list: if 'remove' in self.task_parameters[task] and self.task_parameters[task]['remove'] == 'True': for marked_for_removal in self.input_feed[task]: if self.dryrun is True: sys.stdout.write( 'Will be removed: {}\n'.format(marked_for_removal)) else: os.remove(marked_for_removal) def secondary_output(self, task, input_files): # Use task specific output_dir if given if 'output_dir' in self.task_parameters[task]: output_dir = self.task_parameters[task]['output_dir'] else: output_dir = self.project_output_dir if 'secondary_dump_dir' in self.task_parameters[task]: secondary_dump_dir = self.task_parameters[task]['secondary_dump_dir'] elif 'dump_dir' in self.task_parameters[task]: secondary_dump_dir = self.task_parameters[task]['dump_dir'] else: secondary_dump_dir = '' if self.dryrun is not True: mkdir(join(output_dir, secondary_dump_dir)) if 'secondary_ext' in self.task_parameters[task]: secondary_ext = 
self.task_parameters[task]['secondary_ext'] else: secondary_ext = '' if 'secondary_suffix' in self.task_parameters[task]: secondary_suffix = self.task_parameters[task]['secondary_suffix'] else: secondary_suffix = '' if 'input_multi' in self.task_parameters[task] and (self.task_parameters[task]['input_multi'] == 'paired' or self.task_parameters[task]['input_multi'] == 'all'): secondary_out = join(output_dir, secondary_dump_dir, splitext( basename(input_files[0]))[0] + secondary_suffix + '.' + secondary_ext) else: secondary_out = join(output_dir, secondary_dump_dir, splitext( basename(input_files))[0] + secondary_suffix + '.' + secondary_ext) return secondary_out def mkdir(directory): if not exists(directory): os.makedirs(directory) def chunks(l, n): '''Yield successive n-size chunks from l.''' for i in range(0, len(l), n): yield l[i:i + n]
py
1a3dec12906483b5b6ee9e6989b4a03f1dbd83c8
# -*- coding: utf-8 -*- # Copyright (C) 2013-2014 Mag. Christian Tanzer All rights reserved # Glasauergasse 32, A--1130 Wien, Austria. [email protected] # #*** <License> ************************************************************# # This module is part of the package GTW.__test__. # # This module is licensed under the terms of the BSD 3-Clause License # <http://www.c-tanzer.at/license/bsd_3c.html>. # #*** </License> ***********************************************************# # #++ # Name # GTW.__test__.Certificate # # Purpose # Test GTW.OMP.Auth.Certificate # # Revision Dates # 16-Jan-2013 (CT) Creation # 25-Feb-2013 (CT) Add tests # 26-Apr-2013 (CT) Remove `cert_id` # ««revision-date»»··· #-- from _GTW.__test__.model import * import datetime _test_create = """ >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope ... >>> Auth = scope.Auth >>> a1 = Auth.Account (name = "foo@bar", raw = True) >>> a2 = Auth.Account (name = "foo@baz", raw = True) >>> Auth.Certificate.query_s ().all () [] >>> c1 = Auth.Certificate (email = "foo@bar", validity = ("20130116", ), raw = True) >>> c2 = Auth.Certificate (email = "foo@baz", validity = ("20130131", ), raw = True) >>> scope.commit () >>> all_cs = Auth.Certificate.query_s ().all () >>> prepr (all_cs) [Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), Auth.Certificate ('foo@baz', ('2013-01-31', ), '')] >>> for c in all_cs : ... print (c.as_code (), c.validity.start) Auth.Certificate ('foo@bar', ('2013-01-16', ), '', ) 2013-01-16 00:00:00 Auth.Certificate ('foo@baz', ('2013-01-31', ), '', ) 2013-01-31 00:00:00 >>> c3 = Auth.Certificate (email = "foo@baz", validity = ("20150131", ), raw = True) >>> scope.commit () >>> all_cs = Auth.Certificate.query_s ().all () >>> prepr (all_cs) [Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), Auth.Certificate ('foo@baz', ('2013-01-31', ), ''), Auth.Certificate ('foo@baz', ('2015-01-31', ), '')] >>> prepr ((c1, c1.alive)) (Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), False) >>> c1.pem = b"fake value to fool `alive`" >>> prepr ((c1, c1.alive)) (Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), True) >>> rdf = MOM.Attr.A_Date_Time.now () + datetime.timedelta (days = +1) >>> rdp = MOM.Attr.A_Date_Time.now () + datetime.timedelta (days = -1) >>> with expect_except (MOM.Error.Invariants) : ... _ = c1.set (revocation_date = rdf) # doctest:+ELLIPSIS Invariants: Condition `valid_revocation_date` : The revocation date cannot be in the future. (revocation_date <= today) revocation_date = ... today = ... >>> _ = c1.set (revocation_date = rdp) >>> prepr ((c1, c1.alive)) (Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), False) >>> scope.commit () >>> prepr ((c1, c1.alive)) (Auth.Certificate ('foo@bar', ('2013-01-16', ), ''), False) >>> c4 = Auth.Certificate (email = "foo@foo", validity = (), raw = True) >>> prepr ((c4, c4.alive)) (Auth.Certificate ('foo@foo', (), ''), None) >>> c4.validity.start = "20130225" >>> prepr ((c4, c4.alive)) (Auth.Certificate ('foo@foo', ('2013-02-25', ), ''), False) >>> c4.pem = b"fake value to fool `alive`" >>> prepr ((c4, c4.alive)) (Auth.Certificate ('foo@foo', ('2013-02-25', ), ''), True) >>> c5 = Auth.Certificate (email = "bar@foo", validity = ("20130225", ), raw = True) >>> prepr ((c5, c5.alive)) (Auth.Certificate ('bar@foo', ('2013-02-25', ), ''), False) >>> for c in Auth.Certificate.query ().order_by (Q.cert_id) : ... 
(int (c.pid), int (c.cert_id or 0) or None) (3, 1) (4, 2) (5, 3) (6, 4) (7, 5) """ class _Certificate_Scaffold_ (Scaffold.__class__) : Backend_Parameters = dict \ ( Scaffold.Backend_Parameters , HPS = "'hps:///test.hps'" , SQL = "'sqlite:///test.sql'" , sq = "'sqlite:///test.sql'" ) # end class _Certificate_Scaffold_ Scaffold = _Certificate_Scaffold_ () __test__ = Scaffold.create_test_dict \ ( dict ( test_create = _test_create ) ) ### __END__ GTW.__test__.Certificate
py
1a3dec847428e966a2a8d72ca73b3dc88871f951
from .bbox import bbox_overlaps from .box_iou_rotated import box_iou_rotated from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive from .cc_attention import CrissCrossAttention from .corner_pool import CornerPool from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack, ModulatedDeformRoIPoolPack, deform_roi_pool) from .deprecated_wrappers import Conv2d_deprecated as Conv2d from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d from .deprecated_wrappers import Linear_deprecated as Linear from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss, sigmoid_focal_loss, softmax_focal_loss) from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu from .info import (get_compiler_version, get_compiling_cuda_version, get_onnxruntime_op_path) from .masked_conv import MaskedConv2d, masked_conv2d from .modulated_deform_conv import (ModulatedDeformConv2d, ModulatedDeformConv2dPack, modulated_deform_conv2d) from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms from .point_sample import (SimpleRoIAlign, point_sample, rel_roi_point_to_rel_img_point) from .psa_mask import PSAMask from .roi_align import RoIAlign, roi_align from .roi_align_rotated import RoIAlignRotated, roi_align_rotated from .roi_pool import RoIPool, roi_pool from .saconv import SAConv2d from .sync_bn import SyncBatchNorm from .tin_shift import TINShift, tin_shift from .upfirdn2d import upfirdn2d __all__ = [ 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe', 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack', 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack', 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss', 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss', 'get_compiler_version', 'get_compiling_cuda_version', 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d', 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack', 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d', 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask', 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign', 'SAConv2d', 'TINShift', 'tin_shift', 'box_iou_rotated', 'nms_rotated', 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu', 'RoIAlignRotated', 'roi_align_rotated' ]
py
1a3decc99f15f5f13a8fea97259542cf3bfe4cd7
"""This module implements the SocketServerPort, which basically implements a serial like interface using a socket server. """ import select class SocketPort(object): def __init__(self, skt): self.socket = skt self.baud = 0 def read_byte(self, block=False): """Reads a byte from the bus. This function will return None if no character was read within the designated timeout. The max Return Delay time is 254 x 2 usec = 508 usec (the default is 500 usec). This represents the minimum time between receiving a packet and sending a response. """ if block: readable = True else: readable, _, _ = select.select([self.socket.fileno()], [], [], 0.1) if readable: data = self.socket.recv(1) if data: return data[0] def set_baud(self, baud): """Baud doesn't really mean anything for a network socket.""" self.baud = baud print('Baud set to: {}'.format(baud)) def write_packet(self, packet_data): """Function implemented by a derived class which actually writes the data to a device. """ self.socket.send(packet_data)
py
1a3dece8d547a477145b7a33531bbe2a5a69c314
# import modules
import numpy as np
from numpy.linalg import norm
import astropy.units as u
from astropy.constants import G
from pathlib import Path

# import plotting modules
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

# my modules
from galaxy.galaxy import Galaxy
from galaxy.centerofmass import CenterOfMass
from galaxy.massprofile import MassProfile
from galaxy.timecourse import TimeCourse
from galaxy.plots import Plots


def make_plot(gname, snap, lim, fname):
    try:
        gal = Galaxy(gname, snap, usesql=True, ptype=2)
        t = gal.time.value / 1000
    except TypeError:
        gal = Galaxy(gname, snap, datadir=datadir, ptype=2)
        t = gal.time.value / 1000

    com = CenterOfMass(gal)
    tc = TimeCourse(usesql=True)
    com_xyz, com_vxyz = tc.get_one_com(gname, snap)
    # gal_xyzD, gal_vxyzD = com.center_com(com_xyz, com_vxyz)

    # determine the rotated velocity vectors
    rn, _ = com.rotate_frame(com_p=com_xyz, com_v=com_vxyz)

    p.plot_density(rn, gname, snap, t, pngout=True, lim=lim, fname=fname)
    plt.close('all')


p = Plots()
limits = {'MW': (50, 80), 'M31': (50, 80), 'M33': (30, 100)}
cmd = ''
datadir = Path.home() / 'HighRes'
cmdfile = 'make_densities.sh'

with open(cmdfile, 'w') as fp:
    fp.write(cmd)

for gname in ('MW', 'M31', 'M33'):
    print(gname)

    group = 'early'
    for snap in np.arange(0, 300):
        print(snap, end=' ')
        lim = limits[gname][0]
        fname = f'png_files/{gname}_density_{group}_{snap:03}.png'
        make_plot(gname, snap, lim, fname=fname)

    cmd += f'ffmpeg -r 10 -start_number 0 -s 1920x1080'
    cmd += f' -i png_files/{gname}_density_early_%03d.png'
    cmd += f' -vcodec libx264 -vf fps=25 -crf 25 -pix_fmt yuv420p {gname}_early.mp4\n'
    with open(cmdfile, 'w') as fp:
        fp.write(cmd)

    for snap in np.arange(290, 802):
        print(snap, end=' ')
        group = 'late'
        lim = limits[gname][1]
        fname = f'png_files/{gname}_density_{group}_{snap:03}.png'
        make_plot(gname, snap, lim, fname=fname)

    cmd += f'ffmpeg -r 10 -start_number 290 -s 1920x1080'
    cmd += f' -i png_files/{gname}_density_late_%03d.png'
    cmd += f' -vcodec libx264 -vf fps=25 -crf 25 -pix_fmt yuv420p {gname}_late.mp4\n'
    with open(cmdfile, 'w') as fp:
        fp.write(cmd)
py
1a3dedbfc7dd75ee0ce3c43fb73b0dbd8e791773
''' This code is due to Yutong Deng (@yutongD), Yingtong Dou (@Yingtong Dou) and UIC BDSC Lab DGFraud (A Deep Graph-based Toolbox for Fraud Detection) https://github.com/safe-graph/DGFraud ''' import tensorflow as tf import argparse from algorithms.Player2Vec.Player2Vec import Player2Vec import time from utils.data_loader import * from utils.utils import * # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' # init the common args, expect the model specific args def arg_parser(): parser = argparse.ArgumentParser() parser.add_argument('--seed', type=int, default=123, help='Random seed.') parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']") parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.') parser.add_argument('--batch_size', type=int, default=1000) parser.add_argument('--momentum', type=int, default=0.9) parser.add_argument('--learning_rate', default=0.001, help='the ratio of training set in whole dataset.') # GCN args parser.add_argument('--hidden1', default=16, help='Number of units in GCN hidden layer 1.') parser.add_argument('--hidden2', default=16, help='Number of units in GCN hidden layer 2.') parser.add_argument('--gcn_output', default=4, help='gcn output size.') args = parser.parse_args() return args def set_env(args): tf.reset_default_graph() np.random.seed(args.seed) tf.set_random_seed(args.seed) # get batch data def get_data(ix, int_batch, train_size): if ix + int_batch >= train_size: ix = train_size - int_batch end = train_size else: end = ix + int_batch return train_data[ix:end], train_label[ix:end] def load_data(args): if args.dataset_str == 'dblp': adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp() node_size = features.shape[0] node_embedding = features.shape[1] class_size = train_label.shape[1] train_size = len(train_data) paras = [node_size, node_embedding, class_size, train_size] return adj_list, features, train_data, train_label, test_data, test_label, paras def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras): with tf.Session() as sess: adj_data = [normalize_adj(adj) for adj in adj_list] meta_size = len(adj_list) net = Player2Vec(session=sess, class_size=paras[2], gcn_output1=args.hidden1, meta=meta_size, nodes=paras[0], embedding=paras[1], encoding=args.gcn_output) sess.run(tf.global_variables_initializer()) # net.load(sess) t_start = time.clock() for epoch in range(args.epoch_num): train_loss = 0 train_acc = 0 count = 0 for index in range(0, paras[3], args.batch_size): batch_data, batch_label = get_data(index, args.batch_size, paras[3]) loss, acc, pred, prob = net.train(features, adj_data, batch_label, batch_data, args.learning_rate, args.momentum) print("batch loss: {:.4f}, batch acc: {:.4f}".format(loss, acc)) # print(prob, pred) train_loss += loss train_acc += acc count += 1 train_loss = train_loss / count train_acc = train_acc / count print("epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}".format(epoch, train_loss, train_acc)) # net.save(sess) t_end = time.clock() print("train time=", "{:.5f}".format(t_end - t_start)) print("Train end!") test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label, test_data) print("test acc:", test_acc) if __name__ == "__main__": args = arg_parser() set_env(args) adj_list, features, train_data, train_label, test_data, test_label, paras = load_data(args) train(args, adj_list, features, train_data, train_label, test_data, test_label, paras)
py
1a3dedc28485b25d1a80fb521524815df7d679d6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python for AHDA.

Part 5, Example 10.
"""

# Named Entity Recognition
import nltk

# The NE chunker resource is named 'maxent_ne_chunker' (not 'max_ent_chunker');
# word_tokenize and pos_tag additionally need 'punkt' and
# 'averaged_perceptron_tagger'.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')

print()
sentence = "President Trump visited the United Nations headquarters in New York."
tokens = nltk.word_tokenize(sentence)
pos_tags = nltk.pos_tag(tokens)
print(nltk.ne_chunk(pos_tags))
print()

# as a one liner if you prefer
# print(nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sentence))))
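# --- Editor's optional extension (a sketch, not part of the original example) ---
# One way to pull only the named entities out of the chunk tree; the entity
# labels (PERSON, GPE, ORGANIZATION, ...) depend on the bundled NLTK model.
tree = nltk.ne_chunk(pos_tags)
for subtree in tree.subtrees():
    if subtree.label() != 'S':  # skip the root sentence node
        entity = " ".join(word for word, tag in subtree.leaves())
        print(subtree.label(), entity)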
py
1a3def9a51f4e6487187b8bf3e9ab5431d5cf4c5
DATABASE_TYPE = "mongodb"
DATABASE_CONN = "mongodb://localhost:27017/rpki-read"
BGP_SOURCE = "NA"
UPDATE_INTERVAL_STATS = 17
UPDATE_INTERVAL_FACTOR = 19
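# --- Editor's illustrative sketch (not part of the original settings file) ---
# A minimal example of how a consumer might read these values; the module name
# 'settings' and the use of pymongo here are assumptions, not rpki-read's API.
#
#     from pymongo import MongoClient
#     import settings
#
#     if settings.DATABASE_TYPE == "mongodb":
#         client = MongoClient(settings.DATABASE_CONN)
#         db = client.get_default_database()  # database name taken from the URI
#         print(db.name)                      # -> "rpki-read"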
py
1a3deffb8f04c6adf73bff27d7fb473cca588223
#! /usr/bin/env python # -*- coding: utf-8 -*- import wx import sys reload(sys) sys.setdefaultencoding('utf-8') import os import time sys.path.append(os.path.abspath("..")) from mem import RTxxx_memcore from ui import RTxxx_uidef from ui import uidef from ui import uivar from ui import uilang kRetryPingTimes = 5 class secBootRTxxxMain(RTxxx_memcore.secBootRTxxxMem): def __init__(self, parent): RTxxx_memcore.secBootRTxxxMem.__init__(self, parent) self.RTxxx_isAllInOneActionTaskPending = False if self.mcuSeries == uidef.kMcuSeries_iMXRTxxx: self._RTxxx_initMain() def _RTxxx_initMain( self ): self.connectStage = uidef.kConnectStage_Rom self.isBootableAppAllowedToView = False self.lastTime = None self.isAccessMemTaskPending = False self.accessMemType = '' self.isThereBoardConnection = False def _RTxxx_startGaugeTimer( self ): if not self.RTxxx_isAllInOneActionTaskPending: self.lastTime = time.time() self.initGauge() def _RTxxx_stopGaugeTimer( self ): if not self.RTxxx_isAllInOneActionTaskPending: self.deinitGauge() self.updateCostTime() def RTxxx_callbackSetMcuSeries( self ): self.RTxxx_initUi() self.RTxxx_initGen() self.RTxxx_initRun() self._RTxxx_initMain() self.RTxxx_setTargetSetupValue() def RTxxx_callbackSetMcuDevice( self ): self.RTxxx_setTargetSetupValue() needToPlaySound = False self.RTxxx_setSecureBootSeqColor(needToPlaySound) def RTxxx_callbackSetBootDevice( self ): self.RTxxx_setTargetSetupValue() needToPlaySound = False self.RTxxx_setSecureBootSeqColor(needToPlaySound) def _RTxxx_retryToPingBootloader( self ): pingStatus = False pingCnt = kRetryPingTimes while (not pingStatus) and pingCnt > 0: pingStatus = self.RTxxx_pingRom() if pingStatus: break pingCnt = pingCnt - 1 if self.isUsbhidPortSelected: time.sleep(2) return pingStatus def _RTxxx_connectFailureHandler( self ): self.connectStage = uidef.kConnectStage_Rom self.updateConnectStatus('red') usbIdList = self.RTxxx_getUsbid() self.setPortSetupValue(self.connectStage, usbIdList, False, False) self.isBootableAppAllowedToView = False def _RTxxx_connectStateMachine( self, showError=True ): connectSteps = RTxxx_uidef.kConnectStep_Normal self.getOneStepConnectMode() retryToDetectUsb = False if self.isOneStepConnectMode: if self.connectStage == uidef.kConnectStage_Reset or self.connectStage == uidef.kConnectStage_ExternalMemory: connectSteps = RTxxx_uidef.kConnectStep_Fast - 1 elif self.connectStage == uidef.kConnectStage_Rom: connectSteps = RTxxx_uidef.kConnectStep_Fast retryToDetectUsb = True else: pass while connectSteps: if not self.updatePortSetupValue(retryToDetectUsb, showError): if self.connectStage == uidef.kConnectStage_Rom: if showError: self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckIsp'][self.languageIndex]) self._RTxxx_connectFailureHandler() return if self.connectStage == uidef.kConnectStage_Rom: self.RTxxx_connectToDevice(self.connectStage) if self._RTxxx_retryToPingBootloader(): self.RTxxx_getMcuDeviceInfoViaRom() self.updateConnectStatus('green') self.connectStage = uidef.kConnectStage_ExternalMemory else: self.updateConnectStatus('red') if showError: self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckIsp'][self.languageIndex]) return elif self.connectStage == uidef.kConnectStage_ExternalMemory: if self.RTxxx_configureBootDevice(): self.RTxxx_getBootDeviceInfoViaRom() self.connectStage = uidef.kConnectStage_Reset self.updateConnectStatus('blue') else: if showError: self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToCfgBootDevice'][self.languageIndex]) 
self._RTxxx_connectFailureHandler() return elif self.connectStage == uidef.kConnectStage_Reset: self.RTxxx_resetMcuDevice() self.isBootableAppAllowedToView = False self.connectStage = uidef.kConnectStage_Rom self.updateConnectStatus('black') usbIdList = self.RTxxx_getUsbid() self.setPortSetupValue(self.connectStage, usbIdList, True, True) self.RTxxx_connectToDevice(self.connectStage) else: pass connectSteps -= 1 def RTxxx_callbackConnectToDevice( self ): self._RTxxx_startGaugeTimer() self.printLog("'Connect to xxx' button is clicked") if not self.isSbFileEnabledToGen: self._RTxxx_connectStateMachine(True) else: if not self.isThereBoardConnection: if self.connectStage == uidef.kConnectStage_Rom: self.initSbAppBdfilesContent() else: # It means there is board connection self.isThereBoardConnection = True self._RTxxx_connectStateMachine(False) if not self.isThereBoardConnection: if self.connectStage == uidef.kConnectStage_Rom: # It means there is no board connection, but we need to set it as True for SB generation self.isThereBoardConnection = True self.RTxxx_isDeviceEnabledToOperate = False self.RTxxx_configureBootDevice() self.connectStage = uidef.kConnectStage_Reset self.updateConnectStatus('blue') else: self.isThereBoardConnection = False else: self.isThereBoardConnection = False self.RTxxx_isDeviceEnabledToOperate = True self.connectStage = uidef.kConnectStage_Rom self.updateConnectStatus('black') self._RTxxx_stopGaugeTimer() def RTxxx_callbackSetSecureBootType( self ): self.setCostTime(0) self.RTxxx_setSecureBootSeqColor() def RTxxx_task_doAllInOneAction( self ): while True: if self.RTxxx_isAllInOneActionTaskPending: self._RTxxx_doAllInOneAction() self.RTxxx_isAllInOneActionTaskPending = False self._RTxxx_stopGaugeTimer() time.sleep(1) def _RTxxx_doAllInOneAction( self ): allInOneSeqCnt = 1 status = False while allInOneSeqCnt: status = self._RTxxx_doGenImage() if not status: break status = self._RTxxx_doFlashImage() if not status: break allInOneSeqCnt -= 1 if status and self.isAutomaticImageReadback: self.showPageInMainBootSeqWin(uidef.kPageIndex_BootDeviceMemory) self._RTxxx_doViewMem() self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_AllInOne, status) def RTxxx_callbackAllInOneAction( self ): self._RTxxx_startGaugeTimer() self.RTxxx_isAllInOneActionTaskPending = True def _RTxxx_doGenImage( self ): status = False self._RTxxx_startGaugeTimer() self.printLog("'Generate Bootable Image' button is clicked") if self.createMatchedAppJsonfile(): if self.RTxxx_genBootableImage(): status = True self._RTxxx_stopGaugeTimer() self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_GenImage, status) return status def RTxxx_callbackGenImage( self ): if not self.isToolRunAsEntryMode: self._RTxxx_doGenImage() else: self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex]) def _RTxxx_doFlashImage( self ): status = False if self.connectStage == uidef.kConnectStage_Reset: self._RTxxx_startGaugeTimer() self.printLog("'Load Bootable Image' button is clicked") if not self.RTxxx_flashBootableImage(): self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_failToFlashImage'][self.languageIndex]) else: self.isBootableAppAllowedToView = True if self.RTxxx_burnBootDeviceOtps(): status = True self._RTxxx_stopGaugeTimer() else: self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex]) self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_FlashImage, status) return status def RTxxx_callbackFlashImage( self ): 
if not self.isToolRunAsEntryMode: self._RTxxx_doFlashImage() else: self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex]) def _RTxxx_doViewMem( self ): if self.connectStage == uidef.kConnectStage_Reset: if self.isBootableAppAllowedToView: self._RTxxx_startGaugeTimer() self.RTxxx_readProgrammedMemoryAndShow() self._RTxxx_stopGaugeTimer() else: self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_hasnotFlashImage'][self.languageIndex]) else: self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex]) def RTxxx_callbackViewMem( self ): self._RTxxx_doViewMem() def RTxxx_switchToolRunMode( self ): self.applyOtpOperToRunMode()
py
1a3df20a57eb7615546dd84f6d21c234c43b5751
from django.db import models
from django.contrib.auth.models import (
    AbstractBaseUser, BaseUserManager, PermissionsMixin
)


# Create your models here.
class UserManager(BaseUserManager):
    """Manager class"""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a new user"""
        if not email:
            raise ValueError("User must have an email address")
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password):
        """Create and save a new super user"""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save()
        return user


class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserManager()

    USERNAME_FIELD = 'email'
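# --- Editor's usage sketch (assumption: this app is registered as "core") ---
# To activate the custom model, point Django at it in settings.py:
#
#     AUTH_USER_MODEL = 'core.User'
#
# after which accounts are created through the manager defined above:
#
#     from django.contrib.auth import get_user_model
#     user = get_user_model().objects.create_user('test@example.com', 'pass12345')
#     admin = get_user_model().objects.create_superuser('admin@example.com', 'pass12345')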
py
1a3df24ff32301dea601f3ad77de38d42fa2d9ae
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from auto_scan_test import OPConvertAutoScanTest, BaseNet from hypothesis import reproduce_failure import hypothesis.strategies as st import numpy as np import unittest import paddle import random class Net(BaseNet): """ simple Net """ def forward(self, x): """ forward """ scale = self.config["scale"] if self.config['isTensor']: scale = paddle.to_tensor(scale) x = paddle.scale( x, scale=scale, bias=self.config["bias"], bias_after_scale=self.config["bias_after_scale"]) return x class TestScaleConvert(OPConvertAutoScanTest): """ api: paddle.scale OPset version: 7, 9, 15 """ def sample_convert_config(self, draw): input_shape = draw( st.lists( st.integers( min_value=2, max_value=20), min_size=2, max_size=5)) # int32, int64 has a bug dtype = draw(st.sampled_from(["float32", "float64"])) scale = draw(st.floats(min_value=-20, max_value=20)) isTensor = draw(st.booleans()) bias = draw(st.floats(min_value=-20, max_value=20)) bias_after_scale = draw(st.booleans()) config = { "op_names": ["scale"], "test_data_shapes": [input_shape], "test_data_types": [[dtype]], "opset_version": [7, 9, 15], "input_spec_shape": [], "scale": scale, "bias": bias, "bias_after_scale": bias_after_scale, "isTensor": isTensor, } models = Net(config) return (config, models) def test(self): self.run_and_statis(max_examples=30) if __name__ == "__main__": unittest.main()
py
1a3df256b0189c3ecc3bd494cf0258ace21e061f
""" ================================================================================ mcerp: Real-time latin-hypercube-sampling-based Monte Carlo Error Propagation ================================================================================ Generalizes mathematical operators that work on numeric objects (from the math module or numpy) compatible with objects with uncertainty distributions Author: Abraham Lee Copyright: 2013 """ from mcerp import UncertainFunction, to_uncertain_func import numpy as np __author__ = 'Abraham Lee' def abs(x): """ Absolute value """ if isinstance(x, UncertainFunction): mcpts = np.abs(x._mcpts) return UncertainFunction(mcpts) else: return np.abs(x) def acos(x): """ Inverse cosine """ if isinstance(x, UncertainFunction): mcpts = np.arccos(x._mcpts) return UncertainFunction(mcpts) else: return np.arccos(x) def acosh(x): """ Inverse hyperbolic cosine """ if isinstance(x, UncertainFunction): mcpts = np.arccosh(x._mcpts) return UncertainFunction(mcpts) else: return np.arccosh(x) def asin(x): """ Inverse sine """ if isinstance(x, UncertainFunction): mcpts = np.arcsin(x._mcpts) return UncertainFunction(mcpts) else: return np.arcsin(x) def asinh(x): """ Inverse hyperbolic sine """ if isinstance(x, UncertainFunction): mcpts = np.arcsinh(x._mcpts) return UncertainFunction(mcpts) else: return np.arcsinh(x) def atan(x): """ Inverse tangent """ if isinstance(x, UncertainFunction): mcpts = np.arctan(x._mcpts) return UncertainFunction(mcpts) else: return np.arctan(x) def atanh(x): """ Inverse hyperbolic tangent """ if isinstance(x, UncertainFunction): mcpts = np.arctanh(x._mcpts) return UncertainFunction(mcpts) else: return np.arctanh(x) def ceil(x): """ Ceiling function (round towards positive infinity) """ if isinstance(x, UncertainFunction): mcpts = np.ceil(x._mcpts) return UncertainFunction(mcpts) else: return np.ceil(x) def cos(x): """ Cosine """ if isinstance(x, UncertainFunction): mcpts = np.cos(x._mcpts) return UncertainFunction(mcpts) else: return np.cos(x) def cosh(x): """ Hyperbolic cosine """ if isinstance(x, UncertainFunction): mcpts = np.cosh(x._mcpts) return UncertainFunction(mcpts) else: return np.cosh(x) def degrees(x): """ Convert radians to degrees """ if isinstance(x, UncertainFunction): mcpts = np.degrees(x._mcpts) return UncertainFunction(mcpts) else: return np.degrees(x) def exp(x): """ Exponential function """ if isinstance(x, UncertainFunction): mcpts = np.exp(x._mcpts) return UncertainFunction(mcpts) else: return np.exp(x) def expm1(x): """ Calculate exp(x) - 1 """ if isinstance(x, UncertainFunction): mcpts = np.expm1(x._mcpts) return UncertainFunction(mcpts) else: return np.expm1(x) def fabs(x): """ Absolute value function """ if isinstance(x, UncertainFunction): mcpts = np.fabs(x._mcpts) return UncertainFunction(mcpts) else: return np.fabs(x) def floor(x): """ Floor function (round towards negative infinity) """ if isinstance(x, UncertainFunction): mcpts = np.floor(x._mcpts) return UncertainFunction(mcpts) else: return np.floor(x) def hypot(x, y): """ Calculate the hypotenuse given two "legs" of a right triangle """ if isinstance(x, UncertainFunction) or isinstance(x, UncertainFunction): ufx = to_uncertain_func(x) ufy = to_uncertain_func(y) mcpts = np.hypot(ufx._mcpts, ufy._mcpts) return UncertainFunction(mcpts) else: return np.hypot(x, y) def ln(x): """ Natural logarithm (same as "log(x)") """ return log(x) def log(x): """ Natural logarithm """ if isinstance(x, UncertainFunction): mcpts = np.log(x._mcpts) return UncertainFunction(mcpts) else: 
return np.log(x) def log10(x): """ Base-10 logarithm """ if isinstance(x, UncertainFunction): mcpts = np.log10(x._mcpts) return UncertainFunction(mcpts) else: return np.log10(x) def log1p(x): """ Natural logarithm of (1 + x) """ if isinstance(x, UncertainFunction): mcpts = np.log1p(x._mcpts) return UncertainFunction(mcpts) else: return np.log1p(x) def radians(x): """ Convert degrees to radians """ if isinstance(x, UncertainFunction): mcpts = np.radians(x._mcpts) return UncertainFunction(mcpts) else: return np.radians(x) def sin(x): """ Sine """ if isinstance(x, UncertainFunction): mcpts = np.sin(x._mcpts) return UncertainFunction(mcpts) else: return np.sin(x) def sinh(x): """ Hyperbolic sine """ if isinstance(x, UncertainFunction): mcpts = np.sinh(x._mcpts) return UncertainFunction(mcpts) else: return np.sinh(x) def sqrt(x): """ Square-root function """ if isinstance(x, UncertainFunction): mcpts = np.sqrt(x._mcpts) return UncertainFunction(mcpts) else: return np.sqrt(x) def tan(x): """ Tangent """ if isinstance(x, UncertainFunction): mcpts = np.tan(x._mcpts) return UncertainFunction(mcpts) else: return np.tan(x) def tanh(x): """ Hyperbolic tangent """ if isinstance(x, UncertainFunction): mcpts = np.tanh(x._mcpts) return UncertainFunction(mcpts) else: return np.tanh(x) def trunc(x): """ Truncate the values to the integer value without rounding """ if isinstance(x, UncertainFunction): mcpts = np.trunc(x._mcpts) return UncertainFunction(mcpts) else: return np.trunc(x)
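# --- Editor's usage sketch (not part of the original module) ---
# A minimal example, assuming UncertainFunction can be constructed directly
# from an array of Monte Carlo samples, the same way the wrappers above
# rebuild it from ``mcpts``:
if __name__ == '__main__':
    samples = np.random.normal(loc=10.0, scale=0.5, size=10000)
    x = UncertainFunction(samples)
    y = sqrt(x)       # propagated sample-by-sample through np.sqrt
    z = sqrt(100.0)   # plain numbers fall straight through to numpy
    print(type(y).__name__, z)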
py
1a3df2ec4e2d8b6f416e186b6f9358b31cb5e725
#Author-Chun-Yu Ke #Description-Creates a VGmesh component. import adsk.core, adsk.fusion, adsk.cam, traceback import math import time # Globals _app = adsk.core.Application.cast(None) _ui = adsk.core.UserInterface.cast(None) _units = 'mm' # Command inputs _deltaAngle = adsk.core.DropDownCommandInput.cast(None) _outerRadius = adsk.core.ValueCommandInput.cast(None) _innerRadius = adsk.core.ValueCommandInput.cast(None) _numLayer = adsk.core.StringValueCommandInput.cast(None) _memberRadius = adsk.core.ValueCommandInput.cast(None) _meshSize = adsk.core.ValueCommandInput.cast(None) _vesselDiameter = adsk.core.ValueCommandInput.cast(None) _vesselHeight = adsk.core.TextBoxCommandInput.cast(None) _errMessage = adsk.core.TextBoxCommandInput.cast(None) _handlers = [] def run(context): try: global _app, _ui _app = adsk.core.Application.get() _ui = _app.userInterface cmdDef = _ui.commandDefinitions.itemById('VGmeshPythonScript') if not cmdDef: # Create a command definition. cmdDef = _ui.commandDefinitions.addButtonDefinition('VGmeshPythonScript', 'VGmesh', 'Creates a VGmesh component', 'Resources/VGmesh') # Connect to the command created event. onCommandCreated = VGmeshCommandCreatedHandler() cmdDef.commandCreated.add(onCommandCreated) _handlers.append(onCommandCreated) # Execute the command. cmdDef.execute() # prevent this module from being terminate when the script returns, because we are waiting for event handlers to fire adsk.autoTerminate(False) except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) class VGmeshCommandDestroyHandler(adsk.core.CommandEventHandler): def __init__(self): super().__init__() def notify(self, args): try: eventArgs = adsk.core.CommandEventArgs.cast(args) # when the command is done, terminate the script # this will release all globals which will remove all event handlers adsk.terminate() except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Verfies that a value command input has a valid expression and returns the # value if it does. Otherwise it returns False. This works around a # problem where when you get the value from a ValueCommandInput it causes the # current expression to be evaluated and updates the display. Some new functionality # is being added in the future to the ValueCommandInput object that will make # this easier and should make this function obsolete. def getCommandInputValue(commandInput, unitType): try: valCommandInput = adsk.core.ValueCommandInput.cast(commandInput) if not valCommandInput: return (False, 0) # Verify that the expression is valid. des = adsk.fusion.Design.cast(_app.activeProduct) unitsMgr = des.unitsManager if unitsMgr.isValidExpression(valCommandInput.expression, unitType): value = unitsMgr.evaluateExpression(valCommandInput.expression, unitType) return (True, value) else: return (False, 0) except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Event handler for the commandCreated event. class VGmeshCommandCreatedHandler(adsk.core.CommandCreatedEventHandler): def __init__(self): super().__init__() def notify(self, args): try: eventArgs = adsk.core.CommandCreatedEventArgs.cast(args) # Verify that a Fusion design is active. des = adsk.fusion.Design.cast(_app.activeProduct) if not des: _ui.messageBox('A Fusion design must be active when invoking this command.') return() defaultUnits = des.unitsManager.defaultLengthUnits # Determine whether to use inches or millimeters as the intial default. 
global _units _units = 'mm' deltaAngle = '30 deg' deltaAngleAttrib = des.attributes.itemByName('VGmesh', 'deltaAngle') if deltaAngleAttrib: deltaAngle = deltaAngleAttrib.value outerRadius = '0.0745' outerRadiusAttrib = des.attributes.itemByName('VGmesh', 'outerRadius') if outerRadiusAttrib: outerRadius = outerRadiusAttrib.value innerRadius = '0.0395' innerRadiusAttrib = des.attributes.itemByName('VGmesh', 'innerRadius') if innerRadiusAttrib: innerRadius = innerRadiusAttrib.value numLayer = '2' numLayerAttrib = des.attributes.itemByName('VGmesh', 'numLayer') if numLayerAttrib: numLayer = numLayerAttrib.value memberRadius = '0.0025' memberRadiusAttrib = des.attributes.itemByName('VGmesh', 'memberRadius') if memberRadiusAttrib: memberRadius = memberRadiusAttrib.value meshSize = '0.0100' meshSizeAttrib = des.attributes.itemByName('VGmesh', 'meshSize') if meshSizeAttrib: meshSize = meshSizeAttrib.value cmd = eventArgs.command cmd.isExecutedWhenPreEmpted = False inputs = cmd.commandInputs global _deltaAngle, _outerRadius, _innerRadius, _numLayer, _memberRadius, _meshSize, _vesselDiameter, _vesselHeight, _errMessage #, _imgInputEnglish, _imgInputMetric # Define the command dialog. # _imgInputEnglish = inputs.addImageCommandInput('VGmeshImageEnglish', '', 'Resources/VGmeshEnglish.png') # _imgInputEnglish.isFullWidth = True # _imgInputMetric = inputs.addImageCommandInput('VGmeshImageMetric', '', 'Resources/VGmeshMetric.png') # _imgInputMetric.isFullWidth = True _outerRadius = inputs.addValueInput('outerRadius', 'Outer Radius', _units, adsk.core.ValueInput.createByReal(float(outerRadius))) _innerRadius = inputs.addValueInput('innerRadius', 'Inner Radius', _units, adsk.core.ValueInput.createByReal(float(innerRadius))) _memberRadius = inputs.addValueInput('memberRadius', 'Member Radius', _units, adsk.core.ValueInput.createByReal(float(memberRadius))) _meshSize = inputs.addValueInput('meshSize', 'Mesh Size', _units, adsk.core.ValueInput.createByReal(float(meshSize))) _deltaAngle = inputs.addDropDownCommandInput('deltaAngle', 'Delta Angle', adsk.core.DropDownStyles.TextListDropDownStyle) if deltaAngle == '15 deg': _deltaAngle.listItems.add('15 deg', True) else: _deltaAngle.listItems.add('15 deg', False) if deltaAngle == '30 deg': _deltaAngle.listItems.add('30 deg', True) else: _deltaAngle.listItems.add('30 deg', False) if deltaAngle == '45 deg': _deltaAngle.listItems.add('45 deg', True) else: _deltaAngle.listItems.add('45 deg', False) _numLayer = inputs.addStringValueInput('numLayer', 'Number of Layers', numLayer) _vesselDiameter = inputs.addTextBoxCommandInput('vesselDiameter', 'Vessel Diameter', '', 1, True) _vesselHeight = inputs.addTextBoxCommandInput('vesselHeight', 'Vessel Height', '', 1, True) _errMessage = inputs.addTextBoxCommandInput('errMessage', '', '', 2, True) _errMessage.isFullWidth = True # Connect to the command related events. onExecute = VGmeshCommandExecuteHandler() cmd.execute.add(onExecute) _handlers.append(onExecute) onInputChanged = VGmeshCommandInputChangedHandler() cmd.inputChanged.add(onInputChanged) _handlers.append(onInputChanged) onValidateInputs = VGmeshCommandValidateInputsHandler() cmd.validateInputs.add(onValidateInputs) _handlers.append(onValidateInputs) onDestroy = VGmeshCommandDestroyHandler() cmd.destroy.add(onDestroy) _handlers.append(onDestroy) except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Event handler for the execute event. 
class VGmeshCommandExecuteHandler(adsk.core.CommandEventHandler): def __init__(self): super().__init__() def notify(self, args): try: eventArgs = adsk.core.CommandEventArgs.cast(args) # Save the current values as attributes. des = adsk.fusion.Design.cast(_app.activeProduct) attribs = des.attributes attribs.add('VGmesh', 'outerRadius', str(_outerRadius.value)) attribs.add('VGmesh', 'innerRadius', str(_innerRadius.value)) attribs.add('VGmesh', 'memberRadius', str(_memberRadius.value)) attribs.add('VGmesh', 'meshSize', str(_meshSize.value)) attribs.add('VGmesh', 'deltaAngle', _deltaAngle.selectedItem.name) attribs.add('VGmesh', 'numLayer', str(_numLayer.value)) # Get the current values. if _deltaAngle.selectedItem.name == '15 deg': deltaAngle = 15.0 * (math.pi/180) elif _deltaAngle.selectedItem.name == '30 deg': deltaAngle = 30.0 * (math.pi/180) elif _deltaAngle.selectedItem.name == '45 deg': deltaAngle = 45.0 * (math.pi/180) numLayer = int(_numLayer.value) memberRadius = _memberRadius.value meshSize = _meshSize.value outerRadius = _outerRadius.value innerRadius = _innerRadius.value # Create the gear. VGmeshComp = drawVGmesh(des, outerRadius, innerRadius, numLayer, meshSize, memberRadius, deltaAngle) if VGmeshComp: desc = 'VGmesh; Outer Radius: ' + des.unitsManager.formatInternalValue(outerRadius, _units, True) + '; ' desc += 'Inner Radius: ' + des.unitsManager.formatInternalValue(innerRadius, _units, True) + '; ' desc += 'Member Radius: ' + des.unitsManager.formatInternalValue(memberRadius, _units, True) + '; ' desc += 'Mesh Size: ' + des.unitsManager.formatInternalValue(meshSize, _units, True) + '; ' desc += 'Delta Angle: ' + str(deltaAngle * (180/math.pi)) + '; ' desc += 'Number Layers: ' + str(numLayer) VGmeshComp.description = desc except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Event handler for the inputChanged event. class VGmeshCommandInputChangedHandler(adsk.core.InputChangedEventHandler): def __init__(self): super().__init__() def notify(self, args): try: eventArgs = adsk.core.InputChangedEventArgs.cast(args) changedInput = eventArgs.input global _units # Update the pitch diameter value. meshSize = None result = getCommandInputValue(_meshSize, '') if result[0]: meshSize = result[1] if not meshSize == None: if _numLayer.value.isdigit(): numLayer = int(_numLayer.value) vesselHeight = numLayer * meshSize # The pitch dia has been calculated in inches, but this expects cm as the input units. des = adsk.fusion.Design.cast(_app.activeProduct) vesselHeightText = des.unitsManager.formatInternalValue(vesselHeight, _units, True) _vesselHeight.text = vesselHeightText else: _vesselHeight.text = '' else: _vesselHeight.text = '' outerRadius = None result = getCommandInputValue(_outerRadius, '') if result[0]: outerRadius = result[1] if not outerRadius == None: vesselDiameter = outerRadius * 2 vesselDiameterText = des.unitsManager.formatInternalValue(vesselDiameter, _units, True) _vesselDiameter.text = vesselDiameterText else: _vesselDiameter.text = '' except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Event handler for the validateInputs event. class VGmeshCommandValidateInputsHandler(adsk.core.ValidateInputsEventHandler): def __init__(self): super().__init__() def notify(self, args): try: eventArgs = adsk.core.ValidateInputsEventArgs.cast(args) _errMessage.text = '' # Verify that at lesat 4 teath are specified. if not _numLayer.value.isdigit(): _errMessage.text = 'The number of layers must be a whole number.' 
eventArgs.areInputsValid = False return else: numLayer = int(_numLayer.value) if _outerRadius.value <= _innerRadius.value: _errMessage.text = 'Outer Radius must be greater than Inner Radius.' eventArgs.areInputsValid = False return else: outerRadius = float(_outerRadius.value) if _deltaAngle.selectedItem.name == '15 deg': deltaAngle = 15.0 * (math.pi/180) elif _deltaAngle.selectedItem.name == '30 deg': deltaAngle = 20.0 * (math.pi/180) elif _deltaAngle.selectedItem.name == '45 deg': deltaAngle = 25.0 * (math.pi/180) des = adsk.fusion.Design.cast(_app.activeProduct) except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Calculate points along an involute curve. def involutePoint(baseCircleRadius, distFromCenterToInvolutePoint): try: # Calculate the other side of the right-angle triangle defined by the base circle and the current distance radius. # This is also the length of the involute chord as it comes off of the base circle. triangleSide = math.sqrt(math.pow(distFromCenterToInvolutePoint,2) - math.pow(baseCircleRadius,2)) # Calculate the angle of the involute. alpha = triangleSide / baseCircleRadius # Calculate the angle where the current involute point is. theta = alpha - math.acos(baseCircleRadius / distFromCenterToInvolutePoint) # Calculate the coordinates of the involute point. x = distFromCenterToInvolutePoint * math.cos(theta) y = distFromCenterToInvolutePoint * math.sin(theta) # Create a point to return. return adsk.core.Point3D.create(x, y, 0) except: if _ui: _ui.messageBox('Failed:\n{}'.format(traceback.format_exc())) # Builds a VGmesh. def drawVGmesh(design, outerRadius, innerRadius, numLayer, meshSize, memberRadius, deltaAngle): try: t_begin = time.time() # Create a new component by creating an occurrence. occs = design.rootComponent.occurrences mat = adsk.core.Matrix3D.create() newOcc = occs.addNewComponent(mat) newComp = adsk.fusion.Component.cast(newOcc.component) rootComp = design.rootComponent # Create a new sketch. 
sketches = newComp.sketches xzPlane = newComp.xZConstructionPlane baseSketch = sketches.add(xzPlane) origin = adsk.core.Point3D.create(0,0,0) # Create one unit component nr = math.floor((outerRadius - innerRadius) / meshSize) + 1 nt = round(2 * math.pi / deltaAngle) nz = numLayer global t t = deltaAngle / 2 z = 0 plate_unit_angle = adsk.core.ObjectCollection.create() support_unit_angle = adsk.core.ObjectCollection.create() for ir in range(0, nr): r1 = innerRadius + meshSize * ir if (ir % 2 == 0): y1 = r1 * math.tan(t) p1 = adsk.core.Point3D.create(r1, y1, z) p2 = adsk.core.Point3D.create(r1, -y1, z) create_bond(newComp, baseSketch, plate_unit_angle, p1, p2, memberRadius) if ir > 0: p0 = adsk.core.Point3D.create(r1 - meshSize, 0, z) create_bond(newComp, baseSketch, plate_unit_angle, p0, p1, memberRadius) create_bond(newComp, baseSketch, plate_unit_angle, p0, p2, memberRadius) p3 = adsk.core.Point3D.create(r1 / math.cos(t), 0, z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p3, memberRadius) create_bond(newComp, baseSketch, support_unit_angle, p2, p3, memberRadius) if ir < nr - 1: r2 = r1 + meshSize p4 = adsk.core.Point3D.create(r2 * math.cos(t), r2 * math.sin(t), z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p4, memberRadius) if ir > 0: r0 = r1 - meshSize p4 = adsk.core.Point3D.create(r0 * math.cos(t), r0 * math.sin(t), z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p4, memberRadius) else: p1 = adsk.core.Point3D.create(r1, 0, z) r0 = r1 - meshSize y0 = r0 * math.tan(t) p2 = adsk.core.Point3D.create(r0, y0, z) p3 = adsk.core.Point3D.create(r0, -y0, z) p4 = adsk.core.Point3D.create(r1 * math.cos(2*t), r1 * math.sin(2*t), z) create_bond(newComp, baseSketch, plate_unit_angle, p1, p2, memberRadius) create_bond(newComp, baseSketch, plate_unit_angle, p1, p3, memberRadius) create_bond(newComp, baseSketch, plate_unit_angle, p1, p4, memberRadius) x2 = r1 * math.cos(t) y2 = r1 * math.sin(t) p5 = adsk.core.Point3D.create(x2, y2, z + meshSize) p6 = adsk.core.Point3D.create(x2, -y2, z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p5, memberRadius) create_bond(newComp, baseSketch, support_unit_angle, p1, p6, memberRadius) if ir < nr - 1: r2 = r1 + meshSize p7 = adsk.core.Point3D.create(r2 / math.cos(t), 0, z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p7, memberRadius) if ir > 0: p7 = adsk.core.Point3D.create(r0 / math.cos(t), 0, z + meshSize) create_bond(newComp, baseSketch, support_unit_angle, p1, p7, memberRadius) plate_b = plate_unit_angle.item(0) plate_unit_angle.removeByIndex(0) support_b = support_unit_angle.item(0) support_unit_angle.removeByIndex(0) combineFeats = newComp.features.combineFeatures combineInput = combineFeats.createInput(plate_b, plate_unit_angle) combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation combineFeats.add(combineInput) combineInput = combineFeats.createInput(support_b, support_unit_angle) combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation combineFeats.add(combineInput) plate_b = newComp.bRepBodies.item(0) support_b = newComp.bRepBodies.item(1) # Copy and paste in theta plate = adsk.core.ObjectCollection.create() support = adsk.core.ObjectCollection.create() plate.add(plate_b) support.add(support_b) normal = baseSketch.xDirection.crossProduct(baseSketch.yDirection) normal.transformBy(baseSketch.transform) for it in range(1, nt): theta = deltaAngle * it transform = adsk.core.Matrix3D.create() 
transform.setToRotation(theta, normal, baseSketch.origin) new_plate = adsk.core.ObjectCollection.create() new_support = adsk.core.ObjectCollection.create() new_plate.add(plate_b.copyToComponent(newOcc)); new_support.add(support_b.copyToComponent(newOcc)); moveInput = newComp.features.moveFeatures.createInput(new_plate, transform); newComp.features.moveFeatures.add(moveInput); moveInput = newComp.features.moveFeatures.createInput(new_support, transform); newComp.features.moveFeatures.add(moveInput); for entity in new_plate: plate.add(entity) for entity in new_support: support.add(entity) plate_b = plate.item(0) plate.removeByIndex(0) support_b = support.item(0) support.removeByIndex(0) combineFeats = newComp.features.combineFeatures combineInput = combineFeats.createInput(plate_b, plate) combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation combineFeats.add(combineInput) combineInput = combineFeats.createInput(support_b, support) combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation combineFeats.add(combineInput) plate_b = newComp.bRepBodies.item(0) support_b = newComp.bRepBodies.item(1) # Copy and paste in z rot = adsk.core.Matrix3D.create() rot.setToRotation(deltaAngle / 2, normal, baseSketch.origin) bodies = adsk.core.ObjectCollection.create() bodies.add(plate_b) bodies.add(support_b) for iz in range(1, nz): transform = adsk.core.Matrix3D.create() transform.translation = adsk.core.Vector3D.create(0, 0, meshSize * iz) new_plate = adsk.core.ObjectCollection.create() new_plate.add(plate_b.copyToComponent(newOcc)); moveInput = newComp.features.moveFeatures.createInput(new_plate, transform); newComp.features.moveFeatures.add(moveInput); bodies.add(newComp.bRepBodies.item(newComp.bRepBodies.count - 1)) if iz % 2 == 1: moveInput = newComp.features.moveFeatures.createInput(new_plate, rot); newComp.features.moveFeatures.add(moveInput); if iz < nz - 1: new_support = adsk.core.ObjectCollection.create() new_support.add(support_b.copyToComponent(newOcc)); moveInput = newComp.features.moveFeatures.createInput(new_support, transform); newComp.features.moveFeatures.add(moveInput); if iz % 2 == 1: moveInput = newComp.features.moveFeatures.createInput(new_support, rot); newComp.features.moveFeatures.add(moveInput) bodies.add(newComp.bRepBodies.item(newComp.bRepBodies.count - 1)) for i in range(0, bodies.count - 1): combineFeats = newComp.features.combineFeatures that = adsk.core.ObjectCollection.create() that.add(bodies.item(i + 1)) combineInput = combineFeats.createInput(bodies.item(i), that) combineInput.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation combineFeats.add(combineInput) # Group everything used to create the gear in the timeline. 
timelineGroups = design.timeline.timelineGroups newOccIndex = newOcc.timelineObject.index baseSketchIndex = baseSketch.timelineObject.index timelineGroup = timelineGroups.add(newOccIndex, baseSketchIndex) timelineGroup.name = 'VGmesh' VGmeshValues = {} VGmeshValues['outerRadius'] = str(outerRadius) VGmeshValues['innerRadius'] = str(innerRadius) VGmeshValues['memberRadius'] = str(memberRadius) VGmeshValues['meshSize'] = str(meshSize) VGmeshValues['deltaAngle'] = str(deltaAngle) VGmeshValues['numLayer'] = str(numLayer) attrib = newComp.attributes.add('VGmesh', 'Values',str(VGmeshValues)) newComp.name = 'VGmesh' t_end = time.time() _ui.messageBox('Elapsed time: %s' % str(t_end - t_begin)) return newComp except Exception as error: _ui.messageBox("drawVGmesh Failed : " + str(error)) return None def create_bond(rootComp, rootSketch, container, start, end, r): global t planes = rootComp.constructionPlanes planeInput = planes.createInput() line_sketch = rootComp.sketches.add(rootComp.xYConstructionPlane) lines = line_sketch.sketchCurves.sketchLines line = lines.addByTwoPoints(start, end) path = rootComp.features.createPath(line) planeInput = rootComp.constructionPlanes.createInput() planeInput.setByDistanceOnPath(path, adsk.core.ValueInput.createByReal(0)) plane1 = rootComp.constructionPlanes.add(planeInput) sketch1 = rootComp.sketches.add(plane1) circles = sketch1.sketchCurves.sketchCircles circle1 = circles.addByCenterRadius(adsk.core.Point3D.create(0, 0, 0), r) profile0 = sketch1.profiles.item(0) extrudes = rootComp.features.extrudeFeatures dist = adsk.core.ValueInput.createByReal(start.distanceTo(end) + r * math.tan(t)) extrude1 = extrudes.addSimple(profile0, dist, adsk.fusion.FeatureOperations.NewBodyFeatureOperation) container.add(rootComp.bRepBodies.item(rootComp.bRepBodies.count - 1)) line_sketch.deleteMe() sketch1.deleteMe()
py
1a3df2ef9c463fd3dab3023542464024c88696a4
from rest_framework import serializers
from reporting.models import IncidentReport, IncidentType, InvolvedParties


class IncidentTypeSerializer(serializers.ModelSerializer):
    class Meta:
        model = IncidentType
        fields = "__all__"


class IncidentReportSerializer(serializers.ModelSerializer):
    class Meta:
        model = IncidentReport
        fields = "__all__"

    def create(self, validated_data):
        # DRF serializer create() receives validated_data rather than the
        # request; the request (and request.user) is available through
        # self.context when the serializer is instantiated by a view.
        request = self.context.get("request")
        if request is not None:
            print(request.user)
        return super().create(validated_data)


class InvolvedPartiesSerializer(serializers.ModelSerializer):
    class Meta:
        model = InvolvedParties
        fields = "__all__"

    def create(self, validated_data):
        d = validated_data
        d["label"] = d["name"]
        d["value"] = d["name"].lower()
        print(d)
        return super().create(validated_data)
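# --- Editor's wiring sketch (the viewset and router below are assumptions,
# --- not code from this project) ---
# Typical way these serializers end up exposed through DRF:
#
#     from rest_framework import viewsets, routers
#
#     class IncidentReportViewSet(viewsets.ModelViewSet):
#         queryset = IncidentReport.objects.all()
#         serializer_class = IncidentReportSerializer
#
#     router = routers.DefaultRouter()
#     router.register(r'incident-reports', IncidentReportViewSet)
#     # urlpatterns = [path('api/', include(router.urls))]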
py
1a3df3b515a0cced2d2dc66bd58f65bfcd100165
# Copyright 2008-2015 Nokia Networks # Copyright 2016- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import difflib import re import time import token from tokenize import generate_tokens, untokenize from robot.api import logger from robot.errors import (ContinueForLoop, DataError, ExecutionFailed, ExecutionFailures, ExecutionPassed, ExitForLoop, PassExecution, ReturnFromKeyword) from robot.running import Keyword, RUN_KW_REGISTER from robot.running.context import EXECUTION_CONTEXTS from robot.running.usererrorhandler import UserErrorHandler from robot.utils import (DotDict, escape, format_assign_message, get_error_message, get_time, html_escape, is_falsy, is_integer, is_string, is_truthy, is_unicode, IRONPYTHON, JYTHON, Matcher, normalize, NormalizedDict, parse_time, prepr, RERAISED_EXCEPTIONS, plural_or_not as s, roundup, secs_to_timestr, seq2str, split_from_equals, StringIO, timestr_to_secs, type_name, unic, is_list_like) from robot.utils.asserts import assert_equal, assert_not_equal from robot.variables import (is_list_var, is_var, DictVariableTableValue, VariableTableValue, VariableSplitter, variable_not_found) from robot.version import get_version if JYTHON: from java.lang import String, Number # TODO: Clean-up registering run keyword variants in RF 3.1. # https://github.com/robotframework/robotframework/issues/2190 def run_keyword_variant(resolve): def decorator(method): RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__, resolve, deprecation_warning=False) return method return decorator class _BuiltInBase(object): @property def _context(self): return self._get_context() def _get_context(self, top=False): ctx = EXECUTION_CONTEXTS.current if not top else EXECUTION_CONTEXTS.top if ctx is None: raise RobotNotRunningError('Cannot access execution context') return ctx @property def _namespace(self): return self._get_context().namespace @property def _variables(self): return self._namespace.variables def _matches(self, string, pattern, caseless=False): # Must use this instead of fnmatch when string may contain newlines. matcher = Matcher(pattern, caseless=caseless, spaceless=False) return matcher.match(string) def _is_true(self, condition): if is_string(condition): condition = self.evaluate(condition, modules='os,sys') return bool(condition) def _log_types(self, *args): self._log_types_at_level('DEBUG', *args) def _log_types_at_level(self, level, *args): msg = ["Argument types are:"] + [self._get_type(a) for a in args] self.log('\n'.join(msg), level) def _get_type(self, arg): # In IronPython type(u'x') is str. We want to report unicode anyway. if is_unicode(arg): return "<type 'unicode'>" return str(type(arg)) class _Converter(_BuiltInBase): def convert_to_integer(self, item, base=None): """Converts the given item to an integer number. If the given item is a string, it is by default expected to be an integer in base 10. There are two ways to convert from other bases: - Give base explicitly to the keyword as ``base`` argument. 
- Prefix the given string with the base so that ``0b`` means binary (base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16). The prefix is considered only when ``base`` argument is not given and may itself be prefixed with a plus or minus sign. The syntax is case-insensitive and possible spaces are ignored. Examples: | ${result} = | Convert To Integer | 100 | | # Result is 100 | | ${result} = | Convert To Integer | FF AA | 16 | # Result is 65450 | | ${result} = | Convert To Integer | 100 | 8 | # Result is 64 | | ${result} = | Convert To Integer | -100 | 2 | # Result is -4 | | ${result} = | Convert To Integer | 0b100 | | # Result is 4 | | ${result} = | Convert To Integer | -0x100 | | # Result is -256 | See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`, `Convert To Hex`, and `Convert To Bytes`. """ self._log_types(item) return self._convert_to_integer(item, base) def _convert_to_integer(self, orig, base=None): try: item = self._handle_java_numbers(orig) item, base = self._get_base(item, base) if base: return int(item, self._convert_to_integer(base)) return int(item) except: raise RuntimeError("'%s' cannot be converted to an integer: %s" % (orig, get_error_message())) def _handle_java_numbers(self, item): if not JYTHON: return item if isinstance(item, String): return unic(item) if isinstance(item, Number): return item.doubleValue() return item def _get_base(self, item, base): if not is_string(item): return item, base item = normalize(item) if item.startswith(('-', '+')): sign = item[0] item = item[1:] else: sign = '' bases = {'0b': 2, '0o': 8, '0x': 16} if base or not item.startswith(tuple(bases)): return sign+item, base return sign+item[2:], bases[item[:2]] def convert_to_binary(self, item, base=None, prefix=None, length=None): """Converts the given item to a binary string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to a binary number (base 2) represented as a string such as ``1011``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. Examples: | ${result} = | Convert To Binary | 10 | | | # Result is 1010 | | ${result} = | Convert To Binary | F | base=16 | prefix=0b | # Result is 0b1111 | | ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 | See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`. """ return self._convert_to_bin_oct_hex(item, base, prefix, length, 'b') def convert_to_octal(self, item, base=None, prefix=None, length=None): """Converts the given item to an octal string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to an octal number (base 8) represented as a string such as ``775``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. Examples: | ${result} = | Convert To Octal | 10 | | | # Result is 12 | | ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017 | | ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 | See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`. 
""" return self._convert_to_bin_oct_hex(item, base, prefix, length, 'o') def convert_to_hex(self, item, base=None, prefix=None, length=None, lowercase=False): """Converts the given item to a hexadecimal string. The ``item``, with an optional ``base``, is first converted to an integer using `Convert To Integer` internally. After that it is converted to a hexadecimal number (base 16) represented as a string such as ``FF0A``. The returned value can contain an optional ``prefix`` and can be required to be of minimum ``length`` (excluding the prefix and a possible minus sign). If the value is initially shorter than the required length, it is padded with zeros. By default the value is returned as an upper case string, but the ``lowercase`` argument a true value (see `Boolean arguments`) turns the value (but not the given prefix) to lower case. Examples: | ${result} = | Convert To Hex | 255 | | | # Result is FF | | ${result} = | Convert To Hex | -10 | prefix=0x | length=2 | # Result is -0x0A | | ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff | See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`. """ spec = 'x' if is_truthy(lowercase) else 'X' return self._convert_to_bin_oct_hex(item, base, prefix, length, spec) def _convert_to_bin_oct_hex(self, item, base, prefix, length, format_spec): self._log_types(item) ret = format(self._convert_to_integer(item, base), format_spec) prefix = prefix or '' if ret[0] == '-': prefix = '-' + prefix ret = ret[1:] if length: ret = ret.rjust(self._convert_to_integer(length), '0') return prefix + ret def convert_to_number(self, item, precision=None): """Converts the given item to a floating point number. If the optional ``precision`` is positive or zero, the returned number is rounded to that number of decimal digits. Negative precision means that the number is rounded to the closest multiple of 10 to the power of the absolute precision. If a number is equally close to a certain precision, it is always rounded away from zero. Examples: | ${result} = | Convert To Number | 42.512 | | # Result is 42.512 | | ${result} = | Convert To Number | 42.512 | 1 | # Result is 42.5 | | ${result} = | Convert To Number | 42.512 | 0 | # Result is 43.0 | | ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0 | Notice that machines generally cannot store floating point numbers accurately. This may cause surprises with these numbers in general and also when they are rounded. For more information see, for example, these resources: - http://docs.python.org/tutorial/floatingpoint.html - http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition If you want to avoid possible problems with floating point numbers, you can implement custom keywords using Python's [http://docs.python.org/library/decimal.html|decimal] or [http://docs.python.org/library/fractions.html|fractions] modules. If you need an integer number, use `Convert To Integer` instead. 
""" self._log_types(item) return self._convert_to_number(item, precision) def _convert_to_number(self, item, precision=None): number = self._convert_to_number_without_precision(item) if precision is not None: number = roundup(number, self._convert_to_integer(precision), return_type=float) return number def _convert_to_number_without_precision(self, item): try: if JYTHON: item = self._handle_java_numbers(item) return float(item) except: error = get_error_message() try: return float(self._convert_to_integer(item)) except RuntimeError: raise RuntimeError("'%s' cannot be converted to a floating " "point number: %s" % (item, error)) def convert_to_string(self, item): """Converts the given item to a Unicode string. Uses ``__unicode__`` or ``__str__`` method with Python objects and ``toString`` with Java objects. Use `Encode String To Bytes` and `Decode Bytes To String` keywords in ``String`` library if you need to convert between Unicode and byte strings using different encodings. Use `Convert To Bytes` if you just want to create byte strings. """ self._log_types(item) return self._convert_to_string(item) def _convert_to_string(self, item): return unic(item) def convert_to_boolean(self, item): """Converts the given item to Boolean true or false. Handles strings ``True`` and ``False`` (case-insensitive) as expected, otherwise returns item's [http://docs.python.org/library/stdtypes.html#truth|truth value] using Python's ``bool()`` method. """ self._log_types(item) if is_string(item): if item.upper() == 'TRUE': return True if item.upper() == 'FALSE': return False return bool(item) def convert_to_bytes(self, input, input_type='text'): u"""Converts the given ``input`` to bytes according to the ``input_type``. Valid input types are listed below: - ``text:`` Converts text to bytes character by character. All characters with ordinal below 256 can be used and are converted to bytes with same values. Many characters are easiest to represent using escapes like ``\\x00`` or ``\\xff``. Supports both Unicode strings and bytes. - ``int:`` Converts integers separated by spaces to bytes. Similarly as with `Convert To Integer`, it is possible to use binary, octal, or hex values by prefixing the values with ``0b``, ``0o``, or ``0x``, respectively. - ``hex:`` Converts hexadecimal values to bytes. Single byte is always two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and can be used freely as a visual separator. - ``bin:`` Converts binary values to bytes. Single byte is always eight characters long (e.g. ``00001010``). Spaces are ignored and can be used freely as a visual separator. In addition to giving the input as a string, it is possible to use lists or other iterables containing individual characters or numbers. In that case numbers do not need to be padded to certain length and they cannot contain extra spaces. Examples (last column shows returned bytes): | ${bytes} = | Convert To Bytes | hyv\xe4 | | # hyv\\xe4 | | ${bytes} = | Convert To Bytes | \\xff\\x07 | | # \\xff\\x07 | | ${bytes} = | Convert To Bytes | 82 70 | int | # RF | | ${bytes} = | Convert To Bytes | 0b10 0x10 | int | # \\x02\\x10 | | ${bytes} = | Convert To Bytes | ff 00 07 | hex | # \\xff\\x00\\x07 | | ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!! 
| | ${bytes} = | Convert To Bytes | 0000 1000 | bin | # \\x08 | | ${input} = | Create List | 1 | 2 | 12 | | ${bytes} = | Convert To Bytes | ${input} | int | # \\x01\\x02\\x0c | | ${bytes} = | Convert To Bytes | ${input} | hex | # \\x01\\x02\\x12 | Use `Encode String To Bytes` in ``String`` library if you need to convert text to bytes using a certain encoding. """ try: try: ordinals = getattr(self, '_get_ordinals_from_%s' % input_type) except AttributeError: raise RuntimeError("Invalid input type '%s'." % input_type) return bytes(bytearray(o for o in ordinals(input))) except: raise RuntimeError("Creating bytes failed: %s" % get_error_message()) def _get_ordinals_from_text(self, input): # https://github.com/IronLanguages/main/issues/1237 if IRONPYTHON and isinstance(input, bytearray): input = bytes(input) for char in input: ordinal = char if is_integer(char) else ord(char) yield self._test_ordinal(ordinal, char, 'Character') def _test_ordinal(self, ordinal, original, type): if 0 <= ordinal <= 255: return ordinal raise RuntimeError("%s '%s' cannot be represented as a byte." % (type, original)) def _get_ordinals_from_int(self, input): if is_string(input): input = input.split() elif is_integer(input): input = [input] for integer in input: ordinal = self._convert_to_integer(integer) yield self._test_ordinal(ordinal, integer, 'Integer') def _get_ordinals_from_hex(self, input): for token in self._input_to_tokens(input, length=2): ordinal = self._convert_to_integer(token, base=16) yield self._test_ordinal(ordinal, token, 'Hex value') def _get_ordinals_from_bin(self, input): for token in self._input_to_tokens(input, length=8): ordinal = self._convert_to_integer(token, base=2) yield self._test_ordinal(ordinal, token, 'Binary value') def _input_to_tokens(self, input, length): if not is_string(input): return input input = ''.join(input.split()) if len(input) % length != 0: raise RuntimeError('Expected input to be multiple of %d.' % length) return (input[i:i+length] for i in range(0, len(input), length)) def create_list(self, *items): """Returns a list containing given items. The returned list can be assigned both to ``${scalar}`` and ``@{list}`` variables. Examples: | @{list} = | Create List | a | b | c | | ${scalar} = | Create List | a | b | c | | ${ints} = | Create List | ${1} | ${2} | ${3} | """ return list(items) @run_keyword_variant(resolve=0) def create_dictionary(self, *items): """Creates and returns a dictionary based on the given ``items``. Items are typically given using the ``key=value`` syntax same way as ``&{dictionary}`` variables are created in the Variable table. Both keys and values can contain variables, and possible equal sign in key can be escaped with a backslash like ``escaped\\=key=value``. It is also possible to get items from existing dictionaries by simply using them like ``&{dict}``. Alternatively items can be specified so that keys and values are given separately. This and the ``key=value`` syntax can even be combined, but separately given items must be first. If same key is used multiple times, the last value has precedence. The returned dictionary is ordered, and values with strings as keys can also be accessed using a convenient dot-access syntax like ``${dict.key}``. 
Examples: | &{dict} = | Create Dictionary | key=value | foo=bar | | | # key=value syntax | | Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} | | &{dict2} = | Create Dictionary | key | value | foo | bar | # separate key and value | | Should Be Equal | ${dict} | ${dict2} | | &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new | | # using variables | | Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} | | Should Be Equal | ${dict.key} | value | | | | # dot-access | This keyword was changed in Robot Framework 2.9 in many ways: - Moved from ``Collections`` library to ``BuiltIn``. - Support also non-string keys in ``key=value`` syntax. - Returned dictionary is ordered and dot-accessible. - Old syntax to give keys and values separately was deprecated, but deprecation was later removed in RF 3.0.1. """ separate, combined = self._split_dict_items(items) result = DotDict(self._format_separate_dict_items(separate)) combined = DictVariableTableValue(combined).resolve(self._variables) result.update(combined) return result def _split_dict_items(self, items): separate = [] for item in items: name, value = split_from_equals(item) if value is not None or VariableSplitter(item).is_dict_variable(): break separate.append(item) return separate, items[len(separate):] def _format_separate_dict_items(self, separate): separate = self._variables.replace_list(separate) if len(separate) % 2 != 0: raise DataError('Expected even number of keys and values, got %d.' % len(separate)) return [separate[i:i+2] for i in range(0, len(separate), 2)] class _Verify(_BuiltInBase): def _set_and_remove_tags(self, tags): set_tags = [tag for tag in tags if not tag.startswith('-')] remove_tags = [tag[1:] for tag in tags if tag.startswith('-')] if remove_tags: self.remove_tags(*remove_tags) if set_tags: self.set_tags(*set_tags) def fail(self, msg=None, *tags): """Fails the test with the given message and optionally alters its tags. The error message is specified using the ``msg`` argument. It is possible to use HTML in the given error message, similarly as with any other keyword accepting an error message, by prefixing the error with ``*HTML*``. It is possible to modify tags of the current test case by passing tags after the message. Tags starting with a hyphen (e.g. ``-regression``) are removed and others added. Tags are modified using `Set Tags` and `Remove Tags` internally, and the semantics setting and removing them are the same as with these keywords. Examples: | Fail | Test not ready | | | # Fails with the given message. | | Fail | *HTML*<b>Test not ready</b> | | | # Fails using HTML in the message. | | Fail | Test not ready | not-ready | | # Fails and adds 'not-ready' tag. | | Fail | OS not supported | -regression | | # Removes tag 'regression'. | | Fail | My message | tag | -t* | # Removes all tags starting with 't' except the newly added 'tag'. | See `Fatal Error` if you need to stop the whole test execution. """ self._set_and_remove_tags(tags) raise AssertionError(msg) if msg else AssertionError() def fatal_error(self, msg=None): """Stops the whole test execution. The test or suite where this keyword is used fails with the provided message, and subsequent tests fail with a canned message. Possible teardowns will nevertheless be executed. See `Fail` if you only want to stop one test case unconditionally. 
""" error = AssertionError(msg) if msg else AssertionError() error.ROBOT_EXIT_ON_FAILURE = True raise error def should_not_be_true(self, condition, msg=None): """Fails if the given condition is true. See `Should Be True` for details about how ``condition`` is evaluated and how ``msg`` can be used to override the default error message. """ if self._is_true(condition): raise AssertionError(msg or "'%s' should not be true." % condition) def should_be_true(self, condition, msg=None): """Fails if the given condition is not true. If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as a Python expression as explained in `Evaluating expressions` and the keyword status is decided based on the result. If a non-string item is given, the status is got directly from its [http://docs.python.org/library/stdtypes.html#truth|truth value]. The default error message (``<condition> should be true``) is not very informative, but it can be overridden with the ``msg`` argument. Examples: | Should Be True | ${rc} < 10 | | Should Be True | '${status}' == 'PASS' | # Strings must be quoted | | Should Be True | ${number} | # Passes if ${number} is not zero | | Should Be True | ${list} | # Passes if ${list} is not empty | Variables used like ``${variable}``, as in the examples above, are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Examples: | Should Be True | $rc < 10 | | Should Be True | $status == 'PASS' | # Expected string must be quoted | `Should Be True` automatically imports Python's [http://docs.python.org/library/os.html|os] and [http://docs.python.org/library/sys.html|sys] modules that contain several useful attributes: | Should Be True | os.linesep == '\\n' | # Unixy | | Should Be True | os.linesep == '\\r\\n' | # Windows | | Should Be True | sys.platform == 'darwin' | # OS X | | Should Be True | sys.platform.startswith('java') | # Jython | """ if not self._is_true(condition): raise AssertionError(msg or "'%s' should be true." % condition) def should_be_equal(self, first, second, msg=None, values=True, ignore_case=False): """Fails if the given objects are unequal. Optional ``msg`` and ``values`` arguments specify how to construct the error message if this keyword fails: - If ``msg`` is not given, the error message is ``<first> != <second>``. - If ``msg`` is given and ``values`` gets a true value (default), the error message is ``<msg>: <first> != <second>``. - If ``msg`` is given and ``values`` gets a false value, the error message is simply ``<msg>``. See `Boolean arguments` for more details about using false values. If ``ignore_case`` is given a true value (see `Boolean arguments`) and arguments are strings, it indicates that comparison should be case-insensitive. New option in Robot Framework 3.0.1. If both arguments are multiline strings, the comparison is done using `multiline string comparisons`. 
Examples: | Should Be Equal | ${x} | expected | | Should Be Equal | ${x} | expected | Custom error message | | Should Be Equal | ${x} | expected | Custom message | values=False | | Should Be Equal | ${x} | expected | ignore_case=True | """ self._log_types_at_info_if_different(first, second) if is_truthy(ignore_case) and is_string(first) and is_string(second): first = first.lower() second = second.lower() self._should_be_equal(first, second, msg, values) def _should_be_equal(self, first, second, msg, values): if first == second: return include_values = self._include_values(values) if include_values and is_string(first) and is_string(second): self._raise_multi_diff(first, second) assert_equal(first, second, msg, include_values) def _log_types_at_info_if_different(self, first, second): level = 'DEBUG' if type(first) == type(second) else 'INFO' self._log_types_at_level(level, first, second) def _raise_multi_diff(self, first, second): first_lines, second_lines = first.splitlines(), second.splitlines() if len(first_lines) < 3 or len(second_lines) < 3: return self.log("%s\n!=\n%s" % (first, second)) err = 'Multiline strings are different:\n' for line in difflib.unified_diff(first_lines, second_lines, fromfile='first', tofile='second', lineterm=''): err += line + '\n' raise AssertionError(err) def _include_values(self, values): return is_truthy(values) and str(values).upper() != 'NO VALUES' def should_not_be_equal(self, first, second, msg=None, values=True, ignore_case=False): """Fails if the given objects are equal. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. If ``ignore_case`` is given a true value (see `Boolean arguments`) and both arguments are strings, it indicates that comparison should be case-insensitive. New option in Robot Framework 3.0.1. """ self._log_types_at_info_if_different(first, second) if is_truthy(ignore_case) and is_string(first) and is_string(second): first = first.lower() second = second.lower() self._should_not_be_equal(first, second, msg, values) def _should_not_be_equal(self, first, second, msg, values): assert_not_equal(first, second, msg, self._include_values(values)) def should_not_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are equal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. See `Should Be Equal As Integers` for some usage examples. """ self._log_types_at_info_if_different(first, second) self._should_not_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_be_equal_as_integers(self, first, second, msg=None, values=True, base=None): """Fails if objects are unequal after converting them to integers. See `Convert To Integer` for information how to convert integers from other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. 
Examples: | Should Be Equal As Integers | 42 | ${42} | Error message | | Should Be Equal As Integers | ABCD | abcd | base=16 | | Should Be Equal As Integers | 0b1011 | 11 | """ self._log_types_at_info_if_different(first, second) self._should_be_equal(self._convert_to_integer(first, base), self._convert_to_integer(second, base), msg, values) def should_not_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are equal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. See `Should Be Equal As Numbers` for examples on how to use ``precision`` and why it does not always work as expected. See also `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_number(first, precision) second = self._convert_to_number(second, precision) self._should_not_be_equal(first, second, msg, values) def should_be_equal_as_numbers(self, first, second, msg=None, values=True, precision=6): """Fails if objects are unequal after converting them to real numbers. The conversion is done with `Convert To Number` keyword using the given ``precision``. Examples: | Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 | | Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes | | Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes | | Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes | As discussed in the documentation of `Convert To Number`, machines generally cannot store floating point numbers accurately. Because of this limitation, comparing floats for equality is problematic and a correct approach to use depends on the context. This keyword uses a very naive approach of rounding the numbers before comparing them, which is both prone to rounding errors and does not work very well if numbers are really big or small. For more information about comparing floats, and ideas on how to implement your own context specific comparison algorithm, see http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/. If you want to avoid possible problems with floating point numbers, you can implement custom keywords using Python's [http://docs.python.org/library/decimal.html|decimal] or [http://docs.python.org/library/fractions.html|fractions] modules. See `Should Not Be Equal As Numbers` for a negative version of this keyword and `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_number(first, precision) second = self._convert_to_number(second, precision) self._should_be_equal(first, second, msg, values) def should_not_be_equal_as_strings(self, first, second, msg=None, values=True, ignore_case=False): """Fails if objects are equal after converting them to strings. If ``ignore_case`` is given a true value (see `Boolean arguments`), it indicates that comparison should be case-insensitive. New option in Robot Framework 3.0.1. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. 
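
        Examples (the variable is illustrative):
        | Should Not Be Equal As Strings | ${x} | unexpected |
        | Should Not Be Equal As Strings | ${x} | unexpected | ignore_case=True |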
""" self._log_types_at_info_if_different(first, second) first = self._convert_to_string(first) second = self._convert_to_string(second) if is_truthy(ignore_case): first = first.lower() second = second.lower() self._should_not_be_equal(first, second, msg, values) def should_be_equal_as_strings(self, first, second, msg=None, values=True, ignore_case=False): """Fails if objects are unequal after converting them to strings. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``. If ``ignore_case`` is given a true value (see `Boolean arguments`), it indicates that comparison should be case-insensitive. New option in Robot Framework 3.0.1. If both arguments are multiline strings, the comparison is done using `multiline string comparisons`. """ self._log_types_at_info_if_different(first, second) first = self._convert_to_string(first) second = self._convert_to_string(second) if is_truthy(ignore_case): first = first.lower() second = second.lower() self._should_be_equal(first, second, msg, values) def should_not_start_with(self, str1, str2, msg=None, values=True, ignore_case=False): """Fails if the string ``str1`` starts with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``, as well as for semantics of the ``ignore_case`` option. """ if is_truthy(ignore_case): str1 = str1.lower() str2 = str2.lower() if str1.startswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'starts with')) def should_start_with(self, str1, str2, msg=None, values=True, ignore_case=False): """Fails if the string ``str1`` does not start with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``, as well as for semantics of the ``ignore_case`` option. """ if is_truthy(ignore_case): str1 = str1.lower() str2 = str2.lower() if not str1.startswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'does not start with')) def should_not_end_with(self, str1, str2, msg=None, values=True, ignore_case=False): """Fails if the string ``str1`` ends with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``, as well as for semantics of the ``ignore_case`` option. """ if is_truthy(ignore_case): str1 = str1.lower() str2 = str2.lower() if str1.endswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'ends with')) def should_end_with(self, str1, str2, msg=None, values=True, ignore_case=False): """Fails if the string ``str1`` does not end with the string ``str2``. See `Should Be Equal` for an explanation on how to override the default error message with ``msg`` and ``values``, as well as for semantics of the ``ignore_case`` option. """ if is_truthy(ignore_case): str1 = str1.lower() str2 = str2.lower() if not str1.endswith(str2): raise AssertionError(self._get_string_msg(str1, str2, msg, values, 'does not end with')) def should_not_contain(self, container, item, msg=None, values=True, ignore_case=False): """Fails if ``container`` contains ``item`` one or more times. Works with strings, lists, and anything that supports Python's ``in`` operator. See `Should Be Equal` for an explanation on how to override the default error message with arguments ``msg`` and ``values``. ``ignore_case`` has exactly the same semantics as with `Should Contain`. 
Examples: | Should Not Contain | ${some list} | value | | Should Not Contain | ${output} | FAILED | ignore_case=True | """ # TODO: It is inconsistent that errors show original case in 'container' # 'item' is in lower case. Should rather show original case everywhere # and add separate '(case-insensitive)' not to the error message. # This same logic should be used with all keywords supporting # case-insensitive comparisons. orig_container = container if is_truthy(ignore_case) and is_string(item): item = item.lower() if is_string(container): container = container.lower() elif is_list_like(container): container = set(x.lower() if is_string(x) else x for x in container) if item in container: raise AssertionError(self._get_string_msg(orig_container, item, msg, values, 'contains')) def should_contain(self, container, item, msg=None, values=True, ignore_case=False): """Fails if ``container`` does not contain ``item`` one or more times. Works with strings, lists, and anything that supports Python's ``in`` operator. See `Should Be Equal` for an explanation on how to override the default error message with arguments ``msg`` and ``values``. If ``ignore_case`` is given a true value (see `Boolean arguments`) and compared items are strings, it indicates that comparison should be case-insensitive. If the ``container`` is a list-like object, string items in it are compared case-insensitively. New option in Robot Framework 3.0.1. Examples: | Should Contain | ${output} | PASS | | Should Contain | ${some list} | value | msg=Failure! | values=False | | Should Contain | ${some list} | value | ignore_case=True | """ orig_container = container if is_truthy(ignore_case) and is_string(item): item = item.lower() if is_string(container): container = container.lower() elif is_list_like(container): container = set(x.lower() if is_string(x) else x for x in container) if item not in container: raise AssertionError(self._get_string_msg(orig_container, item, msg, values, 'does not contain')) def should_contain_any(self, container, *items, **configuration): """Fails if ``container`` does not contain any of the ``*items``. Works with strings, lists, and anything that supports Python's ``in`` operator. Supports additional configuration parameters ``msg``, ``values`` and ``ignore_case``, which have exactly the same semantics as arguments with same names have with `Should Contain`. These arguments must always be given using ``name=value`` syntax after all ``items``. Note that possible equal signs in ``items`` must be escaped with a backslash (e.g. ``foo\\=bar``) to avoid them to be passed in as ``**configuration``. Examples: | Should Contain Any | ${string} | substring 1 | substring 2 | | Should Contain Any | ${list} | item 1 | item 2 | item 3 | | Should Contain Any | ${list} | item 1 | item 2 | item 3 | ignore_case=True | | Should Contain Any | ${list} | @{items} | msg=Custom message | values=False | New in Robot Framework 3.0.1. """ msg = configuration.pop('msg', None) values = configuration.pop('values', True) ignore_case = configuration.pop('ignore_case', False) if configuration: raise RuntimeError("Unsupported configuration parameter%s: %s." 
% (s(configuration), seq2str(sorted(configuration)))) if not items: raise RuntimeError('One or more items required.') orig_container = container if is_truthy(ignore_case): items = [x.lower() if is_string(x) else x for x in items] if is_string(container): container = container.lower() elif is_list_like(container): container = set(x.lower() if is_string(x) else x for x in container) if not any(item in container for item in items): msg = self._get_string_msg(orig_container, seq2str(items, lastsep=' or '), msg, values, 'does not contain any of', quote_item2=False) raise AssertionError(msg) def should_not_contain_any(self, container, *items, **configuration): """Fails if ``container`` contains one or more of the ``*items``. Works with strings, lists, and anything that supports Python's ``in`` operator. Supports additional configuration parameters ``msg``, ``values`` and ``ignore_case``, which have exactly the same semantics as arguments with same names have with `Should Contain`. These arguments must always be given using ``name=value`` syntax after all ``items``. Note that possible equal signs in ``items`` must be escaped with a backslash (e.g. ``foo\\=bar``) to avoid them to be passed in as ``**configuration``. Examples: | Should Not Contain Any | ${string} | substring 1 | substring 2 | | Should Not Contain Any | ${list} | item 1 | item 2 | item 3 | | Should Not Contain Any | ${list} | item 1 | item 2 | item 3 | ignore_case=True | | Should Not Contain Any | ${list} | @{items} | msg=Custom message | values=False | New in Robot Framework 3.0.1. """ msg = configuration.pop('msg', None) values = configuration.pop('values', True) ignore_case = configuration.pop('ignore_case', False) if configuration: raise RuntimeError("Unsupported configuration parameter%s: %s." % (s(configuration), seq2str(sorted(configuration)))) if not items: raise RuntimeError('One or more items required.') orig_container = container if is_truthy(ignore_case): items = [x.lower() if is_string(x) else x for x in items] if is_string(container): container = container.lower() elif is_list_like(container): container = set(x.lower() if is_string(x) else x for x in container) if any(item in container for item in items): msg = self._get_string_msg(orig_container, seq2str(items, lastsep=' or '), msg, values, 'contains one or more of', quote_item2=False) raise AssertionError(msg) def should_contain_x_times(self, item1, item2, count, msg=None, ignore_case=False): """Fails if ``item1`` does not contain ``item2`` ``count`` times. Works with strings, lists and all objects that `Get Count` works with. The default error message can be overridden with ``msg`` and the actual count is always logged. If ``ignore_case`` is given a true value (see `Boolean arguments`) and compared items are strings, it indicates that comparison should be case-insensitive. If the ``item1`` is a list-like object, string items in it are compared case-insensitively. New option in Robot Framework 3.0.1. Examples: | Should Contain X Times | ${output} | hello | 2 | | Should Contain X Times | ${some list} | value | 3 | ignore_case=True | """ # TODO: Rename 'item1' and 'item2' to 'container' and 'item' in RF 3.1. # Other 'contain' keywords use these names. And 'Get Count' should too. # Cannot be done in minor release due to backwards compatibility. # Remember to update it also in the docstring!! 
        count = self._convert_to_integer(count)
        orig_item1 = item1
        if is_truthy(ignore_case) and is_string(item2):
            item2 = item2.lower()
            if is_string(item1):
                item1 = item1.lower()
            elif is_list_like(item1):
                item1 = [x.lower() if is_string(x) else x for x in item1]
        x = self.get_count(item1, item2)
        if not msg:
            msg = "'%s' contains '%s' %d time%s, not %d time%s." \
                  % (unic(orig_item1), unic(item2), x, s(x), count, s(count))
        self.should_be_equal_as_integers(x, count, msg, values=False)

    def get_count(self, item1, item2):
        """Returns and logs how many times ``item2`` is found from ``item1``.

        This keyword works with Python strings and lists and all objects
        that either have ``count`` method or can be converted to Python lists.

        Example:
        | ${count} = | Get Count | ${some item} | interesting value |
        | Should Be True | 5 < ${count} < 10 |
        """
        if not hasattr(item1, 'count'):
            try:
                item1 = list(item1)
            except:
                raise RuntimeError("Converting '%s' to list failed: %s"
                                   % (item1, get_error_message()))
        count = item1.count(item2)
        self.log('Item found from the first item %d time%s' % (count, s(count)))
        return count

    def should_not_match(self, string, pattern, msg=None, values=True,
                         ignore_case=False):
        """Fails if the given ``string`` matches the given ``pattern``.

        Pattern matching is similar as matching files in a shell with
        ``*``, ``?`` and ``[chars]`` acting as wildcards. See the
        `Glob patterns` section for more information.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``, as well as for semantics
        of the ``ignore_case`` option.
        """
        if self._matches(string, pattern, caseless=is_truthy(ignore_case)):
            raise AssertionError(self._get_string_msg(string, pattern, msg,
                                                      values, 'matches'))

    def should_match(self, string, pattern, msg=None, values=True,
                     ignore_case=False):
        """Fails if the given ``string`` does not match the given ``pattern``.

        Pattern matching is similar as matching files in a shell with
        ``*``, ``?`` and ``[chars]`` acting as wildcards. See the
        `Glob patterns` section for more information.

        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``, as well as for semantics
        of the ``ignore_case`` option.
        """
        if not self._matches(string, pattern, caseless=is_truthy(ignore_case)):
            raise AssertionError(self._get_string_msg(string, pattern, msg,
                                                      values, 'does not match'))

    def should_match_regexp(self, string, pattern, msg=None, values=True):
        """Fails if ``string`` does not match ``pattern`` as a regular expression.

        See the `Regular expressions` section for more information about
        regular expressions and how to use them in Robot Framework test data.

        Notice that the given pattern does not need to match the whole string.
        For example, the pattern ``ello`` matches the string ``Hello world!``.
        If a full match is needed, the ``^`` and ``$`` characters can be used
        to denote the beginning and end of the string, respectively.
        For example, ``^ello$`` only matches the exact string ``ello``.

        Possible flags altering how the expression is parsed (e.g.
        ``re.IGNORECASE``, ``re.MULTILINE``) must be embedded to the
        pattern like ``(?im)pattern``. The most useful flags are ``i``
        (case-insensitive), ``m`` (multiline mode), ``s`` (dotall mode)
        and ``x`` (verbose).

        If this keyword passes, it returns the portion of the string that
        matched the pattern. Additionally, the possible captured groups are
        returned.
See the `Should Be Equal` keyword for an explanation on how to override the default error message with the ``msg`` and ``values`` arguments. Examples: | Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers | | Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more | | ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ | | ${match} | ${group1} | ${group2} = | | ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) | => | ${ret} = 'Foo: 42' | ${match} = 'Bar: 43' | ${group1} = 'Bar' | ${group2} = '43' """ res = re.search(pattern, string) if res is None: raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'does not match')) match = res.group(0) groups = res.groups() if groups: return [match] + list(groups) return match def should_not_match_regexp(self, string, pattern, msg=None, values=True): """Fails if ``string`` matches ``pattern`` as a regular expression. See `Should Match Regexp` for more information about arguments. """ if re.search(pattern, string) is not None: raise AssertionError(self._get_string_msg(string, pattern, msg, values, 'matches')) def get_length(self, item): """Returns and logs the length of the given item as an integer. The item can be anything that has a length, for example, a string, a list, or a mapping. The keyword first tries to get the length with the Python function ``len``, which calls the item's ``__len__`` method internally. If that fails, the keyword tries to call the item's possible ``length`` and ``size`` methods directly. The final attempt is trying to get the value of the item's ``length`` attribute. If all these attempts are unsuccessful, the keyword fails. Examples: | ${length} = | Get Length | Hello, world! | | | Should Be Equal As Integers | ${length} | 13 | | @{list} = | Create List | Hello, | world! | | ${length} = | Get Length | ${list} | | | Should Be Equal As Integers | ${length} | 2 | See also `Length Should Be`, `Should Be Empty` and `Should Not Be Empty`. """ length = self._get_length(item) self.log('Length is %d' % length) return length def _get_length(self, item): try: return len(item) except RERAISED_EXCEPTIONS: raise except: try: return item.length() except RERAISED_EXCEPTIONS: raise except: try: return item.size() except RERAISED_EXCEPTIONS: raise except: try: return item.length except RERAISED_EXCEPTIONS: raise except: raise RuntimeError("Could not get length of '%s'." % item) def length_should_be(self, item, length, msg=None): """Verifies that the length of the given item is correct. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ length = self._convert_to_integer(length) actual = self.get_length(item) if actual != length: raise AssertionError(msg or "Length of '%s' should be %d but is %d." % (item, length, actual)) def should_be_empty(self, item, msg=None): """Verifies that the given item is empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) > 0: raise AssertionError(msg or "'%s' should be empty." % item) def should_not_be_empty(self, item, msg=None): """Verifies that the given item is not empty. The length of the item is got using the `Get Length` keyword. The default error message can be overridden with the ``msg`` argument. """ if self.get_length(item) == 0: raise AssertionError(msg or "'%s' should not be empty." 
% item) def _get_string_msg(self, item1, item2, custom_message, include_values, delimiter, quote_item1=True, quote_item2=True): if custom_message and not self._include_values(include_values): return custom_message item1 = "'%s'" % unic(item1) if quote_item1 else unic(item1) item2 = "'%s'" % unic(item2) if quote_item2 else unic(item2) default_message = '%s %s %s' % (item1, delimiter, item2) if not custom_message: return default_message return '%s: %s' % (custom_message, default_message) class _Variables(_BuiltInBase): def get_variables(self, no_decoration=False): """Returns a dictionary containing all variables in the current scope. Variables are returned as a special dictionary that allows accessing variables in space, case, and underscore insensitive manner similarly as accessing variables in the test data. This dictionary supports all same operations as normal Python dictionaries and, for example, Collections library can be used to access or modify it. Modifying the returned dictionary has no effect on the variables available in the current scope. By default variables are returned with ``${}``, ``@{}`` or ``&{}`` decoration based on variable types. Giving a true value (see `Boolean arguments`) to the optional argument ``no_decoration`` will return the variables without the decoration. This option is new in Robot Framework 2.9. Example: | ${example_variable} = | Set Variable | example value | | ${variables} = | Get Variables | | | Dictionary Should Contain Key | ${variables} | \\${example_variable} | | Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} | | Set To Dictionary | ${variables} | \\${name} | value | | Variable Should Not Exist | \\${name} | | | | ${no decoration} = | Get Variables | no_decoration=Yes | | Dictionary Should Contain Key | ${no decoration} | example_variable | """ return self._variables.as_dict(decoration=is_falsy(no_decoration)) @run_keyword_variant(resolve=0) def get_variable_value(self, name, default=None): """Returns variable value or ``default`` if the variable does not exist. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. Examples: | ${x} = | Get Variable Value | ${a} | default | | ${y} = | Get Variable Value | ${a} | ${b} | | ${z} = | Get Variable Value | ${z} | | => | ${x} gets value of ${a} if ${a} exists and string 'default' otherwise | ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise | ${z} is set to Python None if it does not exist previously See `Set Variable If` for another keyword to set variables dynamically. """ try: return self._variables[self._get_var_name(name)] except DataError: return self._variables.replace_scalar(default) def log_variables(self, level='INFO'): """Logs all variables in the current scope with given log level.""" variables = self.get_variables() for name in sorted(variables, key=lambda s: s[2:-1].lower()): msg = format_assign_message(name, variables[name], cut_long=False) self.log(msg, level) @run_keyword_variant(resolve=0) def variable_should_exist(self, name, msg=None): """Fails unless the given variable exists within the current scope. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. The default error message can be overridden with the ``msg`` argument. 
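
        Example (the variable name and message are illustrative):
        | Variable Should Exist | ${USERNAME} |
        | Variable Should Exist | ${USERNAME} | msg=Set username before running this test. |
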
See also `Variable Should Not Exist` and `Keyword Should Exist`. """ name = self._get_var_name(name) msg = self._variables.replace_string(msg) if msg \ else "Variable %s does not exist." % name try: self._variables[name] except DataError: raise AssertionError(msg) @run_keyword_variant(resolve=0) def variable_should_not_exist(self, name, msg=None): """Fails if the given variable exists within the current scope. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice that the former has some limitations explained in `Set Suite Variable`. The default error message can be overridden with the ``msg`` argument. See also `Variable Should Exist` and `Keyword Should Exist`. """ name = self._get_var_name(name) msg = self._variables.replace_string(msg) if msg \ else "Variable %s exists." % name try: self._variables[name] except DataError: pass else: raise AssertionError(msg) def replace_variables(self, text): """Replaces variables in the given text with their current values. If the text contains undefined variables, this keyword fails. If the given ``text`` contains only a single variable, its value is returned as-is and it can be any object. Otherwise this keyword always returns a string. Example: The file ``template.txt`` contains ``Hello ${NAME}!`` and variable ``${NAME}`` has the value ``Robot``. | ${template} = | Get File | ${CURDIR}/template.txt | | ${message} = | Replace Variables | ${template} | | Should Be Equal | ${message} | Hello Robot! | """ return self._variables.replace_scalar(text) def set_variable(self, *values): """Returns the given values which can then be assigned to a variables. This keyword is mainly used for setting scalar variables. Additionally it can be used for converting a scalar variable containing a list to a list variable or to multiple scalar variables. It is recommended to use `Create List` when creating new lists. Examples: | ${hi} = | Set Variable | Hello, world! | | ${hi2} = | Set Variable | I said: ${hi} | | ${var1} | ${var2} = | Set Variable | Hello | world | | @{list} = | Set Variable | ${list with some items} | | ${item1} | ${item2} = | Set Variable | ${list with 2 items} | Variables created with this keyword are available only in the scope where they are created. See `Set Global Variable`, `Set Test Variable` and `Set Suite Variable` for information on how to set variables so that they are available also in a larger scope. """ if len(values) == 0: return '' elif len(values) == 1: return values[0] else: return list(values) @run_keyword_variant(resolve=0) def set_test_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current test. Variables set with this keyword are available everywhere within the scope of the currently executed test case. For example, if you set a variable in a user keyword, it is available both in the test case level and also in all other user keywords used in the current test. Other test cases will not see variables set with this keyword. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_test(name, value) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_task_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current task. This is an alias for `Set Test Variable` that is more applicable when creating tasks, not tests. New in RF 3.1. 
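
        Example (the variable name and value are illustrative):
        | Set Task Variable | ${MESSAGE} | Hello, task! |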
""" self.set_test_variable(name, *values) @run_keyword_variant(resolve=0) def set_suite_variable(self, name, *values): """Makes a variable available everywhere within the scope of the current suite. Variables set with this keyword are available everywhere within the scope of the currently executed test suite. Setting variables with this keyword thus has the same effect as creating them using the Variable table in the test data file or importing them from variable files. Possible child test suites do not see variables set with this keyword by default. Starting from Robot Framework 2.9, that can be controlled by using ``children=<option>`` as the last argument. If the specified ``<option>`` is a non-empty string or any other value considered true in Python, the variable is set also to the child suites. Parent and sibling suites will never see variables set with this keyword. The name of the variable can be given either as a normal variable name (e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``. Variable value can be given using the same syntax as when variables are created in the Variable table. If a variable already exists within the new scope, its value will be overwritten. Otherwise a new variable is created. If a variable already exists within the current scope, the value can be left empty and the variable within the new scope gets the value within the current scope. Examples: | Set Suite Variable | ${SCALAR} | Hello, world! | | Set Suite Variable | ${SCALAR} | Hello, world! | children=true | | Set Suite Variable | @{LIST} | First item | Second item | | Set Suite Variable | &{DICT} | key=value | foo=bar | | ${ID} = | Get ID | | Set Suite Variable | ${ID} | To override an existing value with an empty value, use built-in variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``: | Set Suite Variable | ${SCALAR} | ${EMPTY} | | Set Suite Variable | @{LIST} | @{EMPTY} | | Set Suite Variable | &{DICT} | &{EMPTY} | *NOTE:* If the variable has value which itself is a variable (escaped or not), you must always use the escaped format to set the variable: Example: | ${NAME} = | Set Variable | \\${var} | | Set Suite Variable | ${NAME} | value | # Sets variable ${var} | | Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} | This limitation applies also to `Set Test Variable`, `Set Global Variable`, `Variable Should Exist`, `Variable Should Not Exist` and `Get Variable Value` keywords. """ name = self._get_var_name(name) if (values and is_string(values[-1]) and values[-1].startswith('children=')): children = self._variables.replace_scalar(values[-1][9:]) children = is_truthy(children) values = values[:-1] else: children = False value = self._get_var_value(name, values) self._variables.set_suite(name, value, children=children) self._log_set_variable(name, value) @run_keyword_variant(resolve=0) def set_global_variable(self, name, *values): """Makes a variable available globally in all tests and suites. Variables set with this keyword are globally available in all subsequent test suites, test cases and user keywords. Also variables in variable tables are overridden. Variables assigned locally based on keyword return values or by using `Set Test Variable` and `Set Suite Variable` override these variables in that scope, but the global value is not changed in those cases. In practice setting variables with this keyword has the same effect as using command line options ``--variable`` and ``--variablefile``. 
Because this keyword can change variables everywhere, it should be used with care. See `Set Suite Variable` for more information and examples. """ name = self._get_var_name(name) value = self._get_var_value(name, values) self._variables.set_global(name, value) self._log_set_variable(name, value) # Helpers def _get_var_name(self, orig): name = self._resolve_possible_variable(orig) try: return self._unescape_variable_if_needed(name) except ValueError: raise RuntimeError("Invalid variable syntax '%s'." % orig) def _resolve_possible_variable(self, name): try: resolved = self._variables.replace_string(name) return self._unescape_variable_if_needed(resolved) except (KeyError, ValueError, DataError): return name def _unescape_variable_if_needed(self, name): if name.startswith('\\'): name = name[1:] if len(name) < 2: raise ValueError if name[0] in '$@&' and name[1] != '{': name = '%s{%s}' % (name[0], name[1:]) if is_var(name): return name # Support for possible internal variables (issue 397) name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1])) if is_var(name): return name raise ValueError def _get_var_value(self, name, values): if not values: return self._variables[name] if name[0] == '$': # We could consider catenating values similarly as when creating # scalar variables in the variable table, but that would require # handling non-string values somehow. For details see # https://github.com/robotframework/robotframework/issues/1919 if len(values) != 1 or VariableSplitter(values[0]).is_list_variable(): raise DataError("Setting list value to scalar variable '%s' " "is not supported anymore. Create list " "variable '@%s' instead." % (name, name[1:])) return self._variables.replace_scalar(values[0]) return VariableTableValue(values, name).resolve(self._variables) def _log_set_variable(self, name, value): self.log(format_assign_message(name, value)) class _RunKeyword(_BuiltInBase): # If you use any of these run keyword variants from another library, you # should register those keywords with 'register_run_keyword' method. See # the documentation of that method at the end of this file. There are also # other run keyword variant keywords in BuiltIn which can also be seen # at the end of this file. @run_keyword_variant(resolve=1) def run_keyword(self, name, *args): """Executes the given keyword with the given arguments. Because the name of the keyword to execute is given as an argument, it can be a variable and thus set dynamically, e.g. from a return value of another keyword or from the command line. """ if not is_string(name): raise RuntimeError('Keyword name must be a string.') kw = Keyword(name, args=args) return kw.run(self._context) @run_keyword_variant(resolve=0) def run_keywords(self, *keywords): """Executes all the given keywords in a sequence. This keyword is mainly useful in setups and teardowns when they need to take care of multiple actions and creating a new higher level user keyword would be an overkill. By default all arguments are expected to be keywords to be executed. Examples: | `Run Keywords` | `Initialize database` | `Start servers` | `Clear logs` | | `Run Keywords` | ${KW 1} | ${KW 2} | | `Run Keywords` | @{KEYWORDS} | Keywords can also be run with arguments using upper case ``AND`` as a separator between keywords. The keywords are executed so that the first argument is the first keyword and proceeding arguments until the first ``AND`` are arguments to it. 
First argument after the first ``AND`` is the second keyword and proceeding arguments until the next ``AND`` are its arguments. And so on. Examples: | `Run Keywords` | `Initialize database` | db1 | AND | `Start servers` | server1 | server2 | | `Run Keywords` | `Initialize database` | ${DB NAME} | AND | `Start servers` | @{SERVERS} | AND | `Clear logs` | | `Run Keywords` | ${KW} | AND | @{KW WITH ARGS} | Notice that the ``AND`` control argument must be used explicitly and cannot itself come from a variable. If you need to use literal ``AND`` string as argument, you can either use variables or escape it with a backslash like ``\\AND``. """ self._run_keywords(self._split_run_keywords(list(keywords))) def _run_keywords(self, iterable): errors = [] for kw, args in iterable: try: self.run_keyword(kw, *args) except ExecutionPassed as err: err.set_earlier_failures(errors) raise err except ExecutionFailed as err: errors.extend(err.get_errors()) if not err.can_continue(self._context.in_teardown): break if errors: raise ExecutionFailures(errors) def _split_run_keywords(self, keywords): if 'AND' not in keywords: for name in self._variables.replace_list(keywords): yield name, () else: for name, args in self._split_run_keywords_from_and(keywords): yield name, args def _split_run_keywords_from_and(self, keywords): while 'AND' in keywords: index = keywords.index('AND') yield self._resolve_run_keywords_name_and_args(keywords[:index]) keywords = keywords[index+1:] yield self._resolve_run_keywords_name_and_args(keywords) def _resolve_run_keywords_name_and_args(self, kw_call): kw_call = self._variables.replace_list(kw_call, replace_until=1) if not kw_call: raise DataError('Incorrect use of AND') return kw_call[0], kw_call[1:] @run_keyword_variant(resolve=2) def run_keyword_if(self, condition, name, *args): """Runs the given keyword with the given arguments, if ``condition`` is true. The given ``condition`` is evaluated in Python as explained in `Evaluating expressions`, and ``name`` and ``*args`` have same semantics as with `Run Keyword`. Example, a simple if/else construct: | ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` | | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | | `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` | In this example, only either `Some Action` or `Another Action` is executed, based on the status of `My Keyword`. Instead of `Run Keyword And Ignore Error` you can also use `Run Keyword And Return Status`. Variables used like ``${variable}``, as in the examples above, are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Example: | `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` | This keyword supports also optional ELSE and ELSE IF branches. Both of them are defined in ``*args`` and must use exactly format ``ELSE`` or ``ELSE IF``, respectively. ELSE branches must contain first the name of the keyword to execute and then its possible arguments. ELSE IF branches must first contain a condition, like the first argument to this keyword, and then the keyword to execute and its possible arguments. It is possible to have ELSE branch after ELSE IF and to have multiple ELSE IF branches. Nested `Run Keyword If` usage is not supported when using ELSE and/or ELSE IF branches. 
Given previous example, if/else construct can also be created like this: | ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` | | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` | The return value of this keyword is the return value of the actually executed keyword or Python ``None`` if no keyword was executed (i.e. if ``condition`` was false). Hence, it is recommended to use ELSE and/or ELSE IF branches to conditionally assign return values from keyword to variables (see `Set Variable If` if you need to set fixed values conditionally). This is illustrated by the example below: | ${var1} = | `Run Keyword If` | ${rc} == 0 | `Some keyword returning a value` | | ... | ELSE IF | 0 < ${rc} < 42 | `Another keyword` | | ... | ELSE IF | ${rc} < 0 | `Another keyword with args` | ${rc} | arg2 | | ... | ELSE | `Final keyword to handle abnormal cases` | ${rc} | | ${var2} = | `Run Keyword If` | ${condition} | `Some keyword` | In this example, ${var2} will be set to ``None`` if ${condition} is false. Notice that ``ELSE`` and ``ELSE IF`` control words must be used explicitly and thus cannot come from variables. If you need to use literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape them with a backslash like ``\\ELSE`` and ``\\ELSE IF``. Python's [http://docs.python.org/library/os.html|os] and [http://docs.python.org/library/sys.html|sys] modules are automatically imported when evaluating the ``condition``. Attributes they contain can thus be used in the condition: | `Run Keyword If` | os.sep == '/' | `Unix Keyword` | | ... | ELSE IF | sys.platform.startswith('java') | `Jython Keyword` | | ... | ELSE | `Windows Keyword` | """ args, branch = self._split_elif_or_else_branch(args) if self._is_true(condition): return self.run_keyword(name, *args) return branch() def _split_elif_or_else_branch(self, args): if 'ELSE IF' in args: args, branch = self._split_branch(args, 'ELSE IF', 2, 'condition and keyword') return args, lambda: self.run_keyword_if(*branch) if 'ELSE' in args: args, branch = self._split_branch(args, 'ELSE', 1, 'keyword') return args, lambda: self.run_keyword(*branch) return args, lambda: None def _split_branch(self, args, control_word, required, required_error): index = list(args).index(control_word) branch = self._variables.replace_list(args[index+1:], required) if len(branch) < required: raise DataError('%s requires %s.' % (control_word, required_error)) return args[:index], branch @run_keyword_variant(resolve=2) def run_keyword_unless(self, condition, name, *args): """Runs the given keyword with the given arguments if ``condition`` is false. See `Run Keyword If` for more information and an example. Notice that this keyword does not support ``ELSE`` or ``ELSE IF`` branches like `Run Keyword If` does, though. """ if not self._is_true(condition): return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_and_ignore_error(self, name, *args): """Runs the given keyword with the given arguments and ignores possible error. This keyword returns two values, so that the first is either string ``PASS`` or ``FAIL``, depending on the status of the executed keyword. The second value is either the return value of the keyword or the received error message. See `Run Keyword And Return Status` If you are only interested in the execution status. The keyword name and arguments work as in `Run Keyword`. See `Run Keyword If` for a usage example. 
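
        A minimal example (`My Keyword` is a placeholder for any keyword):
        | ${status} | ${value} = | Run Keyword And Ignore Error | My Keyword | arg |
        | Run Keyword If | '${status}' == 'FAIL' | Log | Keyword failed: ${value} |
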
Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Otherwise this keyword itself never fails. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: return 'PASS', self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise return 'FAIL', unic(err) @run_keyword_variant(resolve=1) def run_keyword_and_return_status(self, name, *args): """Runs the given keyword with given arguments and returns the status as a Boolean value. This keyword returns Boolean ``True`` if the keyword that is executed succeeds and ``False`` if it fails. This is useful, for example, in combination with `Run Keyword If`. If you are interested in the error message or return value, use `Run Keyword And Ignore Error` instead. The keyword name and arguments work as in `Run Keyword`. Example: | ${passed} = | `Run Keyword And Return Status` | Keyword | args | | `Run Keyword If` | ${passed} | Another keyword | Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Otherwise this keyword itself never fails. """ status, _ = self.run_keyword_and_ignore_error(name, *args) return status == 'PASS' @run_keyword_variant(resolve=1) def run_keyword_and_continue_on_failure(self, name, *args): """Runs the keyword and continues execution even if a failure occurs. The keyword name and arguments work as with `Run Keyword`. Example: | Run Keyword And Continue On Failure | Fail | This is a stupid example | | Log | This keyword is executed | The execution is not continued if the failure is caused by invalid syntax, timeout, or fatal exception. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: return self.run_keyword(name, *args) except ExecutionFailed as err: if not err.dont_continue: err.continue_on_failure = True raise err @run_keyword_variant(resolve=2) def run_keyword_and_expect_error(self, expected_error, name, *args): """Runs the keyword and checks that the expected error occurred. The keyword to execute and its arguments are specified using ``name`` and ``*args`` exactly like with `Run Keyword`. The expected error must be given in the same format as in Robot Framework reports. By default it is interpreted as a glob pattern with ``*``, ``?`` and ``[chars]`` as wildcards, but starting from Robot Framework 3.1 that can be changed by using various prefixes explained in the table below. Prefixes are case-sensitive and they must be separated from the actual message with a colon and an optional space like ``PREFIX: Message`` or ``PREFIX:Message``. | = Prefix = | = Explanation = | | ``EQUALS`` | Exact match. Especially useful if the error contains glob wildcards. | | ``STARTS`` | Error must start with the specified error. | | ``REGEXP`` | Regular expression match. | | ``GLOB`` | Same as the default behavior. | See the `Pattern matching` section for more information about glob patterns and regular expressions. If the expected error occurs, the error message is returned and it can be further processed or tested if needed. If there is no error, or the error does not match the expected error, this keyword fails. Examples: | Run Keyword And Expect Error | My error | Keyword | arg | | Run Keyword And Expect Error | ValueError: * | Some Keyword | | Run Keyword And Expect Error | STARTS: ValueError: | Some Keyword | | Run Keyword And Expect Error | EQUALS:No match for '//input[@type="text"]' | | ... 
| Find Element | //input[@type="text"] | | ${msg} = | Run Keyword And Expect Error | * | | ... | Keyword | arg1 | arg2 | | Log To Console | ${msg} | Errors caused by invalid syntax, timeouts, or fatal exceptions are not caught by this keyword. Since Robot Framework 2.9, variable errors are caught by this keyword. """ try: self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise error = err.message else: raise AssertionError("Expected error '%s' did not occur." % expected_error) if not self._error_is_expected(error, expected_error): raise AssertionError("Expected error '%s' but got '%s'." % (expected_error, error)) return error def _error_is_expected(self, error, expected_error): glob = self._matches matchers = {'GLOB': glob, 'EQUALS': lambda s, p: s == p, 'STARTS': lambda s, p: s.startswith(p), 'REGEXP': lambda s, p: re.match(p, s) is not None} prefixes = tuple(prefix + ':' for prefix in matchers) if not expected_error.startswith(prefixes): return glob(error, expected_error) prefix, expected_error = expected_error.split(':', 1) return matchers[prefix](error, expected_error.lstrip()) @run_keyword_variant(resolve=2) def repeat_keyword(self, repeat, name, *args): """Executes the specified keyword multiple times. ``name`` and ``args`` define the keyword that is executed similarly as with `Run Keyword`. ``repeat`` specifies how many times (as a count) or how long time (as a timeout) the keyword should be executed. If ``repeat`` is given as count, it specifies how many times the keyword should be executed. ``repeat`` can be given as an integer or as a string that can be converted to an integer. If it is a string, it can have postfix ``times`` or ``x`` (case and space insensitive) to make the expression more explicit. If ``repeat`` is given as timeout, it must be in Robot Framework's time format (e.g. ``1 minute``, ``2 min 3 s``). Using a number alone (e.g. ``1`` or ``1.5``) does not work in this context. If ``repeat`` is zero or negative, the keyword is not executed at all. This keyword fails immediately if any of the execution rounds fails. Examples: | Repeat Keyword | 5 times | Go to Previous Page | | Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 | | Repeat Keyword | 2 minutes | Some Keyword | arg1 | arg2 | Specifying ``repeat`` as a timeout is new in Robot Framework 3.0. """ try: count = self._get_repeat_count(repeat) except RuntimeError as err: timeout = self._get_repeat_timeout(repeat) if timeout is None: raise err keywords = self._keywords_repeated_by_timeout(timeout, name, args) else: keywords = self._keywords_repeated_by_count(count, name, args) self._run_keywords(keywords) def _get_repeat_count(self, times, require_postfix=False): times = normalize(str(times)) if times.endswith('times'): times = times[:-5] elif times.endswith('x'): times = times[:-1] elif require_postfix: raise ValueError return self._convert_to_integer(times) def _get_repeat_timeout(self, timestr): try: float(timestr) except ValueError: pass else: return None try: return timestr_to_secs(timestr) except ValueError: return None def _keywords_repeated_by_count(self, count, name, args): if count <= 0: self.log("Keyword '%s' repeated zero times." % name) for i in range(count): self.log("Repeating keyword, round %d/%d." % (i + 1, count)) yield name, args def _keywords_repeated_by_timeout(self, timeout, name, args): if timeout <= 0: self.log("Keyword '%s' repeated zero times." 
% name) repeat_round = 0 maxtime = time.time() + timeout while time.time() < maxtime: repeat_round += 1 self.log("Repeating keyword, round %d, %s remaining." % (repeat_round, secs_to_timestr(maxtime - time.time(), compact=True))) yield name, args @run_keyword_variant(resolve=3) def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args): """Runs the specified keyword and retries if it fails. ``name`` and ``args`` define the keyword that is executed similarly as with `Run Keyword`. How long to retry running the keyword is defined using ``retry`` argument either as timeout or count. ``retry_interval`` is the time to wait before trying to run the keyword again after the previous run has failed. If ``retry`` is given as timeout, it must be in Robot Framework's time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is explained in an appendix of Robot Framework User Guide. If it is given as count, it must have ``times`` or ``x`` postfix (e.g. ``5 times``, ``10 x``). ``retry_interval`` must always be given in Robot Framework's time format. If the keyword does not succeed regardless of retries, this keyword fails. If the executed keyword passes, its return value is returned. Examples: | Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument | | ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword | All normal failures are caught by this keyword. Errors caused by invalid syntax, test or keyword timeouts, or fatal exceptions (caused e.g. by `Fatal Error`) are not caught. Running the same keyword multiple times inside this keyword can create lots of output and considerably increase the size of the generated output files. It is possible to remove unnecessary keywords from the outputs using ``--RemoveKeywords WUKS`` command line option. Support for specifying ``retry`` as a number of times to retry is a new feature in Robot Framework 2.9. Since Robot Framework 2.9, variable errors are caught by this keyword. """ maxtime = count = -1 try: count = self._get_repeat_count(retry, require_postfix=True) except ValueError: timeout = timestr_to_secs(retry) maxtime = time.time() + timeout message = 'for %s' % secs_to_timestr(timeout) else: if count <= 0: raise ValueError('Retry count %d is not positive.' % count) message = '%d time%s' % (count, s(count)) retry_interval = timestr_to_secs(retry_interval) while True: try: return self.run_keyword(name, *args) except ExecutionFailed as err: if err.dont_continue: raise count -= 1 if time.time() > maxtime > 0 or count == 0: raise AssertionError("Keyword '%s' failed after retrying " "%s. The last error was: %s" % (name, message, err)) self._sleep_in_parts(retry_interval) @run_keyword_variant(resolve=1) def set_variable_if(self, condition, *values): """Sets variable based on the given condition. The basic usage is giving a condition and two values. The given condition is first evaluated the same way as with the `Should Be True` keyword. If the condition is true, then the first value is returned, and otherwise the second value is returned. The second value can also be omitted, in which case it has a default value None. This usage is illustrated in the examples below, where ``${rc}`` is assumed to be zero. 
| ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero | | ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 | | ${var3} = | Set Variable If | ${rc} > 0 | whatever | | => | ${var1} = 'zero' | ${var2} = 'value2' | ${var3} = None It is also possible to have 'else if' support by replacing the second value with another condition, and having two new values after it. If the first condition is not true, the second is evaluated and one of the values after it is returned based on its truth value. This can be continued by adding more conditions without a limit. | ${var} = | Set Variable If | ${rc} == 0 | zero | | ... | ${rc} > 0 | greater than zero | less than zero | | | | ${var} = | Set Variable If | | ... | ${rc} == 0 | zero | | ... | ${rc} == 1 | one | | ... | ${rc} == 2 | two | | ... | ${rc} > 2 | greater than two | | ... | ${rc} < 0 | less than zero | Use `Get Variable Value` if you need to set variables dynamically based on whether a variable exists or not. """ values = self._verify_values_for_set_variable_if(list(values)) if self._is_true(condition): return self._variables.replace_scalar(values[0]) values = self._verify_values_for_set_variable_if(values[1:], True) if len(values) == 1: return self._variables.replace_scalar(values[0]) return self.run_keyword('BuiltIn.Set Variable If', *values[0:]) def _verify_values_for_set_variable_if(self, values, default=False): if not values: if default: return [None] raise RuntimeError('At least one value is required') if is_list_var(values[0]): values[:1] = [escape(item) for item in self._variables[values[0]]] return self._verify_values_for_set_variable_if(values) return values @run_keyword_variant(resolve=1) def run_keyword_if_test_failed(self, name, *args): """Runs the given keyword with the given arguments, if the test failed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. """ test = self._get_test_in_teardown('Run Keyword If Test Failed') if not test.passed: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_test_passed(self, name, *args): """Runs the given keyword with the given arguments, if the test passed. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. Prior to Robot Framework 2.9 failures in test teardown itself were not detected by this keyword. """ test = self._get_test_in_teardown('Run Keyword If Test Passed') if test.passed: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_timeout_occurred(self, name, *args): """Runs the given keyword if either a test or a keyword timeout has occurred. This keyword can only be used in a test teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ self._get_test_in_teardown('Run Keyword If Timeout Occurred') if self._context.timeout_occurred: return self.run_keyword(name, *args) def _get_test_in_teardown(self, kwname): ctx = self._context if ctx.test and ctx.in_test_teardown: return ctx.test raise RuntimeError("Keyword '%s' can only be used in test teardown."
% kwname) @run_keyword_variant(resolve=1) def run_keyword_if_all_critical_tests_passed(self, name, *args): """Runs the given keyword with the given arguments, if all critical tests passed. This keyword can only be used in suite teardown. Trying to use it in any other place will result in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If ' 'All Critical Tests Passed') if suite.statistics.critical.failed == 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_any_critical_tests_failed(self, name, *args): """Runs the given keyword with the given arguments, if any critical tests failed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If ' 'Any Critical Tests Failed') if suite.statistics.critical.failed > 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_all_tests_passed(self, name, *args): """Runs the given keyword with the given arguments, if all tests passed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed') if suite.statistics.all.failed == 0: return self.run_keyword(name, *args) @run_keyword_variant(resolve=1) def run_keyword_if_any_tests_failed(self, name, *args): """Runs the given keyword with the given arguments, if one or more tests failed. This keyword can only be used in a suite teardown. Trying to use it anywhere else results in an error. Otherwise, this keyword works exactly like `Run Keyword`, see its documentation for more details. """ suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed') if suite.statistics.all.failed > 0: return self.run_keyword(name, *args) def _get_suite_in_teardown(self, kwname): if not self._context.in_suite_teardown: raise RuntimeError("Keyword '%s' can only be used in suite teardown." % kwname) return self._context.suite class _Control(_BuiltInBase): def continue_for_loop(self): """Skips the current for loop iteration and continues from the next. Skips the remaining keywords in the current for loop iteration and continues from the next one. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop | | | Do Something | ${var} | See `Continue For Loop If` to conditionally continue a for loop without using `Run Keyword If` or other wrapper keywords. """ self.log("Continuing for loop from the next iteration.") raise ContinueForLoop() def continue_for_loop_if(self, condition): """Skips the current for loop iteration if the ``condition`` is true. A wrapper for `Continue For Loop` to continue a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Continue For Loop If | '${var}' == 'CONTINUE' | | | Do Something | ${var} | """ if self._is_true(condition): self.continue_for_loop() def exit_for_loop(self): """Stops executing the enclosing for loop. 
Exits the enclosing for loop and continues execution after it. Can be used directly in a for loop or in a keyword that the loop uses. Example: | :FOR | ${var} | IN | @{VALUES} | | | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop | | | Do Something | ${var} | See `Exit For Loop If` to conditionally exit a for loop without using `Run Keyword If` or other wrapper keywords. """ self.log("Exiting for loop altogether.") raise ExitForLoop() def exit_for_loop_if(self, condition): """Stops executing the enclosing for loop if the ``condition`` is true. A wrapper for `Exit For Loop` to exit a for loop based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | :FOR | ${var} | IN | @{VALUES} | | | Exit For Loop If | '${var}' == 'EXIT' | | | Do Something | ${var} | """ if self._is_true(condition): self.exit_for_loop() @run_keyword_variant(resolve=0) def return_from_keyword(self, *return_values): """Returns from the enclosing user keyword. This keyword can be used to return from a user keyword with PASS status without executing it fully. It is also possible to return values similarly as with the ``[Return]`` setting. For more detailed information about working with the return values, see the User Guide. This keyword is typically wrapped to some other keyword, such as `Run Keyword If` or `Run Keyword If Test Passed`, to return based on a condition: | Run Keyword If | ${rc} < 0 | Return From Keyword | | Run Keyword If Test Passed | Return From Keyword | It is possible to use this keyword to return from a keyword also inside a for loop. That, as well as returning values, is demonstrated by the `Find Index` keyword in the following somewhat advanced example. Notice that it is often a good idea to move this kind of complicated logic into a test library. | ***** Variables ***** | @{LIST} = foo baz | | ***** Test Cases ***** | Example | ${index} = Find Index baz @{LIST} | Should Be Equal ${index} ${1} | ${index} = Find Index non existing @{LIST} | Should Be Equal ${index} ${-1} | | ***** Keywords ***** | Find Index | [Arguments] ${element} @{items} | ${index} = Set Variable ${0} | :FOR ${item} IN @{items} | \\ Run Keyword If '${item}' == '${element}' Return From Keyword ${index} | \\ ${index} = Set Variable ${index + 1} | Return From Keyword ${-1} # Also [Return] would work here. The most common use case, returning based on an expression, can be accomplished directly with `Return From Keyword If`. See also `Run Keyword And Return` and `Run Keyword And Return If`. """ self._return_from_keyword(return_values) def _return_from_keyword(self, return_values=None, failures=None): self.log('Returning from the enclosing user keyword.') raise ReturnFromKeyword(return_values, failures) @run_keyword_variant(resolve=1) def return_from_keyword_if(self, condition, *return_values): """Returns from the enclosing user keyword if ``condition`` is true. A wrapper for `Return From Keyword` to return based on the given condition. The condition is evaluated using the same semantics as with `Should Be True` keyword. Given the same example as in `Return From Keyword`, we can rewrite the `Find Index` keyword as follows: | ***** Keywords ***** | Find Index | [Arguments] ${element} @{items} | ${index} = Set Variable ${0} | :FOR ${item} IN @{items} | \\ Return From Keyword If '${item}' == '${element}' ${index} | \\ ${index} = Set Variable ${index + 1} | Return From Keyword ${-1} # Also [Return] would work here. 
See also `Run Keyword And Return` and `Run Keyword And Return If`. """ if self._is_true(condition): self._return_from_keyword(return_values) @run_keyword_variant(resolve=1) def run_keyword_and_return(self, name, *args): """Runs the specified keyword and returns from the enclosing user keyword. The keyword to execute is defined with ``name`` and ``*args`` exactly like with `Run Keyword`. After running the keyword, returns from the enclosing user keyword and passes possible return value from the executed keyword further. Returning from a keyword has exactly same semantics as with `Return From Keyword`. Example: | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 | | # Above is equivalent to: | | ${result} = | `My Keyword` | arg1 | arg2 | | `Return From Keyword` | ${result} | | | Use `Run Keyword And Return If` if you want to run keyword and return based on a condition. """ try: ret = self.run_keyword(name, *args) except ExecutionFailed as err: self._return_from_keyword(failures=[err]) else: self._return_from_keyword(return_values=[escape(ret)]) @run_keyword_variant(resolve=2) def run_keyword_and_return_if(self, condition, name, *args): """Runs the specified keyword and returns from the enclosing user keyword. A wrapper for `Run Keyword And Return` to run and return based on the given ``condition``. The condition is evaluated using the same semantics as with `Should Be True` keyword. Example: | `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 | | # Above is equivalent to: | | `Run Keyword If` | ${rc} > 0 | `Run Keyword And Return` | `My Keyword ` | arg1 | arg2 | Use `Return From Keyword If` if you want to return a certain value based on a condition. """ if self._is_true(condition): self.run_keyword_and_return(name, *args) def pass_execution(self, message, *tags): """Skips rest of the current test, setup, or teardown with PASS status. This keyword can be used anywhere in the test data, but the place where used affects the behavior: - When used in any setup or teardown (suite, test or keyword), passes that setup or teardown. Possible keyword teardowns of the started keywords are executed. Does not affect execution or statuses otherwise. - When used in a test outside setup or teardown, passes that particular test case. Possible test and keyword teardowns are executed. Possible continuable failures before this keyword is used, as well as failures in executed teardowns, will fail the execution. It is mandatory to give a message explaining why execution was passed. By default the message is considered plain text, but starting it with ``*HTML*`` allows using HTML formatting. It is also possible to modify test tags passing tags after the message similarly as with `Fail` keyword. Tags starting with a hyphen (e.g. ``-regression``) are removed and others added. Tags are modified using `Set Tags` and `Remove Tags` internally, and the semantics setting and removing them are the same as with these keywords. Examples: | Pass Execution | All features available in this version tested. | | Pass Execution | Deprecated test. | deprecated | -regression | This keyword is typically wrapped to some other keyword, such as `Run Keyword If`, to pass based on a condition. The most common case can be handled also with `Pass Execution If`: | Run Keyword If | ${rc} < 0 | Pass Execution | Negative values are cool. | | Pass Execution If | ${rc} < 0 | Negative values are cool. | Passing execution in the middle of a test, setup or teardown should be used with care. 
In the worst case it leads to tests that skip all the parts that could actually uncover problems in the tested application. In cases where execution cannot continue due to external factors, it is often safer to fail the test case and make it non-critical. """ message = message.strip() if not message: raise RuntimeError('Message cannot be empty.') self._set_and_remove_tags(tags) log_message, level = self._get_logged_test_message_and_level(message) self.log('Execution passed with message:\n%s' % log_message, level) raise PassExecution(message) @run_keyword_variant(resolve=1) def pass_execution_if(self, condition, message, *tags): """Conditionally skips rest of the current test, setup, or teardown with PASS status. A wrapper for `Pass Execution` to skip rest of the current test, setup or teardown based on the given ``condition``. The condition is evaluated similarly as with `Should Be True` keyword, and ``message`` and ``*tags`` have the same semantics as with `Pass Execution`. Example: | :FOR | ${var} | IN | @{VALUES} | | | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found | | | Do Something | ${var} | """ if self._is_true(condition): message = self._variables.replace_string(message) tags = self._variables.replace_list(tags) self.pass_execution(message, *tags) class _Misc(_BuiltInBase): def no_operation(self): """Does absolutely nothing.""" def sleep(self, time_, reason=None): """Pauses the test execution for the given time. ``time`` may be either a number or a time string. Time strings are in a format such as ``1 day 2 hours 3 minutes 4 seconds 5milliseconds`` or ``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of Robot Framework User Guide. Optional `reason` can be used to explain why sleeping is necessary. Both the time slept and the reason are logged. Examples: | Sleep | 42 | | Sleep | 1.5 | | Sleep | 2 minutes 10 seconds | | Sleep | 10s | Wait for a reply | """ seconds = timestr_to_secs(time_) # Python hangs with negative values if seconds < 0: seconds = 0 self._sleep_in_parts(seconds) self.log('Slept %s' % secs_to_timestr(seconds)) if reason: self.log(reason) def _sleep_in_parts(self, seconds): # time.sleep can't be stopped in windows # to ensure that we can signal stop (with timeout) # split sleeping to small pieces endtime = time.time() + float(seconds) while True: remaining = endtime - time.time() if remaining <= 0: break time.sleep(min(remaining, 0.01)) def catenate(self, *items): """Catenates the given items together and returns the resulting string. By default, items are catenated with spaces, but if the first item contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is used instead. Items are converted into strings when necessary. Examples: | ${str1} = | Catenate | Hello | world | | | ${str2} = | Catenate | SEPARATOR=--- | Hello | world | | ${str3} = | Catenate | SEPARATOR= | Hello | world | => | ${str1} = 'Hello world' | ${str2} = 'Hello---world' | ${str3} = 'Helloworld' """ if not items: return '' items = [unic(item) for item in items] if items[0].startswith('SEPARATOR='): sep = items[0][len('SEPARATOR='):] items = items[1:] else: sep = ' ' return sep.join(items) def log(self, message, level='INFO', html=False, console=False, repr=False): u"""Logs the given message with the given level. Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR. Messages below the current active log level are ignored. See `Set Log Level` keyword and ``--loglevel`` command line option for more details about setting the level.
Messages logged with the WARN or ERROR levels will be automatically visible also in the console and in the Test Execution Errors section in the log file. Logging can be configured using optional ``html``, ``console`` and ``repr`` arguments. They are off by default, but can be enabled by giving them a true value. See `Boolean arguments` section for more information about true and false values. If the ``html`` argument is given a true value, the message will be considered HTML and special characters such as ``<`` in it are not escaped. For example, logging ``<img src="image.png">`` creates an image when ``html`` is true, but otherwise the message is that exact string. An alternative to using the ``html`` argument is using the HTML pseudo log level. It logs the message as HTML using the INFO level. If the ``console`` argument is true, the message will be written to the console where test execution was started from in addition to the log file. This keyword always uses the standard output stream and adds a newline after the written message. Use `Log To Console` instead if either of these is undesirable. If the ``repr`` argument is true, the given item will be passed through a custom version of Python's ``pprint.pformat()`` function before logging it. This is useful, for example, when working with strings or bytes containing invisible characters, or when working with nested data structures. The custom version differs from the standard one so that it omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to byte strings on Python 2. Examples: | Log | Hello, world! | | | # Normal INFO message. | | Log | Warning, world! | WARN | | # Warning. | | Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. | | Log | <b>Hello</b>, world! | HTML | | # Same as above. | | Log | <b>Hello</b>, world! | DEBUG | html=true | # DEBUG as HTML. | | Log | Hello, console! | console=yes | | # Log also to the console. | | Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. | See `Log Many` if you want to log multiple messages in one go, and `Log To Console` if you only want to write to the console. """ if is_truthy(repr): message = prepr(message, width=80) logger.write(message, level, is_truthy(html)) if is_truthy(console): logger.console(message) @run_keyword_variant(resolve=0) def log_many(self, *messages): """Logs the given messages as separate entries using the INFO level. Supports also logging list and dictionary variable items individually. Examples: | Log Many | Hello | ${var} | | Log Many | @{list} | &{dict} | See `Log` and `Log To Console` keywords if you want to use alternative log levels, use HTML, or log to the console. """ for msg in self._yield_logged_messages(messages): self.log(msg) def _yield_logged_messages(self, messages): for msg in messages: var = VariableSplitter(msg) value = self._variables.replace_scalar(msg) if var.is_list_variable(): for item in value: yield item elif var.is_dict_variable(): for name, value in value.items(): yield '%s=%s' % (name, value) else: yield value def log_to_console(self, message, stream='STDOUT', no_newline=False): """Logs the given message to the console. By default uses the standard output stream. Using the standard error stream is possible by giving the ``stream`` argument value ``STDERR`` (case-insensitive). By default appends a newline to the logged message. This can be disabled by giving the ``no_newline`` argument a true value (see `Boolean arguments`). Examples: | Log To Console | Hello, console!
| | | Log To Console | Hello, stderr! | STDERR | | Log To Console | Message starts here and is | no_newline=true | | Log To Console | continued without newline. | | This keyword does not log the message to the normal log file. Use `Log` keyword, possibly with argument ``console``, if that is desired. """ logger.console(message, newline=is_falsy(no_newline), stream=stream) @run_keyword_variant(resolve=0) def comment(self, *messages): """Displays the given messages in the log file as keyword arguments. This keyword does nothing with the arguments it receives, but as they are visible in the log, this keyword can be used to display simple messages. Given arguments are ignored so thoroughly that they can even contain non-existing variables. If you are interested in variable values, you can use the `Log` or `Log Many` keywords. """ pass def set_log_level(self, level): """Sets the log threshold to the specified level and returns the old level. Messages below the level will not be logged. The default logging level is INFO, but it can be overridden with the command line option ``--loglevel``. The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and NONE (no logging). """ try: old = self._context.output.set_log_level(level) except DataError as err: raise RuntimeError(unic(err)) self._namespace.variables.set_global('${LOG_LEVEL}', level.upper()) self.log('Log level changed from %s to %s.' % (old, level.upper())) return old def reload_library(self, name_or_instance): """Rechecks what keywords the specified library provides. Can be called explicitly in the test data or by a library itself when keywords it provides have changed. The library can be specified by its name or as the active instance of the library. The latter is especially useful if the library itself calls this keyword as a method. New in Robot Framework 2.9. """ library = self._namespace.reload_library(name_or_instance) self.log('Reloaded library %s with %s keywords.' % (library.name, len(library))) @run_keyword_variant(resolve=0) def import_library(self, name, *args): """Imports a library with the given name and optional arguments. This functionality allows dynamic importing of libraries while tests are running. That may be necessary, if the library itself is dynamic and not yet available when test data is processed. In a normal case, libraries should be imported using the Library setting in the Setting table. This keyword supports importing libraries both using library names and physical paths. When paths are used, they must be given in absolute format or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separators in all operating systems. It is possible to pass arguments to the imported library and also named argument syntax works if the library supports it. ``WITH NAME`` syntax can be used to give a custom name to the imported library. Examples: | Import Library | MyLibrary | | Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 | | Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib | """ try: self._namespace.import_library(name, list(args)) except DataError as err: raise RuntimeError(unic(err)) @run_keyword_variant(resolve=0) def import_variables(self, path, *args): """Imports a variable file with the given path and optional arguments.
Variables imported with this keyword are set into the test suite scope similarly as when importing them in the Setting table using the Variables setting. These variables override possible existing variables with the same names. This functionality can thus be used to import new variables, for example, for each test in a test suite. The given path must be absolute or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separator regardless of the operating system. Examples: | Import Variables | ${CURDIR}/variables.py | | | | Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 | | Import Variables | file_from_pythonpath.py | | | """ try: self._namespace.import_variables(path, list(args), overwrite=True) except DataError as err: raise RuntimeError(unic(err)) @run_keyword_variant(resolve=0) def import_resource(self, path): """Imports a resource file with the given path. Resources imported with this keyword are set into the test suite scope similarly as when importing them in the Setting table using the Resource setting. The given path must be absolute or found from [http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#pythonpath-jythonpath-and-ironpythonpath| search path]. Forward slashes can be used as path separator regardless of the operating system. Examples: | Import Resource | ${CURDIR}/resource.txt | | Import Resource | ${CURDIR}/../resources/resource.html | | Import Resource | found_from_pythonpath.robot | """ try: self._namespace.import_resource(path) except DataError as err: raise RuntimeError(unic(err)) def set_library_search_order(self, *search_order): """Sets the resolution order to use when a name matches multiple keywords. The library search order is used to resolve conflicts when a keyword name in the test data matches multiple keywords. The first library (or resource, see below) containing the keyword is selected and that keyword implementation is used. If the keyword is not found from any library (or resource), test execution fails the same way as when the search order is not set. When this keyword is used, there is no need to use the long ``LibraryName.Keyword Name`` notation. For example, instead of having | MyLibrary.Keyword | arg | | MyLibrary.Another Keyword | | MyLibrary.Keyword | xxx | you can have | Set Library Search Order | MyLibrary | | Keyword | arg | | Another Keyword | | Keyword | xxx | This keyword can be used also to set the order of keywords in different resource files. In this case resource names must be given without paths or extensions like: | Set Library Search Order | resource | another_resource | *NOTE:* - The search order is valid only in the suite where this keyword is used. - Keywords in resources always have higher priority than keywords in libraries regardless of the search order. - The old order is returned and can be used to reset the search order later. - Library and resource names in the search order are both case and space insensitive. """ return self._namespace.set_search_order(search_order) def keyword_should_exist(self, name, msg=None): """Fails unless the given keyword exists in the current scope. Fails also if there is more than one keyword with the same name. Works both with the short name (e.g. ``Log``) and the full name (e.g. ``BuiltIn.Log``). The default error message can be overridden with the ``msg`` argument. See also `Variable Should Exist`.
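Examples (added for illustration; ``My Keyword`` below is a hypothetical user keyword): | Keyword Should Exist | BuiltIn.Log | | Keyword Should Exist | My Keyword | msg=My Keyword is not available |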
""" try: runner = self._namespace.get_runner(name) except DataError as error: raise AssertionError(msg or error.message) if isinstance(runner, UserErrorHandler): raise AssertionError(msg or runner.error.message) def get_time(self, format='timestamp', time_='NOW'): """Returns the given time in the requested format. *NOTE:* DateTime library contains much more flexible keywords for getting the current date and time and for date and time handling in general. How time is returned is determined based on the given ``format`` string as follows. Note that all checks are case-insensitive. 1) If ``format`` contains the word ``epoch``, the time is returned in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC). The return value is always an integer. 2) If ``format`` contains any of the words ``year``, ``month``, ``day``, ``hour``, ``min``, or ``sec``, only the selected parts are returned. The order of the returned parts is always the one in the previous sentence and the order of words in ``format`` is not significant. The parts are returned as zero-padded strings (e.g. May -> ``05``). 3) Otherwise (and by default) the time is returned as a timestamp string in the format ``2006-02-24 15:08:31``. By default this keyword returns the current local time, but that can be altered using ``time`` argument as explained below. Note that all checks involving strings are case-insensitive. 1) If ``time`` is a number, or a string that can be converted to a number, it is interpreted as seconds since the UNIX epoch. This documentation was originally written about 1177654467 seconds after the epoch. 2) If ``time`` is a timestamp, that time will be used. Valid timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and ``YYYYMMDD hhmmss``. 3) If ``time`` is equal to ``NOW`` (default), the current local time is used. 4) If ``time`` is equal to ``UTC``, the current time in [http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC] is used. 5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1 hour 30 min``, the current local/UTC time plus/minus the time specified with the time string is used. The time string format is described in an appendix of Robot Framework User Guide. 
Examples (expecting the current local time is 2006-03-29 15:06:21): | ${time} = | Get Time | | | | | ${secs} = | Get Time | epoch | | | | ${year} = | Get Time | return year | | | | ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day | | @{time} = | Get Time | year month day hour min sec | | | | ${y} | ${s} = | Get Time | seconds and year | | => | ${time} = '2006-03-29 15:06:21' | ${secs} = 1143637581 | ${year} = '2006' | ${yyyy} = '2006', ${mm} = '03', ${dd} = '29' | @{time} = ['2006', '03', '29', '15', '06', '21'] | ${y} = '2006' | ${s} = '21' Examples (expecting the current local time is 2006-03-29 15:06:21 and UTC time is 2006-03-29 12:06:21): | ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds | | ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp | | ${year} = | Get Time | year | NOW | # The local time of execution | | @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time | | @{utc} = | Get Time | hour min sec | UTC | # The UTC time of execution | | ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time | => | ${time} = '2007-04-27 09:14:27' | ${secs} = 27 | ${year} = '2006' | @{time} = ['16', '08', '24'] | @{utc} = ['12', '06', '21'] | ${hour} = '11' """ return get_time(format, parse_time(time_)) def evaluate(self, expression, modules=None, namespace=None): """Evaluates the given expression in Python and returns the results. ``expression`` is evaluated in Python as explained in `Evaluating expressions`. ``modules`` argument can be used to specify a comma separated list of Python modules to be imported and added to the evaluation namespace. ``namespace`` argument can be used to pass a custom evaluation namespace as a dictionary. Possible ``modules`` are added to this namespace. Variables used like ``${variable}`` are replaced in the expression before evaluation. Variables are also available in the evaluation namespace and can be accessed using special syntax ``$variable``. This is a new feature in Robot Framework 2.9 and it is explained more thoroughly in `Evaluating expressions`. Examples (expecting ``${result}`` is 3.14): | ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' | | ${status} = | Evaluate | 0 < $result < 10 | # Using variable itself, not string representation | | ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys | | ${ns} = | Create Dictionary | x=${4} | y=${2} | | ${result} = | Evaluate | x*10 + y | namespace=${ns} | => | ${status} = True | ${random} = <random integer> | ${result} = 42 """ if is_string(expression) and '$' in expression: expression, variables = self._handle_variables_in_expression(expression) else: variables = {} namespace = self._create_evaluation_namespace(namespace, modules) try: if not is_string(expression): raise TypeError("Expression must be string, got %s." 
% type_name(expression)) if not expression: raise ValueError("Expression cannot be empty.") return eval(expression, namespace, variables) except: raise RuntimeError("Evaluating expression '%s' failed: %s" % (expression, get_error_message())) def _handle_variables_in_expression(self, expression): variables = None variable_started = False tokens = [] generated = generate_tokens(StringIO(expression).readline) for toknum, tokval, _, _, _ in generated: if variable_started: if toknum == token.NAME: if variables is None: variables = self._variables.as_dict(decoration=False) if tokval not in variables: variable_not_found('$%s' % tokval, variables, deco_braces=False) tokval = 'RF_VAR_' + tokval else: tokens.append((token.ERRORTOKEN, '$')) variable_started = False if toknum == token.ERRORTOKEN and tokval == '$': variable_started = True else: tokens.append((toknum, tokval)) if variables is None: return expression, {} decorated = [('RF_VAR_' + name, variables[name]) for name in variables] return untokenize(tokens).strip(), NormalizedDict(decorated, ignore='_') def _create_evaluation_namespace(self, namespace, modules): namespace = dict(namespace or {}) modules = modules.replace(' ', '').split(',') if modules else [] namespace.update((m, __import__(m)) for m in modules if m) return namespace def call_method(self, object, method_name, *args, **kwargs): """Calls the named method of the given object with the provided arguments. The possible return value from the method is returned and can be assigned to a variable. Keyword fails both if the object does not have a method with the given name or if executing the method raises an exception. Support for ``**kwargs`` is new in Robot Framework 2.9. Since that possible equal signs in other arguments must be escaped with a backslash like ``\\=``. Examples: | Call Method | ${hashtable} | put | myname | myvalue | | ${isempty} = | Call Method | ${hashtable} | isEmpty | | | Should Not Be True | ${isempty} | | | | | ${value} = | Call Method | ${hashtable} | get | myname | | Should Be Equal | ${value} | myvalue | | | | Call Method | ${object} | kwargs | name=value | foo=bar | | Call Method | ${object} | positional | escaped\\=equals | """ try: method = getattr(object, method_name) except AttributeError: raise RuntimeError("Object '%s' does not have method '%s'." % (object, method_name)) try: return method(*args, **kwargs) except: raise RuntimeError("Calling method '%s' failed: %s" % (method_name, get_error_message())) def regexp_escape(self, *patterns): """Returns each argument string escaped for use as a regular expression. This keyword can be used to escape strings to be used with `Should Match Regexp` and `Should Not Match Regexp` keywords. Escaping is done with Python's ``re.escape()`` function. Examples: | ${escaped} = | Regexp Escape | ${original} | | @{strings} = | Regexp Escape | @{strings} | """ if len(patterns) == 0: return '' if len(patterns) == 1: return re.escape(patterns[0]) return [re.escape(p) for p in patterns] def set_test_message(self, message, append=False): """Sets message for the current test case. If the optional ``append`` argument is given a true value (see `Boolean arguments`), the given ``message`` is added after the possible earlier message by joining the messages with a space. In test teardown this keyword can alter the possible failure message, but otherwise failures override messages set by this keyword. Notice that in teardown the message is available as a built-in variable ``${TEST MESSAGE}``. 
It is possible to use HTML format in the message by starting the message with ``*HTML*``. Examples: | Set Test Message | My message | | | Set Test Message | is continued. | append=yes | | Should Be Equal | ${TEST MESSAGE} | My message is continued. | | Set Test Message | `*`HTML`*` <b>Hello!</b> | | This keyword can not be used in suite setup or suite teardown. """ test = self._context.test if not test: raise RuntimeError("'Set Test Message' keyword cannot be used in " "suite setup or teardown.") test.message = self._get_new_text(test.message, message, append, handle_html=True) if self._context.in_test_teardown: self._variables.set_test("${TEST_MESSAGE}", test.message) message, level = self._get_logged_test_message_and_level(test.message) self.log('Set test message to:\n%s' % message, level) def _get_new_text(self, old, new, append, handle_html=False): if not is_unicode(new): new = unic(new) if not (is_truthy(append) and old): return new if handle_html: if new.startswith('*HTML*'): new = new[6:].lstrip() if not old.startswith('*HTML*'): old = '*HTML* %s' % html_escape(old) elif old.startswith('*HTML*'): new = html_escape(new) return '%s %s' % (old, new) def _get_logged_test_message_and_level(self, message): if message.startswith('*HTML*'): return message[6:].lstrip(), 'HTML' return message, 'INFO' def set_test_documentation(self, doc, append=False): """Sets documentation for the current test case. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. The current test documentation is available as a built-in variable ``${TEST DOCUMENTATION}``. This keyword can not be used in suite setup or suite teardown. """ test = self._context.test if not test: raise RuntimeError("'Set Test Documentation' keyword cannot be " "used in suite setup or teardown.") test.doc = self._get_new_text(test.doc, doc, append) self._variables.set_test('${TEST_DOCUMENTATION}', test.doc) self.log('Set test documentation to:\n%s' % test.doc) def set_suite_documentation(self, doc, append=False, top=False): """Sets documentation for the current test suite. By default the possible existing documentation is overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. This keyword sets the documentation of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the documentation of the top level suite is altered instead. The documentation of the current suite is available as a built-in variable ``${SUITE DOCUMENTATION}``. """ top = is_truthy(top) suite = self._get_context(top).suite suite.doc = self._get_new_text(suite.doc, doc, append) self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top) self.log('Set suite documentation to:\n%s' % suite.doc) def set_suite_metadata(self, name, value, append=False, top=False): """Sets metadata for the current test suite. By default possible existing metadata values are overwritten, but this can be changed using the optional ``append`` argument similarly as with `Set Test Message` keyword. This keyword sets the metadata of the current suite by default. If the optional ``top`` argument is given a true value (see `Boolean arguments`), the metadata of the top level suite is altered instead. The metadata of the current suite is available as a built-in variable ``${SUITE METADATA}`` in a Python dictionary. 
Notice that modifying this variable directly has no effect on the actual metadata the suite has. """ top = is_truthy(top) if not is_unicode(name): name = unic(name) metadata = self._get_context(top).suite.metadata original = metadata.get(name, '') metadata[name] = self._get_new_text(original, value, append) self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top) self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name])) def set_tags(self, *tags): """Adds given ``tags`` for the current test or all tests in a suite. When this keyword is used inside a test case, that test gets the specified tags and other tests are not affected. If this keyword is used in a suite setup, all test cases in that suite, recursively, get the given tags. It is a failure to use this keyword in a suite teardown. The current tags are available as a built-in variable ``@{TEST TAGS}``. See `Remove Tags` if you want to remove certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.add(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(tags, persist=True) else: raise RuntimeError("'Set Tags' cannot be used in suite teardown.") self.log('Set tag%s %s.' % (s(tags), seq2str(tags))) def remove_tags(self, *tags): """Removes given ``tags`` from the current test or all tests in a suite. Tags can be given exactly or using a pattern with ``*``, ``?`` and ``[chars]`` acting as wildcards. See the `Glob patterns` section for more information. This keyword can affect either one test case or all test cases in a test suite similarly as `Set Tags` keyword. The current tags are available as a built-in variable ``@{TEST TAGS}``. Example: | Remove Tags | mytag | something-* | ?ython | See `Set Tags` if you want to add certain tags and `Fail` if you want to fail the test case after setting and/or removing tags. """ ctx = self._context if ctx.test: ctx.test.tags.remove(tags) ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags)) elif not ctx.in_suite_teardown: ctx.suite.set_tags(remove=tags, persist=True) else: raise RuntimeError("'Remove Tags' cannot be used in suite teardown.") self.log('Removed tag%s %s.' % (s(tags), seq2str(tags))) def get_library_instance(self, name=None, all=False): """Returns the currently active instance of the specified test library. This keyword makes it easy for test libraries to interact with other test libraries that have state. This is illustrated by the Python example below: | from robot.libraries.BuiltIn import BuiltIn | | def title_should_start_with(expected): | seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary') | title = seleniumlib.get_title() | if not title.startswith(expected): | raise AssertionError("Title '%s' did not start with '%s'" | % (title, expected)) It is also possible to use this keyword in the test data and pass the returned library instance to another keyword. If a library is imported with a custom name, the ``name`` used to get the instance must be that name and not the original library name. If the optional argument ``all`` is given a true value, then a dictionary mapping all library names to instances will be returned. This feature is new in Robot Framework 2.9.2.
Example: | &{all libs} = | Get library instance | all=True | """ if is_truthy(all): return self._namespace.get_library_instances() try: return self._namespace.get_library_instance(name) except DataError as err: raise RuntimeError(unic(err)) class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc): """An always available standard library with often needed keywords. ``BuiltIn`` is Robot Framework's standard library that provides a set of generic keywords needed often. It is imported automatically and thus always available. The provided keywords can be used, for example, for verifications (e.g. `Should Be Equal`, `Should Contain`), conversions (e.g. `Convert To Integer`) and for various other purposes (e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`). == Table of contents == - `HTML error messages` - `Evaluating expressions` - `Boolean arguments` - `Pattern matching` - `Multiline string comparisons` - `Shortcuts` - `Keywords` = HTML error messages = Many of the keywords accept an optional error message to use if the keyword fails, and it is possible to use HTML in these messages by prefixing them with ``*HTML*``. See `Fail` keyword for a usage example. Notice that using HTML in messages is not limited to BuiltIn library but works with any error message. = Evaluating expressions = Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be True`, accept an expression that is evaluated in Python. These expressions are evaluated using Python's [http://docs.python.org/library/functions.html#eval|eval] function so that all Python built-ins like ``len()`` and ``int()`` are available. `Evaluate` allows configuring the execution namespace with custom modules, and other keywords have [http://docs.python.org/library/os.html|os] and [http://docs.python.org/library/sys.html|sys] modules available automatically. Examples: | `Run Keyword If` | os.sep == '/' | Log | Not on Windows | | ${random int} = | `Evaluate` | random.randint(0, 5) | modules=random | When a variable is used in the expression using the normal ``${variable}`` syntax, its value is replaced before the expression is evaluated. This means that the value used in the expression will be the string representation of the variable value, not the variable value itself. This is not a problem with numbers and other objects that have a string representation that can be evaluated directly, but with other objects the behavior depends on the string representation. Most importantly, strings must always be quoted, and if they can contain newlines, they must be triple quoted. Examples: | `Should Be True` | ${rc} < 10 | Return code greater than 10 | | `Run Keyword If` | '${status}' == 'PASS' | Log | Passed | | `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL | Starting from Robot Framework 2.9, variables themselves are automatically available in the evaluation namespace. They can be accessed using special variable syntax without the curly braces like ``$variable``. These variables should never be quoted, and in fact they are not even replaced inside strings. Examples: | `Should Be True` | $rc < 10 | Return code greater than 10 | | `Run Keyword If` | $status == 'PASS' | `Log` | Passed | | `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL | | `Should Be True` | len($result) > 1 and $result[1] == 'OK' | Using the ``$variable`` syntax slows down expression evaluation a little.
This should not typically matter, but should be taken into account if complex expressions are evaluated often and there are strict time constraints. Notice that instead of creating complicated expressions, it is often better to move the logic into a test library. That eases maintenance and can also enhance execution speed. = Boolean arguments = Some keywords accept arguments that are handled as Boolean values true or false. If such an argument is given as a string, it is considered false if it is an empty string or equal to ``FALSE``, ``NONE``, ``NO``, ``OFF`` or ``0``, case-insensitively. Keywords verifying something that allow dropping actual and expected values from the possible error message also consider string ``no values`` to be false. Other strings are considered true regardless of their value, and other argument types are tested using the same [http://docs.python.org/library/stdtypes.html#truth|rules as in Python]. True examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. | False examples: | `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. | | `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument | Considering string ``NONE`` false is new in Robot Framework 3.0.3 and considering also ``OFF`` and ``0`` false is new in Robot Framework 3.1. = Pattern matching = Many keywords accept arguments as either glob or regular expression patterns. == Glob patterns == Some keywords, for example `Should Match`, support so called [http://en.wikipedia.org/wiki/Glob_(programming)|glob patterns] where: | ``*`` | matches any string, even an empty string | | ``?`` | matches any single character | | ``[chars]`` | matches one character in the bracket | | ``[!chars]`` | matches one character not in the bracket | | ``[a-z]`` | matches one character from the range in the bracket | | ``[!a-z]`` | matches one character not from the range in the bracket | Unlike with glob patterns normally, path separator characters ``/`` and ``\\`` and the newline character ``\\n`` are matched by the above wildcards. Support for brackets like ``[abc]`` and ``[!a-z]`` is new in Robot Framework 3.1. == Regular expressions == Some keywords, for example `Should Match Regexp`, support [http://en.wikipedia.org/wiki/Regular_expression|regular expressions] that are more powerful but also more complicated than glob patterns. The regular expression support is implemented using Python's [http://docs.python.org/library/re.html|re module] and its documentation should be consulted for more information about the syntax. Because the backslash character (``\\``) is an escape character in Robot Framework test data, possible backslash characters in regular expressions need to be escaped with another backslash like ``\\\\d\\\\w+``.
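For example, the following (illustrative) test data uses such escaping to check that the given string contains ``error`` followed by three digits: | `Should Match Regexp` | error 404 | error \\\\d\\\\d\\\\d |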
Strings that may contain special characters but should be handled as literal strings can be escaped with the `Regexp Escape` keyword. = Multiline string comparisons = `Should Be Equal` and `Should Be Equal As Strings` report the failures using [http://en.wikipedia.org/wiki/Diff_utility#Unified_format|unified diff format] if both strings have more than two lines. New in Robot Framework 2.9.1. Example: | ${first} = | `Catenate` | SEPARATOR=\\n | Not in second | Same | Differs | Same | | ${second} = | `Catenate` | SEPARATOR=\\n | Same | Differs2 | Same | Not in first | | `Should Be Equal` | ${first} | ${second} | Results in the following error message: | Multiline strings are different: | --- first | +++ second | @@ -1,4 +1,4 @@ | -Not in second | Same | -Differs | +Differs2 | Same | +Not in first """ ROBOT_LIBRARY_SCOPE = 'GLOBAL' ROBOT_LIBRARY_VERSION = get_version() class RobotNotRunningError(AttributeError): """Used when something cannot be done because Robot is not running. Based on AttributeError to be backwards compatible with RF < 2.8.5. May later be based directly on Exception, so new code should except this exception explicitly. """ pass def register_run_keyword(library, keyword, args_to_process=None, deprecation_warning=True): """Registers 'run keyword' so that its arguments can be handled correctly. *NOTE:* This API will change in RF 3.1. For more information see https://github.com/robotframework/robotframework/issues/2190. Use with `deprecation_warning=False` to avoid related deprecation warnings. 1) Why is this method needed Keywords running other keywords internally (normally using `Run Keyword` or some variants of it in BuiltIn) must have the arguments meant for the internally executed keyword handled specially to prevent processing them twice. This is done ONLY for keywords registered using this method. If the registered keyword has the same name as any keyword from Robot Framework standard libraries, it can be used without getting warnings. Normally there is a warning in such cases unless the keyword is used in long format (e.g. MyLib.Keyword). Keywords executed by registered run keywords can be tested in dry-run mode if they have a 'name' argument which takes the name of the executed keyword. 2) How to use this method `library` is the name of the library where the registered keyword is implemented. `keyword` can be either a function or method implementing the keyword, or the name of the implemented keyword as a string. `args_to_process` is needed when `keyword` is given as a string, and it defines how many of the arguments to the registered keyword must be processed normally. When `keyword` is a method or function, this information is obtained directly from it so that varargs (those specified with syntax '*args') are not processed but others are.
3) Examples from robot.libraries.BuiltIn import BuiltIn, register_run_keyword def my_run_keyword(name, *args): # do something return BuiltIn().run_keyword(name, *args) # Either one of these works register_run_keyword(__name__, my_run_keyword) register_run_keyword(__name__, 'My Run Keyword', 1) ------------- from robot.libraries.BuiltIn import BuiltIn, register_run_keyword class MyLibrary: def my_run_keyword_if(self, expression, name, *args): # do something return BuiltIn().run_keyword_if(expression, name, *args) # Either one of these works register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if) register_run_keyword('MyLibrary', 'my_run_keyword_if', 2) """ RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process, deprecation_warning)
py
1a3df4c87f1d99a451d884103a708da9f775fc3f
#!/usr/bin/env python2
# -*- coding: utf-8 -*-


class Debug(object):
    """Simple helper that prints debug messages when debug mode is enabled."""

    def __init__(self, mode_debug):
        # Any value convertible to int; values greater than 0 enable output.
        self.mode_debug = mode_debug

    def show(self, text, color):
        # Print each line of the text separately, prefixed with "DEBUG: "
        # and wrapped in the given ANSI color code.
        if int(self.mode_debug) > 0:
            t = text.split("\n")
            for i in t:
                print(BColors.WARNING + "DEBUG: " + color + i + BColors.ENDC)


class BColors(object):
    # ANSI escape codes used to colorize console output.
    HEADER = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'

    def __init__(self):
        pass
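# Usage sketch (illustrative only; assumes the classes above are used as defined):
if __name__ == '__main__':
    dbg = Debug(mode_debug=1)
    # Prints two lines, each prefixed with "DEBUG: " and colored green.
    dbg.show("loading config\nconnecting to server", BColors.GREEN)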