| prompt (large_string, lengths 70-991k) | completion (large_string, lengths 0-1.02k) |
|---|---|
<|file_name|>LargeHeaderButton.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import classNames from 'classnames';
import { Button } from 'reakit';
import { Icon, IconName } from 'components';
require('./largeHeaderButton.scss');
type Props = {
active?: boolean;
className?: string;
disabled?: boolean;
icon?: React.ReactNode | IconName;
label?:
| {
top?: React.ReactNode;
bottom?: React.ReactNode;
}
| React.ReactNode;
onClick?: (...args: any[]) => any;
outerLabel?:
| {
top?: React.ReactNode;
bottom?: React.ReactNode;
}
| React.ReactNode;
showCaret?: boolean;
tagName?: string;
};
const LargeHeaderButton = React.forwardRef((props: Props, ref) => {
const {
active = false,
className = '',
disabled = false,
icon = null,
label,
onClick = null,
outerLabel,
showCaret = false,
tagName = 'button',
...restProps
} = props;
// @ts-expect-error ts-migrate(2339) FIXME: Property 'top' does not exist on type 'string | nu... Remove this comment to see the full error message
const hasStackedLabel = label && label.top && label.bottom;
// @ts-expect-error ts-migrate(2339) FIXME: Property 'top' does not exist on type 'string | nu... Remove this comment to see the full error message
const hasStackedOuterLabel = outerLabel && outerLabel.top && outerLabel.bottom;
return (
<Button
disabled={disabled}
// @ts-expect-error ts-migrate(2769) FIXME: No overload matches this call.
as={tagName}
className={classNames(
'large-header-button-component',
'pub-header-themed-box-hover-target',
className,
)}
onClick={onClick}
ref={ref}
{...restProps}
>
<div
className={classNames(
'button-box',
'pub-header-themed-box',
!label && 'no-label',
active && 'active',
)}
>
{typeof icon === 'string' ? <Icon icon={icon as any} iconSize={22} /> : icon}
{showCaret && <Icon icon="caret-down" className="caret" iconSize={10} />}
{label && !hasStackedLabel && <div className="label">{label}</div>}
{hasStackedLabel && (
<div className="stacked-label">
{/* @ts-expect-error ts-migrate(2533) FIXME: Object is possibly 'null' or 'undefined'. */}
<div className="top">{label.top}</div><|fim▁hole|> <div className="bottom pub-header-themed-secondary">{label.bottom}</div>
</div>
)}
</div>
{outerLabel && !hasStackedOuterLabel && <div className="outer-label">{outerLabel}</div>}
{hasStackedOuterLabel && (
<div className="outer-label">
{/* @ts-expect-error ts-migrate(2533) FIXME: Object is possibly 'null' or 'undefined'. */}
<div className="top pub-header-themed-secondary">{outerLabel!.top}</div>
{/* @ts-expect-error ts-migrate(2533) FIXME: Object is possibly 'null' or 'undefined'. */}
<div className="bottom">{outerLabel!.bottom}</div>
</div>
)}
</Button>
);
});
export default LargeHeaderButton;<|fim▁end|>
|
{/* @ts-expect-error ts-migrate(2533) FIXME: Object is possibly 'null' or 'undefined'. */}
|
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from flask_wtf import FlaskForm
from saylua.utils.form import sl_validators
from saylua.utils.form.fields import SlField, SlTextAreaField
class ForumThreadForm(FlaskForm):
title = SlField('Thread Title', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(3)])
body = SlTextAreaField('Thread Body', [<|fim▁hole|> sl_validators.NotBlank(),
sl_validators.Min(2)])
class ForumPostForm(FlaskForm):
body = SlTextAreaField('Post Content', [
sl_validators.Required(),
sl_validators.NotBlank(),
sl_validators.Min(2)])<|fim▁end|>
|
sl_validators.Required(),
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation app will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '111.222.333.444']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1<|fim▁hole|># If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "cloudSolarDB",
# Not used with sqlite3.
"USER": "valia",
# Not used with sqlite3.
"PASSWORD": "scenetwork",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "localhost",
# Set to empty string for default. Not used with sqlite3.
"PORT": "5432",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())<|fim▁end|>
| |
<|file_name|>utils_for_test.go<|end_file_name|><|fim▁begin|>package datastore
import (
"bytes"
"errors"
"io"
)
const (
emptyBlobName = "ZZ8FaUwURAkWvzbnRhTt2pWSJCYZMAELqPk9USTUJgC4"
)
var testBlobs = []struct {
name string
data []byte
}{
{"Pq2UxZQcWw2rN8iKPcteaSd4LeXYW2YphibQjmj3kUQC", []byte("Test")},
{"TZ4M9KMpYgLEPBxvo36FR4hDpgvuoxqiu1BLzeT3xLAr", []byte("Test1")},
{"ZZ8FaUwURAkWvzbnRhTt2pWSJCYZMAELqPk9USTUJgC4", []byte("")},
}
func emptyBlobReader() io.ReadCloser {
return io.NopCloser(bytes.NewBuffer([]byte{}))
}
type errorOnExists struct {
memory
}
func (a *errorOnExists) Exists(name string) (bool, error) {
return false, errors.New("Error")
}
type helperReader struct {
buf io.Reader
onRead func() error
onEOF func() error
onClose func() error
}
func bReader(b []byte, onRead func() error, onEOF func() error, onClose func() error) *helperReader {
nop := func() error {
return nil
}
<|fim▁hole|> onEOF = nop
}
if onClose == nil {
onClose = nop
}
return &helperReader{
buf: bytes.NewReader(b),
onRead: onRead,
onEOF: onEOF,
onClose: onClose,
}
}
func (h *helperReader) Read(b []byte) (n int, err error) {
err = h.onRead()
if err != nil {
return 0, err
}
n, err = h.buf.Read(b)
if err == io.EOF {
err = h.onEOF()
if err != nil {
return 0, err
}
return 0, io.EOF
}
return n, err
}
func (h *helperReader) Close() error {
return h.onClose()
}
func errPanic(e error) {
if e != nil {
panic("Unexpected error: " + e.Error())
}
}
func putBlob(n string, b []byte, c DS) {
e := c.Save(n, bReader(b, nil, nil, nil))
errPanic(e)
if !exists(c, n) {
panic("Blob does not exist: " + n)
}
}
func getBlob(n string, c DS) []byte {
r, e := c.Open(n)
errPanic(e)
d, e := io.ReadAll(r)
errPanic(e)
e = r.Close()
errPanic(e)
return d
}
func exists(c DS, n string) bool {
exists, err := c.Exists(n)
if err != nil {
panic("Invalid error detected when testing blob's existence: " + err.Error())
}
return exists
}
type memoryNoConsistencyCheck struct {
memory
}
func (m *memoryNoConsistencyCheck) Open(n string) (io.ReadCloser, error) {
m.rw.RLock()
defer m.rw.RUnlock()
b, ok := m.bmap[n]
if !ok {
return nil, ErrNotFound
}
return io.NopCloser(bytes.NewReader(b)), nil
}
func newMemoryNoConsistencyCheck() *memoryNoConsistencyCheck {
return &memoryNoConsistencyCheck{
memory: memory{
bmap: make(map[string][]byte),
},
}
}
type memoryBrokenAutoNamed struct {
memory
breaker func(string) string
}
func (m *memoryBrokenAutoNamed) SaveAutoNamed(r io.ReadCloser) (string, error) {
n, err := m.memory.SaveAutoNamed(r)
if err != nil {
return "", err
}
return m.breaker(n), nil
}
func newMemoryBrokenAutoNamed(breaker func(string) string) *memoryBrokenAutoNamed {
return &memoryBrokenAutoNamed{
memory: memory{
bmap: make(map[string][]byte),
},
breaker: breaker,
}
}<|fim▁end|>
|
if onRead == nil {
onRead = nop
}
if onEOF == nil {
|
<|file_name|>collision_object.cpp<|end_file_name|><|fim▁begin|>#include "torch-moveit.h"
#include <moveit/planning_scene_interface/planning_scene_interface.h>
#include "utils.h"
typedef std::shared_ptr<moveit_msgs::CollisionObject> CollisionObjectPtr;
MOVIMP(CollisionObjectPtr*, CollisionObject, new)()
{
CollisionObjectPtr *p = new CollisionObjectPtr(new moveit_msgs::CollisionObject());
(*p)->operation = moveit_msgs::CollisionObject::ADD;
return p;
}
MOVIMP(void, CollisionObject, delete)(CollisionObjectPtr *ptr)
{
if (ptr)
delete ptr;
}
MOVIMP(const char *, CollisionObject, getId)(CollisionObjectPtr *self)
{
return (*self)->id.c_str();
}<|fim▁hole|>
MOVIMP(void, CollisionObject, setId)(CollisionObjectPtr *self, const char *id)
{
(*self)->id = id;
}
MOVIMP(const char *, CollisionObject, getFrameId)(CollisionObjectPtr *self)
{
return (*self)->header.frame_id.c_str();
}
MOVIMP(void, CollisionObject, setFrameId)(CollisionObjectPtr *self, const char *id)
{
(*self)->header.frame_id = id;
}
MOVIMP(int, CollisionObject, getOperation)(CollisionObjectPtr *self)
{
return (*self)->operation;
}
MOVIMP(void, CollisionObject, setOperation)(CollisionObjectPtr *self, int operation)
{
(*self)->operation = static_cast< moveit_msgs::CollisionObject::_operation_type>(operation);
}
MOVIMP(void, CollisionObject, addPrimitive)(CollisionObjectPtr *self, int type, THDoubleTensor *dimensions, tf::Transform *transform)
{
shape_msgs::SolidPrimitive primitive;
primitive.type = type;
Tensor2vector(dimensions, primitive.dimensions);
(*self)->primitives.push_back(primitive);
geometry_msgs::Pose pose; // convert transform to pose msg
poseTFToMsg(*transform, pose);
(*self)->primitive_poses.push_back(pose);
}
MOVIMP(void, CollisionObject, addPlane)(CollisionObjectPtr *self, THDoubleTensor *coefs, tf::Transform *transform)
{
shape_msgs::Plane plane;
for (int i = 0; i < 4; ++i)
plane.coef[i] = THDoubleTensor_get1d(coefs, i);
(*self)->planes.push_back(plane);
geometry_msgs::Pose pose; // convert transform to pose msg
poseTFToMsg(*transform, pose);
(*self)->plane_poses.push_back(pose);
}<|fim▁end|>
| |
<|file_name|>divsum_analysis.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import sys
print "divsum_analysis.py DivsumFile NumberOfNucleotides"
try:
file = sys.argv[1]
except:
file = raw_input("Introduce RepeatMasker's Divsum file: ")
try:
nucs = sys.argv[2]
except:
nucs = raw_input("Introduce number of analysed nucleotides: ")
nucs = int(nucs)
data = open(file).readlines()<|fim▁hole|>
s_matrix = data.index("Coverage for each repeat class and divergence (Kimura)\n")
matrix = []
elements = data[s_matrix+1]
elements = elements.split()
for element in elements[1:]:
matrix.append([element,[]])
n_el = len(matrix)
for line in data[s_matrix+2:]:
# print line
info = line.split()
info = info[1:]
for n in range(0,n_el):
matrix[n][1].append(int(info[n]))
abs = open(file+".abs", "w")
rel = open(file+".rel", "w")
for n in range(0,n_el):
abs.write("%s\t%s\n" % (matrix[n][0], sum(matrix[n][1])))
rel.write("%s\t%s\n" % (matrix[n][0], round(1.0*sum(matrix[n][1])/nucs,100)))<|fim▁end|>
| |
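A minimal usage sketch for the RepeatMasker helper above; the input file name and nucleotide count are placeholders, and the script targets Python 2 (print statement, raw_input):

python2 divsum_analysis.py genome.align.divsum 2000000
# writes genome.align.divsum.abs and genome.align.divsum.rel (absolute and relative coverage per repeat class)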
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from io import BytesIO
from itertools import groupby
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from flask import make_response, render_template, abort
from webapp import app
from webapp.evaluation import *
from webapp.ioutils import *
from webapp import config
@app.route('/')
def index():
experiments = get_experiments_list()
# group by date, newest first
experiments = sorted(experiments, key=lambda r: r.timestamp.date(), reverse=True)
experiments = [(date, list(items)) for date, items in groupby(experiments, lambda r: r.timestamp.date())]
# for each date sort its results, best first
experiments = [(date, sorted(items, key=lambda r: r.score, reverse=True))
for date, items in experiments]
return render_template('overview.html', experiments=experiments, score_name=config.score_name)
@app.route('/<timestamp>')
def details(timestamp):
# will fail with 404 if exp not known
get_labels_predictions(timestamp)
return render_template('details.html', timestamp=timestamp)
@app.route("/<timestamp>/norm_confusions")
def normalized_confusion_matrix(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)<|fim▁hole|>@app.route("/<timestamp>/importances")
def feature_importances(timestamp):
features, importances = get_feature_importances(timestamp)
importance_fig = plot_feature_importances(features, importances)
return serve_matplotlib_fig(importance_fig)
@app.route("/<timestamp>/precision-recall")
def precision_recall(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
prec_recall_fig = plot_precision_recall_n(test_labels, test_predictions)
return serve_matplotlib_fig(prec_recall_fig)
@app.route("/<timestamp>/precision-cutoff")
def precision_cutoff(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
prec_cutoff_fig = plot_precision_cutoff(test_labels, test_predictions)
return serve_matplotlib_fig(prec_cutoff_fig)
@app.route("/<timestamp>/ROC")
def ROC(timestamp):
test_labels, test_predictions = get_labels_predictions(timestamp)
roc_fig = plot_ROC(test_labels, test_predictions)
return serve_matplotlib_fig(roc_fig)
@app.route("/growth")
def growth():
experiments = get_experiments_list()
# group by date, newest first
experiments = sorted(experiments, key=lambda r: r.timestamp.date(), reverse=True)
experiments = [(date, list(items)) for date, items in groupby(experiments, lambda r: r.timestamp.date())]
# only keep best result for each day
experiments = [(date, sorted(items, key=lambda r: r.score, reverse=True)[0])
for date, items in experiments]
experiments = [(date, best.score) for date, best in experiments]
growth_fig = plot_growth(experiments)
return serve_matplotlib_fig(growth_fig)
def serve_matplotlib_fig(fig):
canvas=FigureCanvas(fig)
png_output = BytesIO()
canvas.print_png(png_output)
response = make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'
return response<|fim▁end|>
|
matrix_fig = plot_normalized_confusion_matrix(test_labels, test_predictions)
return serve_matplotlib_fig(matrix_fig)
|
<|file_name|>xrootd.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# XRootD
#
# XRootD package installer.
#
# Author M Mottram - 15/04/2016 <[email protected]> : First revision
#######################################################################
import localpackage
import os
import stat
import shutil
class XRootD(localpackage.LocalPackage):
""" Base XRootD installer."""
def __init__(self, name, system, version):
""" Initialise the XRootD package."""
super(XRootD, self).__init__(name, system)
self._version = version
def get_tar_name(self):
""" Return the tarball name"""
return "xrootd-%s.tar.gz" % self._version
# Functions to override
def get_dependencies(self):
""" Return the dependency names as a list of names."""
return ["openssl-dev", "cmake-2.8.12"]
def _is_downloaded(self):
""" Check the tarball has been downloaded"""
return self._system.file_exists(self.get_tar_name())
def _is_installed(self):
""" Check the script has been marked as executable."""
return self._system.file_exists("xrootd", os.path.join(self.get_install_path(), "bin")) and \
bool(os.stat(os.path.join(self.get_install_path(), "bin/xrootd")).st_mode & stat.S_IXUSR)
def _download(self):
""" Download XRootD"""
self._system.download_file("http://xrootd.org/download/v%s/%s" % (self._version,
self.get_tar_name()))
<|fim▁hole|> def _install(self):
""" Mark the script as executable"""
source_path = os.path.join(self._system.get_install_path(), "%s-source" % self._name)
self._system.untar_file(self.get_tar_name(), source_path, 1)
if not os.path.exists(self.get_install_path()):
os.makedirs(self.get_install_path())
cmake_opts = [source_path,
"-DCMAKE_INSTALL_PREFIX=%s" % self.get_install_path(),
"-DENABLE_PERL=FALSE"]
cmake_command = "cmake"
if self._dependency_paths["cmake-2.8.12"] is not None:
cmake_command = "%s/bin/cmake" % self._dependency_paths["cmake-2.8.12"]
self._system.configure_command(cmake_command, cmake_opts, self.get_install_path(),
config_type="xrootd")
self._system.execute_command("make", [], self.get_install_path())
self._system.execute_command("make", ["install"], self.get_install_path())
shutil.rmtree(source_path)<|fim▁end|>
| |
<|file_name|>ActionsCapability.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************************
* Copyright (c) 2007 Nikolaj Hald Nielsen <[email protected]> *
* *
* This program is free software; you can redistribute it and/or modify it under *
* the terms of the GNU General Public License as published by the Free Software *
* Foundation; either version 2 of the License, or (at your option) any later *
* version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License along with *
* this program. If not, see <http://www.gnu.org/licenses/>. *
****************************************************************************************/
#include "core/capabilities/ActionsCapability.h"
Capabilities::ActionsCapability::ActionsCapability()
: Capabilities::Capability()
{
//nothing to do<|fim▁hole|> , m_actions( actions )
{
//nothing to do
}
Capabilities::ActionsCapability::~ActionsCapability()
{
//nothing to do.
}
QList<QAction *>
Capabilities::ActionsCapability::actions() const
{
return m_actions;
}
#include "ActionsCapability.moc"<|fim▁end|>
|
}
Capabilities::ActionsCapability::ActionsCapability( const QList<QAction*> &actions )
: Capabilities::Capability()
|
<|file_name|>input.rs<|end_file_name|><|fim▁begin|>// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie test input deserialization.
use std::collections::BTreeMap;
use std::str::FromStr;
use bytes::Bytes;
use serde::{Deserialize, Deserializer, Error};
use serde::de::{Visitor, MapVisitor, SeqVisitor};
/// Trie test input.
#[derive(Debug, PartialEq)]
pub struct Input {
/// Input params.
pub data: BTreeMap<Bytes, Option<Bytes>>,
}
impl Deserialize for Input {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: Deserializer
{
deserializer.deserialize(InputVisitor)
}
}
struct InputVisitor;
impl Visitor for InputVisitor {
type Value = Input;
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Self::Value, V::Error> where V: MapVisitor {
let mut result = BTreeMap::new();
loop {
let key_str: Option<String> = try!(visitor.visit_key());
let key = match key_str {
Some(ref k) if k.starts_with("0x") => try!(Bytes::from_str(k).map_err(Error::custom)),
Some(k) => Bytes::new(k.into_bytes()),
None => { break; }
};
let val_str: Option<String> = try!(visitor.visit_value());
let val = match val_str {
Some(ref v) if v.starts_with("0x") => Some(try!(Bytes::from_str(v).map_err(Error::custom))),
Some(v) => Some(Bytes::new(v.into_bytes())),
None => None,
};
result.insert(key, val);
}
try!(visitor.end());
let input = Input {
data: result
};
Ok(input)
}
fn visit_seq<V>(&mut self, mut visitor: V) -> Result<Self::Value, V::Error> where V: SeqVisitor {
let mut result = BTreeMap::new();
loop {
let keyval: Option<Vec<Option<String>>> = try!(visitor.visit());
let keyval = match keyval {
Some(k) => k,
_ => { break; },
};
if keyval.len() != 2 {
return Err(Error::custom("Invalid key value pair."));
}<|fim▁hole|> let ref val_str: Option<String> = keyval[1];
let key = match *key_str {
Some(ref k) if k.starts_with("0x") => try!(Bytes::from_str(k).map_err(Error::custom)),
Some(ref k) => Bytes::new(k.clone().into_bytes()),
None => { break; }
};
let val = match *val_str {
Some(ref v) if v.starts_with("0x") => Some(try!(Bytes::from_str(v).map_err(Error::custom))),
Some(ref v) => Some(Bytes::new(v.clone().into_bytes())),
None => None,
};
result.insert(key, val);
}
try!(visitor.end());
let input = Input {
data: result
};
Ok(input)
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use serde_json;
use bytes::Bytes;
use super::Input;
#[test]
fn input_deserialization_from_map() {
let s = r#"{
"0x0045" : "0x0123456789",
"be" : "e",
"0x0a" : null
}"#;
let input: Input = serde_json::from_str(s).unwrap();
let mut map = BTreeMap::new();
map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])));
map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65])));
map.insert(Bytes::new(vec![0x0a]), None);
assert_eq!(input.data, map);
}
#[test]
fn input_deserialization_from_array() {
let s = r#"[
["0x0045", "0x0123456789"],
["be", "e"],
["0x0a", null]
]"#;
let input: Input = serde_json::from_str(s).unwrap();
let mut map = BTreeMap::new();
map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])));
map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65])));
map.insert(Bytes::new(vec![0x0a]), None);
assert_eq!(input.data, map);
}
}<|fim▁end|>
|
let ref key_str: Option<String> = keyval[0];
|
<|file_name|>Login.py<|end_file_name|><|fim▁begin|>from UtmpHead import UtmpHead
import Config
from Log import Log
<|fim▁hole|> return (self._loginid == other._loginid)
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, loginid):
self._loginid = loginid
def get_loginid(self):
return self._loginid
def get_userid(self):
from Utmp import Utmp
return Utmp.GetUserId(self._loginid - 1)
# UtmpHead.LIST
@staticmethod
def list_head():
listhead = UtmpHead.GetListHead()
return Login(listhead)
def list_next(self):
return Login(UtmpHead.GetListNext(self._loginid - 1))
def list_prev(self):
return Login(UtmpHead.GetListPrev(self._loginid - 1))
def set_listnext(self, listnext):
return UtmpHead.SetListNext(self._loginid - 1, listnext._loginid)
def set_listprev(self, listprev):
return UtmpHead.SetListPrev(self._loginid - 1, listprev._loginid)
def list_remove(self):
if (Login.list_head() == self):
UtmpHead.SetListHead(self.list_next()._loginid)
self.list_prev().set_listnext(self.list_next())
self.list_next().set_listprev(self.list_prev())
def list_add(self, userid = None):
if (userid == None):
userid = self.get_userid()
if (userid == None or userid == ''):
raise Exception("illegal call to list_add")
node = Login.list_head()
if (node == None):
# empty list -> single element
self.set_listprev(self)
self.set_listnext(self)
UtmpHead.SetListHead(self._loginid)
return True
if (node.get_userid().lower() >= userid.lower()):
# insert at head
self.set_listprev(node.list_prev())
self.set_listnext(node)
node.set_listprev(self)
self.list_prev().set_listnext(self)
UtmpHead.SetListHead(self._loginid)
return True
count = 0
node = node.list_next()
while ((node.get_userid().lower() < userid.lower()) and (node != Login.list_head())):
node = node.list_next()
count += 1
if (count > Config.USHM_SIZE):
UtmpHead.SetListHead(0)
from Utmp import Utmp
Utmp.RebuildList()
return False
self.set_listprev(node.list_prev())
self.set_listnext(node)
node.set_listprev(self)
self.list_prev().set_listnext(self)
return True
# UtmpHead.HASH
@staticmethod
def hash_head(userid):
from Utmp import Utmp
hashkey = Utmp.Hash(userid)
hashhead = UtmpHead.GetHashHead(hashkey)
return hashkey, Login(hashhead)
def set_hashnext(self, hashnext):
UtmpHead.SetNext(self._loginid - 1, hashnext._loginid)
def hash_next(self):
nextid = UtmpHead.GetNext(self._loginid - 1)
return Login(nextid)
def hash_remove(self, userid = None): # userid: for debugging
if (userid == None):
from Utmp import Utmp
userid = Utmp.GetUserId(self._loginid - 1)
hashkey, pos = Login.hash_head(userid)
if (pos == None):
Log.error("Login.hash_remove: hash list empty!")
return False
if (pos == self):
UtmpHead.SetHashHead(hashkey, self.hash_next()._loginid)
else:
while (pos.hash_next() != None and pos.hash_next() != self):
pos = pos.hash_next()
if (pos.hash_next() == None):
Log.error("Login.hash_remove: can't find in hash list")
return False
else:
pos.set_hashnext(self.hash_next())
# add to free list
self.set_hashnext(Login.free_list())
Login.set_freelist(self)
return True
def hash_add(self, userid = None):
if (userid == None):
userid = self.get_userid()
if (userid == None or userid == ''):
raise Exception("illegal call to hash_add")
# remove from free list
Login.set_freelist(self.hash_next())
hashkey, node = Login.hash_head(userid)
self.set_hashnext(node)
UtmpHead.SetHashHead(hashkey, self._loginid)
@staticmethod
def free_list():
hashhead = UtmpHead.GetHashHead(0)
return Login(hashhead)
@staticmethod
def set_freelist(login):
UtmpHead.SetHashHead(0, login._loginid)<|fim▁end|>
|
class Login:
def __eq__(self, other):
if (other == None):
return (self._loginid == 0)
|
<|file_name|>sample1.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
test_data = [
{
"Transaction" : {
"transaction_date" : "2015-01-08",
"amount" : -1286.75,
"security_amount" : 4.0726,
"security_rate" : 413.68
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Sälj"
},
"TransactionData" : {
"ISIN" : "SE0000000001",
"courtage" : 10.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -1329.5,
"security_amount" : 15.1663,
"security_rate" : 222.17
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000002",
"courtage" : 20
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -682.61,
"security_amount" : 0.8534,
"security_rate" : 1974
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000003",
"courtage" : 30.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-05",
"amount" : 2728.8,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Januari"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000004",
"courtage" : 40
}
},
{
"Transaction" : {
"transaction_date" : "2014-12-08",
"amount" : -1144.98,
"security_amount" : 5.1423,
"security_rate" : 222.66
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000005",
"courtage" : 50.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-11-26",
"amount" : 2145.42,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning November"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000006",
"courtage" : 60
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-29",
"amount" : -863.81,
"security_amount" : 16.2254,
"security_rate" : 114.87
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000007",
"courtage" : 70.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-28",
"amount" : -862.99,
"security_amount" : 8.7321,
"security_rate" : 213.35
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000008",
"courtage" : 80
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-27",
"amount" : 2826.80,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Oktober"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000009",
"courtage" : 90.50
}
},
{<|fim▁hole|> "security_rate" : 114.92
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE00000000010",
"courtage" : 100
}
},
]<|fim▁end|>
|
"Transaction" : {
"transaction_date" : "2014-10-02",
"amount" : -10218.04,
"security_amount" : 149.8263,
|
<|file_name|>FormBruterJob.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
This is part of WebScout software<|fim▁hole|>Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Job class for FormBruter module
"""
from classes.jobs.GeneratorJob import GeneratorJob
class FormBruterJob(GeneratorJob):
""" Job class for FormBruter module """
collection_name = 'form_bruter'<|fim▁end|>
| |
<|file_name|>CameraPoller.java<|end_file_name|><|fim▁begin|>/*
* IRIS -- Intelligent Roadway Information System
* Copyright (C) 2007-2016 Minnesota Department of Transportation
* Copyright (C) 2014 AHMCT, University of California
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/<|fim▁hole|>
import us.mn.state.dot.tms.DeviceRequest;
import us.mn.state.dot.tms.server.CameraImpl;
/**
* CameraPoller is an interface for pollers which can send camera control
* messages.
*
* @author Douglas Lau
* @author Travis Swanston
*/
public interface CameraPoller {
/** Send a PTZ camera move command */
void sendPTZ(CameraImpl c, float p, float t, float z);
/** Send a store camera preset command */
void sendStorePreset(CameraImpl c, int preset);
/** Send a recall camera preset command */
void sendRecallPreset(CameraImpl c, int preset);
/** Send a device request
* @param c The CameraImpl object.
* @param r The desired DeviceRequest. */
void sendRequest(CameraImpl c, DeviceRequest r);
}<|fim▁end|>
|
package us.mn.state.dot.tms.server.comm;
|
<|file_name|>IUpdateFeeDiscountBMO.java<|end_file_name|><|fim▁begin|>package com.java110.fee.bmo.feeDiscount;
import com.alibaba.fastjson.JSONArray;
import com.java110.po.feeDiscount.FeeDiscountPo;
import org.springframework.http.ResponseEntity;
public interface IUpdateFeeDiscountBMO {
/**
* Modify a fee discount
* add by wuxw<|fim▁hole|> *
* @param feeDiscountPo
* @return
*/
ResponseEntity<String> update(FeeDiscountPo feeDiscountPo, JSONArray feeDiscountRuleSpecs);
}<|fim▁end|>
| |
<|file_name|>fm_emph.py<|end_file_name|><|fim▁begin|>#
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, filter
import math
import cmath
class fm_deemph(gr.hier_block2):
r"""
FM Deemphasis IIR filter
Args:
fs: sampling frequency in Hz (float)
tau: Time constant in seconds (75us in US, 50us in EUR) (float)
An analog deemphasis filter:
R
o------/\/\/\/---+----o
|
= C
|
---
Has this transfer function:
1 1
---- ---
RC tau
H(s) = ---------- = ----------
1 1
s + ---- s + ---
RC tau
And has its -3 dB response, due to the pole, at
|H(j w_c)|^2 = 1/2 => s = j w_c = j (1/(RC))
Historically, this corner frequency of analog audio deemphasis filters
been specified by the RC time constant used, called tau.
So w_c = 1/tau.
FWIW, for standard tau values, some standard analog components would be:
tau = 75 us = (50K)(1.5 nF) = (50 ohms)(1.5 uF)
tau = 50 us = (50K)(1.0 nF) = (50 ohms)(1.0 uF)
In specifying tau for this digital deemphasis filter, tau specifies
the *digital* corner frequency, w_c, desired.
The digital deemphasis filter design below, uses the
"bilinear transformation" method of designing digital filters:
1. Convert digital specifications into the analog domain, by prewarping
digital frequency specifications into analog frequencies.
w_a = (2/T)tan(wT/2)
2. Use an analog filter design technique to design the filter.
3. Use the bilinear transformation to convert the analog filter design to a
digital filter design.
H(z) = H(s)|
s = (2/T)(1-z^-1)/(1+z^-1)
w_ca 1 1 - (-1) z^-1
H(z) = ---- * ----------- * -----------------------
2 fs -w_ca -w_ca
1 - ----- 1 + -----
2 fs 2 fs
1 - ----------- z^-1
-w_ca
1 - -----
2 fs
We use this design technique, because it is an easy way to obtain a filter
design with the -6 dB/octave roll-off required of the deemphasis filter.
Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
Kluwer Academic Publishers, 1989, pp 201-212
Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
1996, pp 573-583
"""
def __init__(self, fs, tau=75e-6):
gr.hier_block2.__init__(self, "fm_deemph",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
# Digital corner frequency
w_c = 1.0 / tau
# Prewarped analog corner frequency
w_ca = 2.0 * fs * math.tan(w_c / (2.0 * fs))
# Resulting digital pole, zero, and gain term from the bilinear
# transformation of H(s) = w_ca / (s + w_ca) to
# H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
k = -w_ca / (2.0 * fs)
z1 = -1.0
p1 = (1.0 + k) / (1.0 - k)
b0 = -k / (1.0 - k)
btaps = [ b0 * 1.0, b0 * -z1 ]
ataps = [ 1.0, -p1 ]
# Since H(s = 0) = 1.0, then H(z = 1) = 1.0 and has 0 dB gain at DC
if 0:
print("btaps =", btaps)
print("ataps =", ataps)
global plot1
plot1 = gru.gnuplot_freqz(gru.freqz(btaps, ataps), fs, True)
deemph = filter.iir_filter_ffd(btaps, ataps, False)
self.connect(self, deemph, self)
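# Illustrative sketch (not part of GNU Radio): the tap computation above can be
# checked in isolation. This hypothetical helper repeats the bilinear-transform
# algebra from fm_deemph.__init__ for a given sample rate and time constant tau.
def _example_deemph_taps(fs, tau=75e-6):
    w_c = 1.0 / tau                               # digital corner frequency, rad/s
    w_ca = 2.0 * fs * math.tan(w_c / (2.0 * fs))  # prewarped analog corner
    k = -w_ca / (2.0 * fs)
    p1 = (1.0 + k) / (1.0 - k)                    # pole from the bilinear transform
    b0 = -k / (1.0 - k)                           # gain term; the zero sits at z = -1
    return [b0, b0], [1.0, -p1]                   # (btaps, ataps), as built above
# Sanity check: sum(btaps) / sum(ataps) evaluates to 1.0, i.e. 0 dB gain at DC,
# e.g. _example_deemph_taps(48000.0) for 48 kHz audio and the default 75 us tau.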
class fm_preemph(gr.hier_block2):
r"""
FM Preemphasis IIR filter.
Args:
fs: sampling frequency in Hz (float)
tau: Time constant in seconds (75us in US, 50us in EUR) (float)
fh: High frequency at which to flatten out (< 0 means default of 0.925*fs/2.0) (float)
An analog preemphasis filter, that flattens out again at the high end:
C
+-----||------+
| |
o------+ +-----+--------o
| R1 | |
+----/\/\/\/--+ \
/
\ R2<|fim▁hole|> |
o--------------------------+--------o
(This fine ASCII rendition is based on Figure 5-15
in "Digital and Analog Communication Systems", Leon W. Couch II)
Has this transfer function:
1
s + ---
R1C
H(s) = ------------------
1 R1
s + --- (1 + --)
R1C R2
It has a corner due to the numerator, where the rise starts, at
|Hn(j w_cl)|^2 = 2*|Hn(0)|^2 => s = j w_cl = j (1/(R1C))
It has a corner due to the denominator, where it levels off again, at
|Hn(j w_ch)|^2 = 1/2*|Hd(0)|^2 => s = j w_ch = j (1/(R1C) * (1 + R1/R2))
Historically, the corner frequency of analog audio preemphasis filters
been specified by the R1C time constant used, called tau.
So
w_cl = 1/tau = 1/R1C; f_cl = 1/(2*pi*tau) = 1/(2*pi*R1*C)
w_ch = 1/tau2 = (1+R1/R2)/R1C; f_ch = 1/(2*pi*tau2) = (1+R1/R2)/(2*pi*R1*C)
and note f_ch = f_cl * (1 + R1/R2).
For broadcast FM audio, tau is 75us in the United States and 50us in Europe.
f_ch should be higher than our digital audio bandwidth.
The Bode plot looks like this:
/----------------
/
/ <-- slope = 20dB/decade
/
-------------/
f_cl f_ch
In specifying tau for this digital preemphasis filter, tau specifies
the *digital* corner frequency, w_cl, desired.
The digital preemphasis filter design below, uses the
"bilinear transformation" method of designing digital filters:
1. Convert digital specifications into the analog domain, by prewarping
digital frequency specifications into analog frequencies.
w_a = (2/T)tan(wT/2)
2. Use an analog filter design technique to design the filter.
3. Use the bilinear transformation to convert the analog filter design to a
digital filter design.
H(z) = H(s)|
s = (2/T)(1-z^-1)/(1+z^-1)
-w_cla
1 + ------
2 fs
1 - ------------ z^-1
-w_cla -w_cla
1 - ------ 1 - ------
2 fs 2 fs
H(z) = ------------ * -----------------------
-w_cha -w_cha
1 - ------ 1 + ------
2 fs 2 fs
1 - ------------ z^-1
-w_cha
1 - ------
2 fs
We use this design technique, because it is an easy way to obtain a filter
design with the 6 dB/octave rise required of the premphasis filter.
Jackson, Leland B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
Kluwer Academic Publishers, 1989, pp 201-212
Orfanidis, Sophocles J., _Introduction_to_Signal_Processing_, Prentice Hall,
1996, pp 573-583
"""
def __init__(self, fs, tau=75e-6, fh=-1.0):
gr.hier_block2.__init__(self, "fm_preemph",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
# Set fh to something sensible, if needed.
# N.B. fh == fs/2.0 or fh == 0.0 results in a pole on the unit circle
# at z = -1.0 or z = 1.0 respectively. That makes the filter unstable
# and useless.
if fh <= 0.0 or fh >= fs / 2.0:
fh = 0.925 * fs/2.0
# Digital corner frequencies
w_cl = 1.0 / tau
w_ch = 2.0 * math.pi * fh
# Prewarped analog corner frequencies
w_cla = 2.0 * fs * math.tan(w_cl / (2.0 * fs))
w_cha = 2.0 * fs * math.tan(w_ch / (2.0 * fs))
# Resulting digital pole, zero, and gain term from the bilinear
# transformation of H(s) = (s + w_cla) / (s + w_cha) to
# H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
kl = -w_cla / (2.0 * fs)
kh = -w_cha / (2.0 * fs)
z1 = (1.0 + kl) / (1.0 - kl)
p1 = (1.0 + kh) / (1.0 - kh)
b0 = (1.0 - kl) / (1.0 - kh)
# Since H(s = infinity) = 1.0, then H(z = -1) = 1.0 and
# this filter has 0 dB gain at fs/2.0.
# That isn't what users are going to expect, so adjust with a
# gain, g, so that H(z = 1) = 1.0 for 0 dB gain at DC.
w_0dB = 2.0 * math.pi * 0.0
g = abs(1.0 - p1 * cmath.rect(1.0, -w_0dB)) \
/ (b0 * abs(1.0 - z1 * cmath.rect(1.0, -w_0dB)))
btaps = [ g * b0 * 1.0, g * b0 * -z1 ]
ataps = [ 1.0, -p1 ]
if 0:
print("btaps =", btaps)
print("ataps =", ataps)
global plot2
plot2 = gru.gnuplot_freqz(gru.freqz(btaps, ataps), fs, True)
preemph = filter.iir_filter_ffd(btaps, ataps, False)
self.connect(self, preemph, self)<|fim▁end|>
|
/
\
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
module.exports = require('./lib/SimpleNodeDb');
|
<|file_name|>Input.py<|end_file_name|><|fim▁begin|># Copyright (C) 2005 Colin McMillen <[email protected]>
#
# This file is part of GalaxyMage.
#
# GalaxyMage is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GalaxyMage is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalaxyMage; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import pygame
import logging
logger = logging.getLogger("gui")
CURSOR_UP = pygame.USEREVENT + 1
CURSOR_DOWN = pygame.USEREVENT + 2
CURSOR_LEFT = pygame.USEREVENT + 3
CURSOR_RIGHT = pygame.USEREVENT + 4
CURSOR_ACCEPT = pygame.USEREVENT + 5
CURSOR_CANCEL = pygame.USEREVENT + 6
LOWER_CAMERA = pygame.USEREVENT + 7
RAISE_CAMERA = pygame.USEREVENT + 8
ROTATE_CAMERA_CW = pygame.USEREVENT + 9
ROTATE_CAMERA_CCW = pygame.USEREVENT + 10
ROTATE_CAMERA = pygame.USEREVENT + 11
PITCH_CAMERA = pygame.USEREVENT + 12
TRANSLATE_CAMERA = pygame.USEREVENT + 13
RAISE_TILE = pygame.USEREVENT + 14
LOWER_TILE = pygame.USEREVENT + 15
RAISE_CENTER = pygame.USEREVENT + 16
LOWER_CENTER = pygame.USEREVENT + 17
RAISE_B_BL_CORNER = pygame.USEREVENT + 18
RAISE_L_FL_CORNER = pygame.USEREVENT + 20
RAISE_F_FR_CORNER = pygame.USEREVENT + 21
RAISE_R_BR_CORNER = pygame.USEREVENT + 19
LOWER_B_BL_CORNER = pygame.USEREVENT + 22
LOWER_L_FL_CORNER = pygame.USEREVENT + 24
LOWER_F_FR_CORNER = pygame.USEREVENT + 25
LOWER_R_BR_CORNER = pygame.USEREVENT + 23
RAISE_WATER = pygame.USEREVENT + 26
LOWER_WATER = pygame.USEREVENT + 27
FPS = pygame.USEREVENT + 28
TOGGLE_SOUND = pygame.USEREVENT + 29
TOGGLE_FULLSCREEN = pygame.USEREVENT + 30
UNDO = pygame.USEREVENT + 31
START_CHAT = pygame.USEREVENT + 32
_input = None
def get():
return _input
class Event(object):
def __init__(self, type_, data):
self.type = type_
self.__dict__.update(data)
def postUserEvent(type_, data={}):
e = Event(type_, data)
pygame.event.post(pygame.event.Event(pygame.USEREVENT,
{'event': e}))
class Input(object):
def __init__(self, joystickID):
global _input
_input = self
self._repeatDelay = 0.5 # seconds
self._repeatInterval = 0.05 # seconds
self.inDialog = False
self._eventRepeatTime = {}
self._joystick = None
if pygame.joystick.get_count() > joystickID:
self._joystick = pygame.joystick.Joystick(joystickID)
self._joystick.init()
logger.debug(("Initialized joystick %d " +
"(buttons: %d, hats: %d, axes: %d)")
% (self._joystick.get_id(),
self._joystick.get_numbuttons(),
self._joystick.get_numhats(),
self._joystick.get_numaxes()))
def joyButton(self, number):
if self._joystick == None or self._joystick.get_numbuttons() <= number:
return False
return self._joystick.get_button(number)
def joyHat(self, number, axis):
if self._joystick == None or self._joystick.get_numhats() <= number:
return 0
hat = self._joystick.get_hat(number)
return hat[axis]
def joyAxis(self, number):
if self._joystick == None or self._joystick.get_numaxes() <= number:
return 0.0
return self._joystick.get_axis(number)
def setInDialog(self, inDialog):
self.inDialog = inDialog
if inDialog:
pygame.key.set_repeat(300,100)
else:
pygame.key.set_repeat()
for button in self._eventRepeatTime:
self._eventRepeatTime[button] = self._repeatDelay
def update(self, timeElapsed):
if not self.inDialog:
self.updateGuiEvents(timeElapsed)
def updateGuiEvents(self, timeElapsed):
keysPressed = pygame.key.get_pressed()
mousePressed = pygame.mouse.get_pressed()
mouseMotion = pygame.mouse.get_rel()
# Generate events that are subject to repeat
self.buttonPressed(timeElapsed,
START_CHAT,
keysPressed[pygame.K_t])
self.buttonPressed(timeElapsed,
FPS,
keysPressed[pygame.K_f])
self.buttonPressed(timeElapsed,
TOGGLE_SOUND,
keysPressed[pygame.K_s])
self.buttonPressed(timeElapsed,
TOGGLE_FULLSCREEN,
keysPressed[pygame.K_F12])
self.buttonPressed(timeElapsed,
CURSOR_UP,
keysPressed[pygame.K_UP] or
self.joyAxis(5) < -0.8 or
self.joyHat(0, 1) == 1)
self.buttonPressed(timeElapsed,
CURSOR_DOWN,
keysPressed[pygame.K_DOWN] or
self.joyAxis(5) > 0.8 or
self.joyHat(0, 1) == -1)
self.buttonPressed(timeElapsed,
CURSOR_LEFT,
keysPressed[pygame.K_LEFT] or
self.joyAxis(4) < -0.8 or
self.joyHat(0, 0) == -1)
self.buttonPressed(timeElapsed,
CURSOR_RIGHT,
keysPressed[pygame.K_RIGHT] or
self.joyAxis(4) > 0.8 or
self.joyHat(0, 0) == 1)
self.buttonPressed(timeElapsed,
CURSOR_ACCEPT,
keysPressed[pygame.K_RETURN] or
self.joyButton(1))
self.buttonPressed(timeElapsed,
CURSOR_CANCEL,
keysPressed[pygame.K_ESCAPE] or
self.joyButton(2))
self.buttonPressed(timeElapsed,
LOWER_CAMERA,
keysPressed[pygame.K_PAGEUP] or
self.joyButton(6))
self.buttonPressed(timeElapsed,
RAISE_CAMERA,
keysPressed[pygame.K_PAGEDOWN] or
self.joyButton(7))
self.buttonPressed(timeElapsed,
ROTATE_CAMERA_CCW,
keysPressed[pygame.K_LEFTBRACKET] or
keysPressed[pygame.K_HOME] or
self.joyButton(4))
self.buttonPressed(timeElapsed,
ROTATE_CAMERA_CW,
keysPressed[pygame.K_RIGHTBRACKET] or
keysPressed[pygame.K_END] or
self.joyButton(5))
self.buttonPressed(timeElapsed,
RAISE_TILE,
keysPressed[pygame.K_EQUALS])
self.buttonPressed(timeElapsed,
LOWER_TILE,
keysPressed[pygame.K_MINUS])
self.buttonPressed(timeElapsed,
RAISE_CENTER,
(keysPressed[pygame.K_s] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT])))
self.buttonPressed(timeElapsed,
LOWER_CENTER,
(keysPressed[pygame.K_s] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT])))
self.buttonPressed(timeElapsed,
RAISE_B_BL_CORNER,
keysPressed[pygame.K_w] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
RAISE_L_FL_CORNER,
keysPressed[pygame.K_a] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
RAISE_F_FR_CORNER,
keysPressed[pygame.K_x] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
RAISE_R_BR_CORNER,
keysPressed[pygame.K_d] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
LOWER_B_BL_CORNER,
keysPressed[pygame.K_w] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
LOWER_L_FL_CORNER,
keysPressed[pygame.K_a] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
LOWER_F_FR_CORNER,
keysPressed[pygame.K_x] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
LOWER_R_BR_CORNER,
keysPressed[pygame.K_d] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
RAISE_WATER,
keysPressed[pygame.K_e] and not
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
LOWER_WATER,
keysPressed[pygame.K_e] and
(keysPressed[pygame.K_LSHIFT] or keysPressed[pygame.K_RSHIFT]))
self.buttonPressed(timeElapsed,
UNDO,
keysPressed[pygame.K_BACKSPACE])
# Generate misc events
# Quit
if keysPressed[pygame.K_q] or self.joyButton(9):
pygame.event.post(pygame.event.Event(pygame.QUIT))
# Rotate camera smoothly
if mousePressed[2] and mouseMotion[0] != 0:
postUserEvent(ROTATE_CAMERA,
{'amount': mouseMotion[0]})
if abs(self.joyAxis(0)) > 0.8:
amount = self.joyAxis(0) * timeElapsed * 180.0
postUserEvent(ROTATE_CAMERA,
{'amount': amount})
# Pitch camera
if mousePressed[2]:
postUserEvent(PITCH_CAMERA,
{'amount': mouseMotion[1]/3.0})
if abs(self.joyAxis(1)) > 0.8:
amount = self.joyAxis(1) * timeElapsed * 90.0
postUserEvent(PITCH_CAMERA,
{'amount': amount})
# Translate view
if mousePressed[0]:<|fim▁hole|> {'amount': (x, y)})
if abs(self.joyAxis(2)) > 0.2 or abs(self.joyAxis(3)) > 0.2:
(x, y) = (0.0, 0.0)
if abs(self.joyAxis(2)) > 0.2:
x = -self.joyAxis(2) * timeElapsed * 0.75
if abs(self.joyAxis(3)) > 0.2:
y = -self.joyAxis(3) * timeElapsed * 0.75
postUserEvent(TRANSLATE_CAMERA,
{'amount': (x, y)})
def buttonPressed(self, timeElapsed, button, pressed):
        if button not in self._eventRepeatTime:
self._eventRepeatTime[button] = -1.0
if pressed:
generateEvent = False
oldTime = self._eventRepeatTime[button]
if oldTime == -1.0:
generateEvent = True
self._eventRepeatTime[button] = self._repeatDelay
elif oldTime <= 0.0:
generateEvent = True
self._eventRepeatTime[button] = self._repeatInterval
else:
self._eventRepeatTime[button] -= timeElapsed
if generateEvent:
postUserEvent(button)
else:
self._eventRepeatTime[button] = -1.0<|fim▁end|>
|
x = mouseMotion[0] / 750.0
y = mouseMotion[1] / 750.0
postUserEvent(TRANSLATE_CAMERA,
|
<|file_name|>page_hinkley.py<|end_file_name|><|fim▁begin|>"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
*** The Page Hinkley (PH) Method Implementation ***
Paper: Page, Ewan S. "Continuous inspection schemes."
Published in: Biometrika 41.1/2 (1954): 100-115.
URL: http://www.jstor.org/stable/2333009
"""
from dictionary.tornado_dictionary import TornadoDic
from drift_detection.detector import SuperDetector
class PH(SuperDetector):
"""The Page Hinkley (PH) drift detection method class."""
DETECTOR_NAME = TornadoDic.PH
def __init__(self, min_instance=30, delta=0.005, lambda_=50, alpha=1 - 0.0001):
super().__init__()
self.MINIMUM_NUM_INSTANCES = min_instance
self.m_n = 1
self.x_mean = 0.0
self.sum = 0.0
self.delta = delta
self.lambda_ = lambda_
self.alpha = alpha
def run(self, pr):
<|fim▁hole|>
# 1. UPDATING STATS
self.x_mean = self.x_mean + (pr - self.x_mean) / self.m_n
self.sum = self.alpha * self.sum + (pr - self.x_mean - self.delta)
self.m_n += 1
# 2. UPDATING WARNING AND DRIFT STATUSES
if self.m_n >= self.MINIMUM_NUM_INSTANCES:
if self.sum > self.lambda_:
drift_status = True
return warning_status, drift_status
def reset(self):
super().reset()
self.m_n = 1
self.x_mean = 0.0
self.sum = 0.0
def get_settings(self):
return [str(self.MINIMUM_NUM_INSTANCES) + "." + str(self.delta) + "." +
str(self.lambda_) + "." + str(self.alpha),
"$n_{min}$:" + str(self.MINIMUM_NUM_INSTANCES) + ", " +
"$\delta$:" + str(self.delta).upper() + ", " +
"$\lambda$:" + str(self.lambda_).upper() + ", " +
"$\\alpha$:" + str(self.alpha).upper()]<|fim▁end|>
|
pr = 1 if pr is False else 0
warning_status = False
drift_status = False
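# Illustrative sketch, not part of the snippet above: a typical way to drive the
# PH detector on a stream of per-instance correctness flags. The name `stream`
# is a stand-in for whatever iterable of booleans the caller has, not something
# defined in this file.
#
#   detector = PH(min_instance=30, delta=0.005, lambda_=50)
#   for prediction_is_correct in stream:
#       warning, drift = detector.run(prediction_is_correct)
#       if drift:
#           detector.reset()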
|
<|file_name|>users.server.controller.js<|end_file_name|><|fim▁begin|>const _ = require('lodash');
module.exports = _.extend(
require('./users/users.authentication.server.controller'),
require('./users/users.authorization.server.controller'),
require('./users/users.password.server.controller'),<|fim▁hole|><|fim▁end|>
|
require('./users/users.profile.server.controller')
);
|
<|file_name|>add_polynomials.cpp<|end_file_name|><|fim▁begin|>/* Part of Cosmos by OpenGenus Foundation */
/* Contributed by Vaibhav Jain (vaibhav29498) */
/* Refactored by Adeen Shukla (adeen-s) */
#include <iostream>
#include <stddef.h>
using namespace std;
struct term {
int coeff;
int pow;
term* next;
term(int, int);
};
term::term(int c, int p) {
coeff = c;
pow = p;
next = NULL;
}
class polynomial {
term* head;
public:
polynomial();
void insert_term(int, int);
void print();
friend polynomial operator+(polynomial, polynomial);
};
polynomial::polynomial() { head = NULL; }
void polynomial::insert_term(int c, int p) {
if (head == NULL) {
head = new term(c, p);
return;
}
if (p > head->pow) {
term* t = new term(c, p);
t->next = head;
head = t;
return;
}
term* cur = head;
while (cur != NULL) {
if (cur->pow == p) {
cur->coeff += c;
return;
}
if ((cur->next == NULL) || (cur->next->pow < p)) {
term* t = new term(c, p);
t->next = cur->next;
cur->next = t;
return;
}
cur = cur->next;
}
}
void polynomial::print() {
term* t = head;
while (t != NULL) {
cout << t->coeff;
if (t->pow) cout << "x^" << t->pow;
if (t->next != NULL) cout << "+";
t = t->next;
}
cout << endl;
}
polynomial operator+(polynomial p1, polynomial p2) {
polynomial p;
term *t1 = p1.head, *t2 = p2.head;
while ((t1 != NULL) && (t2 != NULL)) {
if (t1->pow > t2->pow) {
p.insert_term(t1->coeff, t1->pow);
t1 = t1->next;
} else if (t1->pow < t2->pow) {
p.insert_term(t2->coeff, t2->pow);
t2 = t2->next;
} else {
p.insert_term(t1->coeff + t2->coeff, t1->pow);
t1 = t1->next;
t2 = t2->next;
}
}
while (t1 != NULL) {
p.insert_term(t1->coeff, t1->pow);
t1 = t1->next;
}
while (t2 != NULL) {
p.insert_term(t2->coeff, t2->pow);
t2 = t2->next;
}
return p;
}
int main() {
polynomial p1, p2;
p1.insert_term(7, 4);
<|fim▁hole|> p1.print();
p2.insert_term(5, 0);
p2.insert_term(6, 5);
p2.insert_term(7, 0);
p2.insert_term(3, 2);
cout << "Second polynomial:";
p2.print();
polynomial p3 = p1 + p2;
cout << "Sum:";
p3.print();
return 0;
}<|fim▁end|>
|
p1.insert_term(4, 5);
p1.insert_term(10, 0);
p1.insert_term(9, 2);
cout << "First polynomial:";
|
<|file_name|>common_layers.py<|end_file_name|><|fim▁begin|># coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import contextlib
import functools
from functools import partial
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
@function.Defun(
python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
"""Identity operation whose gradient is converted to a `Tensor`.
Currently, the gradient to `tf.concat` is particularly expensive to
compute if dy is an `IndexedSlices` (a lack of GPU implementation
forces the gradient operation onto CPU). This situation occurs when
the output of the `tf.concat` is eventually passed to `tf.gather`.
It is sometimes faster to convert the gradient to a `Tensor`, so as
to get the cheaper gradient for `tf.concat`. To do this, replace
`tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
Args:
x: A `Tensor`.
Returns:
The input `Tensor`.
"""
return x
def is_xla_compiled():
"""Whether we are building graph that will be compiled by XLA.
This checks whether the code is executing within an XLA context.
If True, model authors should ensure the graph they build is compilable by
XLA. Specifically, they should ensure that all ops have XLA implementations
and that all shapes are statically known.
Returns:
bool, whether the current graph will be compiled for XLA.
"""
ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingXLAContext(ctxt) is not None
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
"""Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
broadcast_dims: an optional list of integers
the dimensions along which to broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x.
"""
assert "noise_shape" not in kwargs
if broadcast_dims:
shape = tf.shape(x)
ndims = len(x.get_shape())
# Allow dimensions like "-1" as well.
broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
kwargs["noise_shape"] = [
1 if i in broadcast_dims else shape[i] for i in range(ndims)
]
return tf.nn.dropout(x, keep_prob, **kwargs)
def comma_separated_string_to_integer_list(s):
return [int(i) for i in s.split(",") if i]
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", values=[x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
def hard_sigmoid(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
x_shifted = 0.5 * x + 0.5
return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
inv_base = tf.exp(tf.log(min_value) / float(max_step))
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = tf.to_float(step)
return inv_base**tf.maximum(float(max_step) - step, 0.0)
def inverse_lin_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = tf.to_float(step)
progress = tf.minimum(step / float(max_step), 1.0)
return progress * (1.0 - min_value) + min_value
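# Illustrative sketch (not part of the original file): scalar reference values
# for the two inverse-decay schedules defined above, handy as a sanity check.
def _inverse_decay_example(step, max_step=1000, min_value=0.01):
  """Returns (exp_schedule, lin_schedule) for a plain-Python scalar `step`."""
  exp_value = min_value ** (max(float(max_step) - step, 0.0) / float(max_step))
  lin_value = min(step / float(max_step), 1.0) * (1.0 - min_value) + min_value
  return exp_value, lin_value  # both equal min_value at step 0 and 1.0 at max_step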
def shakeshake2_py(x, y, equal=False, individual=False):
"""The shake-shake sum of 2 tensors, python version."""
if equal:
alpha = 0.5
elif individual:
    alpha = tf.random_uniform(tf.shape(x)[:1])
else:
alpha = tf.random_uniform([])
return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2, individual=True)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
"""Overriding gradient for shake-shake of 2 tensors."""
y = shakeshake2_py(x1, x2, equal=True)
dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
return dx
@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
"""The shake-shake function with a different alpha for forward/backward."""
return shakeshake2_py(x1, x2)
@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
return shakeshake2_py(x1, x2, individual=True)
@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
"""The shake-shake function with a different alpha for forward/backward."""
return shakeshake2_py(x1, x2)
def shakeshake(xs, equal_grad=False):
"""Multi-argument shake-shake, currently approximated by sums of 2."""
if len(xs) == 1:
return xs[0]
div = (len(xs) + 1) // 2
arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
if equal_grad:
return shakeshake2_eqgrad(arg1, arg2)
return shakeshake2(arg1, arg2)
def convert_rgb_to_real(x):
"""Conversion of pixel values to real numbers."""
with tf.name_scope("rgb_to_real", values=[x]):
x = tf.to_float(x)
x /= 255.0
return x
def convert_rgb_to_symmetric_real(x):
"""Conversion of pixel values to real numbers."""
with tf.name_scope("rgb_to_real", values=[x]):
x = tf.to_float(x)
# Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in
# the range [-1, 1].
x = (x / 127.5) - 1
return x
def convert_real_to_rgb(x):
"""Conversion of real numbers to pixel values."""
with tf.name_scope("real_to_rgb", values=[x]):
x *= 255.0
return x
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
"""Make x n-d with squeeze and expand_dims."""
if len(x.shape) > n:
while len(x.shape) != n:
x = tf.squeeze(x, [squeeze_dim])
else:
while len(x.shape) != n:
x = tf.expand_dims(x, expand_dim)
return x
def standardize_images(x):
"""Image standardization on batches and videos."""
with tf.name_scope("standardize_images", [x]):
x_shape = shape_list(x)
x = tf.to_float(tf.reshape(x, [-1] + x_shape[-3:]))
x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
x_variance = tf.reduce_mean(
tf.square(x - x_mean), axis=[1, 2], keepdims=True)
num_pixels = tf.to_float(x_shape[-2] * x_shape[-3])
x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
return tf.reshape(x, x_shape)
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result
# TODO(noam): remove this function after TPUs do gather faster.
def gather(params, indices, dtype=tf.float32):
"""Version of tf.gather that works faster on tpu."""
if not is_xla_compiled():
return tf.gather(params, indices)
vocab_size = params.get_shape().as_list()[0]
indices_flat = tf.reshape(indices, [-1])
out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
out = reshape_like(out, tf.expand_dims(indices, -1))
return out
# TODO(noam): remove this function after TPUs do cumsum faster.
def cumsum(x, axis=0, exclusive=False):
"""TPU hack for tf.cumsum.
This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
the axis dimension is very large.
Args:
x: a Tensor
axis: an integer
exclusive: a boolean
Returns:
Tensor of the same shape as x.
"""
if not is_xla_compiled():
return tf.cumsum(x, axis=axis, exclusive=exclusive)
x_shape = shape_list(x)
rank = len(x_shape)
length = x_shape[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
ret = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != rank - 1:
ret = tf.transpose(
ret,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return ret
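# Illustrative sketch (not part of the original file): the triangular-mask
# matmul trick used by cumsum() above, reproduced in numpy for a 1-D vector so
# the equivalence with np.cumsum is easy to check.
def _cumsum_via_mask_example():
  x = np.arange(1.0, 5.0)  # [1, 2, 3, 4]
  n = x.shape[0]
  mask = (np.arange(n)[:, None] <= np.arange(n)[None, :]).astype(x.dtype)
  return x.dot(mask), np.cumsum(x)  # both equal [1, 3, 6, 10]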
def dropout_no_scaling(x, keep_prob):
"""Like tf.nn.dropout, but does not scale up. Works on integers also.
Args:
x: a Tensor
keep_prob: a floating point number
Returns:
Tensor of the same shape as x.
"""
if keep_prob == 1.0:
return x
mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
return x * cast_like(mask, x)
def embedding(x,
vocab_size,
dense_size,
name=None,
reuse=None,
multiplier=1.0,
symbol_dropout_rate=0.0,
embedding_var=None,
dtype=tf.float32):
"""Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
with tf.variable_scope(
name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
if embedding_var is None:
embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
# On the backwards pass, we want to convert the gradient from
# an indexed-slices to a regular tensor before sending it back to the
# parameter server. This avoids excess computation on the parameter server.
if not tf.contrib.eager.in_eager_mode():
embedding_var = convert_gradient_to_tensor(embedding_var)
x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
emb_x = gather(embedding_var, x, dtype)
if multiplier != 1.0:
emb_x *= multiplier
static_shape = emb_x.shape.as_list()
if len(static_shape) < 5:
return emb_x
assert len(static_shape) == 5
# If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
return tf.squeeze(emb_x, 3)
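# Illustrative sketch (not part of the original file): a typical call to
# embedding() above. Scaling by sqrt(depth) via `multiplier` mirrors a common
# transformer convention; the vocab and depth numbers are arbitrary placeholders.
def _embedding_usage_example(token_ids):
  return embedding(token_ids, vocab_size=32000, dense_size=512,
                   multiplier=512 ** 0.5, name="example_embedding")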
def shift_right(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets
def shift_right_3d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
return shifted_targets
def shift_right_2d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1]
return shifted_targets
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
"""Use a strided convolution to downsample x by 2, `nbr_steps` times.
  We use stride and filter size 2 to avoid the checkerboard problem of deconvs,
  as detailed in http://distill.pub/2016/deconv-checkerboard/.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: number of halving downsample rounds to apply
output_filters: an int specifying the filter count for the convolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
`[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
if nbr_steps == 0:
out = conv(x, output_filters, (1, 1))
return out, [out]
hidden_layers = [x]
for i in range(nbr_steps):
hidden_layers.append(
conv(
hidden_layers[-1],
output_filters, (2, 2),
strides=2,
activation=tf.nn.relu,
name="conv" + str(i)))
return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
nbr_steps,
output_filters,
name=None,
reuse=None):
"""Use a deconvolution to upsample x by 2**`nbr_steps`.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: an int specifying the number of doubling upsample rounds to
apply.
output_filters: an int specifying the filter count for the deconvolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
`[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):
def deconv1d(cur, i):
cur_shape = shape_list(cur)
thicker = conv(
cur,
output_filters * 2, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv1d" + str(i))
return tf.reshape(thicker,
[cur_shape[0], cur_shape[1] * 2, 1, output_filters])
def deconv2d(cur, i):
thicker = conv(
cur,
output_filters * 4, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv2d" + str(i))
return tf.depth_to_space(thicker, 2)
cur = x
for i in range(nbr_steps):
if cur.get_shape()[2] == 1:
cur = deconv1d(cur, i)
else:
cur_dim = shape_list(cur)[2]
if isinstance(cur_dim, int):
if cur_dim == 1:
cur = deconv1d(cur, i)
else:
cur = deconv2d(cur, i)
else:
cur = tf.cond(
tf.equal(cur_dim, 1),
lambda idx=i: deconv1d(cur, idx),
lambda idx=i: deconv2d(cur, idx))
return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
"""Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4. "
"Shape: " + str(static_shape))
# Add support for left padding.
if kwargs.get("padding") == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
# Set middle two dimensions to None to prevent convolution from complaining
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
kwargs["padding"] = "VALID"
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
original_name = kwargs.pop("name", None)
original_force2d = kwargs.pop("force2d", None)
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
return conv_internal(
tf.layers.conv2d,
inputs,
filters,
kernel_size,
dilation_rate=dilation_rate,
**kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
return tf.squeeze(
conv(
tf.expand_dims(inputs, 2),
filters, (kernel_size, 1),
dilation_rate=(dilation_rate, 1),
**kwargs), 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,
**kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv."""
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
tf.layers.conv2d(split, filters // separability, kernel_size,
**kwargs))
else:
parts.append(
tf.layers.separable_conv2d(split, filters // abs_sep,
kernel_size, **kwargs))
if separability > 1:
result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
**kwargs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
"""Version of conv1d that works on TPU (as of 11/2017).
Args:
inputs: a Tensor with shape [batch, length, input_depth].
filters: an integer.
kernel_size: an integer.
padding: a string - "SAME" or "LEFT".
name: a string.
Returns:
a Tensor with shape [batch, length, filters].
"""
if kernel_size == 1:
return dense(inputs, filters, name=name, use_bias=True)
if padding == "SAME":
assert kernel_size % 2 == 1
first_offset = -((kernel_size - 1) // 2)
else:
assert padding == "LEFT"
first_offset = -(kernel_size - 1)
last_offset = first_offset + kernel_size - 1
results = []
padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
for i in range(kernel_size):
shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
shifted.set_shape(inputs.get_shape())
results.append(
dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
ret = tf.add_n(results)
ret *= kernel_size**-0.5
return ret
def layer_norm_vars(filters):
"""Create Variables for layer norm."""
scale = tf.get_variable(
"layer_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], initializer=tf.zeros_initializer())
return scale, bias
def layer_norm_compute(x, epsilon, scale, bias):
"""Layer norm raw computation."""
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * scale + bias
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
scale, bias = layer_norm_vars(filters)
return layer_norm_compute(x, epsilon, scale, bias)
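# Illustrative sketch (not part of the original file): a numpy reference for
# layer_norm_compute() above with scale=1 and bias=0, normalizing the last axis.
def _layer_norm_reference_example(x, epsilon=1e-6):
  mean = x.mean(axis=-1, keepdims=True)
  variance = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(variance + epsilon)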
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
"""Group normalization as in https://arxiv.org/abs/1803.08494."""
x_shape = shape_list(x)
if filters is None:
filters = x_shape[-1]
assert len(x_shape) == 4
assert filters % num_groups == 0
# Prepare variables.
scale = tf.get_variable(
"group_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"group_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
# Reshape and compute group norm.
x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
# Calculate mean and variance on heights, width, channels (not groups).
mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return tf.reshape(norm_x, x_shape) * scale + bias
def noam_norm(x, epsilon=1.0, name=None):
"""One version of layer normalization."""
with tf.name_scope(name, default_name="noam_norm", values=[x]):
shape = x.get_shape()
ndims = len(shape)
return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt(
tf.to_float(shape[-1])))
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalization with l2 norm."""
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"l2_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"l2_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
l2norm = tf.reduce_sum(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
return norm_x * scale + bias
def apply_spectral_norm(x):
"""Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
"""
weights_shape = shape_list(x)
other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
# Reshape into a 2-D matrix with outer size num_filters.
weights_2d = tf.reshape(x, (other, num_filters))
# v = Wu / ||W u||
with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
u = tf.get_variable(
"u", [num_filters, 1],
initializer=tf.truncated_normal_initializer(),
trainable=False)
v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
# u_new = vW / ||v W||
u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
# s = v*W*u
spectral_norm = tf.squeeze(
tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
# set u equal to u_new in the next iteration.
assign_op = tf.assign(u, tf.transpose(u_new))
return tf.divide(x, spectral_norm), assign_op
def apply_norm(x, norm_type, depth, epsilon):
"""Apply Normalization."""
if norm_type == "layer":
return layer_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "group":
return group_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "batch":
return tf.layers.batch_normalization(x, epsilon=epsilon)
if norm_type == "noam":
return noam_norm(x, epsilon)
if norm_type == "l2":
return l2_norm(x, filters=depth, epsilon=epsilon)
if norm_type == "none":
return x
raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch',"
"'noam', 'lr', 'none'.")
def zero_add(previous_value, x, name=None, reuse=None):
"""Resnet connection with zero initialization.
Another type of resnet connection which returns previous_value + gamma * x.
gamma is a trainable scalar and initialized with zero. It is useful when a
module is plugged into a trained model and we want to make sure it matches the
original model's performance.
Args:
previous_value: A tensor.
x: A tensor.
name: name of variable scope; defaults to zero_add.
reuse: reuse scope.
Returns:
previous_value + gamma * x.
"""
with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer())
return previous_value + gamma * x
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
norm_type,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
a Tensor
"""
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "z":
x = zero_add(previous_value, x)
elif c == "n":
x = apply_norm(x, norm_type, depth, epsilon)
else:
assert c == "d", ("Unknown sequence step %s" % c)
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return x
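# Illustrative sketch (not part of the original file): the common "dna" sequence
# applied through layer_prepostprocess() above, i.e. dropout, then layer norm,
# then the residual add, giving previous_value + layer_norm(dropout(x)).
def _prepostprocess_dna_example(previous_value, x, depth):
  return layer_prepostprocess(
      previous_value, x,
      sequence="dna", dropout_rate=0.1, norm_type="layer",
      depth=depth, epsilon=1e-6, default_name="prepostprocess_example")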
def layer_preprocess(layer_input, hparams):
"""Apply layer preprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_preprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
"""
assert "a" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
assert "z" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
return layer_prepostprocess(
None,
layer_input,
sequence=hparams.layer_preprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_prepostprocess")
def layer_postprocess(layer_input, layer_output, hparams):
"""Apply layer postprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_postprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
layer_output: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
"""
return layer_prepostprocess(
layer_input,
layer_output,
sequence=hparams.layer_postprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_postprocess")
def conv_block_internal(conv_fn,
inputs,
filters,
dilation_rates_and_kernel_sizes,
first_relu=True,
use_elu=False,
separabilities=None,
**kwargs):
"""A block of convolutions.
Args:
conv_fn: convolution function, e.g. conv or separable_conv.
inputs: a Tensor
filters: an Integer
dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
first_relu: whether to do a relu at start (defaults to True)
use_elu: whether to use ELUs instead of ReLUs (defaults to False)
separabilities: list of separability factors (per-layer).
**kwargs: additional arguments (e.g., pooling)
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
mask = kwargs.pop("mask") if "mask" in kwargs else None
# Usage for normalize_fn kwarg:
# if not specified, use layer norm
# if given normalize_fn=None, don't use any normalization
# if given normalize_fn=norm, use the specified norm function
use_layer_norm = "normalizer_fn" not in kwargs
norm = kwargs.pop("normalizer_fn", None)
use_normalizer_fn = use_layer_norm or norm
if use_layer_norm:
norm = lambda x, name: layer_norm(x, filters, name=name)
with tf.variable_scope(name, "conv_block", [inputs]):
cur, counter = inputs, -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if first_relu or counter > 0:
cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
if mask is not None:
cur *= mask
if separabilities:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
separability=separabilities[counter],
**kwargs)
else:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
**kwargs)
if use_normalizer_fn:
cur = norm(cur, name="conv_block_norm_%d" % counter)
return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 2d convolutions."""
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 1d convolutions."""
return conv_block_internal(conv1d, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(separable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(subseparable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
"""Pooling (supports "LEFT")."""
with tf.name_scope("pool", values=[inputs]):
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
# Add support for left padding.
if padding == "LEFT":
assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
if len(static_shape) == 3:
width_padding = 2 * (window_size[1] // 2)
padding_ = [[0, 0], [width_padding, 0], [0, 0]]
else:
height_padding = 2 * (window_size[0] // 2)
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (window_size[1] // 2)))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding_)
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
padding = "VALID"
return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)
def conv_block_downsample(x,
kernel,
strides,
padding,
separability=0,
name=None,
reuse=None):
"""Implements a downwards-striding conv block, like Xception exit flow."""
with tf.variable_scope(
name, default_name="conv_block_downsample", values=[x], reuse=reuse):
hidden_size = int(x.get_shape()[-1])
res = conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
strides=strides,
name="res_conv")
x = subseparable_conv_block(
x,
hidden_size, [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv0")
x = subseparable_conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv1")
x = pool(x, kernel, "MAX", padding, strides=strides)
x += res
x = subseparable_conv_block(
x,
2 * hidden_size, [((1, 1), kernel)],
first_relu=False,
padding=padding,
separability=separability,
name="conv2")
x = subseparable_conv_block(
x,
int(2.5 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv3")
return x
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (
math.log(max_timescale / min_timescale) / (num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
This allows attention to learn to use absolute and relative positions.
The timing signal should be added to some precursor of both the source
and the target of the attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the depth dimension, padded with zeros to be the same depth as the input,
and added into input.
Args:
x: a Tensor with shape [?, length, ?, depth]
min_timescale: a float
max_timescale: a float
num_timescales: an int <= depth/2
Returns:
a Tensor the same shape as x.
"""
length = shape_list(x)[1]
depth = shape_list(x)[3]
signal = get_timing_signal(length, min_timescale, max_timescale,
num_timescales)
padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
return x + tf.reshape(padded_signal, [1, length, 1, depth])
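# Illustrative sketch (not part of the original file): a numpy reference for
# get_timing_signal() above, showing the (length, 2*num_timescales) layout of
# the concatenated sin/cos features.
def _timing_signal_reference_example(length=4, num_timescales=4,
                                     min_timescale=1.0, max_timescale=1e4):
  positions = np.arange(length, dtype=np.float32)
  log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float32) * -log_increment)
  scaled_time = positions[:, None] * inv_timescales[None, :]
  return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)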
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))
def length_from_embedding(emb):
"""Compute the length of each sequence in the batch.
Args:
emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].
Returns:
a Tensor with shape [batch].
"""
return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32)
def mask_leq(target_length, source_length):
"""A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.
Args:
target_length: an integer
source_length: an integer
Returns:
a Tensor with shape [1, target_length, source_length]
"""
return ones_matrix_band_part(
target_length,
source_length,
-1,
0,
out_shape=[1, target_length, source_length])
def relu_density_logit(x, reduce_dims):
"""logit(density(x)).
Useful for histograms.
Args:
x: a Tensor, typically the output of tf.relu
reduce_dims: a list of dimensions
Returns:
a Tensor
"""
frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
return scaled
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
"""If necessary, zero out inputs to a conv for padding positions.
Args:
inputs: a Tensor with shape [batch, length, ...]
kernel_size: an integer or pair of integers
nonpadding_mask: a Tensor with shape [batch, length]
Returns:
Tensor of the same shape as inputs.
"""
if (kernel_size != 1 and kernel_size != (1, 1) and
nonpadding_mask is not None):
while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
return inputs * nonpadding_mask
return inputs
def dense_relu_dense(inputs,
filter_size,
output_size,
output_activation=None,
dropout=0.0,
dropout_broadcast_dims=None,
name=None):
"""Hidden layer with RELU activation followed by linear projection."""
layer_name = "%s_{}" % name if name else "{}"
h = dense(
inputs,
filter_size,
use_bias=True,
activation=tf.nn.relu,
name=layer_name.format("conv1"))
if dropout != 0.0:
h = dropout_with_broadcast_dims(
h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
o = dense(
h,
output_size,
activation=output_activation,
use_bias=True,
name=layer_name.format("conv2"))
return o
def dense_dropconnect(inputs,
output_size,
dropconnect_dropout=0.0,
name="dense_dropconnect",
**kwargs):
"""Dense layer with dropconnect."""
if dropconnect_dropout != 0.0:
tf.logging.info("Applying dropconnect as the kernel regularization.")
kwargs["kernel_regularizer"] = partial(
tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)
return dense(inputs, output_size, use_bias=True, name=name, **kwargs)
def conv_relu_conv(inputs,
filter_size,
output_size,
first_kernel_size=3,
second_kernel_size=3,
padding="SAME",
nonpadding_mask=None,
dropout=0.0,
name=None,
cache=None,
decode_loop_step=None):
"""Hidden layer with RELU activation followed by linear projection.
Args:
inputs: A tensor.
filter_size: An integer.
output_size: An integer.
first_kernel_size: An integer.
second_kernel_size: An integer.
padding: A string.
nonpadding_mask: A tensor.
dropout: A float.
name: A string.
cache: A dict, containing Tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop.
Only used for inference on TPU. If it is not None, the function
will do inplace update for the cache instead of concatenating the
current result to the cache.
Returns:
A Tensor.
"""
with tf.variable_scope(name, "conv_relu_conv", [inputs]):
inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
if cache:
if decode_loop_step is None:
inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1)
else:
# Inplace update is required for inference on TPU.
# Inplace_ops only supports inplace_update on the first dimension.
        # The performance of the current implementation is better than updating
# the tensor by adding the result of matmul(one_hot,
# update_in_current_step)
tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
tmp_f = inplace_ops.alias_inplace_update(
tmp_f,
decode_loop_step * tf.shape(inputs)[1],
tf.transpose(inputs, perm=[1, 0, 2]))
inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2])
inputs = cache["f"] = inputs[:, -first_kernel_size:, :]
h = tpu_conv1d(
inputs, filter_size, first_kernel_size, padding=padding, name="conv1")
if cache:
h = h[:, -1:, :]
h = tf.nn.relu(h)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
return tpu_conv1d(
h, output_size, second_kernel_size, padding=padding, name="conv2")
def sepconv_relu_sepconv(inputs,
filter_size,
output_size,
first_kernel_size=(1, 1),
second_kernel_size=(1, 1),
padding="LEFT",
nonpadding_mask=None,
dropout=0.0,
name=None):
"""Hidden layer with RELU activation followed by linear projection."""
with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
h = separable_conv(
inputs,
filter_size,
first_kernel_size,
activation=tf.nn.relu,
padding=padding,
name="conv1")
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
ret = separable_conv(
h, output_size, second_kernel_size, padding=padding, name="conv2")
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
def conv_hidden_relu(inputs,
hidden_size,
output_size,
kernel_size=(1, 1),
second_kernel_size=(1, 1),
dropout=0.0,
**kwargs):
"""Hidden layer with RELU activation followed by linear projection."""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
conv_f1 = conv if kernel_size == (1, 1) else separable_conv
h = conv_f1(
inputs,
hidden_size,
kernel_size,
activation=tf.nn.relu,
name="conv1",
**kwargs)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv
ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs)
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
def conv_gru(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional GRU in 1 dimension."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start, padding):
return conv(
args,
filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate,
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="conv_gru", values=[x], reuse=reuse):
reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
return gate * x + (1 - gate) * candidate
def gru_feedfwd(a_t, h_prev, filters, name=None):
"""position-wise Feed-fwd GRU gates following the MPNN.
Args:
a_t: Tensor of shape [batch, length, depth] of current input
h_prev: Tensor of shape [batch, length, depth] of prev input
filters: an integer specifying number of dimensions of the filters
name: A string
Returns:
h_t: [batch, length, filters] hidden state
"""
with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
# we use right matrix multiplication to handle batches
# W_z and W_r have shape 2d, d. U_z U_r have shape d,d
z_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z")))
r_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r")))
h_tilde = (
tf.tanh(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") +
tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U")))
h_t = (1. - z_t) * h_prev + z_t * h_tilde
return h_t
def conv_lstm(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional LSTM in 1 dimension."""
with tf.variable_scope(
name, default_name="conv_lstm", values=[x], reuse=reuse):
gates = conv(
x,
4 * filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate)
g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
return tf.sigmoid(g[2]) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
kernel_size,
filters,
dropout=0.0,
name=None,
reuse=None):
"""Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start):
return conv(
args,
filters,
kernel_size,
padding="SAME",
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))
if dropout > 0.0:
candidate = tf.nn.dropout(candidate, 1.0 - dropout)
# Diagonal shift.
shift_filters = filters // 3
base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
[[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
x_shifted = tf.nn.depthwise_conv2d(
x, shift_filter, [1, 1, 1, 1], padding="SAME")
# Return the gated result and cost.
total_cost_avg = 0.5 * (reset_cost + gate_cost)
return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
"""Pad tensors x and y on axis 1 so that they have the same length."""
if axis not in [1, 2]:
raise ValueError("Only axis=1 and axis=2 supported for now.")
with tf.name_scope("pad_to_same_length", values=[x, y]):
x_length = shape_list(x)[axis]
y_length = shape_list(y)[axis]
if (isinstance(x_length, int) and isinstance(y_length, int) and
x_length == y_length and final_length_divisible_by == 1):
return x, y
max_length = tf.maximum(x_length, y_length)
if final_length_divisible_by > 1:
# Find the nearest larger-or-equal integer divisible by given number.
max_length += final_length_divisible_by - 1
max_length //= final_length_divisible_by
max_length *= final_length_divisible_by
length_diff1 = max_length - x_length
length_diff2 = max_length - y_length
def padding_list(length_diff, arg):
if axis == 1:
return [[[0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
return [[[0, 0], [0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
res_x = tf.pad(x, paddings1)
res_y = tf.pad(y, paddings2)
# Static shapes are the same except for axis=1.
x_shape = x.shape.as_list()
x_shape[axis] = None
res_x.set_shape(x_shape)
y_shape = y.shape.as_list()
y_shape[axis] = None
res_y.set_shape(y_shape)
return res_x, res_y
def pad_with_zeros(logits, labels):
"""Pad labels on the length dimension to match logits length."""
with tf.name_scope("pad_with_zeros", values=[logits, labels]):
logits, labels = pad_to_same_length(logits, labels)
if len(labels.shape) == 3: # 2-d labels.
logits, labels = pad_to_same_length(logits, labels, axis=2)
return logits, labels
def weights_nonzero(labels):
"""Assign weight 1.0 to all labels except for padding (id=0)."""
return tf.to_float(tf.not_equal(labels, 0))
def weights_prepend_inputs_to_targets(labels):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all nonzero labels past the first zero.
See prepend_mode in common_hparams.py
Args:
labels: A Tensor of int32s.
Returns:
A Tensor of floats.
"""
past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)
nonzero = tf.to_float(labels)
return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))
def weights_multi_problem(labels, taskid=-1):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all labels past the taskid.
Args:
labels: A Tensor of int32s.
taskid: an int32 representing the task id for a problem.
Returns:
A Tensor of floats.
Raises:
ValueError: The Task ID must be valid.
"""
past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
non_taskid = tf.to_float(labels)
return tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
def weights_multi_problem_all(labels, taskid=-1):
"""Assign weight 1.0 to only examples from the given task."""
weights = tf.to_float(tf.not_equal(labels, 0))
if taskid < 0:
raise ValueError("Task ID must be non-negative.")
past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
non_taskid = tf.to_float(labels)
example_mask = tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
example_mask = tf.reduce_sum(example_mask, axis=1)
example_mask = tf.to_float(
tf.greater(example_mask, tf.zeros_like(example_mask)))
return weights * tf.expand_dims(example_mask, axis=-1)
def weights_multi_problem_input(labels, taskid=-1):
"""Assign weight 1.0 to only the inputs for the given task."""
weights_all_tokens = weights_multi_problem_all(labels, taskid)
weights_target = weights_multi_problem(labels, taskid)
return weights_all_tokens - weights_target
def weights_all(labels):
"""Assign weight 1.0 to all labels."""
return tf.ones_like(labels, dtype=tf.float32)
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one,
[[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
return ret
def padded_cross_entropy(logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True,
cutoff=0.0,
gaussian=False):
"""Compute cross-entropy assuming 0s are padding.
Computes a loss numerator (the sum of losses), and loss denominator
(the number of non-padding tokens).
Args:
logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
optionally a FactoredTensor.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
cutoff: a float, at which point to have no loss.
gaussian: If true, use a Gaussian distribution for label smoothing
Returns:
loss_numerator: a `Scalar`. Sum of losses.
loss_denominator: a `Scalar. The number of non-padding target tokens.
Raises:
ValueError: in case of unsupported argument types.
"""
if isinstance(logits, FactoredTensor):
if gaussian:
raise ValueError("Factored padded cross entropy with Gaussian smoothing "
"is not implemented yet.")
return padded_cross_entropy_factored(
logits,
labels,
label_smoothing,
weights_fn=weights_fn,
reduce_sum=reduce_sum)
confidence = 1.0 - label_smoothing
logits_shape = shape_list(logits)
vocab_size = logits_shape[-1]
with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
if len(logits_shape) == 2:
# Deal with the case where we did not insert extra dimensions due to
# TPU issues. No pad-to-same-length happens in this case.
# TODO(noam): remove this logic once TPU can handle extra dimensions.
labels = tf.reshape(labels, [-1])
else:
logits, labels = pad_with_zeros(logits, labels)
logits = tf.reshape(
logits,
shape_list(labels) + [vocab_size],
name="padded_cross_entropy_size_check")
logits = tf.cast(logits, tf.float32)
xent = smoothing_cross_entropy(
logits, labels, vocab_size, confidence, gaussian=gaussian)
weights = weights_fn(labels)
if cutoff > 0.0:
xent = tf.nn.relu(xent - cutoff)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
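# Illustrative sketch (not part of the original file): callers usually turn the
# (numerator, denominator) pair returned above into a scalar mean loss; the
# small-denominator guard is an extra precaution, not something this file
# prescribes.
def _padded_cross_entropy_mean_example(logits, labels, label_smoothing=0.1):
  loss_num, loss_den = padded_cross_entropy(logits, labels, label_smoothing)
  return loss_num / tf.maximum(loss_den, 1.0)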
def _weights_one_third(labels):
"""Returns Tensor of shape [batch, height, width]. Each element is 1/3."""
return tf.ones(tf.shape(labels)[:-1]) / 3.
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
"""Discretized mixture of logistics loss.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of 8-bit pixel
intensities. The computation assumes channels is 3.
weights_fn: A function of labels, returning a Tensor of shape
[batch, height, width] which weights each loss term. Default is to scale
each loss term by 1/3 so that they capture the average across channels.
reduce_sum: A boolean, to return scalar loss instead of per position.
Returns:
Tuple of loss tensors for numerator and denominator, each a scalar if
reduce_sum else of shape [batch, height, width]. The sum of their divisions
is the number of nats for each pixel in labels.
"""
real_labels = convert_rgb_to_symmetric_real(labels)
dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)
weights = weights_fn(labels)
loss_num = weights * dml_loss_value
loss_den = weights_nonzero(weights)
if reduce_sum:
loss_num = tf.reduce_sum(loss_num)
loss_den = tf.reduce_sum(loss_den)
return loss_num, loss_den
def split_to_discretized_mix_logistic_params(inputs):
"""Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
"""
batch, height, width, output_dim = shape_list(inputs)
num_mixtures = output_dim // 10
logits, locs, log_scales, coeffs = tf.split(
inputs,
num_or_size_splits=[
num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3
],
axis=-1)
split_shape = [batch, height, width, num_mixtures, 3]
locs = tf.reshape(locs, split_shape)
log_scales = tf.reshape(log_scales, split_shape)
log_scales = tf.maximum(log_scales, -7.)
coeffs = tf.reshape(coeffs, split_shape)
coeffs = tf.tanh(coeffs)
return logits, locs, log_scales, coeffs
def discretized_mix_logistic_loss(pred, labels):
"""Computes negative log probability for the discretized mixture of logistics.
The distribution of a whole pixel is a mixture of 3-dimensional discretized
logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
discretized logistic distributions, one for each channel. It defines
```none
P(X = x)
= sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
= sum_{k=1}^K probs[k] * [
prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
```
The means tensor is a linear combination of location parameters and previous
channels. The discretized logistic distribution assigns probability mass to an
  event P(X=x) via differences of logistic CDFs: P(X <= x + 0.5) - P(X <= x - 0.5)
  for 0 < x < 255; P(X <= 0.5) for x = 0; and 1 - P(X <= 254.5) for x = 255. Instead of
8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of true pixel intensities
rescaled to [-1, 1]. The computation assumes channels is 3.
Returns:
A [batch, height, width] tensor of the negative log conditional probability
of each pixel given all previous pixels.
"""
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Tile labels to broadcast compute across the mixture dimension.
batch, height, width, num_mixtures = shape_list(logits)
labels = tf.tile(
tf.reshape(labels, [batch, height, width, 1, 3]),
[1, 1, 1, num_mixtures, 1])
# p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
# sigmoid((x - means_i - 1/255.)/scale_i)
# for each channel i. The means are linearly parameterized.
means_0 = locs[..., 0]
means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
means_2 = (
locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
coeffs[..., 2] * labels[..., 1])
means = tf.stack([means_0, means_1, means_2], axis=-1)
centered_labels = labels - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
cdf_min = tf.nn.sigmoid(min_in)
# Compute log probability for edge case of 0 (before scaling), 255 (before
# scaling), and all other cases respectively.
log_prob_0 = plus_in - tf.nn.softplus(plus_in)
log_prob_255 = -tf.nn.softplus(min_in)
prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
log_prob_event = tf.log(prob_event)
# Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
# (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
# cause `tf.log(0.)`; (d) p(x) < 1e-5.
mid_in = inv_stdv * centered_labels
log_prob_event_approx = (
mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
log_probs = tf.where(
labels < -0.999, log_prob_0,
tf.where(
labels > 0.999, log_prob_255,
tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))
# Sum over channels and compute log-probability of each mixture.
log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
output = -tf.reduce_logsumexp(log_probs, axis=-1)
return output
def sample_from_discretized_mix_logistic(pred, seed=None):
"""Sampling from a discretized mixture of logistics.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
seed: Random seed.
Returns:
A tensor of shape [batch, height, width, 3] with real intensities scaled
between -1 and 1.
"""
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Sample mixture indicator given logits using the gumbel max trick.
num_mixtures = shape_list(logits)[-1]
gumbel_noise = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
sel = tf.one_hot(
tf.argmax(logits + gumbel_noise, -1),
depth=num_mixtures,
dtype=tf.float32)
# Select mixture component's parameters.
sel = tf.expand_dims(sel, -1)
locs = tf.reduce_sum(locs * sel, 3)
log_scales = tf.reduce_sum(log_scales * sel, 3)
coeffs = tf.reduce_sum(coeffs * sel, 3)
# Sample from 3-D logistic & clip to interval. Note we don't round to the
# nearest 8-bit value when sampling.
uniform_noise = tf.random_uniform(
tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
logistic_noise = tf.log(uniform_noise) - tf.log(1. - uniform_noise)
x = locs + tf.exp(log_scales) * logistic_noise
x0 = x[..., 0]
x1 = x[..., 1] + coeffs[..., 0] * x0
x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
x = tf.stack([x0, x1, x2], axis=-1)
x = tf.clip_by_value(x, -1., 1.)
return x
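# Illustrative usage sketch (batch/size and num_mixtures=10 are hypothetical):
#   pred = tf.random_normal([2, 32, 32, 10 * 10])         # 10 mixture components
#   sample = sample_from_discretized_mix_logistic(pred)   # [2, 32, 32, 3] in [-1, 1]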
def smoothing_cross_entropy(logits,
labels,
vocab_size,
confidence,
gaussian=False):
"""Cross entropy with label smoothing to limit over-confidence.
Args:
logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
labels: Tensor of shape [batch_size, ?, ?, ?].
vocab_size: Tensor representing the size of the vocabulary.
confidence: Used to determine on and off values for label smoothing.
If `gaussian` is true, `confidence` is the variance to the Gaussian
distribution.
gaussian: Uses a Gaussian distribution for label smoothing
Returns:
Tensor of shape [batch_size, ?, ?, ?].
"""
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
# Normalizing constant is the best cross-entropy value with soft targets.
# We subtract it just for readability, makes no difference on learning.
normalizing = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
if gaussian and confidence > 0.0:
labels = tf.cast(labels, tf.float32)
normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
# Locations to evaluate the probability distributions.
soft_targets = normal_dist.prob(
tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
# Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
# logits: [batch_size, ?, ?, ?, vocab_size]
soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
else:
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
return xentropy - normalizing
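# For example, with vocab_size=4 and confidence=0.9, the soft target places
# 0.9 on the true class and (1 - 0.9) / 3 on each of the other three classes
# (illustrative numbers; the Gaussian branch instead uses a discretized normal
# centered on the label).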
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
"""Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
    pooling_type: the pooling type to use, "MAX" or "AVR" (average)
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors.
"""
with tf.name_scope("global_pool", values=[inputs]):
if mask is not None:
mask = tf.expand_dims(mask, axis=2)
inputs = tf.multiply(inputs, mask)
if pooling_type == "MAX":
# A tf.pool can be used here, but reduce is cleaner
output = tf.reduce_max(inputs, axis=1)
elif pooling_type == "AVR":
if mask is not None:
# Some elems are dummy elems so we can't just reduce the average.
output = tf.reduce_sum(inputs, axis=1)
num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
output = tf.div(output, tf.maximum(num_elems, 1))
else:
output = tf.reduce_mean(inputs, axis=1)
return output
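# Illustrative usage sketch (hypothetical shapes):
#   x = tf.random_normal([4, 7, 16])
#   pooled = global_pool_1d(x)  # [4, 16]: max over the sequence dimension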
def running_global_pool_1d(inputs, pooling_type="MAX"):
"""Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
"""
del pooling_type
with tf.name_scope("running_global_pool", values=[inputs]):
scan_fct = tf.maximum
# Permute inputs so seq_length is first.
elems = tf.transpose(inputs, [1, 0, 2])
# Perform scan.
cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
# Permute output to get back to original order.
output = tf.transpose(cumulatives, [1, 0, 2])
return output
def gated_linear_unit_layer(x, name=None):
"""Gated linear unit layer.
Paper: Language Modeling with Gated Convolutional Networks.
Link: https://arxiv.org/abs/1612.08083
x = Wx * sigmoid(W'x).
Args:
x: A tensor
name: A string
Returns:
A tensor of the same shape as x.
"""
with tf.variable_scope(name, default_name="glu_layer", values=[x]):
depth = shape_list(x)[-1]
x = tf.layers.dense(x, depth * 2, activation=None)
x, gating_x = tf.split(x, 2, axis=-1)
return x * tf.nn.sigmoid(gating_x)
def sru_with_scan(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
This implementation uses tf.scan and can incur overhead, see the full SRU
function doc for details and an implementation that is sometimes faster.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0.
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
# SRU state manipulation function.
def next_state(cur_state, args_tup):
cur_x_times_one_minus_f, cur_f = args_tup
return cur_f * cur_state + cur_x_times_one_minus_f
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
c_states = tf.scan(
next_state, (x_times_one_minus_f, f),
initializer=initial_state,
parallel_iterations=2,
name="scan_%d" % i)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
# Transpose back to batch-major.
x = tf.transpose(x, [1, 0, 2])
return tf.reshape(x, x_shape)
class CumsumprodCell(object):
"""Cumulative sum and product object for use with functional_rnn API."""
def __init__(self, initializer):
self._initializer = initializer
@property
def output_size(self):
return int(shape_list(self._initializer)[-1])
def zero_state(self, batch_size, dtype):
dtype = dtype or tf.float32
return tf.zeros([batch_size, self.output_size], dtype=dtype)
def __call__(self, inputs_t, state_t):
cur_x_times_one_minus_f, cur_f = tf.split(inputs_t, 2, axis=-1)
state_next = cur_f * state_t + cur_x_times_one_minus_f
outputs_t = state_next
return outputs_t, state_next
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
  Returns:
    A tensor of the same shape as x.
  Raises:
    ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
tf.layers.dense(x, 3 * x_shape[-1], name="kernel_%d" % i), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape)
def linear_set_layer(layer_size,
inputs,
context=None,
activation_fn=tf.nn.relu,
dropout=0.0,
name=None):
"""Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
"""
with tf.variable_scope(
name, default_name="linear_set_layer", values=[inputs]):
# Apply 1D convolution to apply linear filter to each element
# along the 2nd dimension.
outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
# Apply the context if it exists.
if context is not None:
# Unfortunately tf doesn't support broadcasting via concat, but we can
# simply add the transformed context to get the same effect.
if len(context.get_shape().as_list()) == 2:
context = tf.expand_dims(context, axis=1)
cont_tfm = conv1d(
context, layer_size, 1, activation=None, name="cont_conv")
outputs += cont_tfm
if activation_fn is not None:
outputs = activation_fn(outputs)
if dropout != 0.0:
outputs = tf.nn.dropout(outputs, 1.0 - dropout)
return outputs
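# Illustrative usage sketch (hypothetical shapes), following the docstring's
# suggestion of using a pooled representation as the context:
#   elements = tf.random_normal([4, 7, 16])
#   context = global_pool_1d(elements)                      # [4, 16]
#   transformed = linear_set_layer(32, elements, context)   # [4, 7, 32]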
def ravanbakhsh_set_layer(layer_size,
inputs,
mask=None,
sequential=False,
activation_fn=tf.nn.tanh,
dropout=0.0,
name=None):
"""Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .
More parameter-efficient version of a linear-set-layer with context.
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, vector]
containing the sequences of input vectors.
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
sequential: If true, will use a running global pool so each element will
only depend on those before it. Set true if this layer is being used in
an output sequence.
activation_fn: The activation function to use.
dropout: dropout.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, vector] containing the
sequences of transformed vectors.
"""
del dropout
with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
if sequential:
return linear_set_layer(
layer_size,
inputs - running_global_pool_1d(inputs),
activation_fn=activation_fn,
name=name)
return linear_set_layer(
layer_size,
inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1),
activation_fn=activation_fn,
name=name)
def fn_device_dependency_dict():
"""State container for fn_device_dependency."""
if not hasattr(tf.get_default_graph(), "dependency_dict"):
setattr(tf.get_default_graph(), "dependency_dict", defaultdict(list))
return tf.get_default_graph().dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
"""Add control deps for name and device."""
key = name + "_" + device
outs = []
def body():
with tf.control_dependencies(fn_device_dependency_dict()[key]):
yield outs
assert outs
deps = outs
if isinstance(outs[0], (list, tuple)):
assert len(outs) == 1
deps = outs[0]
fn_device_dependency_dict()[key] = deps
if device:
with tf.device(device):
return body()
else:
return body()
def underlying_variable_ref(t):
"""Find the underlying variable ref.
Traverses through Identity, ReadVariableOp, and Enter ops.
Stops when op type has Variable or VarHandle in name.
Args:
t: a Tensor
Returns:
a Tensor that is a variable ref, or None on error.
"""
while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
t = t.op.inputs[0]
op_type = t.op.type
if "Variable" in op_type or "VarHandle" in op_type:
return t
else:
return None
def underlying_variable(t):
"""Find the underlying tf.Variable object.
Args:
t: a Tensor
Returns:
tf.Variable.
"""
t = underlying_variable_ref(t)
assert t is not None
# make sure that the graph has a variable index and that it is up-to-date
if not hasattr(tf.get_default_graph(), "var_index"):
tf.get_default_graph().var_index = {}
var_index = tf.get_default_graph().var_index
for v in tf.global_variables()[len(var_index):]:
var_index[v.name] = v
return var_index[t.name]
def approximate_split(x, num_splits, axis=0):
"""Split approximately equally into num_splits parts.
Args:
x: a Tensor
num_splits: an integer
axis: an integer.
Returns:
a list of num_splits Tensors.
"""
size = shape_list(x)[axis]
size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
return tf.split(x, size_splits, axis=axis)
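# For example, splitting a dimension of size 10 into num_splits=4 parts gives
# pieces of sizes [2, 2, 3, 3]; the pieces always sum to the original size.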
class FactoredTensor(object):
"""A concise factored representation of Tensor as two tensors.
This class represents the tensor tf.matmul(a, b, transpose_b=True)
by storing the values of Tensors a and b.
The reason for this is that the product may be too big to fully realize at
once, so it can be realized a part at a time.
"a" may have extra leading dimensions, in which case they are flattened out
before computing the matrix product, then re-expanded afterwards.
"""
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def to_tensor(self):
"""Convert to Tensor."""
a_shape = shape_list(self.a)
b_shape = shape_list(self.b)
inner_dim = b_shape[1]
result_dim = b_shape[0]
flat_a = tf.reshape(self.a, [-1, inner_dim])
product = tf.matmul(flat_a, self.b, transpose_b=True)
product_shape = a_shape[:-1] + [result_dim]
product = tf.reshape(product, product_shape)
product.set_shape(self.a.get_shape().as_list()[:-1] +
[self.b.get_shape()[0]])
return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
# call ops.convert_to_tensor to handle optional arguments appropriately
return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)
tf.register_tensor_conversion_function(FactoredTensor,
_convert_factored_tensor_to_tensor)
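# Illustrative usage sketch (hypothetical shapes):
#   a = tf.random_normal([8, 20, 512])    # activations
#   b = tf.random_normal([32000, 512])    # softmax / embedding weights
#   factored = FactoredTensor(a, b)       # stands for tf.matmul(a, b, transpose_b=True)
#   logits = factored.to_tensor()         # [8, 20, 32000], realized only when needed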
def smoothing_cross_entropy_factored_grad(op, dy):
"""Gradient function for smoothing_cross_entropy_factored."""
a = op.inputs[0]
b = op.inputs[1]
labels = op.inputs[2]
confidence = op.inputs[3]
num_splits = 16
vocab_size = shape_list(b)[0]
labels = approximate_split(labels, num_splits)
a = approximate_split(a, num_splits)
dy = approximate_split(dy, num_splits)
b_grad = None
a_grad_parts = []
deps = []
for part in range(num_splits):
with tf.control_dependencies(deps):
logits = tf.matmul(a[part], b, transpose_b=True)
output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
confidence)
a_grad_part, b_grad_part = tf.gradients(
ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
a_grad_parts.append(a_grad_part)
if part > 0:
b_grad += b_grad_part
else:
b_grad = b_grad_part
deps = [b_grad, a_grad_part]
a_grad = tf.concat(a_grad_parts, 0)
return a_grad, b_grad, None, None
@function.Defun(
noinline=True,
python_grad_func=smoothing_cross_entropy_factored_grad,
compiled=True,
separate_compiled_gradients=True)
def smoothing_cross_entropy_factored(a, b, labels, confidence):
"""Memory-efficient computation of smoothing cross-entropy.
Avoids realizing the entire logits matrix at once.
Args:
a: a Tensor with shape [batch, inner_dim]
b: a Tensor with shape [vocab_size, inner_dim]
labels: an integer Tensor with shape [batch]
confidence: a float
Returns:
A Tensor with shape [batch]
"""
num_splits = 16
vocab_size = shape_list(b)[0]
labels = approximate_split(labels, num_splits)
a = approximate_split(a, num_splits)
parts = []
for part in range(num_splits):
with tf.control_dependencies(parts[-1:]):
logits = tf.matmul(a[part], b, transpose_b=True)
parts.append(
smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
return tf.concat(parts, 0)
def padded_cross_entropy_factored(factored_logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True):
"""Memory-efficient computation of smoothing cross-entropy.
Avoids realizing the entire logits matrix at once.
Args:
factored_logits: a `FactoredTensor` representing a Tensor
with shape `[batch, timesteps, vocab_size]`.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
Returns:
loss_numerator: a `Scalar`. Sum of losses.
    loss_denominator: a `Scalar`. The number of non-padding target tokens.
"""
a = factored_logits.a
b = factored_logits.b
confidence = 1.0 - label_smoothing
with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
labels_flat = tf.reshape(labels, [-1])
a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
tf.convert_to_tensor(confidence))
xent = tf.reshape(xent, shape_list(labels))
weights = weights_fn(labels)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def fn_with_custom_grad(grad_fn, use_global_vars=False):
"""Decorator to create a subgraph with a custom gradient function.
The subgraph created by the decorated function is NOT put in a Defun and so
does not suffer from the limitations of the Defun (all subgraph ops on the
same device, no summaries).
Args:
grad_fn: function with signature
(inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
Decorator for function such that the gradient is defined by grad_fn.
"""
def dec(fn):
@functools.wraps(fn)
def wrapped(*args):
return _fn_with_custom_grad(
fn, args, grad_fn, use_global_vars=use_global_vars)
return wrapped
return dec
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
"""Create a subgraph with a custom gradient.
Args:
fn: function that takes inputs as arguments and produces 1 or more Tensors.
inputs: list<Tensor>, will be passed as fn(*inputs).
grad_fn: function with signature
(inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
fn(*inputs)
"""
vs = tf.get_variable_scope()
get_vars_fn = (
vs.global_variables if use_global_vars else vs.trainable_variables)
len_before_vars = len(get_vars_fn())
inputs = list(inputs)
outputs = fn(*inputs)
train_vars = get_vars_fn()[len_before_vars:]
if grad_fn is None:
return outputs
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
outputs = list(outputs)
defun_inputs = [inputs, train_vars, outputs]
def custom_grad_fn(op, *dys):
"""Custom grad fn applying grad_fn for identity Defun."""
fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
defun_inputs, list(op.inputs))
dys = list(dys)
assert len(fn_outputs) == len(outputs)
assert len(fn_outputs) == len(dys)
grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
grad_outputs = [None] * len(fn_outputs)
return tuple(grad_inputs + grad_vars + grad_outputs)
# The Defun takes as input the original inputs, the trainable variables
# created in fn, and the outputs. In the forward it passes through the
# outputs. In the backwards, it produces gradients for the original inputs
# and the trainable variables.
in_types = [t.dtype for t in inputs]
out_types = [t.dtype for t in outputs]
var_types = [t.dtype for t in train_vars]
@function.Defun(
*(in_types + var_types + out_types),
func_name="identity_custom_grad%d" % ops.uid(),
python_grad_func=custom_grad_fn,
shape_func=lambda _: [t.get_shape() for t in outputs])
def identity(*args):
_, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
return tuple([tf.identity(t) for t in outs])
flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
id_out = identity(*flat_inputs)
return id_out
_function_cache = {}
def conv_hidden_relu_memory_efficient(x,
filter_size,
epsilon=1e-6,
forget=True,
test_vars=None,
name=None):
"""LayerNorm, Conv, ReLU, Conv.
All convolutions have kernel size 1.
returns conv(relu(conv(layer_norm(x))))
Args:
x: input Tensor with shape [batch, length, io_size]
filter_size: an integer - size of the hidden layer.
epsilon: a float (for layer norm)
forget: a boolean - forget forwards activations and recompute on backprop
test_vars: optional tuple of variables for testing purposes
name: an optional string
Returns:
a Tensor with shape [batch, length, io_size]
"""
io_size = x.get_shape().as_list()[-1]
def forward_internal(x, f1, f2, scale, bias):
"""Forward function."""
    # Split batch-wise to avoid exhausting memory in case the batch is large
    # and the hidden layer is large.
num_splits = 4
x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])
xs = approximate_split(x_flat, num_splits)
ys = []
for i in range(num_splits):
with tf.control_dependencies(ys[-1:]):
n = layer_norm_compute(xs[i], epsilon, scale, bias)
y = tf.nn.conv1d(n, f1, 1, "SAME")
y = tf.nn.relu(y)
y = tf.nn.conv1d(y, f2, 1, "SAME")
ys.append(y)
y = tf.concat(ys, 0)
y = tf.reshape(y, shape_list(x))
return y
key = ("conv_hidden_relu_memory_efficient %s" % epsilon)
if not forget:
forward_fn = forward_internal
elif key in _function_cache:
forward_fn = _function_cache[key]
else:
@function.Defun(compiled=True)
def grad_fn(x, f1, f2, scale, bias, dy):
"""Gradient for efficiency."""
with tf.control_dependencies([dy]):
num_splits = 4
x_shape = shape_list(x)
flat_shape = [-1, 1, x_shape[2]]
x = tf.reshape(x, flat_shape)
dy = tf.reshape(dy, flat_shape)
xs = approximate_split(x, num_splits)
dys = approximate_split(dy, num_splits)
dxs = []
df1 = 0
df2 = 0
dscale = 0
dbias = 0
deps = []
for i in range(num_splits):
with tf.control_dependencies(deps):
n = layer_norm_compute(xs[i], epsilon, scale, bias)
y = tf.nn.conv1d(n, f1, 1, "SAME")
y = tf.nn.relu(y)
y = tf.nn.conv1d(y, f2, 1, "SAME")
dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
df1 += pdf1
df2 += pdf2
dscale += pdscale
dbias += pdbias
dxs.append(dxi)
deps = [dxi, df1, df2, dscale, dbias]
with tf.control_dependencies(deps):
dx = tf.concat(dxs, 0)
dx = tf.reshape(dx, x_shape)
return dx, df1, df2, dscale, dbias
@function.Defun(
grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
def forward_fn(x, f1, f2, scale, bias):
return forward_internal(x, f1, f2, scale, bias)
with tf.variable_scope(name, default_name="ffn2", values=[x]):
# TODO(noam): it would be nice to save memory by casting x to float16
# here, but this causes problems with the gradients. Figure out if there
# is a way to leave the gradients as float32.
if test_vars is not None:
f1, f2, scale, bias = list(test_vars)
else:
f1 = tf.get_variable("f1", [1, io_size, filter_size])
f2 = tf.get_variable("f2", [1, filter_size, io_size])
scale, bias = layer_norm_vars(io_size)
if forget:
y = forward_fn(x, f1, f2, scale, bias)
else:
y = forward_internal(x, f1, f2, scale, bias)
y.set_shape(x.get_shape())
return y
def shape_list(x):
"""Return list of dims, statically where possible."""
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i in range(len(static)):
dim = static[i]
if dim is None:
dim = shape[i]
ret.append(dim)
return ret
def list_product(els):
prod = els[0]
for el in els[1:]:
prod *= el
return prod
def sample_with_temperature(logits, temperature):
"""Either argmax or random sampling.
Args:
logits: a Tensor.
temperature: a float 0.0=argmax 1.0=random
Returns:
a Tensor with one fewer dimension than logits.
"""
if temperature == 0.0:
# TF argmax doesn't handle >5 dimensions, so we reshape here.
logits_shape = shape_list(logits)
argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
return tf.reshape(argmax, logits_shape[:-1])
else:
assert temperature > 0.0
reshaped_logits = (
tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices,
shape_list(logits)[:logits.get_shape().ndims - 1])
return choices
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
"""Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape.
"""
if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
# Needed info is constant, so we construct in numpy
if num_lower < 0:
num_lower = rows - 1
if num_upper < 0:
num_upper = cols - 1
lower_mask = np.tri(cols, rows, num_lower).T
upper_mask = np.tri(rows, cols, num_upper)
band = np.ones((rows, cols)) * lower_mask * upper_mask
if out_shape:
band = band.reshape(out_shape)
band = tf.constant(band, tf.float32)
else:
band = tf.matrix_band_part(
tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
tf.cast(num_upper, tf.int64))
if out_shape:
band = tf.reshape(band, out_shape)
return band
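# For example, ones_matrix_band_part(3, 3, -1, 0) yields the lower-triangular
# ones matrix [[1, 0, 0], [1, 1, 0], [1, 1, 1]], the usual causal-mask shape.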
def reshape_like_all_dims(a, b):
"""Reshapes a to match the shape of b."""
ret = tf.reshape(a, tf.shape(b))
if not tf.contrib.eager.in_eager_mode():
ret.set_shape(b.get_shape())
return ret
def recompute_grad(fn):
"""Decorator that recomputes the function on the backwards pass.
Args:
fn: a function that takes Tensors (all as positional arguments) and returns
a tuple of Tensors.
Returns:
A wrapped fn that is identical to fn when called, but its activations will
be discarded and recomputed on the backwards pass (i.e. on a call to
tf.gradients).
"""
@functools.wraps(fn)
def wrapped(*args):
return _recompute_grad(fn, args)
return wrapped
def _recompute_grad(fn, args):
"""See recompute_grad."""
cached_vs = []
cached_arg_scope = []
def grad_fn(inputs, variables, outputs, output_grads):
"""Recompute outputs for gradient computation."""
del outputs
variables = [underlying_variable_ref(v) for v in variables]
# Recompute outputs
with tf.control_dependencies(output_grads):
with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
with tf.variable_scope(cached_vs[0], reuse=True):
outputs = fn(*inputs)
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
outputs = list(outputs)
grads = tf.gradients(outputs, inputs + variables, output_grads)
grad_inputs = grads[:len(inputs)]
grad_vars = grads[len(inputs):]
# TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.
# If the input gradients are bfloat16, it's assumed the variables are
# bfloat16. This is a hack to ensure that grad_vars are the right type.
if grad_inputs[0].dtype == tf.bfloat16:
grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]
return grad_inputs, grad_vars
@fn_with_custom_grad(grad_fn)
def fn_with_recompute(*args):
cached_vs.append(tf.get_variable_scope())
cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
return fn(*args)
return fn_with_recompute(*args)
def dense(x, units, **kwargs):
"""Identical to tf.layers.dense."""
return tf.layers.dense(x, units, **kwargs)
def batch_dense(inputs,
units,
activation=None,
kernel_initializer=None,
reuse=None,
name=None):
"""Multiply a batch of input matrices by a batch of parameter matrices.
Each input matrix is multiplied by the corresponding parameter matrix.
This is useful in a mixture-of-experts where the batch represents different
experts with different inputs.
Args:
inputs: a Tensor with shape [batch, length, input_units]
units: an integer
activation: an optional activation function to apply to the output
kernel_initializer: an optional initializer
    reuse: whether to reuse the variable scope
name: an optional string
Returns:
a Tensor with shape [batch, length, units]
Raises:
ValueError: if the "batch" or "input_units" dimensions of inputs are not
statically known.
"""
inputs_shape = shape_list(inputs)
if len(inputs_shape) != 3:
raise ValueError("inputs must have 3 dimensions")
batch = inputs_shape[0]
input_units = inputs_shape[2]
if not isinstance(batch, int) or not isinstance(input_units, int):
raise ValueError("inputs must have static dimensions 0 and 2")
with tf.variable_scope(
name,
default_name="batch_dense",
values=[inputs],
reuse=reuse,
dtype=inputs.dtype):
if kernel_initializer is None:
kernel_initializer = tf.random_normal_initializer(
stddev=input_units**-0.5)
w = tf.get_variable(
"w", [batch, input_units, units],
initializer=kernel_initializer,
dtype=inputs.dtype)
y = tf.matmul(inputs, w)
if activation is not None:
y = activation(y)
return y
def mix(x1,
x2,
steps,
is_training,
min_prob=0.0,
max_prob=1.0,
mode="lin",
simple=False,
broadcast_last=False):
"""Mix starting with x2, mixing mixing, going towards x1."""
with tf.name_scope("mix"):
if not is_training:
if max_prob >= 1.0:
return x1
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = tf.to_float(tf.less(alpha, max_prob))
return alpha * x1 + (1.0 - alpha) * x2
def get_res():
"""Create the result.
Separate function to speed it up later (see below).
Returns:
Tensor of mixed inputs.
"""
if mode == "lin":
alpha_p = inverse_lin_decay(steps)
else:
alpha_p = inverse_exp_decay(steps)
alpha_p = alpha_p * (max_prob - min_prob) + min_prob
if simple:
return alpha_p * x1 + (1.0 - alpha_p) * x2
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = tf.to_float(tf.less(alpha, alpha_p))
return alpha * x1 + (1.0 - alpha) * x2
if max_prob < 1.0:
return get_res()
# Prevent sampling after steps is passed to speed it up.
if is_xla_compiled():
return get_res()
else:
cur_step = tf.train.get_global_step()
if cur_step is None:
return x1 # Step not available, probably eval mode, don't mix.
return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1)
def brelu(x):
"""Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
x_shape = shape_list(x)
x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
y1 = tf.nn.relu(x1)
y2 = -tf.nn.relu(-x2)
return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
def belu(x):
"""Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
x_shape = shape_list(x)
x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
y1 = tf.nn.elu(x1)
y2 = -tf.nn.elu(-x2)
return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
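# Both bipolar activations above split the last dimension into pairs: the
# first element of each pair gets the ordinary activation and the second gets
# a mirrored (negated) one, keeping the layer's output roughly zero-centered.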
def nac(x, depth, name=None, reuse=None):
"""NAC as in https://arxiv.org/abs/1808.00508."""
with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
x_shape = shape_list(x)
w = tf.get_variable("w", [x_shape[-1], depth])
m = tf.get_variable("m", [x_shape[-1], depth])
w = tf.tanh(w) * tf.nn.sigmoid(m)
x_flat = tf.reshape(x, [-1, x_shape[-1]])
res_flat = tf.matmul(x_flat, w)
return tf.reshape(res_flat, x_shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
"""NALU as in https://arxiv.org/abs/1808.00508."""
with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
x_shape = shape_list(x)
x_flat = tf.reshape(x, [-1, x_shape[-1]])
gw = tf.get_variable("w", [x_shape[-1], depth])
g = tf.nn.sigmoid(tf.matmul(x_flat, gw))
g = tf.reshape(g, x_shape[:-1] + [depth])
a = nac(x, depth, name="nac_lin")
log_x = tf.log(tf.abs(x) + epsilon)
m = nac(log_x, depth, name="nac_log")
return g * a + (1 - g) * tf.exp(m)
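# In the NALU above, the gate g mixes an additive path (a, a plain NAC) with a
# multiplicative path (exp(m), a NAC applied to log|x|), so the unit can learn
# either addition/subtraction-like or multiplication/division-like arithmetic.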
def argmax_with_score(logits, axis=None):
"""Argmax along with the value."""
axis = axis or len(logits.get_shape()) - 1
predictions = tf.argmax(logits, axis=axis)
logits_shape = shape_list(logits)
prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
prefix_size = 1
for d in prefix_shape:
prefix_size *= d
# Flatten to extract scores
flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
flat_predictions = tf.reshape(predictions, [prefix_size])
flat_indices = tf.stack(
[tf.range(tf.to_int64(prefix_size)),
tf.to_int64(flat_predictions)],
axis=1)
flat_scores = tf.gather_nd(flat_logits, flat_indices)
# Unflatten
scores = tf.reshape(flat_scores, prefix_shape)
return predictions, scores
def log_prob_from_logits(logits, reduce_axis=-1):
return logits - tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
def top_1_tpu(inputs):
"""find max and argmax over the last dimension.
Works well on TPU
Args:
inputs: A tensor with shape [..., depth]
Returns:
values: a Tensor with shape [...]
indices: a Tensor with shape [...]
"""
inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
mask = tf.to_int32(tf.equal(inputs_max, inputs))
index = tf.range(tf.shape(inputs)[-1]) * mask
return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
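# For example, top_1_tpu(tf.constant([[0.1, 0.9, 0.3]])) returns values [0.9]
# and indices [1]; ties are broken towards the largest index.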
def index_last_dim_with_indices(x, indices):
"""Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
"""
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx
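# Illustrative usage sketch: recover the probability of sampled tokens
# (hypothetical tensors):
#   probs = tf.nn.softmax(logits)                   # [batch, length, vocab]
#   samples = sample_with_temperature(logits, 1.0)  # [batch, length]
#   sample_probs = index_last_dim_with_indices(probs, samples)  # [batch, length]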
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
"""
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True
def reshape_like(a, b):
"""Reshapes a to match the shape of b in all but the last dimension."""
ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
if not tf.contrib.eager.in_eager_mode():
ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
return ret
def summarize_video(video, prefix, max_outputs=1):
"""Summarize the video using image summaries starting with prefix."""
video_shape = shape_list(video)
if len(video_shape) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(video_shape))
if tf.contrib.eager.in_eager_mode():
return
if video.get_shape().as_list()[1] is None:
tf.summary.image(
"%s_last_frame" % prefix,
tf.cast(video[:, -1, :, :, :], tf.uint8),
max_outputs=max_outputs)
else:
for k in range(video_shape[1]):
tf.summary.image(
"%s_frame_%d" % (prefix, k),
tf.cast(video[:, k, :, :, :], tf.uint8),
max_outputs=max_outputs)
def cast_like(x, y):
"""Cast x to y's dtype, if necessary."""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
if x.dtype.base_dtype == y.dtype.base_dtype:
return x
cast_x = tf.cast(x, y.dtype)
if cast_x.device != x.device:
tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x.name,
x.device, cast_x.device)
return cast_x
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary."""
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
def sliced_gan_loss(input1,
input2,
discriminator,
num_vecs,
do_random_vecs=True,
do_tanh=True,
return_logits=False):
"""Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.
Puts input1 and input2 through the provided discriminator to get logits.
Then, computes num_vecs random projections of the logits, sorts them on
the batch dimension and returns the L2 loss between the sorted vectors.
See the above-mentioned paper for the reasoning behind it.
Args:
input1: first discriminator inputs.
input2: second discriminator inputs.
discriminator: inputs -> logits function.
num_vecs: how many random vectors to use for projections.
do_random_vecs: whether to use random vectors or just tanh of the logits.
do_tanh: if true (default) we'll also just use tanh of the logits.
return_logits: Whether or not to return the logits.
Returns:
The generator loss, i.e., the sliced approximation of the distance between
the projected distributions (warning: discriminator should maximize it).
"""
with tf.variable_scope("sliced_gan"):
with tf.variable_scope("discriminator"):
logits1 = discriminator(input1)
with tf.variable_scope("discriminator", reuse=True):
logits2 = discriminator(input2)
if do_random_vecs:
random_vecs = tf.nn.l2_normalize(
tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)
def get_sorted_projections(x):
"""Make projections of x and sort them on the batch dimension."""
x = tf.reshape(x, [-1, shape_list(x)[-1]])
batch_size = shape_list(x)[0]
if do_random_vecs and do_tanh:
n = tf.nn.l2_normalize(x, axis=1)
proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1)
elif do_random_vecs:
n = tf.nn.l2_normalize(x, axis=1)
proj = tf.matmul(n, random_vecs)
else:
proj = tf.tanh(x)
proj = tf.transpose(proj, [1, 0]) # [num_vecs, batch] after this.
if is_xla_compiled():
proj_dtype = proj.dtype
proj = tf.cast(proj, tf.bfloat16)
# Currently TPU only supports 1-D top_k calls.
map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0]
values = tf.map_fn(map_fn, proj)
values = tf.cast(values, proj_dtype)
else:
values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)
return values
proj1 = get_sorted_projections(logits1)
proj2 = get_sorted_projections(logits2)
dist = tf.reduce_mean(tf.square(proj1 - proj2))
if return_logits:
return dist, logits1, logits2
return dist
def lrelu(input_, leak=0.2, name="lrelu"):
return tf.maximum(input_, leak * input_, name=name)
def deep_discriminator(x,
batch_norm,
is_training,
filters=64,
filter_size=4,
stride=2,
output_size=1024):
"""Discriminator architecture based on InfoGAN."""
with tf.variable_scope(
"discriminator", initializer=tf.random_normal_initializer(stddev=0.02)):
batch_size, height, width = shape_list(x)[:3]
net = tf.layers.conv2d(
x, filters, filter_size, strides=stride, padding="SAME", name="conv1")
net = lrelu(net)
net = tf.layers.conv2d(
net,
2 * filters,
filter_size,
strides=stride,
padding="SAME",
name="conv2")
# [bs, h/4, w/4, 128]
if batch_norm:
net = tf.layers.batch_normalization(
net, training=is_training, momentum=0.999, name="d_bn2")
net = lrelu(net)
size = height * width
x_shape = x.get_shape().as_list()
if x_shape[1] is None or x_shape[2] is None:
net = tf.reduce_mean(net, axis=[1, 2])
else:
net = tf.reshape(net, [batch_size, size * 8])
net = tf.layers.dense(net, output_size, name="d_fc3")
if batch_norm:
net = tf.layers.batch_normalization(
net, training=is_training, momentum=0.999, name="d_bn3")
net = lrelu(net)
return net
def instance_norm(x):
"""Instance normalization layer."""
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable(
"scale", [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
offset = tf.get_variable(
"offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
def general_conv(x,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding="VALID",
name="conv",
do_norm="instance",
do_relu=True,
relufactor=0):
"""Generalized convolution layer."""
with tf.variable_scope(name):
x = tf.layers.conv2d(
x,
num_filters,
filter_size,
stride,
padding,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
bias_initializer=tf.constant_initializer(0.0))
if do_norm == "layer":
x = tf.contrib.layers.layer_norm(x)
elif do_norm == "instance":
x = instance_norm(x)
if do_relu:
if relufactor == 0:
x = tf.nn.relu(x, "relu")
else:
x = lrelu(x, leak=relufactor)
return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
name="patch_discrim"):
"""Patch descriminator."""
with tf.variable_scope(name):
x_shape = shape_list(x)
spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
for i in range(n):
x = general_conv(
x=x,
num_filters=filters * 2**i,
filter_size=filter_size,
stride=2 if i != n - 1 else 1,
stddev=0.02,
padding="SAME",
name="c%d" % i,
do_norm="instance" if i != 0 else False,
do_relu=i != n - 1,
relufactor=0.2)
x = tf.reduce_mean(x, [1, 2])
return x
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions."""
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = tf.layers.dense(x, num_heads, name="mean_attn")
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return tf.layers.dense(tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]),
2 * shape[-1], name="mean_attn_final")
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator."""
with tf.variable_scope("discriminator"):
net = tf.layers.conv2d(
x, filters, kernel_size, strides=strides, padding="SAME", name="conv1")
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net
def double_discriminator(x, filters1=128, filters2=None,
kernel_size=8, strides=4, pure_mean=False):
"""A convolutional discriminator with 2 layers and concatenated output."""
if filters2 is None:
filters2 = 4 * filters1
with tf.variable_scope("discriminator"):
batch_size = shape_list(x)[0]
net = tf.layers.conv2d(
x, filters1, kernel_size, strides=strides, padding="SAME", name="conv1")
if pure_mean:
net1 = tf.reduce_mean(net, [1, 2])
else:
net1 = mean_with_attention(net, "mean_with_attention1")
tf.reshape(net, [batch_size, -1])
net = tf.nn.relu(net)
net = tf.layers.conv2d(
x, filters2, kernel_size, strides=strides, padding="SAME", name="conv2")
if pure_mean:
net2 = tf.reduce_mean(net, [1, 2])
else:
net2 = mean_with_attention(net, "mean_with_attention2")
return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Upscaling the image by a factor of f."""
height, width = shape_list(inputs)[1:3]
return tf.image.resize_images(inputs, (height * f, width * f), method)
def tpu_safe_image_summary(image):
if is_xla_compiled():
# We only support float32 images at the moment due to casting complications.
if image.dtype != tf.float32:
image = tf.to_float(image)
else:
image = tf.cast(image, tf.uint8)
return image
# This has been (shamefully) copied from
# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py
#
# tensorflow/models cannot be pip installed, and even if it were we don't want
# to depend on all the models in it.
#
# Therefore copying and forgoing any more bugfixes into it is the most
# expedient way to use this function.
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if stride
      is [2, 3], then the output height and width will be twice and three
      times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
"""
with tf.variable_scope("upconv"):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
# 3x3 "valid" convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == "nn_upsample_conv":
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = tf.contrib.layers.conv2d(
net, num_outputs, kernel_size=[3, 3], padding="valid")
elif method == "bilinear_upsample_conv":
net = tf.image.resize_bilinear(net,
[stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = tf.contrib.layers.conv2d(
net, num_outputs, kernel_size=[3, 3], padding="valid")
elif method == "conv2d_transpose":
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing "SAME" padding).
      # Note: This doesn't reflect the actual model in the paper.
net = tf.contrib.layers.conv2d_transpose(
net, num_outputs, kernel_size=[3, 3], stride=stride, padding="valid")
net = net[:, 1:, 1:, :]
else:
raise ValueError("Unknown method: [%s]" % method)
return net
def weight_targeting(w, k):
"""Weight-level magnitude pruning."""
k = tf.to_int32(k)
w_shape = shape_list(w)
size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
w = tf.reshape(w, [size, w_shape[-1]])
transpose_w = tf.transpose(w)
thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k]
mask = tf.to_float(thres[None, :] >= tf.abs(w))
return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
"""Unit-level magnitude pruning."""
k = tf.to_int32(k)
w_shape = shape_list(w)
size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
w = tf.reshape(w, [size, w_shape[-1]])
norm = tf.norm(w, axis=0)
thres = tf.contrib.framework.sort(norm, axis=0)[k]
mask = tf.to_float(thres >= norm)[None, :]
mask = tf.tile(mask, [size, 1])
return tf.reshape(mask, w_shape)
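# In both targeting functions above, the returned mask is 1.0 for the k
# smallest-magnitude weights (or smallest-norm units), i.e. the entries that
# targeted_dropout will drop or prune, and 0.0 everywhere else.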
def td_conv(inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
name=None,
reuse=None):
"""Apply targeted dropout to the weights of a convolution."""
with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
nhwc = data_format == "channels_last"
in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
kernel_shape = [kernel_size, kernel_size, in_dim, filters]
w = tf.get_variable(
"DW", shape=kernel_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
if keep_prob < 1.0:
w = targeted_dropout(
w,
targeting_count,
keep_prob,
targeting_fn,
is_training,
do_prune=do_prune)
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate, dilation_rate]
if nhwc:
strides = [1, strides[0], strides[1], 1]
dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
else:
strides = [1, 1, strides[0], strides[1]]
dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
y = tf.nn.conv2d(
inputs,
w,
strides,
padding,
data_format="NHWC" if nhwc else "NCHW",
dilations=dilation_rate,
name=None)
if use_bias:
y += b
if activation:
y = activation(y)
return y
def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
      elements of `inputs` expected to be dropped each forward pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(tf.to_float(k) * tf.to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs
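# Hypothetical usage sketch, not part of the original file: target roughly the
# lowest-magnitude half of each unit's incoming weights and drop 90% of the targeted
# weights per training step (keep_prob=0.1); with do_prune=True, evaluation instead
# deterministically zeroes the k * (1 - keep_prob) lowest-magnitude weights. Assumes
# TF1-style graph mode and the helpers defined above in this file.
def _targeted_dropout_usage_sketch(is_training):
  w = tf.get_variable("dense_kernel", shape=[256, 128])
  k = shape_list(w)[0] // 2  # number of weights to target per output unit
  return targeted_dropout(
      w, k, keep_prob=0.1, targeting_fn=weight_targeting,
      is_training=is_training, do_prune=True)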
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
"""KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_var: log(var) parameter of the distribution.
mu_p: optional mu from a learned prior distribution
log_var_p: optional log(var) from a learned prior distribution
Returns:
the KL loss.
"""
batch_size = shape_list(mu)[0]
prior_distribution = tfp.distributions.Normal(
mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
posterior_distribution = tfp.distributions.Normal(
mu, tf.exp(tf.multiply(0.5, log_var)))
kld = tfp.distributions.kl_divergence(posterior_distribution,
prior_distribution)
return tf.reduce_sum(kld) / tf.to_float(batch_size)
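# Reference sketch, not part of the original file: the per-element closed form that
# the tfp KL call above evaluates, handy as a NumPy sanity check. The original
# function additionally sums over all elements and divides by the batch size.
def _kl_diag_gaussian_closed_form(mu, log_var, mu_p=0.0, log_var_p=0.0):
  import numpy as np
  # KL(N(mu, e^log_var) || N(mu_p, e^log_var_p)) per element:
  #   0.5 * (log_var_p - log_var)
  #   + (e^log_var + (mu - mu_p)**2) / (2 * e^log_var_p) - 0.5
  return (0.5 * (log_var_p - log_var)
          + (np.exp(log_var) + (mu - mu_p) ** 2) / (2.0 * np.exp(log_var_p))
          - 0.5)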
def sparse_equals_constant(constant, tensor):
return tf.SparseTensor(
indices=tensor.indices,
dense_shape=tensor.dense_shape,
values=tf.equal(tensor.values, constant))
def sparse_expand_dims(tensor, current_num_dims, axis=0):
if axis == -1:
axis = current_num_dims
new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64)
cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims)
shape = tf.unstack(tensor.dense_shape, num=current_num_dims)
new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1)
return tf.SparseTensor(
indices=new_indices,
values=tensor.values,
dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:]))
def sparse_add_constant(constant, tensor):
return tf.SparseTensor(
indices=tensor.indices,
values=constant + tensor.values,
dense_shape=tensor.dense_shape)
def sparse_eye(size):
indices = tf.cast(tf.stack([tf.range(size), tf.range(size)]), tf.int64)
values = tf.ones(size)
dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)]
return tf.SparseTensor(
indices=indices, values=values, dense_shape=dense_shape)
# modification from https://github.com/tensorflow/tensorflow/pull/21276
# without special initialization for g
class WeightNorm(tf.keras.layers.Wrapper):
""" This wrapper reparameterizes a layer by decoupling the weight's
magnitude and direction. This speeds up convergence by improving the
conditioning of the optimization problem.
Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
Tim Salimans, Diederik P. Kingma (2016)
WeightNorm wrapper works for keras and tf layers.
```python
net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
input_shape=(32, 32, 3), data_init=True)(x)
net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
data_init=True)
net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
data_init=True)(net)
net = WeightNorm(tf.keras.layers.Dense(n_classes),
data_init=True)(net)
```
Arguments:
layer: a layer instance.
data_init: If `True` use data dependent variable initialization
Raises:
ValueError: If not initialized with a `Layer` instance.
ValueError: If `Layer` does not contain a `kernel` of weights
NotImplementedError: If `data_init` is True and running graph execution
"""
def __init__(self, layer, data_init=False, **kwargs):
if not isinstance(layer, tf.keras.layers.Layer):
raise ValueError(
"Please initialize `WeightNorm` layer with a "
"`Layer` instance. You passed: {input}".format(input=layer))
super(WeightNorm, self).__init__(layer, **kwargs)
self._track_checkpointable(layer, name="layer")
def _compute_weights(self):
"""Generate weights with normalization."""
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g
def _init_norm(self, weights):
"""Set the norm of the weight vector."""
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution."""
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
m_init, v_init = tf.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True
def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = tf.layers.InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True
def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output
def compute_output_shape(self, input_shape):
return tf.TensorShape(
self.layer.compute_output_shape(input_shape).as_list())<|fim▁end|>
|
A tensor of the same shape as x.
Raises:
|
<|file_name|>Changes.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2011 Thomas Schreiber
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# by Thomas Schreiber <[email protected]>
#
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from view.changesUi import Ui_changeSummary
import string
class ChangeWin(QDialog):
"""A QDialog that lists changes before they are commited.
:param QDialog: Parent class.
"""
def __init__(self, parent):
"""Initialize ChangeWin.
:param parent: Caller.
"""
QDialog.__init__(self, parent)
self.ui=Ui_changeSummary()
self.ui.setupUi(self)
def setChanges(self, changeDict):
"""Add changes to ChangeWin.
:param changeDict: Dictionary of changes.
"""
installString = ''
upgradeString = ''
removeString = ''
for app in changeDict['repoInstalls']:
installString += app + ' '
for app in changeDict['aurInstalls']:
installString += app + ' '
for app in changeDict['aurBuildDeps']:
installString += app + ' '
for app in changeDict['aurDeps']:
installString += app + ' '
for app in changeDict['repoUpgrades']:
upgradeString += app + ' '
for app in changeDict['aurUpgrades']:
upgradeString += app + ' '
for app in changeDict['removes']:
removeString += app + ' '
self.ui.toInstallEdit.setText(installString)
self.ui.toUpgradeEdit.setText(upgradeString)
self.ui.toRemoveEdit.setText(removeString)
<|fim▁hole|><|fim▁end|>
|
# vim: set ts=4 sw=4 noet:
|
<|file_name|>lcon.d.ts<|end_file_name|><|fim▁begin|>declare module "lcon" {
export interface SourceLocation {
line: number
column: number
}
export interface SyntaxTreeBuilder<A> {
initObject(start: SourceLocation): A
initArray(start: SourceLocation): A
appendKeyToObject(key: string, object: A, start: SourceLocation, end: SourceLocation): void
appendValueToArray(value: any, array: A): void
appendValueToObject(value: any, object: A): void
closeObject(object: A, end: SourceLocation): void
closeArray(array: A, end: SourceLocation): void
lastElementOfArray(array: A): any
isObject(thing: any): boolean
processString(value: string, start: SourceLocation, end: SourceLocation): any
processNumber(value: number, start: SourceLocation, end: SourceLocation): any
processBoolean(value: boolean, start: SourceLocation, end: SourceLocation): any
processNull(start: SourceLocation, end: SourceLocation): any
}
export function parseWithBuilder<A>(src: string, builder: SyntaxTreeBuilder<A>): any
/**
* Converts ordered JSON data to normal (unordered) JSON data. Note that
* "JSON", in this case, refers to actual JavaScript objects and arrays, not to
* a JSON string.
*
* Ordered JSON is a special JSON format that includes only arrays, no objects.
* Key order is significant, and, in order to preserve it, objects are
* represented by arrays in the format `[false, key, value, key, value, ...]`.
* Actual arrays start with `true`, in order to differentiate them from
* objects.
*/
export function orderedToUnordered(ordered: any): any
/**
* Parses an LCON string into Ordered JSON data.
*
* Ordered JSON is a special JSON format that includes only arrays, no objects.
* Key order is significant, and, in order to preserve it, objects are
* represented by arrays in the format `[false, key, value, key, value, ...]`.
* Actual arrays start with `true`, in order to differentiate them from
* objects.
*/
export function parseOrdered(src: string): any
/**
* Parses an LCON string into standard JavaScript data types (JSON). This is
* the LCON equivalent of `JSON.parse`.
*
* Key order will usually be preserved, but this is
* JavaScript-implementation-dependent and not guaranteed. If key order is
* significant, use `parseOrdered` instead.
*/
export function parseUnordered(src: string): any
/**
* Generates a compact LCON string from standard JavaScript data types (JSON).
* This is the LCON equivalent of `JSON.stringify`.
*
* Key order will usually be preserved, but this is<|fim▁hole|> * JavaScript-implementation-dependent and not guaranteed. If key order is
* significant, use `stringifyOrdered` instead.
*/
export function stringifyUnordered(data: any): string
/**
* Generates a compact LCON string from Ordered JSON data. Ordered JSON is the
* data format output by `parseOrdered`; see that function's documentation for
* more information.
*/
export function stringifyOrdered(data: any): string
/**
* Generates a JSON string (which is also valid LCON) from Ordered JSON data.
* Ordered JSON is the data format output by `parseOrdered`; see that
* function's documentation for more information.
*
* A second argument, an integer, may be provided; this specifies the size of
* indents to insert into the output. If the indent size is greater than 0, the
* output will be pretty-printed with newlines and indentation.
*/
export function stringifyOrderedJSON(data: any, indent?: number, indentStep?: number): string
}<|fim▁end|>
| |
<|file_name|>SettingAction.java<|end_file_name|><|fim▁begin|>package kawigi.cmd;
import kawigi.properties.*;
import kawigi.widget.*;
import kawigi.editor.*;
import javax.swing.*;
import java.awt.event.*;
/**
* Action implementation for setting actions.
*
* This class really serves two purposes. The first is to implement actions
* that are related to settings but aren't settings themselves (for instance,
* launching the config dialog and the OK and Cancel buttons on the config
* dialog).
*
* The other purpose is that it's the base class of all setting actions. As
* part of this, there are a set of static variables and methods that are
* shared by all setting instances.
*
* The intended behavior is that if the settings dialog is up, all settings are
* set on a temporary prefs object, and committed when the "OK" button is
* pushed. Otherwise, the setting is applied immediately. Therefore, if
* buttons bound to setting actions are put on the main UI somewhere, they will
* be effective immediately on being used, but settings done on the dialog are
* cancellable.
**/
@SuppressWarnings("serial")
public class SettingAction extends DefaultAction
{
/**
* Temporary storage for settings until the settings dialog is committed.
**/
protected static PrefProxy tempPrefs;
/**
* Reference to the config dialog.
**/
protected static JDialog dialog;
/**
* Returns the temporary prefs if there is one, otherwise the real
* KawigiEdit prefs.
**/
protected static PrefProxy getCurrentPrefs()
{
if (tempPrefs == null)
return PrefFactory.getPrefs();
else
return tempPrefs;
}
/**
* Returns true if settings shouldn't be committed yet.
*
<|fim▁hole|>	 * Even though things set to the temporary prefs won't be committed per se,
* in order to immediately be effective, some settings need to notify other
* objects (for instance, syntax highlighting settings require a
* repopulation of some structures in the View classes), but they should
* only do that if delayNotify() returns false.
**/
protected static boolean delayNotify()
{
return tempPrefs != null;
}
/**
* Constructs a new SettingAction for the given ActID.
**/
public SettingAction(ActID cmdid)
{
super(cmdid);
}
/**
* Executes the non-setting setting commands.
**/
public void actionPerformed(ActionEvent e)
{
switch (cmdid)
{
case actLaunchConfig:
if (dialog == null)
{
dialog = new JDialog(Dispatcher.getWindow(), "KawigiEdit Configuration", true);
dialog.getContentPane().add(UIHandler.loadMenu(MenuID.ConfigPanel, Dispatcher.getGlobalDispatcher()));
dialog.setDefaultCloseOperation(WindowConstants.HIDE_ON_CLOSE);
dialog.pack();
dialog.addWindowListener(
new WindowAdapter() {
public void windowClosing(WindowEvent e) {
Dispatcher.getGlobalDispatcher().runCommand(ActID.actCancelConfig);
}
});
}
if (tempPrefs == null)
tempPrefs = new ChainedPrefs(PrefFactory.getPrefs());
Dispatcher.getGlobalDispatcher().UIRefresh();
dialog.setVisible(true);
break;
case actCommitConfig:
tempPrefs.commit();
doUpdates();
// fallthrough...
case actCancelConfig:
tempPrefs = null;
if (dialog != null)
{
dialog.setVisible(false);
}
break;
}
}
/**
* Returns true if this action is available.
**/
public boolean isEnabled()
{
return true;
}
/**
* Does all the commital actions that need to happen assuming all settings
* were changed at once.
**/
public void doUpdates()
{
if (Dispatcher.getProblemTimer() != null)
{
boolean show = getCurrentPrefs().getBoolean("kawigi.timer.show");
if (show)
Dispatcher.getProblemTimer().start();
else
Dispatcher.getProblemTimer().stop();
}
ProblemTimer.resetPrefs();
GenericView.getColors();
CPPView.initColors();
PythonView.initColors();
CSharpView.initColors();
JavaView.initColors();
VBView.initColors();
GenericView.resetTabStop();
if (Dispatcher.getCompileComponent() != null)
Dispatcher.getCompileComponent().updatePrefs();
if (Dispatcher.getOutputComponent() != null)
Dispatcher.getOutputComponent().updatePrefs();
if (Dispatcher.getEditorPanel() != null)
Dispatcher.getCodePane().resetPrefs();
if (Dispatcher.getLocalCodeEditorPanel() != null)
Dispatcher.getLocalCodePane().resetPrefs();
}
}<|fim▁end|>
| |
<|file_name|>project_issue_solution.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*-
<|fim▁hole|> """ Note """
_inherit = 'project.issue.solution'
_columns = {
'product_ids': fields.many2many('product.product', 'project_issue_solution_product_rel', 'solution_id','product_id', string='Products'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<|fim▁end|>
|
from openerp.osv import fields, osv, orm
class project_isssue_solution(osv.osv):
|
<|file_name|>rdd.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
IO,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
TypeVar,
cast,
overload,
TYPE_CHECKING,
)
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
Serializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
if TYPE_CHECKING:
import socket
import io
from pyspark._typing import NonUDFType
from pyspark._typing import S, NumberOrArray
from pyspark.context import SparkContext
from pyspark.sql.pandas._typing import (
PandasScalarUDFType,
PandasGroupedMapUDFType,
PandasGroupedAggUDFType,
PandasWindowAggUDFType,
PandasScalarIterUDFType,
PandasMapIterUDFType,
PandasCogroupedMapUDFType,
ArrowMapIterUDFType,
)
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import AtomicType, StructType
from pyspark.sql._typing import AtomicValue, RowLike, SQLBatchedUDFType
from py4j.java_gateway import JavaObject # type: ignore[import]
from py4j.java_collections import JavaArray # type: ignore[import]
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
K = TypeVar("K", bound=Hashable)
V = TypeVar("V")
V1 = TypeVar("V1")
V2 = TypeVar("V2")
V3 = TypeVar("V3")
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF: "NonUDFType" = 0
SQL_BATCHED_UDF: "SQLBatchedUDFType" = 100
SQL_SCALAR_PANDAS_UDF: "PandasScalarUDFType" = 200
SQL_GROUPED_MAP_PANDAS_UDF: "PandasGroupedMapUDFType" = 201
SQL_GROUPED_AGG_PANDAS_UDF: "PandasGroupedAggUDFType" = 202
SQL_WINDOW_AGG_PANDAS_UDF: "PandasWindowAggUDFType" = 203
SQL_SCALAR_PANDAS_ITER_UDF: "PandasScalarIterUDFType" = 204
SQL_MAP_PANDAS_ITER_UDF: "PandasMapIterUDFType" = 205
SQL_COGROUPED_MAP_PANDAS_UDF: "PandasCogroupedMapUDFType" = 206
SQL_MAP_ARROW_ITER_UDF: "ArrowMapIterUDFType" = 207
def portable_hash(x: Hashable) -> int:
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
confidence: float
low: float
high: float
def __new__(cls, mean: float, confidence: float, low: float, high: float) -> "BoundedFloat":
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _create_local_socket(sock_info: "JavaArray") -> "io.BufferedRWPair":
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
sockfile: "io.BufferedRWPair"
sock: "socket.socket"
port: int = sock_info[0]
auth_secret: str = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
    # The RDD materialization time is unpredictable; if we set a timeout for the
    # socket read operation, it is very likely to fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : :py:class:`Serializer`
The PySpark serializer to use
Returns
-------
result of :py:meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info: "JavaArray", _serializer: Serializer):
port: int
auth_secret: str
jsocket_auth_server: "JavaObject"
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter: Iterator[Any] = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self) -> Iterator[Any]:
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self) -> None:
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
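# Protocol sketch (illustrative, not from the original source): each pull on the local
# iterator writes 1 to ask the JVM for the next partition, then reads a status int:
# 1 means a serialized partition stream follows, 0 means the iterator is exhausted,
# and -1 means the server failed, in which case jsocket_auth_server.getResult()
# surfaces the JVM-side exception to the caller.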
class Partitioner:
def __init__(self, numPartitions: int, partitionFunc: Callable[[Any], int]):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k: Any) -> int:
return self.partitionFunc(k) % self.numPartitions
class RDD(Generic[T_co]):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(
self,
jrdd: "JavaObject",
ctx: "SparkContext",
jrdd_deserializer: Serializer = AutoBatchedSerializer(CPickleSerializer()),
):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner: Optional[Partitioner] = None
def _pickled(self: "RDD[T]") -> "RDD[T]":
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self) -> int:
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self) -> str:
return self._jrdd.toString()
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self) -> "SparkContext":
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self: "RDD[T]") -> "RDD[T]":
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self: "RDD[T]", storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY) -> "RDD[T]":
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self: "RDD[T]", blocking: bool = False) -> "RDD[T]":
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self) -> None:
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self) -> bool:
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self) -> None:
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self) -> bool:
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self) -> Optional[str]:
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
return checkpointFile.get() if checkpointFile.isDefined() else None
def map(self: "RDD[T]", f: Callable[[T], U], preservesPartitioning: bool = False) -> "RDD[U]":
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(
self: "RDD[T]", f: Callable[[T], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(
self: "RDD[T]", f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. deprecated:: 0.9.0
use :py:meth:`RDD.mapPartitionsWithIndex` instead.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self) -> int:
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self: "RDD[T]", f: Callable[[T], bool]) -> "RDD[T]":
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator: Iterable[T]) -> Iterable[T]:
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(
self: "RDD[T]", withReplacement: bool, fraction: float, seed: Optional[int] = None
) -> "RDD[T]":
"""
Return a sampled subset of this RDD.
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(
self: "RDD[T]", weights: Sequence[Union[int, float]], seed: Optional[int] = None
) -> "List[RDD[T]]":
"""
Randomly splits this RDD with the provided weights.
        Parameters
        ----------
        weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split RDDs in a list
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(
self: "RDD[T]", withReplacement: bool, num: int, seed: Optional[int] = None
) -> List[T]:
"""
Return a fixed-size sampled subset of this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(
sampleSizeLowerBound: int, total: int, withReplacement: bool
) -> float:
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = -log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
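    # Worked example (illustrative, not from the original source): to sample 100 of
    # 10,000 elements without replacement, the naive fraction would be 0.01, but with
    # delta = 0.00005 the formula gives gamma = -ln(5e-5) / 10000 ~= 9.9e-4 and
    #   q = 0.01 + 9.9e-4 + sqrt((9.9e-4)**2 + 2 * 9.9e-4 * 0.01) ~= 0.0156,
    # i.e. roughly 1.56x over-sampling so that at least 100 elements come back
    # 99.99% of the time.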
def union(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd: "RDD[Union[T, U]]" = RDD(
self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer
)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self: "RDD[T]", other: "RDD[T]") -> "RDD[T]":
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self: "RDD[T]", serializer: Optional[Serializer] = None) -> "RDD[T]":
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[S, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[["S"], int] = ...,
ascending: bool = ...,
) -> "RDD[Tuple[S, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int],
ascending: bool,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[[K], int] = ...,
ascending: bool = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[Any, Any]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[Any], int] = portable_hash,
ascending: bool = True,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[Any, Any]]":
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
@overload
def sortByKey(
self: "RDD[Tuple[S, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool,
numPartitions: int,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: Optional[bool] = True,
numPartitions: Optional[int] = None,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[K, V]]":
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k: K) -> int:
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p # type: ignore[operator]
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
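    # Illustration (not from the original source) of the range partitioning above:
    # with numPartitions = 3 and sampled keys [1, 3, 4, 7, 9, 12], bounds becomes
    # [samples[2], samples[4]] = [4, 9]; bisect_left then routes keys <= 4 to
    # partition 0, keys in (4, 9] to partition 1 and larger keys to partition 2
    # (the mapping is mirrored when ascending is False).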
def sortBy(
self: "RDD[T]",
keyfunc: Callable[[T], "S"],
ascending: bool = True,
numPartitions: Optional[int] = None,
) -> "RDD[T]":
"""
Sorts this RDD by the given keyfunc
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return (
self.keyBy(keyfunc) # type: ignore[type-var]
.sortByKey(ascending, numPartitions)
.values()
)
def glom(self: "RDD[T]") -> "RDD[List[T]]":
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator: Iterable[T]) -> Iterable[List[T]]:
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(
self: "RDD[T]",
f: Callable[[T], K],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[T]]]":
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(
self, command: str, env: Optional[Dict[str, str]] = None, checkCode: bool = False
) -> "RDD[str]":
"""
Return an RDD created by piping elements to a forked external process.
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether or not to check the return value of the shell command.
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator: Iterable[T]) -> Iterable[str]:
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out: IO[bytes]) -> None:
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code() -> Iterable[int]:
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
for i in range(0):
yield i
return (
cast(bytes, x).rstrip(b"\n").decode("utf-8")
for x in chain(
iter(cast(IO[bytes], pipe.stdout).readline, b""), check_return_code()
)
)
return self.mapPartitions(func)
def foreach(self: "RDD[T]", f: Callable[[T], None]) -> None:
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator: Iterable[T]) -> Iterable[Any]:
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self: "RDD[T]", f: Callable[[Iterable[T]], None]) -> None:
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it: Iterable[T]) -> Iterable[Any]:
r = f(it)
try:
return iter(r) # type: ignore[call-overload]
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self: "RDD[T]") -> List[T]:
"""
Return a list that contains all of the elements in this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(
self: "RDD[T]", groupId: str, description: str, interruptOnCancel: bool = False
) -> "List[T]":
"""
When collect rdd, use this method to specify job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self: "RDD[T]", f: Callable[[T, T], T]) -> T:
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self: "RDD[T]", f: Callable[[T, T], T], depth: int = 2) -> T:
"""
Reduces the elements of this RDD in a multi-level tree pattern.
Parameters
----------
f : function
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
# Use the second entry to indicate whether this is a dummy value.
zeroValue: Tuple[T, bool] = ( # type: ignore[assignment]
None,
True,
)
def op(x: Tuple[T, bool], y: Tuple[T, bool]) -> Tuple[T, bool]:
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False # type: ignore[arg-type]
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self: "RDD[T]", zeroValue: T, op: Callable[[T, T], T]) -> T:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator: Iterable[T]) -> Iterable[T]:
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
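    # Illustration (not from the original source) of the zeroValue behavior above: the
    # zero value is folded in once per partition and once more for the final merge, so
    #   sc.parallelize(["a", "b", "c"], 3).fold("x", operator.add) == "xxaxbxc"
    # i.e. a non-neutral "x" shows up numPartitions + 1 = 4 times.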
def aggregate(
self: "RDD[T]", zeroValue: U, seqOp: Callable[[U, T], U], combOp: Callable[[U, U], U]
) -> U:
"""
Aggregate the elements of each partition, and then the results for all
        the partitions, using the given combine functions and a neutral "zero
        value."
        The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two Us
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(
self: "RDD[T]",
zeroValue: U,
seqOp: Callable[[U, T], U],
combOp: Callable[[U, U], U],
depth: int = 2,
) -> U:
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
        Parameters
        ----------
        depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale # type: ignore[assignment]
curNumPartitions = int(numPartitions)
def mapPartition(i: int, iterator: Iterable[U]) -> Iterable[Tuple[int, U]]:
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
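    # Illustration (not from the original source) of the partition collapsing above:
    # with 100 input partitions and depth=2, scale = ceil(100 ** (1 / 2)) = 10, so a
    # single intermediate reduceByKey pass shrinks 100 partially aggregated partitions
    # to 10 before the final reduce(combOp) runs on the driver.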
@overload
def max(self: "RDD[S]") -> "S":
...
@overload
def max(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def max(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max) # type: ignore[arg-type]
return self.reduce(lambda a, b: max(a, b, key=key)) # type: ignore[arg-type]
@overload
def min(self: "RDD[S]") -> "S":
...
@overload
def min(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def min(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min) # type: ignore[arg-type]
return self.reduce(lambda a, b: min(a, b, key=key)) # type: ignore[arg-type]
def sum(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold( # type: ignore[return-value]
0, operator.add
)
def count(self) -> int:
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self: "RDD[NumberOrArray]") -> StatCounter:
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
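Examples
--------
A small runnable sketch, reusing the shared ``sc`` fixture assumed by the other doctests:
>>> s = sc.parallelize([1, 2, 3, 4]).stats()
>>> s.count()
4
>>> s.mean()
2.5
>>> s.variance()
1.25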
"""
def redFunc(left_counter: StatCounter, right_counter: StatCounter) -> StatCounter:
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce( # type: ignore[arg-type]
redFunc
)
def histogram(
self: "RDD[S]", buckets: Union[int, List["S"], Tuple["S", ...]]
) -> Tuple[Sequence["S"], List[int]]:
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x: Any) -> bool:
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a: Tuple["S", "S"], b: Tuple["S", "S"]) -> Tuple["S", "S"]:
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets # type: ignore[operator]
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv: # type: ignore[operator]
inc = (maxv - minv) * 1.0 / buckets # type: ignore[operator]
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [
buckets[i + 1] - buckets[i] # type: ignore[operator]
for i in range(len(buckets) - 1)
]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1) # type: ignore[operator]
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator: Iterable["S"]) -> Iterable[List[int]]:
counters = [0] * len(buckets) # type: ignore[arg-type]
for i in iterator:
if (
i is None
or (isinstance(i, float) and isnan(i)) # type: ignore[arg-type]
or i > maxv
or i < minv
):
continue
t = (
int((i - minv) / inc) # type: ignore[operator]
if even
else bisect.bisect_right(buckets, i) - 1 # type: ignore[arg-type]
)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a: List[int], b: List[int]) -> List[int]:
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean() # type: ignore[return-value]
def variance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance() # type: ignore[return-value]
def stdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev() # type: ignore[return-value]
def sampleStdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev() # type: ignore[return-value]
def sampleVariance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance() # type: ignore[return-value]
def countByValue(self: "RDD[K]") -> Dict[K, int]:
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator: Iterable[K]) -> Iterable[Dict[K, int]]:
counts: Dict[K, int] = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1: Dict[K, int], m2: Dict[K, int]) -> Dict[K, int]:
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
@overload
def top(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def top(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def top(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator: Iterable[T]) -> Iterable[List[T]]:
yield heapq.nlargest(num, iterator, key=key)
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
@overload
def takeOrdered(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def takeOrdered(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def takeOrdered(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self: "RDD[T]", num: int) -> List[T]:
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items: List[T] = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self: "RDD[T]") -> T:
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self) -> bool:
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
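Examples
--------
A sketch only (skipped at doctest time). The exact keys depend on the Hadoop version and
OutputFormat in use, so treat the configuration below as an assumption, not a recipe:
>>> conf = {  # doctest: +SKIP
...     "mapreduce.job.outputformat.class":
...         "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
...     "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
...     "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
...     "mapreduce.output.fileoutputformat.outputdir": "hdfs:///tmp/dataset-out",
... }
>>> sc.parallelize([(1, "a"), (2, "b")]).saveAsNewAPIHadoopDataset(conf)  # doctest: +SKIP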
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
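Examples
--------
An illustrative call only (skipped at doctest time); the path is a placeholder and the
classnames are the ones already quoted above:
>>> rdd = sc.parallelize([(1, "a"), (2, "b")])  # doctest: +SKIP
>>> rdd.saveAsNewAPIHadoopFile(  # doctest: +SKIP
...     "hdfs:///tmp/sequence-out",
...     "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
...     keyClass="org.apache.hadoop.io.IntWritable",
...     valueClass="org.apache.hadoop.io.Text")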
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
def saveAsHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
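Examples
--------
A sketch only (skipped at doctest time). The old-API keys below are assumptions; adjust
them for the OutputFormat and Hadoop version you actually use:
>>> conf = {  # doctest: +SKIP
...     "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
...     "mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
...     "mapred.output.value.class": "org.apache.hadoop.io.Text",
...     "mapred.output.dir": "hdfs:///tmp/dataset-out-old-api",
... }
>>> sc.parallelize([(1, "a"), (2, "b")]).saveAsHadoopDataset(conf)  # doctest: +SKIP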
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
compressionCodecClass: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(
self: "RDD[Tuple[K, V]]", path: str, compressionCodecClass: Optional[str] = None
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path: str, batchSize: int = 10) -> None:
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
ser: Serializer
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path: str, compressionCodecClass: Optional[str] = None) -> None:
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split: int, iterator: Iterable[Any]) -> Iterable[bytes]:
for x in iterator:
if isinstance(x, bytes):
yield x
elif isinstance(x, str):
yield x.encode("utf-8")
else:
yield str(x).encode("utf-8")
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self: "RDD[Tuple[K, V]]") -> Dict[K, V]:
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self: "RDD[Tuple[K, V]]") -> "RDD[K]":
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self: "RDD[Tuple[K, V]]") -> "RDD[V]":
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(
self: "RDD[Tuple[K, V]]",
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
The default partitioner is hash-partitioning.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self: "RDD[Tuple[K, V]]", func: Callable[[V, V], V]) -> Dict[K, V]:
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Dict[K, V]]:
m: Dict[K, V] = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1: Dict[K, V], m2: Dict[K, V]) -> Dict[K, V]:
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self: "RDD[Tuple[K, V]]") -> Dict[K, int]:
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, U]]]":
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, Optional[U]]]]":
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], U]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], Optional[U]]]]":
"""
Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because the builtin hash of None
# differs across machines.
def partitionBy(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Return a copy of the RDD partitioned using the specified partitioner.
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
def add_shuffle_key(split: int, iterator: Iterable[Tuple[K, V]]) -> Iterable[bytes]:
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000) # type: ignore[operator]
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v)) # type: ignore[operator]
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5) # type: ignore[assignment]
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd: "RDD[Tuple[K, V]]" = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(
self: "RDD[Tuple[K, V]]",
createCombiner: Callable[[V], U],
mergeValue: Callable[[U, V], U],
mergeCombiners: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator: Iterable[Tuple[K, U]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: U,
seqFunc: Callable[[U, V], U],
combFunc: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
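Examples
--------
A minimal sketch of the per-key analogue of :meth:`aggregate` (assumes the shared ``sc``):
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> seqFunc = (lambda acc, v: acc + v)
>>> combFunc = (lambda acc1, acc2: acc1 + acc2)
>>> sorted(rdd.aggregateByKey(0, seqFunc, combFunc).collect())
[('a', 3), ('b', 1)]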
"""
def createZero() -> U:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
def foldByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: V,
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero() -> V:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self) -> int:
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[V]]]":
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x: V) -> List[V]:
return [x]
def mergeValue(xs: List[V], x: V) -> List[V]:
xs.append(x)
return xs
def mergeCombiners(a: List[V], b: List[V]) -> List[V]:
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it: Iterable[Tuple[K, List[V]]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(
self: "RDD[Tuple[K, V]]", f: Callable[[V], Iterable[U]]
) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv: Tuple[K, V]) -> Iterable[Tuple[K, U]]:
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self: "RDD[Tuple[K, V]]", f: Callable[[V], U]) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv: Tuple[K, V]) -> Tuple[K, U]:
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]", __o1: "RDD[Tuple[K, V2]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1], ResultIterable[V2]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, V1]]",
_o1: "RDD[Tuple[K, V2]]",
_o2: "RDD[Tuple[K, V3]]",
) -> """RDD[
Tuple[
K,
Tuple[
ResultIterable[V],
ResultIterable[V1],
ResultIterable[V2],
ResultIterable[V3],
],
]
]""":
...
def groupWith( # type: ignore[misc]
self: "RDD[Tuple[Any, Any]]", other: "RDD[Tuple[Any, Any]]", *others: "RDD[Tuple[Any, Any]]"
) -> "RDD[Tuple[Any, Tuple[ResultIterable[Any], ...]]]":
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[U]]]]":
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(
self: "RDD[Tuple[K, V]]",
withReplacement: bool,
fractions: Dict[K, Union[float, int]],
seed: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, Any]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair: Tuple[K, Tuple[V, Any]]) -> bool:
key, (val1, val2) = pair
return val1 and not val2 # type: ignore[return-value]
return (
self.cogroup(other, numPartitions)
.filter(filter_func) # type: ignore[arg-type]
.flatMapValues(lambda x: x[0])
)
def subtract(self: "RDD[T]", other: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self: "RDD[T]", f: Callable[[T], K]) -> "RDD[Tuple[K, T]]":
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self: "RDD[T]", numPartitions: int) -> "RDD[T]":
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self: "RDD[T]", numPartitions: int, shuffle: bool = False) -> "RDD[T]":
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
Examples
--------
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser: Serializer) -> int:
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd: "RDD[V]", batchSize: int) -> "RDD[V]":
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an exception in the JVM if the two RDDs have different
# numbers of items in corresponding partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self) -> Optional[str]:
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
return n if n else None
def setName(self: "RDD[T]", name: str) -> "RDD[T]":
"""
Assign a name to this RDD.
Examples
--------
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self) -> Optional[bytes]:
"""
A description of this RDD and its recursive dependencies for debugging.
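Examples
--------
The lineage string depends on the environment, so no output is asserted here:
>>> rdd = sc.parallelize(range(4)).map(str)
>>> print(rdd.toDebugString().decode("utf-8"))  # doctest: +SKIP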
"""
debug_string = self._jrdd.toDebugString()
return debug_string.encode("utf-8") if debug_string else None
def getStorageLevel(self) -> StorageLevel:
"""
Get the RDD's current storage level.
Examples
--------
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self) -> int:
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self: "RDD[Tuple[K, V]]", key: K) -> List[V]:
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self) -> "JavaObject":
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object via Pickle, whether or not
the RDD is serialized in batches.
"""
rdd = self._pickled()
assert self.ctx._jvm is not None
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout: int, confidence: float = 0.95) -> int:
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self: "RDD[T]", relativeSD: float = 0.05) -> int:
"""
Return approximate number of distinct elements in the RDD.
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self: "RDD[T]", prefetchPartitions: bool = False) -> Iterator[T]:
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition
before it is needed.
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self: "RDD[T]") -> "RDDBarrier[T]":
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
pyspark.BarrierTaskContext
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
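Examples
--------
A sketch only; barrier stages need enough free task slots, so the action is skipped here:
>>> from pyspark import BarrierTaskContext
>>> def f(iterator):
...     _ = BarrierTaskContext.get()
...     yield sum(iterator)
>>> sc.parallelize(range(8), 2).barrier().mapPartitions(f).collect()  # doctest: +SKIP
[6, 22]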
"""
return RDDBarrier(self)
def _is_barrier(self) -> bool:
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self: "RDD[T]", profile: ResourceProfile) -> "RDD[T]":
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental
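Examples
--------
A sketch under the documented requirements (supported cluster manager, dynamic
allocation); skipped at doctest time:
>>> from pyspark.resource import ExecutorResourceRequests, ResourceProfileBuilder
>>> ereqs = ExecutorResourceRequests().cores(2).memory("2g")  # doctest: +SKIP
>>> profile = ResourceProfileBuilder().require(ereqs).build  # doctest: +SKIP
>>> rdd = sc.parallelize(range(10)).withResources(profile)  # doctest: +SKIP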
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
assert self.ctx._jvm is not None
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
def getResourceProfile(self) -> Optional[ResourceProfile]:
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
:py:class:`pyspark.resource.ResourceProfile`
The user specified profile or None if none were specified
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
@overload
def toDF(
self: "RDD[RowLike]",
schema: Optional[Union[List[str], Tuple[str, ...]]] = None,
sampleRatio: Optional[float] = None,
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[RowLike]", schema: Optional[Union["StructType", str]] = None
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[AtomicValue]",
schema: Union["AtomicType", str],
) -> "DataFrame":
...
def toDF(
self: "RDD[Any]", schema: Optional[Any] = None, sampleRatio: Optional[float] = None
) -> "DataFrame":
raise RuntimeError("""RDD.toDF was called before SparkSession was initialized.""")
def _prepare_for_python_RDD(sc: "SparkContext", command: Any) -> Tuple[bytes, Any, Any, Any]:
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
assert sc._jvm is not None
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(
sc: "SparkContext", func: Callable, deserializer: Any, serializer: Any, profiler: Any = None
) -> "JavaObject":
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
assert sc._jvm is not None
return sc._jvm.PythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier(Generic[T]):
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd: RDD[T]):
self.rdd = rdd
def mapPartitions(
self, f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def func(s: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(
self,
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition. And all tasks are launched together
in a barrier stage.
The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
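# A minimal barrier-stage sketch (illustrative only; assumes a live SparkContext
# named `sc`, as the doctests in this module do):
#
#   rdd = sc.parallelize(range(4), 2)
#   doubled = rdd.barrier().mapPartitions(lambda it: (2 * x for x in it))
#   doubled.collect()  # [0, 2, 4, 6]
#
# Because this is a barrier stage, all of its tasks are launched together, which
# is what BarrierTaskContext-based coordination (e.g. distributed training) needs.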
class PipelinedRDD(RDD[U], Generic[T, U]):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(
self,
prev: RDD[T],
func: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
isFromBarrier: bool = False,
):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func: Callable[[int, Iterable[V]], Iterable[T]] = prev.func
def pipeline_func(split: int, iterator: Iterable[V]) -> Iterable[U]:
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val: Optional["JavaObject"] = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self) -> int:
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self) -> "JavaObject":
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
assert self.ctx._jvm is not None
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
assert self._jrdd_val is not None
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self) -> int:
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self) -> bool:
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self) -> bool:
return self.is_barrier
def _test() -> None:
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()<|fim▁end|>
|
specified by the optional key function.
|
<|file_name|>testInstallation.py<|end_file_name|><|fim▁begin|>"""Tests for CMFNotification installation and uninstallation.
$Id: testInstallation.py 65679 2008-05-25 23:45:26Z dbaty $
"""
from zope.component import getUtility
from zope.component import getMultiAdapter
from AccessControl.PermissionRole import rolesForPermissionOn
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignmentMapping
from Products.CMFCore.utils import getToolByName
from Products.CMFNotification.config import LAYER_NAME
from Products.CMFNotification.config import PORTLET_NAME
from Products.CMFNotification.NotificationTool import ID as TOOL_ID
from Products.CMFNotification.permissions import SUBSCRIBE_PERMISSION
from Products.CMFNotification.tests.plonetestbrowser import Browser
from Products.CMFNotification.tests.base import CMFNotificationTestCase
class TestInstallation(CMFNotificationTestCase):
"""Make sure that the product is properly installed."""
def afterSetUp(self):
pass
def testToolIsThere(self):
portal = self.portal
tool = getToolByName(self.portal, TOOL_ID)
self.failUnless(tool is not None)
def testSkinLayerIsThere(self):
stool = getToolByName(self.portal, 'portal_skins')
for skin, layers in stool._getSelections().items():
layers = layers.split(',')
self.failUnless(LAYER_NAME in layers)
self.failUnless(LAYER_NAME in stool.objectIds())
<|fim▁hole|> base_url = self.portal.absolute_url()
for name in ('plone.leftcolumn', 'plone.rightcolumn'):
manager = getUtility(IPortletManager,
name=name,
context=self.portal)
titles = [p.title for p in manager.getAddablePortletTypes()]
self.failUnless(PORTLET_NAME in titles)
manager = getUtility(IPortletManager,
name='plone.rightcolumn',
context=self.portal)
right_portlets = getMultiAdapter((self.portal, manager),
IPortletAssignmentMapping,
context=self.portal)
right_portlets = right_portlets.keys()
self.failUnless(PORTLET_NAME in right_portlets)
def testPermissionHasBeenSet(self):
roles = set(rolesForPermissionOn(SUBSCRIBE_PERMISSION, self.portal))
self.failUnlessEqual(roles, set(('Manager', 'Member')))
def testConfigletHasBeenAdded(self):
cptool = getToolByName(self.portal, 'portal_controlpanel')
configlets = [c.getId() for c in cptool.listActions()]
self.failUnless('cmfnotification_configuration' in configlets)
class TestUnInstallation(CMFNotificationTestCase):
"""Test that the product has been properly uninstalled."""
def afterSetUp(self):
"""Uninstall the product before running each test."""
qtool = getToolByName(self.portal, 'portal_quickinstaller')
self.setRoles(['Manager'])
qtool.uninstallProducts(['CMFNotification'])
def testToolIsNotThere(self):
tool = getToolByName(self.portal, TOOL_ID, None)
self.failUnless(tool is None)
def testSkinLayerIsNotThere(self):
stool = getToolByName(self.portal, 'portal_skins')
for skin, layers in stool._getSelections().items():
layers = layers.split(',')
self.failUnless (LAYER_NAME not in layers)
self.failUnless(LAYER_NAME not in stool.objectIds())
def testPortletDoNoExist(self):
base_url = self.portal.absolute_url()
for name in ('plone.leftcolumn', 'plone.rightcolumn'):
manager = getUtility(IPortletManager,
name=name,
context=self.portal)
titles = [p.title for p in manager.getAddablePortletTypes()]
self.failUnless(PORTLET_NAME not in titles)
manager = getUtility(IPortletManager,
name='plone.rightcolumn',
context=self.portal)
right_portlets = getMultiAdapter((self.portal, manager),
IPortletAssignmentMapping,
context=self.portal)
right_portlets = right_portlets.keys()
self.failUnless(PORTLET_NAME not in right_portlets)
def testConfigletDoNotExist(self):
cptool = getToolByName(self.portal, 'portal_controlpanel')
configlets = [c.getId() for c in cptool.listActions()]
self.failUnless('cmfnotification_configuration' not in configlets)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestInstallation))
suite.addTest(makeSuite(TestUnInstallation))
return suite<|fim▁end|>
|
def testPortletCanBeAdded(self):
|
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quotoxic.settings")
from django.core.management import execute_from_command_line<|fim▁hole|><|fim▁end|>
|
execute_from_command_line(sys.argv)
|
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># coding: utf-8
from __future__ import unicode_literals
from django.contrib import admin<|fim▁hole|>from .models import FinishDetail, FinishSubcategory
class FinishSubcategoryAdmin(SubcategoryAdmin):
def get_form(self, request, obj=None, **kwargs):
from . import DETAIL_TYPE
form = super(FinishSubcategoryAdmin, self).get_form(request, obj, **kwargs)
if 'category' in form.base_fields:
field = form.base_fields['category']
field.queryset = field.queryset.filter(type=DETAIL_TYPE)
return form
admin.site.register(FinishSubcategory, FinishSubcategoryAdmin)
class FinishDetailAdmin(DetailAdmin):
def get_form(self, request, obj=None, **kwargs):
from . import DETAIL_TYPE
form = super(FinishDetailAdmin, self).get_form(request, obj, **kwargs)
if 'subcategory' in form.base_fields:
field = form.base_fields['subcategory']
field.queryset = field.queryset.filter(type=DETAIL_TYPE)
return form
admin.site.register(FinishDetail, FinishDetailAdmin)<|fim▁end|>
|
from core.admin import SubcategoryAdmin, DetailAdmin
|
<|file_name|>day.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Forms for day forms
"""
from django.conf import settings
from django import forms
from django.utils.translation import ugettext as _
from arrow import Arrow
from datebook.models import DayEntry
from datebook.forms import CrispyFormMixin
from datebook.utils.imports import safe_import_module
DATETIME_FORMATS = {
'input_date_formats': ['%d/%m/%Y'],
'input_time_formats': ['%H:%M'],
'widget': forms.SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M'),
}
class DayBaseFormMixin(object):
"""
DayBase form mixin
"""
crispy_form_helper_path = 'datebook.forms.crispies.day_helper'
crispy_form_helper_kwargs = {}
def fill_initial_data(self, *args, **kwargs):
# Pass initial data for start and stop to their SplitDateTimeField clones
if 'start' in kwargs['initial']:
kwargs['initial']['start_datetime'] = kwargs['initial']['start']
if 'stop' in kwargs['initial']:
kwargs['initial']['stop_datetime'] = kwargs['initial']['stop']
# For existing instance (in edit mode) pass the start and stop values to their
# clones with SplitDateTimeField via initial data
if kwargs.get('instance'):
kwargs['initial']['start_datetime'] = kwargs['instance'].start
kwargs['initial']['stop_datetime'] = kwargs['instance'].stop
return kwargs
def init_fields(self, *args, **kwargs):
self.fields['start_datetime'] = forms.SplitDateTimeField(label=_('start'), **DATETIME_FORMATS)
self.fields['stop_datetime'] = forms.SplitDateTimeField(label=_('stop'), **DATETIME_FORMATS)
# Set the form field for DayEntry.content
field_helper = safe_import_module(settings.DATEBOOK_TEXT_FIELD_HELPER_PATH)
if field_helper is not None:
self.fields['content'] = field_helper(self, **{'label':_('content'), 'required': False})
def clean_content(self):
"""
Text content validation
"""
content = self.cleaned_data.get("content")
validation_helper = safe_import_module(settings.DATEBOOK_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, content)
else:
return content
def clean_start_datetime(self):
start = self.cleaned_data['start_datetime']
# Day entry can't start before the targeted day date
if start and start.date() < self.daydate:
raise forms.ValidationError(_("You can't start a day before itself"))
# Day entry can't start after the targeted day date
if start and start.date() > self.daydate:
raise forms.ValidationError(_("You can't start a day after itself"))
return start
def clean_stop_datetime(self):
start = self.cleaned_data.get('start_datetime')
stop = self.cleaned_data['stop_datetime']
# Day entry can't stop before the start
if start and stop and stop <= start:
raise forms.ValidationError(_("Stop time can't be less or equal to start time"))
# Day entry can't stop more than one future day after the targeted day date
if stop and stop.date() > Arrow.fromdate(self.daydate).replace(days=1).date():
raise forms.ValidationError(_("Stop time can't be more than the next day"))
return stop
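# Worked example of the boundary above (dates are illustrative): for a day entry
# on 2014-03-10, Arrow.fromdate(self.daydate).replace(days=1).date() is
# 2014-03-11, so a stop ending late on 2014-03-11 is accepted while anything on
# 2014-03-12 or later raises the error.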
# TODO: overtime must not be more than effective worked time
#def clean_overtime(self):
#overtime = self.cleaned_data.get('overtime')
#return overtime
# TODO
#def clean_pause(self):
#start = self.cleaned_data.get('start_datetime')
#stop = self.cleaned_data.get('stop_datetime')
#pause = self.cleaned_data['pause']
## Pause time can't be more than elapsed time between start and stop
#if start and stop and pause and False:
#raise forms.ValidationError("Pause time is more than the elapsed time")
#return pause
class DayEntryForm(DayBaseFormMixin, CrispyFormMixin, forms.ModelForm):
"""
DayEntry form
"""<|fim▁hole|>
# Args to give to the form layout method
self.crispy_form_helper_kwargs.update({
'next_day': kwargs.pop('next_day', None),
'day_to_model_url': kwargs.pop('day_to_model_url', None),
'form_action': kwargs.pop('form_action'),
'remove_url': kwargs.pop('remove_url', None),
})
# Fill initial datas
kwargs = self.fill_initial_data(*args, **kwargs)
super(DayEntryForm, self).__init__(*args, **kwargs)
super(forms.ModelForm, self).__init__(*args, **kwargs)
# Init some special fields
kwargs = self.init_fields(*args, **kwargs)
def clean(self):
cleaned_data = super(DayBaseFormMixin, self).clean()
content = cleaned_data.get("content")
vacation = cleaned_data.get("vacation")
# Content text is only required when vacation is not checked
if not vacation and not content:
raise forms.ValidationError(_("Worked days require a content text"))
return cleaned_data
def save(self, *args, **kwargs):
instance = super(DayEntryForm, self).save(commit=False, *args, **kwargs)
instance.start = self.cleaned_data['start_datetime']
instance.stop = self.cleaned_data['stop_datetime']
instance.datebook = self.datebook
instance.activity_date = self.daydate
instance.save()
return instance
class Meta:
model = DayEntry
exclude = ('datebook', 'activity_date', 'start', 'stop')
widgets = {
'pause': forms.TimeInput(format=DATETIME_FORMATS['input_time_formats'][0]),
'overtime': forms.TimeInput(format=DATETIME_FORMATS['input_time_formats'][0]),
}
class DayEntryCreateForm(DayEntryForm):
def clean(self):
cleaned_data = super(DayEntryCreateForm, self).clean()
# Validate that there is not already a day entry for the same day
try:
obj = DayEntry.objects.get(datebook=self.datebook, activity_date=self.daydate)
except DayEntry.DoesNotExist:
pass
else:
raise forms.ValidationError(_("This day entry has allready been created"))
return cleaned_data<|fim▁end|>
|
def __init__(self, datebook, day, *args, **kwargs):
self.datebook = datebook
self.daydate = datebook.period.replace(day=day)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from streamable_archive_tests import * <|fim▁hole|><|fim▁end|>
|
from delivery_collection_tests import *
|
<|file_name|>day_10.rs<|end_file_name|><|fim▁begin|>#![allow(transmute_ptr_to_ref)]
<|fim▁hole|>use std::boxed::Box;
use std::option::Option;
use std::ops::Deref;
use std::ops::DerefMut;
use std::marker::Copy;
use std::clone::Clone;
struct Bucket {
key: Option<i32>,
value: Option<i32>,
next: Option<Link>
}
impl Bucket {
fn new(key: i32, value: i32) -> Bucket {
Bucket {
key: Some(key),
value: Some(value),
next: None
}
}
fn empty() -> Bucket {
Bucket {
key: None,
value: None,
next: None
}
}
}
struct Link {
ptr: *mut Bucket
}
impl Link {
fn new(bucket: Bucket) -> Link {
Link {
ptr: Box::into_raw(Box::new(bucket))
}
}
}
impl Deref for Link {
type Target = Bucket;
fn deref(&self) -> &Bucket {
unsafe { mem::transmute(self.ptr) }
}
}
impl DerefMut for Link {
fn deref_mut(&mut self) -> &mut Bucket {
unsafe { mem::transmute(self.ptr) }
}
}
impl Copy for Link { }
impl Clone for Link {
fn clone(&self) -> Link {
Link {
ptr: self.ptr
}
}
}
const CAPACITY: usize = 16;
#[derive(Default)]
pub struct Map {
size: usize,
table: Vec<Link>
}
impl Map {
pub fn new() -> Map {
let mut table = Vec::with_capacity(CAPACITY);
for _ in 0..CAPACITY {
table.push(Link::new(Bucket::empty()));
}
Map {
size: 0,
table: table
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn len(&self) -> usize {
self.size
}
pub fn insert(&mut self, key: i32, value: i32) {
let index = self.table.capacity() & key as usize;
let link = self.iterate(key, index);
if (*link).key != Some(key) {
self.size += 1;
let mut new_bucket = Bucket::new(key, value);
let link = self.table[index];
new_bucket.next = Some(link);
self.table[index] = Link::new(new_bucket);
}
}
pub fn contains(&self, key: i32) -> bool {
let index = self.table.capacity() & key as usize;
let link = self.iterate(key, index);
(*link).key == Some(key)
}
fn iterate(&self, key: i32, index: usize) -> Link {
let mut link = self.table[index];
while (*link).key != Some(key) && (*link).next.is_some() {
link = (*link).next.unwrap();
}
link
}
pub fn get(&self, key: i32) -> Option<i32> {
let index = self.table.capacity() & key as usize;
let link = self.iterate(key, index);
(*link).value
}
}<|fim▁end|>
|
use std::mem;
|
<|file_name|>iconSentry.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import SvgIcon from './svgIcon';
type Props = React.ComponentProps<typeof SvgIcon>;<|fim▁hole|> props: Props,
ref: React.Ref<SVGSVGElement>
) {
return (
<SvgIcon {...props} ref={ref}>
<path d="M15.8,14.57a1.53,1.53,0,0,0,0-1.52L9.28,1.43a1.46,1.46,0,0,0-2.56,0L4.61,5.18l.54.32A10.43,10.43,0,0,1,8.92,9.39a10.84,10.84,0,0,1,1.37,4.67H8.81a9.29,9.29,0,0,0-1.16-3.91A9,9,0,0,0,4.41,6.81L3.88,6.5,1.91,10l.53.32a5.12,5.12,0,0,1,2.42,3.73H1.48a.25.25,0,0,1-.21-.12.24.24,0,0,1,0-.25L2.21,12a3.32,3.32,0,0,0-1.07-.63L.2,13.05a1.53,1.53,0,0,0,0,1.52,1.46,1.46,0,0,0,1.28.76H6.13V14.7a6.55,6.55,0,0,0-.82-3.16,6.31,6.31,0,0,0-1.73-2l.74-1.32a7.85,7.85,0,0,1,2.26,2.53,8,8,0,0,1,1,3.92v.63h3.94V14.7A12.14,12.14,0,0,0,10,8.75a11.8,11.8,0,0,0-3.7-4l1.5-2.67a.24.24,0,0,1,.42,0l6.52,11.63a.24.24,0,0,1,0,.25.24.24,0,0,1-.21.12H13c0,.43,0,.85,0,1.27h1.53a1.46,1.46,0,0,0,1.28-.76" />
</SvgIcon>
);
});
IconSentry.displayName = 'IconSentry';
export {IconSentry};<|fim▁end|>
|
const IconSentry = React.forwardRef(function IconSentry(
|
<|file_name|>thesubdb.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import babelfish
import requests
from . import Provider
from .. import __version__
from ..exceptions import InvalidSubtitle, ProviderNotAvailable, ProviderError
from ..subtitle import Subtitle, is_valid_subtitle, detect
logger = logging.getLogger(__name__)
class TheSubDBSubtitle(Subtitle):
provider_name = 'thesubdb'
def __init__(self, language, hash): # @ReservedAssignment
super(TheSubDBSubtitle, self).__init__(language)
self.hash = hash
def compute_matches(self, video):
matches = set()
# hash
if 'thesubdb' in video.hashes and video.hashes['thesubdb'] == self.hash:
matches.add('hash')
return matches
class TheSubDBProvider(Provider):
languages = set([babelfish.Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']])
required_hash = 'thesubdb'
def initialize(self):
self.session = requests.Session()
self.session.headers = {'User-Agent': 'SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' %
__version__}
def terminate(self):
self.session.close()
def get(self, params):
"""Make a GET request on the server with the given parameters
:param params: params of the request
:return: the response
:rtype: :class:`requests.Response`
:raise: :class:`~subliminal.exceptions.ProviderNotAvailable`
"""
try:
r = self.session.get('http://api.thesubdb.com', params=params, timeout=10)
except requests.Timeout:
raise ProviderNotAvailable('Timeout after 10 seconds')
return r
def query(self, hash): # @ReservedAssignment
params = {'action': 'search', 'hash': hash}
logger.debug('Searching subtitles %r', params)
r = self.get(params)
if r.status_code == 404:
logger.debug('No subtitle found')
return []
elif r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
return [TheSubDBSubtitle(language, hash) for language in
set([babelfish.Language.fromalpha2(l) for l in r.content.split(',')])]
def list_subtitles(self, video, languages):
return [s for s in self.query(video.hashes['thesubdb']) if s.language in languages]
def download_subtitle(self, subtitle):
params = {'action': 'download', 'hash': subtitle.hash, 'language': subtitle.language.alpha2}
r = self.get(params)
if r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
logger.debug('Download URL: %s {hash=%s, lang=%s}' % (
'http://api.thesubdb.com', subtitle.hash, subtitle.language.alpha2,
))
subtitle_text = r.content.decode(<|fim▁hole|> if not is_valid_subtitle(subtitle_text):
raise InvalidSubtitle
return subtitle_text<|fim▁end|>
|
detect(r.content, subtitle.language.alpha2)['encoding'], 'replace')
|
<|file_name|>quantization_mnist_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|># limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.compiler.tf2tensorrt.python.ops import trt_ops
# pylint: enable=unused-import
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import saver
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.training_util import get_global_step
INPUT_NODE_NAME = 'input'
OUTPUT_NODE_NAME = 'output'
class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
def _BuildGraph(self, x):
def _Quantize(x, r):
x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
return x
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
"""Dense layer with quantized outputs.
Args:
x: input to the dense layer
num_inputs: number of input columns of x
num_outputs: number of output columns
quantization_range: the min/max range for quantization
name: name of the variable scope
Returns:
The output of the layer.
"""
with variable_scope.variable_scope(name):
kernel = variable_scope.get_variable(
'kernel',
shape=[num_inputs, num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.glorot_uniform())
bias = variable_scope.get_variable(
'bias',
shape=[num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.zeros())
x = math_ops.matmul(x, kernel)
x = _Quantize(x, quantization_range)
x = nn.bias_add(x, bias)
x = _Quantize(x, quantization_range)
return x
x = _Quantize(x, 1)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Reduce
x = math_ops.reduce_mean(x, [1, 2])
x = _Quantize(x, 6)
# FC1
x = _DenseLayer(x, 64, 512, 6, name='dense')
x = nn.relu6(x)
# FC2
x = _DenseLayer(x, 512, 10, 25, name='dense_1')
x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
return x
def _GetGraphDef(self, use_trt, max_batch_size, model_dir):
"""Get the frozen mnist GraphDef.
Args:
use_trt: whether use TF-TRT to convert the graph.
max_batch_size: the max batch size to apply during TF-TRT conversion.
model_dir: the model directory to load the checkpoints.
Returns:
The frozen mnist GraphDef.
"""
graph = ops.Graph()
with self.session(graph=graph) as sess:
with graph.device('/GPU:0'):
x = array_ops.placeholder(
shape=(None, 28, 28, 1), dtype=dtypes.float32, name=INPUT_NODE_NAME)
self._BuildGraph(x)
# Load weights
mnist_saver = saver.Saver()
checkpoint_file = latest_checkpoint(model_dir)
mnist_saver.restore(sess, checkpoint_file)
# Freeze
graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names=[OUTPUT_NODE_NAME])
# Convert with TF-TRT
if use_trt:
logging.info('Number of nodes before TF-TRT conversion: %d',
len(graph_def.node))
graph_def = trt_convert.create_inference_graph(
graph_def,
outputs=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set
# max_workspace_size_bytes to 256MB to leave enough room for TF
# runtime to allocate GPU memory.
max_workspace_size_bytes=1 << 28,
minimum_segment_size=2,
use_calibration=False,
)
logging.info('Number of nodes after TF-TRT conversion: %d',
len(graph_def.node))
num_engines = len(
[1 for n in graph_def.node if str(n.op) == 'TRTEngineOp'])
self.assertEqual(1, num_engines)
return graph_def
def _Run(self, is_training, use_trt, batch_size, num_epochs, model_dir):
"""Train or evaluate the model.
Args:
is_training: whether to train or evaluate the model. In training mode,
quantization will be simulated where the quantize_and_dequantize_v2 are
placed.
use_trt: if true, use TRT INT8 mode for evaluation, which will perform
real quantization. Otherwise use native TensorFlow which will perform
simulated quantization. Ignored if is_training is True.
batch_size: batch size.
num_epochs: how many epochs to train. Ignored if is_training is False.
model_dir: where to save or load checkpoint.
Returns:
The Estimator evaluation result.
"""
# Get dataset
train_data, test_data = mnist.load_data()
def _PreprocessFn(x, y):
x = math_ops.cast(x, dtypes.float32)
x = array_ops.expand_dims(x, axis=2)
x = 2.0 * (x / 255.0) - 1.0
y = math_ops.cast(y, dtypes.int32)
return x, y
def _EvalInputFn():
mnist_x, mnist_y = test_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _TrainInputFn():
mnist_x, mnist_y = train_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.shuffle(2 * len(mnist_x))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _ModelFn(features, labels, mode):
if is_training:
logits_out = self._BuildGraph(features)
else:
graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
logits_out = importer.import_graph_def(
graph_def,
input_map={INPUT_NODE_NAME: features},
return_elements=[OUTPUT_NODE_NAME + ':0'],
name='')[0]
loss = losses.sparse_softmax_cross_entropy(
labels=labels, logits=logits_out)
summary.scalar('loss', loss)
classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
accuracy = metrics.accuracy(
labels=labels, predictions=classes_out, name='acc_op')
summary.scalar('accuracy', accuracy[1])
if mode == ModeKeys.EVAL:
return EstimatorSpec(
mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
elif mode == ModeKeys.TRAIN:
optimizer = AdamOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss, global_step=get_global_step())
return EstimatorSpec(mode, loss=loss, train_op=train_op)
config_proto = config_pb2.ConfigProto()
config_proto.gpu_options.allow_growth = True
estimator = Estimator(
model_fn=_ModelFn,
model_dir=model_dir if is_training else None,
config=RunConfig(session_config=config_proto))
if is_training:
estimator.train(_TrainInputFn)
results = estimator.evaluate(_EvalInputFn)
logging.info('accuracy: %s', str(results['accuracy']))
return results
# To generate the checkpoint, set a different model_dir and call self._Run()
# by setting is_training=True and num_epochs=1000, e.g.:
# model_dir = '/tmp/quantization_mnist'
# self._Run(
# is_training=True,
# use_trt=False,
# batch_size=128,
# num_epochs=100,
# model_dir=model_dir)
def testEval(self):
if not trt_convert.is_tensorrt_enabled():
return
model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')
accuracy_tf_native = self._Run(
is_training=False,
use_trt=False,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_native: %f', accuracy_tf_native)
self.assertAllClose(0.9662, accuracy_tf_native, rtol=1e-3, atol=1e-3)
if trt_convert.get_linked_tensorrt_version()[0] < 5:
return
accuracy_tf_trt = self._Run(
is_training=False,
use_trt=True,
batch_size=128,
num_epochs=None,
model_dir=model_dir)['accuracy']
logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
test.main()<|fim▁end|>
|
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
|
<|file_name|>views.py<|end_file_name|><|fim▁begin|># include for aggregation
from django.db.models import Case, IntegerField, Sum, Value, When
from django.db.models import CharField
# ------------------------------------------
# imports needed for the functional view
from rest_framework.response import Response
# ------------------------------------------
# ------------------------------------------
# generics class to make writing endpoints easier
from rest_framework import generics
# ------------------------------------------
# main pieces from our DRF app that need to be linked
from . import models
from . import serializers
from . import filters
# ------------------------------------------
LA_Bureaus = ['MF']
EO_Bureaus = ['MY', 'PA', 'PS', 'PW', 'PU', 'AU']
class ListOcrb(generics.ListAPIView):
"""
Operating and Capital Requirements by Bureau (OCRB).
Note: Parameter values are compared case-insensitive.
"""
serializer_class = serializers.OcrbSerializer
filter_class = filters.OcrbFilter
def get_queryset(self):
return models.OCRB.objects.order_by('-fiscal_year', 'budget_type', 'service_area', 'bureau', 'budget_category')
class OcrbSummary(generics.ListAPIView):
"""
Summarize Budget for Operating and Capital Requirements by Service Area and Bureau
"""
serializer_class = serializers.OcrbSumSerializer
filter_class = filters.OcrbSummaryFilter
def get_queryset(self):
return models.OCRB.objects.values('fiscal_year', 'service_area', 'bureau')\
.annotate(bureau_total=Sum('amount'))\
.order_by('fiscal_year', 'service_area', 'bureau')
class ListKpm(generics.ListAPIView):
"""
Key Performance Measures (KPM).
Note: Parameter values are compared case-insensitive.
"""
queryset = models.KPM.objects.all()
serializer_class = serializers.KpmSerializer
filter_class = filters.KpmFilter
<|fim▁hole|>
class ListBudgetHistory(generics.ListAPIView):
"""
Historical Operating and Capital Requirements by Service Area and Bureau
Note: Parameter values are compared case-insensitive.
"""
serializer_class = serializers.BudgetHistorySerializer
filter_class = filters.BudgetHistoryFilter
def get_queryset(self):
return models.BudgetHistory.objects.order_by('fiscal_year', 'bureau_name', 'accounting_object_name', 'functional_area_name')
class HistorySummaryByBureau(generics.ListAPIView):
"""
Summary of Historical Operating and Capital Requirements by Service Area and Bureau
"""
serializer_class = serializers.HistorySummaryBureauSerializer
filter_class = filters.HistoryBureauFilter
def get_queryset(self):
"""
Append the calculated service area based on business logic.
(Some bureaus are in service areas not reflected by the data)
"""
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', 'service_area_code', 'bureau_code', 'bureau_name').annotate(
sa_calced=Case(
When(bureau_code__in = LA_Bureaus, then = Value('LA')),
When(bureau_code__in = EO_Bureaus, then = Value('EO')),
default = 'service_area_code',
output_field = CharField()
),
amount=Sum('amount'))
qs = qs.order_by('fiscal_year', 'service_area_code', 'bureau_code', 'bureau_name')
return qs
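# For illustration, the queryset above corresponds roughly to this SQL sketch
# (table name assumed from the BudgetHistory model; only the CASE/GROUP BY shape
# matters here):
#
#   SELECT fiscal_year, service_area_code, bureau_code, bureau_name,
#          CASE WHEN bureau_code IN ('MF') THEN 'LA'
#               WHEN bureau_code IN ('MY','PA','PS','PW','PU','AU') THEN 'EO'
#               ELSE service_area_code END AS sa_calced,
#          SUM(amount) AS amount
#   FROM budget_history  -- assumed table name
#   GROUP BY fiscal_year, service_area_code, bureau_code, bureau_name, sa_calced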
class HistorySummaryByServiceArea(generics.ListAPIView):
"""
Summary of BudgetHistory by Service Area.
"""
serializer_class = serializers.HistorySummaryByServiceAreaSerializer
filter_class = filters.HistoryServiceAreaFilter
def get_queryset(self):
"""
Calculate service area based on business logic.
(Some bureaus are in service areas not reflected by the data)
"""
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', ).annotate(
sa_calced=Case(
When(bureau_code__in = LA_Bureaus, then = Value('LA')),
When(bureau_code__in = EO_Bureaus, then = Value('EO')),
default = 'service_area_code',
output_field = CharField()
),
amount=Sum('amount'),
)
qs = qs.order_by('fiscal_year', 'sa_calced')
return qs
class HistorySummaryByServiceAreaObjectCode(generics.ListAPIView):
"""
Summary of Historical Operating and Capital Requirements by Service Area and Object Code
"""
serializer_class = serializers.HistorySummaryByServiceAreaObjectCodeSerializer
filter_class = filters.HistoryObjectCode
def get_queryset(self):
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', 'service_area_code', 'object_code').annotate(amount=Sum('amount'))
qs = qs.order_by('fiscal_year', 'service_area_code', 'object_code')
return qs
class ListLookupCode(generics.ListAPIView):
"""
Code reference table for Budget History.
Note: Parameter values are compared case-insensitive.
"""
serializer_class = serializers.LookupCodeSerializer
filter_class = filters.LookupCodeFilter
def get_queryset(self):
return models.LookupCode.objects.all()<|fim▁end|>
| |
<|file_name|>test-form.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2016 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {getFormAsObject} from '../../src/form.js';
describes.realWin('getFormAsObject', {}, env => {
let form;
beforeEach(() => {
form = env.win.document.createElement('form');
env.win.document.body.appendChild(form);
});
it('excludes disabled input', () => {
const input = env.win.document.createElement('input');
input.type = 'text';
input.name = 'foo';
input.value = 'bar';
input.disabled = true;
form.appendChild(input);
expect(getFormAsObject(form)).to.be.an('object').that.is.empty;
});
it('excludes input without name', () => {
const input = env.win.document.createElement('input');
input.type = 'text';
input.value = 'bar';
form.appendChild(input);
expect(getFormAsObject(form)).to.be.an('object').that.is.empty;
});
it('returns text input entries', () => {
const input = env.win.document.createElement('input');
input.type = 'text';
input.name = 'foo';
input.value = 'bar';
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns text input entries with empty value', () => {
const input = env.win.document.createElement('input');
input.type = 'text';
input.name = 'foo';
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['']});
});
it('returns textarea entries', () => {
const textarea = env.win.document.createElement('textarea');
textarea.name = 'foo';
textarea.value = 'bar';
form.appendChild(textarea);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns checked checkbox entries', () => {
const input = env.win.document.createElement('input');
input.type = 'checkbox';
input.name = 'foo';
input.value = 'bar';
input.checked = true;
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('excludes unchecked checkbox entries', () => {
const input = env.win.document.createElement('input');
input.type = 'checkbox';
input.name = 'foo';
input.value = 'bar';
input.checked = false;
form.appendChild(input);
expect(getFormAsObject(form)).to.be.an('object').that.is.empty;
});
it('returns checked radio button entries', () => {
const input = env.win.document.createElement('input');
input.type = 'radio';
input.name = 'foo';
input.value = 'bar';
input.checked = true;
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('excludes unchecked radio button entries', () => {
const input = env.win.document.createElement('input');
input.type = 'radio';
input.name = 'foo';
input.value = 'bar';
input.checked = false;
form.appendChild(input);
expect(getFormAsObject(form)).to.be.an('object').that.is.empty;
});
it('returns first option for select with nothing selected', () => {
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = false;
const selectedOption = env.win.document.createElement('option');
selectedOption.value = 'bar';
selectedOption.selected = false;
const unselectedOption = env.win.document.createElement('option');
unselectedOption.value = 'bang';
unselectedOption.selected = false;
select.appendChild(selectedOption);
select.appendChild(unselectedOption);
form.appendChild(select);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns empty for multi-select with nothing selected', () => {
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = true;
const selectedOption = env.win.document.createElement('option');
selectedOption.value = 'bar';
selectedOption.selected = false;
const unselectedOption = env.win.document.createElement('option');
unselectedOption.value = 'bang';
unselectedOption.selected = false;
select.appendChild(selectedOption);
select.appendChild(unselectedOption);
form.appendChild(select);
expect(getFormAsObject(form)).to.deep.equal({});
});
it('returns selected entry in single-select', () => {
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = false;
const selectedOption = env.win.document.createElement('option');<|fim▁hole|>
const unselectedOption = env.win.document.createElement('option');
unselectedOption.value = 'bang';
unselectedOption.selected = false;
select.appendChild(selectedOption);
select.appendChild(unselectedOption);
form.appendChild(select);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns single selected entry in multi-select', () => {
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = true;
const selectedOption = env.win.document.createElement('option');
selectedOption.value = 'bar';
selectedOption.selected = true;
const unselectedOption = env.win.document.createElement('option');
unselectedOption.value = 'bang';
unselectedOption.selected = false;
select.appendChild(selectedOption);
select.appendChild(unselectedOption);
form.appendChild(select);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns multiple selected entries in multi-select', () => {
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = true;
const selectedOption = env.win.document.createElement('option');
selectedOption.value = 'bar';
selectedOption.selected = true;
const unselectedOption = env.win.document.createElement('option');
unselectedOption.value = 'bang';
unselectedOption.selected = true;
select.appendChild(selectedOption);
select.appendChild(unselectedOption);
form.appendChild(select);
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar', 'bang']});
});
it('returns focused submit input entries', () => {
const input = env.win.document.createElement('input');
input.type = 'submit';
input.name = 'foo';
input.value = 'bar';
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({});
Object.defineProperty(form, 'ownerDocument', {get() {
return {activeElement: input};
}});
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns focused button input entries', () => {
const input = env.win.document.createElement('button');
input.name = 'foo';
input.value = 'bar';
form.appendChild(input);
expect(getFormAsObject(form)).to.deep.equal({});
Object.defineProperty(form, 'ownerDocument', {get() {
return {activeElement: input};
}});
expect(getFormAsObject(form)).to.deep.equal({'foo': ['bar']});
});
it('returns multiple form entries', () => {
const form = env.win.document.createElement('form');
const input = env.win.document.createElement('input');
input.type = 'text';
input.name = 'foo1';
input.value = 'bar';
const checkbox = env.win.document.createElement('input');
checkbox.type = 'checkbox';
checkbox.name = 'foo';
checkbox.value = 'bar';
checkbox.checked = true;
const textarea = env.win.document.createElement('textarea');
textarea.name = 'foo2';
textarea.value = 'bar';
const select = env.win.document.createElement('select');
select.name = 'foo';
select.multiple = false;
const selectedOption = env.win.document.createElement('option');
selectedOption.value = 'baz';
selectedOption.selected = true;
select.appendChild(selectedOption);
form.appendChild(input);
form.appendChild(checkbox);
form.appendChild(textarea);
form.appendChild(select);
const formDataObject = getFormAsObject(form);
expect(formDataObject).to.be.an('object')
.that.has.all.keys('foo', 'foo1', 'foo2');
expect(formDataObject).to.have.property('foo')
.that.has.deep.members(['bar', 'baz']);
expect(formDataObject).to.have.property('foo1')
.that.has.deep.members(['bar']);
expect(formDataObject).to.have.property('foo2')
.that.has.deep.members(['bar']);
});
});<|fim▁end|>
|
selectedOption.value = 'bar';
selectedOption.selected = true;
|
<|file_name|>proxy.py<|end_file_name|><|fim▁begin|># GUI object/properties browser.
# Copyright (C) 2011 Matiychuk D.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
import pywinauto
import sys, os
import time
import wx
import thread
import exceptions
import platform
import warnings
from const import *
'''
proxy module for pywinauto
'''
pywinauto.timings.Timings.window_find_timeout = 1
def resource_path(filename):
if hasattr(sys, '_MEIPASS'):
# PyInstaller >= 1.6
###os.chdir(sys._MEIPASS)
filename = os.path.join(sys._MEIPASS, filename)
elif '_MEIPASS2' in os.environ:
# PyInstaller < 1.6 (tested on 1.5 only)
###os.chdir(os.environ['_MEIPASS2'])
filename = os.path.join(os.environ['_MEIPASS2'], filename)
else:
###os.chdir(sys.path.dirname(sys.argv[0]))
filename = os.path.join(os.path.dirname(sys.argv[0]), filename)
return filename
class SWAPYObject(object):
'''
Base proxy class for pywinauto objects.
'''
def __init__(self, pwa_obj):
'''
Constructor
'''
#original pywinauto object
self.pwa_obj = pwa_obj
default_sort_key = lambda name: name[0].lower()
self.subitems_sort_key = default_sort_key
def GetProperties(self):
'''
Return dict of original + additional properties
Can be overridden for non-pywinauto objects
'''
properties = {}
properties.update(self._get_properies())
properties.update(self._get_additional_properties())
return properties
def Get_subitems(self):
'''
Return list of children - [(control_text, swapy_obj),...]
Can be overridden for non-pywinauto objects
'''
subitems = []
subitems += self._get_children()
'''
for control in children:
try:
texts = control.Texts()
except exceptions.RuntimeError:
texts = ['Unknown control name2!'] #workaround
while texts.count(''):
texts.remove('')
c_name = ', '.join(texts)
if not c_name:
#nontext_controlname = pywinauto.findbestmatch.GetNonTextControlName(control, children)[0]
top_level_parent = control.TopLevelParent().Children()
nontext_controlname = pywinauto.findbestmatch.GetNonTextControlName(control, top_level_parent)[0]
if nontext_controlname:
c_name = nontext_controlname
else:
c_name = 'Unknown control name1!'
subitems.append((c_name, self._get_swapy_object(control)))
'''
subitems += self._get_additional_children()
subitems.sort(key=self.subitems_sort_key)
#encode names
subitems_encoded = []
for (name, obj) in subitems:
name = name.encode('cp1251', 'replace')
subitems_encoded.append((name, obj))
return subitems_encoded
def Exec_action(self, action_id):
'''
Execute action on the control
'''
action = ACTIONS[action_id]
#print('self.pwa_obj.'+action+'()')
exec('self.pwa_obj.'+action+'()')
return 0
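# Note: the exec() call above can also be written with getattr, which avoids
# building source strings (equivalent behaviour assumed, sketch only):
#
#   getattr(self.pwa_obj, action)()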
def Get_actions(self):
'''
return allowed actions for this object. [(id,action_name),...]
'''
allowed_actions = []
try:
obj_actions = dir(self.pwa_obj.WrapperObject())
except:
obj_actions = dir(self.pwa_obj)
for id, action in ACTIONS.items():
if action in obj_actions:
allowed_actions.append((id,action))
allowed_actions.sort(key=lambda name: name[1].lower())
return allowed_actions
def Get_code(self, action_id):
'''
Generate code for pywinauto module
'''
action = ACTIONS[action_id]
code = "\
ctrl = window['"+self._get_additional_properties()['Access names'][0].encode('unicode-escape', 'replace')+"']\n\
ctrl."+action+"()\n"
return code
def Highlight_control(self):
if self._check_visibility():
thread.start_new_thread(self._highlight_control,(3,))
return 0
def _get_properies(self):
'''
Get original pywinauto's object properties
'''
#print type(self.pwa_obj)
try:
properties = self.pwa_obj.GetProperties()
except exceptions.RuntimeError:
properties = {} #workaround
return properties
def _get_additional_properties(self):
'''
Get additional useful properties, like a handle, process ID, etc.
Can be overridden by derived class
'''
additional_properties = {}
pwa_app = pywinauto.application.Application()
#-----Access names
try:
#parent_obj = self.pwa_obj.Parent()
parent_obj = self.pwa_obj.TopLevelParent()
except:
pass
else:
try:
#all_controls = parent_obj.Children()
all_controls = [pwa_app.window_(handle=ch) for ch in pywinauto.findwindows.find_windows(parent=parent_obj.handle, top_level_only=False)]
except:
pass
else:
access_names = []
uniq_names = pywinauto.findbestmatch.build_unique_dict(all_controls)
for uniq_name, obj in uniq_names.items():
if uniq_name != '' and obj.WrapperObject() == self.pwa_obj:
access_names.append(uniq_name)
access_names.sort(key=len)
additional_properties.update({'Access names' : access_names})
#-----
#-----pwa_type
additional_properties.update({'pwa_type' : str(type(self.pwa_obj))})
#---
#-----handle
try:
additional_properties.update({'handle' : str(self.pwa_obj.handle)})
except:
pass
#---
return additional_properties
def _get_children(self):
'''
Return original pywinauto's object children & names
[(control_text, swapy_obj),...]
'''
def _get_name_control(control):
try:
texts = control.Texts()
except exceptions.WindowsError:
texts = ['Unknown control name2!'] #workaround for WindowsError: [Error 0] ...
except exceptions.RuntimeError:
texts = ['Unknown control name3!'] #workaround for RuntimeError: GetButtonInfo failed for button with command id 256
while texts.count(''):
texts.remove('')
text = ', '.join(texts)
if not text:
u_names = []
for uniq_name, obj in uniq_names.items():
if uniq_name != '' and obj.WrapperObject() == control:
#if uniq_name != '' and obj == control:
u_names.append(uniq_name)
if u_names:
u_names.sort(key=len)
name = u_names[-1]
else:
name = 'Unknown control name1!'
else:
name = text
return (name, self._get_swapy_object(control))
pwa_app = pywinauto.application.Application()
try:
parent_obj = self.pwa_obj.TopLevelParent()
except pywinauto.controls.HwndWrapper.InvalidWindowHandle:
#For non visible windows
#...
#InvalidWindowHandle: Handle 0x262710 is not a vaild window handle
parent_obj = self.pwa_obj
children = self.pwa_obj.Children()
visible_controls = [pwa_app.window_(handle=ch) for ch in pywinauto.findwindows.find_windows(parent=parent_obj.handle, top_level_only=False)]
uniq_names = pywinauto.findbestmatch.build_unique_dict(visible_controls)
#uniq_names = pywinauto.findbestmatch.build_unique_dict(children)
names_children = map(_get_name_control, children)
return names_children
def _get_additional_children(self):
'''
Get additional children, like for a menu, submenu, subtab, etc.
Should be overridden in derived classes for non-standard pywinauto objects
'''
return []
def _get_pywinobj_type(self, obj):
'''
Check self pywinauto object type
'''
if type(obj) == pywinauto.application.WindowSpecification:
return 'window'
elif type(obj) == pywinauto.controls.menuwrapper.Menu:
return 'menu'
elif type(obj) == pywinauto.controls.menuwrapper.MenuItem:
return 'menu_item'
elif type(obj) == pywinauto.controls.win32_controls.ComboBoxWrapper:
return 'combobox'
elif type(obj) == pywinauto.controls.common_controls.ListViewWrapper:
return 'listview'
elif type(obj) == pywinauto.controls.common_controls.TabControlWrapper:
return 'tab'
elif type(obj) == pywinauto.controls.common_controls.ToolbarWrapper:
return 'toolbar'
elif type(obj) == pywinauto.controls.common_controls._toolbar_button:
return 'toolbar_button'
elif type(obj) == pywinauto.controls.common_controls.TreeViewWrapper:
return 'tree_view'
elif type(obj) == pywinauto.controls.common_controls._treeview_element:
return 'tree_item'
elif 1==0:
return 'other'
else:
return 'unknown'
def _get_swapy_object(self, pwa_obj):
pwa_type = self._get_pywinobj_type(pwa_obj)
#print pwa_type
if pwa_type == 'smt_NEW':
return smt_NEW(pwa_obj)
if pwa_type == 'window':
return Pwa_window(pwa_obj)
if pwa_type == 'menu':
return Pwa_menu(pwa_obj)
if pwa_type == 'menu_item':
return Pwa_menu_item(pwa_obj)
if pwa_type == 'combobox':
return Pwa_combobox(pwa_obj)
if pwa_type == 'listview':
return Pwa_listview(pwa_obj)
if pwa_type == 'tab':
return Pwa_tab(pwa_obj)
if pwa_type == 'toolbar':
return Pwa_toolbar(pwa_obj)
if pwa_type == 'toolbar_button':
return Pwa_toolbar_button(pwa_obj)
if pwa_type == 'tree_view':
return Pwa_tree(pwa_obj)
if pwa_type == 'tree_item':
return Pwa_tree_item(pwa_obj)
else:
return SWAPYObject(pwa_obj)
def _highlight_control(self, repeat = 1):
while repeat > 0:
repeat -= 1
self.pwa_obj.DrawOutline('red', thickness=1)
time.sleep(0.3)
self.pwa_obj.DrawOutline(colour=0xffffff, thickness=1)
time.sleep(0.2)
return 0
def _check_visibility(self):
'''
Check control/window visibility.
Return pwa.IsVisible() or False if fails
'''
is_visible = False
try:
is_visible = self.pwa_obj.IsVisible()
except:
pass
return is_visible
def _check_actionable(self):
'''
Check control/window Actionable.
Return True or False if fails
'''
try:
self.pwa_obj.VerifyActionable()
except:
is_actionable = False
else:
is_actionable = True
return is_actionable
def _check_existence(self):
'''
Check control/window Exists.
Return True or False if fails
'''
try:
handle_ = self.pwa_obj.handle
obj = pywinauto.application.WindowSpecification({'handle': handle_})
except:
is_exist = False
else:
is_exist = obj.Exists()
return is_exist
class VirtualSWAPYObject(SWAPYObject):
def __init__(self, parent, index):
self.parent = parent
self.index = index
self.pwa_obj = self
self._check_visibility = self.parent._check_visibility
self._check_actionable = self.parent._check_actionable
self._check_existence = self.parent._check_existence
def Select(self):
self.parent.pwa_obj.Select(self.index)
def Get_code(self, action_id):
'''
Generate code for pywinauto module
'''
action = ACTIONS[action_id]
arg = ""
try:
arg = "'"+self.index.encode('unicode-escape', 'replace')+"'"
except:
arg = str(self.index)
code = "\
ctrl."+action+"("+arg+")\n"
return code
def _get_properies(self):
return {}
def Get_subitems(self):
return []
def Highlight_control(self):
pass
return 0
'''
def Get_code(self, action_id):
return '#---Not implemented yet.---\n'
'''
class PC_system(SWAPYObject):
handle = 0
def Get_subitems(self):
'''
returns [(window_text, swapy_obj),...]
'''
#windows--------------------
windows = []
try_count = 3
app = pywinauto.application.Application()<|fim▁hole|> for i in range(try_count):
try:
handles = pywinauto.findwindows.find_windows()
except exceptions.OverflowError: # workaround for OverflowError: array too large
time.sleep(1)
except exceptions.MemoryError:# workaround for MemoryError
time.sleep(1)
else:
break
else:
#TODO: add swapy exception: Could not get windows list
handles = []
#we have to find taskbar in windows list
warnings.filterwarnings("ignore", category=FutureWarning) #ignore future warning in taskbar module
from pywinauto import taskbar
taskbar_handle = taskbar.TaskBarHandle()
for w_handle in handles:
wind = app.window_(handle=w_handle)
if w_handle == taskbar_handle:
texts = ['TaskBar']
else:
texts = wind.Texts()
while texts.count(''):
texts.remove('')
title = ', '.join(texts)
if not title:
title = 'Window#%s' % w_handle
title = title.encode('cp1251', 'replace')
windows.append((title, self._get_swapy_object(wind)))
windows.sort(key=lambda name: name[0].lower())
#-----------------------
#smt new----------------
#------------------------
return windows
def _get_properies(self):
info = { 'Platform' : platform.platform(), \
'Processor' : platform.processor(), \
'PC name' : platform.node() }
return info
def Get_actions(self):
'''
No actions for PC_system
'''
return []
def Get_code(self, action_id):
'''
No code for PC_system
'''
return ''
def Highlight_control(self):
pass
return 0
def _check_visibility(self):
return True
def _check_actionable(self):
return True
def _check_existence(self):
return True
class Pwa_window(SWAPYObject):
def _get_additional_children(self):
'''
Add menu object as children
'''
additional_children = []
menu = self.pwa_obj.Menu()
if menu:
menu_child = [('!Menu', self._get_swapy_object(menu))]
additional_children += menu_child
return additional_children
def Get_code(self, action_id):
'''
Window code
'''
action = ACTIONS[action_id]
code = "\
w_handle = pywinauto.findwindows.find_windows(title=u'"+ self.pwa_obj.WindowText().encode('unicode-escape', 'replace') +"', class_name='"+ self.pwa_obj.Class() +"')[0]\n\
window = pwa_app.window_(handle=w_handle)\n\
window."+action+"()\n"
return code
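# For illustration, the snippet emitted above for a window titled
# 'Untitled - Notepad' of class 'Notepad' and the 'Close' action would look
# roughly like this (title and class are assumptions, not taken from this code):
#
#   w_handle = pywinauto.findwindows.find_windows(title=u'Untitled - Notepad',
#                                                 class_name='Notepad')[0]
#   window = pwa_app.window_(handle=w_handle)
#   window.Close()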
class Pwa_menu(SWAPYObject):
def _check_visibility(self):
is_visible = False
try:
is_visible = self.pwa_obj.ctrl.IsVisible()
except:
pass
return is_visible
def _check_actionable(self):
try:
self.pwa_obj.ctrl.VerifyActionable()
except:
is_actionable = False
else:
is_actionable = True
return is_actionable
def _check_existence(self):
try:
self.pwa_obj.ctrl.handle
except:
is_exist = False
else:
is_exist = True
return is_exist
def _get_additional_children(self):
'''
Add submenu object as children
'''
#print(dir(self.pwa_obj))
#print(self.pwa_obj.is_main_menu)
#print(self.pwa_obj.owner_item)
self.subitems_sort_key = lambda obj: obj[1].pwa_obj.Index() #sorts items by indexes
additional_children = []
menu_items = self.pwa_obj.Items()
for menu_item in menu_items:
item_text = menu_item.Text()
if item_text == '':
if menu_item.Type() == 2048:
item_text = '-----Separator-----'
else:
item_text = 'Index: %d' % menu_item.Index()
menu_item_child = [(item_text, self._get_swapy_object(menu_item))]
additional_children += menu_item_child
return additional_children
def _get_children(self):
'''
        Return the original pywinauto object's children
'''
return []
def Highlight_control(self):
pass
return 0
class Pwa_menu_item(Pwa_menu):
def _check_actionable(self):
if self.pwa_obj.State() == 3: #grayed
is_actionable = False
else:
is_actionable = True
return is_actionable
def _get_additional_children(self):
'''
        Add the submenu as a child
'''
#print(dir(self.pwa_obj))
#print(self.pwa_obj.menu)
#print self.get_menuitems_path()
additional_children = []
submenu = self.pwa_obj.SubMenu()
if submenu:
submenu_child = [(self.pwa_obj.Text()+' submenu', self._get_swapy_object(submenu))]
additional_children += submenu_child
return additional_children
def get_menuitems_path(self):
'''
Compose menuitems_path for GetMenuPath. Example "#0 -> Save As", "Tools -> #0 -> Configure"
'''
path = []
owner_item = self.pwa_obj
while owner_item:
text = owner_item.Text()
if not text:
text = '#%d' % owner_item.Index()
path.append(text)
menu = owner_item.menu
owner_item = menu.owner_item
return '->'.join(path[::-1])
def Get_code(self, action_id):
'''
Generate code for pywinauto module
'''
action = ACTIONS[action_id]
code = "\
window.MenuItem(u'"+self.get_menuitems_path().encode('unicode-escape', 'replace')+"')."+action+"()\n\
"
return code
class Pwa_combobox(SWAPYObject):
def _get_additional_children(self):
'''
Add ComboBox items as children
'''
additional_children = []
items_texts = self.pwa_obj.ItemTexts()
for item_name in items_texts:
additional_children += [(item_name, virtual_combobox_item(self, item_name))]
return additional_children
class virtual_combobox_item(VirtualSWAPYObject):
def _get_properies(self):
index = None
text = self.index
for i, name in enumerate(self.parent.pwa_obj.ItemTexts()):
if name == text:
index = i
break
return {'Index' : index, 'Text' : text.encode('unicode-escape', 'replace')}
class Pwa_listview(SWAPYObject):
def _get_additional_children(self):
'''
Add SysListView32 items as children
'''
additional_children = []
for index in range(self.pwa_obj.ItemCount()):
item = self.pwa_obj.GetItem(index)
additional_children += [(item['text'], virtual_listview_item(self, index))]
return additional_children
class virtual_listview_item(VirtualSWAPYObject):
def _get_properies(self):
item_properties = {'Index' : self.index}
for index, item_props in enumerate(self.parent.pwa_obj.Items()):
if index == self.index:
item_properties.update(item_props)
break
return item_properties
class Pwa_tab(SWAPYObject):
def _get_additional_children(self):
'''
Add TabControl items as children
'''
additional_children = []
for index in range(self.pwa_obj.TabCount()):
text = self.pwa_obj.GetTabText(index)
additional_children += [(text, virtual_tab_item(self, index))]
return additional_children
class virtual_tab_item(VirtualSWAPYObject):
def _get_properies(self):
item_properties = {'Index' : self.index}
return item_properties
class Pwa_toolbar(SWAPYObject):
def _get_additional_children(self):
'''
Add button objects as children
'''
additional_children = []
buttons_count = self.pwa_obj.ButtonCount()
for button_index in range(buttons_count):
try:
button_text = self.pwa_obj.Button(button_index).info.text
button_object = self._get_swapy_object(self.pwa_obj.Button(button_index))
except exceptions.RuntimeError:
#button_text = ['Unknown button name1!'] #workaround for RuntimeError: GetButtonInfo failed for button with index 0
pass #ignore the button
else:
button_item = [(button_text, button_object)]
additional_children += button_item
return additional_children
def _get_children(self):
'''
        Return the original pywinauto object's children
'''
return []
class Pwa_toolbar_button(SWAPYObject):
def _check_visibility(self):
is_visible = False
try:
is_visible = self.pwa_obj.toolbar_ctrl.IsVisible()
except:
pass
return is_visible
def _check_actionable(self):
try:
self.pwa_obj.toolbar_ctrl.VerifyActionable()
except:
is_actionable = False
else:
is_actionable = True
return is_actionable
def _check_existence(self):
try:
handle_ = self.pwa_obj.toolbar_ctrl.handle
obj = pywinauto.application.WindowSpecification({'handle': handle_})
except:
is_exist = False
else:
is_exist = obj.Exists()
return is_exist
def _get_children(self):
return []
def _get_properies(self):
o = self.pwa_obj
props = {'IsCheckable' : o.IsCheckable(),
'IsChecked' : o.IsChecked(),
'IsEnabled': o.IsEnabled(),
'IsPressable' : o.IsPressable(),
'IsPressed' : o.IsPressed(),
'Rectangle' : o.Rectangle(),
'State' : o.State(),
'Style' : o.Style(),
'index' : o.index,}
return props
def Highlight_control(self):
pass
return 0
def Get_code(self, action_id):
'''
Generate code for pywinauto module
'''
action = ACTIONS[action_id]
arg = str(self.pwa_obj.index)
code = "\
ctrl.Button("+arg+")."+action+"()\n"
return code
class Pwa_tree(SWAPYObject):
def _get_additional_children(self):
'''
        Add root items as children
'''
additional_children = []
roots = self.pwa_obj.Roots()
for root in roots:
root_text = root.Text()
obj = self._get_swapy_object(root)
obj.path = [root_text]
root_item = [(root_text, obj)]
additional_children += root_item
return additional_children
def Highlight_control(self):
pass
return 0
class Pwa_tree_item(SWAPYObject):
def _get_properies(self):
o = self.pwa_obj
props = {'Rectangle' : o.Rectangle(),
'State' : o.State(),
'Text' : o.Text(),}
return props
def _check_visibility(self):
return True
def _check_existence(self):
return True
def _check_actionable(self):
return True
def _get_children(self):
return []
def Highlight_control(self):
pass
return 0
def _get_additional_children(self):
'''
        Add sub-tree items as children
'''
additional_children = []
sub_items = self.pwa_obj.Children()
for item in sub_items:
item_text = item.Text()
obj = self._get_swapy_object(item)
obj.path = self.path + [item_text]
sub_item = [(item_text, obj)]
additional_children += sub_item
return additional_children
def Get_code(self, action_id):
'''
Generate code for pywinauto module
'''
action = ACTIONS[action_id]
code = "\
ctrl.GetItem("+str(self.path)+")."+action+"()\n"
return code<|fim▁end|>
| |
<|file_name|>publish_twist_state.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from flexbe_core.proxy import ProxyPublisher
from geometry_msgs.msg import Twist
"""Created on June. 21, 2017
@author: Alireza Hosseini
"""
class PublishTwistState(EventState):
"""
Publishes a velocity command from userdata.
-- topic string Topic to which the velocity command will be published.
># twist Twist Velocity command to be published.
    <= done Velocity command has been published.
<|fim▁hole|> """
def __init__(self, topic):
"""Constructor"""
super(PublishTwistState, self).__init__(outcomes=['done'],
input_keys=['twist'])
self._topic = topic
self._pub = ProxyPublisher({self._topic: Twist})
def execute(self, userdata):
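        # The twist was already published in on_enter, so the state finishes immediately.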
return 'done'
def on_enter(self, userdata):
self._pub.publish(self._topic, userdata.twist)<|fim▁end|>
| |
<|file_name|>LogisticClassifier.py<|end_file_name|><|fim▁begin|>import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class BoWClassifier(nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
# (Tamanio Entrada TE, Tamanio Salida TS) Dimensiones: A=TS*TE x=TE b=TS
self.linear = nn.Linear(vocab_size, num_labels) # Logistic Regression solo es: y = Ax + b
def forward(self, bow_vec):
        return F.log_softmax(self.linear(bow_vec), dim=1)  # log-softmax over the label dimension
def make_bow_vector(sentence, word_to_ix):
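    # Build a 1 x vocab_size tensor of word counts (a bag-of-words vector) for one sentence.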
vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1)
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
def train_model(model,data):
for epoch in range(100):
for instance, label in data:
model.zero_grad()
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
target = autograd.Variable(make_target(label, label_to_ix))
log_probs = model(bow_vec)
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
return model
def test_model(model,test_data):
for instance, label in test_data:
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
log_probs = model(bow_vec)
print(log_probs)
return model
if __name__ == "__main__":
torch.manual_seed(1)<|fim▁hole|> # Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]])<|fim▁end|>
| |
<|file_name|>xls.py<|end_file_name|><|fim▁begin|># Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This file implements an xls renderer based on the openpyxl project.
We produce xls (Excel spreadsheet files) with the output from Rekall plugins.
"""
import time
import openpyxl
from openpyxl import styles
from openpyxl.styles import colors
from openpyxl.styles import fills
from rekall import utils
from rekall.ui import renderer
from rekall.ui import text
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
# pylint: disable=redefined-outer-name
HEADER_STYLE = styles.Style(font=styles.Font(bold=True))
SECTION_STYLE = styles.Style(
fill=styles.PatternFill(
fill_type=fills.FILL_SOLID, start_color=styles.Color(colors.RED)))
FORMAT_STYLE = styles.Style(
alignment=styles.Alignment(vertical="top", wrap_text=False))
class XLSObjectRenderer(renderer.ObjectRenderer):
"""By default the XLS renderer delegates to the text renderer."""
renders_type = "object"
renderers = ["XLSRenderer"]
STYLE = None
def _GetDelegateObjectRenderer(self, item):
return self.ForTarget(item, "TextRenderer")(
session=self.session, renderer=self.renderer.delegate_text_renderer)
def RenderHeader(self, worksheet, column):
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = column.name
cell.style = HEADER_STYLE
# Advance the pointer by 1 cell.
worksheet.current_column += 1
def RenderCell(self, value, worksheet, **options):
# By default just render a single value into the current cell.
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = self.GetData(value, **options)
if self.STYLE:
cell.style = self.STYLE
# Advance the pointer by 1 cell.
worksheet.current_column += 1
def GetData(self, value, **options):
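        # Pass numbers through unchanged so Excel keeps them numeric; render
        # everything else to text via the delegate text renderer.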
if isinstance(value, (int, float, long)):
return value
return unicode(self._GetDelegateObjectRenderer(value).render_row(
value, **options))
class XLSColumn(text.TextColumn):
def __init__(self, type=None, table=None, renderer=None, session=None,
**options):
super(XLSColumn, self).__init__(table=table, renderer=renderer,
session=session, **options)
if type:
self.object_renderer = self.renderer.get_object_renderer(
type=type, target_renderer="XLSRenderer", **options)
class XLSTable(text.TextTable):
column_class = XLSColumn
def render_header(self):
current_ws = self.renderer.current_ws
for column in self.columns:
if column.object_renderer:
object_renderer = column.object_renderer
else:
object_renderer = XLSObjectRenderer(
session=self.session, renderer=self.renderer)
object_renderer.RenderHeader(self.renderer.current_ws, column)
current_ws.current_row += 1
current_ws.current_column = 1
def render_row(self, row=None, highlight=None, **options):
merged_opts = self.options.copy()
merged_opts.update(options)
# Get each column to write its own header.
current_ws = self.renderer.current_ws
for item in row:
# Get the object renderer for the item.
object_renderer = self.renderer.get_object_renderer(
target=item, type=merged_opts.get("type"), **merged_opts)
object_renderer.RenderCell(item, current_ws, **options)
current_ws.current_row += 1
current_ws.current_column = 1
class XLSRenderer(renderer.BaseRenderer):
"""A Renderer for xls files."""
name = "xls"
table_class = XLSTable
tablesep = ""
def __init__(self, output=None, **kwargs):
super(XLSRenderer, self).__init__(**kwargs)
# Make a single delegate text renderer for reuse. Most of the time we
# will just replicate the output from the TextRenderer inside the
# spreadsheet cell.
self.delegate_text_renderer = text.TextRenderer(session=self.session)
self.output = output or self.session.GetParameter("output")
        # If no output filename was given, just make a name based on the time
# stamp.
        if self.output is None:
self.output = "%s.xls" % time.ctime()
try:
self.wb = openpyxl.load_workbook(self.output)
self.current_ws = self.wb.create_sheet()
except IOError:
self.wb = openpyxl.Workbook()
self.current_ws = self.wb.active
def start(self, plugin_name=None, kwargs=None):
super(XLSRenderer, self).start(plugin_name=plugin_name, kwargs=kwargs)
# Make a new worksheet for this run.
if self.current_ws is None:
self.current_ws = self.wb.create_sheet()
ws = self.current_ws
ws.title = plugin_name or ""
ws.current_row = 1
ws.current_column = 1
return self
def flush(self):
super(XLSRenderer, self).flush()
self.current_ws = None
# Write the spreadsheet to a file.
self.wb.save(self.output)
def section(self, name=None, **_):
ws = self.current_ws
for i in range(10):
cell = ws.cell(row=ws.current_row, column=i + 1)
if i == 0:
cell.value = name
cell.style = SECTION_STYLE
ws.current_row += 1
ws.current_column = 1
def format(self, formatstring, *data):
worksheet = self.current_ws
if "%" in formatstring:
data = formatstring % data
else:
data = formatstring.format(*data)
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = data
cell.style = FORMAT_STYLE
worksheet.current_column += 1
if "\n" in data:
worksheet.current_row += 1
worksheet.current_column = 1
def table_header(self, *args, **options):
super(XLSRenderer, self).table_header(*args, **options)
self.table.render_header()
# Following here are object specific renderers.
class XLSEProcessRenderer(XLSObjectRenderer):
"""Expands an EPROCESS into three columns (address, name and PID)."""
renders_type = "_EPROCESS"
def RenderHeader(self, worksheet, column):
for heading in ["_EPROCESS", "Name", "PID"]:
cell = worksheet.cell(
row=worksheet.current_row, column=worksheet.current_column)
cell.value = heading
cell.style = HEADER_STYLE
worksheet.current_column += 1
def RenderCell(self, item, worksheet, **options):
for value in ["%#x" % item.obj_offset, item.name, item.pid]:
object_renderer = self.ForTarget(value, self.renderer)(
session=self.session, renderer=self.renderer, **options)
object_renderer.RenderCell(value, worksheet, **options)
class XLSStringRenderer(XLSObjectRenderer):
renders_type = "String"
def GetData(self, item, **_):
return utils.SmartStr(item)
class XLSStructRenderer(XLSObjectRenderer):
"""Hex format struct's offsets."""
renders_type = "Struct"
def GetData(self, item, **_):
return "%#x" % item.obj_offset
class XLSPointerRenderer(XLSObjectRenderer):
"""Renders the address of the pointer target as a hex string."""
renders_type = "Pointer"
def GetData(self, item, **_):
result = item.v()
        if result is None:
return "-"
return "%#x" % result
class XLSNativeTypeRenderer(XLSObjectRenderer):<|fim▁hole|> """Renders native types as python objects."""
renders_type = "NativeType"
def GetData(self, item, **options):
result = item.v()
        if result is not None:
return result
class XLS_UNICODE_STRING_Renderer(XLSNativeTypeRenderer):
renders_type = "_UNICODE_STRING"
class XLSNoneObjectRenderer(XLSObjectRenderer):
renders_type = "NoneObject"
def GetData(self, item, **_):
_ = item
return "-"
class XLSDateTimeRenderer(XLSObjectRenderer):
"""Renders timestamps as python datetime objects."""
renders_type = "UnixTimeStamp"
STYLE = styles.Style(number_format='MM/DD/YYYY HH:MM:SS')
def GetData(self, item, **options):
if item.v() == 0:
return None
return item.as_datetime()<|fim▁end|>
| |
<|file_name|>test_selection.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import re
from unittest import mock
from stestr import selection
from stestr.tests import base
class TestSelection(base.TestCase):
def test_filter_tests_no_filter(self):
test_list = ['a', 'b', 'c']
result = selection.filter_tests(None, test_list)
self.assertEqual(test_list, result)
def test_filter_tests(self):
test_list = ['a', 'b', 'c']
result = selection.filter_tests(['a'], test_list)
self.assertEqual(['a'], result)
def test_filter_invalid_regex(self):
test_list = ['a', 'b', 'c']
with mock.patch('sys.exit', side_effect=ImportError) as mock_exit:
self.assertRaises(ImportError, selection.filter_tests,
['fake_regex_with_bad_part[The-BAD-part]'],
test_list)
mock_exit.assert_called_once_with(5)
class TestExclusionReader(base.TestCase):
def test_exclusion_reader(self):
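        # Build an in-memory exclude list: four bare regexes plus four regexes
        # carrying a trailing '# note' comment.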
exclude_list = io.StringIO()
for i in range(4):
exclude_list.write('fake_regex_%s\n' % i)
exclude_list.write('fake_regex_with_note_%s # note\n' % i)
exclude_list.seek(0)
with mock.patch('builtins.open',
return_value=exclude_list):
result = selection.exclusion_reader('fake_path')
self.assertEqual(2 * 4, len(result))
note_cnt = 0
# not assuming ordering, mainly just testing the type
for r in result:
self.assertEqual(r[2], [])
if r[1] == 'note':
note_cnt += 1
self.assertIn('search', dir(r[0])) # like a compiled regexp
self.assertEqual(note_cnt, 4)
def test_invalid_regex(self):
exclude_list = io.StringIO()
exclude_list.write("fake_regex_with_bad_part[The-BAD-part]")<|fim▁hole|> with mock.patch('sys.exit') as mock_exit:
selection.exclusion_reader('fake_path')
mock_exit.assert_called_once_with(5)
class TestConstructList(base.TestCase):
def test_simple_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
result = selection.construct_list(test_lists, regexes=['foo'])
self.assertEqual(list(result), ['fake_test(scen)[egg,foo])'])
def test_simple_exclusion_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
result = selection.construct_list(test_lists, exclude_regex='foo')
self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])
def test_invalid_exclusion_re(self):
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
invalid_regex = "fake_regex_with_bad_part[The-BAD-part]"
with mock.patch('sys.exit', side_effect=ImportError) as exit_mock:
self.assertRaises(ImportError, selection.construct_list,
test_lists, exclude_regex=invalid_regex)
exit_mock.assert_called_once_with(5)
def test_exclusion_list(self):
exclude_list = [(re.compile('foo'), 'foo not liked', [])]
test_lists = ['fake_test(scen)[tag,bar])', 'fake_test(scen)[egg,foo])']
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(test_lists,
exclude_list='file',
regexes=['fake_test'])
self.assertEqual(list(result), ['fake_test(scen)[tag,bar])'])
def test_inclusion_list(self):
include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
test_lists = ['fake_test1[tg]', 'fake_test2[tg]', 'fake_test3[tg]']
include_getter = 'stestr.selection._get_regex_from_include_list'
with mock.patch(include_getter,
return_value=include_list):
result = selection.construct_list(test_lists,
include_list='file')
self.assertEqual(set(result),
{'fake_test1[tg]', 'fake_test2[tg]'})
def test_inclusion_list_invalid_regex(self):
include_list = io.StringIO()
include_list.write("fake_regex_with_bad_part[The-BAD-part]")
include_list.seek(0)
with mock.patch('builtins.open',
return_value=include_list):
with mock.patch('sys.exit') as mock_exit:
selection._get_regex_from_include_list('fake_path')
mock_exit.assert_called_once_with(5)
def test_inclusion_exclusion_list_re(self):
include_list = [re.compile('fake_test1'), re.compile('fake_test2')]
test_lists = ['fake_test1[tg]', 'fake_test2[spam]',
'fake_test3[tg,foo]', 'fake_test4[spam]']
exclude_list = [(re.compile('spam'), 'spam not liked', [])]
include_getter = 'stestr.selection._get_regex_from_include_list'
with mock.patch(include_getter,
return_value=include_list):
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(
test_lists, exclude_list='exclude_file',
include_list='include_file', regexes=['foo'])
self.assertEqual(set(result),
{'fake_test1[tg]', 'fake_test3[tg,foo]'})
def test_overlapping_exclude_regex(self):
exclude_list = [(re.compile('compute.test_keypairs.KeypairsTestV210'),
'', []),
(re.compile('compute.test_keypairs.KeypairsTestV21'),
'', [])]
test_lists = [
'compute.test_keypairs.KeypairsTestV210.test_create_keypair',
'compute.test_keypairs.KeypairsTestV21.test_create_keypair',
'compute.test_fake.FakeTest.test_fake_test']
with mock.patch('stestr.selection.exclusion_reader',
return_value=exclude_list):
result = selection.construct_list(test_lists,
exclude_list='file',
regexes=['fake_test'])
self.assertEqual(
list(result), ['compute.test_fake.FakeTest.test_fake_test'])<|fim▁end|>
|
exclude_list.seek(0)
with mock.patch('builtins.open',
return_value=exclude_list):
|
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main
import (
"log"
"os"
"golang.org/x/net/context"
"google.golang.org/grpc"
// "google.golang.org/grpc/credentials/oauth"
"google.golang.org/grpc/metadata"
m "github.com/konjoot/grpc/proto/messages"
s "github.com/konjoot/grpc/proto/sessions"
)
const sessionAddr = "localhost:50051"
const messageAddr = "localhost:50052"
var (
defaultLogin = []byte("login")
defaultPass = []byte("pass")
)
func main() {
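	// Dial the session service without TLS, create a session (credentials can be
	// overridden from the command line), then call the message service with the
	// returned token in an Authorization header.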
// Set up a connection to the server.
sessionConn, err := grpc.Dial(sessionAddr, grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect to session server: %v", err)
}
defer sessionConn.Close()
session := s.NewSessionClient(sessionConn)
login := defaultLogin
pass := defaultPass
if len(os.Args) > 1 {
login = []byte(os.Args[1])
}
if len(os.Args) > 2 {
pass = []byte(os.Args[2])
}
sess, err := session.Create(context.Background(), &s.SessionRequest{Login: login, Pass: pass})
if err != nil {
log.Fatalf("could not create session: %v", err)
}
messageConn, err := grpc.Dial(messageAddr, grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect to message server: %v", err)
}
defer messageConn.Close()
<|fim▁hole|>
// header usage example
header := metadata.Pairs("Authorization", string(sess.Token))
ctx := metadata.NewContext(context.Background(), header)
msg, err := message.Create(ctx, &m.MessageRequest{User: []byte("user1"), Text: []byte("hello")})
if err != nil {
log.Fatalf("could not create message: %v", err)
}
log.Print(msg)
}<|fim▁end|>
|
message := m.NewMessageClient(messageConn)
|
<|file_name|>database.js<|end_file_name|><|fim▁begin|>/* eslint-disable no-console*/
import low from 'lowdb';
import fse from 'fs-extra';
import uuid from 'uuid';
import slug from 'slug';
import constants from './constants';
import { crypt } from '../utils';
const user = {
id: uuid(),
name: constants.USER_NAME,
password: crypt.encrypt(constants.USER_PASSWORD),
role: 'admin',
slug: slug(constants.USER_NAME),
};
fse.ensureFileSync(constants.DATA_FILE);
const db = low(constants.DATA_FILE);
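// Seed the store with a default admin user and empty collections on first run.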
db.defaults({<|fim▁hole|> imageSources: [],
images: [],
templates: [],
volumes: [],
}).write();
console.log('DB is running');
export default () => (db);<|fim▁end|>
|
users: [user],
containers: [],
|
<|file_name|>network.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as platform from 'vs/base/common/platform';
import { URI } from 'vs/base/common/uri';
export namespace Schemas {
/**
* A schema that is used for models that exist in memory
* only and that have no correspondence on a server or such.
*/
export const inMemory = 'inmemory';
/**
* A schema that is used for setting files
*/
export const vscode = 'vscode';
/**
* A schema that is used for internal private files
*/
export const internal = 'private';
/**
* A walk-through document.
*/
export const walkThrough = 'walkThrough';
/**
* An embedded code snippet.
*/
export const walkThroughSnippet = 'walkThroughSnippet';
export const http = 'http';
export const https = 'https';
export const file = 'file';
export const mailto = 'mailto';
export const untitled = 'untitled';
export const data = 'data';
export const command = 'command';
export const vscodeRemote = 'vscode-remote';
export const vscodeRemoteResource = 'vscode-remote-resource';
export const userData = 'vscode-userdata';
export const vscodeCustomEditor = 'vscode-custom-editor';
export const vscodeNotebook = 'vscode-notebook';
export const vscodeNotebookCell = 'vscode-notebook-cell';
export const vscodeNotebookCellMetadata = 'vscode-notebook-cell-metadata';
export const vscodeNotebookCellOutput = 'vscode-notebook-cell-output';
export const vscodeInteractive = 'vscode-interactive';
export const vscodeInteractiveInput = 'vscode-interactive-input';
export const vscodeSettings = 'vscode-settings';
<|fim▁hole|>
export const vscodeTerminal = 'vscode-terminal';
export const webviewPanel = 'webview-panel';
/**
* Scheme used for loading the wrapper html and script in webviews.
*/
export const vscodeWebview = 'vscode-webview';
/**
* Scheme used for extension pages
*/
export const extension = 'extension';
/**
* Scheme used as a replacement of `file` scheme to load
* files with our custom protocol handler (desktop only).
*/
export const vscodeFileResource = 'vscode-file';
/**
* Scheme used for temporary resources
*/
export const tmp = 'tmp';
}
class RemoteAuthoritiesImpl {
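	// Tracks host, port and connection token per remote authority so that
	// vscode-remote URIs can be rewritten into resolvable resource URLs.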
private readonly _hosts: { [authority: string]: string | undefined; } = Object.create(null);
private readonly _ports: { [authority: string]: number | undefined; } = Object.create(null);
private readonly _connectionTokens: { [authority: string]: string | undefined; } = Object.create(null);
private _preferredWebSchema: 'http' | 'https' = 'http';
private _delegate: ((uri: URI) => URI) | null = null;
setPreferredWebSchema(schema: 'http' | 'https') {
this._preferredWebSchema = schema;
}
setDelegate(delegate: (uri: URI) => URI): void {
this._delegate = delegate;
}
set(authority: string, host: string, port: number): void {
this._hosts[authority] = host;
this._ports[authority] = port;
}
setConnectionToken(authority: string, connectionToken: string): void {
this._connectionTokens[authority] = connectionToken;
}
rewrite(uri: URI): URI {
if (this._delegate) {
return this._delegate(uri);
}
const authority = uri.authority;
let host = this._hosts[authority];
if (host && host.indexOf(':') !== -1) {
host = `[${host}]`;
}
const port = this._ports[authority];
const connectionToken = this._connectionTokens[authority];
let query = `path=${encodeURIComponent(uri.path)}`;
if (typeof connectionToken === 'string') {
query += `&tkn=${encodeURIComponent(connectionToken)}`;
}
return URI.from({
scheme: platform.isWeb ? this._preferredWebSchema : Schemas.vscodeRemoteResource,
authority: `${host}:${port}`,
path: `/vscode-remote-resource`,
query
});
}
}
export const RemoteAuthorities = new RemoteAuthoritiesImpl();
class FileAccessImpl {
private static readonly FALLBACK_AUTHORITY = 'vscode-app';
/**
* Returns a URI to use in contexts where the browser is responsible
* for loading (e.g. fetch()) or when used within the DOM.
*
* **Note:** use `dom.ts#asCSSUrl` whenever the URL is to be used in CSS context.
*/
asBrowserUri(uri: URI): URI;
asBrowserUri(moduleId: string, moduleIdToUrl: { toUrl(moduleId: string): string }): URI;
asBrowserUri(uriOrModule: URI | string, moduleIdToUrl?: { toUrl(moduleId: string): string }): URI {
const uri = this.toUri(uriOrModule, moduleIdToUrl);
// Handle remote URIs via `RemoteAuthorities`
if (uri.scheme === Schemas.vscodeRemote) {
return RemoteAuthorities.rewrite(uri);
}
// Convert to `vscode-file` resource..
if (
// ...only ever for `file` resources
uri.scheme === Schemas.file &&
(
// ...and we run in native environments
platform.isNative ||
// ...or web worker extensions on desktop
(typeof platform.globals.importScripts === 'function' && platform.globals.origin === `${Schemas.vscodeFileResource}://${FileAccessImpl.FALLBACK_AUTHORITY}`)
)
) {
return uri.with({
scheme: Schemas.vscodeFileResource,
// We need to provide an authority here so that it can serve
// as origin for network and loading matters in chromium.
// If the URI is not coming with an authority already, we
// add our own
authority: uri.authority || FileAccessImpl.FALLBACK_AUTHORITY,
query: null,
fragment: null
});
}
return uri;
}
/**
* Returns the `file` URI to use in contexts where node.js
* is responsible for loading.
*/
asFileUri(uri: URI): URI;
asFileUri(moduleId: string, moduleIdToUrl: { toUrl(moduleId: string): string }): URI;
asFileUri(uriOrModule: URI | string, moduleIdToUrl?: { toUrl(moduleId: string): string }): URI {
const uri = this.toUri(uriOrModule, moduleIdToUrl);
// Only convert the URI if it is `vscode-file:` scheme
if (uri.scheme === Schemas.vscodeFileResource) {
return uri.with({
scheme: Schemas.file,
// Only preserve the `authority` if it is different from
// our fallback authority. This ensures we properly preserve
// Windows UNC paths that come with their own authority.
authority: uri.authority !== FileAccessImpl.FALLBACK_AUTHORITY ? uri.authority : null,
query: null,
fragment: null
});
}
return uri;
}
private toUri(uriOrModule: URI | string, moduleIdToUrl?: { toUrl(moduleId: string): string }): URI {
if (URI.isUri(uriOrModule)) {
return uriOrModule;
}
return URI.parse(moduleIdToUrl!.toUrl(uriOrModule));
}
}
export const FileAccess = new FileAccessImpl();<|fim▁end|>
|
export const vscodeWorkspaceTrust = 'vscode-workspace-trust';
|
<|file_name|>SeqDoubleIterator.py<|end_file_name|><|fim▁begin|>import SeqIterator
import Constants
"""
@author: Jacob Porter
@summary: An iterator class for iterating through two sequence record files simultaneously.
@requires: SeqIterator
"""
<|fim▁hole|> self.SeqIterator1 = SeqIterator.SeqIterator(file_name1, file_type=file_type, gzip_switch = gzip_switch)
self.SeqIterator2 = SeqIterator.SeqIterator(file_name2, file_type=file_type, gzip_switch = gzip_switch)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
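        # Advance both underlying iterators in lock step and return the records as a pair.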
record1 = self.SeqIterator1.next()
record2 = self.SeqIterator2.next()
return (record1, record2)<|fim▁end|>
|
class SeqDoubleIterator:
def __init__(self, file_name1, file_name2, file_type=Constants.FASTQ, gzip_switch = False):
|
<|file_name|>ValidationException.java<|end_file_name|><|fim▁begin|>package io.blitz.curl.exception;
/**
* Exceptions thrown when a validation error occur during a test execution
* @author ghermeto
*/
public class ValidationException extends BlitzException {
/**
* Constructs an instance of <code>ValidationException</code> with the
* specified error and reason message.
* @param reason the detailed error message.<|fim▁hole|> */
public ValidationException(String reason) {
super("validation", reason);
}
}<|fim▁end|>
| |
<|file_name|>HostOnlyDaoImpl.java<|end_file_name|><|fim▁begin|>package io.cattle.platform.networking.host.dao.impl;
import static io.cattle.platform.core.model.tables.HostTable.*;
import static io.cattle.platform.core.model.tables.HostVnetMapTable.*;
import static io.cattle.platform.core.model.tables.SubnetVnetMapTable.*;
import static io.cattle.platform.core.model.tables.VnetTable.*;
import java.util.List;
import io.cattle.platform.core.model.Host;
import io.cattle.platform.core.model.HostVnetMap;
import io.cattle.platform.core.model.Network;
import io.cattle.platform.core.model.Subnet;
import io.cattle.platform.core.model.SubnetVnetMap;
import io.cattle.platform.core.model.Vnet;
import io.cattle.platform.core.model.tables.records.VnetRecord;
import io.cattle.platform.db.jooq.dao.impl.AbstractJooqDao;
import io.cattle.platform.networking.host.contants.HostOnlyConstants;
import io.cattle.platform.networking.host.dao.HostOnlyDao;
import io.cattle.platform.object.ObjectManager;
import javax.inject.Inject;
import org.jooq.Record;
public class HostOnlyDaoImpl extends AbstractJooqDao implements HostOnlyDao {
ObjectManager objectManager;
@Override
public Vnet getVnetForHost(Network network, Host host) {
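        // If the host belongs to a physical host, match vnets across all hosts on that
        // physical host; otherwise match on this host's id alone.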
Long physicalHostId = host.getPhysicalHostId();
Record record = null;
if ( physicalHostId == null ) {
record = create()
.select(VNET.fields())
.from(VNET)
.join(HOST_VNET_MAP)
.on(HOST_VNET_MAP.VNET_ID.eq(VNET.ID))
.where(VNET.NETWORK_ID.eq(network.getId())
.and(HOST_VNET_MAP.HOST_ID.eq(host.getId()))
.and(HOST_VNET_MAP.REMOVED.isNull()))
.fetchAny();
} else {
record = create()
.select(VNET.fields())
.from(VNET)
.join(HOST_VNET_MAP)
.on(HOST_VNET_MAP.VNET_ID.eq(VNET.ID))
.join(HOST)
.on(HOST_VNET_MAP.HOST_ID.eq(HOST.ID))
.where(VNET.NETWORK_ID.eq(network.getId())
.and(HOST.PHYSICAL_HOST_ID.eq(physicalHostId))
.and(HOST_VNET_MAP.REMOVED.isNull()))
.fetchAny();
}
return record == null ? null : record.into(VnetRecord.class);
}
@Override
public Vnet createVnetForHost(Network network, Host host, Subnet subnet, String uri) {
if ( uri == null ) {
uri = HostOnlyConstants.DEFAULT_HOST_SUBNET_URI;
}
Vnet vnet = objectManager.create(Vnet.class,
VNET.URI, uri,
VNET.ACCOUNT_ID, network.getAccountId(),
VNET.NETWORK_ID, network.getId());
objectManager.create(HostVnetMap.class,
HOST_VNET_MAP.VNET_ID, vnet.getId(),
HOST_VNET_MAP.HOST_ID, host.getId());
if ( subnet != null ) {
objectManager.create(SubnetVnetMap.class,
SUBNET_VNET_MAP.VNET_ID, vnet.getId(),
SUBNET_VNET_MAP.SUBNET_ID, subnet.getId());
}
return vnet;
}
@Override
public HostVnetMap mapVnetToHost(Vnet vnet, Host host) {
List<HostVnetMap> maps = objectManager.find(HostVnetMap.class,
HOST_VNET_MAP.VNET_ID, vnet.getId(),
HOST_VNET_MAP.HOST_ID, host.getId());
if ( maps.size() > 0 ) {
return maps.get(0);
}<|fim▁hole|> }
public ObjectManager getObjectManager() {
return objectManager;
}
@Inject
public void setObjectManager(ObjectManager objectManager) {
this.objectManager = objectManager;
}
}<|fim▁end|>
|
return objectManager.create(HostVnetMap.class,
HOST_VNET_MAP.VNET_ID, vnet.getId(),
HOST_VNET_MAP.HOST_ID, host.getId());
|
<|file_name|>parser_init_mapping.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Class representing the mapper for the parser init files."""
from plasoscaffolder.bll.mappings import base_mapping_helper
from plasoscaffolder.bll.mappings import base_sqliteplugin_mapping
from plasoscaffolder.model import init_data_model
<|fim▁hole|> _PARSER_INIT_TEMPLATE = 'parser_init_template.jinja2'
def __init__(self, mapping_helper: base_mapping_helper.BaseMappingHelper):
"""Initializing the init mapper class.
Args:
mapping_helper (base_mapping_helper.BaseMappingHelper): the helper class
for the mapping
"""
super().__init__()
self._helper = mapping_helper
def GetRenderedTemplate(
self,
data: init_data_model.InitDataModel) -> str:
"""Retrieves the parser init file.
Args:
data (init_data_model.InitDataModel): the data for init file
Returns:
str: the rendered template
"""
context = {'plugin_name': data.plugin_name,
'is_create_template': data.is_create_template}
rendered = self._helper.RenderTemplate(
self._PARSER_INIT_TEMPLATE, context)
return rendered<|fim▁end|>
|
class ParserInitMapping(
base_sqliteplugin_mapping.BaseSQLitePluginMapper):
"""Class representing the parser mapper."""
|
<|file_name|>invitation_test.py<|end_file_name|><|fim▁begin|>from django.core import mail
from django.test import TestCase
from users.default_roles import DefaultGroups
from users.models import Invitation, Membership, OCUser
from communities.tests.common import create_sample_community
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
class InvitationTest(TestCase):
def setUp(self):
(self.community, self.members, self.chairmens) = create_sample_community()
def tearDown(self):
mail.outbox = []
def test_send_invitation(self):
i = Invitation.objects.create(community=self.community,
created_by=self.members[0],
email="[email protected]")
i.send(self.members[0])
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.community.name, mail.outbox[0].subject)
self.assertIn(i.get_absolute_url(), mail.outbox[0].body)
class InvitationViewTest(TestCase):
def setUp(self):
(self.community, self.members, self.chairmen) = create_sample_community()
def tearDown(self):
mail.outbox = []
def post_invite(self, data=None):
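        # Default payload invites [email protected] as a regular member;
        # callers may pass their own data dict instead.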
if not data:
data = {"email": "[email protected]",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"}
return self.client.post(reverse("members"
, kwargs={"community_id": self.community.id}),
data)
def login_chairmen(self):
self.client.login(username=self.chairmen[0].email, password="password")
def test_view(self):
self.login_chairmen()
response = self.post_invite({"email": "[email protected]",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"})
self.assertEqual(Invitation.objects.all().count(), 1)
invitation = Invitation.objects.all()[0]
self.assertEqual(invitation.community, self.community)
self.assertEqual(invitation.created_by, self.chairmen[0])
self.assertEqual(invitation.message, "the message")
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(response.status_code, 200)
#the response is an ajax response the show the user as added
#to the list of members
self.assertIn("delete-invitation", response.content)
self.assertIn("[email protected]", response.content)
def test_no_invite_permission(self):
self.client.login(username=self.members[6].email, password="password")
response = self.post_invite()
self.assertEqual(response.status_code, 403)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(Invitation.objects.all().count(), 0)
def test_bad_email(self):
self.login_chairmen()
response = self.post_invite({"email": "not a real email",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"})
self.assertEqual(response.status_code, 400)
self.assertEqual(_("Form error. Please supply a valid email."), response.content)
def test_invitee_already_invited(self):
Invitation.objects.create(community=self.community,
created_by=self.chairmen[0],
email="[email protected]")
self.login_chairmen()
response = self.post_invite()
self.assertEqual(response.status_code, 400)
self.assertEqual(_("This user is already invited to this community."), response.content)
def test_invitee_already_a_member(self):
u = OCUser.objects.create_user("[email protected]",
"sample user", password="password")
Membership.objects.create(user=u, community=self.community, default_group_name=DefaultGroups.MEMBER)
self.login_chairmen()
response = self.post_invite()<|fim▁hole|><|fim▁end|>
|
self.assertEqual(response.status_code, 400)
self.assertEqual(_("This user already a member of this community."), response.content)
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from lino.api import dd<|fim▁hole|>
class Tag(dd.Model):
name = dd.CharField(max_length=100)
def __str__(self):
return self.name
@dd.receiver(dd.auto_create)
def my_auto_create_handler(sender, **kw):
print("My handler was called with {}".format(sender))<|fim▁end|>
| |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup
import minify.command
setup(name='cloaca',
version='0.1.0',
url='https://github.com/mhmurray/cloaca',
author='Michael Murray',
author_email='[email protected]',
license='MIT',
packages=['cloaca'],
zip_safe=False,
include_package_data=True,
scripts=[
'cloaca/cloacaapp.py'
],<|fim▁hole|> 'futures>=3.0.5',
'minify',
],
cmdclass={
'minify_css' : minify.command.minify_css,
},
)<|fim▁end|>
|
install_requires=[
'tornado>=4.3.0',
'tornadis>=0.7.0',
'bcrypt>=2.0.0',
|
<|file_name|>launch_reduction.py<|end_file_name|><|fim▁begin|>import os
import traceback
from mantidqt.utils.asynchronous import AsyncTask
from addie.processing.mantid.master_table.master_table_exporter import TableFileExporter as MantidTableExporter
# Mantid Total Scattering integration
# (https://github.com/marshallmcdonnell/mantid_total_scattering)
try:
import total_scattering
print("Mantid Total Scattering Version: ", total_scattering.__version__)
from total_scattering.reduction import TotalScatteringReduction
MANTID_TS_ENABLED = True
except ImportError:
print('total_scattering module not found. Functionality disabled')
MANTID_TS_ENABLED = False
class JobPool(object):
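    # Runs one TotalScatteringReduction per configuration via AsyncTask,
    # strictly one at a time: each finished job starts the next in the pool.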
    task_output = None
running = None
task_exc_type, task_exc, task_exc_stack = None, None, None
def __init__(self, configurations):
self.jobs = []
for config in configurations:
print("CONFIG:", config)
self.jobs.append(AsyncTask(TotalScatteringReduction, args=(config,),
success_cb=self.on_success, error_cb=self.on_error,
finished_cb=self.on_finished))
def _start_next(self):
if self.jobs:
self.running = self.jobs.pop(0)
self.running.start()
else:
self.running = None
def start(self):
if not self.jobs:
raise RuntimeError('Cannot start empty job list')
self._start_next()
def on_success(self, task_result):
# TODO should emit a signal
self.task_output = task_result.output
print('SUCCESS!!! {}'.format(self.task_output))
def on_error(self, task_result):
# TODO should emit a signal
print('ERROR!!!')
self.task_exc_type = task_result.exc_type
self.task_exc = task_result.exc_value
self.task_exc_stack = traceback.extract_tb(task_result.stack)
traceback.print_tb(task_result.stack)
print(task_result)
def on_finished(self):
'''Both success and failure call this method afterwards'''<|fim▁hole|> self._start_next() # kick off the next one in the pool
def run_mantid(parent):
num_rows = parent.processing_ui.h3_table.rowCount()
if num_rows <= 0:
raise RuntimeError('Cannot export empty table')
exporter = MantidTableExporter(parent=parent)
# write out the full table to disk
# TODO make a class level name so it can be reused
full_reduction_filename = os.path.join(
os.path.expanduser('~'), '.mantid', 'addie.json')
print('writing out full table to "{}"'.format(full_reduction_filename))
exporter.export(full_reduction_filename)
# append the individual rows to input list (reduction_inputs)
reduction_inputs = []
for row in range(num_rows):
if not exporter.isActive(row):
print('skipping row {} - inactive'.format(row + 1)) # REMOVE?
continue
print('Will be running row {} for reduction'.format(
row + 1)) # TODO should be debug logging
json_input = exporter.retrieve_row_info(row)
reduction_input = exporter.convert_from_row_to_reduction(json_input)
reduction_inputs.append(reduction_input)
if len(reduction_inputs) == 0:
raise RuntimeError('None of the rows were activated')
# locate total scattering script
if MANTID_TS_ENABLED:
pool = JobPool(reduction_inputs)
pool.start()
else:
# TODO should be on the status bar
print('total_scattering module not found. Functionality disabled')<|fim▁end|>
|
# TODO should emit a signal
|
<|file_name|>affix.js<|end_file_name|><|fim▁begin|>/**
* angular-strap
* @version v2.2.1 - 2015-05-15
* @link http://mgcrea.github.io/angular-strap
* @author Olivier Louvignes <[email protected]> (https://github.com/mgcrea)
* @license MIT License, http://www.opensource.org/licenses/MIT
*/
'use strict';
angular.module('mgcrea.ngStrap.affix', [ 'mgcrea.ngStrap.helpers.dimensions', 'mgcrea.ngStrap.helpers.debounce' ]).provider('$affix', function() {
var defaults = this.defaults = {
offsetTop: 'auto',
inlineStyles: true
};
this.$get = [ '$window', 'debounce', 'dimensions', function($window, debounce, dimensions) {
var bodyEl = angular.element($window.document.body);
var windowEl = angular.element($window);
function AffixFactory(element, config) {
var $affix = {};
var options = angular.extend({}, defaults, config);
var targetEl = options.target;
var reset = 'affix affix-top affix-bottom', setWidth = false, initialAffixTop = 0, initialOffsetTop = 0, offsetTop = 0, offsetBottom = 0, affixed = null, unpin = null;
var parent = element.parent();
if (options.offsetParent) {
if (options.offsetParent.match(/^\d+$/)) {
for (var i = 0; i < options.offsetParent * 1 - 1; i++) {
parent = parent.parent();
}
} else {
parent = angular.element(options.offsetParent);
}
}
$affix.init = function() {
this.$parseOffsets();
initialOffsetTop = dimensions.offset(element[0]).top + initialAffixTop;
setWidth = !element[0].style.width;
targetEl.on('scroll', this.checkPosition);
targetEl.on('click', this.checkPositionWithEventLoop);
windowEl.on('resize', this.$debouncedOnResize);
this.checkPosition();
this.checkPositionWithEventLoop();
};
$affix.destroy = function() {
targetEl.off('scroll', this.checkPosition);
targetEl.off('click', this.checkPositionWithEventLoop);
windowEl.off('resize', this.$debouncedOnResize);
};
$affix.checkPositionWithEventLoop = function() {
setTimeout($affix.checkPosition, 1);
};
$affix.checkPosition = function() {
var scrollTop = getScrollTop();
var position = dimensions.offset(element[0]);
var elementHeight = dimensions.height(element[0]);
var affix = getRequiredAffixClass(unpin, position, elementHeight);
if (affixed === affix) return;
affixed = affix;
element.removeClass(reset).addClass('affix' + (affix !== 'middle' ? '-' + affix : ''));
if (affix === 'top') {
unpin = null;
if (setWidth) {
element.css('width', '');
}
if (options.inlineStyles) {
element.css('position', options.offsetParent ? '' : 'relative');
element.css('top', '');
}
} else if (affix === 'bottom') {
if (options.offsetUnpin) {
unpin = -(options.offsetUnpin * 1);
} else {
unpin = position.top - scrollTop;
}
if (setWidth) {
element.css('width', '');
}
if (options.inlineStyles) {
element.css('position', options.offsetParent ? '' : 'relative');<|fim▁hole|> }
} else {
unpin = null;
if (setWidth) {
element.css('width', element[0].offsetWidth + 'px');
}
if (options.inlineStyles) {
element.css('position', 'fixed');
element.css('top', initialAffixTop + 'px');
}
}
};
$affix.$onResize = function() {
$affix.$parseOffsets();
$affix.checkPosition();
};
$affix.$debouncedOnResize = debounce($affix.$onResize, 50);
$affix.$parseOffsets = function() {
var initialPosition = element.css('position');
if (options.inlineStyles) {
element.css('position', options.offsetParent ? '' : 'relative');
}
if (options.offsetTop) {
if (options.offsetTop === 'auto') {
options.offsetTop = '+0';
}
if (options.offsetTop.match(/^[-+]\d+$/)) {
initialAffixTop = -options.offsetTop * 1;
if (options.offsetParent) {
offsetTop = dimensions.offset(parent[0]).top + options.offsetTop * 1;
} else {
offsetTop = dimensions.offset(element[0]).top - dimensions.css(element[0], 'marginTop', true) + options.offsetTop * 1;
}
} else {
offsetTop = options.offsetTop * 1;
}
}
if (options.offsetBottom) {
if (options.offsetParent && options.offsetBottom.match(/^[-+]\d+$/)) {
offsetBottom = getScrollHeight() - (dimensions.offset(parent[0]).top + dimensions.height(parent[0])) + options.offsetBottom * 1 + 1;
} else {
offsetBottom = options.offsetBottom * 1;
}
}
if (options.inlineStyles) {
element.css('position', initialPosition);
}
};
function getRequiredAffixClass(unpin, position, elementHeight) {
var scrollTop = getScrollTop();
var scrollHeight = getScrollHeight();
if (scrollTop <= offsetTop) {
return 'top';
} else if (unpin !== null && scrollTop + unpin <= position.top) {
return 'middle';
} else if (offsetBottom !== null && position.top + elementHeight + initialAffixTop >= scrollHeight - offsetBottom) {
return 'bottom';
} else {
return 'middle';
}
}
function getScrollTop() {
return targetEl[0] === $window ? $window.pageYOffset : targetEl[0].scrollTop;
}
function getScrollHeight() {
return targetEl[0] === $window ? $window.document.body.scrollHeight : targetEl[0].scrollHeight;
}
$affix.init();
return $affix;
}
return AffixFactory;
} ];
}).directive('bsAffix', [ '$affix', '$window', function($affix, $window) {
return {
restrict: 'EAC',
require: '^?bsAffixTarget',
link: function postLink(scope, element, attr, affixTarget) {
var options = {
scope: scope,
target: affixTarget ? affixTarget.$element : angular.element($window)
};
angular.forEach([ 'offsetTop', 'offsetBottom', 'offsetParent', 'offsetUnpin', 'inlineStyles' ], function(key) {
if (angular.isDefined(attr[key])) {
var option = attr[key];
if (/true/i.test(option)) option = true;
if (/false/i.test(option)) option = false;
options[key] = option;
}
});
var affix = $affix(element, options);
scope.$on('$destroy', function() {
affix && affix.destroy();
options = null;
affix = null;
});
}
};
} ]).directive('bsAffixTarget', function() {
return {
controller: [ '$element', function($element) {
this.$element = $element;
} ]
};
});<|fim▁end|>
|
element.css('top', options.offsetParent ? '' : bodyEl[0].offsetHeight - offsetBottom - elementHeight - initialOffsetTop + 'px');
|
<|file_name|>share_links.py<|end_file_name|><|fim▁begin|>import logging
from constance import config
from dateutil.relativedelta import relativedelta
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.utils import timezone
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from pysearpc import SearpcError
from seahub.api2.utils import api_error
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.share.models import FileShare, OrgFileShare
from seahub.utils import gen_shared_link, is_org_context
from seahub.views import check_folder_permission
logger = logging.getLogger(__name__)
def get_share_link_info(fileshare):
data = {}
token = fileshare.token
data['repo_id'] = fileshare.repo_id
data['path'] = fileshare.path
data['ctime'] = fileshare.ctime
data['view_cnt'] = fileshare.view_cnt
data['link'] = gen_shared_link(token, fileshare.s_type)
data['token'] = token
data['expire_date'] = fileshare.expire_date
data['is_expired'] = fileshare.is_expired()
data['username'] = fileshare.username
return data
class ShareLinks(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def _generate_obj_id_and_type_by_path(self, repo_id, path):
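        # Return (file id, 'f') for a file path, (dir id, 'd') for a folder,
        # or (None, None) if nothing exists at the given path.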
file_id = seafile_api.get_file_id_by_path(repo_id, path)
if file_id:
return (file_id, 'f')
dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
if dir_id:
return (dir_id, 'd')
return (None, None)
def get(self, request):
""" get share links.<|fim▁hole|> return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check if args invalid
repo_id = request.GET.get('repo_id', None)
if repo_id:
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# repo level permission check
if not check_folder_permission(request, repo_id, '/'):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
path = request.GET.get('path', None)
if path:
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# folder/path permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
fileshares = FileShare.objects.filter(username=username)
# filter result by args
if repo_id:
fileshares = filter(lambda fs: fs.repo_id == repo_id, fileshares)
if path:
if s_type == 'd' and path[-1] != '/':
path = path + '/'
fileshares = filter(lambda fs: fs.path == path, fileshares)
result = []
for fs in fileshares:
link_info = get_share_link_info(fs)
result.append(link_info)
if len(result) == 1:
result = result[0]
return Response(result)
def post(self, request):
""" create share link.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_id = request.data.get('repo_id', None)
if not repo_id:
error_msg = 'repo_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
path = request.data.get('path', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
password = request.data.get('password', None)
if password and len(password) < config.SHARE_LINK_PASSWORD_MIN_LENGTH:
error_msg = _('Password is too short.')
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
expire_days = int(request.data.get('expire_days', 0))
except ValueError:
expire_days = 0
if expire_days <= 0:
expire_date = None
else:
expire_date = timezone.now() + relativedelta(days=expire_days)
username = request.user.username
if s_type == 'f':
fs = FileShare.objects.get_file_link_by_path(username, repo_id, path)
if not fs:
fs = FileShare.objects.create_file_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
elif s_type == 'd':
fs = FileShare.objects.get_dir_link_by_path(username, repo_id, path)
if not fs:
fs = FileShare.objects.create_dir_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
link_info = get_share_link_info(fs)
return Response(link_info)
class ShareLink(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def get(self, request, token):
try:
fs = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'token %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
link_info = get_share_link_info(fs)
return Response(link_info)
def delete(self, request, token):
""" delete share link.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
fs = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'token %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not fs.is_owner(username):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
fs.delete()
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)<|fim▁end|>
|
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
|
<|file_name|>vue.js<|end_file_name|><|fim▁begin|>'use strict';
module.exports = {
extends: ['./index',
'./rules/imports',
'./rules/frontend',
'./rules/vue'].map(require.resolve).concat(['plugin:vue/recommended']),<|fim▁hole|> ecmaVersion: 2017,
ecmaFeatures: {
jsx: true,
experimentalObjectRestSpread: true,
},
},
rules: {
// this two doesn't work in vue
'import/no-named-as-default': 'off',
'import/no-named-as-default-member': 'off',
},
};<|fim▁end|>
|
parser: 'vue-eslint-parser',
parserOptions: {
parser: 'babel-eslint',
sourceType: 'module',
|
<|file_name|>base_ovh_konnector.js<|end_file_name|><|fim▁begin|>// Generated by CoffeeScript 1.10.0
var Bill, baseKonnector, filterExisting, linkBankOperation, ovhFetcher, saveDataAndFile;
ovhFetcher = require('../lib/ovh_fetcher');
filterExisting = require('../lib/filter_existing');
saveDataAndFile = require('../lib/save_data_and_file');
linkBankOperation = require('../lib/link_bank_operation');
baseKonnector = require('../lib/base_konnector');
Bill = require('../models/bill');
module.exports = {
createNew: function(ovhApi, name, slug) {
var connector, fetchBills, fileOptions, logger, ovhFetcherInstance;
fileOptions = {
vendor: slug,
dateFormat: 'YYYYMMDD'
};
logger = require('printit')({
prefix: name,
date: true
});
ovhFetcherInstance = ovhFetcher["new"](ovhApi, slug, logger);
fetchBills = function(requiredFields, entries, body, next) {
return ovhFetcherInstance.fetchBills(requiredFields, entries, body, next);
};
return connector = baseKonnector.createNew({
name: name,
fields: {
loginUrl: "link",
token: "hidden",
folderPath: "folder"
},<|fim▁hole|> log: logger,
model: Bill,
identifier: slug,
dateDelta: 4,
amountDelta: 0.1
})
]
});
}
};<|fim▁end|>
|
models: [Bill],
fetchOperations: [
fetchBills, filterExisting(logger, Bill), saveDataAndFile(logger, Bill, fileOptions, ['bill']), linkBankOperation({
|
<|file_name|>TSink.java<|end_file_name|><|fim▁begin|>/* -----------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|>
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------------------------------------------- */
package ppm_java._dev.concept.example.event;
import ppm_java.backend.TController;
import ppm_java.typelib.IControllable;
import ppm_java.typelib.IEvented;
import ppm_java.typelib.VBrowseable;
/**
*
*/
class TSink extends VBrowseable implements IEvented, IControllable
{
public TSink (String id)
{
super (id);
}
public void OnEvent (int e, String arg0)
{
String msg;
msg = GetID () + ": " + "Received messaging event. Message: " + arg0;
System.out.println (msg);
}
public void Start () {/* Do nothing */}
public void Stop () {/* Do nothing */}
public void OnEvent (int e) {/* Do nothing */}
public void OnEvent (int e, int arg0) {/* Do nothing */}
public void OnEvent (int e, long arg0) {/* Do nothing */}
protected void _Register ()
{
TController.Register (this);
}
}<|fim▁end|>
|
GNU General Public License for more details.
|
<|file_name|>test_distreporter.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial.distreporter}.
"""
from cStringIO import StringIO
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import TreeReporter
class DistReporterTestCase(TestCase):
"""
Tests for L{DistReporter}.
"""
def setUp(self):
self.stream = StringIO()
self.distReporter = DistReporter(TreeReporter(self.stream))
self.test = TestCase()
def test_startSuccessStop(self):
"""
Success output only gets sent to the stream after the test has stopped.<|fim▁hole|> self.distReporter.addSuccess(self.test)
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.stopTest(self.test)
self.assertNotEqual(self.stream.getvalue(), "")
def test_startErrorStop(self):
"""
Error output only gets sent to the stream after the test has stopped.
"""
self.distReporter.startTest(self.test)
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.addError(self.test, "error")
self.assertEqual(self.stream.getvalue(), "")
self.distReporter.stopTest(self.test)
self.assertNotEqual(self.stream.getvalue(), "")
def test_forwardedMethods(self):
"""
Calling methods of L{DistReporter} add calls to the running queue of
the test.
"""
self.distReporter.startTest(self.test)
self.distReporter.addFailure(self.test, "foo")
self.distReporter.addError(self.test, "bar")
self.distReporter.addSkip(self.test, "egg")
self.distReporter.addUnexpectedSuccess(self.test, "spam")
self.distReporter.addExpectedFailure(self.test, "err", "foo")
self.assertEqual(len(self.distReporter.running[self.test.id()]), 6)<|fim▁end|>
|
"""
self.distReporter.startTest(self.test)
self.assertEqual(self.stream.getvalue(), "")
|
<|file_name|>fields.py<|end_file_name|><|fim▁begin|>from django.db.models import CharField
<|fim▁hole|>
class HrefField(CharField):
def __init__(
self,
protocols=DEFALT_PROTOCOLS,
allow_paths=True,
allow_fragments=True,
allow_query_strings=True,
max_length=255,
**kwargs):
self.protocols = protocols
self.allow_paths = allow_paths
self.allow_fragments = allow_fragments
self.allow_query_strings = allow_query_strings
kwargs['max_length'] = max_length
super(HrefField, self).__init__(**kwargs)
#TODO - FUNCTIONALITY!<|fim▁end|>
|
DEFALT_PROTOCOLS = ('http', 'https', 'mailto', 'tel')
|
<|file_name|>ArrayItem.tsx<|end_file_name|><|fim▁begin|>/* eslint-disable import/no-unresolved,no-nested-ternary */
import {ChangeIndicatorScope, ContextProvidedChangeIndicator} from '@sanity/base/change-indicators'
import {
ArraySchemaType,
isReferenceSchemaType,
isValidationMarker,
Marker,
Path,
} from '@sanity/types'
import {FormFieldPresence} from '@sanity/base/presence'
import React, {memo, useCallback, useMemo, useRef} from 'react'
import {FOCUS_TERMINATOR, pathFor, startsWith} from '@sanity/util/paths'
import {Box} from '@sanity/ui'
import {useConditionalReadOnly} from '@sanity/base/_internal'
import PatchEvent from '../../../../PatchEvent'
import {ArrayMember, InsertEvent, ReferenceItemComponentType} from '../types'
import {EMPTY_ARRAY} from '../../../../utils/empty'
import {hasFocusAtPath, hasFocusWithinPath} from '../../../../utils/focusUtils'
import {useScrollIntoViewOnFocusWithin} from '../../../../hooks/useScrollIntoViewOnFocusWithin'
import {EditPortal} from '../../../../EditPortal'
import {useDidUpdate} from '../../../../hooks/useDidUpdate'
import {getItemType, isEmpty} from './helpers'
import {ItemForm} from './ItemForm'
import {RowItem} from './RowItem'
import {CellItem} from './CellItem'
interface ArrayInputListItemProps {
type: ArraySchemaType
value: ArrayMember
index: number
compareValue?: any[]
markers: Marker[]
itemKey: string | undefined
layout?: 'media' | 'default'
onRemove: (value: ArrayMember) => void
onInsert: (event: InsertEvent) => void
onChange: (event: PatchEvent, value: ArrayMember) => void
onFocus: (path: Path) => void
onBlur: () => void
ReferenceItemComponent: ReferenceItemComponentType
filterField: () => any
readOnly: boolean
focusPath: Path
presence: FormFieldPresence[]
}
export const ArrayItem = memo(function ArrayItem(props: ArrayInputListItemProps) {
const {
value,
markers,
type,
index,
itemKey,
readOnly,
presence,
focusPath,
onFocus,
onChange,
onRemove,
onInsert,
onBlur,
filterField,
compareValue,
ReferenceItemComponent,
} = props
const innerElementRef = useRef(null)
const conditionalReadOnly = useConditionalReadOnly() ?? readOnly
const hasFocusWithin = hasFocusWithinPath(props.focusPath, props.value)
useScrollIntoViewOnFocusWithin(innerElementRef, hasFocusWithin)
useDidUpdate(hasFocusAtPath(props.focusPath, props.value), (hadFocus, hasFocus) => {
if (!hadFocus && hasFocus && innerElementRef.current) {
// Note: if editing an inline item, focus is handled by the item input itself and no ref is being set
innerElementRef.current.focus()
}
})
const itemPath = useMemo(() => pathFor([itemKey ? {_key: itemKey} : index]), [index, itemKey])
const emitFocus = useCallback(
(path) => {
if (itemKey) {
onFocus([{_key: itemKey}, ...path])
}
},
[onFocus, itemKey]
)
const handleItemElementFocus = useCallback(
(event: React.FocusEvent) => {
if (event.target === event.currentTarget) {
emitFocus([])
}
},
[emitFocus]
)
const handleEditOpen = useCallback(() => emitFocus([FOCUS_TERMINATOR]), [emitFocus])
const handleEditClose = useCallback(() => {
if (isEmpty(value)) {
onRemove(value)
} else {
emitFocus([])
}
}, [value, onRemove, emitFocus])
const handleChange = useCallback(
(event: PatchEvent, valueOverride?: ArrayMember) =>
onChange(event, typeof valueOverride === 'undefined' ? value : valueOverride),
[onChange, value]
)
const handleRemove = useCallback(() => onRemove(value), [onRemove, value])
const handleKeyPress = useCallback(
(event) => {
if (event.key === 'Enter' || event.key === ' ') {
event.preventDefault()
handleEditOpen()
}
},
[handleEditOpen]
)
const options = type.options || {}
const isSortable = !conditionalReadOnly && options.sortable !== false
const isEditing = hasFocusWithinPath(focusPath, value)
const itemType = getItemType(type, value)
const isGrid = type.options?.layout === 'grid'
const ItemComponent = isGrid ? CellItem : RowItem
const itemMarkers = React.useMemo(
() => markers.filter((marker: Marker) => startsWith(itemPath, marker.path)),
[itemPath, markers]<|fim▁hole|>
const scopedValidation = useMemo(
() =>
itemMarkers.length === 0
? EMPTY_ARRAY
: itemMarkers.filter(isValidationMarker).map((marker) => {
if (marker.path.length <= 1) {
return marker
}
const level = marker.level === 'error' ? 'errors' : 'warnings'
return {...marker, item: marker.item.cloneWithMessage(`Contains ${level}`)}
}),
[itemMarkers]
)
const itemPresence = useMemo(
() =>
presence.filter((presenceItem: FormFieldPresence) => startsWith(itemPath, presenceItem.path)),
[itemPath, presence]
)
const isReference = itemType && isReferenceSchemaType(itemType)
const editForm = useMemo(() => {
if (!isEditing && !isReference) {
return null
}
const form = (
<ItemForm
onChange={handleChange}
markers={itemMarkers}
filterField={filterField}
focusPath={focusPath}
onFocus={onFocus}
onBlur={onBlur}
onInsert={onInsert}
insertableTypes={type.of}
type={itemType}
value={value}
isSortable={isSortable}
ReferenceItemComponent={ReferenceItemComponent}
readOnly={conditionalReadOnly}
presence={itemPresence}
compareValue={compareValue}
/>
)
return isReference && !isGrid ? (
form
) : (
<EditPortal
header={
conditionalReadOnly ? `View ${itemType?.title || ''}` : `Edit ${itemType?.title || ''}`
}
type={type?.options?.editModal === 'fold' ? 'dialog' : type?.options?.editModal || 'dialog'}
id={value._key}
onClose={handleEditClose}
legacy_referenceElement={innerElementRef.current}
>
{form}
</EditPortal>
)
}, [
ReferenceItemComponent,
compareValue,
filterField,
focusPath,
handleChange,
handleEditClose,
isEditing,
isGrid,
isReference,
isSortable,
itemMarkers,
itemPresence,
itemType,
onBlur,
onFocus,
onInsert,
conditionalReadOnly,
type?.options?.editModal,
value,
])
const item = (
<ItemComponent
aria-selected={isEditing}
index={index}
value={value}
readOnly={readOnly}
type={itemType}
insertableTypes={type.of}
presence={isEditing ? EMPTY_ARRAY : itemPresence}
validation={scopedValidation}
isSortable={isSortable}
onInsert={onInsert}
onFocus={handleItemElementFocus}
onClick={itemType ? handleEditOpen : undefined}
onRemove={handleRemove}
onKeyPress={handleKeyPress}
ref={innerElementRef}
/>
)
return (
<>
<ChangeIndicatorScope path={itemPath}>
<ContextProvidedChangeIndicator compareDeep disabled={isEditing && !isReference}>
{isGrid ? (
// grid should be rendered without a margin
item
) : (
<Box marginX={1}>{isReference ? editForm : item}</Box>
)}
</ContextProvidedChangeIndicator>
</ChangeIndicatorScope>
{isEditing && (!isReference || isGrid) ? editForm : null}
</>
)
})<|fim▁end|>
|
)
|
<|file_name|>std-pub.rs<|end_file_name|><|fim▁begin|>// The 'std' crates should always be implicitly public,
// without having to pass any compiler arguments
// run-pass<|fim▁hole|>
pub struct PublicType {
pub field: Option<u8>
}
fn main() {}<|fim▁end|>
|
#![deny(exported_private_dependencies)]
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
pub use self::object::PyObject;
pub use self::typeobject::PyType;
pub use self::module::PyModule;
pub use self::string::{PyBytes, PyString, PyStringData};
#[cfg(feature="python27-sys")]
pub use self::string::PyUnicode;
#[cfg(feature="python3-sys")]
pub use self::string::PyString as PyUnicode;
pub use self::iterator::PyIterator;
pub use self::boolobject::PyBool;
pub use self::tuple::{PyTuple, NoArgs};
pub use self::dict::PyDict;
pub use self::list::PyList;
#[cfg(feature="python27-sys")]
pub use self::num::PyInt;
#[cfg(feature="python3-sys")]
pub use self::num::PyLong as PyInt;
pub use self::num::{PyLong, PyFloat};
pub use self::sequence::PySequence;
#[macro_export]
macro_rules! pyobject_newtype(
($name: ident) => (
py_impl_to_py_object_for_python_object!($name);
py_impl_from_py_object_for_python_object!($name);
impl $crate::PythonObject for $name {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self.0
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self.0
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$name(obj)
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
::std::mem::transmute(obj)
}
}
);
($name: ident, $checkfunction: ident) => (
pyobject_newtype!($name);
impl ::python::PythonObjectWithCheckedDowncast for $name {
#[inline]
fn downcast_from<'p>(py: ::python::Python<'p>, obj: ::objects::object::PyObject) -> Result<$name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr()) != 0 {
Ok($name(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: ::python::Python<'p>, obj: &'a ::objects::object::PyObject) -> Result<&'a $name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr()) != 0 {
Ok(::std::mem::transmute(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
}
);
($name: ident, $checkfunction: ident, $typeobject: ident) => (<|fim▁hole|> fn type_object(py: ::python::Python) -> ::objects::typeobject::PyType {
unsafe { ::objects::typeobject::PyType::from_type_ptr(py, &mut ::ffi::$typeobject) }
}
}
);
);
macro_rules! extract(
($obj:ident to $t:ty; $py:ident => $body: block) => {
impl <'source> ::conversion::FromPyObject<'source>
for $t
{
fn extract($py: Python, $obj: &'source PyObject) -> PyResult<Self> {
$body
}
}
}
);
mod object;
mod typeobject;
mod module;
mod string;
mod dict;
mod iterator;
mod boolobject;
mod tuple;
mod list;
mod num;
mod sequence;
pub mod exc;
#[cfg(feature="python27-sys")]
pub mod oldstyle;
mod tests;<|fim▁end|>
|
pyobject_newtype!($name, $checkfunction);
impl ::python::PythonObjectWithTypeObject for $name {
#[inline]
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
typeck.rs, an introduction
The type checker is responsible for:
1. Determining the type of each expression
2. Resolving methods and traits
3. Guaranteeing that most type rules are met ("most?", you say, "why most?"
Well, dear reader, read on)
The main entry point is `check_crate()`. Type checking operates in two major
phases: collect and check. The collect phase passes over all items and
determines their type, without examining their "innards". The check phase
then checks function bodies and so forth.
Within the check phase, we check each function body one at a time (bodies of
function expressions are checked as part of the containing function).
Inference is used to supply types wherever they are unknown. The actual
checking of a function itself has several phases (check, regionck, writeback),
as discussed in the documentation for the `check` module.
The type checker is defined into various submodules which are documented
independently:
- astconv: converts the AST representation of types
into the `ty` representation
- collect: computes the types of each top-level item and enters them into
the `cx.tcache` table for later use
- check: walks over function bodies and type checks them, inferring types for
local variables, type parameters, etc as necessary.
- infer: finds the types to use for each type variable such that
all subtyping and assignment constraints are met. In essence, the check
module specifies the constraints, and the infer module solves them.
*/
use driver::session;
use middle::resolve;
use middle::ty;
use util::common::time;
use util::ppaux::Repr;
use util::ppaux;
use std::hashmap::HashMap;
use std::result;
use extra::list::List;
use extra::list;
use syntax::codemap::span;
use syntax::print::pprust::*;
use syntax::{ast, ast_map, abi};
use syntax::opt_vec;
#[path = "check/mod.rs"]
pub mod check;
pub mod rscope;
pub mod astconv;
#[path = "infer/mod.rs"]
pub mod infer;
pub mod collect;
pub mod coherence;
#[deriving(Encodable, Decodable)]
pub enum method_origin {
// supertrait method invoked on "self" inside a default method
// first field is supertrait ID;
// second field is method index (relative to the *supertrait*
// method list)
method_super(ast::def_id, uint),
// fully statically resolved method
method_static(ast::def_id),
// method invoked on a type parameter with a bounded trait
method_param(method_param),
// method invoked on a trait instance
method_trait(ast::def_id, uint, ty::TraitStore),
// method invoked on "self" inside a default method
method_self(ast::def_id, uint)
}
// details for a method invoked with a receiver whose type is a type parameter
// with a bounded trait.
#[deriving(Encodable, Decodable)]
pub struct method_param {
// the trait containing the method to be invoked
trait_id: ast::def_id,
// index of the method to be invoked amongst the trait's methods
method_num: uint,
// index of the type parameter (from those that are in scope) that is
// the type of the receiver
param_num: uint,
// index of the bound for this type parameter which specifies the trait
bound_num: uint,
}
pub struct method_map_entry {
// the type of the self parameter, which is not reflected in the fn type
// (FIXME #3446)
self_ty: ty::t,
// the mode of `self`
self_mode: ty::SelfMode,
// the type of explicit self on the method
explicit_self: ast::explicit_self_,
// method details being invoked
origin: method_origin,
}
// maps from an expression id that corresponds to a method call to the details
// of the method to be invoked
pub type method_map = @mut HashMap<ast::node_id, method_map_entry>;
pub type vtable_param_res = @~[vtable_origin];
// Resolutions for bounds of all parameters, left to right, for a given path.
pub type vtable_res = @~[vtable_param_res];
<|fim▁hole|> from whence comes the vtable, and tys are the type substs.
vtable_res is the vtable itself
*/
vtable_static(ast::def_id, ~[ty::t], vtable_res),
/*
Dynamic vtable, comes from a parameter that has a bound on it:
fn foo<T:quux,baz,bar>(a: T) -- a's vtable would have a
vtable_param origin
The first uint is the param number (identifying T in the example),
and the second is the bound number (identifying baz)
*/
vtable_param(uint, uint),
/*
Dynamic vtable, comes from self.
*/
vtable_self(ast::def_id)
}
impl Repr for vtable_origin {
fn repr(&self, tcx: ty::ctxt) -> ~str {
match *self {
vtable_static(def_id, ref tys, ref vtable_res) => {
fmt!("vtable_static(%?:%s, %s, %s)",
def_id,
ty::item_path_str(tcx, def_id),
tys.repr(tcx),
vtable_res.repr(tcx))
}
vtable_param(x, y) => {
fmt!("vtable_param(%?, %?)", x, y)
}
vtable_self(def_id) => {
fmt!("vtable_self(%?)", def_id)
}
}
}
}
pub type vtable_map = @mut HashMap<ast::node_id, vtable_res>;
pub struct CrateCtxt {
// A mapping from method call sites to traits that have that method.
trait_map: resolve::TraitMap,
method_map: method_map,
vtable_map: vtable_map,
coherence_info: coherence::CoherenceInfo,
tcx: ty::ctxt
}
// Functions that write types into the node type table
pub fn write_ty_to_tcx(tcx: ty::ctxt, node_id: ast::node_id, ty: ty::t) {
debug!("write_ty_to_tcx(%d, %s)", node_id, ppaux::ty_to_str(tcx, ty));
assert!(!ty::type_needs_infer(ty));
tcx.node_types.insert(node_id as uint, ty);
}
pub fn write_substs_to_tcx(tcx: ty::ctxt,
node_id: ast::node_id,
substs: ~[ty::t]) {
if substs.len() > 0u {
debug!("write_substs_to_tcx(%d, %?)", node_id,
substs.map(|t| ppaux::ty_to_str(tcx, *t)));
assert!(substs.iter().all(|t| !ty::type_needs_infer(*t)));
tcx.node_type_substs.insert(node_id, substs);
}
}
pub fn write_tpt_to_tcx(tcx: ty::ctxt,
node_id: ast::node_id,
tpt: &ty::ty_param_substs_and_ty) {
write_ty_to_tcx(tcx, node_id, tpt.ty);
if !tpt.substs.tps.is_empty() {
write_substs_to_tcx(tcx, node_id, copy tpt.substs.tps);
}
}
pub fn lookup_def_tcx(tcx: ty::ctxt, sp: span, id: ast::node_id) -> ast::def {
match tcx.def_map.find(&id) {
Some(&x) => x,
_ => {
tcx.sess.span_fatal(sp, "internal error looking up a definition")
}
}
}
pub fn lookup_def_ccx(ccx: &CrateCtxt, sp: span, id: ast::node_id)
-> ast::def {
lookup_def_tcx(ccx.tcx, sp, id)
}
pub fn no_params(t: ty::t) -> ty::ty_param_bounds_and_ty {
ty::ty_param_bounds_and_ty {
generics: ty::Generics {type_param_defs: @~[],
region_param: None},
ty: t
}
}
pub fn require_same_types(
tcx: ty::ctxt,
maybe_infcx: Option<@mut infer::InferCtxt>,
t1_is_expected: bool,
span: span,
t1: ty::t,
t2: ty::t,
msg: &fn() -> ~str) -> bool {
let l_tcx;
let l_infcx;
match maybe_infcx {
None => {
l_tcx = tcx;
l_infcx = infer::new_infer_ctxt(tcx);
}
Some(i) => {
l_tcx = i.tcx;
l_infcx = i;
}
}
match infer::mk_eqty(l_infcx, t1_is_expected, infer::Misc(span), t1, t2) {
result::Ok(()) => true,
result::Err(ref terr) => {
l_tcx.sess.span_err(span, msg() + ": " +
ty::type_err_to_str(l_tcx, terr));
ty::note_and_explain_type_err(l_tcx, terr);
false
}
}
}
// a list of mapping from in-scope-region-names ("isr") to the
// corresponding ty::Region
pub type isr_alist = @List<(ty::bound_region, ty::Region)>;
trait get_and_find_region {
fn get(&self, br: ty::bound_region) -> ty::Region;
fn find(&self, br: ty::bound_region) -> Option<ty::Region>;
}
impl get_and_find_region for isr_alist {
pub fn get(&self, br: ty::bound_region) -> ty::Region {
self.find(br).get()
}
pub fn find(&self, br: ty::bound_region) -> Option<ty::Region> {
for list::each(*self) |isr| {
let (isr_br, isr_r) = *isr;
if isr_br == br { return Some(isr_r); }
}
return None;
}
}
fn check_main_fn_ty(ccx: &CrateCtxt,
main_id: ast::node_id,
main_span: span) {
let tcx = ccx.tcx;
let main_t = ty::node_id_to_type(tcx, main_id);
match ty::get(main_t).sty {
ty::ty_bare_fn(ref fn_ty) => {
match tcx.items.find(&main_id) {
Some(&ast_map::node_item(it,_)) => {
match it.node {
ast::item_fn(_, _, _, ref ps, _)
if ps.is_parameterized() => {
tcx.sess.span_err(
main_span,
"main function is not allowed to have type parameters");
return;
}
_ => ()
}
}
_ => ()
}
let mut ok = ty::type_is_nil(fn_ty.sig.output);
let num_args = fn_ty.sig.inputs.len();
ok &= num_args == 0u;
if !ok {
tcx.sess.span_err(
main_span,
fmt!("Wrong type in main function: found `%s`, \
expected `fn() -> ()`",
ppaux::ty_to_str(tcx, main_t)));
}
}
_ => {
tcx.sess.span_bug(main_span,
fmt!("main has a non-function type: found `%s`",
ppaux::ty_to_str(tcx, main_t)));
}
}
}
fn check_start_fn_ty(ccx: &CrateCtxt,
start_id: ast::node_id,
start_span: span) {
let tcx = ccx.tcx;
let start_t = ty::node_id_to_type(tcx, start_id);
match ty::get(start_t).sty {
ty::ty_bare_fn(_) => {
match tcx.items.find(&start_id) {
Some(&ast_map::node_item(it,_)) => {
match it.node {
ast::item_fn(_,_,_,ref ps,_)
if ps.is_parameterized() => {
tcx.sess.span_err(
start_span,
"start function is not allowed to have type parameters");
return;
}
_ => ()
}
}
_ => ()
}
let se_ty = ty::mk_bare_fn(tcx, ty::BareFnTy {
purity: ast::impure_fn,
abis: abi::AbiSet::Rust(),
sig: ty::FnSig {
bound_lifetime_names: opt_vec::Empty,
inputs: ~[
ty::mk_int(),
ty::mk_imm_ptr(tcx, ty::mk_imm_ptr(tcx, ty::mk_u8())),
ty::mk_imm_ptr(tcx, ty::mk_u8())
],
output: ty::mk_int()
}
});
require_same_types(tcx, None, false, start_span, start_t, se_ty,
|| fmt!("start function expects type: `%s`", ppaux::ty_to_str(ccx.tcx, se_ty)));
}
_ => {
tcx.sess.span_bug(start_span,
fmt!("start has a non-function type: found `%s`",
ppaux::ty_to_str(tcx, start_t)));
}
}
}
fn check_for_entry_fn(ccx: &CrateCtxt) {
let tcx = ccx.tcx;
if !*tcx.sess.building_library {
match *tcx.sess.entry_fn {
Some((id, sp)) => match *tcx.sess.entry_type {
Some(session::EntryMain) => check_main_fn_ty(ccx, id, sp),
Some(session::EntryStart) => check_start_fn_ty(ccx, id, sp),
None => tcx.sess.bug("entry function without a type")
},
None => tcx.sess.bug("type checking without entry function")
}
}
}
pub fn check_crate(tcx: ty::ctxt,
trait_map: resolve::TraitMap,
crate: &ast::crate)
-> (method_map, vtable_map) {
let time_passes = tcx.sess.time_passes();
let ccx = @mut CrateCtxt {
trait_map: trait_map,
method_map: @mut HashMap::new(),
vtable_map: @mut HashMap::new(),
coherence_info: coherence::CoherenceInfo(),
tcx: tcx
};
time(time_passes, ~"type collecting", ||
collect::collect_item_types(ccx, crate));
// this ensures that later parts of type checking can assume that items
// have valid types and not error
tcx.sess.abort_if_errors();
time(time_passes, ~"coherence checking", ||
coherence::check_coherence(ccx, crate));
time(time_passes, ~"type checking", ||
check::check_item_types(ccx, crate));
check_for_entry_fn(ccx);
tcx.sess.abort_if_errors();
(ccx.method_map, ccx.vtable_map)
}<|fim▁end|>
|
pub enum vtable_origin {
/*
Statically known vtable. def_id gives the class or impl item
|
<|file_name|>_container_registry.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.7.4, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryConfiguration
from .operations import AuthenticationOperations, ContainerRegistryBlobOperations, ContainerRegistryOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.rest import HttpRequest, HttpResponse
class ContainerRegistry(object):
"""Metadata API definition for the Azure Container Registry runtime.
:ivar container_registry: ContainerRegistryOperations operations
:vartype container_registry: container_registry.operations.ContainerRegistryOperations
:ivar container_registry_blob: ContainerRegistryBlobOperations operations
:vartype container_registry_blob: container_registry.operations.ContainerRegistryBlobOperations
:ivar authentication: AuthenticationOperations operations
:vartype authentication: container_registry.operations.AuthenticationOperations
:param url: Registry login URL.
:type url: str
:keyword api_version: Api Version. The default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
_base_url = '{url}'
self._config = ContainerRegistryConfiguration(url=url, **kwargs)
self._client = PipelineClient(base_url=_base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.container_registry = ContainerRegistryOperations(self._client, self._config, self._serialize, self._deserialize)
self.container_registry_blob = ContainerRegistryBlobOperations(self._client, self._config, self._serialize, self._deserialize)
self.authentication = AuthenticationOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""<|fim▁hole|> }
request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ContainerRegistry
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)<|fim▁end|>
|
request_copy = deepcopy(request)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
|
<|file_name|>evaluate_callables_tests.py<|end_file_name|><|fim▁begin|>from build import evaluate_callables
<|fim▁hole|> self.result = evaluate_callables({"abc": 123, "def": 456, "xyz": 789})
def it_should_return_the_same_dict(self):
assert self.result == {"abc": 123, "def": 456, "xyz": 789}
class WhenEvaluatingADictWithCallables:
def given_input_containing_lambdas(self):
self.input = {"abc": lambda: 123, "def": lambda: 456, "xyz": 789}
self.input_copy = self.input.copy()
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(self.input)
def it_should_return_the_dict_having_called_the_functions(self):
assert self.result == {"abc": 123, "def": 456, "xyz": 789}
def it_should_not_change_the_original_dict(self):
assert self.input == self.input_copy
class MyDict(dict):
def __eq__(self, other):
if not isinstance(other, MyDict):
return False
return super().__eq__(other)
def copy(self):
return MyDict({k: v for k, v in self.items()})
class WhenEvaluatingACustomDictWithNoCallables:
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(MyDict({"abc": 123, "def": 456, "xyz": 789}))
def it_should_return_an_instance_of_the_same_class(self):
assert self.result == MyDict({"abc": 123, "def": 456, "xyz": 789})
class WhenEvaluatingACustomDictWithCallables:
def given_input_containing_lambdas(self):
self.input = MyDict({"abc": lambda: 123, "def": lambda: 456, "xyz": 789})
self.input_copy = self.input.copy()
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(self.input)
def it_should_return_an_instance_of_the_same_class_having_called_the_functions(self):
assert self.result == MyDict({"abc": 123, "def": 456, "xyz": 789})
def it_should_not_change_the_original_dict(self):
assert self.input == self.input_copy
# todo: make it work for other sequences<|fim▁end|>
|
class WhenEvaluatingADictWithNoCallables:
def when_i_evaluate_the_dict(self):
|
<|file_name|>algorithms.ts<|end_file_name|><|fim▁begin|>import 'rxjs/add/operator/map';
import {Injectable} from '@angular/core';
import {Http, RequestMethod} from '@angular/http';
import {Observable} from 'rxjs/Observable';
import {Cube} from '../models/cube';
import 'rxjs/add/operator/mergeMap';
import {Algorithm} from '../models/analysis/algorithm';
import {Input, InputTypes} from '../models/analysis/input';
import {Output, OutputTypes} from '../models/analysis/output';
import {environment} from '../../environments/environment';
import {ExecutionConfiguration} from '../models/analysis/executionConfiguration';
import {Configuration} from 'jasmine-spec-reporter/built/configuration';
@Injectable()
export class AlgorithmsService {
private API_DAM_PATH: string = environment.DAMUrl ;
constructor(private http: Http) {
}
getActualCompatibleAlgorithms(): Observable<Algorithm[]> {
// console.log(JSON.stringify({time_series: AlgorithmsService.dummyTimeSeries().serialize(), descriptive_statistics: AlgorithmsService.dummyDescriptiveStatistics().serialize(), clustering: AlgorithmsService.dummyClustering().serialize()}));
return this.http.get(`${environment.DAMUrl}/services/meta/all`)
.map(res => {
let algorithms = [];
let response = res.json();
for (let key of Object.keys(response)){
let algorithm = new Algorithm().deserialize(response[key]);
algorithms.push(algorithm);
}
return algorithms;
});
}
getActualCompatibleAlgorithm( algorithmName): Observable<Algorithm> {
return this.http.get(`${environment.DAMUrl}/services/meta/${algorithmName}`)
.map(res => {
let response = res.json();
return new Algorithm().deserialize(response);
});
}
getTimeSeriesAlgorithm(): Observable<Algorithm> {
let that = this;
return this.getActualCompatibleAlgorithm('time_series');
}
getDescriptiveStatisticsAlgorithm(): Observable<Algorithm> {
return this.getActualCompatibleAlgorithm('descriptive_statistics');
}
getClusteringAlgorithm(): Observable<Algorithm> {
return this.getActualCompatibleAlgorithm('clustering');
}
getOutlierDetectionAlgorithm(): Observable<Algorithm> {
let that = this;
return this.getActualCompatibleAlgorithm('outlier_detection');
/*return Observable.create(function (observer: any) {
observer.next(AlgorithmsService.dummyOutlierDetection());
});*/
}
getRuleMiningAlgorithm(): Observable<Algorithm> {
let that = this;
return this.getActualCompatibleAlgorithm('rule_mining');
/*return Observable.create(function (observer: any) {
observer.next(AlgorithmsService.dummyOutlierDetection());
});*/
}
getAlgorithm(name, cube: Cube): Observable<Algorithm> {
switch (name) {
case 'time_series':
return this.getTimeSeriesAlgorithm();
case 'descriptive_statistics':
return this.getDescriptiveStatisticsAlgorithm();
case 'clustering':
return this.getClusteringAlgorithm();
case 'outlier_detection':
return this.getOutlierDetectionAlgorithm();<|fim▁hole|> default:
return this.http.get(`${this.API_DAM_PATH}/${name}`)
.map(res => {
let response = res.json();
return new Algorithm().deserialize(response);
});
}
}
}<|fim▁end|>
|
case 'rule_mining':
return this.getRuleMiningAlgorithm();
|
<|file_name|>test_coilheatingdxvariablespeed.py<|end_file_name|><|fim▁begin|>import os
import tempfile
import unittest
import logging<|fim▁hole|>
log = logging.getLogger(__name__)
class TestCoilHeatingDxVariableSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coilheatingdxvariablespeed(self):
pyidf.validation_level = ValidationLevel.error
obj = CoilHeatingDxVariableSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_indoor_air_inlet_node_name = "node|Indoor Air Inlet Node Name"
obj.indoor_air_inlet_node_name = var_indoor_air_inlet_node_name
# node
var_indoor_air_outlet_node_name = "node|Indoor Air Outlet Node Name"
obj.indoor_air_outlet_node_name = var_indoor_air_outlet_node_name
# integer
var_number_of_speeds = 5
obj.number_of_speeds = var_number_of_speeds
# integer
var_nominal_speed_level = 5
obj.nominal_speed_level = var_nominal_speed_level
# real
var_rated_heating_capacity_at_selected_nominal_speed_level = 6.6
obj.rated_heating_capacity_at_selected_nominal_speed_level = var_rated_heating_capacity_at_selected_nominal_speed_level
# real
var_rated_air_flow_rate_at_selected_nominal_speed_level = 7.7
obj.rated_air_flow_rate_at_selected_nominal_speed_level = var_rated_air_flow_rate_at_selected_nominal_speed_level
# object-list
var_energy_part_load_fraction_curve_name = "object-list|Energy Part Load Fraction Curve Name"
obj.energy_part_load_fraction_curve_name = var_energy_part_load_fraction_curve_name
# object-list
var_defrost_energy_input_ratio_function_of_temperature_curve_name = "object-list|Defrost Energy Input Ratio Function of Temperature Curve Name"
obj.defrost_energy_input_ratio_function_of_temperature_curve_name = var_defrost_energy_input_ratio_function_of_temperature_curve_name
# real
var_minimum_outdoor_drybulb_temperature_for_compressor_operation = -50.0
obj.minimum_outdoor_drybulb_temperature_for_compressor_operation = var_minimum_outdoor_drybulb_temperature_for_compressor_operation
# real
var_outdoor_drybulb_temperature_to_turn_on_compressor = 11.11
obj.outdoor_drybulb_temperature_to_turn_on_compressor = var_outdoor_drybulb_temperature_to_turn_on_compressor
# real
var_maximum_outdoor_drybulb_temperature_for_defrost_operation = 3.61
obj.maximum_outdoor_drybulb_temperature_for_defrost_operation = var_maximum_outdoor_drybulb_temperature_for_defrost_operation
# real
var_crankcase_heater_capacity = 0.0
obj.crankcase_heater_capacity = var_crankcase_heater_capacity
# real
var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = 0.0
obj.maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation
# alpha
var_defrost_strategy = "ReverseCycle"
obj.defrost_strategy = var_defrost_strategy
# alpha
var_defrost_control = "Timed"
obj.defrost_control = var_defrost_control
# real
var_defrost_time_period_fraction = 0.0
obj.defrost_time_period_fraction = var_defrost_time_period_fraction
# real
var_resistive_defrost_heater_capacity = 0.0
obj.resistive_defrost_heater_capacity = var_resistive_defrost_heater_capacity
# real
var_speed_1_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_1_reference_unit_gross_rated_heating_capacity = var_speed_1_reference_unit_gross_rated_heating_capacity
# real
var_speed_1_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_1_reference_unit_gross_rated_heating_cop = var_speed_1_reference_unit_gross_rated_heating_cop
# real
var_speed_1_reference_unit_rated_air_flow_rate = 0.0
obj.speed_1_reference_unit_rated_air_flow_rate = var_speed_1_reference_unit_rated_air_flow_rate
# object-list
var_speed_1_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 1 Heating Capacity Function of Temperature Curve Name"
obj.speed_1_heating_capacity_function_of_temperature_curve_name = var_speed_1_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 1 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_1_energy_input_ratio_function_of_temperature_curve_name = var_speed_1_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_2_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_2_reference_unit_gross_rated_heating_capacity = var_speed_2_reference_unit_gross_rated_heating_capacity
# real
var_speed_2_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_2_reference_unit_gross_rated_heating_cop = var_speed_2_reference_unit_gross_rated_heating_cop
# real
var_speed_2_reference_unit_rated_air_flow_rate = 0.0
obj.speed_2_reference_unit_rated_air_flow_rate = var_speed_2_reference_unit_rated_air_flow_rate
# object-list
var_speed_2_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 2 Heating Capacity Function of Temperature Curve Name"
obj.speed_2_heating_capacity_function_of_temperature_curve_name = var_speed_2_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 2 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_2_energy_input_ratio_function_of_temperature_curve_name = var_speed_2_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_3_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_3_reference_unit_gross_rated_heating_capacity = var_speed_3_reference_unit_gross_rated_heating_capacity
# real
var_speed_3_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_3_reference_unit_gross_rated_heating_cop = var_speed_3_reference_unit_gross_rated_heating_cop
# real
var_speed_3_reference_unit_rated_air_flow_rate = 0.0
obj.speed_3_reference_unit_rated_air_flow_rate = var_speed_3_reference_unit_rated_air_flow_rate
# object-list
var_speed_3_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 3 Heating Capacity Function of Temperature Curve Name"
obj.speed_3_heating_capacity_function_of_temperature_curve_name = var_speed_3_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 3 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_3_energy_input_ratio_function_of_temperature_curve_name = var_speed_3_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_4_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_4_reference_unit_gross_rated_heating_capacity = var_speed_4_reference_unit_gross_rated_heating_capacity
# real
var_speed_4_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_4_reference_unit_gross_rated_heating_cop = var_speed_4_reference_unit_gross_rated_heating_cop
# real
var_speed_4_reference_unit_rated_air_flow_rate = 0.0
obj.speed_4_reference_unit_rated_air_flow_rate = var_speed_4_reference_unit_rated_air_flow_rate
# object-list
var_speed_4_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 4 Heating Capacity Function of Temperature Curve Name"
obj.speed_4_heating_capacity_function_of_temperature_curve_name = var_speed_4_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 4 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_4_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_4_energy_input_ratio_function_of_temperature_curve_name = var_speed_4_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_5_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_5_reference_unit_gross_rated_heating_capacity = var_speed_5_reference_unit_gross_rated_heating_capacity
# real
var_speed_5_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_5_reference_unit_gross_rated_heating_cop = var_speed_5_reference_unit_gross_rated_heating_cop
# real
var_speed_5_reference_unit_rated_air_flow_rate = 0.0
obj.speed_5_reference_unit_rated_air_flow_rate = var_speed_5_reference_unit_rated_air_flow_rate
# object-list
var_speed_5_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 5 Heating Capacity Function of Temperature Curve Name"
obj.speed_5_heating_capacity_function_of_temperature_curve_name = var_speed_5_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 5 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_5_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_5_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 5 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_5_energy_input_ratio_function_of_temperature_curve_name = var_speed_5_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 5 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_6_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_6_reference_unit_gross_rated_heating_capacity = var_speed_6_reference_unit_gross_rated_heating_capacity
# real
var_speed_6_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_6_reference_unit_gross_rated_heating_cop = var_speed_6_reference_unit_gross_rated_heating_cop
# real
var_speed_6_reference_unit_rated_air_flow_rate = 0.0
obj.speed_6_reference_unit_rated_air_flow_rate = var_speed_6_reference_unit_rated_air_flow_rate
# object-list
var_speed_6_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 6 Heating Capacity Function of Temperature Curve Name"
obj.speed_6_heating_capacity_function_of_temperature_curve_name = var_speed_6_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 6 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_6_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_6_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 6 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_6_energy_input_ratio_function_of_temperature_curve_name = var_speed_6_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 6 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_7_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_7_reference_unit_gross_rated_heating_capacity = var_speed_7_reference_unit_gross_rated_heating_capacity
# real
var_speed_7_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_7_reference_unit_gross_rated_heating_cop = var_speed_7_reference_unit_gross_rated_heating_cop
# real
var_speed_7_reference_unit_rated_air_flow_rate = 0.0
obj.speed_7_reference_unit_rated_air_flow_rate = var_speed_7_reference_unit_rated_air_flow_rate
# object-list
var_speed_7_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 7 Heating Capacity Function of Temperature Curve Name"
obj.speed_7_heating_capacity_function_of_temperature_curve_name = var_speed_7_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 7 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_7_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_7_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 7 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_7_energy_input_ratio_function_of_temperature_curve_name = var_speed_7_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 7 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_8_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_8_reference_unit_gross_rated_heating_capacity = var_speed_8_reference_unit_gross_rated_heating_capacity
# real
var_speed_8_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_8_reference_unit_gross_rated_heating_cop = var_speed_8_reference_unit_gross_rated_heating_cop
# real
var_speed_8_reference_unit_rated_air_flow_rate = 0.0
obj.speed_8_reference_unit_rated_air_flow_rate = var_speed_8_reference_unit_rated_air_flow_rate
# object-list
var_speed_8_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 8 Heating Capacity Function of Temperature Curve Name"
obj.speed_8_heating_capacity_function_of_temperature_curve_name = var_speed_8_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 8 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_8_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_8_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 8 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_8_energy_input_ratio_function_of_temperature_curve_name = var_speed_8_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 8 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_9_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_9_reference_unit_gross_rated_heating_capacity = var_speed_9_reference_unit_gross_rated_heating_capacity
# real
var_speed_9_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_9_reference_unit_gross_rated_heating_cop = var_speed_9_reference_unit_gross_rated_heating_cop
# real
var_speed_9_reference_unit_rated_air_flow_rate = 0.0
obj.speed_9_reference_unit_rated_air_flow_rate = var_speed_9_reference_unit_rated_air_flow_rate
# object-list
var_speed_9_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 9 Heating Capacity Function of Temperature Curve Name"
obj.speed_9_heating_capacity_function_of_temperature_curve_name = var_speed_9_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 9 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_9_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_9_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 9 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_9_energy_input_ratio_function_of_temperature_curve_name = var_speed_9_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 9 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_10_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_10_reference_unit_gross_rated_heating_capacity = var_speed_10_reference_unit_gross_rated_heating_capacity
# real
var_speed_10_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_10_reference_unit_gross_rated_heating_cop = var_speed_10_reference_unit_gross_rated_heating_cop
# real
var_speed_10_reference_unit_rated_air_flow_rate = 0.0
obj.speed_10_reference_unit_rated_air_flow_rate = var_speed_10_reference_unit_rated_air_flow_rate
# object-list
var_speed_10_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 10 Heating Capacity Function of Temperature Curve Name"
obj.speed_10_heating_capacity_function_of_temperature_curve_name = var_speed_10_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 10 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_10_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_10_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 10 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_10_energy_input_ratio_function_of_temperature_curve_name = var_speed_10_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 10 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].name, var_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].indoor_air_inlet_node_name, var_indoor_air_inlet_node_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].indoor_air_outlet_node_name, var_indoor_air_outlet_node_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].number_of_speeds, var_number_of_speeds)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].nominal_speed_level, var_nominal_speed_level)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].rated_heating_capacity_at_selected_nominal_speed_level, var_rated_heating_capacity_at_selected_nominal_speed_level)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].rated_air_flow_rate_at_selected_nominal_speed_level, var_rated_air_flow_rate_at_selected_nominal_speed_level)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].energy_part_load_fraction_curve_name, var_energy_part_load_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_energy_input_ratio_function_of_temperature_curve_name, var_defrost_energy_input_ratio_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].minimum_outdoor_drybulb_temperature_for_compressor_operation, var_minimum_outdoor_drybulb_temperature_for_compressor_operation)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].outdoor_drybulb_temperature_to_turn_on_compressor, var_outdoor_drybulb_temperature_to_turn_on_compressor)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].maximum_outdoor_drybulb_temperature_for_defrost_operation, var_maximum_outdoor_drybulb_temperature_for_defrost_operation)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].crankcase_heater_capacity, var_crankcase_heater_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation, var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_strategy, var_defrost_strategy)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_control, var_defrost_control)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].defrost_time_period_fraction, var_defrost_time_period_fraction)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].resistive_defrost_heater_capacity, var_resistive_defrost_heater_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_gross_rated_heating_capacity, var_speed_1_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_gross_rated_heating_cop, var_speed_1_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_rated_air_flow_rate, var_speed_1_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_heating_capacity_function_of_temperature_curve_name, var_speed_1_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_energy_input_ratio_function_of_temperature_curve_name, var_speed_1_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_gross_rated_heating_capacity, var_speed_2_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_gross_rated_heating_cop, var_speed_2_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_rated_air_flow_rate, var_speed_2_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_heating_capacity_function_of_temperature_curve_name, var_speed_2_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_energy_input_ratio_function_of_temperature_curve_name, var_speed_2_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_gross_rated_heating_capacity, var_speed_3_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_gross_rated_heating_cop, var_speed_3_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_rated_air_flow_rate, var_speed_3_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_heating_capacity_function_of_temperature_curve_name, var_speed_3_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_energy_input_ratio_function_of_temperature_curve_name, var_speed_3_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_gross_rated_heating_capacity, var_speed_4_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_gross_rated_heating_cop, var_speed_4_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_rated_air_flow_rate, var_speed_4_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_heating_capacity_function_of_temperature_curve_name, var_speed_4_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_energy_input_ratio_function_of_temperature_curve_name, var_speed_4_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_gross_rated_heating_capacity, var_speed_5_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_gross_rated_heating_cop, var_speed_5_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_rated_air_flow_rate, var_speed_5_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_heating_capacity_function_of_temperature_curve_name, var_speed_5_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_energy_input_ratio_function_of_temperature_curve_name, var_speed_5_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_gross_rated_heating_capacity, var_speed_6_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_gross_rated_heating_cop, var_speed_6_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_rated_air_flow_rate, var_speed_6_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_heating_capacity_function_of_temperature_curve_name, var_speed_6_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_energy_input_ratio_function_of_temperature_curve_name, var_speed_6_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_gross_rated_heating_capacity, var_speed_7_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_gross_rated_heating_cop, var_speed_7_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_rated_air_flow_rate, var_speed_7_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_heating_capacity_function_of_temperature_curve_name, var_speed_7_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_energy_input_ratio_function_of_temperature_curve_name, var_speed_7_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_gross_rated_heating_capacity, var_speed_8_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_gross_rated_heating_cop, var_speed_8_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_rated_air_flow_rate, var_speed_8_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_heating_capacity_function_of_temperature_curve_name, var_speed_8_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_energy_input_ratio_function_of_temperature_curve_name, var_speed_8_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_gross_rated_heating_capacity, var_speed_9_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_gross_rated_heating_cop, var_speed_9_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_rated_air_flow_rate, var_speed_9_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_heating_capacity_function_of_temperature_curve_name, var_speed_9_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_energy_input_ratio_function_of_temperature_curve_name, var_speed_9_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_gross_rated_heating_capacity, var_speed_10_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_gross_rated_heating_cop, var_speed_10_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_rated_air_flow_rate, var_speed_10_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_heating_capacity_function_of_temperature_curve_name, var_speed_10_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_energy_input_ratio_function_of_temperature_curve_name, var_speed_10_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name)<|fim▁end|>
|
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.coils import CoilHeatingDxVariableSpeed
|
<|file_name|>_user.model.ts<|end_file_name|><|fim▁begin|><%#
Copyright 2013-2018 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
export class User {
public id?: any;
public login?: string;
public firstName?: string;
public lastName?: string;
public email?: string;
public activated?: Boolean;
public langKey?: string;
public authorities?: any[];
public createdBy?: string;
public createdDate?: Date;
public lastModifiedBy?: string;
public lastModifiedDate?: Date;
public password?: string;
constructor(
id?: any,
login?: string,
firstName?: string,
lastName?: string,
email?: string,
activated?: Boolean,
langKey?: string,
authorities?: any[],
createdBy?: string,
createdDate?: Date,
lastModifiedBy?: string,
lastModifiedDate?: Date,
password?: string
) {<|fim▁hole|> this.id = id ? id : null;
this.login = login ? login : null;
this.firstName = firstName ? firstName : null;
this.lastName = lastName ? lastName : null;
this.email = email ? email : null;
this.activated = activated ? activated : false;
this.langKey = langKey ? langKey : null;
this.authorities = authorities ? authorities : null;
this.createdBy = createdBy ? createdBy : null;
this.createdDate = createdDate ? createdDate : null;
this.lastModifiedBy = lastModifiedBy ? lastModifiedBy : null;
this.lastModifiedDate = lastModifiedDate ? lastModifiedDate : null;
this.password = password ? password : null;
}
}<|fim▁end|>
| |
<|file_name|>RTree.java<|end_file_name|><|fim▁begin|>package com.github.davidmoten.rtree;
import static com.github.davidmoten.rtree.geometry.Geometries.rectangle;
import static java.util.Optional.of;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import com.github.davidmoten.guavamini.Lists;
import com.github.davidmoten.guavamini.annotations.VisibleForTesting;
import com.github.davidmoten.rtree.geometry.Circle;
import com.github.davidmoten.rtree.geometry.Geometry;
import com.github.davidmoten.rtree.geometry.HasGeometry;
import com.github.davidmoten.rtree.geometry.Intersects;
import com.github.davidmoten.rtree.geometry.Line;
import com.github.davidmoten.rtree.geometry.Point;
import com.github.davidmoten.rtree.geometry.Rectangle;
import com.github.davidmoten.rtree.internal.Comparators;
import com.github.davidmoten.rtree.internal.NodeAndEntries;
import com.github.davidmoten.rtree.internal.operators.OperatorBoundedPriorityQueue;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
/**
* Immutable in-memory 2D R-Tree with configurable splitter heuristic.
*
* @param <T>
* the entry value type
* @param <S>
* the entry geometry type
*/
public final class RTree<T, S extends Geometry> {
public static final Rectangle ZERO_RECTANGLE = rectangle(0, 0, 0, 0);
private final Optional<? extends Node<T, S>> root;
private final Context<T, S> context;
/**
* Benchmarks show that this is a good choice for up to O(10,000) entries when
* using Quadratic splitter (Guttman).
*/
public static final int MAX_CHILDREN_DEFAULT_GUTTMAN = 4;
/**
* Benchmarks show that this is the sweet spot for up to O(10,000) entries when
* using R*-tree heuristics.
*/
public static final int MAX_CHILDREN_DEFAULT_STAR = 4;
/**
* Current size in Entries of the RTree.
*/
private final int size;
private static final Func2<Optional<Rectangle>, Entry<Object, Geometry>, Optional<Rectangle>> RECTANGLE_ACCUMULATOR =
(rectangle, entry) ->
rectangle.map(value -> Optional.of(value.add(entry.geometry().mbr())))
.orElseGet(() -> Optional.of(entry.geometry().mbr()));
/**
* Constructor.
*
* @param root
* the root node of the tree if present
     * @param size
     *            the number of entries in the tree
     * @param context
     *            options for the R-tree
*/
private RTree(Optional<? extends Node<T, S>> root, int size, Context<T, S> context) {
this.root = root;
this.size = size;
this.context = context;
}
private RTree() {
this(Optional.empty(), 0, null);
}
/**
* Constructor.
*
* @param root
* the root node of the R-tree
     * @param size
     *            the number of entries in the tree
     * @param context
     *            options for the R-tree
*/
private RTree(Node<T, S> root, int size, Context<T, S> context) {
this(of(root), size, context);
}
static <T, S extends Geometry> RTree<T, S> create(Optional<? extends Node<T, S>> root, int size,
Context<T, S> context) {
return new RTree<T, S>(root, size, context);
}
/**
     * Returns a new Builder instance for {@link RTree}. Defaults to
     * maxChildren=4 ({@link #MAX_CHILDREN_DEFAULT_GUTTMAN}),
     * minChildren=round(0.4*maxChildren) and a quadratic splitter ({@link SplitterQuadratic}).
*
* @param <T>
* the value type of the entries in the tree
* @param <S>
* the geometry type of the entries in the tree
* @return a new RTree instance
*/
public static <T, S extends Geometry> RTree<T, S> create() {
return new Builder().create();
}
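    // Example (illustrative sketch): create a default R-tree, add a couple of point
    // entries and run a rectangle search. The variable names and coordinates are
    // placeholders chosen for illustration; the calls mirror the public API of this class.
    //
    //   RTree<String, Point> tree = RTree.create();
    //   tree = tree.add("a", Geometries.point(1, 2))
    //              .add("b", Geometries.point(5, 6));
    //   tree.search(Geometries.rectangle(0, 0, 4, 4))
    //       .subscribe(entry -> System.out.println(entry));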
/**
     * Construct an RTree through STR bulk loading. Defaults to maxChildren=4,
     * minChildren=round(0.4*maxChildren) and fills nodes by a factor of 0.7.
*
* @param entries
* entries to add to the R-tree
*
* @param <T>
* the value type of the entries in the tree
* @param <S>
* the geometry type of the entries in the tree
* @return a new RTree instance
*/
public static <T, S extends Geometry> RTree<T, S> create(List<Entry<T, S>> entries) {
return new Builder().create(entries);
}
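    // Example (illustrative sketch of STR bulk loading): assumes an Entries.entry(...)
    // helper as found in recent versions of the upstream rtree library; if that helper
    // is not available, entries can be built through the configured Factory instead.
    //
    //   List<Entry<String, Point>> entries = new ArrayList<>();
    //   entries.add(Entries.entry("a", Geometries.point(1, 2)));
    //   entries.add(Entries.entry("b", Geometries.point(3, 4)));
    //   RTree<String, Point> tree = RTree.create(entries); // packed using the STR method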
/**
* The tree is scanned for depth and the depth returned. This involves recursing
* down to the leaf level of the tree to get the current depth. Should be
* <code>log(n)</code> in complexity.
*
* @return depth of the R-tree
*/
public int calculateDepth() {
return calculateDepth(root);
}
private static <T, S extends Geometry> int calculateDepth(Optional<? extends Node<T, S>> root) {
return root.map(node -> calculateDepth(node, 0)).orElse(0);
}
private static <T, S extends Geometry> int calculateDepth(Node<T, S> node, int depth) {
if (node instanceof Leaf) {
return depth + 1;
} else {
return calculateDepth(((NonLeaf<T, S>) node).child(0), depth + 1);
}
}
/**
* When the number of children in an R-tree node drops below this number the
* node is deleted and the children are added on to the R-tree again.
*
* @param minChildren
* less than this number of children in a node triggers a node
* deletion and redistribution of its members
* @return builder
*/
public static Builder minChildren(int minChildren) {
return new Builder().minChildren(minChildren);
}
/**
* Sets the max number of children in an R-tree node.
*
* @param maxChildren
* max number of children in an R-tree node
* @return builder
*/
public static Builder maxChildren(int maxChildren) {
return new Builder().maxChildren(maxChildren);
}
/**
* Sets the {@link Splitter} to use when maxChildren is reached.
*
* @param splitter
* the splitter algorithm to use
* @return builder
*/
public static Builder splitter(Splitter splitter) {
return new Builder().splitter(splitter);
}
/**
* Sets the node {@link Selector} which decides which branches to follow when
* inserting or searching.
*
* @param selector
* determines which branches to follow when inserting or searching
* @return builder
*/
public static Builder selector(Selector selector) {
return new Builder().selector(selector);
}
/**
     * Sets the splitter to {@link SplitterRStar} and selector to
     * {@link SelectorRStar} (the R*-tree heuristics); the children limits keep
     * their defaults unless set explicitly.
*
* @return builder
*/
public static Builder star() {
return new Builder().star();
}
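    // Example (illustrative sketch): builder configuration. The limits below are
    // arbitrary values chosen for illustration only.
    //
    //   RTree<String, Point> starTree = RTree.star().maxChildren(6).create();
    //   RTree<String, Point> quadTree = RTree.minChildren(2).maxChildren(4)
    //           .splitter(new SplitterQuadratic()).create();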
/**
* RTree Builder.
*/
public static class Builder {
/**
* According to http://dbs.mathematik.uni-marburg.de/publications/myPapers
* /1990/BKSS90.pdf (R*-tree paper), best filling ratio is 0.4 for both
* quadratic split and R*-tree split.
*/
private static final double DEFAULT_FILLING_FACTOR = 0.4;
private static final double DEFAULT_LOADING_FACTOR = 0.7;
private Optional<Integer> maxChildren = Optional.empty();
private Optional<Integer> minChildren = Optional.empty();
private Splitter splitter = new SplitterQuadratic();
private Selector selector = new SelectorMinimalAreaIncrease();
private double loadingFactor;
private boolean star = false;
private Factory<Object, Geometry> factory = Factories.defaultFactory();
private Builder() {
loadingFactor = DEFAULT_LOADING_FACTOR;
}
/**
* The factor is used as the fill ratio during bulk loading.
*
* @param factor
* loading factor
* @return this
*/
public Builder loadingFactor(double factor) {
this.loadingFactor = factor;
return this;
}
/**
* When the number of children in an R-tree node drops below this number the
* node is deleted and the children are added on to the R-tree again.
*
* @param minChildren
* less than this number of children in a node triggers a
* redistribution of its children.
* @return builder
*/
public Builder minChildren(int minChildren) {
this.minChildren = of(minChildren);
return this;
}
/**
* Sets the max number of children in an R-tree node.
*
* @param maxChildren
* max number of children in R-tree node.
* @return builder
*/
public Builder maxChildren(int maxChildren) {
this.maxChildren = of(maxChildren);
return this;
}
/**
* Sets the {@link Splitter} to use when maxChildren is reached.
*
* @param splitter
* node splitting method to use
* @return builder
*/
public Builder splitter(Splitter splitter) {
this.splitter = splitter;
return this;
}
/**
* Sets the node {@link Selector} which decides which branches to follow when
* inserting or searching.
*
* @param selector
* selects the branch to follow when inserting or searching
* @return builder
*/
public Builder selector(Selector selector) {
this.selector = selector;
return this;
}
/**
         * Sets the splitter to {@link SplitterRStar} and selector to
         * {@link SelectorRStar} (the R*-tree heuristics); the children limits keep
         * their defaults unless set explicitly.
*
* @return builder
*/
public Builder star() {
selector = new SelectorRStar();
splitter = new SplitterRStar();
star = true;
return this;
}
@SuppressWarnings("unchecked")
public Builder factory(Factory<?, ? extends Geometry> factory) {
// TODO could change the signature of Builder to have types to
// support this method but would be breaking change for existing
// clients
this.factory = (Factory<Object, Geometry>) factory;
return this;
}
/**
* Builds the {@link RTree}.
*
* @param <T>
* value type
* @param <S>
* geometry type
* @return RTree
*/
@SuppressWarnings("unchecked")
public <T, S extends Geometry> RTree<T, S> create() {
setDefaultCapacity();
return new RTree<T, S>(Optional.<Node<T, S>>empty(), 0,
new Context<T, S>(minChildren.get(), maxChildren.get(), selector, splitter,
(Factory<T, S>) factory));
}
/**
* Create an RTree by bulk loading, using the STR method. STR: a simple and
* efficient algorithm for R-tree packing
* http://ieeexplore.ieee.org/abstract/document/582015/
* <p>
         * Note: this method mutates the input entries; the internal order of the List
         * may be changed.
* </p>
*
* @param entries
* entries to be added to the r-tree
* @return a loaded RTree
*/
@SuppressWarnings("unchecked")
public <T, S extends Geometry> RTree<T, S> create(List<Entry<T, S>> entries) {
setDefaultCapacity();
Context<T, S> context = new Context<T, S>(minChildren.get(), maxChildren.get(),
selector, splitter, (Factory<T, S>) factory);
return packingSTR(entries, true, entries.size(), context);
}
private void setDefaultCapacity() {
if (!maxChildren.isPresent()) {
if (star) {
maxChildren = Optional.of(MAX_CHILDREN_DEFAULT_STAR);
} else {
maxChildren = Optional.of(MAX_CHILDREN_DEFAULT_GUTTMAN);
}
}
if (!minChildren.isPresent()) {
minChildren = Optional.of((int) Math.round(maxChildren.get() * DEFAULT_FILLING_FACTOR));
}
}
@SuppressWarnings("unchecked")
private <T, S extends Geometry> RTree<T, S> packingSTR(List<? extends HasGeometry> objects,
boolean isLeaf, int size, Context<T, S> context) {
int capacity = (int) Math.round(maxChildren.get() * loadingFactor);
int nodeCount = (int) Math.ceil(1.0 * objects.size() / capacity);
if (nodeCount == 0) {
return create();
} else if (nodeCount == 1) {
Node<T, S> root;
if (isLeaf) {
root = context.factory().createLeaf((List<Entry<T, S>>) objects, context);
} else {
root = context.factory().createNonLeaf((List<Node<T, S>>) objects, context);
}
return new RTree<T, S>(of(root), size, context);
}
int nodePerSlice = (int) Math.ceil(Math.sqrt(nodeCount));
int sliceCapacity = nodePerSlice * capacity;
int sliceCount = (int) Math.ceil(1.0 * objects.size() / sliceCapacity);
Collections.sort(objects, new MidComparator((short) 0));
List<Node<T, S>> nodes = new ArrayList<Node<T, S>>(nodeCount);
for (int s = 0; s < sliceCount; s++) {
@SuppressWarnings("rawtypes")
List slice = objects.subList(s * sliceCapacity,
Math.min((s + 1) * sliceCapacity, objects.size()));
Collections.sort(slice, new MidComparator((short) 1));
for (int i = 0; i < slice.size(); i += capacity) {
if (isLeaf) {
List<Entry<T, S>> entries = slice.subList(i,
Math.min(slice.size(), i + capacity));
Node<T, S> leaf = context.factory().createLeaf(entries, context);
nodes.add(leaf);
} else {
List<Node<T, S>> children = slice.subList(i,
Math.min(slice.size(), i + capacity));
Node<T, S> nonleaf = context.factory().createNonLeaf(children, context);
nodes.add(nonleaf);
}
}
}
return packingSTR(nodes, false, size, context);
}
private static final class MidComparator implements Comparator<HasGeometry> {
private final short dimension; // leave space for multiple dimensions, 0 for x, 1 for y,
// ...
public MidComparator(short dim) {
dimension = dim;
}
@Override
public int compare(HasGeometry o1, HasGeometry o2) {
return Double.compare(mid(o1), mid(o2));
}
private double mid(HasGeometry o) {
Rectangle mbr = o.geometry().mbr();
if (dimension == 0)
return (mbr.x1() + mbr.x2()) / 2;
else
return (mbr.y1() + mbr.y2()) / 2;
}
}
}
/**
* Returns an immutable copy of the RTree with the addition of given entry.
*
* @param entry
* item to add to the R-tree.
* @return a new immutable R-tree including the new entry
*/
@SuppressWarnings("unchecked")
public RTree<T, S> add(Entry<? extends T, ? extends S> entry) {
if (root.isPresent()) {
List<Node<T, S>> nodes = root.get().add(entry);
Node<T, S> node;
if (nodes.size() == 1)
node = nodes.get(0);
else {
node = context.factory().createNonLeaf(nodes, context);
}
return new RTree<T, S>(node, size + 1, context);
} else {
Leaf<T, S> node = context.factory().createLeaf(Lists.newArrayList((Entry<T, S>) entry),
context);
return new RTree<T, S>(node, size + 1, context);
}
}
/**
* Returns an immutable copy of the RTree with the addition of an entry
* comprised of the given value and Geometry.
* <|fim▁hole|> * @param value
* the value of the {@link Entry} to be added
* @param geometry
* the geometry of the {@link Entry} to be added
* @return a new immutable R-tree including the new entry
*/
public RTree<T, S> add(T value, S geometry) {
return add(context.factory().createEntry(value, geometry));
}
/**
* Returns an immutable RTree with the current entries and the additional
* entries supplied as a parameter.
*
* @param entries
* entries to add
* @return R-tree with entries added
*/
public RTree<T, S> add(Iterable<Entry<T, S>> entries) {
RTree<T, S> tree = this;
for (Entry<T, S> entry : entries)
tree = tree.add(entry);
return tree;
}
/**
* Returns the Observable sequence of trees created by progressively adding
* entries.
*
* @param entries
* the entries to add
* @return a sequence of trees
*/
public Observable<RTree<T, S>> add(Observable<Entry<T, S>> entries) {
return entries.scan(this, (tree, entry) -> tree.add(entry));
}
/**
* Returns the Observable sequence of trees created by progressively deleting
* entries.
*
* @param entries
* the entries to add
* @param all
* if true delete all matching otherwise just first matching
* @return a sequence of trees
*/
public Observable<RTree<T, S>> delete(Observable<Entry<T, S>> entries, final boolean all) {
return entries.scan(this, new Func2<RTree<T, S>, Entry<T, S>, RTree<T, S>>() {
@Override
public RTree<T, S> call(RTree<T, S> tree, Entry<T, S> entry) {
return tree.delete(entry, all);
}
});
}
/**
* Returns a new R-tree with the given entries deleted. If <code>all</code> is
* false deletes only one if exists. If <code>all</code> is true deletes all
* matching entries.
*
* @param entries
* entries to delete
* @param all
* if false deletes one if exists else deletes all
* @return R-tree with entries deleted
*/
public RTree<T, S> delete(Iterable<Entry<T, S>> entries, boolean all) {
RTree<T, S> tree = this;
for (Entry<T, S> entry : entries)
tree = tree.delete(entry, all);
return tree;
}
/**
     * Returns a new R-tree with the given entries deleted; only one matching
     * occurrence of each entry is deleted.
*
* @param entries
* entries to delete
     * @return R-tree with entries deleted up to one matching occurrence per entry
*/
public RTree<T, S> delete(Iterable<Entry<T, S>> entries) {
RTree<T, S> tree = this;
for (Entry<T, S> entry : entries)
tree = tree.delete(entry);
return tree;
}
/**
* If <code>all</code> is false deletes one entry matching the given value and
* Geometry. If <code>all</code> is true deletes all entries matching the given
* value and geometry. This method has no effect if the entry is not present.
* The entry must match on both value and geometry to be deleted.
*
* @param value
* the value of the {@link Entry} to be deleted
* @param geometry
* the geometry of the {@link Entry} to be deleted
* @param all
* if false deletes one if exists else deletes all
     * @return a new immutable R-tree without one (or all) instances of the specified
     *         entry if it exists, otherwise returns the original RTree object
*/
public RTree<T, S> delete(T value, S geometry, boolean all) {
return delete(context.factory().createEntry(value, geometry), all);
}
/**
* Deletes maximum one entry matching the given value and geometry. This method
* has no effect if the entry is not present. The entry must match on both value
* and geometry to be deleted.
*
* @param value
* the value to be matched for deletion
* @param geometry
* the geometry to be matched for deletion
* @return an immutable RTree without one entry (if found) matching the given
* value and geometry
*/
public RTree<T, S> delete(T value, S geometry) {
return delete(context.factory().createEntry(value, geometry), false);
}
/**
* Deletes one or all matching entries depending on the value of
     * <code>all</code>. If multiple copies of the entry are in the R-tree, only one
     * will be deleted if <code>all</code> is false; otherwise all matching entries
     * will be deleted. The entry must match on both value and geometry to be deleted.
*
* @param entry
* the {@link Entry} to be deleted
* @param all
* if true deletes all matches otherwise deletes first found
* @return a new immutable R-tree without one instance of the specified entry
*/
public RTree<T, S> delete(Entry<? extends T, ? extends S> entry, boolean all) {
if (root.isPresent()) {
NodeAndEntries<T, S> nodeAndEntries = root.get().delete(entry, all);
if (nodeAndEntries.node().isPresent() && nodeAndEntries.node().get() == root.get())
return this;
else
return new RTree<T, S>(nodeAndEntries.node(),
size - nodeAndEntries.countDeleted() - nodeAndEntries.entriesToAdd().size(),
context).add(nodeAndEntries.entriesToAdd());
} else
return this;
}
/**
* Deletes one entry if it exists, returning an immutable copy of the RTree
* without that entry. If multiple copies of the entry are in the R-tree only
* one will be deleted. The entry must match on both value and geometry to be
* deleted.
*
* @param entry
* the {@link Entry} to be deleted
* @return a new immutable R-tree without one instance of the specified entry
*/
public RTree<T, S> delete(Entry<? extends T, ? extends S> entry) {
return delete(entry, false);
}
/**
* <p>
     * Returns an Observable sequence of the {@link Entry}s that satisfy the given
* condition. Note that this method is well-behaved only if:
*
*
* <p>
* {@code condition(g)} is true for {@link Geometry} g implies
* {@code condition(r)} is true for the minimum bounding rectangles of the
* ancestor nodes.
*
* <p>
* {@code distance(g) < D} is an example of such a condition.
*
*
* @param condition
* return Entries whose geometry satisfies the given condition
* @return sequence of matching entries
*/
@VisibleForTesting
Observable<Entry<T, S>> search(Func1<? super Geometry, Boolean> condition) {
return root
.map(node -> Observable.unsafeCreate(new OnSubscribeSearch<>(node, condition)))
.orElseGet(Observable::empty);
}
/**
* Returns a predicate function that indicates if {@link Geometry} intersects
* with a given rectangle.
*
* @param r
* the rectangle to check intersection with
* @return whether the geometry and the rectangle intersect
*/
public static Func1<Geometry, Boolean> intersects(final Rectangle r) {
return g -> g.intersects(r);
}
/**
* Returns the always true predicate. See {@link RTree#entries()} for example
* use.
*/
private static final Func1<Geometry, Boolean> ALWAYS_TRUE = rectangle -> true;
/**
* Returns an {@link Observable} sequence of all {@link Entry}s in the R-tree
* whose minimum bounding rectangle intersects with the given rectangle.
*
* @param r
* rectangle to check intersection with the entry mbr
* @return entries that intersect with the rectangle r
*/
public Observable<Entry<T, S>> search(final Rectangle r) {
return search(intersects(r));
}
/**
* Returns an {@link Observable} sequence of all {@link Entry}s in the R-tree
* whose minimum bounding rectangle intersects with the given point.
*
* @param p
* point to check intersection with the entry mbr
* @return entries that intersect with the point p
*/
public Observable<Entry<T, S>> search(final Point p) {
return search(p.mbr());
}
public Observable<Entry<T, S>> search(Circle circle) {
return search(circle, Intersects.geometryIntersectsCircle);
}
public Observable<Entry<T, S>> search(Line line) {
return search(line, Intersects.geometryIntersectsLine);
}
/**
* Returns an {@link Observable} sequence of all {@link Entry}s in the R-tree
* whose minimum bounding rectangles are strictly less than maxDistance from the
* given rectangle.
*
* @param r
* rectangle to measure distance from
* @param maxDistance
* entries returned must be within this distance from rectangle r
* @return the sequence of matching entries
*/
public Observable<Entry<T, S>> search(final Rectangle r, final double maxDistance) {
return search(g -> g.distance(r) < maxDistance);
}
/**
     * Returns the intersections with the given (arbitrary) geometry using an
* intersection function to filter the search results returned from a search of
* the mbr of <code>g</code>.
*
* @param <R>
* type of geometry being searched for intersection with
* @param g
* geometry being searched for intersection with
* @param intersects
* function to determine if the two geometries intersect
* @return a sequence of entries that intersect with g
*/
public <R extends Geometry> Observable<Entry<T, S>> search(final R g,
final Func2<? super S, ? super R, Boolean> intersects) {
return search(g.mbr()).filter(entry -> intersects.call(entry.geometry(), g));
}
/**
* Returns all entries strictly less than <code>maxDistance</code> from the
* given geometry. Because the geometry may be of an arbitrary type it is
* necessary to also pass a distance function.
*
* @param <R>
* type of the geometry being searched for
* @param g
* geometry to search for entries within maxDistance of
* @param maxDistance
* strict max distance that entries must be from g
* @param distance
* function to calculate the distance between geometries of type S
* and R.
* @return entries strictly less than maxDistance from g
*/
public <R extends Geometry> Observable<Entry<T, S>> search(final R g, final double maxDistance,
final Func2<? super S, ? super R, Double> distance) {
// just use the mbr initially
return search(entry -> entry.distance(g.mbr()) < maxDistance)
// refine with distance function
.filter(entry -> distance.call(entry.geometry(), g) < maxDistance);
}
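    // Example (illustrative sketch): distance-based search around a point with an
    // explicit distance function (here simply geometry-to-mbr distance). The query
    // point and radius are placeholders.
    //
    //   tree.search(Geometries.point(0, 0), 5.0,
    //           (geometry, queryPoint) -> geometry.distance(queryPoint.mbr()))
    //       .subscribe(entry -> System.out.println(entry));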
/**
* Returns an {@link Observable} sequence of all {@link Entry}s in the R-tree
* whose minimum bounding rectangles are within maxDistance from the given
* point.
*
* @param p
* point to measure distance from
* @param maxDistance
* entries returned must be within this distance from point p
* @return the sequence of matching entries
*/
public Observable<Entry<T, S>> search(final Point p, final double maxDistance) {
return search(p.mbr(), maxDistance);
}
/**
* Returns the nearest k entries (k=maxCount) to the given rectangle where the
* entries are strictly less than a given maximum distance from the rectangle.
*
* @param r
* rectangle
* @param maxDistance
* max distance of returned entries from the rectangle
* @param maxCount
* max number of entries to return
* @return nearest entries to maxCount, in ascending order of distance
*/
public Observable<Entry<T, S>> nearest(final Rectangle r, final double maxDistance,
int maxCount) {
return search(r, maxDistance).lift(new OperatorBoundedPriorityQueue<Entry<T, S>>(maxCount,
Comparators.<T, S>ascendingDistance(r)));
}
/**
* Returns the nearest k entries (k=maxCount) to the given point where the
* entries are strictly less than a given maximum distance from the point.
*
* @param p
* point
* @param maxDistance
* max distance of returned entries from the point
* @param maxCount
* max number of entries to return
* @return nearest entries to maxCount, in ascending order of distance
*/
public Observable<Entry<T, S>> nearest(final Point p, final double maxDistance, int maxCount) {
return nearest(p.mbr(), maxDistance, maxCount);
}
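    // Example (illustrative sketch): k-nearest query returning at most 3 entries
    // strictly within distance 10 of the query point, in ascending order of distance.
    //
    //   tree.nearest(Geometries.point(12, 25), 10, 3)
    //       .toList()
    //       .subscribe(nearest -> System.out.println(nearest));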
/**
* Returns all entries in the tree as an {@link Observable} sequence.
*
* @return all entries in the R-tree
*/
public Observable<Entry<T, S>> entries() {
return search(ALWAYS_TRUE);
}
/**
* Returns a {@link Visualizer} for an image of given width and height and
* restricted to the given view of the coordinates. The points in the view are
* scaled to match the aspect ratio defined by the width and height.
*
* @param width
* of the image in pixels
* @param height
* of the image in pixels
* @param view
* using the coordinate system of the entries
* @return visualizer
*/
@SuppressWarnings("unchecked")
public Visualizer visualize(int width, int height, Rectangle view) {
return new Visualizer((RTree<?, Geometry>) this, width, height, view);
}
/**
* Returns a {@link Visualizer} for an image of given width and height and
     * restricted to the smallest view that fully contains the coordinates. The
* points in the view are scaled to match the aspect ratio defined by the width
* and height.
*
* @param width
* of the image in pixels
* @param height
* of the image in pixels
* @return visualizer
*/
public Visualizer visualize(int width, int height) {
return visualize(width, height, calculateMaxView(this));
}
private Rectangle calculateMaxView(RTree<T, S> tree) {
@SuppressWarnings("unchecked")
Func2<Optional<Rectangle>, Entry<T, S>, Optional<Rectangle>> ra = //
(Func2<Optional<Rectangle>, Entry<T, S>, Optional<Rectangle>>) //
(Func2<?,?,?>) //
RECTANGLE_ACCUMULATOR;
return tree.entries()
.reduce(Optional.empty(), ra)
.toBlocking().single()
.orElse(ZERO_RECTANGLE);
}
public Optional<? extends Node<T, S>> root() {
return root;
}
/**
     * If the RTree has no entries returns {@link Optional#empty()}, otherwise returns
* the minimum bounding rectangle of all entries in the RTree.
*
* @return minimum bounding rectangle of all entries in RTree
*/
public Optional<Rectangle> mbr() {
return root.map(r -> r.geometry().mbr());
}
/**
* Returns true if and only if the R-tree is empty of entries.
*
* @return is R-tree empty
*/
public boolean isEmpty() {
return size == 0;
}
/**
* Returns the number of entries in the RTree.
*
* @return the number of entries
*/
public int size() {
return size;
}
/**
* Returns a {@link Context} containing the configuration of the RTree at the
* time of instantiation.
*
     * @return the configuration of the RTree at the time of instantiation
*/
public Context<T, S> context() {
return context;
}
/**
* Returns a human readable form of the RTree. Here's an example:
*
* <pre>
* mbr=Rectangle [x1=10.0, y1=4.0, x2=62.0, y2=85.0]
* mbr=Rectangle [x1=28.0, y1=4.0, x2=34.0, y2=85.0]
* entry=Entry [value=2, geometry=Point [x=29.0, y=4.0]]
* entry=Entry [value=1, geometry=Point [x=28.0, y=19.0]]
* entry=Entry [value=4, geometry=Point [x=34.0, y=85.0]]
* mbr=Rectangle [x1=10.0, y1=45.0, x2=62.0, y2=63.0]
* entry=Entry [value=5, geometry=Point [x=62.0, y=45.0]]
* entry=Entry [value=3, geometry=Point [x=10.0, y=63.0]]
* </pre>
*
* @return a string representation of the RTree
*/
public String asString() {
if (!root.isPresent())
return "";
else
return asString(root.get(), "");
}
private static final String MARGIN_INCREMENT = " ";
private String asString(Node<T, S> node, String margin) {
StringBuilder s = new StringBuilder();
s.append(margin);
s.append("mbr=");
s.append(node.geometry());
s.append('\n');
if (node instanceof NonLeaf) {
NonLeaf<T, S> n = (NonLeaf<T, S>) node;
for (int i = 0; i < n.count(); i++) {
Node<T, S> child = n.child(i);
s.append(asString(child, margin + MARGIN_INCREMENT));
}
} else {
Leaf<T, S> leaf = (Leaf<T, S>) node;
for (Entry<T, S> entry : leaf.entries()) {
s.append(margin);
s.append(MARGIN_INCREMENT);
s.append("entry=");
s.append(entry);
s.append('\n');
}
}
return s.toString();
}
}<|fim▁end|>
| |
<|file_name|>addon.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# coding=utf-8
import sys
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
import xbmcplugin
xbmcplugin.setContent(addon_handle, 'episodes')
import urlparse
args = urlparse.parse_qs(sys.argv[2][1:])
mode = args.get('mode', None)
from urllib import FancyURLopener, urlencode
class URLOpener(FancyURLopener):
version = 'Mozilla/5.0 (X11; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0 Iceweasel/31.0'
urlopen = URLOpener().open
urlmake = lambda query: base_url + '?' + urlencode(query)
rooturl = 'http://nick.walla.co.il'
def getpage(url):
if url.startswith('/'): url = rooturl + url
elif not url.startswith('http://'): url = rooturl + '/' + url
resets = 0
for tries in range(5):
try:
page = urlopen(url).read()
break
except IOError:
page = u''
if isinstance(page, str): page = page.decode('windows-1255', 'replace')
page = page.encode('utf-8')
return page
import re
vidregexp = re.compile(
'class="vitem.*?"',
re.DOTALL
)
nextregexp = re.compile(
'<a class="p_r" style="" href="(.+?)"'
)
def vidsfromseason(url):
page = getpage(url)
vids = vidregexp.findall(page)
for nexturl in nextregexp.findall(page):
vids += vidregexp.findall(getpage(nexturl))
return vids
def vidsfromshow(showurl):<|fim▁hole|> 'href="([^"]*)"[^>]*>[^<]*פרקים מלאים',
getpage(showurl)
)]
import xbmcgui
if mode is None:
for show in re.findall(
'<a href="([^"]+)" class="item right w3" style=".*?">([^<]+)</a>',
getpage('/')
):
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=urlmake({'mode': 'show', 'showurl': show[0]}),
listitem=xbmcgui.ListItem(show[1]),
isFolder=True
)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'show':
print(vidsfromshow(args['showurl'][0]))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url='/',
listitem=xbmcgui.ListItem('Video')
)
xbmcplugin.endOfDirectory(addon_handle)<|fim▁end|>
|
return [vidsfromseason(url) for url in re.findall(
|
<|file_name|>select-kernel.tsx<|end_file_name|><|fim▁begin|>/*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
// help users selecting a kernel
import { React, Component, Rendered } from "../app-framework";
import {
Map as ImmutableMap,
List,
OrderedMap /*, List as ImmutableList*/,
} from "immutable";
import * as misc from "smc-util/misc";
import { Icon, Loading } from "../r_misc";
const {
Button,
Col,
Row,
ButtonGroup,
Checkbox,
Alert,
} = require("react-bootstrap"); // TODO: import types
import { Kernel } from "./util";
const { COLORS } = require("smc-util/theme");
import { JupyterActions } from "./browser-actions";
const row_style: React.CSSProperties = {
marginTop: "5px",
marginBottom: "5px",
};
const main_style: React.CSSProperties = {
padding: "20px 10px",
overflowY: "auto",
overflowX: "hidden",
};
interface KernelSelectorProps {
actions: JupyterActions;
site_name: string;
kernel?: string;
kernel_info?: any;
default_kernel?: string;
ask_jupyter_kernel?: boolean;
kernel_selection?: ImmutableMap<string, string>;
kernels_by_name?: OrderedMap<string, ImmutableMap<string, string>>;
kernels_by_language?: OrderedMap<string, List<string>>;
closestKernel?: Kernel;
}
interface KernelSelectorState {}
export class KernelSelector extends Component<
KernelSelectorProps,
KernelSelectorState
> {
constructor(props: KernelSelectorProps, context: any) {
super(props, context);
this.state = {};
}
// the idea here is to not set the kernel, but still render the notebook.
// looks like that's not easy, and well, probably incompatible with classical jupyter.
/*
<Row style={row_style} className={"pull-right"}>
{this.close_button()}
</Row>
close_button() {
return (
<Button
key={"close"}
bsStyle={"default"}
onClick={() => this.props.actions.select_kernel(null)}
>
{"View without kernel"}
</Button>
);
}
*/
kernel_name(name: string): string | undefined {
return this.kernel_attr(name, "display_name");
}
kernel_attr(name: string, attr: string): string | undefined {
if (this.props.kernels_by_name == null) return undefined;
const k = this.props.kernels_by_name.get(name);
if (k == null) return undefined;
return k.get(attr, name);
}
render_suggested_link(cocalc) {
if (cocalc == null) return;
const url: string | undefined = cocalc.get("url");
const descr: string | undefined = cocalc.get("description", "");
if (url != null) {
return (
<a href={url} target={"_blank"} rel={"noopener"}>
{descr}
</a>
);
} else {
return descr;
}
}
render_kernel_button(
name: string,
size?: string,
show_icon: boolean = true
): Rendered {
const lang = this.kernel_attr(name, "language");
let icon: Rendered | undefined = undefined;
if (lang != null && show_icon) {
if (["python", "r", "sagemath", "octave", "julia"].indexOf(lang) >= 0) {
icon = <Icon name={`cc-icon-${lang}`} />;
} else if (lang.startsWith("bash")) {
icon = <Icon name={"terminal"} />;
}
// TODO do other languages have icons?
}
return (
<Button
key={`kernel-${lang}-${name}`}
onClick={() => this.props.actions.select_kernel(name)}
bsSize={size}
style={{ marginBottom: "5px" }}
>
{icon} {this.kernel_name(name) || name}
</Button>
);
}
render_suggested() {
if (
this.props.kernel_selection == null ||
this.props.kernels_by_name == null
)
return;
const entries: Rendered[] = [];
const kbn = this.props.kernels_by_name;
this.props.kernel_selection
.sort((a, b) => {
// try to find the display name, otherwise fallback to kernel ID
const name_a = this.kernel_name(a) || a;
const name_b = this.kernel_name(b) || b;
return name_a.localeCompare(name_b);
})
.map((name, lang) => {
const cocalc: ImmutableMap<string, any> = kbn.getIn(
[name, "metadata", "cocalc"],
null
);
if (cocalc == null) return;
const prio: number = cocalc.get("priority", 0);
// drop those below 10, priority is too low
if (prio < 10) return;
entries.push(
<Row key={lang} style={row_style}>
<Col sm={6}>{this.render_kernel_button(name)}</Col>
<Col sm={6}>
<div>{this.render_suggested_link(cocalc)}</div>
</Col>
</Row>
);
});
if (entries.length == 0) return;
return (
<Row style={row_style}>
<h4>Suggested kernels</h4>
<Col>{entries}</Col>
</Row>
);
}
private render_custom(): Rendered {
return (
<Row style={row_style}>
<h4>Custom kernels</h4>
<a onClick={() => this.props.actions.custom_jupyter_kernel_docs()}>
How to create a custom kernel...
</a>
</Row>
);
}
// render_all_selected_link() {
// if (this.props.kernels_by_name == null) return;
// const name = this.state.selected_kernel;
// if (name == null) return;
// const cocalc: ImmutableMap<string, any> = this.props.kernels_by_name.getIn(
// [name, "metadata", "cocalc"],
// null
// );
// return this.render_suggested_link(cocalc);
// }
render_all_langs(): Rendered[] | undefined {
if (this.props.kernels_by_language == null) return;
const label: React.CSSProperties = {
fontWeight: "bold",
color: COLORS.GRAY_D,
};
const all: Rendered[] = [];
this.props.kernels_by_language.forEach((names, lang) => {
const kernels = names.map((name) =>
this.render_kernel_button(name, "small", false)
);
all.push(
<Row key={lang} style={row_style}>
<Col sm={2} style={label}>
{misc.capitalize(lang)}
</Col>
<Col sm={10}>
<ButtonGroup>{kernels}</ButtonGroup>
</Col>
</Row>
);
return true;
});
return all;
}
render_all() {
if (this.props.kernels_by_language == null) return;
return (
<Row style={row_style}>
<h4>All kernels by language</h4>
<Col>{this.render_all_langs()}</Col>
</Row>
);
}
render_last() {
const name = this.props.default_kernel;
if (name == null) return;
if (this.props.kernels_by_name == null) return;
// also don't render "last", if we do not know that kernel!
if (!this.props.kernels_by_name.has(name)) return;
return (
<Row style={row_style}>
<h4>Quick selection</h4>
<div>
Your most recently selected kernel is{" "}
{this.render_kernel_button(name)}.
</div>
</Row>
);
}
dont_ask_again_click(checked: boolean) {
this.props.actions.kernel_dont_ask_again(checked);<|fim▁hole|> <Row style={row_style}>
<div>
<Checkbox
checked={!this.props.ask_jupyter_kernel}
onChange={(e) => this.dont_ask_again_click(e.target.checked)}
>
Do not ask, instead default to your most recent selection (you can
always show this screen again by clicking on the kernel name in the
upper right)
</Checkbox>
</div>
</Row>
);
}
render_top() {
if (this.props.kernel == null || this.props.kernel_info == null) {
let msg: Rendered;
// kernel, but no info means it is not known
if (this.props.kernel != null && this.props.kernel_info == null) {
msg = (
<>
Your notebook kernel <code>"{this.props.kernel}"</code> does not
exist on {this.props.site_name}.
</>
);
} else {
msg = <>This notebook has no kernel.</>;
}
return (
<Row style={row_style}>
<strong>{msg}</strong> A working kernel is required in order to
evaluate the code in the notebook. Please select one for the
programming language you want to work with.
</Row>
);
} else {
const name = this.kernel_name(this.props.kernel);
const current =
name != null ? <> The currently selected kernel is "{name}".</> : "";
return (
<Row style={row_style}>
<strong>Select a new kernel.</strong>
{current}
</Row>
);
}
}
render_unknown() {
const closestKernel = this.props.closestKernel;
if (this.props.kernel_info != null || closestKernel == null) return;
const closestKernelName = closestKernel.get("name");
if (closestKernelName == null) return;
return (
<Row style={row_style}>
<Alert bsStyle={"danger"}>
<h4>Unknown Kernel</h4>
<div>
A similar kernel might be{" "}
{this.render_kernel_button(closestKernelName)}.
</div>
</Alert>
</Row>
);
}
render_footer(): Rendered {
return (
<Row style={{ ...row_style, ...{ color: COLORS.GRAY } }}>
<strong>Note:</strong> You can always change the selected kernel later
in the »Kernel« menu or by clicking on the kernel information at the top
right.
</Row>
);
}
render_close_button(): Rendered | undefined {
if (this.props.kernel == null || this.props.kernel_info == null) return;
return (
<Button
style={{ float: "right" }}
onClick={() => this.props.actions.hide_select_kernel()}
>
Close
</Button>
);
}
render_body(): Rendered {
if (
this.props.kernels_by_name == null ||
this.props.kernel_selection == null
) {
return (
<Row style={row_style}>
<Loading />
</Row>
);
} else {
return (
<>
{this.render_top()}
{this.render_unknown()}
{this.render_last()}
{this.render_dont_ask_again()}
{this.render_suggested()}
{this.render_all()}
{this.render_custom()}
<hr />
{this.render_footer()}
</>
);
}
}
render_head(): Rendered {
return (
<Row style={row_style}>
<h3>
{"Select a Kernel"}
{this.render_close_button()}
</h3>
</Row>
);
}
render(): Rendered {
return (
<div style={main_style} className={"smc-vfill"}>
<Col md={8} mdOffset={2}>
{this.render_head()}
{this.render_body()}
</Col>
</div>
);
}
}<|fim▁end|>
|
}
render_dont_ask_again() {
return (
|
<|file_name|>cube.py<|end_file_name|><|fim▁begin|>class cube():
CUBE_STATUS = "Off"
PATTERN = "Hello"
def __init__(self):
pass
@staticmethod
def status():
"""
Return dictionary of details about the cube
:return: Dictionary
"""
return {<|fim▁hole|> 'status': cube.CUBE_STATUS
}<|fim▁end|>
|
'pattern': cube.PATTERN,
|
<|file_name|>PALPIDEFSProducer.hh<|end_file_name|><|fim▁begin|>//
// Producer for the ALICE pALPIDEfs chip
// Author: [email protected]
//
#include "eudaq/Producer.hh"
#include "eudaq/Configuration.hh"
#include <mutex>
#include <thread>
#include <queue>
#include <tinyxml.h>
// pALPIDEfs driver
#include "TTestsetup.h"
// #define DEBUG_USB
struct SingleEvent {
SingleEvent(unsigned int length, uint64_t trigger_id, uint64_t timestamp, uint64_t timestamp_reference)
: m_buffer(0), m_length(length), m_trigger_id(trigger_id),
m_timestamp(timestamp), m_timestamp_reference(timestamp_reference) {
m_buffer = new unsigned char[length];
}
~SingleEvent() {
delete[] m_buffer;
m_buffer = 0;
}
unsigned char* m_buffer;
unsigned int m_length;
uint64_t m_trigger_id;
uint64_t m_timestamp;
uint64_t m_timestamp_reference;
};
class SimpleLock {
public:
SimpleLock(std::mutex &mutex) : m_mutex(mutex) { mutex.lock(); }
~SimpleLock() { m_mutex.unlock(); }
protected:
std::mutex &m_mutex;
};
class DeviceReader {
public:
DeviceReader(int id, int debuglevel, TTestSetup* test_setup, int boardid,
TDAQBoard* daq_board, TpAlpidefs* dut, std::vector<unsigned char>* raw_data=0x0);
~DeviceReader();
void SetMaxQueueSize(unsigned long size) { m_max_queue_size = size; }
void SetQueueFullDelay(int delay) { m_queuefull_delay = delay; }
void Stop();
void SetRunning(bool running);
void StartDAQ();
void StopDAQ();
void DeleteNextEvent();
SingleEvent* PopNextEvent();
void PrintQueueStatus();
int GetQueueLength() {
SimpleLock lock(m_mutex);
return m_queue.size();
}
static void* LoopWrapper(void* arg);
TDAQBoard* GetDAQBoard() { return m_daq_board; }
TpAlpidefs* GetDUT() { return m_dut; }
float GetTemperature();
void ParseXML(TiXmlNode* node, int base, int rgn, bool readwrite);
void PrintDAQboardStatus() {
m_daq_board->ReadAllRegisters();
m_daq_board->ReadMonitorRegisters();
}
void RequestThresholdScan() {
SimpleLock lock(m_mutex);
m_threshold_scan_result = 0;
m_threshold_scan_rqst = true;
}
int GetThresholdScanState() {
SimpleLock lock(m_mutex);
return m_threshold_scan_result;
}
void SetupThresholdScan(int NMaskStage, int NEvts, int ChStart, int ChStop,
int ChStep, unsigned char*** Data,
unsigned char* Points);
bool IsWaitingForEOR() {
SimpleLock lock(m_mutex);
return m_waiting_for_eor;
}
bool IsReading() {
SimpleLock lock(m_mutex);
return m_reading;
}
bool IsFlushing() {
SimpleLock lock(m_mutex);
return m_flushing;
}
protected:
void Loop();
void Print(int level, const char* text, uint64_t value1 = -1,
uint64_t value2 = -1, uint64_t value3 = -1, uint64_t value4 = -1);
bool IsStopping() {
SimpleLock lock(m_mutex);
return m_stop;
}
bool IsRunning() {
SimpleLock lock(m_mutex);
return m_running;
}
void SetReading(bool reading) {
SimpleLock lock(m_mutex);
m_reading = reading;
}
void SetStopping() {
SimpleLock lock(m_mutex);
m_stop = true;
}
bool IsThresholdScanRqsted() {
SimpleLock lock(m_mutex);
return m_threshold_scan_rqst;
}
void Push(SingleEvent* ev);<|fim▁hole|> void PrepareMaskStage(TAlpidePulseType APulseType, int AMaskStage, int steps);
std::queue<SingleEvent* > m_queue;
unsigned long m_queue_size;
std::thread m_thread;
std::mutex m_mutex;
std::vector<unsigned char>* m_raw_data;
bool m_stop;
bool m_stopped;
bool m_running;
bool m_flushing;
bool m_reading;
bool m_waiting_for_eor;
bool m_threshold_scan_rqst;
int m_threshold_scan_result; // 0 = not running, 1 = running, 2 = error, 3 =
// success
int m_id;
int m_boardid; // id of the DAQ board as used by TTestSetup::GetDAQBoard()...
int m_debuglevel;
uint64_t m_last_trigger_id;
uint64_t m_timestamp_reference;
TTestSetup* m_test_setup;
TDAQBoard* m_daq_board;
TpAlpidefs* m_dut;
int m_daq_board_header_length;
int m_daq_board_trailer_length;
// config
int m_queuefull_delay; // milliseconds
unsigned long m_max_queue_size; // queue size in B
// S-Curve scan
int m_n_mask_stages;
int m_n_events;
int m_ch_start;
int m_ch_stop;
int m_ch_step;
unsigned char*** m_data;
unsigned char* m_points;
};
class PALPIDEFSProducer : public eudaq::Producer {
public:
PALPIDEFSProducer(const std::string &name, const std::string &runcontrol,
int debuglevel = 0)
: eudaq::Producer(name, runcontrol), m_run(0), m_ev(0), m_good_ev(0),
m_oos_ev(0), m_last_oos_ev(0), m_timestamp_last(0x0), m_timestamp_full(0x0),
m_done(false), m_running(false), m_stopping(false), m_flushing(false),
m_configured(false), m_firstevent(false), m_reader(0), m_next_event(0),
m_debuglevel(debuglevel), m_testsetup(0), m_raw_data(0x0), m_mutex(), m_param(), m_nDevices(0),
m_status_interval(-1), m_full_config_v1(), m_full_config_v2(),
m_full_config_v3(), m_full_config_v4(), m_ignore_trigger_ids(true),
m_recover_outofsync(true), m_chip_type(0x0),
m_strobe_length(0x0), m_strobeb_length(0x0), m_trigger_delay(0x0),
m_readout_delay(0x0), m_chip_readoutmode(0x0),
m_monitor_PSU(false), m_back_bias_voltage(-1),
m_dut_pos(-1.), m_dut_angle1(-1.), m_dut_angle2(-1.),
m_SCS_charge_start(-1), m_SCS_charge_stop(-1),
m_SCS_charge_step(-1), m_SCS_n_events(-1), m_SCS_n_mask_stages(-1),
m_SCS_n_steps(-1), m_do_SCS(0x0), m_SCS_data(0x0), m_SCS_points(0x0) {}
~PALPIDEFSProducer() { PowerOffTestSetup(); }
virtual void OnConfigure(const eudaq::Configuration ¶m);
virtual void OnStartRun(unsigned param);
virtual void OnStopRun();
virtual void OnTerminate();
virtual void OnReset();
virtual void OnStatus() {}
virtual void OnUnrecognised(const std::string &cmd, const std::string ¶m);
void Loop();
protected:
bool InitialiseTestSetup(const eudaq::Configuration ¶m);
bool PowerOffTestSetup();
bool DoSCurveScan(const eudaq::Configuration ¶m);
void SetBackBiasVoltage(const eudaq::Configuration ¶m);
void ControlLinearStage(const eudaq::Configuration ¶m);
void ControlRotaryStages(const eudaq::Configuration ¶m);
bool ConfigChip(int id, TpAlpidefs *dut, std::string configFile);
int BuildEvent();
void SendEOR();
void SendStatusEvent();
void PrintQueueStatus();
void PrepareMaskStage(TAlpidePulseType APulseType, int AMaskStage,
int nPixels, int*** Data);
bool IsRunning() {
SimpleLock lock(m_mutex);
return m_running;
}
bool IsStopping() {
SimpleLock lock(m_mutex);
return m_stopping;
}
bool IsFlushing() {
SimpleLock lock(m_mutex);
return m_flushing;
}
bool IsDone() {
SimpleLock lock(m_mutex);
return m_done;
}
bool IsConfiguring() {
SimpleLock lock(m_mutex);
return m_configuring;
}
unsigned m_run, m_ev, m_good_ev, m_oos_ev, m_last_oos_ev;
uint64_t *m_timestamp_last;
uint64_t *m_timestamp_full;
bool m_done;
bool m_running;
bool m_stopping;
bool m_flushing;
bool m_configuring;
bool m_configured;
bool m_firstevent;
DeviceReader** m_reader;
SingleEvent** m_next_event;
int m_debuglevel;
std::mutex m_mutex;
TTestSetup* m_testsetup;
std::vector<unsigned char>** m_raw_data;
// config
eudaq::Configuration m_param;
int m_nDevices;
int m_status_interval;
std::string m_full_config_v1;
std::string m_full_config_v2;
std::string m_full_config_v3;
std::string m_full_config_v4;
bool m_ignore_trigger_ids;
bool m_recover_outofsync;
int* m_chip_type;
int* m_strobe_length;
int* m_strobeb_length;
int* m_trigger_delay;
int* m_readout_delay;
int* m_chip_readoutmode;
bool m_monitor_PSU;
float m_back_bias_voltage;
float m_dut_pos;
float m_dut_angle1;
float m_dut_angle2;
// S-Curve scan settings
int m_SCS_charge_start;
int m_SCS_charge_stop;
int m_SCS_charge_step;
int m_SCS_n_events;
int m_SCS_n_mask_stages;
int m_SCS_n_steps;
bool* m_do_SCS;
int m_n_trig;
float m_period;
// S-Curve scan output data
unsigned char**** m_SCS_data;
unsigned char** m_SCS_points;
};<|fim▁end|>
|
bool QueueFull();
bool ThresholdScan();
|
<|file_name|>lineMaps.component.ts<|end_file_name|><|fim▁begin|>import {Component, ViewEncapsulation} from '@angular/core';
import {BaCard, BaAmChart} from '../../../../theme/components';
import {LineMapsService} from './lineMaps.service';
@Component({
selector: 'line-maps',
pipes: [],
providers: [LineMapsService],
encapsulation: ViewEncapsulation.None,
styles: [require('./lineMaps.scss')],
template: require('./lineMaps.html'),<|fim▁hole|>})
export class LineMaps {
chartData:Object;
constructor(private _lineMapsService:LineMapsService) {
this.chartData = this._lineMapsService.getData();
}
}<|fim▁end|>
|
directives: [BaCard, BaAmChart],
|
<|file_name|>constants.js<|end_file_name|><|fim▁begin|>export const APP_LAYOUT_INIT = 'APP_LAYOUT_INIT';<|fim▁hole|><|fim▁end|>
|
export const APP_LAYOUT_CHANGE = 'APP_LAYOUT_CHANGE';
|
<|file_name|>user.go<|end_file_name|><|fim▁begin|>package model
<|fim▁hole|>import "time"
type User struct {
Id int64 `json:"-"`
Email string `json:"email,omitempty"`
PasswordHash string `json:"-"`
Active bool `json:"active,omitempty"`
UpdateToken string `json:"update_token,omitempty"`
NotificationEnabled bool `json:"notification_enabled"`
Timestamp time.Time `json:"timestamp,omitempty"`
SubscriptionId *string `json:"subscription_id,omitempty"`
}<|fim▁end|>
| |
<|file_name|>run_script.js<|end_file_name|><|fim▁begin|>module.exports = {
//---------------------------------------------------------------------
// Action Name
//
// This is the name of the action displayed in the editor.
//---------------------------------------------------------------------
name: "Run Script",
//---------------------------------------------------------------------
// Action Section
//
// This is the section the action will fall into.
//---------------------------------------------------------------------
section: "Other Stuff",
//---------------------------------------------------------------------
// Action Subtitle
//
// This function generates the subtitle displayed next to the name.
//---------------------------------------------------------------------
subtitle: function(data) {
return `${data.code}`;
},
//---------------------------------------------------------------------
// Action Storage Function
//
// Stores the relevant variable info for the editor.
//---------------------------------------------------------------------
variableStorage: function(data, varType) {
const type = parseInt(data.storage);
if(type !== varType) return;
return ([data.varName, 'Unknown Type']);
},
//---------------------------------------------------------------------
// Action Fields
//
// These are the fields for the action. These fields are customized
// by creating elements with corresponding IDs in the HTML. These
// are also the names of the fields stored in the action's JSON data.
//---------------------------------------------------------------------
fields: ["behavior", "interpretation", "code", "storage", "varName"],
//---------------------------------------------------------------------
// Command HTML
//
// This function returns a string containing the HTML used for
// editing actions.
//
// The "isEvent" parameter will be true if this action is being used
// for an event. Due to their nature, events lack certain information,
// so edit the HTML to reflect this.
//
// The "data" parameter stores constants for select elements to use.
// Each is an array: index 0 for commands, index 1 for events.
// The names are: sendTargets, members, roles, channels,
// messages, servers, variables
//---------------------------------------------------------------------
html: function(isEvent, data) {
return `
<div>
<div style="float: left; width: 45%;">
End Behavior:<br>
<select id="behavior" class="round">
<option value="0" selected>Call Next Action Automatically</option>
<option value="1">Do Not Call Next Action</option>
</select>
</div>
<div style="padding-left: 5%; float: left; width: 55%;">
Interpretation Style:<br>
<select id="interpretation" class="round">
<option value="0" selected>Evaluate Text First</option>
<option value="1">Evaluate Text Directly</option>
</select>
</div>
</div><br><br><br>
<div style="padding-top: 8px;">
Custom Code:<br>
<textarea id="code" rows="9" name="is-eval" style="width: 99%; white-space: nowrap; resize: none;"></textarea>
</div><br>
<div>
<div style="float: left; width: 35%;">
Store In:<br>
<select id="storage" class="round" onchange="glob.variableChange(this, 'varNameContainer')">
${data.variables[0]}
</select>
</div>
<div id="varNameContainer" style="display: none; float: right; width: 60%;">
Variable Name:<br>
<input id="varName" class="round" type="text">
</div>
</div>`<|fim▁hole|>// Action Editor Init Code
//
// When the HTML is first applied to the action editor, this code
// is also run. This helps add modifications or setup reactionary
// functions for the DOM elements.
//---------------------------------------------------------------------
init: function() {
const {glob, document} = this;
glob.variableChange(document.getElementById('storage'), 'varNameContainer');
},
//---------------------------------------------------------------------
// Action Bot Function
//
// This is the function for the action within the Bot's Action class.
// Keep in mind event calls won't have access to the "msg" parameter,
// so be sure to provide checks for variable existence.
//---------------------------------------------------------------------
action: function(cache) {
const data = cache.actions[cache.index];
let code;
if(data.interpretation === "0") {
code = this.evalMessage(data.code, cache);
} else {
code = data.code;
}
const result = this.eval(code, cache);
const varName = this.evalMessage(data.varName, cache);
const storage = parseInt(data.storage);
this.storeValue(result, storage, varName, cache);
if(data.behavior === "0") {
this.callNextAction(cache);
}
},
//---------------------------------------------------------------------
// Action Bot Mod
//
// Upon initialization of the bot, this code is run. Using the bot's
// DBM namespace, one can add/modify existing functions if necessary.
// In order to reduce conflicts between mods, be sure to alias
// functions you wish to overwrite.
//---------------------------------------------------------------------
mod: function(DBM) {
}
}; // End of module<|fim▁end|>
|
},
//---------------------------------------------------------------------
|
<|file_name|>DiscordChannelPartEvent.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2016-2022 phantombot.github.io/PhantomBot
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or<|fim▁hole|> * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package tv.phantombot.event.discord.channel;
import discord4j.core.object.entity.User;
public class DiscordChannelPartEvent extends DiscordChannelEvent {
/**
* Class constructor.
*
* @param {IUser} user
*/
public DiscordChannelPartEvent(User user) {
super(user);
}
}<|fim▁end|>
|
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
<|file_name|>unit_test_inverse_dynamics_balance_controller.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 16:04:18 2017
@author: adelpret
"""
import pinocchio as se3
import numpy as np
from pinocchio import RobotWrapper
from conversion_utils import config_sot_to_urdf, joints_sot_to_urdf, velocity_sot_to_urdf
from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
from dynamic_graph.sot.torque_control.create_entities_utils import create_ctrl_manager
import dynamic_graph.sot.torque_control.hrp2.balance_ctrl_sim_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_sim_conf as control_manager_conf
from dynamic_graph.sot.torque_control.tests.robot_data_test import initRobotData
np.set_printoptions(precision=3, suppress=True, linewidth=100);
def create_balance_controller(dt, q, conf, robot_name='robot'):
ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
ctrl.q.value = tuple(q);
ctrl.v.value = (NJ+6)*(0.0,);
ctrl.wrench_right_foot.value = 6*(0.0,);
ctrl.wrench_left_foot.value = 6*(0.0,);
ctrl.posture_ref_pos.value = tuple(q[6:]);
ctrl.posture_ref_vel.value = NJ*(0.0,);
ctrl.posture_ref_acc.value = NJ*(0.0,);
ctrl.com_ref_pos.value = (0., 0., 0.8);
ctrl.com_ref_vel.value = 3*(0.0,);
ctrl.com_ref_acc.value = 3*(0.0,);
# ctrl.rotor_inertias.value = np.array(conf.ROTOR_INERTIAS);
# ctrl.gear_ratios.value = conf.GEAR_RATIOS;
ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in zip(conf.GEAR_RATIOS, conf.ROTOR_INERTIAS)])
ctrl.gear_ratios.value = NJ*(1.0,);
ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
ctrl.f_min.value = conf.fMin;
ctrl.f_max_right_foot.value = conf.fMax;
ctrl.f_max_left_foot.value = conf.fMax;
ctrl.mu.value = conf.mu[0];
ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
ctrl.kp_com.value = 3*(conf.kp_com,);
ctrl.kd_com.value = 3*(conf.kd_com,);
ctrl.kp_constraints.value = 6*(conf.kp_constr,);<|fim▁hole|> ctrl.kd_constraints.value = 6*(conf.kd_constr,);
ctrl.kp_feet.value = 6*(conf.kp_feet,);
ctrl.kd_feet.value = 6*(conf.kd_feet,);
ctrl.kp_posture.value = conf.kp_posture;
ctrl.kd_posture.value = conf.kd_posture;
ctrl.kp_pos.value = conf.kp_pos;
ctrl.kd_pos.value = conf.kd_pos;
ctrl.w_com.value = conf.w_com;
ctrl.w_feet.value = conf.w_feet;
ctrl.w_forces.value = conf.w_forces;
ctrl.w_posture.value = conf.w_posture;
ctrl.w_base_orientation.value = conf.w_base_orientation;
ctrl.w_torques.value = conf.w_torques;
ctrl.active_joints.value = NJ*(1,);
ctrl.init(dt, robot_name);
return ctrl;
print "*** UNIT TEST FOR INVERSE-DYNAMICS-BALANCE-CONTROLLER (IDBC) ***"
print "This test computes the torques using the IDBC and compares them with"
print "the torques computed using the desired joint accelerations and contact"
print "wrenches computed by the IDBC. The two values should be identical."
print "Some small differences are expected due to the precision loss when"
print "Passing the parameters from python to c++."
print "However, none of the following values should be larger than 1e-3.\n"
N_TESTS = 100
dt = 0.001;
NJ = initRobotData.nbJoints
# robot configuration
q_sot = np.array([-0.0027421149619457344, -0.0013842807952574399, 0.6421082804660067,
-0.0005693871512031474, -0.0013094048521806974, 0.0028568508070167,
-0.0006369040657361668, 0.002710094953239396, -0.48241992906618536, 0.9224570746372157, -0.43872624301275104, -0.0021586727954009096,
-0.0023395862060549863, 0.0031045906573987617, -0.48278188636903313, 0.9218508861779927, -0.4380058166724791, -0.0025558837738616047,
-0.012985322450541008, 0.04430420221275542, 0.37027327677517635, 1.4795064165303056,
0.20855551221055582, -0.13188842278441873, 0.005487207370709895, -0.2586657542648506, 2.6374918629921953, -0.004223605878088189, 0.17118034021053144, 0.24171737354070008, 0.11594430024547904, -0.05264225067057105, -0.4691871937149223, 0.0031522040623960016, 0.011836097472447007, 0.18425595002313025]);
ctrl_manager = create_ctrl_manager(control_manager_conf, dt);
ctrl = create_balance_controller(dt, q_sot, balance_ctrl_conf);
robot = RobotWrapper(initRobotData.testRobotPath, [], se3.JointModelFreeFlyer())
index_rf = robot.index('RLEG_JOINT5');
index_lf = robot.index('LLEG_JOINT5');
Md = np.matrix(np.zeros((NJ+6,NJ+6)));
gr = joints_sot_to_urdf(balance_ctrl_conf.GEAR_RATIOS);
ri = joints_sot_to_urdf(balance_ctrl_conf.ROTOR_INERTIAS);
for i in range(NJ):
Md[6+i,6+i] = ri[i] * gr[i] * gr[i];
for i in range(N_TESTS):
q_sot += 0.001*np.random.random(NJ+6);
v_sot = np.random.random(NJ+6);
q_pin = np.matrix(config_sot_to_urdf(q_sot));
v_pin = np.matrix(velocity_sot_to_urdf(v_sot));
ctrl.q.value = tuple(q_sot);
ctrl.v.value = tuple(v_sot);
ctrl.tau_des.recompute(i);
tau_ctrl = joints_sot_to_urdf(np.array(ctrl.tau_des.value));
ctrl.dv_des.recompute(i);
dv = velocity_sot_to_urdf(np.array(ctrl.dv_des.value));
M = Md + robot.mass(q_pin);
h = robot.bias(q_pin, v_pin);
ctrl.f_des_right_foot.recompute(i);
ctrl.f_des_left_foot.recompute(i);
f_rf = np.matrix(ctrl.f_des_right_foot.value).T;
f_lf = np.matrix(ctrl.f_des_left_foot.value).T;
J_rf = robot.jacobian(q_pin, index_rf);
J_lf = robot.jacobian(q_pin, index_lf);
tau_pin = M*np.matrix(dv).T + h - J_rf.T * f_rf - J_lf.T * f_lf;
# ctrl.M.recompute(i);
# M_ctrl = np.array(ctrl.M.value);
print "norm(tau_ctrl-tau_pin) = %.4f"% np.linalg.norm(tau_ctrl - tau_pin[6:,0].T);
print "norm(tau_pin[:6]) = %.4f"% np.linalg.norm(tau_pin[:6]);
# print "q_pin:\n", q_pin;
# print "tau_pin:\n", tau_pin[6:,0].T, "\n";
# print "tau ctrl:\n", tau_ctrl.T, "\n";
# print "dv = ", np.linalg.norm(dv);
# print "f_rf:", f_rf.T, "\n";
# print "f_lf:", f_lf.T, "\n";
# print "h:", h.T, "\n";
# M_err = M-M_ctrl
# print "M-M_ctrl = ", M_err.diagonal(), "\n"
# for j in range(NJ+6):
# print M_err[j,:];<|fim▁end|>
| |
<|file_name|>test_controllers.py<|end_file_name|><|fim▁begin|>import json
import re
import tg<|fim▁hole|>pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
class _TestCase(TestController):
def setUp(self):
super(_TestCase, self).setUp()
self.setup_with_tools()
@td.with_git
def setup_with_tools(self):
h.set_context('test', 'src-git', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgegit', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'testgit.git'
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-git', neighborhood='Projects')
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
class TestRootController(_TestCase):
def test_index(self):
resp = self.app.get('/src-git/').follow().follow()
assert 'git://' in resp
def test_index_empty(self):
self.app.get('/git/')
def test_commit_browser(self):
resp = self.app.get('/src-git/commit_browser')
def test_commit_browser_data(self):
resp = self.app.get('/src-git/commit_browser_data')
data = json.loads(resp.body);
assert data['max_row'] == 3
assert data['next_column'] == 1
assert_equal(data['built_tree']['df30427c488aeab84b2352bdf88a3b19223f9d7a'],
{u'url': u'/p/test/src-git/ci/df30427c488aeab84b2352bdf88a3b19223f9d7a/',
u'oid': u'df30427c488aeab84b2352bdf88a3b19223f9d7a',
u'column': 0,
u'parents': [u'6a45885ae7347f1cac5103b0050cc1be6a1496c8'],
u'message': u'Add README', u'row': 1})
def test_log(self):
resp = self.app.get('/src-git/ref/master~/log/')
def test_tags(self):
resp = self.app.get('/src-git/ref/master~/tags/')
def _get_ci(self):
r = self.app.get('/src-git/ref/master:/')
resp = r.follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-git/ci/'):
return tag['href']
return None
def test_commit(self):
ci = self._get_ci()
resp = self.app.get(ci)
assert 'Rick' in resp, resp.showbrowser()
def test_feed(self):
assert 'Add README' in self.app.get('/feed')
def test_tree(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/')
assert len(resp.html.findAll('tr')) == 2, resp.showbrowser()
resp = self.app.get(ci + 'tree/')
assert 'README' in resp, resp.showbrowser()
links = [ a.get('href') for a in resp.html.findAll('a') ]
assert 'README' in links, resp.showbrowser()
assert 'README/' not in links, resp.showbrowser()
def test_tree_extra_params(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/?format=raw')
assert 'README' in resp, resp.showbrowser()
def test_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README')
assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert 'This is readme' in content, content
assert '<span id="l1" class="code_block">' in resp
assert 'var hash = window.location.hash.substring(1);' in resp
def test_invalid_file(self):
ci = self._get_ci()
self.app.get(ci + 'tree/READMEz', status=404)
def test_diff(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?diff=df30427c488aeab84b2352bdf88a3b19223f9d7a')
assert 'readme' in resp, resp.showbrowser()
assert '+++' in resp, resp.showbrowser()
def test_refresh(self):
notification = M.Notification.query.find(
dict(subject='[test:src-git] 4 new commits to test Git')).first()
domain = '.'.join(reversed(c.app.url[1:-1].split('/'))).replace('_', '-')
common_suffix = tg.config.get('forgemail.domain', '.sourceforge.net')
email = 'noreply@%s%s' % (domain, common_suffix)
assert email in notification['reply_to_address']
def test_file_force_display(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README?force=True')
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert re.search(r'<pre>.*This is readme', content), content
assert '</pre>' in content, content
class TestRestController(_TestCase):
def test_index(self):
self.app.get('/rest/p/test/src-git/', status=200)
def test_commits(self):
self.app.get('/rest/p/test/src-git/commits', status=200)
class TestFork(_TestCase):
def setUp(self):
super(TestFork, self).setUp()
to_project = M.Project.query.get(
shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-git/fork', params=dict(
project_id=str(to_project._id),
mount_point='code',
mount_label='Test forked repository'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
def _follow(self, r, **kw):
if r.status_int == 302:
print r.request.url
while r.status_int == 302:
print ' ==> 302 ==> %s' % r.location
r = r.follow(**kw)
return r
def _upstream_page(self, **kw):
r = self.app.get('/src-git/', **kw)
r = self._follow(r, **kw)
return r
def _fork_page(self, **kw):
r = self.app.get('/p/test2/code/', **kw)
r = self._follow(r, **kw)
return r
def _request_merge(self, **kw):
r = self.app.get('/p/test2/code/request_merge', **kw)
r = self._follow(r, **kw)
r = r.forms[0].submit()
r = self._follow(r, **kw)
mr_num = r.request.url.split('/')[-2]
assert mr_num.isdigit(), mr_num
return r, mr_num
def test_fork_form(self):
r = self.app.get('%sfork/' % c.app.repo.url())
assert '<input type="text" name="mount_point" value="test"/>' in r
assert '<input type="text" name="mount_label" value="test - Git"/>' in r
def test_fork_listed_in_parent(self):
assert 'Forks' in self._upstream_page()
def test_fork_display(self):
r = self._fork_page()
assert 'Clone of' in r
assert 'Test forked repository' in r
def test_fork_links_go_to_fork(self):
r = self._fork_page()
hrefs = ( a.get('href') for a in r.html('a') )
hrefs = ( href for href in hrefs if href and '/ci/' in href )
for href in hrefs:
assert href.startswith('/p/test2/code/'), href
def test_merge_request_visible_to_admin(self):
assert 'Request Merge' in self._fork_page()
def test_merge_request_invisible_to_non_admin(self):
assert 'Request Merge' not in self._fork_page(
extra_environ=dict(username='test-user'))
def test_merge_action_available_to_admin(self):
self.app.get('/p/test2/code/request_merge')
def test_merge_action_unavailable_to_non_admin(self):
self.app.get(
'/p/test2/code/request_merge',
status=403, extra_environ=dict(username='test-user'))
def test_merge_request_detail_view(self):
r, mr_num = self._request_merge()
assert 'would like you to merge' in r, r.showbrowser()
def test_merge_request_list_view(self):
r, mr_num = self._request_merge()
r = self.app.get('/p/test/src-git/merge-requests/')
assert 'href="%s/"' % mr_num in r, r
def test_merge_request_update_status(self):
r, mr_num = self._request_merge()
r = self.app.post('/p/test/src-git/merge-requests/%s/save' % mr_num,
params=dict(status='rejected')).follow()
assert 'Merge Request #%s: (rejected)' % mr_num in r, r<|fim▁end|>
|
import pkg_resources
import pylons
|
<|file_name|>test_path.py<|end_file_name|><|fim▁begin|>import unittest
import tqdm
from ieml.dictionary.script import Script
from ieml.ieml_database import IEMLDatabase, GitInterface
from ieml.usl import PolyMorpheme, Lexeme, Word
from ieml.usl.decoration.parser.parser import PathParser
from ieml.usl.decoration.path import PolymorphemePath, GroupIndex, FlexionPath, LexemeIndex, LexemePath, RolePath, \
usl_from_path_values, path
from ieml.usl.parser import IEMLParser
from ieml.usl.syntagmatic_function import SyntagmaticRole
from ieml.usl.usl import usl
parser = PathParser()
class TestPath(unittest.TestCase):
def check(self, path, _type, usl, expected_type):
self.assertEqual(str(parser.parse(path)), path)
res = parser.parse(path).deference(usl)
self.assertIsInstance(res, expected_type)
def test_path(self):
from ieml.usl.usl import usl
pm = [usl("A: E: S: B: T:"), usl("A: E: m1(S: B: T:)"), usl("A: m1(E:) m1(S: B: T:)"),
usl("m1(A:) m1(E:) m1(S: B: T:)")]
# pm_path = PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))
PolymorphemePath(GroupIndex.CONSTANT, usl('S:')).deference(pm[0])
PolymorphemePath(GroupIndex.GROUP_0, usl('S:')).deference(pm[1])
PolymorphemePath(GroupIndex.GROUP_1, usl('S:')).deference(pm[2])
PolymorphemePath(GroupIndex.GROUP_2, usl('S:')).deference(pm[3])
self.check(">constant>S:", PolymorphemePath, usl('S: A:'), Script)
self.check(">constant", PolymorphemePath, usl('S: A:'), PolyMorpheme)
self.check(">group_0 1>S:", PolymorphemePath, usl('A: m1(S:)'), Script)
self.check(">group_0 1", PolymorphemePath, usl('m1(S: A:)'), PolyMorpheme)
self.check(">group_2 1>B:", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), Script)
self.check(">group_1 1>S:", PolymorphemePath, usl('A: m1(U:) m1(S:)'), Script)
self.check(">group_2 1", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), PolyMorpheme)
self.check(">group_1 1", PolymorphemePath, usl('A: m1(U:) m1(S:)'), PolyMorpheme)
self.check(">", PolymorphemePath, usl('S: A:'), PolyMorpheme)
LexemePath(LexemeIndex.CONTENT, child=PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))).deference(
usl("()(S: B:)"))
LexemePath(LexemeIndex.FLEXION, child=FlexionPath(usl('S:'))).deference(usl("(S: B:)(S:)"))
self.check('>content>constant>S:', LexemePath, usl('()(S:)'), Script)
self.check('>flexion>S:', LexemePath, usl('(S:)(B:)'), Script)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check(">", LexemePath, usl('(S:)(B:)'), Lexeme)
w = usl("[! E:A:. ()(m.-B:.A:.-') > E:A:. E:A:. (E:B:.-d.u.-')(p.E:A:T:.- m1(S:))]")
path = RolePath(SyntagmaticRole([usl('E:A:.'), usl('E:A:.')]),
child=LexemePath(LexemeIndex.CONTENT,
child=PolymorphemePath(GroupIndex.CONSTANT, usl('p.E:A:T:.-'))))
path.deference(w)
self.check(">role>E:A:. E:A:.>content>group_0 1>S:", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>content>constant>p.E:A:T:.-", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>flexion>E:B:.-d.u.-'", RolePath, w, Script)
self.check(">role>E:A:.>content>constant>m.-B:.A:.-'", RolePath, w, Script)
u = usl(
"[! E:B:. ()(k.a.-k.a.-' l.o.-k.o.-') > E:.f.- ()(m1(p.E:A:S:.- p.E:A:B:.- p.E:A:T:.- t.i.-l.i.-' c.-'B:.-'k.o.-t.o.-',))]")
self.check(">role>E:.f.->content>group_0 1>p.E:A:S:.-", RolePath, u, Script)
self.check(">role>E:A:.", RolePath, w, Lexeme)
self.check(">role>E:A:.>content", RolePath, w, PolyMorpheme)
self.check(">", RolePath, w, Word)
def test_paths_values_to_usl(self):
pm = [(">constant>S:", "S:"), (">constant>B:", "B:"), (">group_0 2>T:", "T:"), (">group_0 2>A:", "A:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, PolyMorpheme)
self.assertEqual(str(res), "S: B: m2(A: T:)")
pm = [(">content>constant>S:", "S:"), (">content>constant>B:", "B:"), (">content>group_0 1>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Lexeme)
self.assertEqual(str(res), "()(S: B: m1(T:))")
pm = [(">role>! E:A:.>content>constant>S:", "S:"),
(">role>E:A:. E:A:.>content>constant>B:", "B:"),
(">role>E:A:. E:A:.>content>group_0>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Word)
self.assertEqual(str(res), "[! E:A:. ()(S:) > E:A:. E:A:. ()(B: m1(T:))]")
def test_expand_compose_into_paths(self):
# parser = IEMLParser().parse
gitdb = GitInterface(origin='https://github.com/plevyieml/ieml-language.git')
gitdb.pull()
db = IEMLDatabase(folder=gitdb.folder)
usls = db.list(type=Word, parse=True) + db.list(type=PolyMorpheme, parse=True) + db.list(type=Lexeme, parse=True)
for u in tqdm.tqdm(usls):
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res), "expand_compose_into_paths failed on: " + str(u))
def test_expand_compose_into_paths_empty_exclamation(self):
u = usl('[E:A:. (E:.-n.S:.-\')(b.a.- b.o.-n.o.-s.u.-\' f.a.-b.a.-f.o.-\') > E:A:. E:A:. ()(n.-S:.U:.-\'B:.-\'B:.-\',B:.-\',B:.-\',_ n.-S:.U:.-\'B:.-\'B:.-\',T:.-\',S:.-\',_) > ! E:A:. E:U:. ()]')
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm(self):
u = usl("E:T:S:. n.-T:.A:.-'")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm2(self):
u = usl("s.-S:.U:.-' n.-T:.A:.-' d.-S:.U:.-' m1(E:.-U:.b.-l.-' E:.-U:.f.-l.-') m1(E:.-B:.k.-l.-')")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_has_prefix(self):
u = usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-', m1(S: B: T:) m2(y. o. e. u. a. i.)) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]")
p0 = path(">role>! E:A:.>content>group_0 1>S:")
p0_prefix = path(">role>! E:A:.>content>group_0 1")
self.assertTrue(p0.has_prefix(p0_prefix))
def test_usl_from_path(self):
structure = {">role>! E:A:.>flexion>E:": "E:",
">role>! E:A:.>content>constant>b.-S:.A:.-'S:.-'S:.-',": "b.-S:.A:.-'S:.-'S:.-',",
">role>E:A:. E:A:.>flexion>E:": "E:",
">role>E:A:. E:A:.>flexion>E:U:T:.": "E:U:T:.",
">role>E:A:. E:A:.>flexion>E:A:T:.": "E:A:T:.",
">role>E:A:. E:A:.>flexion>E:S:T:.": "E:S:T:.",
">role>E:A:. E:A:.>flexion>E:B:T:.": "E:B:T:.",
">role>E:A:. E:A:.>flexion>E:T:T:.": "E:T:T:.",
">role>E:A:. E:A:.>content>constant>k.a.-k.a.-'": "k.a.-k.a.-'"}
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure.items()]
u = usl_from_path_values(structure)
self.assertEqual(u, usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-',) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]"))
def test_usl_from_path_pm(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant>k.a.-k.a.-'", "k.a.-k.a.-'"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
def test_usl_from_path_flexion_paradigm(self):
structure = [
(">flexion", "E:.wo.U:.-t.o.-'"),
(">flexion", "E:.wo.A:.-t.o.-'"),<|fim▁hole|> ]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "(m1(E:.wo.U:.-t.o.-' E:.wo.A:.-t.o.-'))(U:)")
def test_usl_from_path_pm2(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant", "k.a.-k.a.-' A:"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: A: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
if __name__ == '__main__':
unittest.main()<|fim▁end|>
|
(">content>constant", "U:"),
|
<|file_name|>generic-arithmetic-2.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct i32x4(pub i32, pub i32, pub i32, pub i32);
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
extern "platform-intrinsic" {
fn simd_add<T>(x: T, y: T) -> T;
fn simd_sub<T>(x: T, y: T) -> T;
fn simd_mul<T>(x: T, y: T) -> T;
fn simd_div<T>(x: T, y: T) -> T;
fn simd_rem<T>(x: T, y: T) -> T;
fn simd_shl<T>(x: T, y: T) -> T;
fn simd_shr<T>(x: T, y: T) -> T;
fn simd_and<T>(x: T, y: T) -> T;
fn simd_or<T>(x: T, y: T) -> T;
fn simd_xor<T>(x: T, y: T) -> T;
fn simd_neg<T>(x: T) -> T;
}
fn main() {
let x = i32x4(0, 0, 0, 0);
let y = u32x4(0, 0, 0, 0);
let z = f32x4(0.0, 0.0, 0.0, 0.0);
unsafe {
simd_add(x, x);
simd_add(y, y);
simd_add(z, z);
simd_sub(x, x);
simd_sub(y, y);
simd_sub(z, z);
simd_mul(x, x);
simd_mul(y, y);
simd_mul(z, z);
simd_div(x, x);
simd_div(y, y);
simd_div(z, z);
simd_rem(x, x);
simd_rem(y, y);
simd_rem(z, z);
simd_shl(x, x);
simd_shl(y, y);
simd_shr(x, x);
simd_shr(y, y);
simd_and(x, x);
simd_and(y, y);
simd_or(x, x);
simd_or(y, y);
simd_xor(x, x);
simd_xor(y, y);
simd_neg(x);
simd_neg(z);
simd_add(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_sub(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_mul(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_div(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_shl(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_shr(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_and(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_or(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_xor(0, 0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_neg(0);
//~^ ERROR expected SIMD input type, found non-SIMD `i32`
simd_shl(z, z);
//~^ ERROR unsupported operation on `f32x4` with element `f32`
simd_shr(z, z);
//~^ ERROR unsupported operation on `f32x4` with element `f32`
simd_and(z, z);
//~^ ERROR unsupported operation on `f32x4` with element `f32`
simd_or(z, z);
//~^ ERROR unsupported operation on `f32x4` with element `f32`
simd_xor(z, z);
//~^ ERROR unsupported operation on `f32x4` with element `f32`
}
}<|fim▁end|>
|
// build-fail
|
<|file_name|>ViewConfig.js<|end_file_name|><|fim▁begin|>/**
* Client.
* @module client
*/
<|fim▁hole|> */
function ViewConfig(resources) {
this.playAnimations = true;
this.resources = resources;
}
/**
* Should we play animations?
* @method setPlayAnimations
*/
ViewConfig.prototype.setPlayAnimations = function(value) {
this.playAnimations = value;
}
/**
* Should we play animations?
* @method getPlayAnimations
*/
ViewConfig.prototype.getPlayAnimations = function() {
return this.playAnimations;
}
/**
* Scale animation time.
* @method scaleAnimationTime
*/
ViewConfig.prototype.scaleAnimationTime = function(millis) {
if (this.playAnimations)
return millis;
return 1;
}
/**
* Get resources.
* @method getResources
*/
ViewConfig.prototype.getResources = function() {
return this.resources;
}
module.exports = ViewConfig;<|fim▁end|>
|
/**
* View configuration.
* @class ViewConfig
|
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0004_make_focal_point_key_not_nullable'),
('wagtailcore', '0008_populate_latest_revision_created_at'),
]
operations = [
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('video', models.URLField(null=True, verbose_name='Background Video')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Office',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('official', models.CharField(max_length=30, null=True, verbose_name="Official's Name")),
('phone', models.CharField(max_length=15, null=True, verbose_name='Phone')),
('address', models.TextField(null=True, verbose_name='Address')),
('body', models.TextField(null=True, verbose_name='Page Body')),
('portrait', models.ForeignKey(verbose_name='Portrait', to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='OfficePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', models.TextField(null=True, verbose_name='Page Body')),
],
options={
'abstract': False,<|fim▁hole|> ),
migrations.CreateModel(
name='Offices',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]<|fim▁end|>
|
},
bases=('wagtailcore.page',),
|
<|file_name|>airFilterWhenSmoggy.js<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2014 [email protected]
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* @author [email protected]
* @fileoverview Prevent an air filter or other device from being turned off if
* air quality is bad. The main intent of this is to prevent
* automated macros or machine learning from turning off a filter
* if air quality (PM 2.5) is really bad.
*/
module.exports = (function () {
'use strict';
return {
version : 20180822,
airFilterWhenSmoggy : function (deviceId, command, controllers, config) {
var deviceState = require(__dirname + '/../../lib/deviceState'),
commandParts = command.split('-'),
filter = config.filter,
maxLevel = config.maxLevel || 34.4,
commandSubdevice = '',
checkState,
status;
checkState = function () {
var currDevice,
currentDevice = {},
status = {},
subdeviceId,
i = 0;
for (currDevice in controllers) {
if (controllers[currDevice].config) {
switch (controllers[currDevice].config.typeClass) {
// Look for bad PM 2.5 values in Air Quality
case 'airQuality' :
currentDevice = deviceState.getDeviceState(currDevice);
if (currentDevice.value && currentDevice.value.report) {
for (i; i < currentDevice.value.report.length; i += 1) {
if (currentDevice.value.report[i].type === 'pm25') {
status.value = currentDevice.value.report[i].value;
}
}
}<|fim▁hole|> case 'smartthings' :
case 'wemo' :
currentDevice = deviceState.getDeviceState(currDevice);
if (currentDevice.value) {
for (subdeviceId in currentDevice.value.devices) {
if (currentDevice.value.devices[subdeviceId].label === filter) {
status.filter = currentDevice.value.devices[subdeviceId];
status.newState = currentDevice.value;
}
}
}
break;
}
}
}
return status;
};
if (commandParts.length === 3) {
commandSubdevice = commandParts[1];
// We only care if it's the given subdevice AND we're trying to
// potentially turn it off.
if ((filter === commandSubdevice) && ((commandParts[2] === 'toggle') || (commandParts[2] === 'off'))) {
status = checkState();
// Air quality is beyond its determined safe bounds and the chosen
// filter is currently on - abort this off or toggle command.
if ((status.value >= maxLevel) && (status.filter.state === 'on')) {
return false;
}
}
}
}
};
}());<|fim▁end|>
|
break;
case 'nest' :
|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TrinityX documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 25 14:04:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'rinoh.frontend.sphinx',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TrinityX'
copyright = '2020, ClusterVision Solutions BV'
author = 'ClusterVision Solutions BV'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12'
# The full version, including alpha/beta/rc tags.
release = '12.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'none'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
#html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = 'TrinityX r12'<|fim▁hole|>
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'trinityxlogo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TrinityXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TrinityX.tex', 'TrinityX Documentation',
'ClusterVision Solutions BV', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trinityx', 'TrinityX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TrinityX', 'TrinityX Documentation',
author, 'TrinityX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False<|fim▁end|>
| |
<|file_name|>main_test.go<|end_file_name|><|fim▁begin|>// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"testing"
"time"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/testkit/testmain"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testbridge"
"github.com/pingcap/tidb/util/timeutil"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/tikv"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
testbridge.WorkaroundGoCheckFlags()
testmain.ShortCircuitForBench(m)
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = 0
conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
conf.Experimental.AllowsExpressionIndex = true
})
tikv.EnableFailpoints()
// Some tests depend on the values of timeutil.SystemLocation().
// If we don't SetSystemTZ() here, the value would change unpredictably,
// affected by whether a test suite runs before or after the integration tests.
// Note, SetSystemTZ() is a sync.Once operation.
timeutil.SetSystemTZ("system")
opts := []goleak.Option{
goleak.IgnoreTopFunction("go.etcd.io/etcd/pkg/logutil.(*MergeLogger).outputLoop"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
goleak.IgnoreTopFunction("github.com/pingcap/tidb/table/tables.mockRemoteService"),
}
goleak.VerifyTestMain(m, opts...)
}
<|fim▁hole|>func createContext(t *testing.T) *mock.Context {
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx.TimeZone = time.Local
sc := ctx.GetSessionVars().StmtCtx
sc.TruncateAsWarning = true
require.NoError(t, ctx.GetSessionVars().SetSystemVar("max_allowed_packet", "67108864"))
ctx.GetSessionVars().PlanColumnID = 0
return ctx
}<|fim▁end|>
| |
<|file_name|>test_denormalize.py<|end_file_name|><|fim▁begin|>import unittest
from programy.processors.post.denormalize import DenormalizePostProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
class DenormalizeTests(unittest.TestCase):
def setUp(self):
self.bot = Bot(Brain(BrainConfiguration()), config=BotConfiguration())
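        # Register the denormal split so the token " dot com " is mapped back to ".com" by the post-processor.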
self.bot.brain.denormals.process_splits([" dot com ",".com"])
def test_denormalize(self):
processor = DenormalizePostProcessor ()
result = processor.process(self.bot, "testid", "Hello")
self.assertIsNotNone(result)
self.assertEqual("Hello", result)
result = processor.process(self.bot, "testid", "hello dot com")<|fim▁hole|> self.assertEqual("hello.com", result)<|fim▁end|>
|
self.assertIsNotNone(result)
|
<|file_name|>AreaFragmentRight.java<|end_file_name|><|fim▁begin|>package com.example.mathsolver;
import android.annotation.TargetApi;
import android.app.Fragment;
import android.os.Build;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
@TargetApi(Build.VERSION_CODES.HONEYCOMB)
public class AreaFragmentRight extends Fragment {
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// TODO Auto-generated method stub
<|fim▁hole|> }
}<|fim▁end|>
|
return inflater.inflate(R.layout.area_right, container, false);
|
<|file_name|>model_control_one_enabled_BoxCox_LinearTrend_BestCycle_MLP.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['MLP'] );<|fim▁end|>
| |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from planarprocess import *
from gds_helpers import *
from itertools import cycle
from matplotlib import pyplot  # used for the plotting calls below; may also be re-exported by the star imports above
xmin, xmax = -5, 5
layers = gds_cross_section('mypmos.gds', [(0,xmin), (0, xmax)], 'gdsmap.map')
# Layer names available in the returned mapping:
# ['P-Active-Well', 'Active-Cut', 'N-Well', 'Metal-2', 'Metal-1', 'P-Select',
#  'N-Select', 'Transistor-Poly', 'Via1']
wafer = Wafer(1., 5., 0, xmax - xmin)
# N-Well
nw = layers['N-Well']
wafer.implant(.7, nw, outdiffusion=5., label='N-Well')
# Field and gate oxides
de = layers['P-Active-Well']
# TODO: channel stop under field oxide
fox = wafer.grow(.5, wafer.blank_mask().difference(de),
y_offset=-.2, outdiffusion=.1)
gox = wafer.grow(.05, de, outdiffusion=.05, base=wafer.wells,
label='Gate oxide')
# Gate poly and N+/P+ implants
gp = layers['Transistor-Poly']
poly = wafer.grow(.25, gp, outdiffusion=.25, label='Gate poly')
np = layers['N-Select'].intersection(
layers['P-Active-Well']).difference(gp)
nplus = wafer.implant(.1, np, outdiffusion=.1, target=wafer.wells, source=gox,
label='N+')
pp = layers['P-Select'].intersection(
layers['P-Active-Well']).difference(gp)
pplus = wafer.implant(.1, pp, outdiffusion=.1, target=wafer.wells, source=gox,
label='P+')
# Multi-level dielectric and contacts
mld_thickness = .5
mld = wafer.grow(mld_thickness, wafer.blank_mask(), outdiffusion=.1)
ct = layers['Active-Cut']
contact = wafer.grow(-mld_thickness*1.1, ct, consuming=[mld, gox], base=wafer.air,
outdiffusion=.05, outdiffusion_vertices=3)
# Metals and vias
m1 = layers['Metal-1']
metal1 = wafer.grow(.6, m1, outdiffusion=.1, label='Metal-1')
ild_thickness = 1.2
ild1 = wafer.grow(ild_thickness, wafer.blank_mask(), outdiffusion=.1)
wafer.planarize()
v1 = layers['Via1']
via1 = wafer.grow(-ild_thickness*1.1, v1, consuming=[ild1], base=wafer.air,
outdiffusion=.05, outdiffusion_vertices=3)
m2 = layers['Metal-2']<|fim▁hole|>for solid, color in {
fox: '.4', gox: 'r', poly: 'g', mld: 'k',
ild1: '.3', contact: '.5', via1: '.5',
metal1: '.7', metal2: '.8'}.items():
custom_style[solid].update(dict(facecolor=color, edgecolor='k'))
for solid in wafer.solids:
if solid not in wafer.wells:
custom_style[solid].update(dict(hatch=None, fill=True))
base_hatches = r'\/' # r'/\|-+xoO.*'
hatches = cycle(list(base_hatches) + [h1+h2 for h1 in base_hatches
for h2 in base_hatches])
colors = cycle('krgbcmy')
plot_geometryref(wafer.air, hatch='.', fill=False, linewidth=0, color=(.9,.9,.9),
zorder=-100)
zorder = -99
for solid in wafer.solids:
style = dict(hatch=next(hatches), fill=False,
edgecolor=next(colors), zorder=zorder)
zorder += 1
style.update(custom_style.get(solid, {}))
plot_geometryref(solid, **style)
pyplot.legend()
pyplot.savefig('mypmos-x.png')
pyplot.show()<|fim▁end|>
|
metal2 = wafer.grow(1., m2, outdiffusion=.1, label='Metal-2')
# Presentation
custom_style = {s: {} for s in wafer.solids}
|
<|file_name|>emoji.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>import React = require('react');
import { EmojiData, EmojiSkin } from '..';
export type BackgroundImageFn = (set: EmojiSet, sheetSize: EmojiSheetSize) => string;
export type EmojiSet = 'apple'|'google'|'twitter'|'emojione'|'messenger'|'facebook';
export type EmojiSheetSize = 16|20|32|64;
export interface Props {
onOver?(emoji: EmojiData, e: React.MouseEvent<HTMLElement>): void;
onLeave?(emoji: EmojiData, e: React.MouseEvent<HTMLElement>): void;
onClick?(emoji: EmojiData, e: React.MouseEvent<HTMLElement>): void;
/** defaults to returning a png from unpkg.com-hosted emoji-datasource-${set} */
backgroundImageFn?: BackgroundImageFn;
native?: boolean;
forceSize?: boolean;
tooltip?: boolean;
/** defaults to 1 */
skin?: EmojiSkin;
/** defaults to 64 */
sheetSize?: EmojiSheetSize;
/** defaults to 'apple' */
set?: EmojiSet;
size: number;
emoji: string|EmojiData;
}
// tslint:disable-next-line strict-export-declare-modifiers
declare const Emoji: React.SFC<Props>;
export { Emoji as default };<|fim▁end|>
| |
<|file_name|>runcmd.go<|end_file_name|><|fim▁begin|>// Copyright 2011-2015 visualfc <[email protected]>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runcmd
<|fim▁hole|> "strings"
"github.com/visualfc/gotools/pkg/command"
)
var Command = &command.Command{
Run: runCmd,
UsageLine: "runcmd [-w work_path] <program_name> [arguments...]",
Short: "run program",
Long: `run program and arguments`,
}
var execWorkPath string
var execWaitEnter bool
func init() {
Command.Flag.StringVar(&execWorkPath, "w", "", "work path")
Command.Flag.BoolVar(&execWaitEnter, "e", true, "wait enter and continue")
}
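// runCmd resolves the requested program (on PATH or relative to the current directory),
// runs it in the configured work path with stdio attached, and reports its exit status.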
func runCmd(cmd *command.Command, args []string) error {
if len(args) == 0 {
cmd.Usage()
return os.ErrInvalid
}
if execWorkPath == "" {
var err error
execWorkPath, err = os.Getwd()
if err != nil {
return err
}
}
fileName := args[0]
filePath, err := exec.LookPath(fileName)
if err != nil {
filePath, err = exec.LookPath("./" + fileName)
}
if err != nil {
return err
}
fmt.Println("Starting Process", filePath, strings.Join(args[1:], " "), "...")
command := exec.Command(filePath, args[1:]...)
command.Dir = execWorkPath
command.Stdin = os.Stdin
command.Stdout = os.Stdout
command.Stderr = os.Stderr
err = command.Run()
if err != nil {
fmt.Println("\nEnd Process", err)
} else {
fmt.Println("\nEnd Process", "exit status 0")
}
exitWaitEnter()
return nil
}
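// exitWaitEnter waits for the user to press enter before returning, unless the -e flag disabled it.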
func exitWaitEnter() {
if !execWaitEnter {
return
}
fmt.Println("\nPress enter key to continue")
var s = [256]byte{}
os.Stdin.Read(s[:])
}<|fim▁end|>
|
import (
"fmt"
"os"
"os/exec"
|
<|file_name|>showsByFollowedArtists.test.ts<|end_file_name|><|fim▁begin|>/* eslint-disable promise/always-return */
import { runAuthenticatedQuery } from "schema/v2/test/utils"
import gql from "lib/gql"
describe("Me", () => {
describe("ShowsByFollowedArtists", () => {
it("returns shows by followed artists", async () => {
const query = gql`
{
me {
showsByFollowedArtists(
first: 100
sort: NAME_ASC
status: UPCOMING
) {
totalCount
edges {
node {
name
}
}
}
}
}
`
const expectedConnectionData = {
totalCount: 2,
edges: [
{
node: {
name: "Show 1",
},
},
{
node: {
name: "Show 2",
},
},
],
}
const followedArtistsShowsLoader = jest.fn(async () => ({
headers: { "x-total-count": 2 },
body: [
{
name: "Show 1",
},
{
name: "Show 2",
},
],
}))
const context = {
meLoader: () => Promise.resolve({}),
followedArtistsShowsLoader,
}
<|fim▁hole|> const {
me: { showsByFollowedArtists },
} = await runAuthenticatedQuery(query, context)
expect(showsByFollowedArtists).toEqual(expectedConnectionData)
expect(followedArtistsShowsLoader).toHaveBeenCalledWith({
offset: 0,
size: 100,
sort: "name",
status: "upcoming",
total_count: true,
})
})
})
})<|fim▁end|>
| |
<|file_name|>fr.js<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2009-2016 by the geOrchestra PSC
*
* This file is part of geOrchestra.
*
* geOrchestra is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* geOrchestra is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* geOrchestra. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* @requires GeoExt/Lang.js
*/
/*
* French translation file
*/
OpenLayers.Lang.fr = OpenLayers.Util.extend(OpenLayers.Lang.fr, {
/* General purpose strings */
"Yes": "Oui",
"No": "Non",
"OK": "OK",
"or": "ou",
"Cancel": "Annuler",
"Save": "Sauvegarder",
"Loading...": "Chargement...",
"File": "Fichier",
"Layer": "Couche",
"layers": "couches",
"Description": "Description",
"Error": "Erreur",
"Server": "Serveur",
"Close": "Fermer",
"labelSeparator": " : ",
"File submission failed or invalid file": "L'envoi du fichier a échoué - le fichier est peut-être non valide",
"Type": "Type",
"Title": "Titre",
"Actions": "Actions",
"Incorrect server response.": "Réponse du serveur incorrecte.",
"No features found.": "Aucun objet trouvé.",
/* GEOR.js strings */
"Cities": "Localités",
"Recentering on GeoNames cities": "Recentrage sur localités<br />de la base GeoNames",
"Referentials": "Référentiels",
"Recentering on a selection of referential layers": "Recentrage sur une sélection<br />de couches \"référentiels\"",
"Addresses": "Adresses",
"Recentering on a given address": "Recentrage sur point adresse",
"Available layers": "Couches disponibles",
"WMS Search": "Recherche WMS",
"WFS Search": "Recherche WFS",
"resultspanel.emptytext":
"<p>Sélectionnez l'outil d'interrogation " +
"ou construisez une requête sur une couche.<br />" +
"Les attributs des objets s'afficheront dans ce cadre.</p>",
/* GEOR_ClassificationPanel.js strings */
"Attribute": "Attribut",
"Number of classes": "Nombre de classes",
"Minimum size": "Taille minimum",
"Maximum size": "Taille maximum",
"First color": "Première couleur",
"Last color": "Dernière couleur",
"Palette": "Palette",
"Auto classification": "Classification automatique",
"Classify": "Classifier",
"Unique values": "Valeurs uniques",
"Color range": "Plages de couleurs",
"Proportional symbols": "Symboles proportionnels",
/* GEOR_FeatureDataModel.js strings */
"objects": "objets",
/* GEOR_address.js strings */
"Go to: ": "Aller à : ",
"searching...": "recherche en cours...",
"adressSearchExemple": "ex: 4, Hugo, Brest",
/* GEOR_ajaxglobal.js strings strings */
"Server did not respond.": "Le serveur n'a pas répondu.",
"Server access denied.": "Le serveur a refusé de répondre.",
"ajax.badresponse":
"Le service a répondu, mais le contenu de la " +
"réponse n'est pas conforme à celle attendue",
"Server unavailable.": "Le serveur est temporairement indisponible. Veuillez réessayer ultérieurement.",
"Too much data.": "Données trop volumineuses.",
"Server exception.": "Le serveur a renvoyé une exception.",
"ajax.defaultexception":
"Pour plus d'information, vous pouvez " +
"chercher le code de retour sur <a href=\"http://" +
"en.wikipedia.org/wiki/List_of_HTTP_status_codes\" target=\"_blank\">" +
"cette page</a>.",
"An error occured.<br />": "Une erreur est survenue.<br />",
"Warning : browser may freeze": "Attention : risque de blocage du navigateur",
"ajaxglobal.data.too.big": "Les données provenant du serveur sont trop " +
"volumineuses.<br />Le serveur a envoyé ${SENT}KO " +
"(la limite est à ${LIMIT}KO)<br />Voulez-vous tout de même continuer ?",
/* GEOR_config.js strings */
/* GEOR_cswbrowser.js strings */
"NAME layer": "Couche ${NAME}",
"Metadata without a name": "Métadonnée non nommée",
"The getDomain CSW query failed": "La requête CSW getDomain a échoué",
"Error for the thesaurus": "Erreur sur le thésaurus",
"Missing key to access the thesaurus":
"Absence de clé pour accéder à ce thésaurus",
"Keywords query failed": "La requête des mots clés a échoué",
"Thesaurus:": "Thésaurus :",
"Thesaurus": "Thésaurus",
"cswbrowser.default.thesaurus.mismatch":
"Administrateur : problème de configuration - " +
"la variable DEFAULT_THESAURUS_KEY ne correspond à aucune" +
" valeur exportée par GeoNetwork",
/* GEOR_cswquerier.js strings */
"cswquerier.help.title": "Syntaxe pour la recherche avancée",
"cswquerier.help.message": "<ul><li><b>@mot</b> cherche dans le nom de l'organisation.</li><li><b>#mot</b> cherche dans les mots clés de la métadonnée.</li><li><b>?mot</b> élargit la recherche à tous les champs de la métadonnée.</li></ul>",
"NAME layer on VALUE": "Couche ${NAME} sur ${VALUE}",
"Show metadata essentials in a window":
"Afficher les métadonnées basiques",
"Show metadata sheet in a new browser tab":
"Afficher la métadonnée complète dans un nouvel onglet",
"more": "plus",
"Click to select or deselect the layer":
"Cliquez pour sélectionner ou désélectionner la couche",
"Open the URL url in a new window":
"Ouvrir l'url ${URL} dans une nouvelle fenêtre",
"Unreachable server": "Serveur non disponible",
"Catalogue": "Catalogue",
"Find": "Chercher",
"in": "dans",
"No linked layer.": "Aucune couche.",
"One layer found.": "Une couche trouvée.",
"NB layers found.": "${NB} couches trouvées.",
"NB metadata match the query.": "${NB} métadonnées correspondent à la requête.",
"A single metadata matches the query.": "Une unique métadonnée correspond à la requête.",
"Precise your request.": "Précisez votre requête.",
"No metadata matches the query.":
"Aucune métadonnée ne correspond à la requête.",
"Limit to map extent": "Limiter à l'étendue de la carte",
"Search limited to current map extent.": "Recherche limitée à l'étendue de la carte.",
/* GEOR_fileupload.js strings */
"2D only": "géométries 2D",
"Local file": "Fichier",
"The service is inactive": "Le service est inactif",
"Upload a vector data file.": "Uploadez un fichier de données vectorielles.",
"The allowed formats are the following: ": "Les formats acceptés sont les suivants : ",
"Use ZIP compression for multifiles formats, such as": "Utilisez la compression ZIP pour les formats multi-fichiers comme",
"fileupload_error_incompleteMIF": "Fichier MIF/MID incomplet.",
"fileupload_error_incompleteSHP": "Fichier shapefile incomplet.",
"fileupload_error_incompleteTAB": "Fichier TAB incomplet.",
"fileupload_error_ioError": "Erreur d'I/O sur le serveur. Contacter l'administrateur de la plateforme pour plus de détails.",
"fileupload_error_multipleFiles": "L'archive ZIP contient plusieurs fichiers de données. Elle ne doit en contenir qu'un seul.",
"fileupload_error_outOfMemory": "Le serveur ne dispose plus de la mémoire suffisante. Contacter l'administrateur de la plateforme pour plus de détails.",
"fileupload_error_sizeError": "Le fichier est trop grand pour pouvoir être traité.",
"fileupload_error_unsupportedFormat": "Ce format de données n'est pas géré par l'application.",
"fileupload_error_projectionError": "Une erreur est survenue lors de la lecture des coordonnées géographiques. Êtes-vous sûr que le fichier contient les informations de projection ?",
"server upload error: ERROR":
"L'upload du fichier a échoué. ${ERROR}",
/* GEOR_geonames.js strings */
/* GEOR_getfeatureinfo.js strings */
"<div>Searching...</div>": "<div>Recherche en cours...</div>",
"<div>No layer selected</div>": "<div>Aucune couche sélectionnée</div>",
"<div>Search on objects active for NAME layer. Click on the map.</div>":
"<div>Recherche d\'objets activée sur la couche ${NAME}. " +<|fim▁hole|> "WMS GetFeatureInfo at ": "GetFeatureInfo WMS sur ",
/* GEOR_layerfinder.js strings */
"metadata": "métadonnée",
"Add layers from local files": "Ajouter des couches en uploadant un fichier depuis votre ordinateur",
"Find layers searching in metadata":
"Trouvez des couches en cherchant dans les métadonnées",
"Find layers from keywords": "Trouvez des couches par mots clés",
"Find layers querying OGC services":
"Trouvez des couches en interrogeant des services OGC",
"layerfinder.layer.unavailable":
"La couche ${NAME} n'a pas été trouvée dans le service WMS.<br/<br/>" +
"Peut-être n'avez-vous pas le droit d'y accéder " +
"ou alors cette couche n'est plus disponible",
"Layer projection is not compatible":
"La projection de la couche n'est pas compatible.",
"The NAME layer does not contain a valid geometry column":
"La couche ${NAME} ne possède pas de colonne géométrique valide.",
"Add": "Ajouter",
"Add layers from a ...": "Ajouter des couches depuis un ...",
"Malformed URL": "URL non conforme.",
"Queryable": "Interrogeable",
"Opaque": "Opaque",
"OGC server": "Serveur OGC",
"I'm looking for ...": "Je recherche ...",
"Service type": "Type de service",
"Choose a server": "Choisissez un serveur",
"... or enter its address": "... ou saisissez son adresse",
"The server is publishing one layer with an incompatible projection":
"Le serveur publie une couche dont la projection n'est pas compatible",
"The server is publishing NB layers with an incompatible projection":
"Le serveur publie ${NB} couches dont la projection n'est pas " +
"compatible",
"This server does not support HTTP POST": "Ce serveur ne supporte pas HTTP POST",
"Unreachable server or insufficient rights": "Réponse invalide du " +
"serveur. Raisons possibles : droits insuffisants, " +
"serveur injoignable, trop de données, etc.",
/* GEOR_managelayers.js strings */
"layergroup": "couche composite",
"Service": "Service",
"Protocol": "Protocole",
"About this layer": "A propos de cette couche",
"Set as overlay": "Passer en calque",
"Set as baselayer": "Passer en couche de fond",
"Tiled mode" : "Mode tuilé",
"Confirm NAME layer deletion ?":
"Voulez-vous réellement supprimer la couche ${NAME} ?",
"1:MAXSCALE to 1:MINSCALE": "1:${MAXSCALE} à 1:${MINSCALE}",
"Visibility range (indicative):<br />from TEXT":
"Plage de visibilité (indicative):<br />de ${TEXT}",
"Information on objects of this layer":
"Interroger les objets de cette couche",
"default style": "style par défaut",
"no styling": "absence de style",
"Recenter on the layer": "Recentrer sur la couche",
"Impossible to get layer extent":
"Impossible d'obtenir l'étendue de la couche.",
"Refresh layer": "Recharger la couche",
"Show metadata": "Afficher les métadonnées",
"Edit symbology": "Éditer la symbologie",
"Build a query": "Construire une requête",
"Download data": "Extraire les données",
"Choose a style": "Choisir un style",
"Modify format": "Modifier le format",
"Delete this layer": "Supprimer cette couche",
"Push up this layer": "Monter cette couche",
"Push down this layer": "descendre cette couche",
"Add layers": "Ajouter des couches",
"Remove all layers": "Supprimer toutes les couches",
"Are you sure you want to remove all layers ?": "Voulez vous réellement supprimer toutes les couches ?",
"source: ": "source : ",
"unknown": "inconnue",
"Draw new point": "Dessiner un nouveau point",
"Draw new line": "Dessiner une nouvelle ligne",
"Draw new polygon": "Dessiner un nouveau polygone",
"Edition": "Edition",
"Editing": "Edition en cours",
"Switch on/off edit mode for this layer": "Basculer cette couche en mode édition",
"No geometry column.": "Colonne géométrique non détectée.",
"Geometry column type (TYPE) is unsupported.": "Le type de la colonne géométrique (${TYPE}) n'est pas supporté.",
"Switching to attributes-only edition.": "Seuls les attributs des objets existants seront éditables.",
/* GEOR_map.js strings */
"Location map": "Carte de situation",
"Warning after loading layer":
"Avertissement suite au chargement de couche",
"The <b>NAME</b> layer could not appear for this reason: ":
"La couche <b>${NAME}</b> pourrait ne pas apparaître pour " +
"la raison suivante : ",
"Min/max visibility scales are invalid":
"Les échelles min/max de visibilité sont invalides.",
"Visibility range does not match map scales":
"La plage de visibilité ne correspond pas aux échelles de la carte.",
"Geografic extent does not match map extent":
"L'étendue géographique ne correspond pas à celle de la carte.",
/* GEOR_mapinit.js strings */
"Add layers from WMS services":
"Ajouter des couches depuis des services WMS",
"Add layers from WFS services":
"Ajouter des couches depuis des services WFS",
"NB layers not imported": "${NB} couches non importées",
"One layer not imported": "Une couche non importée",
"mapinit.layers.load.error":
"Les couches nommées ${LIST} n'ont pas pu être chargées. " +
"Raisons possibles : droits insuffisants, SRS incompatible ou couche non existante",
"NB layers imported": "${NB} couches importées",
"One layer imported": "Une couche importée",
"No layer imported": "Aucune couche importée",
"The provided context is not valid": "Le contexte fourni n'est pas valide",
"The default context is not defined (and it is a BIG problem!)":
"Le contexte par défaut n'est pas défini " +
"(et ce n'est pas du tout normal !)",
"Error while loading file": "Erreur au chargement du fichier",
/* GEOR_mappanel.js strings */
"Coordinates in ": "Coordonnées en ",
"scale picker": "échelle",
/* GEOR_ows.js strings */
"The NAME layer was not found in WMS service.":
"La couche ${NAME} n'a pas été trouvée dans le service WMS.",
"Problem restoring a context saved with buggy Chrome 36 or 37":
"Nous ne pouvons restaurer un contexte enregistré avec Chrome 36 ou 37",
/* GEOR_print.js strings */
"Sources: ": "Sources : ",
"Source: ": "Source : ",
"Projection: PROJ": "Projection : ${PROJ}",
"Print error": "Impression impossible",
"Print server returned an error":
"Le service d'impression a signalé une erreur.",
"Contact platform administrator":
"Contactez l'administrateur de la plateforme.",
"Layer unavailable for printing": "Couche non disponible pour impression",
"The NAME layer cannot be printed.":
"La couche ${NAME} ne peut pas encore être imprimée.",
"Unable to print": "Impression non disponible",
"The print server is currently unreachable":
"Le service d'impression est actuellement inaccessible.",
"print.unknown.layout":
"Erreur de configuration: DEFAULT_PRINT_LAYOUT " +
"${LAYOUT} n'est pas dans la liste des formats d'impression",
"print.unknown.resolution":
"Erreur de configuration: DEFAULT_PRINT_RESOLUTION " +
"${RESOLUTION} n'est pas dans la liste des résolutions d'impression",
"print.unknown.format":
"Erreur de configuration: le format " +
"${FORMAT} n'est pas supporté par le serveur d'impression",
"Pick an output format": "Choisissez un format de sortie",
"Comments": "Commentaires",
"Scale: ": "Échelle : ",
"Date: ": "Date : ",
"Minimap": "Mini-carte",
"North": "Nord",
"Scale": "Échelle",
"Date": "Date",
"Legend": "Légende",
"Format": "Format",
"Resolution": "Résolution",
"Print the map": "Impression de la carte",
"Print": "Imprimer",
"Printing...": "Impression en cours...",
"Print current map": "Imprimer la carte courante",
/* GEOR_Querier.js strings */
"Fields of filters with a red mark are mandatory": "Vous devez remplir " +
"les champs des filtres marqués en rouge.",
"Request on NAME": "Requêteur sur ${NAME}",
"WFS GetFeature on filter": "GetFeature WFS sur un filtre",
"Search": "Rechercher",
"querier.layer.no.geom":
"La couche ne possède pas de colonne géométrique." +
"<br />Le requêteur géométrique ne sera pas fonctionnel.",
"querier.layer.error":
"Impossible d'obtenir les caractéristiques de la couche demandée." +
"<br />Le requêteur ne sera pas disponible.",
/* GEOR_referentials.js strings */
"Referential": "Référentiel",
"There is no geometry column in the selected referential":
"Le référentiel sélectionné ne possède pas de colonne géométrique",
"Choose a referential": "Choisissez un référentiel",
/* GEOR_resultspanel.js strings */
"Symbology": "Symbologie",
"Edit this panel's features symbology": "Editer la symbologie de la sélection",
"Reset": "Réinitialiser",
"Export is not possible: features have no geometry": "Export impossible : absence de géométries",
"resultspanel.maxfeature.reached":
" <span ext:qtip=\"Utilisez un navigateur plus performant " +
"pour augmenter le nombre d'objets affichables\">" +
"Nombre maximum d'objets atteint (${NB})</span>",
"NB results": "${NB} résultats",
"One result": "1 résultat",
"No result": "Aucun résultat",
"Clean": "Effacer",
"All": "Tous",
"None": "Aucun",
"Invert selection": "Inverser la sélection",
"Actions on the selection or on all results if no row is selected":
"Actions sur la sélection ou sur tous les résultats si aucun n'est sélectionné",
"Store the geometry":
"Enregistrer la géométrie",
"Aggregates the geometries of the selected features and stores it in your browser for later use in the querier":
"La géométrie des objets sélectionnés est enregistrée pour un usage ultérieur dans le requêteur",
"Geometry successfully stored in this browser":
"Géométrie enregistrée avec succès sur ce navigateur",
"Clean all results on the map and in the table": "Supprimer les " +
"résultats affichés sur la carte et dans le tableau",
"Zoom": "Zoom",
"Zoom to results extent": "Cadrer l'étendue de la carte sur celle " +
"des résultats",
"Export": "Export",
"Export results as": "Exporter l'ensemble des résultats en",
"<p>No result for this request.</p>": "<p>Aucun objet ne " +
"correspond à votre requête.</p>",
/* GEOR_scalecombo.js strings */
/* GEOR_selectfeature.js strings */
"<div>Select features activated on NAME layer. Click on the map.</div>":
"<div>Sélection d\'objets activée sur la couche ${NAME}. " +
"Cliquez sur la carte.</div>",
"OpenLayers SelectFeature":"Sélection d\'objets",
/* GEOR_styler.js strings */
"Download style": "Télécharger le style",
"You can download your SLD style at ": "Votre SLD est disponible à " +
"l\'adresse suivante : ",
"Thanks!": "Merci !",
"Saving SLD": "Sauvegarde du SLD",
"Some classes are invalid, verify that all fields are correct": "Des " +
"classes ne sont pas valides, vérifier que les champs sont corrects",
"Get SLD": "Récupération du SLD",
"Malformed SLD": "Le SLD n'est pas conforme.",
"circle": "cercle",
"square": "carré",
"triangle": "triangle",
"star": "étoile",
"cross": "croix",
"x": "x",
"customized...": "personnalisé...",
"Classification ...<br/>(this operation can take some time)":
"Classification ...<br/>(cette opération peut prendre du temps)",
"Class": "Classe",
"Untitled": "Sans titre",
"styler.guidelines":
"Utiliser le bouton \"+\" pour créer une classe, et le bouton " +
"\"Analyse\" pour créer un ensemble de classes définies par une " +
"analyse thématique.</p>",
"Analyze": "Analyse",
"Add a class": "Ajouter une classe",
"Delete the selected class": "Supprimer la classe sélectionnée",
"Styler": "Styleur",
"Apply": "Appliquer",
"Impossible to complete the operation:": "Opération impossible :",
"no available attribute": "aucun attribut disponible.",
/* GEOR_toolbar.js strings */
"m": "m",
"hectares": "hectares",
"zoom to global extent of the map": "zoom sur l'étendue globale de la " +
"carte",
"pan": "glisser - déplacer la carte",
"zoom in": "zoom en avant (pour zoomer sur une emprise: appuyer sur SHIFT + dessiner l'emprise)",
"zoom out": "zoom en arrière",
"back to previous zoom": "revenir à la précédente emprise",
"go to next zoom": "aller à l'emprise suivante",
"Login": "Connexion",
"Logout": "Déconnexion",
"Help": "Aide",
"Query all active layers": "Interroger toutes les couches actives",
"Show legend": "Afficher la légende",
"Leave this page ? You will lose the current cartographic context.":
"Vous allez quitter cette page et perdre le contexte cartographique " +
"courant",
"Online help": "Aide en ligne",
"Display the user guide": "Afficher le guide de l'utilisateur",
"Contextual help": "Aide contextuelle",
"Activate or deactivate contextual help bubbles": "Activer ou désactiver les bulles d'aide contextuelle",
/* GEOR_tools.js strings */
"Tools": "Outils",
"tools": "outils",
"tool": "outil",
"No tool": "aucun outil",
"Manage tools": "Gérer les outils",
"remember the selection": "se souvenir de la sélection",
"Available tools:": "Outils disponibles :",
"Click to select or deselect the tool": "Cliquez pour (dé)sélectionner l'outil",
"Could not load addon ADDONNAME": "Impossible de charger l'addon ${ADDONNAME}",
"Your new tools are now available in the tools menu.": 'Vos nouveaux outils sont disponibles dans le menu "outils"',
/* GEOR_util.js strings */
"Characters": "Caractères",
"Digital": "Numérique",
"Boolean": "Booléen",
"Other": "Autre",
"Confirmation": "Confirmation",
"Information": "Information",
"pointOfContact": "contact",
"custodian": "producteur",
"distributor": "distributeur",
"originator": "instigateur",
"More": "Plus",
"Could not parse metadata.": "Impossible d'analyser la métadonnée",
"Could not get metadata.": "Impossible d'obtenir la métadonnée",
/* GEOR_waiter.js strings */
/* GEOR_wmc.js strings */
"The provided file is not a valid OGC context": "Le fichier fourni n'est pas un contexte OGC valide",
"Warning: trying to restore WMC with a different projection (PROJCODE1, while map SRS is PROJCODE2). Strange things might occur !": "Attention: le contexte restauré avait été sauvegardé en ${PROJCODE1} alors que la carte actuelle est en ${PROJCODE2}. Il pourrait y avoir des comportements inattendus.",
/* GEOR_wmcbrowser.js strings */
"all contexts": "toutes les cartes",
"Could not find WMC file": "Le fichier spécifié n'existe pas",
"... or a local context": "... ou une carte locale",
"Load or add the layers from one of these map contexts:": "Charger ou ajouter les couches de l'une de ces cartes :",
"A unique OSM layer": "Une unique couche OpenStreetMap",
"default viewer context": "carte par défaut",
"(default)": "<br/>(carte par défaut)",
/* GEOR_workspace.js strings */
"Created:": "Date de création : ",
"Last accessed:": "Date de dernier accès : ",
"Access count:": "Nombre d'accès : ",
"Permalink:": "Permalien : ",
"My contexts": "Mes cartes",
"Created": "Création",
"Accessed": "Accédé",
"Count": "Accès",
"View": "Visualiser",
"View the selected context": "Visualiser la carte sélectionnée (attention : remplacera la carte courante)",
"Download": "Télécharger",
"Download the selected context": "Télécharger la carte sélectionnée",
"Delete": "Supprimer",
"Delete the selected context": "Supprimer la carte sélectionnée",
"Failed to delete context": "Impossible de supprimer la carte",
"Manage my contexts": "Gérer mes cartes",
"Keywords": "Mots clés",
"comma separated keywords": "mots clés séparés par une virgule",
"Save to metadata": "Créer une métadonnée",
"in group": "dans le groupe",
"The context title is mandatory": "Le titre de la carte est obligatoire",
"There was an error creating the metadata.": "La création de la métadonnée a échoué.",
"Share this map": "Partager cette carte",
"Mobile viewer": "Visualiseur mobile",
"Mobile compatible viewer on sdi.georchestra.org": "Visualiseur mobile sur sdi.georchestra.org",
"Desktop viewer": "Visualiseur web",
"Desktop viewer on sdi.georchestra.org": "Visualiseur web sur sdi.georchestra.org",
"Abstract": "Résumé",
"Context saving": "Sauvegarde de la carte",
"The file is required.": "Un nom de fichier est nécessaire.",
"Context restoring": "Restauration d'une carte",
"<p>Please note that the WMC must be UTF-8 encoded</p>": "<p>Notez que le" +
" fichier de contexte doit être encodé en UTF-8.</p>",
"Load": "Charger",
"Workspace": "Espace de travail",
"Save the map context": "Sauvegarder la carte",
"Load a map context": "Charger une carte",
"Get a permalink": "Obtenir un permalien",
"Permalink": "Permalien",
"Share your map with this URL: ": "Partagez la carte avec l'adresse suivante : ",
/* GEOR_edit.js */
"Req.": "Req.", // requis
"Required": "Requis",
"Not required": "Non requis",
"Synchronization failed.": "Erreur lors de la synchronisation.",
"Edit activated": "Edition activée",
"Hover the feature you wish to edit, or choose \"new feature\" in the edit menu": "Survolez les objets de la couche que vous souhaitez modifier, ou choisissez \"nouvel objet\" dans le menu d'édition de la couche",
/* GeoExt.data.CSW.js */
"no abstract": "pas de résumé"
// no trailing comma
});
GeoExt.Lang.add("fr", {
"GeoExt.ux.FeatureEditorGrid.prototype": {
deleteMsgTitle: "Suppression",
deleteMsg: "Confirmer la suppression de cet objet vectoriel ?",
deleteButtonText: "Supprimer",
deleteButtonTooltip: "Supprimer cet objet",
cancelMsgTitle: "Annulation",
cancelMsg: "L'objet a été modifié localement. Confirmer l'abandon des changements ?",
cancelButtonText: "Annuler",
cancelButtonTooltip: "Abandonner les modifications en cours",
saveButtonText: "Enregistrer",
saveButtonTooltip: "Enregistrer les modifications",
nameHeader: "Attribut",
valueHeader: "Valeur"
}
});<|fim▁end|>
|
"Cliquez sur la carte.</div>",
|
<|file_name|>sig-mention-handler.go<|end_file_name|><|fim▁begin|>/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungers
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/test-infra/mungegithub/features"
"k8s.io/test-infra/mungegithub/github"
"k8s.io/test-infra/mungegithub/options"
)
type SigMentionHandler struct{}
func init() {
h := &SigMentionHandler{}
RegisterMungerOrDie(h)
}
// Name is the name usable in --pr-mungers
func (*SigMentionHandler) Name() string { return "sig-mention-handler" }
// RequiredFeatures is a slice of 'features' that must be provided
func (*SigMentionHandler) RequiredFeatures() []string {
return []string{}
}
// Initialize will initialize the munger
func (s *SigMentionHandler) Initialize(config *github.Config, features *features.Features) error {
return nil
}
// EachLoop is called at the start of every munge loop
func (*SigMentionHandler) EachLoop() error { return nil }
// RegisterOptions registers options for this munger; returns any that require a restart when changed.
func (*SigMentionHandler) RegisterOptions(opts *options.Options) sets.String { return nil }
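// HasSigLabel reports whether the issue already carries a sig/ or committee/ label.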
func (*SigMentionHandler) HasSigLabel(obj *github.MungeObject) bool {
labels := obj.Issue.Labels
for i := range labels {
if labels[i].Name != nil && strings.HasPrefix(*labels[i].Name, "sig/") {
return true
}
if labels[i].Name != nil && strings.HasPrefix(*labels[i].Name, "committee/") {
return true
}
}
return false
}
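// HasNeedsSigLabel reports whether the issue carries the needs-sig label.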
func (*SigMentionHandler) HasNeedsSigLabel(obj *github.MungeObject) bool {
labels := obj.Issue.Labels
for i := range labels {
if labels[i].Name != nil && strings.Compare(*labels[i].Name, "needs-sig") == 0 {
return true
}
}
return false
}
// Munge is the workhorse notifying issue owner to add a @kubernetes/sig mention if there is none
// The algorithm:
// (1) return if it is a PR and/or the issue is closed
// (2) find if the issue has a sig label
// (3) find if the issue has a needs-sig label
// (4) if the issue has both the sig and needs-sig labels, remove the needs-sig label
// (5) if the issue has none of the labels, add the needs-sig label and comment
// (6) if the issue has only the sig label, do nothing
// (7) if the issue has only the needs-sig label, do nothing
func (s *SigMentionHandler) Munge(obj *github.MungeObject) {
if obj.Issue == nil || obj.IsPR() || obj.Issue.State == nil || *obj.Issue.State == "closed" {
return
}
hasSigLabel := s.HasSigLabel(obj)
hasNeedsSigLabel := s.HasNeedsSigLabel(obj)
if hasSigLabel && hasNeedsSigLabel {
if err := obj.RemoveLabel("needs-sig"); err != nil {
glog.Errorf("failed to remove needs-sig label for issue #%v", *obj.Issue.Number)
}
} else if !hasSigLabel && !hasNeedsSigLabel {
if err := obj.AddLabel("needs-sig"); err != nil {<|fim▁hole|> glog.Errorf("failed to add needs-sig label for issue #%v", *obj.Issue.Number)
return
}
msg := fmt.Sprintf(`@%s
There are no sig labels on this issue. Please [add a sig label](https://github.com/kubernetes/test-infra/blob/master/commands.md) by:
1. mentioning a sig: `+"`@kubernetes/sig-<group-name>-<group-suffix>`"+`
e.g., `+"`@kubernetes/sig-contributor-experience-<group-suffix>`"+` to notify the contributor experience sig, OR
2. specifying the label manually: `+"`/sig <label>`"+`
e.g., `+"`/sig scalability`"+` to apply the `+"`sig/scalability`"+` label
Note: Method 1 will trigger an email to the group. You can find the group list [here](https://github.com/kubernetes/community/blob/master/sig-list.md) and label list [here](https://github.com/kubernetes/kubernetes/labels).
The `+"`<group-suffix>`"+` in the method 1 has to be replaced with one of these: _**bugs, feature-requests, pr-reviews, test-failures, proposals**_`, *obj.Issue.User.Login)
if err := obj.WriteComment(msg); err != nil {
glog.Errorf("failed to leave comment for %s that issue #%v needs sig label", *obj.Issue.User.Login, *obj.Issue.Number)
}
}
}<|fim▁end|>
| |
<|file_name|>ErpInventory.py<|end_file_name|><|fim▁begin|># Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#<|fim▁hole|># IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ErpInventory(IdentifiedObject):
"""Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location.Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location.
"""
def __init__(self, Asset=None, status=None, *args, **kw_args):
"""Initialises a new 'ErpInventory' instance.
@param Asset:
@param status:
"""
self._Asset = None
self.Asset = Asset
self.status = status
super(ErpInventory, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["Asset", "status"]
_many_refs = []
def getAsset(self):
return self._Asset
def setAsset(self, value):
if self._Asset is not None:
self._Asset._ErpInventory = None
self._Asset = value
if self._Asset is not None:
self._Asset.ErpInventory = None
self._Asset._ErpInventory = self
Asset = property(getAsset, setAsset)
status = None<|fim▁end|>
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn find() -> usize {
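    // Project Euler #2: sum the even-valued Fibonacci terms below four million.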
let limit = 4_000_000usize;
let mut fibonachi: Vec<usize> = Vec::new();
fibonachi.push(1);
fibonachi.push(2);
let mut i = 2usize;
loop {
let k = fibonachi[i - 2] + fibonachi[i - 1];
println!("{} - {}", i, k);
if k < limit {
fibonachi.push(k);
i += 1;
} else {
break;
}
}
let sum = fibonachi.iter().map(|&x| {if x % 2 == 0 {x} else {0} }).sum();
sum
}
<|fim▁hole|>
#[test]
fn it_works() {
assert!(find()==4613732usize);
}<|fim▁end|>
|
fn main() {
let sum = find();
println!("sum of even = {}", sum);
}
|