// Javascript functions for Nead/Unicentro course format

M.course = M.course || {};
M.course.format = M.course.format || {};

/**
 * Get sections config for this format
 *
 * The section structure is:
 * <ul class="nead_unicentro">
 *   <li class="section">...</li>
 *   <li class="section">...</li>
 *   ...
 * </ul>
 *
 * @return {object} section list configuration
 */
M.course.format.get_config = function() {
    return {
        container_node : 'ul',
        container_class : 'nead_unicentro',
        section_node : 'li',
        section_class : 'section'
    };
};

/**
 * Swap section
 *
 * @param {YUI} Y YUI3 instance
 * @param {string} node1 node to swap to
 * @param {string} node2 node to swap with
 * @return {NodeList} section list
 */
M.course.format.swap_sections = function(Y, node1, node2) {
    var CSS = {
        COURSECONTENT : 'course-content',
        SECTIONADDMENUS : 'section_add_menus'
    };

    var sectionlist = Y.Node.all('.' + CSS.COURSECONTENT + ' ' + M.course.format.get_section_selector(Y));
    // Swap menus.
    sectionlist.item(node1).one('.' + CSS.SECTIONADDMENUS)
        .swap(sectionlist.item(node2).one('.' + CSS.SECTIONADDMENUS));
};

/**
 * Process sections after ajax response
 *
 * @param {YUI} Y YUI3 instance
 * @param {NodeList} sectionlist list of section nodes
 * @param {array} response ajax response
 * @param {string} sectionfrom first affected section
 * @param {string} sectionto last affected section
 * @return void
 */
M.course.format.process_sections = function(Y, sectionlist, response, sectionfrom, sectionto) {
    var CSS = {
        SECTIONNAME : 'sectionname'
    },
    SELECTORS = {
        SECTIONLEFTSIDE : '.left .section-handle img'
    };

    if (response.action == 'move') {
        // If moving up, swap 'sectionfrom' and 'sectionto' so that the loop operates.
        if (sectionfrom > sectionto) {
            var temp = sectionto;
            sectionto = sectionfrom;
            sectionfrom = temp;
        }

        // Update titles and move icons in all affected sections.
        var ele, str, stridx, newstr;
        for (var i = sectionfrom; i <= sectionto; i++) {
            // Update section title.
            sectionlist.item(i).one('.' + CSS.SECTIONNAME).setContent(response.sectiontitles[i]);
            // Update move icon.
            ele = sectionlist.item(i).one(SELECTORS.SECTIONLEFTSIDE);
            str = ele.getAttribute('alt');
            stridx = str.lastIndexOf(' ');
            newstr = str.substr(0, stridx + 1) + i;
            ele.setAttribute('alt', newstr);
            ele.setAttribute('title', newstr); // For Firefox, as 'alt' is not refreshed.
        }
    }
};
import * as tslib_1 from "tslib";
import * as React from 'react';
import { StyledIconBase } from '../../StyledIconBase';

export var Dai = React.forwardRef(function (props, ref) {
    var attrs = {
        "fill": "currentColor",
    };
    return (React.createElement(StyledIconBase, tslib_1.__assign({ iconAttrs: attrs, iconVerticalAlign: "middle", iconViewBox: "0 0 32 32" }, props, { ref: ref }),
        React.createElement("path", { d: "M16 32C7.163 32 0 24.837 0 16S7.163 0 16 0s16 7.163 16 16-7.163 16-16 16zm10.633-16.183L15.817 5 5 15.817l10.817 3.996 10.816-3.996zM8.364 14.9l7.333-7.498s7.169 7.333 7.471 7.48c.303.146-4.931 0-4.931 0l-2.42-2.475-2.448 2.493H8.364zm7.453 5.674L5 16.605l10.817 10.028L26.633 16.55l-10.816 4.024z", key: "k0" })));
});
Dai.displayName = 'Dai';

export var DaiDimensions = { height: 32, width: 32 };
#!/usr/bin/env python3

import argparse
import asyncio
import logging
import os
import socket
import ssl

import capnp
import thread_capnp

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

this_dir = os.path.dirname(os.path.abspath(__file__))


class ExampleImpl(thread_capnp.Example.Server):
    "Implementation of the Example threading Cap'n Proto interface."

    def subscribeStatus(self, subscriber, **kwargs):
        return capnp.getTimer().after_delay(10**9) \
            .then(lambda: subscriber.status(True)) \
            .then(lambda _: self.subscribeStatus(subscriber))

    def longRunning(self, **kwargs):
        return capnp.getTimer().after_delay(1 * 10**9)

    def alive(self, **kwargs):
        return True


class Server:
    async def myreader(self):
        while self.retry:
            try:
                # Must be a wait_for so we don't block on read()
                data = await asyncio.wait_for(
                    self.reader.read(4096),
                    timeout=0.1
                )
            except asyncio.TimeoutError:
                logger.debug("myreader timeout.")
                continue
            except Exception as err:
                logger.error("Unknown myreader err: %s", err)
                return False
            await self.server.write(data)
        logger.debug("myreader done.")
        return True

    async def mywriter(self):
        while self.retry:
            try:
                # Must be a wait_for so we don't block on read()
                data = await asyncio.wait_for(
                    self.server.read(4096),
                    timeout=0.1
                )
                self.writer.write(data.tobytes())
            except asyncio.TimeoutError:
                logger.debug("mywriter timeout.")
                continue
            except Exception as err:
                logger.error("Unknown mywriter err: %s", err)
                return False
        logger.debug("mywriter done.")
        return True

    async def myserver(self, reader, writer):
        # Start TwoPartyServer using TwoWayPipe (only requires bootstrap)
        self.server = capnp.TwoPartyServer(bootstrap=ExampleImpl())
        self.reader = reader
        self.writer = writer
        self.retry = True

        # Assemble reader and writer tasks, run in the background
        coroutines = [self.myreader(), self.mywriter()]
        tasks = asyncio.gather(*coroutines, return_exceptions=True)

        while True:
            self.server.poll_once()
            # Check to see if reader has been sent an eof (disconnect)
            if self.reader.at_eof():
                self.retry = False
                break
            await asyncio.sleep(0.01)

        # Wait for reader/writer to finish (prevent possible resource leaks)
        await tasks


async def new_connection(reader, writer):
    server = Server()
    await server.myserver(reader, writer)


def parse_args():
    parser = argparse.ArgumentParser(
        usage='Runs the server bound to the given address/port ADDRESS.')
    parser.add_argument("address", help="ADDRESS:PORT")
    return parser.parse_args()


async def main():
    address = parse_args().address
    host, port = address.split(':')

    # Setup SSL context
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ctx.load_cert_chain(os.path.join(this_dir, 'selfsigned.cert'),
                        os.path.join(this_dir, 'selfsigned.key'))

    # Handle both IPv4 and IPv6 cases
    try:
        print("Try IPv4")
        server = await asyncio.start_server(
            new_connection,
            host, port,
            ssl=ctx,
            family=socket.AF_INET,
        )
    except Exception:
        print("Try IPv6")
        server = await asyncio.start_server(
            new_connection,
            host, port,
            ssl=ctx,
            family=socket.AF_INET6,
        )

    async with server:
        await server.serve_forever()


if __name__ == '__main__':
    asyncio.run(main())
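The file above only shows the server side. For orientation, here is a minimal hedged sketch of the client-side call sequence against the same Example interface; it assumes the same thread_capnp schema, a pycapnp version whose promises support a_wait() in asyncio mode, and that a capnp.TwoPartyClient has already been wired to the SSL socket using the same read/write relay pattern as Server.myreader/mywriter above (transport wiring elided).

import capnp
import thread_capnp

async def run_client(client):
    # client: a capnp.TwoPartyClient already connected to the server above.
    cap = client.bootstrap().cast_as(thread_capnp.Example)
    alive = await cap.alive().a_wait()   # -> True, per ExampleImpl.alive
    await cap.longRunning().a_wait()     # resolves after ~1 s server-side
    print("server alive:", alive)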
import unittest

from scaffold.iam.role_sync import inline_cf_role_re, RoleSync


def mock_role(name):
    # A bare function works as a stand-in role object; we only need
    # something we can attach a .name attribute to.
    def mock():
        lambda: 0
    mock.name = name
    return mock


class TestRoleSync(unittest.TestCase):

    def test_inline_cf_role_names(self):
        cf_name = 'Stack-Name-RoleNameRole-1BLAH7BLAH1BLAH'
        self.assertIsNotNone(inline_cf_role_re.match(cf_name))

    def test_not_inline_cf_role_names(self):
        not_cf_name = 'Regular-Role-Thing'
        self.assertIsNone(inline_cf_role_re.match(not_cf_name))

    def test_filter_current_roles_should_exclude_aws_roles(self):
        role_names = ['Inline-CloudFormationRole-BLAHBLAH',
                      'aws-role',
                      'MyRole'
                      ]
        roles = [mock_role(rn) for rn in role_names]
        filtered = RoleSync._filter_current_roles(roles)
        self.assertEqual(1, len(filtered))
        self.assertEqual('MyRole', filtered[0].name)
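The actual inline_cf_role_re is imported from scaffold.iam.role_sync and is not shown in this file. A hypothetical pattern consistent with the two assertions above could look like this sketch; the exact regex in the library may differ.

import re

# Hypothetical: CloudFormation-generated inline role names end in a
# "...Role-" segment followed by an uppercase alphanumeric suffix.
inline_cf_role_re = re.compile(r'^.+-\w+Role-[A-Z0-9]+$')

assert inline_cf_role_re.match('Stack-Name-RoleNameRole-1BLAH7BLAH1BLAH')
assert inline_cf_role_re.match('Regular-Role-Thing') is None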
"""Constants for the connection to a Visonic PowerMax or PowerMaster Alarm System.""" from enum import Enum DOMAIN = "visonic" PLATFORMS = ["alarm_control_panel", "binary_sensor", "switch"] VISONIC_UNIQUE_NAME = "Visonic Alarm" ALARM_PANEL_ENTITY = "alarm_control_panel.visonic_alarm" # Constants for storing data in hass[DOMAIN] DOMAINCLIENT = f"{DOMAIN}_client" DOMAINDATA = f"{DOMAIN}_data" DOMAINCLIENTTASK = f"{DOMAIN}_client_task" # Constants for sending a persistent notification to the frontend when there is a fault NOTIFICATION_ID = f"{DOMAIN}_notification" NOTIFICATION_TITLE = "Visonic Alarm Panel" # undo listener UNDO_VISONIC_UPDATE_LISTENER = f"{DOMAIN}_undo_update_listener" # Dispatcher name when the underlying pyvisonic library has got a panel, X10 or sensor change VISONIC_UPDATE_STATE_DISPATCHER = f"{DOMAIN}_update_state_dispatcher" # The HA bus events that this integration can generate ALARM_PANEL_CHANGE_EVENT = f"{DOMAIN}_alarm_panel_state_update" ALARM_PANEL_LOG_FILE_COMPLETE = f"{DOMAIN}_alarm_panel_event_log_complete" ALARM_PANEL_LOG_FILE_ENTRY = f"{DOMAIN}_alarm_panel_event_log_entry" # Default connection details (connection can be one of Ethernet, USB, RS232) DEFAULT_DEVICE_HOST = "127.0.0.1" DEFAULT_DEVICE_PORT = 30000 DEFAULT_DEVICE_USB = "/dev/ttyUSB1" DEFAULT_DEVICE_BAUD = 9600 # Event processing for the log files from the panel. These are the control flow names for the config variables. CONF_LOG_EVENT = "panellog_logentry_event" CONF_LOG_CSV_TITLE = "panellog_csv_add_title_row" CONF_LOG_XML_FN = "panellog_xml_filename" CONF_LOG_CSV_FN = "panellog_csv_filename" CONF_LOG_DONE = "panellog_complete_event" CONF_LOG_REVERSE = "panellog_reverse_order" CONF_LOG_MAX_ENTRIES = "panellog_max_entries" # Supplement the HA attributes with a bypass, this is for individual sensors in the service call. It is used as a boolean. 
ATTR_BYPASS = "bypass" # What notifications to send to the HA Frontend CONF_ALARM_NOTIFICATIONS = "panel_state_notifications" # settings that are used for creation CONF_DEVICE_TYPE = "type" CONF_DEVICE_BAUD = "baud" CONF_HOST = "host" CONF_PORT = "port" CONF_PATH = "path" CONF_EXCLUDE_SENSOR = "exclude_sensor" CONF_EXCLUDE_X10 = "exclude_x10" CONF_DOWNLOAD_CODE = "download_code" CONF_FORCE_AUTOENROLL = "force_autoenroll" CONF_AUTO_SYNC_TIME = "sync_time" CONF_LANGUAGE = "language" CONF_FORCE_STANDARD = "force_standard" # settings than can be modified CONF_ENABLE_REMOTE_ARM = "allow_remote_arm" CONF_ENABLE_REMOTE_DISARM = "allow_remote_disarm" CONF_ENABLE_SENSOR_BYPASS = "allow_sensor_bypass" CONF_OVERRIDE_CODE = "override_code" CONF_ARM_CODE_AUTO = "arm_without_usercode" CONF_FORCE_KEYPAD = "force_numeric_keypad" CONF_INSTANT_ARM_AWAY = "arm_away_instant" CONF_INSTANT_ARM_HOME = "arm_home_instant" CONF_MOTION_OFF_DELAY = "motion_off" CONF_SIREN_SOUNDING = "siren_sounding" # Temporary B0 Config Items CONF_B0_ENABLE_MOTION_PROCESSING = "b0_enable_motion_processing" CONF_B0_MIN_TIME_BETWEEN_TRIGGERS = "b0_min_time_between_triggers" CONF_B0_MAX_TIME_FOR_TRIGGER_EVENT = "b0_max_time_for_trigger_event" PIN_REGEX = "^[0-9]{4}$" class AvailableNotifications(str, Enum): ALWAYS = 'always' SIREN = 'sirensounding' TAMPER = 'paneltamper' RESET = 'panelreset' INVALID_PIN = 'invalidpin' PANEL_OPERATION = 'paneloperation' CONNECTION_PROBLEM = 'connectionproblem' BYPASS_PROBLEM = 'bypassproblem' EVENTLOG_PROBLEM = 'eventlogproblem' COMMAND_NOT_SENT = 'commandnotsent' AvailableNotificationConfig = { AvailableNotifications.SIREN : "Siren Sounding", AvailableNotifications.TAMPER : "Panel Tamper", AvailableNotifications.RESET : "Panel System Reset", AvailableNotifications.INVALID_PIN : "Code Rejected By Panel", AvailableNotifications.PANEL_OPERATION : "Panel Operation", AvailableNotifications.CONNECTION_PROBLEM : "Connection Problems", AvailableNotifications.BYPASS_PROBLEM : "Sensor Bypass Problems", AvailableNotifications.EVENTLOG_PROBLEM : "Event Log Problems", AvailableNotifications.COMMAND_NOT_SENT : "Command Not Sent To Panel" }
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value

try:
    # Use the C LibYAML parser if available, rather than the Python parser.
    # It's much faster.
    from yaml import CSafeLoader as SafeLoader
    from yaml import CDumper as Dumper
except ImportError:
    from yaml import SafeLoader, Dumper

try:
    import serial
except ImportError:
    print("Install pyserial python module with pip to use --device-testing option.")

try:
    from tabulate import tabulate
except ImportError:
    print("Install tabulate python module with pip to use --device-testing option.")

try:
    import psutil
except ImportError:
    print("Install psutil python module with pip to run in Qemu.")

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    sys.exit("$ZEPHYR_BASE environment variable undefined")

# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
                                "python-devicetree", "src"))
from devicetree import edtlib  # pylint: disable=unused-import

# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))

import scl
import expr_parser

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)


class ExecutionCounter(object):
    def __init__(self, total=0):
        self._done = Value('i', 0)
        self._passed = Value('i', 0)
        self._skipped_configs = Value('i', 0)
        self._skipped_runtime = Value('i', 0)
        self._skipped_cases = Value('i', 0)
        self._error = Value('i', 0)
        self._failed = Value('i', 0)
        self._total = Value('i', total)
        self._cases = Value('i', 0)

        self.lock = Lock()

    @property
    def cases(self):
        with self._cases.get_lock():
            return self._cases.value

    @cases.setter
    def cases(self, value):
        with self._cases.get_lock():
            self._cases.value = value

    @property
    def skipped_cases(self):
        with self._skipped_cases.get_lock():
            return self._skipped_cases.value

    @skipped_cases.setter
    def skipped_cases(self, value):
        with self._skipped_cases.get_lock():
            self._skipped_cases.value = value

    @property
    def error(self):
        with self._error.get_lock():
            return self._error.value

    @error.setter
    def error(self, value):
        with self._error.get_lock():
            self._error.value = value

    @property
    def done(self):
        with self._done.get_lock():
            return self._done.value

    @done.setter
    def done(self, value):
        with self._done.get_lock():
            self._done.value = value

    @property
    def passed(self):
        with self._passed.get_lock():
            return self._passed.value

    @passed.setter
    def passed(self, value):
        with self._passed.get_lock():
            self._passed.value = value

    @property
    def skipped_configs(self):
        with self._skipped_configs.get_lock():
            return self._skipped_configs.value

    @skipped_configs.setter
    def skipped_configs(self, value):
        with self._skipped_configs.get_lock():
            self._skipped_configs.value = value

    @property
    def skipped_runtime(self):
        with self._skipped_runtime.get_lock():
            return self._skipped_runtime.value

    @skipped_runtime.setter
    def skipped_runtime(self, value):
        with self._skipped_runtime.get_lock():
            self._skipped_runtime.value = value

    @property
    def failed(self):
        with self._failed.get_lock():
            return self._failed.value

    @failed.setter
    def failed(self, value):
        with self._failed.get_lock():
            self._failed.value = value

    @property
    def total(self):
        with self._total.get_lock():
            return self._total.value
class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

        Cache Type    Python type
        ----------    -------------------------------------------
        FILEPATH      str
        PATH          str
        STRING        str OR list of str (if ';' is in the value)
        BOOL          bool
        INTERNAL      str OR list of str (if ';' is in the value)
        ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?)                                  # name
         :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL)     # type
         =(?P<value>.*)                                    # value
        ''', re.X)

    @classmethod
    def _to_bool(cls, val):
        # Convert a CMake BOOL string into a Python bool.
        #
        #   "True if the constant is 1, ON, YES, TRUE, Y, or a
        #   non-zero number. False if the constant is 0, OFF, NO,
        #   FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
        #   the suffix -NOTFOUND. Named boolean constants are
        #   case-insensitive. If the argument is not one of these
        #   constants, it is treated as a variable."
        #
        # https://cmake.org/cmake/help/v3.0/command/if.html
        val = val.upper()
        if val in ('ON', 'YES', 'TRUE', 'Y'):
            return 1
        elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return 0
        elif val.endswith('-NOTFOUND'):
            return 0
        else:
            try:
                v = int(val)
                return v != 0
            except ValueError as exc:
                raise ValueError('invalid bool {}'.format(val)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith('//') or line.startswith('#'):
            return None

        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None

        m = cls.CACHE_ENTRY.match(line)
        if not m:
            return None

        name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ['STRING', 'INTERNAL']:
            # If the value is a CMake list (i.e. is a string which
            # contains a ';'), convert to a Python list.
            if ';' in value:
                value = value.split(';')

        return CMakeCacheEntry(name, value)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        fmt = 'CMakeCacheEntry(name={}, value={})'
        return fmt.format(self.name, self.value)


class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        entries = []
        with open(cache_file, 'r') as cache:
            for line_no, line in enumerate(cache):
                entry = CMakeCacheEntry.from_line(line, line_no)
                if entry:
                    entries.append(entry)
        self._entries = OrderedDict((e.name, e) for e in entries)

    def get(self, name, default=None):
        entry = self._entries.get(name)
        if entry is not None:
            return entry.value
        else:
            return default

    def get_list(self, name, default=None):
        if default is None:
            default = []
        entry = self._entries.get(name)
        if entry is not None:
            value = entry.value
            if isinstance(value, list):
                return value
            elif isinstance(value, str):
                return [value] if value else []
            else:
                msg = 'invalid value {} type {}'
                raise RuntimeError(msg.format(value, type(value)))
        else:
            return default

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        return iter(self._entries.values())
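# Illustrative examples (hypothetical values) of how the two classes above
# behave on typical CMakeCache.txt lines:
#
#   entry = CMakeCacheEntry.from_line('CONFIG_XIP:BOOL=ON', line_no=1)
#   entry.name   -> 'CONFIG_XIP'
#   entry.value  -> 1 (CMake BOOL coerced by _to_bool)
#
#   entry = CMakeCacheEntry.from_line('FOO:STRING=a;b;c', line_no=2)
#   entry.value  -> ['a', 'b', 'c'] (';'-separated CMake list)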
class TwisterException(Exception):
    pass


class TwisterRuntimeError(TwisterException):
    pass


class ConfigurationError(TwisterException):
    def __init__(self, cfile, message):
        TwisterException.__init__(self, cfile + ": " + message)


class BuildError(TwisterException):
    pass


class ExecutionError(TwisterException):
    pass


class HarnessImporter:

    def __init__(self, name):
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
        module = __import__("harness")
        if name:
            my_class = getattr(module, name)
        else:
            my_class = getattr(module, "Test")

        self.instance = my_class()


class Handler:
    def __init__(self, instance, type_str="build"):
        """Constructor
        """
        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None
        self.call_make_run = False

        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)
        self.generator = None
        self.generator_cmd = None

        self.args = []
        self.terminated = False

    def set_state(self, state, duration):
        self.state = state
        self.duration = duration

    def get_state(self):
        ret = (self.state, self.duration)
        return ret

    def record(self, harness):
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)

    def terminate(self, proc):
        # Encapsulate terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        for child in psutil.Process(proc.pid).children(recursive=True):
            try:
                os.kill(child.pid, signal.SIGTERM)
            except ProcessLookupError:
                pass
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True


class BinaryHandler(Handler):
    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        self.call_west_flash = False

        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.ubsan = False
        self.coverage = False

    def try_kill_process_by_pid(self):
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                pass

    def _output_reader(self, proc):
        self.line = proc.stdout.readline()

    def _output_handler(self, proc, harness):
        if harness.is_pytest:
            harness.handle(None)
            return

        log_out_fp = open(self.log, "wt")
        timeout_extended = False
        timeout_time = time.time() + self.timeout
        while True:
            this_timeout = timeout_time - time.time()
            if this_timeout < 0:
                break
            reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
            reader_t.start()
            reader_t.join(this_timeout)
            if not reader_t.is_alive():
                line = self.line
                logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
                log_out_fp.write(line.decode('utf-8'))
                log_out_fp.flush()
                harness.handle(line.decode('utf-8').rstrip())
                if harness.state:
                    if not timeout_extended or harness.capture_coverage:
                        timeout_extended = True
                        if harness.capture_coverage:
                            timeout_time = time.time() + 30
                        else:
                            timeout_time = time.time() + 2
            else:
                reader_t.join(0)
                break
        try:
            # POSIX arch based ztests end on their own,
            # so let's give it up to 100ms to do so
            proc.wait(0.1)
        except subprocess.TimeoutExpired:
            self.terminate(proc)

        log_out_fp.close()

    def handle(self):

        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        elif self.call_west_flash:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
        else:
            command = [self.binary]

        run_valgrind = False
        if self.valgrind and shutil.which("valgrind"):
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log"
                       ] + command
            run_valgrind = True

        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)

        start_time = time.time()

        env = os.environ.copy()
        if self.asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.lsan:
                env["ASAN_OPTIONS"] += "detect_leaks=0"

        if self.ubsan:
            env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
                                   env.get("UBSAN_OPTIONS", "")

        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
            t.start()
            t.join()
            if t.is_alive():
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode
            self.try_kill_process_by_pid()

        handler_time = time.time() - start_time

        if self.coverage:
            subprocess.call(["GCOV_PREFIX=" + self.build_dir,
                             "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)

        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"])

        if harness.is_pytest:
            harness.pytest_run(self.log)
        self.instance.results = harness.tests

        if not self.terminated and self.returncode != 0:
            # When a process is killed, the default handler returns 128 + SIGTERM
            # so in that case the return code itself is not meaningful
            self.set_state("failed", handler_time)
            self.instance.reason = "Failed"
        elif run_valgrind and self.returncode == 2:
            self.set_state("failed", handler_time)
            self.instance.reason = "Valgrind error"
        elif harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state("timeout", handler_time)
            self.instance.reason = "Timeout"

        self.record(harness)


class DeviceHandler(Handler):

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        if harness.is_pytest:
            harness.handle(None)
            return

        log_out_fp = open(self.log, "wt")

        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]

        if self.coverage:
            # Set capture_coverage to True to indicate that right after
            # test results we should get coverage data, otherwise we exit
            # from the test.
            harness.capture_coverage = True

        ser.flush()

        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)

            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                ser.close()
                break

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            if harness.state:
                if not harness.capture_coverage:
                    ser.close()
                    break

        log_out_fp.close()

    def device_is_available(self, instance):
        device = instance.platform.name
        fixture = instance.testcase.harness_config.get("fixture")
        for d in self.suite.duts:
            if fixture and fixture not in d.fixtures:
                continue
            if d.platform != device or not (d.serial or d.serial_pty):
                continue
            d.lock.acquire()
            avail = False
            if d.available:
                d.available = 0
                d.counter += 1
                avail = True
            d.lock.release()
            if avail:
                return d

        return None

    def make_device_available(self, serial):
        for d in self.suite.duts:
            if d.serial == serial or d.serial_pty:
                d.available = 1

    @staticmethod
    def run_custom_script(script, timeout):
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())

            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        out_state = "failed"
        runner = None

        hardware = self.device_is_available(self.instance)
        while not hardware:
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)
            hardware = self.device_is_available(self.instance)

        runner = hardware.runner or self.suite.west_runner
        serial_pty = hardware.serial_pty

        ser_pty_process = None
        if serial_pty:
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty),
                                                   stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return

            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware.serial

        logger.debug("Using serial device {}".format(serial_device))

        if (self.suite.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []

            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.suite.west_flash and self.suite.west_flash != []:
                command_extra_args.extend(self.suite.west_flash.split(','))

            if runner:
                command.append("--runner")
                command.append(runner)

                board_id = hardware.probe_id or hardware.id
                product = hardware.product
                if board_id is not None:
                    if runner == "pyocd":
                        command_extra_args.append("--board-id")
                        command_extra_args.append(board_id)
                    elif runner == "nrfjprog":
                        command_extra_args.append("--snr")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))

            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]

        pre_script = hardware.pre_script
        post_flash_script = hardware.post_flash_script
        post_script = hardware.post_script

        if pre_script:
            self.run_custom_script(pre_script, 30)

        try:
            ser = serial.Serial(
                serial_device,
                baudrate=115200,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            self.set_state("failed", 0)
            self.instance.reason = "Failed"
            logger.error("Serial device error: %s" % (str(e)))

            if serial_pty and ser_pty_process:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

            self.make_device_available(serial_device)
            return

        ser.flush()

        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()

        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()

        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    logger.debug(stdout.decode())

                    if proc.returncode != 0:
                        self.instance.reason = "Device issue (Flash?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                        os.write(write_pipe, b'x')  # halt the thread
                        out_state = "flash_error"
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread

        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)

        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
            out_state = "timeout"
"timeout" if ser.isOpen(): ser.close() if serial_pty: ser_pty_process.terminate() outs, errs = ser_pty_process.communicate() logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs)) os.close(write_pipe) os.close(read_pipe) handler_time = time.time() - start_time if out_state in ["timeout", "flash_error"]: for c in self.instance.testcase.cases: if c not in harness.tests: harness.tests[c] = "BLOCK" if out_state == "timeout": self.instance.reason = "Timeout" elif out_state == "flash_error": self.instance.reason = "Flash error" if harness.is_pytest: harness.pytest_run(self.log) self.instance.results = harness.tests # sometimes a test instance hasn't been executed successfully with an # empty dictionary results, in order to include it into final report, # so fill the results as BLOCK if self.instance.results == {}: for k in self.instance.testcase.cases: self.instance.results[k] = 'BLOCK' if harness.state: self.set_state(harness.state, handler_time) if harness.state == "failed": self.instance.reason = "Failed" else: self.set_state(out_state, handler_time) if post_script: self.run_custom_script(post_script, 30) self.make_device_available(serial_device) self.record(harness) class QEMUHandler(Handler): """Spawns a thread to monitor QEMU output from pipes We pass QEMU_PIPE to 'make run' and monitor the pipes for output. We need to do this as once qemu starts, it runs forever until killed. Test cases emit special messages to the console as they run, we check for these to collect whether the test passed or failed. """ def __init__(self, instance, type_str): """Constructor @param instance Test instance """ super().__init__(instance, type_str) self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo") self.pid_fn = os.path.join(instance.build_dir, "qemu.pid") if "ignore_qemu_crash" in instance.testcase.tags: self.ignore_qemu_crash = True self.ignore_unexpected_eof = True else: self.ignore_qemu_crash = False self.ignore_unexpected_eof = False @staticmethod def _get_cpu_time(pid): """get process CPU time. The guest virtual time in QEMU icount mode isn't host time and it's maintained by counting guest instructions, so we use QEMU process exection time to mostly simulate the time of guest OS. 
""" proc = psutil.Process(pid) cpu_time = proc.cpu_times() return cpu_time.user + cpu_time.system @staticmethod def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness, ignore_unexpected_eof=False): fifo_in = fifo_fn + ".in" fifo_out = fifo_fn + ".out" # These in/out nodes are named from QEMU's perspective, not ours if os.path.exists(fifo_in): os.unlink(fifo_in) os.mkfifo(fifo_in) if os.path.exists(fifo_out): os.unlink(fifo_out) os.mkfifo(fifo_out) # We don't do anything with out_fp but we need to open it for # writing so that QEMU doesn't block, due to the way pipes work out_fp = open(fifo_in, "wb") # Disable internal buffering, we don't # want read() or poll() to ever block if there is data in there in_fp = open(fifo_out, "rb", buffering=0) log_out_fp = open(logfile, "wt") start_time = time.time() timeout_time = start_time + timeout p = select.poll() p.register(in_fp, select.POLLIN) out_state = None line = "" timeout_extended = False pid = 0 if os.path.exists(pid_fn): pid = int(open(pid_fn).read()) while True: this_timeout = int((timeout_time - time.time()) * 1000) if this_timeout < 0 or not p.poll(this_timeout): try: if pid and this_timeout > 0: #there's possibility we polled nothing because #of not enough CPU time scheduled by host for #QEMU process during p.poll(this_timeout) cpu_time = QEMUHandler._get_cpu_time(pid) if cpu_time < timeout and not out_state: timeout_time = time.time() + (timeout - cpu_time) continue except ProcessLookupError: out_state = "failed" break if not out_state: out_state = "timeout" break if pid == 0 and os.path.exists(pid_fn): pid = int(open(pid_fn).read()) if harness.is_pytest: harness.handle(None) out_state = harness.state break try: c = in_fp.read(1).decode("utf-8") except UnicodeDecodeError: # Test is writing something weird, fail out_state = "unexpected byte" break if c == "": # EOF, this shouldn't happen unless QEMU crashes if not ignore_unexpected_eof: out_state = "unexpected eof" break line = line + c if c != "\n": continue # line contains a full line of data output from QEMU log_out_fp.write(line) log_out_fp.flush() line = line.strip() logger.debug(f"QEMU ({pid}): {line}") harness.handle(line) if harness.state: # if we have registered a fail make sure the state is not # overridden by a false success message coming from the # testsuite if out_state not in ['failed', 'unexpected eof', 'unexpected byte']: out_state = harness.state # if we get some state, that means test is doing well, we reset # the timeout and wait for 2 more seconds to catch anything # printed late. We wait much longer if code # coverage is enabled since dumping this information can # take some time. 
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""

        if harness.is_pytest:
            harness.pytest_run(logfile)
            out_state = harness.state

        handler.record(harness)

        handler_time = time.time() - start_time
        logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")

        if out_state == "timeout":
            handler.instance.reason = "Timeout"
            handler.set_state("failed", handler_time)
        elif out_state == "failed":
            handler.instance.reason = "Failed"
            handler.set_state("failed", handler_time)
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.reason = out_state
            handler.set_state("failed", handler_time)
        else:
            handler.set_state(out_state, handler_time)

        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass

        os.unlink(fifo_in)
        os.unlink(fifo_out)

    def handle(self):
        self.results = {}
        self.run = True

        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.
        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")

        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)

        self.log_fn = self.log

        harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)
        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness,
                                             self.ignore_unexpected_eof))

        self.instance.results = harness.tests
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"])

        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]

        is_timeout = False
        qemu_pid = None

        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)

            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                # sometimes QEMU can't handle SIGTERM signal correctly
                # in that case kill -9 QEMU process directly and leave
                # twister to judge testing result by console output
                is_timeout = True
                self.terminate(proc)
                if harness.state == "passed":
                    self.returncode = 0
                else:
                    self.returncode = proc.returncode
            else:
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
                self.returncode = proc.returncode

            # Need to wait for harness to finish processing
            # output from QEMU. Otherwise it might miss some
            # error messages.
            self.thread.join(0)
            if self.thread.is_alive():
                logger.debug("Timed out while monitoring QEMU output")

            if os.path.exists(self.pid_fn):
                qemu_pid = int(open(self.pid_fn).read())
                os.unlink(self.pid_fn)

        logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")

        if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
            self.set_state("failed", 0)
            if is_timeout:
                self.instance.reason = "Timeout"
            else:
                self.instance.reason = "Exited with {}".format(self.returncode)

    def get_fifo(self):
        return self.fifo_fn


class SizeCalculator:
    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_data_area",
        "k_timer_area",
        "k_mem_slab_area",
        "k_mem_pool_area",
        "sw_isr_table",
        "k_sem_area",
        "k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "k_stack_area",
        "k_msgq_area",
        "k_mbox_area",
        "k_pipe_area",
        "net_if_area",
        "net_if_dev_area",
        "net_l2_area",
        "net_l2_data",
        "k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data",
        "usb_bos_desc",
        "uart_mux",
        'log_backends_sections',
        'log_dynamic_sections',
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        'log_const_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache",
        "devices",
        "k_heap_area",
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "z_object_assignment_area",
        "rodata",
        "net_l2",
        "vector",
        "sw_isr_table",
        "settings_handler_static_area",
        "bt_l2cap_fixed_chan_area",
        "bt_l2cap_br_fixed_chan_area",
        "bt_gatt_service_static_area",
        "vectors",
        "net_socket_register_area",
        "net_ppp_proto",
        "shell_area",
        "tracing_backend_area",
        "ppp_protocol_handler_area",
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary
            The <filename> is parsed by objdump to determine section sizes
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)

        try:
            if magic != b'\x7fELF':
                raise TwisterRuntimeError("%s is not an ELF binary" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        try:
            if is_xip_output.endswith("no symbols"):
                raise TwisterRuntimeError("%s has no symbol information" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)

        self.is_xip = (len(is_xip_output) != 0)

        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections

        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()
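        # Illustrative example (hypothetical values): each section row of
        # "objdump -h" output looks roughly like
        #
        #   Idx Name   Size      VMA       LMA       File off  Algn
        #   0   .text  00001000  00000000  00000000  00000074  2**2
        #
        # so in the loop below words[2] is the hex size, words[3] the
        # virtual (VMA) address, and words[4] the load (LMA) address.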
        for line in objdump_output:
            words = line.split()

            if not words:  # Skip lines that are too short
                continue

            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start with a digit
                continue

            name = words[1]
            if name[0] == '.':  # Skip lines with section names starting with '.'
                continue

            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue

            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)

            # Add section to memory use totals (for both non-XIP and XIP
            # scenarios). Unrecognized section names are not included in
            # the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False

            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})


class TwisterConfigParser:
    """Class to read test case files with semantic checking
    """

    def __init__(self, filename, schema):
        """Instantiate a new TwisterConfigParser object

        @param filename Source .yaml file to read
        """
        self.data = {}
        self.schema = schema
        self.filename = filename
        self.tests = {}
        self.common = {}

    def load(self):
        self.data = scl.yaml_load_verify(self.filename, self.schema)

        if 'tests' in self.data:
            self.tests = self.data['tests']
        if 'common' in self.data:
            self.common = self.data['common']

    def _cast_value(self, value, typestr):
        if isinstance(value, str):
            v = value.strip()
        if typestr == "str":
            return v

        elif typestr == "float":
            return float(value)

        elif typestr == "int":
            return int(value)

        elif typestr == "bool":
            return value

        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            if len(typestr) > 4 and typestr[4] == ":":
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs

        elif typestr.startswith("set"):
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)

        elif typestr.startswith("map"):
            return value
        else:
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % value)
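    # Illustrative examples (hypothetical values) of the typestr conversions
    # performed by _cast_value above:
    #
    #   _cast_value("1 2 3", "list:int")  -> [1, 2, 3]
    #   _cast_value("a b a", "set")       -> {"a", "b"}
    #   _cast_value("0.5", "float")       -> 0.5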
if k == "filter": d[k] = "(%s) and (%s)" % (d[k], v) else: d[k] += " " + v else: d[k] = v for k, kinfo in valid_keys.items(): if k not in d: if "required" in kinfo: required = kinfo["required"] else: required = False if required: raise ConfigurationError( self.filename, "missing required value for '%s' in test '%s'" % (k, name)) else: if "default" in kinfo: default = kinfo["default"] else: default = self._cast_value("", kinfo["type"]) d[k] = default else: try: d[k] = self._cast_value(d[k], kinfo["type"]) except ValueError: raise ConfigurationError( self.filename, "bad %s value '%s' for key '%s' in name '%s'" % (kinfo["type"], d[k], k, name)) return d class Platform: """Class representing metadata for a particular platform Maps directly to BOARD when building""" platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "platform-schema.yaml")) def __init__(self): """Constructor. """ self.name = "" self.twister = True # if no RAM size is specified by the board, take a default of 128K self.ram = 128 self.ignore_tags = [] self.only_tags = [] self.default = False # if no flash size is specified by the board, take a default of 512K self.flash = 512 self.supported = set() self.arch = "" self.type = "na" self.simulation = "na" self.supported_toolchains = [] self.env = [] self.env_satisfied = True self.filter_data = dict() def load(self, platform_file): scp = TwisterConfigParser(platform_file, self.platform_schema) scp.load() data = scp.data self.name = data['identifier'] self.twister = data.get("twister", True) # if no RAM size is specified by the board, take a default of 128K self.ram = data.get("ram", 128) testing = data.get("testing", {}) self.ignore_tags = testing.get("ignore_tags", []) self.only_tags = testing.get("only_tags", []) self.default = testing.get("default", False) # if no flash size is specified by the board, take a default of 512K self.flash = data.get("flash", 512) self.supported = set() for supp_feature in data.get("supported", []): for item in supp_feature.split(":"): self.supported.add(item) self.arch = data['arch'] self.type = data.get('type', "na") self.simulation = data.get('simulation', "na") self.supported_toolchains = data.get("toolchain", []) self.env = data.get("env", []) self.env_satisfied = True for env in self.env: if not os.environ.get(env, None): self.env_satisfied = False def __repr__(self): return "<%s on %s>" % (self.name, self.arch) class DisablePyTestCollectionMixin(object): __test__ = False class TestCase(DisablePyTestCollectionMixin): """Class representing a test application """ def __init__(self, testcase_root, workdir, name): """TestCase constructor. This gets called by TestSuite as it finds and reads test yaml files. Multiple TestCase instances may be generated from a single testcase.yaml, each one corresponds to an entry within that file. We need to have a unique name for every single test case. Since a testcase.yaml can define multiple tests, the canonical name for the test case is <workdir>/<name>. @param testcase_root os.path.abspath() of one of the --testcase-root @param workdir Sub-directory of testcase_root where the .yaml test configuration file was found @param name Name of this test case, corresponding to the entry name in the test case configuration file. For many test cases that just define one test, can be anything and is usually "test". 
        self.source_dir = ""
        self.yamlfile = ""
        self.cases = []
        self.name = self.get_unique(testcase_root, workdir, name)
        self.id = name

        self.type = None
        self.tags = set()
        self.extra_args = None
        self.extra_configs = None
        self.arch_allow = None
        self.arch_exclude = None
        self.skip = False
        self.platform_exclude = None
        self.platform_allow = None
        self.toolchain_exclude = None
        self.toolchain_allow = None
        self.tc_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = -1
        self.depends_on = None
        self.min_flash = -1
        self.extra_sections = None
        self.integration_platforms = []

    @staticmethod
    def get_unique(testcase_root, workdir, name):

        canonical_testcase_root = os.path.realpath(testcase_root)
        if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testcase_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""

        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        check = name.split(".")
        if len(check) < 2:
            raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
                    """
                                   )
        return unique

    @staticmethod
    def scan_file(inf_name):
        suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        stc_regex = re.compile(
            br"^\s*"  # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
            br"\(\s*"
            br"(?P<stc_name>[a-zA-Z0-9_]+)"
            # _setup_teardown() variant has two extra arguments that we ignore
            br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)", # We don't check how it finishes; we don't care re.MULTILINE) suite_run_regex = re.compile( br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)", re.MULTILINE) achtung_regex = re.compile( br"(#ifdef|#endif)", re.MULTILINE) warnings = None with open(inf_name) as inf: if os.name == 'nt': mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ} else: mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ, 'offset': 0} with contextlib.closing(mmap.mmap(**mmap_args)) as main_c: suite_regex_match = suite_regex.search(main_c) if not suite_regex_match: # can't find ztest_test_suite, maybe a client, because # it includes ztest.h return None, None suite_run_match = suite_run_regex.search(main_c) if not suite_run_match: raise ValueError("can't find ztest_run_test_suite") achtung_matches = re.findall( achtung_regex, main_c[suite_regex_match.end():suite_run_match.start()]) if achtung_matches: warnings = "found invalid %s in ztest_test_suite()" \ % ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True)) _matches = re.findall( stc_regex, main_c[suite_regex_match.end():suite_run_match.start()]) for match in _matches: if not match.decode().startswith("test_"): warnings = "Found a test that does not start with test_" matches = [match.decode().replace("test_", "", 1) for match in _matches] return matches, warnings def scan_path(self, path): subcases = [] for filename in glob.glob(os.path.join(path, "src", "*.c*")): try: _subcases, warnings = self.scan_file(filename) if warnings: logger.error("%s: %s" % (filename, warnings)) raise TwisterRuntimeError("%s: %s" % (filename, warnings)) if _subcases: subcases += _subcases except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) for filename in glob.glob(os.path.join(path, "*.c")): try: _subcases, warnings = self.scan_file(filename) if warnings: logger.error("%s: %s" % (filename, warnings)) if _subcases: subcases += _subcases except ValueError as e: logger.error("%s: can't find: %s" % (filename, e)) return subcases def parse_subcases(self, test_path): results = self.scan_path(test_path) for sub in results: name = "{}.{}".format(self.id, sub) self.cases.append(name) if not results: self.cases.append(self.id) def __str__(self): return self.name class TestInstance(DisablePyTestCollectionMixin): """Class representing the execution of a particular TestCase on a platform @param test The TestCase object we want to build/execute @param platform Platform object that we want to build and run against @param base_outdir Base directory for all test results. The actual out directory used is <outdir>/<platform>/<test case name> """ def __init__(self, testcase, platform, outdir): self.testcase = testcase self.platform = platform self.status = None self.reason = "Unknown" self.metrics = dict() self.handler = None self.outdir = outdir self.name = os.path.join(platform.name, testcase.name) self.build_dir = os.path.join(outdir, platform.name, testcase.name) self.run = False self.results = {} def __getstate__(self): d = self.__dict__.copy() return d def __setstate__(self, d): self.__dict__.update(d) def __lt__(self, other): return self.name < other.name @staticmethod def testcase_runnable(testcase, fixtures): can_run = False # console harness allows us to run the test and capture data. 
if testcase.harness in [ 'console', 'ztest', 'pytest']: can_run = True # if we have a fixture that is also being supplied on the # command-line, then we need to run the test, not just build it. fixture = testcase.harness_config.get('fixture') if fixture: can_run = (fixture in fixtures) elif testcase.harness: can_run = False else: can_run = True return can_run # Global testsuite parameters def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]): # right now we only support building on windows. running is still work # in progress. if os.name == 'nt': return False # we asked for build-only on the command line if self.testcase.build_only: return False # Do not run slow tests: skip_slow = self.testcase.slow and not enable_slow if skip_slow: return False target_ready = bool(self.testcase.type == "unit" or \ self.platform.type == "native" or \ self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \ filter == 'runnable') if self.platform.simulation == "nsim": if not find_executable("nsimdrv"): target_ready = False if self.platform.simulation == "mdb-nsim": if not find_executable("mdb"): target_ready = False if self.platform.simulation == "renode": if not find_executable("renode"): target_ready = False if self.platform.simulation == "tsim": if not find_executable("tsim-leon3"): target_ready = False testcase_runnable = self.testcase_runnable(self.testcase, fixtures) return testcase_runnable and target_ready def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]): # Create this in a "twister/" subdirectory otherwise this # will pass this overlay to kconfig.py *twice* and kconfig.cmake # will silently give that second time precedence over any # --extra-args=CONFIG_* subdir = os.path.join(self.build_dir, "twister") content = "" if self.testcase.extra_configs: content = "\n".join(self.testcase.extra_configs) if enable_coverage: if platform.name in coverage_platform: content = content + "\nCONFIG_COVERAGE=y" content = content + "\nCONFIG_COVERAGE_DUMP=y" if enable_asan: if platform.type == "native": content = content + "\nCONFIG_ASAN=y" if enable_ubsan: if platform.type == "native": content = content + "\nCONFIG_UBSAN=y" if content: os.makedirs(subdir, exist_ok=True) file = os.path.join(subdir, "testcase_extra.conf") with open(file, "w") as f: f.write(content) return content def calculate_sizes(self): """Get the RAM/ROM sizes of a test case. This can only be run after the instance has been executed by MakeGenerator, otherwise there won't be any binaries to measure. @return A SizeCalculator object """ fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf")) fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe"))) fns = [x for x in fns if not x.endswith('_prebuilt.elf')] if len(fns) != 1: raise BuildError("Missing/multiple output ELF binary") return SizeCalculator(fns[0], self.testcase.extra_sections) def fill_results_by_status(self): """Fills results according to self.status The method is used to propagate the instance level status to the test cases inside. Useful when the whole instance is skipped and the info is required also at the test cases level for reporting. Should be used with caution, e.g. 
        should not be used to fill all results with passes
        """

        status_to_verdict = {
            'skipped': 'SKIP',
            'error': 'BLOCK',
            'failure': 'FAILED'
        }

        for k in self.results:
            self.results[k] = status_to_verdict[self.status]

    def __repr__(self):
        return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)


class CMake():
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    def __init__(self, testcase, platform, source_dir, build_dir):

        self.cwd = None
        self.capture_output = True

        self.defconfig = {}
        self.cmake_cache = {}

        self.instance = None
        self.testcase = testcase
        self.platform = platform
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.log = "build.log"
        self.generator = None
        self.generator_cmd = None

    def parse_generated(self):
        self.defconfig = {}
        return {}

    def run_build(self, args=[]):

        logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))

        cmake_args = []
        cmake_args.extend(args)
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()

        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT

        if self.cwd:
            kwargs['cwd'] = self.cwd

        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()

        results = {}
        if p.returncode == 0:
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)

            self.instance.status = "passed"
            results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}

            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
            else:
                return None
        else:
            # A real error occurred: record the failure (or a RAM/flash
            # overflow skip) on the instance and report it back
            log_msg = ""
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)

            if log_msg:
                res = re.findall("region `(FLASH|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
                if res and not self.overflow_as_errors:
                    logger.debug("Test skipped due to {} Overflow".format(res[0]))
                    self.instance.status = "skipped"
                    self.instance.reason = "{} overflow".format(res[0])
                else:
                    self.instance.status = "error"
                    self.instance.reason = "Build failure"

            results = {
                "returncode": p.returncode,
                "instance": self.instance,
            }

        return results

    def run_cmake(self, args=[]):

        if self.warnings_as_errors:
            ldflags = "-Wl,--fatal-warnings"
            cflags = "-Werror"
            aflags = "-Wa,--fatal-warnings"
            gen_defines_args = "--edtlib-Werror"
        else:
            ldflags = cflags = aflags = ""
            gen_defines_args = ""

        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = [
            f'-B{self.build_dir}',
            f'-S{self.source_dir}',
            f'-DEXTRA_CFLAGS="{cflags}"',
            f'-DEXTRA_AFLAGS="{aflags}"',
            f'-DEXTRA_LDFLAGS="{ldflags}"',
            f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
            f'-G{self.generator}'
        ]

        if self.cmake_only:
            cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")

        args = ["-D{}".format(a.replace('"', '')) for a in args]
        cmake_args.extend(args)

        cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
        cmake_args.extend(cmake_opts)

        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()

        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT

        if self.cwd:
            kwargs['cwd'] = self.cwd

        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()

        if p.returncode == 0:
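            # On success, parse_generated() (overridden by FilterBuilder below)
            # folds the generated .config and CMakeCache.txt into the data used
            # to evaluate each testcase's 'filter' expression.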
filter_results = self.parse_generated() msg = "Finished building %s for %s" % (self.source_dir, self.platform.name) logger.debug(msg) results = {'msg': msg, 'filter': filter_results} else: self.instance.status = "error" self.instance.reason = "Cmake build failure" self.instance.fill_results_by_status() logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name)) results = {"returncode": p.returncode} if out: with open(os.path.join(self.build_dir, self.log), "a") as log: log_msg = out.decode(sys.getdefaultencoding()) log.write(log_msg) return results @staticmethod def run_cmake_script(args=[]): logger.debug("Running cmake script %s" % (args[0])) cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]] cmake_args.extend(['-P', args[0]]) logger.debug("Calling cmake with arguments: {}".format(cmake_args)) cmake = shutil.which('cmake') if not cmake: msg = "Unable to find `cmake` in path" logger.error(msg) raise Exception(msg) cmd = [cmake] + cmake_args kwargs = dict() kwargs['stdout'] = subprocess.PIPE # CMake sends the output of message() to stderr unless it's STATUS kwargs['stderr'] = subprocess.STDOUT p = subprocess.Popen(cmd, **kwargs) out, _ = p.communicate() # It might happen that the environment adds ANSI escape codes like \x1b[0m, # for instance if twister is executed from inside a makefile. In such a # scenario it is then necessary to remove them, as otherwise the JSON decoding # will fail. ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') out = ansi_escape.sub('', out.decode()) if p.returncode == 0: msg = "Finished running %s" % (args[0]) logger.debug(msg) results = {"returncode": p.returncode, "msg": msg, "stdout": out} else: logger.error("Cmake script failure: %s" % (args[0])) results = {"returncode": p.returncode} return results class FilterBuilder(CMake): def __init__(self, testcase, platform, source_dir, build_dir): super().__init__(testcase, platform, source_dir, build_dir) self.log = "config-twister.log" def parse_generated(self): if self.platform.name == "unit_testing": return {} cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt") defconfig_path = os.path.join(self.build_dir, "zephyr", ".config") with open(defconfig_path, "r") as fp: defconfig = {} for line in fp.readlines(): m = self.config_re.match(line) if not m: if line.strip() and not line.startswith("#"): sys.stderr.write("Unrecognized line %s\n" % line) continue defconfig[m.group(1)] = m.group(2).strip() self.defconfig = defconfig cmake_conf = {} try: cache = CMakeCache.from_file(cmake_cache_path) except FileNotFoundError: cache = {} for k in iter(cache): cmake_conf[k.name] = k.value self.cmake_cache = cmake_conf filter_data = { "ARCH": self.platform.arch, "PLATFORM": self.platform.name } filter_data.update(os.environ) filter_data.update(self.defconfig) filter_data.update(self.cmake_cache) edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle") if self.testcase and self.testcase.tc_filter: try: if os.path.exists(edt_pickle): with open(edt_pickle, 'rb') as f: edt = pickle.load(f) else: edt = None res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt) except (ValueError, SyntaxError) as se: sys.stderr.write( "Failed processing %s\n" % self.testcase.yamlfile) raise se if not res: return {os.path.join(self.platform.name, self.testcase.name): True} else: return {os.path.join(self.platform.name, self.testcase.name): False} else: self.platform.filter_data = filter_data return filter_data class ProjectBuilder(FilterBuilder): def 
__init__(self, suite, instance, **kwargs): super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir) self.log = "build.log" self.instance = instance self.suite = suite self.filtered_tests = 0 self.lsan = kwargs.get('lsan', False) self.asan = kwargs.get('asan', False) self.ubsan = kwargs.get('ubsan', False) self.valgrind = kwargs.get('valgrind', False) self.extra_args = kwargs.get('extra_args', []) self.device_testing = kwargs.get('device_testing', False) self.cmake_only = kwargs.get('cmake_only', False) self.cleanup = kwargs.get('cleanup', False) self.coverage = kwargs.get('coverage', False) self.inline_logs = kwargs.get('inline_logs', False) self.generator = kwargs.get('generator', None) self.generator_cmd = kwargs.get('generator_cmd', None) self.verbose = kwargs.get('verbose', None) self.warnings_as_errors = kwargs.get('warnings_as_errors', True) self.overflow_as_errors = kwargs.get('overflow_as_errors', False) @staticmethod def log_info(filename, inline_logs): filename = os.path.abspath(os.path.realpath(filename)) if inline_logs: logger.info("{:-^100}".format(filename)) try: with open(filename) as fp: data = fp.read() except Exception as e: data = "Unable to read log data (%s)\n" % (str(e)) logger.error(data) logger.info("{:-^100}".format(filename)) else: logger.error("see: " + Fore.YELLOW + filename + Fore.RESET) def log_info_file(self, inline_logs): build_dir = self.instance.build_dir h_log = "{}/handler.log".format(build_dir) b_log = "{}/build.log".format(build_dir) v_log = "{}/valgrind.log".format(build_dir) d_log = "{}/device.log".format(build_dir) if os.path.exists(v_log) and "Valgrind" in self.instance.reason: self.log_info("{}".format(v_log), inline_logs) elif os.path.exists(h_log) and os.path.getsize(h_log) > 0: self.log_info("{}".format(h_log), inline_logs) elif os.path.exists(d_log) and os.path.getsize(d_log) > 0: self.log_info("{}".format(d_log), inline_logs) else: self.log_info("{}".format(b_log), inline_logs) def setup_handler(self): instance = self.instance args = [] # FIXME: Needs simplification if instance.platform.simulation == "qemu": instance.handler = QEMUHandler(instance, "qemu") args.append("QEMU_PIPE=%s" % instance.handler.get_fifo()) instance.handler.call_make_run = True elif instance.testcase.type == "unit": instance.handler = BinaryHandler(instance, "unit") instance.handler.binary = os.path.join(instance.build_dir, "testbinary") if self.coverage: args.append("COVERAGE=1") elif instance.platform.type == "native": handler = BinaryHandler(instance, "native") handler.asan = self.asan handler.valgrind = self.valgrind handler.lsan = self.lsan handler.ubsan = self.ubsan handler.coverage = self.coverage handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe") instance.handler = handler elif instance.platform.simulation == "renode": if find_executable("renode"): instance.handler = BinaryHandler(instance, "renode") instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid") instance.handler.call_make_run = True elif instance.platform.simulation == "tsim": instance.handler = BinaryHandler(instance, "tsim") instance.handler.call_make_run = True elif self.device_testing: instance.handler = DeviceHandler(instance, "device") instance.handler.coverage = self.coverage elif instance.platform.simulation == "nsim": if find_executable("nsimdrv"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.call_make_run = True elif instance.platform.simulation == "mdb-nsim": if 
find_executable("mdb"): instance.handler = BinaryHandler(instance, "nsim") instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid") instance.handler.call_west_flash = True elif instance.platform.simulation == "armfvp": instance.handler = BinaryHandler(instance, "armfvp") instance.handler.call_make_run = True if instance.handler: instance.handler.args = args instance.handler.generator_cmd = self.generator_cmd instance.handler.generator = self.generator def process(self, pipeline, done, message, lock, results): op = message.get('op') if not self.instance.handler: self.setup_handler() # The build process, call cmake and build with configured generator if op == "cmake": res = self.cmake() if self.instance.status in ["failed", "error"]: pipeline.put({"op": "report", "test": self.instance}) elif self.cmake_only: if self.instance.status is None: self.instance.status = "passed" pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.name in res['filter'] and res['filter'][self.instance.name]: logger.debug("filtering %s" % self.instance.name) self.instance.status = "skipped" self.instance.reason = "filter" results.skipped_runtime += 1 for case in self.instance.testcase.cases: self.instance.results.update({case: 'SKIP'}) pipeline.put({"op": "report", "test": self.instance}) else: pipeline.put({"op": "build", "test": self.instance}) elif op == "build": logger.debug("build test: %s" % self.instance.name) res = self.build() if not res: self.instance.status = "error" self.instance.reason = "Build Failure" pipeline.put({"op": "report", "test": self.instance}) else: # Count skipped cases during build, for example # due to ram/rom overflow. inst = res.get("instance", None) if inst and inst.status == "skipped": results.skipped_runtime += 1 if res.get('returncode', 1) > 0: pipeline.put({"op": "report", "test": self.instance}) else: if self.instance.run and self.instance.handler: pipeline.put({"op": "run", "test": self.instance}) else: pipeline.put({"op": "report", "test": self.instance}) # Run the generated binary using one of the supported handlers elif op == "run": logger.debug("run test: %s" % self.instance.name) self.run() self.instance.status, _ = self.instance.handler.get_state() logger.debug(f"run status: {self.instance.name} {self.instance.status}") # to make it work with pickle self.instance.handler.thread = None self.instance.handler.suite = None pipeline.put({ "op": "report", "test": self.instance, "status": self.instance.status, "reason": self.instance.reason } ) # Report results and output progress to screen elif op == "report": with lock: done.put(self.instance) self.report_out(results) if self.cleanup and not self.coverage and self.instance.status == "passed": pipeline.put({ "op": "cleanup", "test": self.instance }) elif op == "cleanup": if self.device_testing: self.cleanup_device_testing_artifacts() else: self.cleanup_artifacts() def cleanup_artifacts(self, additional_keep=[]): logger.debug("Cleaning up {}".format(self.instance.build_dir)) allow = [ 'zephyr/.config', 'handler.log', 'build.log', 'device.log', 'recording.csv', ] allow += additional_keep allow = [os.path.join(self.instance.build_dir, file) for file in allow] for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False): for name in filenames: path = os.path.join(dirpath, name) if path not in allow: os.remove(path) # Remove empty directories and symbolic links to directories for dir in dirnames: path = os.path.join(dirpath, dir) if os.path.islink(path): os.remove(path) elif 
not os.listdir(path): os.rmdir(path) def cleanup_device_testing_artifacts(self): logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir)) sanitizelist = [ 'CMakeCache.txt', 'zephyr/runners.yaml', ] keep = [ 'zephyr/zephyr.hex', 'zephyr/zephyr.bin', 'zephyr/zephyr.elf', ] keep += sanitizelist self.cleanup_artifacts(keep) # sanitize paths so files are relocatable for file in sanitizelist: file = os.path.join(self.instance.build_dir, file) with open(file, "rt") as fin: data = fin.read() data = data.replace(canonical_zephyr_base+"/", "") with open(file, "wt") as fin: fin.write(data) def report_out(self, results): total_to_do = results.total - results.skipped_configs total_tests_width = len(str(total_to_do)) results.done += 1 instance = self.instance if instance.status in ["error", "failed", "timeout", "flash_error"]: if instance.status == "error": results.error += 1 results.failed += 1 if self.verbose: status = Fore.RED + "FAILED " + Fore.RESET + instance.reason else: print("") logger.error( "{:<25} {:<50} {}FAILED{}: {}".format( instance.platform.name, instance.testcase.name, Fore.RED, Fore.RESET, instance.reason)) if not self.verbose: self.log_info_file(self.inline_logs) elif instance.status == "skipped": status = Fore.YELLOW + "SKIPPED" + Fore.RESET elif instance.status == "passed": status = Fore.GREEN + "PASSED" + Fore.RESET else: logger.debug(f"Unknown status = {instance.status}") status = Fore.YELLOW + "UNKNOWN" + Fore.RESET if self.verbose: if self.cmake_only: more_info = "cmake" elif instance.status == "skipped": more_info = instance.reason else: if instance.handler and instance.run: more_info = instance.handler.type_str htime = instance.handler.duration if htime: more_info += " {:.3f}s".format(htime) else: more_info = "build" logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format( results.done, total_tests_width, total_to_do, instance.platform.name, instance.testcase.name, status, more_info)) if instance.status in ["error", "failed", "timeout"]: self.log_info_file(self.inline_logs) else: completed_perc = 0 if total_to_do > 0: completed_perc = int((float(results.done) / total_to_do) * 100) skipped = results.skipped_configs + results.skipped_runtime sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % ( Fore.GREEN, results.done, total_to_do, Fore.RESET, completed_perc, Fore.YELLOW if skipped > 0 else Fore.RESET, skipped, Fore.RESET, Fore.RED if results.failed > 0 else Fore.RESET, results.failed, Fore.RESET ) ) sys.stdout.flush() def cmake(self): instance = self.instance args = self.testcase.extra_args[:] args += self.extra_args if instance.handler: args += instance.handler.args # merge overlay files into one variable def extract_overlays(args): re_overlay = re.compile('OVERLAY_CONFIG=(.*)') other_args = [] overlays = [] for arg in args: match = re_overlay.search(arg) if match: overlays.append(match.group(1).strip('\'"')) else: other_args.append(arg) args[:] = other_args return overlays overlays = extract_overlays(args) if os.path.exists(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")): overlays.append(os.path.join(instance.build_dir, "twister", "testcase_extra.conf")) if overlays: args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays))) res = self.run_cmake(args) return res def build(self): res = self.run_build(['--build', self.build_dir]) return res def run(self): instance = self.instance if instance.handler: if instance.handler.type_str == "device": instance.handler.suite = self.suite 
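            # handle() blocks until the handler finishes; process() later reads
            # the outcome back through instance.handler.get_state().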
instance.handler.handle() sys.stdout.flush() class TestSuite(DisablePyTestCollectionMixin): config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$') tc_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "testcase-schema.yaml")) quarantine_schema = scl.yaml_load( os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "quarantine-schema.yaml")) testcase_valid_keys = {"tags": {"type": "set", "required": False}, "type": {"type": "str", "default": "integration"}, "extra_args": {"type": "list"}, "extra_configs": {"type": "list"}, "build_only": {"type": "bool", "default": False}, "build_on_all": {"type": "bool", "default": False}, "skip": {"type": "bool", "default": False}, "slow": {"type": "bool", "default": False}, "timeout": {"type": "int", "default": 60}, "min_ram": {"type": "int", "default": 8}, "depends_on": {"type": "set"}, "min_flash": {"type": "int", "default": 32}, "arch_allow": {"type": "set"}, "arch_exclude": {"type": "set"}, "extra_sections": {"type": "list", "default": []}, "integration_platforms": {"type": "list", "default": []}, "platform_exclude": {"type": "set"}, "platform_allow": {"type": "set"}, "toolchain_exclude": {"type": "set"}, "toolchain_allow": {"type": "set"}, "filter": {"type": "str"}, "harness": {"type": "str"}, "harness_config": {"type": "map", "default": {}} } RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release", "twister_last_release.csv") SAMPLE_FILENAME = 'sample.yaml' TESTCASE_FILENAME = 'testcase.yaml' def __init__(self, board_root_list=[], testcase_roots=[], outdir=None): self.roots = testcase_roots if not isinstance(board_root_list, list): self.board_roots = [board_root_list] else: self.board_roots = board_root_list # Testsuite Options self.coverage_platform = [] self.build_only = False self.cmake_only = False self.cleanup = False self.enable_slow = False self.device_testing = False self.fixtures = [] self.enable_coverage = False self.enable_ubsan = False self.enable_lsan = False self.enable_asan = False self.enable_valgrind = False self.extra_args = [] self.inline_logs = False self.enable_sizes_report = False self.west_flash = None self.west_runner = None self.generator = None self.generator_cmd = None self.warnings_as_errors = True self.overflow_as_errors = False self.quarantine_verify = False # Keep track of which test cases we've filtered out and why self.testcases = {} self.quarantine = {} self.platforms = [] self.selected_platforms = [] self.filtered_platforms = [] self.default_platforms = [] self.outdir = os.path.abspath(outdir) self.discards = {} self.load_errors = 0 self.instances = dict() self.total_platforms = 0 self.start_time = 0 self.duration = 0 self.warnings = 0 # hardcoded for now self.duts = [] # run integration tests only self.integration = False self.pipeline = None self.version = "NA" def check_zephyr_version(self): try: subproc = subprocess.run(["git", "describe", "--abbrev=12"], stdout=subprocess.PIPE, universal_newlines=True, cwd=ZEPHYR_BASE) if subproc.returncode == 0: self.version = subproc.stdout.strip() logger.info(f"Zephyr version: {self.version}") except OSError: logger.info("Cannot read zephyr version.") def get_platform_instances(self, platform): filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")} return filtered_dict def config(self): logger.info("coverage platform: {}".format(self.coverage_platform)) # Debug Functions @staticmethod def info(what): sys.stdout.write(what + "\n") 
sys.stdout.flush() def update_counting(self, results=None, initial=False): results.skipped_configs = 0 results.skipped_cases = 0 for instance in self.instances.values(): if initial: results.cases += len(instance.testcase.cases) if instance.status == 'skipped': results.skipped_configs += 1 results.skipped_cases += len(instance.testcase.cases) elif instance.status == "passed": results.passed += 1 for res in instance.results.values(): if res == 'SKIP': results.skipped_cases += 1 def compare_metrics(self, filename): # name, datatype, lower results better interesting_metrics = [("ram_size", int, True), ("rom_size", int, True)] if not os.path.exists(filename): logger.error("Cannot compare metrics, %s not found" % filename) return [] results = [] saved_metrics = {} with open(filename) as fp: cr = csv.DictReader(fp) for row in cr: d = {} for m, _, _ in interesting_metrics: d[m] = row[m] saved_metrics[(row["test"], row["platform"])] = d for instance in self.instances.values(): mkey = (instance.testcase.name, instance.platform.name) if mkey not in saved_metrics: continue sm = saved_metrics[mkey] for metric, mtype, lower_better in interesting_metrics: if metric not in instance.metrics: continue if sm[metric] == "": continue delta = instance.metrics.get(metric, 0) - mtype(sm[metric]) if delta == 0: continue results.append((instance, metric, instance.metrics.get(metric, 0), delta, lower_better)) return results def footprint_reports(self, report, show_footprint, all_deltas, footprint_threshold, last_metrics): if not report: return logger.debug("running footprint_reports") deltas = self.compare_metrics(report) warnings = 0 if deltas and show_footprint: for i, metric, value, delta, lower_better in deltas: if not all_deltas and ((delta < 0 and lower_better) or (delta > 0 and not lower_better)): continue percentage = 0 if value > delta: percentage = (float(delta) / float(value - delta)) if not all_deltas and (percentage < (footprint_threshold / 100.0)): continue logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format( i.platform.name, i.testcase.name, Fore.YELLOW, "INFO" if all_deltas else "WARNING", Fore.RESET, metric, delta, value, percentage)) warnings += 1 if warnings: logger.warning("Deltas based on metrics from last %s" % ("release" if not last_metrics else "run")) def summary(self, results, unrecognized_sections): failed = 0 run = 0 for instance in self.instances.values(): if instance.status == "failed": failed += 1 elif instance.metrics.get("unrecognized") and not unrecognized_sections: logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" % (Fore.RED, Fore.RESET, instance.name, str(instance.metrics.get("unrecognized", [])))) failed += 1 if instance.metrics.get('handler_time', None): run += 1 if results.total and results.total != results.skipped_configs: pass_rate = (float(results.passed) / float(results.total - results.skipped_configs)) else: pass_rate = 0 logger.info( "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format( Fore.RED if failed else Fore.GREEN, results.passed, results.total - results.skipped_configs, Fore.RESET, pass_rate, Fore.RED if results.failed else Fore.RESET, results.failed, Fore.RESET, results.skipped_configs, Fore.YELLOW if self.warnings else Fore.RESET, self.warnings, Fore.RESET, self.duration)) self.total_platforms = len(self.platforms) # if we are only building, do not report about tests being executed. 
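        # Note: 'run' above counts configurations that recorded a handler_time
        # metric, i.e. ones that actually executed rather than only built.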
if self.platforms and not self.build_only: logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format( results.cases - results.skipped_cases, results.skipped_cases, len(self.filtered_platforms), self.total_platforms, (100 * len(self.filtered_platforms) / len(self.platforms)) )) logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \ {Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.") def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report): if not self.instances: return logger.info("Saving reports...") if name: report_name = name else: report_name = "twister" if report_dir: os.makedirs(report_dir, exist_ok=True) filename = os.path.join(report_dir, report_name) outdir = report_dir else: filename = os.path.join(self.outdir, report_name) outdir = self.outdir if suffix: filename = "{}_{}".format(filename, suffix) if not no_update: self.xunit_report(filename + ".xml", full_report=False, append=only_failed, version=self.version) self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed, version=self.version) self.csv_report(filename + ".csv") if json_report: self.json_report(filename + ".json", append=only_failed, version=self.version) if platform_reports: self.target_report(outdir, suffix, append=only_failed) if self.discards: self.discard_report(filename + "_discard.csv") if release: self.csv_report(self.RELEASE_DATA) def add_configurations(self): for board_root in self.board_roots: board_root = os.path.abspath(board_root) logger.debug("Reading platform configuration files under %s..." % board_root) for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")): try: platform = Platform() platform.load(file) if platform.name in [p.name for p in self.platforms]: logger.error(f"Duplicate platform {platform.name} in {file}") raise Exception(f"Duplicate platform identifier {platform.name} found") if platform.twister: self.platforms.append(platform) if platform.default: self.default_platforms.append(platform.name) except RuntimeError as e: logger.error("E: %s: can't load: %s" % (file, e)) self.load_errors += 1 def get_all_tests(self): tests = [] for _, tc in self.testcases.items(): for case in tc.cases: tests.append(case) return tests @staticmethod def get_toolchain(): toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake') result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"]) try: if result['returncode']: raise TwisterRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined") except Exception as e: print(str(e)) sys.exit(2) toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT'] logger.info(f"Using '{toolchain}' toolchain.") return toolchain def add_testcases(self, testcase_filter=[]): for root in self.roots: root = os.path.abspath(root) logger.debug("Reading test case configuration files under %s..." 
% root) for dirpath, _, filenames in os.walk(root, topdown=True): if self.SAMPLE_FILENAME in filenames: filename = self.SAMPLE_FILENAME elif self.TESTCASE_FILENAME in filenames: filename = self.TESTCASE_FILENAME else: continue logger.debug("Found possible test case in " + dirpath) tc_path = os.path.join(dirpath, filename) try: parsed_data = TwisterConfigParser(tc_path, self.tc_schema) parsed_data.load() tc_path = os.path.dirname(tc_path) workdir = os.path.relpath(tc_path, root) for name in parsed_data.tests.keys(): tc = TestCase(root, workdir, name) tc_dict = parsed_data.get_test(name, self.testcase_valid_keys) tc.source_dir = tc_path tc.yamlfile = tc_path tc.type = tc_dict["type"] tc.tags = tc_dict["tags"] tc.extra_args = tc_dict["extra_args"] tc.extra_configs = tc_dict["extra_configs"] tc.arch_allow = tc_dict["arch_allow"] tc.arch_exclude = tc_dict["arch_exclude"] tc.skip = tc_dict["skip"] tc.platform_exclude = tc_dict["platform_exclude"] tc.platform_allow = tc_dict["platform_allow"] tc.toolchain_exclude = tc_dict["toolchain_exclude"] tc.toolchain_allow = tc_dict["toolchain_allow"] tc.tc_filter = tc_dict["filter"] tc.timeout = tc_dict["timeout"] tc.harness = tc_dict["harness"] tc.harness_config = tc_dict["harness_config"] if tc.harness == 'console' and not tc.harness_config: raise Exception('Harness config error: console harness defined without a configuration.') tc.build_only = tc_dict["build_only"] tc.build_on_all = tc_dict["build_on_all"] tc.slow = tc_dict["slow"] tc.min_ram = tc_dict["min_ram"] tc.depends_on = tc_dict["depends_on"] tc.min_flash = tc_dict["min_flash"] tc.extra_sections = tc_dict["extra_sections"] tc.integration_platforms = tc_dict["integration_platforms"] tc.parse_subcases(tc_path) if testcase_filter: if tc.name and tc.name in testcase_filter: self.testcases[tc.name] = tc else: self.testcases[tc.name] = tc except Exception as e: logger.error("%s: can't load (skipping): %s" % (tc_path, e)) self.load_errors += 1 return len(self.testcases) def get_platform(self, name): selected_platform = None for platform in self.platforms: if platform.name == name: selected_platform = platform break return selected_platform def load_quarantine(self, file): """ Loads quarantine list from the given yaml file. 
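        An illustrative entry (assumed shape, matching the keys read below):

            - scenarios:
                - kernel.common
              platforms:
                - qemu_x86
              comment: "flaky under emulation"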
Creates a dictionary of all tests configurations (platform + scenario: comment) that shall be skipped due to quarantine """ # Load yaml into quarantine_yaml quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema) # Create quarantine_list with a product of the listed # platforms and scenarios for each entry in quarantine yaml quarantine_list = [] for quar_dict in quarantine_yaml: if quar_dict['platforms'][0] == "all": plat = [p.name for p in self.platforms] else: plat = quar_dict['platforms'] comment = quar_dict.get('comment', "NA") quarantine_list.append([{".".join([p, s]): comment} for p in plat for s in quar_dict['scenarios']]) # Flatten the quarantine_list quarantine_list = [it for sublist in quarantine_list for it in sublist] # Change quarantine_list into a dictionary for d in quarantine_list: self.quarantine.update(d) def load_from_file(self, file, filter_status=[], filter_platform=[]): try: with open(file, "r") as fp: cr = csv.DictReader(fp) instance_list = [] for row in cr: if row["status"] in filter_status: continue test = row["test"] platform = self.get_platform(row["platform"]) if filter_platform and platform.name not in filter_platform: continue instance = TestInstance(self.testcases[test], platform, self.outdir) if self.device_testing: tfilter = 'runnable' else: tfilter = 'buildable' instance.run = instance.check_runnable( self.enable_slow, tfilter, self.fixtures ) instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform) instance_list.append(instance) self.add_instances(instance_list) except KeyError as e: logger.error("Key error while parsing tests file.({})".format(str(e))) sys.exit(2) except FileNotFoundError as e: logger.error("Couldn't find input file with list of tests. 
({})".format(e)) sys.exit(2) def apply_filters(self, **kwargs): toolchain = self.get_toolchain() discards = {} platform_filter = kwargs.get('platform') exclude_platform = kwargs.get('exclude_platform', []) testcase_filter = kwargs.get('run_individual_tests', []) arch_filter = kwargs.get('arch') tag_filter = kwargs.get('tag') exclude_tag = kwargs.get('exclude_tag') all_filter = kwargs.get('all') runnable = kwargs.get('runnable') force_toolchain = kwargs.get('force_toolchain') force_platform = kwargs.get('force_platform') emu_filter = kwargs.get('emulation_only') logger.debug("platform filter: " + str(platform_filter)) logger.debug(" arch_filter: " + str(arch_filter)) logger.debug(" tag_filter: " + str(tag_filter)) logger.debug(" exclude_tag: " + str(exclude_tag)) default_platforms = False emulation_platforms = False if all_filter: logger.info("Selecting all possible platforms per test case") # When --all used, any --platform arguments ignored platform_filter = [] elif not platform_filter and not emu_filter: logger.info("Selecting default platforms per test case") default_platforms = True elif emu_filter: logger.info("Selecting emulation platforms per test case") emulation_platforms = True if platform_filter: platforms = list(filter(lambda p: p.name in platform_filter, self.platforms)) elif emu_filter: platforms = list(filter(lambda p: p.simulation != 'na', self.platforms)) elif arch_filter: platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms)) elif default_platforms: platforms = list(filter(lambda p: p.default, self.platforms)) else: platforms = self.platforms logger.info("Building initial testcase list...") for tc_name, tc in self.testcases.items(): if tc.build_on_all and not platform_filter: platform_scope = self.platforms elif tc.integration_platforms and self.integration: platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \ self.platforms)) else: platform_scope = platforms integration = self.integration and tc.integration_platforms # If there isn't any overlap between the platform_allow list and the platform_scope # we set the scope to the platform_allow list if tc.platform_allow and not platform_filter and not integration: a = set(platform_scope) b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms)) c = a.intersection(b) if not c: platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \ self.platforms)) # list of instances per testcase, aka configurations. 
instance_list = [] for plat in platform_scope: instance = TestInstance(tc, plat, self.outdir) if runnable: tfilter = 'runnable' else: tfilter = 'buildable' instance.run = instance.check_runnable( self.enable_slow, tfilter, self.fixtures ) for t in tc.cases: instance.results[t] = None if runnable and self.duts: for h in self.duts: if h.platform == plat.name: if tc.harness_config.get('fixture') in h.fixtures: instance.run = True if not force_platform and plat.name in exclude_platform: discards[instance] = discards.get(instance, "Platform is excluded on command line.") if (plat.arch == "unit") != (tc.type == "unit"): # Discard silently continue if runnable and not instance.run: discards[instance] = discards.get(instance, "Not runnable on device") if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms: discards[instance] = discards.get(instance, "Not part of integration platforms") if tc.skip: discards[instance] = discards.get(instance, "Skip filter") if tag_filter and not tc.tags.intersection(tag_filter): discards[instance] = discards.get(instance, "Command line testcase tag filter") if exclude_tag and tc.tags.intersection(exclude_tag): discards[instance] = discards.get(instance, "Command line testcase exclude filter") if testcase_filter and tc_name not in testcase_filter: discards[instance] = discards.get(instance, "Testcase name filter") if arch_filter and plat.arch not in arch_filter: discards[instance] = discards.get(instance, "Command line testcase arch filter") if not force_platform: if tc.arch_allow and plat.arch not in tc.arch_allow: discards[instance] = discards.get(instance, "Not in test case arch allow list") if tc.arch_exclude and plat.arch in tc.arch_exclude: discards[instance] = discards.get(instance, "In test case arch exclude") if tc.platform_exclude and plat.name in tc.platform_exclude: discards[instance] = discards.get(instance, "In test case platform exclude") if tc.toolchain_exclude and toolchain in tc.toolchain_exclude: discards[instance] = discards.get(instance, "In test case toolchain exclude") if platform_filter and plat.name not in platform_filter: discards[instance] = discards.get(instance, "Command line platform filter") if tc.platform_allow and plat.name not in tc.platform_allow: discards[instance] = discards.get(instance, "Not in testcase platform allow list") if tc.toolchain_allow and toolchain not in tc.toolchain_allow: discards[instance] = discards.get(instance, "Not in testcase toolchain allow list") if not plat.env_satisfied: discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env))) if not force_toolchain \ and toolchain and (toolchain not in plat.supported_toolchains) \ and tc.type != 'unit': discards[instance] = discards.get(instance, "Not supported by the toolchain") if plat.ram < tc.min_ram: discards[instance] = discards.get(instance, "Not enough RAM") if tc.depends_on: dep_intersection = tc.depends_on.intersection(set(plat.supported)) if dep_intersection != set(tc.depends_on): discards[instance] = discards.get(instance, "No hardware support") if plat.flash < tc.min_flash: discards[instance] = discards.get(instance, "Not enough FLASH") if set(plat.ignore_tags) & tc.tags: discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)") if plat.only_tags and not set(plat.only_tags) & tc.tags: discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)") test_configuration = ".".join([instance.platform.name, 
instance.testcase.id]) # skip quarantined tests if test_configuration in self.quarantine and not self.quarantine_verify: discards[instance] = discards.get(instance, f"Quarantine: {self.quarantine[test_configuration]}") # run only quarantined test to verify their statuses (skip everything else) if self.quarantine_verify and test_configuration not in self.quarantine: discards[instance] = discards.get(instance, "Not under quarantine") # if nothing stopped us until now, it means this configuration # needs to be added. instance_list.append(instance) # no configurations, so jump to next testcase if not instance_list: continue # if twister was launched with no platform options at all, we # take all default platforms if default_platforms and not tc.build_on_all and not integration: if tc.platform_allow: a = set(self.default_platforms) b = set(tc.platform_allow) c = a.intersection(b) if c: aa = list(filter(lambda tc: tc.platform.name in c, instance_list)) self.add_instances(aa) else: self.add_instances(instance_list) else: instances = list(filter(lambda tc: tc.platform.default, instance_list)) self.add_instances(instances) elif integration: instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list)) self.add_instances(instances) elif emulation_platforms: self.add_instances(instance_list) for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)): discards[instance] = discards.get(instance, "Not an emulated platform") else: self.add_instances(instance_list) for _, case in self.instances.items(): case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform) self.discards = discards self.selected_platforms = set(p.platform.name for p in self.instances.values()) for instance in self.discards: instance.reason = self.discards[instance] # If integration mode is on all skips on integration_platforms are treated as errors. 
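        # (Quarantined instances are exempt from the integration-error rule
        # below, so a quarantine entry never escalates into an error.)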
if self.integration and instance.platform.name in instance.testcase.integration_platforms \ and "Quarantine" not in instance.reason: instance.status = "error" instance.reason += " but is one of the integration platforms" instance.fill_results_by_status() self.instances[instance.name] = instance else: instance.status = "skipped" instance.fill_results_by_status() self.filtered_platforms = set(p.platform.name for p in self.instances.values() if p.status != "skipped" ) return discards def add_instances(self, instance_list): for instance in instance_list: self.instances[instance.name] = instance @staticmethod def calc_one_elf_size(instance): if instance.status not in ["error", "failed", "skipped"]: if instance.platform.type != "native": size_calc = instance.calculate_sizes() instance.metrics["ram_size"] = size_calc.get_ram_size() instance.metrics["rom_size"] = size_calc.get_rom_size() instance.metrics["unrecognized"] = size_calc.unrecognized_sections() else: instance.metrics["ram_size"] = 0 instance.metrics["rom_size"] = 0 instance.metrics["unrecognized"] = [] instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0 def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False): for instance in self.instances.values(): if build_only: instance.run = False if instance.status not in ['passed', 'skipped', 'error']: logger.debug(f"adding {instance.name}") instance.status = None if test_only and instance.run: pipeline.put({"op": "run", "test": instance}) else: pipeline.put({"op": "cmake", "test": instance}) # If the instance got 'error' status before, proceed to the report stage if instance.status == "error": pipeline.put({"op": "report", "test": instance}) def pipeline_mgr(self, pipeline, done_queue, lock, results): while True: try: task = pipeline.get_nowait() except queue.Empty: break else: test = task['test'] pb = ProjectBuilder(self, test, lsan=self.enable_lsan, asan=self.enable_asan, ubsan=self.enable_ubsan, coverage=self.enable_coverage, extra_args=self.extra_args, device_testing=self.device_testing, cmake_only=self.cmake_only, cleanup=self.cleanup, valgrind=self.enable_valgrind, inline_logs=self.inline_logs, generator=self.generator, generator_cmd=self.generator_cmd, verbose=self.verbose, warnings_as_errors=self.warnings_as_errors, overflow_as_errors=self.overflow_as_errors ) pb.process(pipeline, done_queue, task, lock, results) return True def execute(self, pipeline, done, results): lock = Lock() logger.info("Adding tasks to the queue...") self.add_tasks_to_queue(pipeline, self.build_only, self.test_only) logger.info("Added initial list of jobs to queue") processes = [] for job in range(self.jobs): logger.debug(f"Launch process {job}") p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, )) processes.append(p) p.start() try: for p in processes: p.join() except KeyboardInterrupt: logger.info("Execution interrupted") for p in processes: p.terminate() # FIXME: This needs to move out. 
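        # Size metrics are gathered only after all build/run jobs have finished:
        # one ELF per build directory, measured on a thread pool via
        # calc_one_elf_size() above.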
        if self.enable_sizes_report and not self.cmake_only:
            # Parallelize size calculation
            executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
            futures = [executor.submit(self.calc_one_elf_size, instance)
                       for instance in self.instances.values()]
            concurrent.futures.wait(futures)
        else:
            for instance in self.instances.values():
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
                instance.metrics["unrecognized"] = []

        return results

    def discard_report(self, filename):

        try:
            if not self.discards:
                raise TwisterRuntimeError("apply_filters() hasn't been run!")
        except Exception as e:
            logger.error(str(e))
            sys.exit(2)

        with open(filename, "wt") as csvfile:
            fieldnames = ["test", "arch", "platform", "reason"]
            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
            cw.writeheader()
            for instance, reason in sorted(self.discards.items()):
                rowdict = {"test": instance.testcase.name,
                           "arch": instance.platform.arch,
                           "platform": instance.platform.name,
                           "reason": reason}
                cw.writerow(rowdict)

    def target_report(self, outdir, suffix, append=False):
        platforms = {inst.platform.name for _, inst in self.instances.items()}
        for platform in platforms:
            if suffix:
                filename = os.path.join(outdir, "{}_{}.xml".format(platform, suffix))
            else:
                filename = os.path.join(outdir, "{}.xml".format(platform))
            self.xunit_report(filename, platform, full_report=True,
                              append=append, version=self.version)

    @staticmethod
    def process_log(log_file):
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
        total = 0
        fails = passes = errors = skips = 0
        if platform:
            selected = [platform]
            logger.info(f"Writing target report for {platform}...")
        else:
            logger.info(f"Writing xunit report {filename}...")
            selected = self.selected_platforms

        if os.path.exists(filename) and append:
            tree = ET.parse(filename)
            eleTestsuites = tree.getroot()
        else:
            eleTestsuites = ET.Element('testsuites')

        for p in selected:
            inst = self.get_platform_instances(p)
            fails = 0
            passes = 0
            errors = 0
            skips = 0
            duration = 0

            for _, instance in inst.items():
                handler_time = instance.metrics.get('handler_time', 0)
                duration += handler_time

                if full_report and instance.run:
                    for k in instance.results.keys():
                        if instance.results[k] == 'PASS':
                            passes += 1
                        elif instance.results[k] == 'BLOCK':
                            errors += 1
                        elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
                            skips += 1
                        else:
                            fails += 1
                else:
                    if instance.status in ["error", "failed", "timeout", "flash_error"]:
                        if instance.reason in ['build_error', 'handler_crash']:
                            errors += 1
                        else:
                            fails += 1
                    elif instance.status == 'skipped':
                        skips += 1
                    elif instance.status == 'passed':
                        passes += 1
                    else:
                        if instance.status:
                            logger.error(f"{instance.name}: Unknown status {instance.status}")
                        else:
                            logger.error(f"{instance.name}: No status")

            total = (errors + passes + fails + skips)

            # do not produce a report if no tests were actually run (only built)
            if total == 0:
                continue

            run = p
            eleTestsuite = None

            # When we re-run the tests, we re-use the results and update only with
            # the newly run tests.
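            # Shape of the generated document, one <testsuite> per platform
            # (illustrative only; attribute values vary):
            #
            #   <testsuites>
            #     <testsuite name="qemu_x86" tests="12" failures="0" ...>
            #       <properties><property name="version" value="..."/></properties>
            #       <testcase classname="kernel.common" name="..." time="..."/>
            #     </testsuite>
            #   </testsuites>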
            if os.path.exists(filename) and append:
                ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
                if ts:
                    eleTestsuite = ts[0]
                    eleTestsuite.attrib['failures'] = "%d" % fails
                    eleTestsuite.attrib['errors'] = "%d" % errors
                    eleTestsuite.attrib['skipped'] = "%d" % skips
                else:
                    logger.info(f"Did not find any existing results for {p}")
                    eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                                 name=run, time="%f" % duration,
                                                 tests="%d" % (total),
                                                 failures="%d" % fails,
                                                 errors="%d" % (errors), skipped="%s" % (skips))
                    eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                    # Multiple 'property' can be added to 'properties'
                    # differing by name and value
                    ET.SubElement(eleTSProperties, 'property', name="version", value=version)
            else:
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skipped="%s" % (skips))
                eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' can be added to 'properties'
                # differing by name and value
                ET.SubElement(eleTSProperties, 'property', name="version", value=version)

            for _, instance in inst.items():
                if full_report:
                    tname = os.path.basename(instance.testcase.name)
                else:
                    tname = instance.testcase.id
                handler_time = instance.metrics.get('handler_time', 0)

                if full_report:
                    for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
                        for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                            eleTestsuite.remove(tc)

                        classname = ".".join(tname.split(".")[:2])
                        eleTestcase = ET.SubElement(
                            eleTestsuite, 'testcase',
                            classname=classname,
                            name="%s" % (k), time="%f" % handler_time)
                        if instance.results[k] in ['FAIL', 'BLOCK'] or \
                            (not instance.run and instance.status in ["error", "failed", "timeout"]):
                            if instance.results[k] == 'FAIL':
                                el = ET.SubElement(
                                    eleTestcase,
                                    'failure',
                                    type="failure",
                                    message="failed")
                            else:
                                el = ET.SubElement(
                                    eleTestcase,
                                    'error',
                                    type="failure",
                                    message=instance.reason)
                            log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                            log_file = os.path.join(log_root, "handler.log")
                            el.text = self.process_log(log_file)

                        elif instance.results[k] == 'PASS' \
                            or (not instance.run and instance.status in ["passed"]):
                            pass
                        elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
                            el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
                        else:
                            el = ET.SubElement(
                                eleTestcase, 'error',
                                type="error",
                                message=f"{instance.reason}")
                else:
                    if platform:
                        classname = ".".join(instance.testcase.name.split(".")[:2])
                    else:
                        classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                    # remove testcases that are being re-run from existing reports
                    for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
                        eleTestsuite.remove(tc)

                    eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                        classname=classname,
                        name="%s" % (instance.testcase.name),
                        time="%f" % handler_time)

                    if instance.status in ["error", "failed", "timeout", "flash_error"]:
                        failure = ET.SubElement(
                            eleTestcase,
                            'failure',
                            type="failure",
                            message=instance.reason)

                        log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                        bl = os.path.join(log_root, "build.log")
                        hl = os.path.join(log_root, "handler.log")
                        log_file = bl
                        if instance.reason != 'Build error':
                            if os.path.exists(hl):
                                log_file = hl
                            else:
                                log_file = bl

                        failure.text = self.process_log(log_file)

                    elif instance.status == "skipped":
                        ET.SubElement(eleTestcase, 'skipped',
type="skipped", message="Skipped") result = ET.tostring(eleTestsuites) with open(filename, 'wb') as report: report.write(result) return fails, passes, errors, skips def csv_report(self, filename): with open(filename, "wt") as csvfile: fieldnames = ["test", "arch", "platform", "status", "extra_args", "handler", "handler_time", "ram_size", "rom_size"] cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep) cw.writeheader() for instance in self.instances.values(): rowdict = {"test": instance.testcase.name, "arch": instance.platform.arch, "platform": instance.platform.name, "extra_args": " ".join(instance.testcase.extra_args), "handler": instance.platform.simulation} rowdict["status"] = instance.status if instance.status not in ["error", "failed", "timeout"]: if instance.handler: rowdict["handler_time"] = instance.metrics.get("handler_time", 0) ram_size = instance.metrics.get("ram_size", 0) rom_size = instance.metrics.get("rom_size", 0) rowdict["ram_size"] = ram_size rowdict["rom_size"] = rom_size cw.writerow(rowdict) def json_report(self, filename, append=False, version="NA"): logger.info(f"Writing JSON report {filename}") report = {} selected = self.selected_platforms report["environment"] = {"os": os.name, "zephyr_version": version, "toolchain": self.get_toolchain() } json_data = {} if os.path.exists(filename) and append: with open(filename, 'r') as json_file: json_data = json.load(json_file) suites = json_data.get("testsuites", []) if suites: suite = suites[0] testcases = suite.get("testcases", []) else: suite = {} testcases = [] for p in selected: inst = self.get_platform_instances(p) for _, instance in inst.items(): testcase = {} handler_log = os.path.join(instance.build_dir, "handler.log") build_log = os.path.join(instance.build_dir, "build.log") device_log = os.path.join(instance.build_dir, "device.log") handler_time = instance.metrics.get('handler_time', 0) ram_size = instance.metrics.get ("ram_size", 0) rom_size = instance.metrics.get("rom_size",0) for k in instance.results.keys(): testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases )) testcase = {"testcase": k, "arch": instance.platform.arch, "platform": p, } if ram_size: testcase["ram_size"] = ram_size if rom_size: testcase["rom_size"] = rom_size if instance.results[k] in ["PASS"]: testcase["status"] = "passed" if instance.handler: testcase["execution_time"] = handler_time elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]: testcase["status"] = "failed" testcase["reason"] = instance.reason testcase["execution_time"] = handler_time if os.path.exists(handler_log): testcase["test_output"] = self.process_log(handler_log) elif os.path.exists(device_log): testcase["device_log"] = self.process_log(device_log) else: testcase["build_log"] = self.process_log(build_log) else: testcase["status"] = "skipped" testcase["reason"] = instance.reason testcases.append(testcase) suites = [ {"testcases": testcases} ] report["testsuites"] = suites with open(filename, "wt") as json_file: json.dump(report, json_file, indent=4, separators=(',',':')) def get_testcase(self, identifier): results = [] for _, tc in self.testcases.items(): for case in tc.cases: if case == identifier: results.append(tc) return results class CoverageTool: """ Base class for every supported coverage tool """ def __init__(self): self.gcov_tool = None self.base_dir = None @staticmethod def factory(tool): if tool == 'lcov': t = Lcov() elif tool == 'gcovr': t = Gcovr() else: 
logger.error("Unsupported coverage tool specified: {}".format(tool)) return None logger.debug(f"Select {tool} as the coverage tool...") return t @staticmethod def retrieve_gcov_data(input_file): logger.debug("Working on %s" % input_file) extracted_coverage_info = {} capture_data = False capture_complete = False with open(input_file, 'r') as fp: for line in fp.readlines(): if re.search("GCOV_COVERAGE_DUMP_START", line): capture_data = True continue if re.search("GCOV_COVERAGE_DUMP_END", line): capture_complete = True break # Loop until the coverage data is found. if not capture_data: continue if line.startswith("*"): sp = line.split("<") if len(sp) > 1: # Remove the leading delimiter "*" file_name = sp[0][1:] # Remove the trailing new line char hex_dump = sp[1][:-1] else: continue else: continue extracted_coverage_info.update({file_name: hex_dump}) if not capture_data: capture_complete = True return {'complete': capture_complete, 'data': extracted_coverage_info} @staticmethod def create_gcda_files(extracted_coverage_info): logger.debug("Generating gcda files") for filename, hexdump_val in extracted_coverage_info.items(): # if kobject_hash is given for coverage gcovr fails # hence skipping it problem only in gcovr v4.1 if "kobject_hash" in filename: filename = (filename[:-4]) + "gcno" try: os.remove(filename) except Exception: pass continue with open(filename, 'wb') as fp: fp.write(bytes.fromhex(hexdump_val)) def generate(self, outdir): for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True): gcov_data = self.__class__.retrieve_gcov_data(filename) capture_complete = gcov_data['complete'] extracted_coverage_info = gcov_data['data'] if capture_complete: self.__class__.create_gcda_files(extracted_coverage_info) logger.debug("Gcov data captured: {}".format(filename)) else: logger.error("Gcov data capture incomplete: {}".format(filename)) with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog: ret = self._generate(outdir, coveragelog) if ret == 0: logger.info("HTML report generated: {}".format( os.path.join(outdir, "coverage", "index.html"))) class Lcov(CoverageTool): def __init__(self): super().__init__() self.ignores = [] def add_ignore_file(self, pattern): self.ignores.append('*' + pattern + '*') def add_ignore_directory(self, pattern): self.ignores.append('*/' + pattern + '/*') def _generate(self, outdir, coveragelog): coveragefile = os.path.join(outdir, "coverage.info") ztestfile = os.path.join(outdir, "ztest.info") cmd = ["lcov", "--gcov-tool", self.gcov_tool, "--capture", "--directory", outdir, "--rc", "lcov_branch_coverage=1", "--output-file", coveragefile] cmd_str = " ".join(cmd) logger.debug(f"Running {cmd_str}...") subprocess.call(cmd, stdout=coveragelog) # We want to remove tests/* and tests/ztest/test/* but save tests/ztest subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract", coveragefile, os.path.join(self.base_dir, "tests", "ztest", "*"), "--output-file", ztestfile, "--rc", "lcov_branch_coverage=1"], stdout=coveragelog) if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0: subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove", ztestfile, os.path.join(self.base_dir, "tests/ztest/test/*"), "--output-file", ztestfile, "--rc", "lcov_branch_coverage=1"], stdout=coveragelog) files = [coveragefile, ztestfile] else: files = [coveragefile] for i in self.ignores: subprocess.call( ["lcov", "--gcov-tool", self.gcov_tool, "--remove", coveragefile, i, "--output-file", coveragefile, "--rc", "lcov_branch_coverage=1"], 
stdout=coveragelog) # The --ignore-errors source option is added to avoid it exiting due to # samples/application_development/external_lib/ return subprocess.call(["genhtml", "--legend", "--branch-coverage", "--ignore-errors", "source", "-output-directory", os.path.join(outdir, "coverage")] + files, stdout=coveragelog) class Gcovr(CoverageTool): def __init__(self): super().__init__() self.ignores = [] def add_ignore_file(self, pattern): self.ignores.append('.*' + pattern + '.*') def add_ignore_directory(self, pattern): self.ignores.append(".*/" + pattern + '/.*') @staticmethod def _interleave_list(prefix, list): tuple_list = [(prefix, item) for item in list] return [item for sublist in tuple_list for item in sublist] def _generate(self, outdir, coveragelog): coveragefile = os.path.join(outdir, "coverage.json") ztestfile = os.path.join(outdir, "ztest.json") excludes = Gcovr._interleave_list("-e", self.ignores) # We want to remove tests/* and tests/ztest/test/* but save tests/ztest cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable", self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o", coveragefile, outdir] cmd_str = " ".join(cmd) logger.debug(f"Running {cmd_str}...") subprocess.call(cmd, stdout=coveragelog) subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable", self.gcov_tool, "-f", "tests/ztest", "-e", "tests/ztest/test/*", "--json", "-o", ztestfile, outdir], stdout=coveragelog) if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0: files = [coveragefile, ztestfile] else: files = [coveragefile] subdir = os.path.join(outdir, "coverage") os.makedirs(subdir, exist_ok=True) tracefiles = self._interleave_list("--add-tracefile", files) return subprocess.call(["gcovr", "-r", self.base_dir, "--html", "--html-details"] + tracefiles + ["-o", os.path.join(subdir, "index.html")], stdout=coveragelog) class DUT(object): def __init__(self, id=None, serial=None, platform=None, product=None, serial_pty=None, connected=False, pre_script=None, post_script=None, post_flash_script=None, runner=None): self.serial = serial self.platform = platform self.serial_pty = serial_pty self._counter = Value("i", 0) self._available = Value("i", 1) self.connected = connected self.pre_script = pre_script self.id = id self.product = product self.runner = runner self.fixtures = [] self.post_flash_script = post_flash_script self.post_script = post_script self.pre_script = pre_script self.probe_id = None self.notes = None self.lock = Lock() self.match = False @property def available(self): with self._available.get_lock(): return self._available.value @available.setter def available(self, value): with self._available.get_lock(): self._available.value = value @property def counter(self): with self._counter.get_lock(): return self._counter.value @counter.setter def counter(self, value): with self._counter.get_lock(): self._counter.value = value def to_dict(self): d = {} exclude = ['_available', '_counter', 'match'] v = vars(self) for k in v.keys(): if k not in exclude and v[k]: d[k] = v[k] return d def __repr__(self): return f"<{self.platform} ({self.product}) on {self.serial}>" class HardwareMap: schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml") manufacturer = [ 'ARM', 'SEGGER', 'MBED', 'STMicroelectronics', 'Atmel Corp.', 'Texas Instruments', 'Silicon Labs', 'NXP Semiconductors', 'Microchip Technology Inc.', 'FTDI', 'Digilent' ] runner_mapping = { 'pyocd': [ 'DAPLink CMSIS-DAP', 'MBED CMSIS-DAP' ], 'jlink': [ 'J-Link', 'J-Link OB' ], 'openocd': [ 
'STM32 STLink', '^XDS110.*', 'STLINK-V3' ], 'dediprog': [ 'TTL232R-3V3', 'MCP2200 USB Serial Port Emulator' ] } def __init__(self): self.detected = [] self.duts = [] def add_device(self, serial, platform, pre_script, is_pty): device = DUT(platform=platform, connected=True, pre_script=pre_script) if is_pty: device.serial_pty = serial else: device.serial = serial self.duts.append(device) def load(self, map_file): hwm_schema = scl.yaml_load(self.schema_path) duts = scl.yaml_load_verify(map_file, hwm_schema) for dut in duts: pre_script = dut.get('pre_script') post_script = dut.get('post_script') post_flash_script = dut.get('post_flash_script') platform = dut.get('platform') id = dut.get('id') runner = dut.get('runner') serial = dut.get('serial') product = dut.get('product') fixtures = dut.get('fixtures', []) new_dut = DUT(platform=platform, product=product, runner=runner, id=id, serial=serial, connected=serial is not None, pre_script=pre_script, post_script=post_script, post_flash_script=post_flash_script) new_dut.fixtures = fixtures new_dut.counter = 0 self.duts.append(new_dut) def scan(self, persistent=False): from serial.tools import list_ports if persistent and platform.system() == 'Linux': # On Linux, /dev/serial/by-id provides symlinks to # '/dev/ttyACMx' nodes using names which are unique as # long as manufacturers fill out USB metadata nicely. # # This creates a map from '/dev/ttyACMx' device nodes # to '/dev/serial/by-id/usb-...' symlinks. The symlinks # go into the hardware map because they stay the same # even when the user unplugs / replugs the device. # # Some inexpensive USB/serial adapters don't result # in unique names here, though, so use of this feature # requires explicitly setting persistent=True. by_id = Path('/dev/serial/by-id') def readlink(link): return str((by_id / link).resolve()) persistent_map = {readlink(link): str(link) for link in by_id.iterdir()} else: persistent_map = {} serial_devices = list_ports.comports() logger.info("Scanning connected hardware...") for d in serial_devices: if d.manufacturer in self.manufacturer: # TI XDS110 can have multiple serial devices for a single board # assume endpoint 0 is the serial, skip all others if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'): continue s_dev = DUT(platform="unknown", id=d.serial_number, serial=persistent_map.get(d.device, d.device), product=d.product, runner='unknown', connected=True) for runner, _ in self.runner_mapping.items(): products = self.runner_mapping.get(runner) if d.product in products: s_dev.runner = runner continue # Try regex matching for p in products: if re.match(p, d.product): s_dev.runner = runner s_dev.connected = True self.detected.append(s_dev) else: logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d)) def save(self, hwm_file): # use existing map self.detected.sort(key=lambda x: x.serial or '') if os.path.exists(hwm_file): with open(hwm_file, 'r') as yaml_file: hwm = yaml.load(yaml_file, Loader=SafeLoader) if hwm: hwm.sort(key=lambda x: x['serial'] or '') # disconnect everything for h in hwm: h['connected'] = False h['serial'] = None for _detected in self.detected: for h in hwm: if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match: h['connected'] = True h['serial'] = _detected.serial _detected.match = True new_duts = list(filter(lambda d: not d.match, self.detected)) new = [] for d in new_duts: new.append(d.to_dict()) if hwm: hwm = hwm + new else: hwm = new with open(hwm_file, 'w') as yaml_file: yaml.dump(hwm, 
yaml_file, Dumper=Dumper, default_flow_style=False) self.load(hwm_file) logger.info("Registered devices:") self.dump() else: # create new file dl = [] for _connected in self.detected: platform = _connected.platform id = _connected.id runner = _connected.runner serial = _connected.serial product = _connected.product d = { 'platform': platform, 'id': id, 'runner': runner, 'serial': serial, 'product': product, 'connected': _connected.connected } dl.append(d) with open(hwm_file, 'w') as yaml_file: yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False) logger.info("Detected devices:") self.dump(detected=True) def dump(self, filtered=[], header=[], connected_only=False, detected=False): print("") table = [] if detected: to_show = self.detected else: to_show = self.duts if not header: header = ["Platform", "ID", "Serial device"] for p in to_show: platform = p.platform connected = p.connected if filtered and platform not in filtered: continue if not connected_only or connected: table.append([platform, p.id, p.serial]) print(tabulate(table, headers=header, tablefmt="github"))
"use strict"; var cartContent=null; var cartTotal=null; var footerPages=null; var total=null; var couponPrice=0; var deliveryCharge=0; var deliveryStatus = false; var cart=null; $('#localorder_phone').hide(); /** * * @param {Number} net The net value * @param {Number} delivery The delivery value * @param {Boolean} enableDelivery Disable or enable delivery */ function updatePrices(net,delivery,couponPrices,enableDelivery){ var formatter = new Intl.NumberFormat(LOCALE, { style: 'currency', currency: CASHIER_CURRENCY, }); cartTotal.totalPrice = net; cartTotal.totalPriceFormat = formatter.format(net); cartTotal.delivery = true; cartTotal.deliveryPrice = delivery; cartTotal.deliveryPriceFormated = formatter.format(delivery); cartTotal.couponStore = couponPrices; cartTotal.withCouponFormat = formatter.format(couponPrices); cartTotal.withDelivery = net-(-delivery)-couponPrices; cartTotal.withDeliveryFormat = formatter.format(net-(-delivery)-couponPrices); } function updateSubTotalPrice(net,enableDelivery){ updatePrices(net,(cartTotal.deliveryPrice?cartTotal.deliveryPrice:0),couponPrice,enableDelivery) } /** * getCartContentAndTotalPrice * This functions connect to laravel to get the current cart items and total price * Saves the values in vue */ function getCartContentAndTotalPrice(){ axios.get('/cart-getContent').then(function (response) { cartContent.items=response.data.data; updateSubTotalPrice(response.data.total,true); }) .catch(function (error) { console.log(error); }); }; // function getItem(id) { // // $.ajax({ // type:'GET', // url:'/getItem', // 'data': {id: id}, // success:function(data) { // var reslt = [data.data.name]; // var arr = []; // // for(var i; i<reslt.length;i++){ // var table = document.getElementById("table"); // var row = table.insertRow(0); // var cell1 = row.insertCell(0); // var cell2 = row.insertCell(1); // cell1.innerHTML = "NEW CELL1"; // cell2.innerHTML = "NEW CELL2"; // } // // // console.log(cartContent.items); // // } // }); // //alert(id); // // } function apply() { $("#promo_code_btn").click(function() { var code = $('#coupon_code').val(); // var formatter = new Intl.NumberFormat(LOCALE, { // style: 'currency', // currency: CASHIER_CURRENCY, // }); axios.post('/coupons/apply/'+code).then(function (response) { if(response.data.status){ couponPrice=response.data.total; $("#promo_code_btn").attr("disabled",true); $("#promo_code_btn").attr("readonly"); $("#promo_code_war").hide(); $("#promo_code_succ").show(); //console.log(couponPrice); updateSubTotalPrice(cartTotal.totalPrice,true); $("#couponWithDelivery").hide(); $("#coup").val(cartTotal.withDeliveryFormat).show(); $("#couponPriceShow").hide(); $("#afterCouponStore").val(cartTotal.withDelivery); $("#couponPriceStore").val(cartTotal.couponStore); $("#couponPrice").val(cartTotal.withCouponFormat).show(); // cartTotal.withDelivery=cartTotal.withDelivery-couponPrice; // cartTotal.withDeliveryFormat=formatter.format(cartTotal.withDelivery); // console.log(cartTotal.withDelivery); // if (!deliveryStatus) { // updateSubTotalPrice(cartTotal.totalPrice,deliveryCharge,true); // } // else{ // updateSubTotalPrice(cartTotal.totalPrice,deliveryCharge,false); // } js.notify(response.data.msg,"warning"); //chageDeliveryCost(deliveryCharge) //updatePrices(a,deliveryCharge,couponPrice,true); }else{ $("#promo_code_succ").hide(); $("#promo_code_war").show(); js.notify(response.data.msg,"warning"); } }).catch(function (error) { console.log(error); }); }); } function getUser() { var id = document.getElementById("mobile").value; 
$.ajax({ type:'GET', url:'/getUser', 'data': {id: id}, success:function(data) { document.getElementById('name').value = data.data.name; document.getElementById('email').value = data.data.email; } }); } $("#fborder_btn").click(function() { var address = $('#addressID').val(); var comment = $('#comment').val(); axios.post('/fb-order', { address: address, comment: comment }) .then(function (response) { if(response.status){ var text = response.data.msg; var fullLink = document.createElement('input'); document.body.appendChild(fullLink); fullLink.value = text; fullLink.select(); document.execCommand("copy", false); fullLink.remove(); swal({ title: "Good job!", text: "Order is submited in the system and copied in your clipboard. Next, messenger will open and you need to paste the order details there.", icon: "success", button: "Continue to messenger", }).then(function(isConfirm) { if (isConfirm) { document.getElementById('order-form').submit(); } }); } }).catch(function (error) { console.log(error); }); }); /** * Removes product from cart, and calls getCartConent * @param {Number} product_id */ function removeProductIfFromCart(product_id){ axios.post('/cart-remove', {id:product_id}).then(function (response) { getCartContentAndTotalPrice(); }).catch(function (error) { console.log(error); }); } /** * Update the product quantity, and calls getCartConent * @param {Number} product_id */ function incCart(product_id){ axios.get('/cartinc/'+product_id).then(function (response) { getCartContentAndTotalPrice(); }).catch(function (error) { console.log(error); }); } function decCart(product_id){ axios.get('/cartdec/'+product_id).then(function (response) { getCartContentAndTotalPrice(); }).catch(function (error) { console.log(error); }); } //GET PAGES FOR FOOTER function getPages(){ axios.get('/footer-pages').then(function (response) { footerPages.pages=response.data.data; }) .catch(function (error) { console.log(error); }); }; function dineTypeSwitch(mod){ console.log("Change mod to "+mod); $('.tablepicker').hide(); $('.takeaway_picker').hide(); if(mod=="dinein"){ $('.tablepicker').show(); $('.takeaway_picker').hide(); //phone $('#localorder_phone').hide(); } if(mod=="takeaway"){ $('.tablepicker').hide(); $('.takeaway_picker').show(); //phone $('#localorder_phone').show(); } } function orderTypeSwither(mod){ console.log("Change mod to "+mod); $('.delTime').hide(); $('.picTime').hide(); if(mod=="pickup"){ updatePrices(cartTotal.totalPrice,null,couponPrice,false) $('.picTime').show(); $('#addressBox').hide(); } if(mod=="delivery"){ $('.delTime').show(); $('#addressBox').show(); getCartContentAndTotalPrice(); } } setTimeout(function(){ if(typeof initialOrderType !== 'undefined'){ console.log("Will change now to "+initialOrderType+" --"); orderTypeSwither(initialOrderType); }else{ console.log("No initialOrderType"); } },1000); function chageDeliveryCost(deliveryCost){ $("#deliveryCost").val(deliveryCost); deliveryCharge=deliveryCost; updatePrices(cartTotal.totalPrice,deliveryCharge,couponPrice,true); console.log(deliveryCharge); console.log("Done updatin delivery price"); } //First we beed to capture the event of chaning of the address function deliveryAddressSwithcer(){ $("#addressID").change(function() { //The delivery cost var deliveryCost=$(this).find(':selected').data('cost'); //We now need to pass this cost to some parrent funct for handling the delivery cost change chageDeliveryCost(deliveryCost); }); } function deliveryTypeSwitcher(){ $('.picTime').hide(); 
$('input:radio[name="deliveryType"]').change(function() { orderTypeSwither($(this).val()); }) } function dineTypeSwitcher(){ $('input:radio[name="dineType"]').change(function() { $('.delTimeTS').hide(); $('.picTimeTS').show(); dineTypeSwitch($(this).val()); }) } function paymentTypeSwitcher(){ $('input:radio[name="paymentType"]').change( function(){ //HIDE ALL $('#totalSubmitCOD').hide() $('#totalSubmitStripe').hide() $('#stripe-payment-form').hide() $('#paystack-payment-form').hide() $('#paypal-payment-form').hide() $('#mollie-payment-form').hide() if($(this).val()=="cod"){ //SHOW COD $('#totalSubmitCOD').show(); }else if($(this).val()=="stripe"){ //SHOW STRIPE $('#totalSubmitStripe').show(); $('#stripe-payment-form').show() }else if($(this).val()=="paystack"){ $('#paystack-payment-form').show() }else if($(this).val()=="paypal"){ $('#paypal-payment-form').show() }else if($(this).val()=="mollie"){ $('#mollie-payment-form').show() } }); } window.onload = function () { console.log("Cart function called"); //VUE CART cartContent = new Vue({ el: '#cartList', data: { items: [], }, methods: { remove: function (product_id) { removeProductIfFromCart(product_id); }, incQuantity: function (product_id){ incCart(product_id) }, decQuantity: function (product_id){ decCart(product_id) }, } }) //GET PAGES FOR FOOTER getPages(); //Payment Method switcher paymentTypeSwitcher(); //Delivery type switcher deliveryTypeSwitcher(); //For Dine in / takeout dineTypeSwitcher(); //Activate address switcher deliveryAddressSwithcer(); apply(); //VUE FOOTER PAGES footerPages = new Vue({ el: '#footer-pages', data: { pages: [] } }) //VUE COMPLETE ORDER TOTAL PRICE total = new Vue({ el: '#totalSubmit', data: { totalPrice:0 } }) //VUE TOTAL cartTotal= new Vue({ el: '#totalPrices', data: { totalPrice:0, minimalOrder:0, totalPriceFormat:"", deliveryPriceFormated:"", delivery:true, coupon:0, } }) //Call to get the total price and items getCartContentAndTotalPrice(); var addToCart1 = new Vue({ el:'#addToCart1', methods: { addToCartAct() { axios.post('/cart-add', { id: $('#modalID').text(), quantity: $('#quantity').val(), extras:extrasSelected, variantID:variantID }) .then(function (response) { if(response.data.status){ $('#productModal').modal('hide'); getCartContentAndTotalPrice(); //$('#miniCart').addClass( "open" ); openNav(); }else{ $('#productModal').modal('hide'); js.notify(response.data.errMsg,"warning"); } }) .catch(function (error) { console.log(error); }); }, }, }); }
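// Illustrative sketch (not part of the original file): how the arithmetic in
// updatePrices() above behaves. `net - (-delivery) - couponPrices` coerces a
// string delivery cost (for example one read from a form field) to a number,
// whereas a plain `net + delivery` would concatenate. All values are made up.
var exampleNet = 20;          // cart subtotal
var exampleDelivery = '2.5';  // a string, as form inputs often return
var exampleCoupon = 5;
console.log(exampleNet - (-exampleDelivery) - exampleCoupon); // 17.5
console.log(exampleNet + exampleDelivery);                    // "202.5" (concatenation)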
// # Settings API // RESTful API for the Setting resource var _ = require('lodash'), dataProvider = require('../models'), Promise = require('bluebird'), config = require('../config'), canThis = require('../permissions').canThis, errors = require('../errors'), utils = require('./utils'), docName = 'settings', settings, updateConfigTheme, updateSettingsCache, settingsFilter, filterPaths, readSettingsResult, settingsResult, canEditAllSettings, populateDefaultSetting, hasPopulatedDefaults = false, /** * ## Cache * Holds cached settings * @private * @type {{}} */ settingsCache = {}; /** * ### Updates Config Theme Settings * Maintains the cache of theme specific variables that are reliant on settings. * @private */ updateConfigTheme = function () { config.set({ theme: { title: settingsCache.title.value || '', description: settingsCache.description.value || '', logo: settingsCache.logo.value || '', cover: settingsCache.cover.value || '' } }); }; /** * ### Update Settings Cache * Maintain the internal cache of the settings object * @public * @param {Object} settings * @returns {Settings} */ updateSettingsCache = function (settings) { settings = settings || {}; if (!_.isEmpty(settings)) { _.map(settings, function (setting, key) { settingsCache[key] = setting; }); updateConfigTheme(); return Promise.resolve(settingsCache); } return dataProvider.Settings.findAll() .then(function (result) { settingsCache = readSettingsResult(result.models); updateConfigTheme(); return settingsCache; }); }; // ## Helpers /** * ### Settings Filter * Filters an object based on a given filter object * @private * @param {Object} settings * @param {String} filter * @returns {*} */ settingsFilter = function (settings, filter) { return _.object(_.filter(_.pairs(settings), function (setting) { if (filter) { return _.some(filter.split(','), function (f) { return setting[1].type === f; }); } return true; })); }; /** * ### Filter Paths * Normalizes paths read by require-tree so that the apps and themes modules can use them. 
Creates an array (res) of objects
 * with useful info about each read package: its name, whether it is active
 * (by comparison with the second argument), and its package.json contents if present, otherwise false
 * @private
 * @param {object} paths as returned by require-tree()
 * @param {array/string} active as read from the settings object
 * @returns {Array} of objects with useful info about apps / themes
 */
filterPaths = function (paths, active) {
    var pathKeys = Object.keys(paths),
        res = [],
        item;

    // turn active into an array (so themes and apps can be checked the same)
    if (!Array.isArray(active)) {
        active = [active];
    }

    _.each(pathKeys, function (key) {
        // do not include hidden files or _messages
        if (key.indexOf('.') !== 0 &&
                key !== '_messages' &&
                key !== 'README.md'
        ) {
            item = {
                name: key
            };

            if (paths[key].hasOwnProperty('package.json')) {
                item.package = paths[key]['package.json'];
            } else {
                item.package = false;
            }

            if (_.indexOf(active, key) !== -1) {
                item.active = true;
            }

            res.push(item);
        }
    });

    return res;
};

/**
 * ### Read Settings Result
 * @private
 * @param {Array} settingsModels
 * @returns {Settings}
 */
readSettingsResult = function (settingsModels) {
    var settings = _.reduce(settingsModels, function (memo, member) {
            if (!memo.hasOwnProperty(member.attributes.key)) {
                memo[member.attributes.key] = member.attributes;
            }

            return memo;
        }, {}),
        themes = config.paths.availableThemes,
        apps = config.paths.availableApps,
        res;

    if (settings.activeTheme && themes) {
        res = filterPaths(themes, settings.activeTheme.value);

        settings.availableThemes = {
            key: 'availableThemes',
            value: res,
            type: 'theme'
        };
    }

    if (settings.activeApps && apps) {
        res = filterPaths(apps, JSON.parse(settings.activeApps.value));

        settings.availableApps = {
            key: 'availableApps',
            value: res,
            type: 'app'
        };
    }

    return settings;
};

/**
 * ### Settings Result
 * @private
 * @param {Object} settings
 * @param {String} type
 * @returns {{settings: *}}
 */
settingsResult = function (settings, type) {
    var filteredSettings = _.values(settingsFilter(settings, type)),
        result = {
            settings: filteredSettings,
            meta: {}
        };

    if (type) {
        result.meta.filters = {
            type: type
        };
    }

    return result;
};

/**
 * ### Populate Default Setting
 * @private
 * @param {String} key
 * @returns Promise(Setting)
 */
populateDefaultSetting = function (key) {
    // Call populateDefault and update the settings cache
    return dataProvider.Settings.populateDefault(key).then(function (defaultSetting) {
        // Process the default result and add to settings cache
        var readResult = readSettingsResult([defaultSetting]);

        // Add to the settings cache, then resolve with the populated setting
        // so that callers can run permission checks against it
        return updateSettingsCache(readResult).then(function () {
            return defaultSetting;
        });
    }).catch(function (err) {
        // Pass along NotFoundError (note: `instanceof`, not `typeof`, since typeof yields a string)
        if (err instanceof errors.NotFoundError) {
            return Promise.reject(err);
        }

        // TODO: Different kind of error?
return Promise.reject(new errors.NotFoundError('Problem finding setting: ' + key)); }); }; /** * ### Can Edit All Settings * Check that this edit request is allowed for all settings requested to be updated * @private * @param {Object} settingsInfo * @returns {*} */ canEditAllSettings = function (settingsInfo, options) { var checkSettingPermissions = function (setting) { if (setting.type === 'core' && !(options.context && options.context.internal)) { return Promise.reject( new errors.NoPermissionError('Attempted to access core setting from external request') ); } return canThis(options.context).edit.setting(setting.key).catch(function () { return Promise.reject(new errors.NoPermissionError('You do not have permission to edit settings.')); }); }, checks = _.map(settingsInfo, function (settingInfo) { var setting = settingsCache[settingInfo.key]; if (!setting) { // Try to populate a default setting if not in the cache return populateDefaultSetting(settingInfo.key).then(function (defaultSetting) { // Get the result from the cache with permission checks return checkSettingPermissions(defaultSetting); }); } return checkSettingPermissions(setting); }); return Promise.all(checks); }; /** * ## Settings API Methods * * **See:** [API Methods](index.js.html#api%20methods) */ settings = { /** * ### Browse * @param {Object} options * @returns {*} */ browse: function browse(options) { // First, check if we have populated the settings from default-settings yet if (!hasPopulatedDefaults) { return dataProvider.Settings.populateDefaults().then(function () { hasPopulatedDefaults = true; return settings.browse(options); }); } options = options || {}; var result = settingsResult(settingsCache, options.type); // If there is no context, return only blog settings if (!options.context) { return Promise.resolve(_.filter(result.settings, function (setting) { return setting.type === 'blog'; })); } // Otherwise return whatever this context is allowed to browse return canThis(options.context).browse.setting().then(function () { // Omit core settings unless internal request if (!options.context.internal) { result.settings = _.filter(result.settings, function (setting) { return setting.type !== 'core'; }); } return result; }); }, /** * ### Read * @param {Object} options * @returns {*} */ read: function read(options) { if (_.isString(options)) { options = {key: options}; } var getSettingsResult = function () { var setting = settingsCache[options.key], result = {}; result[options.key] = setting; if (setting.type === 'core' && !(options.context && options.context.internal)) { return Promise.reject( new errors.NoPermissionError('Attempted to access core setting from external request') ); } if (setting.type === 'blog') { return Promise.resolve(settingsResult(result)); } return canThis(options.context).read.setting(options.key).then(function () { return settingsResult(result); }, function () { return Promise.reject(new errors.NoPermissionError('You do not have permission to read settings.')); }); }; // If the setting is not already in the cache if (!settingsCache[options.key]) { // Try to populate the setting from default-settings file return populateDefaultSetting(options.key).then(function () { // Get the result from the cache with permission checks return getSettingsResult(); }); } // Get the result from the cache with permission checks return getSettingsResult(); }, /** * ### Edit * Update properties of a post * @param {{settings: }} object Setting or a single string name * @param {{id (required), include,...}} options 
(optional) or a single string value * @return {Promise(Setting)} Edited Setting */ edit: function edit(object, options) { options = options || {}; var self = this, type; // Allow shorthand syntax where a single key and value are passed to edit instead of object and options if (_.isString(object)) { object = {settings: [{key: object, value: options}]}; } // clean data _.each(object.settings, function (setting) { if (!_.isString(setting.value)) { setting.value = JSON.stringify(setting.value); } }); type = _.find(object.settings, function (setting) { return setting.key === 'type'; }); if (_.isObject(type)) { type = type.value; } object.settings = _.reject(object.settings, function (setting) { return setting.key === 'type' || setting.key === 'availableThemes' || setting.key === 'availableApps'; }); return canEditAllSettings(object.settings, options).then(function () { return utils.checkObject(object, docName).then(function (checkedData) { options.user = self.user; return dataProvider.Settings.edit(checkedData.settings, options); }).then(function (result) { var readResult = readSettingsResult(result); return updateSettingsCache(readResult).then(function () { return settingsResult(readResult, type); }); }); }); } }; module.exports = settings; module.exports.updateSettingsCache = updateSettingsCache;
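// Usage sketch (not part of the original file): exercising the API above.
// The require path is hypothetical; the option shapes follow the JSDoc comments.
var settingsAPI = require('./settings');

// Read one setting; a bare string key is normalized to {key: ...}.
settingsAPI.read('title').then(function (result) {
    console.log(result.settings[0].value);
});

// Edit with the object form; an internal context bypasses the core-setting
// permission check performed in canEditAllSettings().
settingsAPI.edit(
    {settings: [{key: 'title', value: 'My Blog'}]},
    {context: {internal: true}}
);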
// flow-typed signature: 1072aec7a4714d5945f8149706638320 // flow-typed version: <<STUB>>/@tippy.js/react_v1.1.1/flow_v0.87.0 /** * This is an autogenerated libdef stub for: * * '@tippy.js/react' * * Fill this stub out by replacing all the `any` types. * * Once filled out, we encourage you to share your work with the * community by sending a pull request to: * https://github.com/flowtype/flow-typed */ import React from 'react'; declare module '@tippy.js/react' { // TODO: Properly type Tippy instance declare type TippyInstance = any; declare type AnimationType = 'fade' | 'scale' | 'shift-forward' | 'perspective' | 'shift-away'; declare type ArrowType = 'sharp' | 'round'; declare type SizeType = 'regular' | 'large'; declare type PopperProps = {| a11y?: boolean, allowHTML?: boolean, animateFill?: boolean, animation?: AnimationType, appendTo?: HTMLElement | ((tip: TippyInstance) => HTMLElement), arrow?: boolean, arrowType?: ArrowType, arrowTransform?: string, delay?: [number, number], duration?: [number, number], distance?: number, flip?: boolean, flipBehavior?: 'flip' | string[], followCursor?: boolean, hideOnClick?: 'toggle' | boolean, inertia?: boolean, interactive?: boolean, interactiveBorder?: number, interactiveDebounce?: number, maxWidth?: string | number, offset?: number, onHidden?: Function, onHide?: Function, onMount?: Function, onShow?: Function, onShown?: Function, livePlacement?: boolean, performance?: boolean, placement?: string, showOnInit?: boolean, size?: SizeType, sticky?: boolean, target?: string, theme?: string, touch?: boolean, touchHold?: boolean, trigger?: string, updateDuration?: number, wait?: (tip: TippyInstance, event: any) => void, zIndex?: number, |}; declare export type TippyProps = {| ...PopperProps, children: React.Node, content: React.Node, isEnabled?: boolean, isVisible?: boolean, onCreate?: (tip: TippyInstance) => void, |}; declare class Tippy extends React$Component<TippyProps> {} declare module.exports: typeof Tippy; }
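// Usage sketch for the component this libdef types (illustrative only; the
// props used here correspond to fields of TippyProps/PopperProps above).
import * as React from 'react';
import Tippy from '@tippy.js/react';

export const SaveButton = () => (
  <Tippy content="Saved!" placement="bottom" arrow={true} duration={[275, 250]}>
    <button type="button">Save</button>
  </Tippy>
);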
var plugins = [{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-netlify-identity/gatsby-ssr'), options: {"plugins":[],"url":"https://www.boneluv.com/"}, },{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-crisp-chat/gatsby-ssr'), options: {"plugins":[],"websiteId":"6d863b44-dff8-42fd-8bd0-dbac3fe730d3","enableDuringDevelop":true,"defer":false,"enableImprovedAccessibility":false}, },{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-manifest/gatsby-ssr'), options: {"plugins":[],"name":"gatsby-boneluv","short_name":"boneluv","start_url":"/","theme_color":"#ff187c","icon":"src/boneicono.png"}, },{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-react-helmet/gatsby-ssr'), options: {"plugins":[]}, },{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-sitemap/gatsby-ssr'), options: {"plugins":[],"output":"/sitemap.xml","query":"\n {\n site {\n siteMetadata {\n siteUrl\n }\n }\n \n allSitePage {\n edges {\n node {\n path\n }\n }\n }\n }"}, },{ plugin: require('C:/Users/Boneluv/Documents/GitHub/boneluv.com/node_modules/gatsby-plugin-google-analytics/gatsby-ssr'), options: {"plugins":[],"trackingId":"UA-102351162-2"}, }] // During bootstrap, we write requires at top of this file which looks like: // var plugins = [ // { // plugin: require("/path/to/plugin1/gatsby-ssr.js"), // options: { ... }, // }, // { // plugin: require("/path/to/plugin2/gatsby-ssr.js"), // options: { ... }, // }, // ] const apis = require(`./api-ssr-docs`) // Run the specified API in any plugins that have implemented it module.exports = (api, args, defaultReturn, argTransform) => { if (!apis[api]) { console.log(`This API doesn't exist`, api) } // Run each plugin in series. // eslint-disable-next-line no-undef let results = plugins.map(plugin => { if (!plugin.plugin[api]) { return undefined } const result = plugin.plugin[api](args, plugin.options) if (result && argTransform) { args = argTransform({ args, result }) } return result }) // Filter out undefined results. results = results.filter(result => typeof result !== `undefined`) if (results.length > 0) { return results } else { return [defaultReturn] } }
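// Illustrative call (not part of the generated file): how this runner might
// be invoked during server rendering. The argument object is a stand-in; the
// real call sites live inside gatsby's own SSR code.
const runSSRApi = require(`./api-runner-ssr`)
const headResults = runSSRApi(
  `onRenderBody`, // API name looked up in each plugin's gatsby-ssr.js
  { setHeadComponents: () => {} }, // args passed through to every plugin
  [] // defaultReturn, used when no plugin returns anything
)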
cordova.define('cordova/plugin_list', function(require, exports, module) { module.exports = [ { "file": "plugins/mobi.monaca.plugins.Monaca/www/monaca.js", "id": "mobi.monaca.plugins.Monaca.monaca" }, { "file": "plugins/cordova-plugin-whitelist/whitelist.js", "id": "cordova-plugin-whitelist.whitelist", "runs": true }, { "file": "plugins/cordova-plugin-splashscreen/www/splashscreen.js", "id": "cordova-plugin-splashscreen.SplashScreen", "clobbers": [ "navigator.splashscreen" ] }, { "file": "plugins/plugin.push.nifty/www/nifty.js", "id": "plugin.push.nifty.NiftyCloud", "clobbers": [ "NCMB.monaca" ], "runs": true }, { "file": "plugins/cordova-plugin-inappbrowser/www/inappbrowser.js", "id": "cordova-plugin-inappbrowser.inappbrowser", "clobbers": [ "cordova.InAppBrowser.open", "window.open" ] }, { "file": "plugins/cordova-plugin-network-information/www/network.js", "id": "cordova-plugin-network-information.network", "clobbers": [ "navigator.connection", "navigator.network.connection" ] }, { "file": "plugins/cordova-plugin-network-information/www/Connection.js", "id": "cordova-plugin-network-information.Connection", "clobbers": [ "Connection" ] }, { "file": "plugins/cordova-plugin-dialogs/www/notification.js", "id": "cordova-plugin-dialogs.notification", "merges": [ "navigator.notification" ] }, { "file": "plugins/cordova-plugin-dialogs/www/android/notification.js", "id": "cordova-plugin-dialogs.notification_android", "merges": [ "navigator.notification" ] } ]; module.exports.metadata = // TOP OF METADATA { "mobi.monaca.plugins.Monaca": "3.0.0", "cordova-plugin-whitelist": "1.0.0", "cordova-plugin-splashscreen": "2.1.0", "plugin.push.nifty": "2.0.1", "cordova-plugin-inappbrowser": "1.0.1", "cordova-plugin-network-information": "1.0.1", "cordova-plugin-dialogs": "1.1.1" } // BOTTOM OF METADATA });
import asyncio import importlib import logging.config import os import sys from signal import Signals from typing import TYPE_CHECKING, cast import click from pydantic.utils import import_string from .logs import default_log_config from .version import VERSION from .worker import check_health, create_worker, run_worker if TYPE_CHECKING: from .typing import WorkerSettingsType burst_help = 'Batch mode: exit once no jobs are found in any queue.' health_check_help = 'Health Check: run a health check and exit.' watch_help = 'Watch a directory and reload the worker upon changes.' verbose_help = 'Enable verbose output.' logdict_help = "Import path for a dictionary in logdict form, to configure Arq's own logging." @click.command('arq') @click.version_option(VERSION, '-V', '--version', prog_name='arq') @click.argument('worker-settings', type=str, required=True) @click.option('--burst/--no-burst', default=None, help=burst_help) @click.option('--check', is_flag=True, help=health_check_help) @click.option('--watch', type=click.Path(exists=True, dir_okay=True, file_okay=False), help=watch_help) @click.option('-v', '--verbose', is_flag=True, help=verbose_help) @click.option('--custom-log-dict', type=str, help=logdict_help) def cli(*, worker_settings: str, burst: bool, check: bool, watch: str, verbose: bool, custom_log_dict: str) -> None: """ Job queues in python with asyncio and redis. CLI to run the arq worker. """ sys.path.append(os.getcwd()) worker_settings_ = cast('WorkerSettingsType', import_string(worker_settings)) if custom_log_dict: try: config_path, config_dict = custom_log_dict.rsplit(".", maxsplit=1) log_config = getattr(importlib.import_module(config_path), config_dict) except (TypeError, AttributeError): log_config = default_log_config(verbose) else: log_config = default_log_config(verbose) logging.config.dictConfig(log_config) if check: exit(check_health(worker_settings_)) else: kwargs = {} if burst is None else {'burst': burst} if watch: asyncio.get_event_loop().run_until_complete(watch_reload(watch, worker_settings_)) else: run_worker(worker_settings_, **kwargs) async def watch_reload(path: str, worker_settings: 'WorkerSettingsType') -> None: try: from watchgod import awatch except ImportError as e: # pragma: no cover raise ImportError('watchgod not installed, use `pip install watchgod`') from e loop = asyncio.get_event_loop() stop_event = asyncio.Event() def worker_on_stop(s: Signals) -> None: if s != Signals.SIGUSR1: # pragma: no cover stop_event.set() worker = create_worker(worker_settings) try: worker.on_stop = worker_on_stop loop.create_task(worker.async_run()) async for _ in awatch(path, stop_event=stop_event): print('\nfiles changed, reloading arq worker...') worker.handle_sig(Signals.SIGUSR1) await worker.close() loop.create_task(worker.async_run()) finally: await worker.close()
/** * (C) 2020 URD * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ module.exports = require('./urd-ac-3ch').nodeRedFunction;
import Ember from 'ember'; export default Ember.Component.extend({ classNames: ['nacho-select'], content: [], init() { this._super(...arguments); this.updateContent(); }, onSelectionChanged: Ember.observer('selected', 'values', function() { this.updateContent(); }), /** * Parse and transform the values list into a list of objects with the currently * selected option flagged as `isSelected` */ updateContent() { const selected = this.get('selected') || null; const options = this.get('values') || []; const content = options.map(option => { if (typeof option === 'object' && typeof option.value !== 'undefined') { const isSelected = option.value === selected; return { value: option.value, label: option.label, isSelected, isDisabled: option.isDisabled || false }; } return { value: option, isSelected: option === selected }; }); this.set('content', content); }, actions: { // Reflect UI changes in the component and bubble the `selectionDidChange` action change() { const { selectedIndex } = this.$('select')[0]; const values = this.get('values'); const _selected = values[selectedIndex]; const selected = typeof _selected.value !== 'undefined' ? _selected.value : _selected; this.set('selected', selected); this.sendAction('selectionDidChange', _selected); } } });
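// Illustration (not part of the component): the two `values` shapes
// updateContent() accepts and the `content` list it derives. Plain values
// and {value, label} objects can be mixed freely.
const exampleValues = ['ears', { value: 'paws', label: 'Paws', isDisabled: true }];
const exampleSelected = 'paws';
// With selected === 'paws', updateContent() would set `content` to:
// [
//   { value: 'ears', isSelected: false },
//   { value: 'paws', label: 'Paws', isSelected: true, isDisabled: true }
// ]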
import createSvgIcon from './utils/createSvgIcon.js'; import { jsx as _jsx } from "react/jsx-runtime"; export default createSvgIcon( /*#__PURE__*/_jsx("path", { d: "M3 13h2v-2H3v2zm0 4h2v-2H3v2zm0-8h2V7H3v2zm4 4h14v-2H7v2zm0 4h14v-2H7v2zM7 7v2h14V7H7z" }), 'List');
export const SEARCH = 'SEARCH'; export const SET_ORDER_BY = 'SET_ORDER_BY'; export const TOGGLE_AGENCY_FILTER = 'TOGGLE_AGENCY_FILTER'; export const TOGGLE_CATEGORY_FILTER = 'TOGGLE_CATEGORY_FILTER'; export const TOGGLE_FILTER_DRAWER = 'TOGGLE_FILTER_DRAWER'; export const TOGGLE_INTRO = 'TOGGLE_INTRO'; export const TOGGLE_RECOMMENDATION = 'TOGGLE_RECOMMENDATION'; export const TOGGLE_STATUS_FILTER = 'TOGGLE_STATUS_FILTER'; export const search = function searchActionCreator(q) { return { type: SEARCH, q, }; }; export const toggleAgencyFilter = function toggleAgencyFilterActionCreator( filter, initialFilters, initialRender, ) { return { type: TOGGLE_AGENCY_FILTER, filter, initialFilters, initialRender, }; }; export const toggleCategoryFilter = function toggleCategoryFilterActionCreator( filter, initialFilters, initialRender, ) { return { type: TOGGLE_CATEGORY_FILTER, filter, initialFilters, initialRender, }; }; export const toggleStatusFilter = function toggleStatusFilterActionCreator( filter, initialFilters, initialRender, ) { return { type: TOGGLE_STATUS_FILTER, filter, initialFilters, initialRender, }; }; export const toggleRecommendation = function toggleRecommendationActionCreator( r, detailId, initialRender, ) { return { type: TOGGLE_RECOMMENDATION, recommendation: r, detailId, initialRender, }; }; export const toggleFilterDrawer = function toggleFilterDrawerActionCreator() { return { type: TOGGLE_FILTER_DRAWER, }; }; export const setOrderBy = function setOrderByActionCreator( orderBy, direction, ) { return { type: SET_ORDER_BY, orderBy, direction, }; }; export const toggleIntro = function toggleIntroActionCreator() { return { type: TOGGLE_INTRO, }; };
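// Usage sketch (not part of the original module): dispatching these action
// creators through a minimal store. The reducer below is a stand-in; only
// the action types and payload fields come from this file.
import { createStore } from 'redux';

const searchReducer = (state = { q: '' }, action) =>
  action.type === SEARCH ? { ...state, q: action.q } : state;

const store = createStore(searchReducer);
store.dispatch(search('infrastructure')); // {type: 'SEARCH', q: 'infrastructure'}
store.dispatch(setOrderBy('date', 'desc')); // ignored by this stand-in reducer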
@@include("node_modules/swiper/dist/js/swiper.min.js") @@include("node_modules/wowjs/dist/wow.min.js") new WOW().init(); var mySwiper = new Swiper ('.swiper-container', { // Optional parameters direction: 'horizontal', loop: true, // If we need pagination // pagination: { // el: '.swiper-pagination', // }, // Navigation arrows navigation: { nextEl: '.swiper-button-next', prevEl: '.swiper-button-prev', } // And if we need scrollbar // scrollbar: { // el: '.swiper-scrollbar', // }, }); function pageNavigationClassToggle() { var elem = document.querySelector('.mg-page_navigation'); var body = document.querySelector('body'); elem.classList.toggle('mg--nav_is_opened'); body.classList.toggle('mg--nav_is_opened'); } document.querySelector('#mg-page_nav__toggle-button').addEventListener('click', pageNavigationClassToggle);
var defaultIsMergeableObject = require('is-mergeable-object') function emptyTarget(val) { return Array.isArray(val) ? [] : {} } function cloneUnlessOtherwiseSpecified(value, options) { return (options.clone !== false && options.isMergeableObject(value)) ? deepmerge(emptyTarget(value), value, options) : value } function defaultArrayMerge(target, source, options) { return target.concat(source).map(function(element) { return cloneUnlessOtherwiseSpecified(element, options) }) } function getMergeFunction(key, options) { if (!options.customMerge) { return deepmerge } var customMerge = options.customMerge(key) return typeof customMerge === 'function' ? customMerge : deepmerge } function getEnumerableOwnPropertySymbols(target) { return Object.getOwnPropertySymbols ? Object.getOwnPropertySymbols(target).filter(function(symbol) { return target.propertyIsEnumerable(symbol) }) : [] } function getKeys(target) { return Object.keys(target).concat(getEnumerableOwnPropertySymbols(target)) } function mergeObject(target, source, options) { var destination = {} if (options.isMergeableObject(target)) { getKeys(target).forEach(function(key) { destination[key] = cloneUnlessOtherwiseSpecified(target[key], options) }) } getKeys(source).forEach(function(key) { if (!options.isMergeableObject(source[key]) || !target[key]) { destination[key] = cloneUnlessOtherwiseSpecified(source[key], options) } else { destination[key] = getMergeFunction(key, options)(target[key], source[key], options) } }) return destination } function deepmerge(target, source, options) { options = Object.assign({ arrayMerge: defaultArrayMerge, isMergeableObject: defaultIsMergeableObject }, options, { cloneUnlessOtherwiseSpecified: cloneUnlessOtherwiseSpecified }) var sourceIsArray = Array.isArray(source) var targetIsArray = Array.isArray(target) var sourceAndTargetTypesMatch = sourceIsArray === targetIsArray if (!sourceAndTargetTypesMatch) { return cloneUnlessOtherwiseSpecified(source, options) } else if (sourceIsArray) { return options.arrayMerge(target, source, options) } else { return mergeObject(target, source, options) } } deepmerge.all = function deepmergeAll(array, options) { if (!Array.isArray(array)) { throw new Error('first argument should be an array') } return array.reduce(function(prev, next) { return deepmerge(prev, next, options) }, {}) } module.exports = deepmerge
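// Usage sketch (not part of the module): the default behaviours above.
// deepmerge clones nested objects, defaultArrayMerge concatenates arrays,
// and options.arrayMerge swaps that strategy out.
var merged = deepmerge({ a: { b: 1 }, tags: ['x'] }, { a: { c: 2 }, tags: ['y'] });
// merged => { a: { b: 1, c: 2 }, tags: ['x', 'y'] }

var overwritten = deepmerge(
    { tags: ['x'] },
    { tags: ['y'] },
    { arrayMerge: function (target, source) { return source } }
);
// overwritten => { tags: ['y'] }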
/*<replacement>*/ var bufferShim = require('safe-buffer').Buffer; /*</replacement>*/ var common = require('../common'); var assert = require('assert/'); var Stream = require('stream').Stream; { var source = new Stream(); var dest = new Stream(); source.pipe(dest); var gotErr = null; source.on('error', function (err) { gotErr = err; }); var err = new Error('This stream turned into bacon.'); source.emit('error', err); assert.strictEqual(gotErr, err); } { var _source = new Stream(); var _dest = new Stream(); _source.pipe(_dest); var _err = new Error('This stream turned into bacon.'); var _gotErr = null; try { _source.emit('error', _err); } catch (e) { _gotErr = e; } assert.strictEqual(_gotErr, _err); } { var R = require('../../').Readable; var W = require('../../').Writable; var r = new R(); var w = new W(); var removed = false; r._read = common.mustCall(function () { setTimeout(common.mustCall(function () { assert(removed); assert.throws(function () { w.emit('error', new Error('fail')); }, /^Error: fail$/); }), 1); }); w.on('error', myOnError); r.pipe(w); w.removeListener('error', myOnError); removed = true; function myOnError() { throw new Error('this should not happen'); } } { var _R = require('../../').Readable; var _W = require('../../').Writable; var _r = new _R(); var _w = new _W(); var _removed = false; _r._read = common.mustCall(function () { setTimeout(common.mustCall(function () { assert(_removed); _w.emit('error', new Error('fail')); }), 1); }); _w.on('error', common.mustCall(function () {})); _w._write = function () {}; _r.pipe(_w); // Removing some OTHER random listener should not do anything _w.removeListener('error', function () {}); _removed = true; }
import uuid from 'uuid/v1'; export default [ { id: uuid(), name: 'Cao Yu', address: { country: 'USA', state: 'Bristow', city: 'Iowa', street: '1865 Pleasant Hill Road' }, email: '[email protected]', //avatarUrl: '/images/avatars/avatar_4.png', phone: '712-351-5711', // createdAt: 1555016400000 }, // { // id: uuid(), // name: 'Alexa Richardson', // address: { // country: 'USA', // state: 'Georgia', // city: 'Atlanta', // street: '4894 Lakeland Park Drive' // }, // email: '[email protected]', // phone: '770-635-2682', // avatarUrl: '/images/avatars/avatar_2.png', // createdAt: 1555016400000 // }, // { // id: uuid(), // name: 'Anje Keizer', // address: { // country: 'USA', // state: 'Ohio', // city: 'Dover', // street: '4158 Hedge Street' // }, // email: '[email protected]', // avatarUrl: '/images/avatars/avatar_5.png', // phone: '908-691-3242', // createdAt: 1554930000000 // }, // { // id: uuid(), // name: 'Clarke Gillebert', // address: { // country: 'USA', // state: 'Texas', // city: 'Dallas', // street: '75247' // }, // email: '[email protected]', // phone: '972-333-4106', // avatarUrl: '/images/avatars/avatar_6.png', // createdAt: 1554757200000 // }, // { // id: uuid(), // name: 'Adam Denisov', // address: { // country: 'USA', // state: 'California', // city: 'Bakerfield', // street: '317 Angus Road' // }, // email: '[email protected]', // phone: '858-602-3409', // avatarUrl: '/images/avatars/avatar_1.png', // createdAt: 1554670800000 // }, // { // id: uuid(), // name: 'Ava Gregoraci', // address: { // country: 'USA', // state: 'California', // city: 'Redondo Beach', // street: '2188 Armbrester Drive' // }, // email: '[email protected]', // avatarUrl: '/images/avatars/avatar_7.png', // phone: '415-907-2647', // createdAt: 1554325200000 // }, // { // id: uuid(), // name: 'Emilee Simchenko', // address: { // country: 'USA', // state: 'Nevada', // city: 'Las Vegas', // street: '1798 Hickory Ridge Drive' // }, // email: '[email protected]', // phone: '702-661-1654', // avatarUrl: '/images/avatars/avatar_8.png', // createdAt: 1523048400000 // }, ];
require( "../js/new" ); require( "../scss/layout.scss" ); require( "../scss/input.scss" ); require( "../scss/new.scss" );
from .aggregates import Aggregate from .aggregates import AllAggregate from .aggregates import FirstAggregate from .collection import Collection __all__ = [ "Aggregate", "AllAggregate", "FirstAggregate", "Collection", ]
import{C as R,r as i,bp as h,ki as v,kd as A,b9 as M,A as k,eu as B,b as q}from"./vendor.1906794a.js";import{a as T}from"./quat.7e55d593.js";import{b as V,a as I,d as w,r as N,o as z,e as D,t as j,g as G,q as K,i as P,j as Q,v as _,k as H,m as J,p as U,w as W}from"./DefaultMaterial_COLOR_GAMMA.5a20ddf8.js";import{m as X,c as Y,y as Z,f as tt}from"./meshFeatureSet.f637f7e1.js";import{T as et,i as F,c as S,x as E,u as ot,L as nt,O,E as rt}from"./BufferView.57bc1dec.js";import{a as st,f as at,g as it,r as ct,c as ut,h as lt}from"./vec33.fe9621f1.js";import{b as ft}from"./georeference.86c4bee8.js";import"./types.0d6a11a5.js";import"./Version.abdcd6f4.js";import"./earcut.f20dd8d8.js";import"./deduplicate.c63f6fac.js";async function kt(t,e,n){const s=new V(mt(n)),o=(await I(s,e,n,!0)).model,f=o.lods.shift(),u=new Map,c=new Map;o.textures.forEach((b,C)=>u.set(C,gt(b))),o.materials.forEach((b,C)=>c.set(C,xt(b,u)));const a=dt(f);for(const b of a.parts)bt(a,b,c);const{position:d,normal:l,tangent:r,color:m,texCoord0:p}=a.vertexAttributes,x={position:d.typedBuffer,normal:i(l)?l.typedBuffer:null,tangent:i(r)?r.typedBuffer:null,uv:i(p)?p.typedBuffer:null,color:i(m)?m.typedBuffer:null},$=ft(x,t,n);return{transform:$.transform,components:a.components,spatialReference:t.spatialReference,vertexAttributes:new Z({position:$.vertexAttributes.position,normal:$.vertexAttributes.normal,tangent:$.vertexAttributes.tangent,color:x.color,uv:x.uv})}}function mt(t){return t!=null&&t.resolveFile?{busy:!1,request:async(e,n,s)=>{const o=t.resolveFile(e);return(await R(o,{responseType:n==="image"?"image":n==="binary"?"array-buffer":"json",signal:i(s)?s.signal:null})).data}}:null}function y(t,e){if(q(t))return"-";const n=t.typedBuffer;return`${A(e,n.buffer,()=>e.size)}/${n.byteOffset}/${n.byteLength}`}function pt(t){return i(t)?t.toString():"-"}function dt(t){let e=0;const n={color:!1,tangent:!1,normal:!1,texCoord0:!1},s=new Map,o=new Map,f=[];for(const u of t.parts){const{attributes:{position:c,normal:a,color:d,tangent:l,texCoord0:r}}=u,m=` ${y(c,s)}/ ${y(a,s)}/ ${y(d,s)}/ ${y(l,s)}/ ${y(r,s)}/ ${pt(u.transform)} `;let p=!1;const x=A(o,m,()=>(p=!0,{start:e,length:c.count}));p&&(e+=c.count),a&&(n.normal=!0),d&&(n.color=!0),l&&(n.tangent=!0),r&&(n.texCoord0=!0),f.push({gltf:u,writeVertices:p,region:x})}return{vertexAttributes:{position:w(et,e),normal:n.normal?w(F,e):null,tangent:n.tangent?w(S,e):null,color:n.color?w(E,e):null,texCoord0:n.texCoord0?w(ot,e):null},parts:f,components:[]}}function gt(t){return new X({data:t.data,wrap:yt(t.parameters.wrap)})}function xt(t,e){const n=new M($t(t.color,t.opacity)),s=t.emissiveFactor?new M(Ct(t.emissiveFactor)):null;return new Y({color:n,colorTexture:h(v(t.textureColor,o=>e.get(o))),normalTexture:h(v(t.textureNormal,o=>e.get(o))),emissiveColor:s,emissiveTexture:h(v(t.textureEmissive,o=>e.get(o))),occlusionTexture:h(v(t.textureOcclusion,o=>e.get(o))),alphaMode:wt(t.alphaMode),alphaCutoff:t.alphaCutoff,doubleSided:t.doubleSided,metallic:t.metallicFactor,roughness:t.roughnessFactor,metallicRoughnessTexture:h(v(t.textureMetallicRoughness,o=>e.get(o)))})}function bt(t,e,n){e.writeVertices&&ht(t,e);const s=e.gltf,o=vt(s.indices||s.attributes.position.count,s.primitiveType),f=e.region.start;if(f)for(let u=0;u<o.length;u++)o[u]+=f;t.components.push(new tt({faces:o,material:n.get(s.material),trustSourceNormals:!0}))}function 
ht(t,e){const{position:n,normal:s,tangent:o,color:f,texCoord0:u}=t.vertexAttributes,c=e.region.start,{attributes:a,transform:d}=e.gltf,l=a.position.count;if(st(n.slice(c,l),a.position,d),i(a.normal)&&i(s)){const r=B(T(),d);at(s.slice(c,l),a.normal,r)}else i(s)&&it(s,0,0,1,{dstIndex:c,count:l});if(i(a.tangent)&&i(o)){const r=B(T(),d);D(o.slice(c,l),a.tangent,r)}else i(o)&&j(o,0,0,1,1,{dstIndex:c,count:l});if(i(a.texCoord0)&&i(u)?G(u.slice(c,l),a.texCoord0):i(u)&&K(u,0,0,{dstIndex:c,count:l}),i(a.color)&&i(f)){const r=a.color,m=f.slice(c,l);if(r.elementCount===4)r instanceof S?P(m,r,255):r instanceof E?Q(m,r):r instanceof nt&&_(m,r,8);else{j(m,255,255,255,255);const p=O.fromTypedArray(m.typedBuffer,m.typedBufferStride);r instanceof F?ct(p,r,255):r instanceof O?ut(p,r):r instanceof rt&&lt(p,r,8)}}else i(f)&&j(f.slice(c,l),255,255,255,255)}function vt(t,e){switch(e){case 4:return U(t,W);case 5:return J(t);case 6:return H(t)}}function wt(t){switch(t){case"OPAQUE":return"opaque";case"MASK":return"mask";case"BLEND":return"blend"}}function yt(t){return{horizontal:L(t.s),vertical:L(t.t)}}function L(t){switch(t){case 33071:return"clamp";case 33648:return"mirror";case 10497:return"repeat"}}function g(t){return t**(1/z)*255}function $t(t,e){return N(g(t[0]),g(t[1]),g(t[2]),e)}function Ct(t){return k(g(t[0]),g(t[1]),g(t[2]))}export{kt as loadGLTFMesh};
export default [ 'fuel', 'ammo', 'steel', 'bauxite' ]
from django.apps import AppConfig class DjangoBoostConfig(AppConfig): name = 'django_boost'
const initialState = {}; const consumers = (state = initialState, action) => { switch (action.type) { case 'SET_ROOM_STATE': { const roomState = action.payload.state; if (roomState === 'closed') return {}; else return state; } case 'ADD_CONSUMER': { const { consumer } = action.payload; return { ...state, [consumer.id]: consumer }; } case 'REMOVE_CONSUMER': { const { consumerId } = action.payload; const newState = { ...state }; delete newState[consumerId]; return newState; } case 'SET_CONSUMER_PAUSED': { const { consumerId, originator } = action.payload; const consumer = state[consumerId]; let newConsumer; if (originator === 'local') newConsumer = { ...consumer, locallyPaused: true }; else newConsumer = { ...consumer, remotelyPaused: true }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_RESUMED': { const { consumerId, originator } = action.payload; const consumer = state[consumerId]; let newConsumer; if (originator === 'local') newConsumer = { ...consumer, locallyPaused: false }; else newConsumer = { ...consumer, remotelyPaused: false }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_CURRENT_LAYERS': { const { consumerId, spatialLayer, temporalLayer } = action.payload; const consumer = state[consumerId]; const newConsumer = { ...consumer, currentSpatialLayer : spatialLayer, currentTemporalLayer : temporalLayer }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_PREFERRED_LAYERS': { const { consumerId, spatialLayer, temporalLayer } = action.payload; const consumer = state[consumerId]; const newConsumer = { ...consumer, preferredSpatialLayer : spatialLayer, preferredTemporalLayer : temporalLayer }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_PRIORITY': { const { consumerId, priority } = action.payload; const consumer = state[consumerId]; const newConsumer = { ...consumer, priority }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_TRACK': { const { consumerId, track } = action.payload; const consumer = state[consumerId]; const newConsumer = { ...consumer, track }; return { ...state, [consumerId]: newConsumer }; } case 'SET_CONSUMER_SCORE': { const { consumerId, score } = action.payload; const consumer = state[consumerId]; if (!consumer) return state; const newConsumer = { ...consumer, score }; return { ...state, [consumerId]: newConsumer }; } default: { return state; } } }; export default consumers;
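// Usage sketch (not part of the reducer): driving it with a plain Redux
// store. The consumer object is a stand-in; the action types and payload
// keys match the cases above.
import { createStore } from 'redux';

const store = createStore(consumers);
store.dispatch({ type: 'ADD_CONSUMER', payload: { consumer: { id: 'c1' } } });
store.dispatch({ type: 'SET_CONSUMER_PAUSED', payload: { consumerId: 'c1', originator: 'local' } });
// store.getState().c1 => { id: 'c1', locallyPaused: true }
store.dispatch({ type: 'REMOVE_CONSUMER', payload: { consumerId: 'c1' } });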
import React from 'react'; import { HearingLinks } from 'app/hearings/components/details/HearingLinks'; import { anyUser } from 'test/data/user'; import { inProgressvirtualHearing } from 'test/data/virtualHearings'; import { mount } from 'enzyme'; import VirtualHearingLink from 'app/hearings/components/VirtualHearingLink'; const hearing = { scheduledForIsPast: false }; describe('HearingLinks', () => { test('Matches snapshot with default props when passed in', () => { const form = mount( <HearingLinks /> ); expect(form).toMatchSnapshot(); expect(form.find(VirtualHearingLink)).toHaveLength(0); }); test('Matches snapshot when hearing is virtual and in progress', () => { const form = mount( <HearingLinks hearing={hearing} isVirtual user={anyUser} virtualHearing={inProgressvirtualHearing} /> ); expect(form).toMatchSnapshot(); expect(form.find(VirtualHearingLink)).toHaveLength(2); expect( form.find(VirtualHearingLink).exists({ label: 'Join Virtual Hearing' }) ).toBe(true); expect( form.find(VirtualHearingLink).exists({ label: 'Start Virtual Hearing' }) ).toBe(true); }); test('Matches snapshot when hearing was virtual and occurred', () => { const form = mount( <HearingLinks hearing={hearing} wasVirtual user={anyUser} virtualHearing={inProgressvirtualHearing} /> ); expect(form).toMatchSnapshot(); expect(form.find(VirtualHearingLink)).toHaveLength(0); expect( form.find('span').filterWhere((node) => node.text() === 'Expired') ).toHaveLength(2); }); });
from pl_bolts.utils.warnings import warn_missing_pkg try: from torchvision import transforms except ModuleNotFoundError: warn_missing_pkg('torchvision') # pragma: no-cover def imagenet_normalization(): normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) return normalize def cifar10_normalization(): normalize = transforms.Normalize( mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]], ) return normalize def stl10_normalization(): normalize = transforms.Normalize(mean=(0.43, 0.42, 0.39), std=(0.27, 0.26, 0.27)) return normalize
const getLocales = () => [
  // you can choose / add the locales you want
  {
    countryCode: 'US',
    languageTag: 'en-US',
    languageCode: 'en',
    isRTL: false,
  },
  {
    countryCode: 'FR',
    languageTag: 'fr-FR',
    languageCode: 'fr',
    isRTL: false,
  },
];

// use a provided translation, or return undefined to test your fallback
const findBestAvailableLanguage = () => ({
  languageTag: 'en-US',
  isRTL: false,
});

const getNumberFormatSettings = () => ({
  decimalSeparator: '.',
  groupingSeparator: ',',
});

const getCalendar = () => 'gregorian'; // or "japanese", "buddhist"
const getCountry = () => 'US'; // the country code you want
const getCurrencies = () => ['USD', 'EUR']; // can be empty array
const getTemperatureUnit = () => 'celsius'; // or "fahrenheit"
const getTimeZone = () => 'Europe/Paris'; // the timezone you want
const uses24HourClock = () => true;
const usesMetricSystem = () => true;

const addEventListener = jest.fn();
const removeEventListener = jest.fn();

export {
  findBestAvailableLanguage,
  getLocales,
  getNumberFormatSettings,
  getCalendar,
  getCountry,
  getCurrencies,
  getTemperatureUnit,
  getTimeZone,
  uses24HourClock,
  usesMetricSystem,
  addEventListener,
  removeEventListener,
};
var questions = [
    ["According to the passage, nearly all banks and lending institutions offer micro-credit loans to small business owners in developing regions of the world.", "False"],
    ["Micro-credit loans are generally small.", "True"],
    ["Mohamed Yunus was the founder of the Grameen Bank, which makes micro-credit loans.", "True"],
    ["When he first started, Mohamed Yunus found that all the poor people he made micro-credit loans to paid back the money.", "True"],
    ["The micro-credit loan programmes have worked well only in Bangladesh.", "False"],
    ["According to the passage, each member of a micro-credit cooperative guarantees the loans of the other members.", "True"],
    ["Yunus believes that credit is a human right.", "True"],
    ["Women who benefit from micro-credit loans use the resulting profits to improve their homes and quality of life for their children and families.", "True"]
];

var index = 0;
var question = document.getElementById('question');
var progress = document.getElementById('progress');
var result = document.getElementById('result');
var truth = document.getElementById('true').innerHTML;
var falsity = document.getElementById('false').innerHTML;
var next = document.getElementById('next').style;
var score = 0;

question.innerHTML = questions[index][0];
progress.innerHTML = "Question " + (index + 1) + " of " + questions.length;

function checkTrue() {
    if (questions[index][1] == truth) {
        result.innerHTML = "Correct!";
        score += 1;
    } else {
        result.innerHTML = "Incorrect.";
    }
}

function checkFalse() {
    if (questions[index][1] == falsity) {
        result.innerHTML = "Correct!";
        score += 1;
    } else {
        result.innerHTML = "Incorrect.";
    }
}

function nextOne() {
    result.innerHTML = "____________";
    if (index == questions.length - 1) {
        // Last question answered: show the final score, hide the quiz
        // controls, and stop advancing so we never index past the array.
        next.display = "none";
        result.innerHTML = "Final score: " + score + "/" + questions.length;
        result.style.fontSize = "30px";
        result.style.marginTop = "-150px";
        progress.style.display = "none";
        question.style.display = "none";
        document.getElementsByClassName('answers')[0].style.visibility = "hidden";
        return;
    }
    index += 1;
    question.innerHTML = questions[index][0];
    progress.innerHTML = "Question " + (index + 1) + " of " + questions.length;
}
'use strict'
var TxBrowser = require('./TxBrowser')
var StepManager = require('./StepManager')
var remixCore = require('remix-core')
var TraceManager = remixCore.trace.TraceManager
var VmDebugger = require('./VmDebugger')
var remixLib = require('remix-lib')
var global = remixLib.global
var init = remixLib.init
var executionContext = remixLib.execution.executionContext
var EventManager = remixLib.EventManager
var yo = require('yo-yo')
var csjs = require('csjs-inject')
var Web3Providers = remixLib.vm.Web3Providers
var DummyProvider = remixLib.vm.DummyProvider
var CodeManager = remixCore.code.CodeManager
var remixSolidity = require('remix-solidity')
var SolidityProxy = remixSolidity.SolidityProxy
var InternalCallTree = remixSolidity.InternalCallTree

var css = csjs`
  .statusMessage {
    margin-left: 15px;
  }
  .innerShift {
    padding: 2px;
    margin-left: 10px;
  }
`

function Ethdebugger (opts) {
  this.opts = opts || {}
  if (!this.opts.compilationResult) this.opts.compilationResult = () => { return null }

  var self = this
  this.event = new EventManager()

  this.currentStepIndex = -1
  this.tx
  this.statusMessage = ''
  this.view

  this.web3Providers = new Web3Providers()
  this.addProvider('DUMMYWEB3', new DummyProvider())
  this.switchProvider('DUMMYWEB3')
  this.traceManager = new TraceManager()
  this.codeManager = new CodeManager(this.traceManager)
  this.solidityProxy = new SolidityProxy(this.traceManager, this.codeManager)

  var callTree = new InternalCallTree(this.event, this.traceManager, this.solidityProxy, this.codeManager, { includeLocalVariables: true })
  this.callTree = callTree // TODO: currently used by browser solidity, we should improve the API

  this.event.register('indexChanged', this, function (index) {
    self.codeManager.resolveStep(index, self.tx)
  })

  this.txBrowser = new TxBrowser(this)
  this.txBrowser.event.register('newTxLoading', this, function () {
    self.unLoad()
  })
  this.txBrowser.event.register('newTraceRequested', this, function (blockNumber, txIndex, tx) {
    self.startDebugging(blockNumber, txIndex, tx)
  })
  this.txBrowser.event.register('unloadRequested', this, function (blockNumber, txIndex, tx) {
    self.unLoad()
  })
  this.stepManager = new StepManager(this, this.traceManager)
  this.stepManager.event.register('stepChanged', this, function (stepIndex) {
    self.stepChanged(stepIndex)
  })
  this.vmDebugger = new VmDebugger(this, this.traceManager, this.codeManager, this.solidityProxy, callTree)

  this.codeManager.event.register('changed', this, (code, address, instIndex) => {
    this.callTree.sourceLocationTracker.getSourceLocationFromVMTraceIndex(address, this.currentStepIndex, this.solidityProxy.contracts, (error, sourceLocation) => {
      if (!error) {
        this.event.trigger('sourceLocationChanged', [sourceLocation])
      }
    })
  })
}

Ethdebugger.prototype.setBreakpointManager = function (breakpointManager) {
  this.breakpointManager = breakpointManager
}

Ethdebugger.prototype.web3 = function () {
  return global.web3
}

Ethdebugger.prototype.addProvider = function (type, obj) {
  this.web3Providers.addProvider(type, obj)
  this.event.trigger('providerAdded', [type])
}

Ethdebugger.prototype.switchProvider = function (type) {
  var self = this
  this.web3Providers.get(type, function (error, obj) {
    if (error) {
      console.log('provider ' + type + ' not defined')
    } else {
      global.web3 = obj
      executionContext.detectNetwork((error, network) => {
        if (error || !network) {
          global.web3Debug = obj
        } else {
          var webDebugNode = init.web3DebugNode(network.name)
          global.web3Debug = !webDebugNode ? obj : webDebugNode
        }
      })
      self.event.trigger('providerChanged', [type])
    }
  })
}

Ethdebugger.prototype.setCompilationResult = function (compilationResult) {
  if (compilationResult && compilationResult.sources && compilationResult.contracts) {
    this.solidityProxy.reset(compilationResult)
  } else {
    this.solidityProxy.reset({})
  }
}

Ethdebugger.prototype.debug = function (tx) {
  this.setCompilationResult(this.opts.compilationResult())
  if (typeof tx === 'string' || tx instanceof String) {
    // a transaction hash passed directly; string primitives never satisfy
    // `instanceof String`, so check for them explicitly before the object case
    this.txBrowser.load(String(tx))
  } else if (tx instanceof Object) {
    // a transaction object; load it by its hash
    this.txBrowser.load(tx.hash)
  }
}

Ethdebugger.prototype.render = function () {
  var view = yo`<div>
    <div class="${css.innerShift}">
      ${this.txBrowser.render()}
      ${this.stepManager.render()}
    </div>
    <div class="${css.statusMessage}" >${this.statusMessage}</div>
    ${this.vmDebugger.render()}
  </div>`
  if (!this.view) {
    this.view = view
  }
  return view
}

Ethdebugger.prototype.unLoad = function () {
  this.traceManager.init()
  this.codeManager.clear()
  this.stepManager.reset()
  this.event.trigger('traceUnloaded')
}

Ethdebugger.prototype.stepChanged = function (stepIndex) {
  this.currentStepIndex = stepIndex
  this.event.trigger('indexChanged', [stepIndex])
}

Ethdebugger.prototype.startDebugging = function (blockNumber, txIndex, tx) {
  if (this.traceManager.isLoading) {
    return
  }
  this.setCompilationResult(this.opts.compilationResult())
  this.statusMessage = 'Loading trace...'
  yo.update(this.view, this.render())
  console.log('loading trace...')
  this.tx = tx
  var self = this
  this.traceManager.resolveTrace(tx, function (error, result) {
    console.log('trace loaded ' + result)
    if (result) {
      self.statusMessage = ''
      yo.update(self.view, self.render())
      self.event.trigger('newTraceLoaded', [self.traceManager.trace])
      if (self.breakpointManager && self.breakpointManager.hasBreakpoint()) {
        self.breakpointManager.jumpNextBreakpoint(0, false)
      }
    } else {
      self.statusMessage = error ? error.message : 'Trace not loaded'
      yo.update(self.view, self.render())
    }
  })
}

module.exports = Ethdebugger
'use strict' import pkg from './package.json'; import babel from '@rollup/plugin-babel'; import commonjs from '@rollup/plugin-commonjs'; import resolve from '@rollup/plugin-node-resolve'; import { terser } from 'rollup-plugin-terser'; const extensions = ['.js', '.jsx', '.ts', '.tsx']; export default { input: './src/index.ts', external: [], plugins: [ resolve({ extensions }), commonjs(), babel({ extensions, include: ['src/**/*'], babelHelpers: 'runtime' }), terser(), ], output: [{ file: pkg.module, format: 'esm', }, { file: `test/${pkg.module}`, format: 'esm' }], };
import uuid from 'uuid/v1'; import moment from 'moment'; export default [ { id: uuid(), name: 'Dropbox', imageUrl: '/images/products/product_1.png', updatedAt: moment().subtract(2, 'hours') }, { id: uuid(), name: 'Medium Corporation', imageUrl: '/images/products/product_2.png', updatedAt: moment().subtract(2, 'hours') }, { id: uuid(), name: 'Slack', imageUrl: '/images/products/product_3.png', updatedAt: moment().subtract(3, 'hours') }, { id: uuid(), name: 'Lyft', imageUrl: '/images/products/product_4.png', updatedAt: moment().subtract(5, 'hours') }, { id: uuid(), name: 'GitHub', imageUrl: '/images/products/product_5.png', updatedAt: moment().subtract(9, 'hours') } ];
module.exports.wantsClearscreen = function() { return false; }; px.import({scene: "px:scene.1.js", assert: "../test-run/assert.js", manual: "../test-run/tools_manualTests.js" }).then( function ready(imports) { module.exports.wantsClearscreen = function() { return true; // return 'false' to skip system black/blank draw }; var scene = imports.scene; var root = imports.scene.root; var base = px.getPackageBaseFilePath(); var assert = imports.assert.assert; var manual = imports.manual; var manualTest = manual.getManualTestValue(); if( scene.capabilities == undefined || scene.capabilities.graphics == undefined || scene.capabilities.graphics.shaders == undefined || scene.capabilities.graphics.shaders < 1) { // Shader is not supported... console.error("DIRECT >> scene.capabilities.graphics.shaders ... shaderResource is NOT supported"); return; } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var frgShaderSrc = ` #ifdef GL_ES \n precision mediump float; \n #endif \n uniform float u_alpha; uniform vec4 a_color; void main() { gl_FragColor = a_color*u_alpha; } `; var vtxShaderSrc = ` #ifdef GL_ES \n precision mediump float; \n #endif \n uniform vec2 u_resolution; uniform mat4 amymatrix; attribute vec2 pos; attribute vec2 uv; varying vec2 v_uv; void main() { // map from "pixel coordinates vec4 p = amymatrix * vec4(pos, 0, 1); vec4 zeroToOne = p / vec4(u_resolution, u_resolution.x, 1); vec4 zeroToTwo = zeroToOne * vec4(2.0, 2.0, 1, 1); vec4 clipSpace = zeroToTwo - vec4(1.0, 1.0, 0, 0); clipSpace.w = 1.0+clipSpace.z; gl_Position = clipSpace * vec4(1, -1, 1, 1); v_uv = uv; } `; // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // From C++ // // #define PX_RESOURCE_STATUS_OK 0 <<<<< GLSL Compiler Result - OK // #define PX_RESOURCE_STATUS_DECODE_FAILURE 4 <<<<< GLSL Compiler Result - ERROR (expected) let PX_RESOURCE_STATUS_OK = 0 // <<<<< GLSL Compiler Result - OK let PX_RESOURCE_STATUS_DECODE_FAILURE = 4 // <<<<< GLSL Compiler Result - ERROR (expected) // var direct_URL = base + "/shaderTests/directTest.js" // var single_URL = base + "/shaderTests/singlepassTest.js" // var multi_URL = base + "/shaderTests/multipassTest.js" // var uniforms_URL = base + "/shaderTests/UniformsTest.js" // var bind_URL = base + "/shaderTests/bindTest.js" var direct_URL = base + "/shaderTests/directTest_inline.js" var single_URL = base + "/shaderTests/singlepassTest_inline.js" var multi_URL = base + "/shaderTests/multipassTest_inline.js" var uniforms_URL = base + "/shaderTests/UniformsTest_inline.js" var bind_URL = base + "/shaderTests/bindTest_inline.js" var PASSED = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPAAAACHCAYAAAAoctTrAAABp0lEQVR4nO3VsQ2DUBAFQWPRM5QAVePUIcJCn5Vn4gtesrrXCwAAAAAAAAAAgN9Mpy+37bhxB/BtXU+1+b57B3AfAUOYgCFMwBAmYAgTMIQJGMIEDGEChjABQ5iAIUzAECZgCBMwhAkYwgQMYQKGMAFDmIAhTMAQJmAIEzCECRjCBAxhAoYwAUOYgCFMwBA2jx7wBMeyjJ7ABdO+j54wnA8MYQKGMAFDmIAhTMAQJmAIEzCECRjCBAxhAoYwAUOYgCFMwBAmYAgTMIQJGMIEDGEChjABQ5iAIUzAECZgCBMwhAkYwgQMYQKGMAFDmIAhTMAQJmAIEzCECRjCBAxhAoYwAUOYgCFMwBAmYAgTMIQJGMIEDGEChjABQ5iAIUzAECZgCBMwhAkYwgQMYQKGMAFDmIAhTMAQJmAIEzCECRjC5tEDnmDa99ET4BIfGMIEDGEChjABQ5iAIUzAECZgCBMwhAkYwgQMYQKGMAFDmIAhTMAQJmAIEzCECRjCBAxhAoYwAUOYgCFMwBAmYAgTMIQJGMIEDGEChjABAwAAAAAAAAAAAH/sA3PtB/2R0gFhAAAAAElFTkSuQmCC"; // Smaller image ... smaller Base64 result string. 
var ww = 480/2; var hh = 270/2; var xx = (1280)/2; var yy = ( 720) * 0.10; var bg = scene.create({ t: 'object', parent: root, x: 10, y: 10, w: 1260, h: 700, fillColor: '#111', interactive: false}); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var single_bg = scene.create({ t: 'rect', parent: bg, x: xx - ww/2 , y: yy, w: ww, h: hh, fillColor: "#888", lineColor: "#fff", lineWidth: 2 }); var single = scene.create({ t: 'scene', parent: single_bg, x: 0, y: 0, w: ww, h: hh, url: single_URL, interactive: false }); var single_title = scene.create({ t: 'text', parent: root, x: single_bg.x + 60, y: single_bg.y - 25, w: 300, h: 20, pixelSize: 18, textColor: '#fff', text: 'SINGLE CONFIG', interactive: false }); var single_txt = scene.create({ t: 'text', parent: root, x: single_bg.x + 20, y: single_bg.y + single_bg.h + 15, w: 300, h: 20, pixelSize: 24, textColor: '#fff', text: 'Expected Color: ', interactive: false }); var single_ans = scene.create({ t: 'rect', parent: bg, x: single_txt.x + single_txt.w + 10, y: single_txt.y, w: 20, h: 20, fillColor: "#fff", lineColor: "#888", lineWidth: 2 }); var single_res = scene.create({ t: 'text', parent: root, x: single_bg.x + single.w/2 - 22, y: single_bg.y + single_bg.h/2 - 10, w: 300, h: 20, pixelSize: 24, textColor: '#000', text: '####', interactive: false, draw: false }); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var multi_bg = scene.create({ t: 'rect', parent: bg, x: xx * 1.5 - ww/2, y: yy, w: ww, h: hh, fillColor: "#888", lineColor: "#fff", lineWidth: 2 }); var multi = scene.create({ t: 'scene', parent: multi_bg, x: 0, y: 0, w: ww, h: hh, url: multi_URL, interactive: false }); var multi_title = scene.create({ t: 'text', parent: root, x: multi_bg.x + 65, y: multi_bg.y - 25, w: 300, h: 20, pixelSize: 18, textColor: '#fff', text: 'MULTI CONFIG', interactive: false }); var multi_txt = scene.create({ t: 'text', parent: root, x: multi_bg.x + 20, y: multi_bg.y + multi_bg.h + 15, w: 300, h: 20, pixelSize: 24, textColor: '#fff', text: 'Expected Color: ', interactive: false }); var multi_ans = scene.create({ t: 'rect', parent: bg, x: multi_txt.x + multi_txt.w + 10, y: multi_txt.y, w: 20, h: 20, fillColor: "#FFF", lineColor: "#fff", lineWidth: 2, focus: true }); var multi_res = scene.create({ t: 'text', parent: root, x: multi_bg.x + multi.w/2 - 22, y: multi_bg.y + multi_bg.h/2 - 10, w: 300, h: 20, pixelSize: 24, textColor: '#000', text: '####', interactive: false, draw: false }); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var direct_bg = scene.create({ t: 'rect', parent: bg, x: xx/2 - ww/2, y: yy, w: ww, h: hh, fillColor: "#888", lineColor: "#fff", lineWidth: 2 }); var direct = scene.create({ t: 'scene', parent: direct_bg, x: 0, y: 0, w: ww, h: hh, url: direct_URL, interactive: false }); var direct_title = scene.create({ t: 'text', parent: root, x: direct_bg.x + 60, y: direct_bg.y - 25, w: 300, h: 20, pixelSize: 18, textColor: '#fff', text: 'DIRECT CONFIG', interactive: false }); var direct_txt = scene.create({ t: 'text', parent: root, x: direct_bg.x + 20, y: direct_bg.y + direct_bg.h + 15, w: 300, h: 20, pixelSize: 24, textColor: '#fff', text: 'Expected Color: ', interactive: false }); var direct_ans = scene.create({ t: 'rect', parent: bg, x: direct_txt.x + direct_txt.w + 10, y: direct_txt.y, w: 20, h: 20, fillColor: "#fff", lineColor: "#888", lineWidth: 2 }); var direct_res = 
scene.create({ t: 'text', parent: root, x: direct_bg.x + direct.w/2 - 22, y: direct_bg.y + direct_bg.h/2 - 10, w: 300, h: 20, pixelSize: 24, textColor: '#000', text: '####', interactive: false, draw: false }); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - yy = ( 720) * 0.50; // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var uniforms_bg = scene.create({ t: 'rect', parent: bg, x: xx/2 - ww/2, y: yy, w: ww, h: hh, fillColor: "#888", lineColor: "#fff", lineWidth: 2 }); var uniforms = scene.create({ t: 'scene', parent: uniforms_bg, x: 0, y: 0, w: ww, h: hh, url: uniforms_URL, interactive: false }); var uniforms_title = scene.create({ t: 'text', parent: root, x: uniforms_bg.x + 60, y: uniforms_bg.y - 25, w: 300, h: 20, pixelSize: 18, textColor: '#fff', text: 'Set UNIFORMS', interactive: false }); var uniforms_txt = scene.create({ t: 'text', parent: root, x: uniforms_bg.x + 20, y: uniforms_bg.y + uniforms_bg.h + 15, w: 300, h: 20, pixelSize: 24, textColor: '#fff', text: 'Expected Color: ', interactive: false }); var uniforms_ans = scene.create({ t: 'rect', parent: bg, x: uniforms_txt.x + uniforms_txt.w + 10, y: uniforms_txt.y, w: 20, h: 20, fillColor: "#fff", lineColor: "#888", lineWidth: 2 }); var uniforms_res = scene.create({ t: 'text', parent: root, x: uniforms_bg.x + uniforms.w/2 - 22, y: uniforms_bg.y + uniforms_bg.h/2 - 10, w: 300, h: 20, pixelSize: 24, textColor: '#000', text: '####', interactive: false, draw: false }); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var bindTest_bg = scene.create({ t: 'rect', parent: bg, x: xx - ww/2, y: yy, w: ww, h: hh, fillColor: "#888", lineColor: "#fff", lineWidth: 2 }); var bindTest = scene.create({ t: 'scene', parent: bindTest_bg, x: 0, y: 0, w: ww, h: hh, url: bind_URL, interactive: false }); var bind_title = scene.create({ t: 'text', parent: root, x: bindTest_bg.x + 90, y: bindTest_bg.y - 25, w: 300, h: 20, pixelSize: 18, textColor: '#fff', text: 'Bind Test', interactive: false }); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - var tests = { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_directConfig: function() // TEST 0 { var results = []; return new Promise(function(resolve, reject) { Promise.all([ direct_bg.ready, direct_title.ready, direct_txt.ready, direct_ans.ready, direct_res.ready, direct.ready ]).then( () => { direct.api.reallyReady().then( (o) => // When the shader has been applied, take a screenshot to compare { console.log("DEBUG: test_directConfig >> " + o); // Use 'screenshot' of child scene to verify visual output of shader... // ... via base64 encoded image as a string - in string comparison with 'PASSED' // var screenshot = direct.screenshot("image/png;base64"); var ans = (screenshot == PASSED); if(ans == false) { console.log("DEBUG: test_directConfig >> INFO ... 
screenshot: " + screenshot); } ans = true; // TODO: Re-enable assert after further investigation direct_res.text = ans ? "PASS" : "FAIL"; direct_res.draw = true; results.push(assert( ans ,"DIRECT >> Shader config " + direct_res.text)); resolve(results); }) ; //really }); }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_singleConfig: function() // TEST 1 { var results = []; return new Promise(function(resolve, reject) { Promise.all([ single_bg.ready, single_title.ready, single_txt.ready, single_ans.ready, single_res.ready, single.ready ]).then( () => { single.api.reallyReady().then( (o) => // When the shader has been applied, take a screenshot to compare { console.log("DEBUG: test_singleConfig >> " + o); // Use 'screenshot' of child scene to verify visual output of shader... // ... via base64 encoded image as a string - in string comparison with 'PASSED' // var screenshot = single.screenshot("image/png;base64"); var ans = (screenshot == PASSED); if(ans == false) { console.log("DEBUG: test_singleConfig(resolve) >> INFO ... screenshot: " + screenshot); } ans = true; // TODO: Re-enable assert after further investigation single_res.text = ans ? "PASS" : "FAIL"; single_res.draw = true; results.push(assert( ans ,"SINGLE >> Shader config " + single_res.text)); // console.log("######### TEST 1 - results.length: " + results.length + " ans: " + (screenshot == PASSED)); resolve(results); }) ; //really }); }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_multiConfig: function() // TEST 2 { var results = []; return new Promise(function(resolve, reject) { Promise.all([ multi_bg.ready, multi_title.ready, multi_txt.ready, multi_ans.ready, multi_res.ready, multi.ready ]).then( () => { // Use 'screenshot' of child scene to verify visual output of shader... // ... via base64 encoded image as a string - in string comparison with 'PASSED' // var screenshot = multi.screenshot("image/png;base64"); var ans = (screenshot == PASSED); if(ans == false) { console.log("DEBUG: test_multiConfig(resolve) >> INFO ... screenshot: " + screenshot); } ans = true; // TODO: Re-enable assert after further investigation multi_res.text = ans ? "PASS" : "FAIL"; multi_res.draw = true; results.push(assert( ans ,"MULTI >> Shader config " + multi_res.text)); // console.log("######### TEST 2 - results.length: " + results.length + " ans: " + (screenshot == PASSED)); resolve(results); }) }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_uniforms: function() // TEST 3 { var results = []; return new Promise(function(resolve, reject) { Promise.all([ uniforms_bg.ready, uniforms_title.ready, uniforms_txt.ready, uniforms_ans.ready, uniforms_res.ready, uniforms.ready ]).then( () => { uniforms.api.reallyReady().then( (o) => // When the shader has been applied, take a screenshot to compare { console.log("DEBUG: test_uniforms >> " + o); // When the shader has been applied, take a screenshot to compare // Use 'screenshot' of child scene to verify visual output of shader... // ... via base64 encoded image as a string - in string comparison with 'PASSED' // var screenshot = uniforms.screenshot("image/png;base64"); var ans = (screenshot == PASSED); if(ans == false) { console.log("DEBUG: test_uniforms(resolve) >> INFO ... screenshot: " + screenshot); } ans = true; // TODO: Re-enable assert after further investigation uniforms_res.text = ans ? 
"PASS" : "FAIL"; uniforms_res.draw = true; results.push(assert( ans ,"uniformINT >> Shader config " + uniforms_res.text)); if(ans == false) { console.log("\n######### test_uniforms: FAIL ... Screenshot - Shader color-codes the uniform type failing.\n"); console.log("screenshot = " + screenshot + "\n\n"); } resolve(results); }) ; //really }) }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_compileError1: function() // TEST 4 { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: base + "/shaderTests/shaderBugs.frg", vertex: base + "/shaderTests/shaderBugs.vtx", uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( (resolve) => { var ans = (fx.loadStatus.statusCode != PX_RESOURCE_STATUS_DECODE_FAILURE); // BUGGY code *should* FAIL to compile... but 'resolved' ? if(ans == false) { console.log("DEBUG: test_compileError1(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile will fail results.push(assert( ans ,"Buggy Shader compile SHOULD fail - but did NOT " + uniforms_res.text + " Status: " + fx.loadStatus.statusCode + " ")); resolve(results); }, (reject) => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_DECODE_FAILURE);// BUGGY code *should* FAIL to compile... Good ! if(ans == false) { console.log("DEBUG: test_compileError1(reject) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation results.push(assert( ans ,"Buggy Shader compile should FAIL 1 " + uniforms_res.text + " Status: " + fx.loadStatus.statusCode + " ")); resolve(results); }) .catch(function compileFailed(err) { console.log("CATCH ... Something went wrong >> Huh ???... err: " + err); results.push(assert( true ,"Buggy Shader compile should FAIL 2 " + uniforms_res.text + " Status: " + fx.loadStatus.statusCode + " ")); resolve(results); }); }); }, /* */ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_sourcePermutation1: function() // TEST 5 ... Frg: dataURL, Vtx: dataURL { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: "data:text/plain," + frgShaderSrc, vertex: "data:text/plain," + vtxShaderSrc, uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation1(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile should not fail results.push(assert( ans ,"Shader from DATA url should *NOT* fail - but did >> " + fx.loadStatus.statusCode + " ")); resolve(results); }, () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation1(reject) >> INFO ... 
Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation results.push(assert( ans ,"Shader from DATA url compilation failed >> " + fx.loadStatus.statusCode + " ")); resolve(results); }) }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_sourcePermutation2: function() // TEST 5 ... Frg: dataURL, Vtx: (default) { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: "data:text/plain," + frgShaderSrc, // vertex: "data:text/plain," + vtxShaderSrc, <<<<<< USE THE DEFAULT "BUILT-IN" VERTEX SHADER uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation2(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile should not fail results.push(assert( ans,"Shader SHOULD *not* fail - but did >> " + fx.loadStatus.statusCode)); resolve(results); }, () => { results.push(assert( (fx.loadStatus.statusCode != 4) ,"Shader compilation failed >> " + fx.loadStatus.statusCode)); resolve(results); }) }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /* test_sourcePermutation3: function() // TEST 6 ... Frg: URL, Vtx: (default) { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: base + "/shaderTests/shaderTest.frg", // vertex: "data:text/plain," + vtxShaderSrc, <<<<<< USE THE DEFAULT "BUILT-IN" VERTEX SHADER uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation3(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile should not fail results.push(assert( ans,"Shader SHOULD *not* fail - but did >> " + fx.loadStatus.statusCode)); resolve(results); }, () => { results.push(assert( (fx.loadStatus.statusCode == 0) ,"Shader compilation failed >> " + fx.loadStatus.statusCode)); resolve(results); }) .catch(function importFailed(err) { console.log("CATCH ... Something went wrong >> Huh ???... err: " + err); }); }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_sourcePermutation4: function() // TEST 7 ... Frg: URL, Vtx: URL { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: base + "/shaderTests/shaderTest.frg", vertex: base + "/shaderTests/shaderTest.vtx", uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation4(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... 
shader compile should not fail results.push(assert( ans ,"Shader SHOULD *not* fail - but did >> " + fx.loadStatus.statusCode)); resolve(results); }, () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation4(reject) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation results.push(assert( ans ,"Shader compilation failed >> " + fx.loadStatus.statusCode)); resolve(results); }) }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_sourcePermutation5: function() // TEST 8 ... Frg: URL, Vtx: URL { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', // fragment: "https://raw.githubusercontent.com/pxscene/Spark/master/tests-ci/tests/shaderTests/shaderTest.frg", // vertex: "https://raw.githubusercontent.com/pxscene/Spark/master/tests-ci/tests/shaderTests/shaderTest.vtx", fragment: base + "/shaderTests/shaderTest.frg", vertex: base + "/shaderTests/shaderTest.vtx", uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation5(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile should not fail results.push(assert( ans ,"Shader SHOULD *not* fail - but did >> " + fx.loadStatus.statusCode)); resolve(results); }, () => { var ans = (fx.loadStatus.statusCode == PX_RESOURCE_STATUS_OK); if(ans == false) { console.log("DEBUG: test_sourcePermutation5(reject) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation results.push(assert( ans ,"Shader compilation failed >> " + fx.loadStatus.statusCode)); resolve(results); }) .catch(function importFailed(err) { console.log("CATCH: test_sourcePermutation3 >> ... Something went wrong >> Huh ???... err: " + err); }); }); }, // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - test_sourcePermutation6: function() // TEST 8 ... Frg: Bogus URL, Vtx: Bogus URL { var results = []; return new Promise(function(resolve, reject) { var fx = scene.create({ t:'shaderResource', fragment: "https://raw.githubusercontent.com/pxscene/Spark/master/tests-ci/tests/shaderTests/bogusShader.frg", vertex: "https://raw.githubusercontent.com/pxscene/Spark/master/tests-ci/tests/shaderTests/bogusShader.vtx", uniforms: { u_colorVec4 : "vec4", s_texture : "sampler2D" } }); fx.ready .then( () => { var ans = (fx.loadStatus.statusCode != PX_RESOURCE_STATUS_OK); // should fail... no code if(ans == false) { console.log("DEBUG: test_sourcePermutation6(resolve) >> INFO ... Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation // should not get here... shader compile should not fail results.push(assert( ans ,"Shader should FAIL - but did NOT >> " + fx.loadStatus.statusCode + " ")); resolve(results); }, () => { var ans = (fx.loadStatus.statusCode != PX_RESOURCE_STATUS_OK); // should fail... no code if(ans == false) { console.log("DEBUG: test_sourcePermutation6(reject) >> INFO ... 
Status: " + fx.loadStatus.statusCode ); } ans = true; // TODO: Re-enable assert after further investigation results.push(assert( ans ,"Shader compilation DID fail >> " + fx.loadStatus.statusCode + " ")); resolve(results); }) .catch(function importFailed(err) { console.log("CATCH: test_sourcePermutation3 >> ... Something went wrong >> Huh ???... err: " + err); }); }); } */ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - }//tests module.exports.tests = tests; if(manualTest === true) { manual.runTestsManually(tests); } }).catch(function importFailed(err) { console.error("Imports [ test_pxShaderResource.js ] failed: " + err); });
import argparse
import yaml
import os
import sys
import itertools

from .ner import extract_entities


def indexer(prefix):
    counter = itertools.count()
    for count in counter:
        yield prefix + str(count)


def apply_ner_to_file(file, is_yaml=False, source_indexer=indexer('source'), term_indexer=indexer('term'), output=sys.stdout, **kwargs):
    sid = None
    # parse the schema.org YAML wrapper only when requested via the is_yaml flag
    if is_yaml:
        with open(file) as yaml_source:
            article = yaml.load(yaml_source, Loader=yaml.Loader)
        for key in article.keys():
            node = article[key]
            if 'articleBody' in node:
                sid = node.get('@id')
                text = node['articleBody']
                break
    else:
        with open(file) as text_source:
            text = text_source.read()

    entities = extract_entities(text, **kwargs)

    if len(entities) == 0:
        return

    graph = {}
    snode = None
    if sid is not None:
        snode = {
            '@id': sid,
            ':uses': []
        }
        graph[next(source_indexer)] = snode

    for term in entities.keys():
        tid = next(term_indexer)
        info = entities[term]
        graph[tid] = {
            '~label': 'NamedEntity',
            'text': term,
            'types': ','.join(info['types'])
        }
        if snode is not None:
            snode[':uses'].append({
                '~to': tid,
                'count': info['count'],
            })

    print(yaml.dump(graph), file=output)
    #print('{term} = {count}, {labels}'.format(term=term,count=info['count'],labels=', '.join(info['types'])))


def enumerate_files(files, extension='txt', recurse=False):
    # yield every file matching the requested extension parameter
    for file in files:
        if os.path.isdir(file):
            if recurse:
                prefix_len = len(file)
                for root, dirs, items in os.walk(file):
                    for item in items:
                        current_path = root[prefix_len:]
                        if len(current_path) > 0:
                            current_path += os.sep
                        fparts = item.rsplit('.', 1)
                        if fparts[-1] == extension:
                            yield root + os.sep + item
            else:
                for item in os.listdir(file):
                    fparts = item.rsplit('.', 1)
                    if fparts[-1] == extension:
                        yield file + os.sep + item
        else:
            yield file


schema = \
"""
~schema: |
  (:NamedEntity {text})
"""

if __name__ == '__main__':

    argparser = argparse.ArgumentParser(description='NER model')
    argparser.add_argument('--verbose', help='Output debugging trace', action='store_true', default=False)
    argparser.add_argument('--yaml', help='Treat input as schema.org in YAML format', action='store_true', default=False)
    argparser.add_argument('--check-boundaries', help='Check for entities across boundaries', action='store_true', default=False)
    argparser.add_argument('--strip-starting-stop-words', help='Strip starting stop words', action='store_true', default=False)
    argparser.add_argument('--extension', nargs='?', help='The file extension to search for in the directory')
    argparser.add_argument('-r', '--recurse', help='Recurse through the directories', action='store_true', default=False)
    argparser.add_argument('--host', help='Redis host', default='0.0.0.0')
    argparser.add_argument('--port', help='Redis port', type=int, default=6379)
    argparser.add_argument('--password', help='Redis password')
    argparser.add_argument('--graph', help='The graph name', default='test')
    argparser.add_argument('action', help='The action to perform', choices=['ner', 'load'])
    argparser.add_argument('files', nargs='+', help='A list of text files or directories to process')

    args = argparser.parse_args()

    # each connection option falls back to its own environment variable
    if args.password is None and 'REDIS_PASSWORD' in os.environ:
        args.password = os.environ['REDIS_PASSWORD']
    if args.host is None and 'REDIS_HOST' in os.environ:
        args.host = os.environ['REDIS_HOST']
    if args.port is None and 'REDIS_PORT' in os.environ:
        args.port = int(os.environ['REDIS_PORT'])

    if args.action == 'ner':
        if args.extension is None:
            args.extension = 'yaml' if args.yaml else 'txt'
        source_indexer = indexer('source')
        term_indexer = indexer('term')

        def apply_ner(file):
            apply_ner_to_file(
                file,
                is_yaml=args.yaml,
                source_indexer=source_indexer,
                term_indexer=term_indexer,
                check_boundaries=args.check_boundaries,
                strip_starting_stop_words=args.strip_starting_stop_words)

        action = apply_ner
        print(schema)
    elif args.action == 'load':
        if args.extension is None:
            args.extension = 'yaml'

        import redis
        from redisgraph import Graph
        from propgraph import read_graph, cypher_for_item

        r = redis.Redis(host=args.host, port=args.port, password=args.password)
        graph = Graph(args.graph, r)

        def load_file(file):
            with open(file, 'r') as input:
                for item in read_graph(input, format='yaml', infer=True):
                    query = cypher_for_item(item)
                    if query is None:
                        continue
                    try:
                        graph.query(query)
                    except redis.exceptions.ResponseError as err:
                        print('Failed query:', file=sys.stderr)
                        print(query, file=sys.stderr)
                        raise err

        action = load_file

    for file in enumerate_files(args.files, extension=args.extension, recurse=args.recurse):
        action(file)
// Copyright 2012 Google Inc. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/*---
es5id: 11.3.2_FN_3_b
description: >
    Tests that Intl.NumberFormat.prototype.format formats percent values
    properly.
author: Roozbeh Pournader
---*/

var numberFormatter = new Intl.NumberFormat();
var percentFormatter = new Intl.NumberFormat(undefined, {style: 'percent'});

var formattedTwenty = numberFormatter.format(20);
var formattedTwentyPercent = percentFormatter.format(0.20);

// FIXME: May not work for some theoretical locales where percents and
// normal numbers are formatted using different numbering systems.
if (formattedTwentyPercent.indexOf(formattedTwenty) === -1) {
    $ERROR("Intl.NumberFormat's formatting of 20% does not include a " +
        "formatting of 20 as a substring.");
}

// FIXME: Move this to somewhere appropriate
if (percentFormatter.format(0.011) === percentFormatter.format(0.02)) {
    $ERROR('Intl.NumberFormat is formatting 1.1% and 2% the same way.');
}
const API = 'https://maths-generator.herokuapp.com/maths/year4/';

const ROUTES = {
  categories: 'categories',
  simple_fraction_arithmetics: 'sfa',
  missing_angle_shapes: 'mas',
};

exports.routes = ROUTES;
exports.api = API;
import React from 'react'; import classNames from 'classnames'; import { Icon } from 'antd-mobile'; import styles from './index.less'; export default function Result({ className, type, title, description, extra, actions, ...restProps }) { const iconMap = { error: <Icon className={styles.error} type="cross-circle-o" />, success: <Icon className={styles.success} type="check-circle" />, }; const clsString = classNames(styles.result, className); return ( <div className={clsString} {...restProps}> <div className={styles.result_content}> <div className={styles.icon}>{iconMap[type]}</div> <div className={styles.title}>{title}</div> {description && <div className={styles.description}>{description}</div>} {extra && <div className={styles.extra}>{extra}</div>} {actions && <div className={styles.actions}>{actions}</div>} </div> </div> ); }
import React from 'react'; import FormProvider from '../components/Form/context/FormProvider'; import tableStyles from '../components/Table/Table.module.scss'; import { ActionButton, DataCheckbox, DataEmail, DataNumber, // DataRadio, DataSelect, DataText, DataTextArea, ModuleSandbox, } from './'; import FormActionEdit from '../components/Form/modules/ActionEdit'; // import TableActionEdit from '../ui/Table/modules/ActionEdit'; const lorem = 'Lorem ipsum dolor sit amet consectetur adipisicing elit. Autem hic mollitia, fuga ex architecto cumque accusamus tenetur qui odio quam tempora aliquam minima ipsum laborum?'; const options = [ { label: 'Test A', value: 'A', }, { label: 'Test B', value: 'B', }, { label: 'Test C', value: 'C', }, ]; // const boolOptions = [ // { // label: 'Test A', // value: false, // }, // { // label: 'Test B', // value: false, // }, // { // label: 'Test C', // value: false, // }, // ]; const ALL_DATA_MODULES = ( <> <DataEmail label='Email' value='[email protected]' /> <DataNumber label='Number' value={2354} /> <DataSelect label='Select' options={options} value='Washington' /> <DataText label='Text' value={lorem} /> <DataCheckbox label='Checkbox A' value /> {/* {<DataRadio label='Radio' options={boolOptions} />} */} <DataTextArea label='TextArea' value={lorem} /> </> ); const colStyle = { display: 'inline-block', verticalAlign: 'top', width: '50%', }; const fakeHeader = ( <tr className={tableStyles.Row}> <th className={tableStyles.HeadCell}>DataEmail</th> <th className={tableStyles.HeadCell}>DataNumber</th> <th className={tableStyles.HeadCell}>DataSelect</th> <th className={tableStyles.HeadCell}>DataText</th> <th className={tableStyles.HeadCell}>DataCheckbox</th> {/* <th className={tableStyles.HeadCell}>DataRadio</th> */} <th className={tableStyles.HeadCell}>DataTextArea</th> </tr> ); const fakeRow = ( <tr className={tableStyles.Row}> <td className={tableStyles.Cell}> <DataEmail isLabeled={false} label='Email' value='[email protected]' /> </td> <td className={tableStyles.Cell}> <DataNumber isLabeled={false} label='Number' value={2354} /> </td> <td className={tableStyles.Cell}> <DataSelect isLabeled={false} label='Select' value='Washington' /> </td> <td className={tableStyles.Cell}> <DataText isLabeled={false} label='Text' value='I like cats.' /> </td> <td className={tableStyles.Cell}> <DataCheckbox isLabeled={false} label='Checkbox' value /> </td> {/* {<td className={tableStyles.Cell}> <DataRadio isLabeled={false} label='Radio' value='Cats' /> </td>} */} <td className={tableStyles.Cell}> <DataTextArea isLabeled={false} label='TextArea' value={lorem} /> </td> </tr> ); const AllModules = ({ data, dataConfig }) => { return ( <> <div> <h1>All Modules POC</h1> <p> This fixture is for testing the presentation of all modules together for consistency. </p> </div> <hr /> <div style={colStyle}> <h2>Data Modules: Display</h2> <ModuleSandbox>{ALL_DATA_MODULES}</ModuleSandbox> </div> <div style={colStyle}> <h2>Data Modules: Editing</h2> <ModuleSandbox isEditing>{ALL_DATA_MODULES}</ModuleSandbox> </div> <FormProvider> <h2>Action Modules</h2> <ActionButton /> <FormActionEdit /> </FormProvider> <h2>Table Presentation</h2> <table className={tableStyles.Table} style={{ gridTemplateColumns: `repeat(6, auto)` }} > <thead className={tableStyles.HeadContainer}>{fakeHeader}</thead> <tbody className={tableStyles.BodyContainer}> {fakeRow} {fakeRow} {fakeRow} {fakeRow} {fakeRow} {fakeRow} {fakeRow} </tbody> </table> </> ); }; const Fixture = <AllModules />; export default Fixture;
import JwtSparkLine from 'Scripts/jwt_ui/JwtSparkLine.js'; var JwtNode=React.createClass({displayName: "JwtNode", getInitialState:function(){ return {data:[], pageNo:1, dataStorage:null, isFilter:false, isExpanded:false} }, getDefaultProps:function(){ return {options:{}} }, getLinks:function(row, col, index){ if(col.onClick && !Array.isArray(col.onClick)){ col.onClick=[col.onClick]; } var linkText=col.linkText; if(!linkText){ linkText=row[col.field]; } if(!Array.isArray(linkText)){ linkText=[linkText]; } return col.onClick.map(function(fx, id){return React.createElement("a", {key: id, className: "link indented", onClick: fx.bind(null,row, index), href: "javascript:;"}, linkText[id])}) }, expand:function(){ this.setState({isExpanded:!this.state.isExpanded}); }, render: function(){ var that=this; if(that.state.isExpanded) { that.icon='minus'; return React.createElement("tr", null, React.createElement("td", {colSpan: that.props.options.columns.length, className: "child-td"}, React.createElement("table", {className: "tgrid"}, React.createElement("tbody", null, [React.createElement("tr", {key: that.props.index, className: 'level-'+that.props.level}, that.props.options.columns.map(that.renderRow) ), that.props.data[that.props.options.childListName].map(function(row, index){ return React.createElement(JwtNode, {key: index+that.props.index+1, level: that.props.level+1, options: that.props.options, data: row, index: index}) })] ) ) ) ) } else{ that.icon='plus'; return React.createElement("tr", {key: that.props.index, className: 'level-'+that.props.level}, that.props.options.columns.map(that.renderRow)) } }, renderRow:function(col, id){ var icon='indented glyphicon glyphicon-'+this.icon+'-sign' if(id==0 && this.props.options.childListName && this.props.data[this.props.options.childListName] && this.props.data[this.props.options.childListName].length>0){ if(col.onClick){ return React.createElement("td", {key: id, className: col.className, style: col.style}, React.createElement("span", {onClick: this.expand, className: icon}), " ", this.getLinks(this.props.data, col, this.props.index)) } return React.createElement("td", {key: id, className: col.className, style: col.style}, React.createElement("span", {onClick: this.expand, className: icon}), " ", this.props.data[col.field]) } else{ if(col.spark){ return React.createElement("td", {key: id, style: col.style}, React.createElement(JwtSparkLine, {data: this.props.data[col.field], options: col.options})) } if(col.render){ return React.createElement("td", {key: id, dangerouslySetInnerHTML: {__html: col.render(this.props.data,this.props.index)}}) } if(col.onClick){ return React.createElement("td", {key: id, className: col.className, style: col.style}, this.getLinks(this.props.data, col, this.props.index)) } return React.createElement("td", {key: id, className: col.className, style: col.style}, this.props.data[col.field]) } } }); export default JwtNode;
# -*- coding: utf-8 -*-
"""
Last updated on Jan 31 2018

@author: Mengyao XUE
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import Longitude
from astropy.table import Table


def get_gal(c):
    gl_all = Longitude(c.galactic.l)
    gl_all.wrap_angle = 180 * u.deg
    gl_start = np.array(gl_all.rad[np.argmax(gl_all)+1:])
    gl_end = np.array(gl_all.rad[:np.argmax(gl_all)])
    gb_start = np.array(c.galactic.b.rad[np.argmax(gl_all)+1:])
    gb_end = np.array(c.galactic.b.rad[:np.argmax(gl_all)])
    gl = np.concatenate((gl_start, gl_end))*(-1)
    gb = np.concatenate((gb_start, gb_end))
    gl0 = np.full(90, gl[0])
    gl1 = np.full(90, gl[len(gl)-1])
    gb0 = np.linspace(-1.57, gb[0], 90)
    gb1 = np.linspace(gb[len(gb)-1], -1.57, 90)
    for i in range(len(gl)-1):
        if (gl[i]*gl[i+1] < 0 and gl[i] > 3):
            print(gl[i], gl[i+1])
            gl2 = np.full(90, gl[i])
            gb2 = np.linspace(gb[i], 1.57, 90)
            gl3 = np.full(90, gl[i+1])
            gb3 = np.linspace(1.57, gb[i+1], 90)
            gl = np.concatenate((gl0, gl[:i], gl2, gl3, gl[i+1:], gl1))
            gb = np.concatenate((gb0, gb[:i], gb2, gb3, gb[i+1:], gb1))
            break
    print(i)
    return gl, gb


def get_path(glc, gbc):
    # Path expects a concrete sequence of (x, y) vertices; zip() is lazy on Python 3
    verts = list(zip(glc, gbc))
    codes = np.ones(len(glc), int) * Path.LINETO
    codes[0] = Path.MOVETO
    codes[len(glc)-1] = Path.CLOSEPOLY
    sPath = Path(verts, codes)
    return sPath


if __name__ == "__main__":
    data_psr = open('psrcatlog1.56.csv').readlines()
    ra_S = []
    dec_S = []
    ra_N = []
    dec_N = []
    for line_psr in data_psr:
        if line_psr.startswith('#'):
            continue
        words = line_psr.split(',')
        if float(words[3]) < 30:
            ra_S.append(float(words[2]))
            dec_S.append(float(words[3]))
        else:
            ra_N.append(float(words[2]))
            dec_N.append(float(words[3]))

    data_MWA = open('MWA_PSR_20170665.csv').readlines()
    ra_MWA = []
    dec_MWA = []
    for line_MWA in data_MWA:
        if line_MWA.startswith('#'):
            continue
        words_MWA = line_MWA.split(',')
        ra_MWA.append(float(words_MWA[2]))
        dec_MWA.append(float(words_MWA[3]))

    # To plot the celestial equator in galactic coordinates
    alpha = np.linspace(-180., 180., 3600)  # the sample count must be an integer
    delta0 = np.zeros(len(alpha))
    cline_0 = SkyCoord(ra=alpha*u.degree, dec=(delta0-10)*u.degree)
    gl_0, gb_0 = get_gal(cline_0)
    cline_1 = SkyCoord(ra=alpha*u.degree, dec=(delta0+30)*u.degree)
    gl_1, gb_1 = get_gal(cline_1)
    sPath_1 = get_path(gl_1, gb_1)
    cline_2 = SkyCoord(ra=alpha*u.degree, dec=(delta0-50)*u.degree)
    gl_2, gb_2 = get_gal(cline_2)
    sPath_2 = get_path(gl_2, gb_2)

    cpsr_S = SkyCoord(ra=ra_S*u.deg, dec=dec_S*u.deg)
    gl_S = Longitude(cpsr_S.galactic.l)
    gl_S.wrap_angle = 180 * u.deg
    cpsr_N = SkyCoord(ra=ra_N*u.deg, dec=dec_N*u.deg)
    gl_N = Longitude(cpsr_N.galactic.l)
    gl_N.wrap_angle = 180 * u.deg
    cpsr_MWA = SkyCoord(ra=ra_MWA*u.deg, dec=dec_MWA*u.deg)
    gl_MWA = Longitude(cpsr_MWA.galactic.l)
    gl_MWA.wrap_angle = 180 * u.deg

    plt.figure(figsize=(10, 6), dpi=300)
    bx = plt.subplot(111, projection='mollweide')
    #bx = plt.subplot(111, projection='aitoff')
    p_S = bx.scatter(-gl_S.rad, cpsr_S.galactic.b.rad, 1.5, lw=0, marker='o', color='mediumblue', zorder=2,
                     label='All cataloged pulsars (PSRCAT v1.56)')
    p_N = bx.scatter(-gl_N.rad, cpsr_N.galactic.b.rad, 1.5, lw=0, marker='o', color='gray', zorder=2)
    p_MWA = bx.scatter(-gl_MWA.rad, cpsr_MWA.galactic.b.rad, 2.1, marker='o', color='red', zorder=3,
                       label='MWA-VCS incoherent-sum detected pulsars')
    p0 = bx.plot(gl_0, gb_0, linewidth=0.8, dashes=[5, 5], color='dimgray', zorder=4,
                 label='Declination limit of the observable sky from LOFAR')
    p1 = bx.plot(gl_1, gb_1, linewidth=0.8, color='gray', zorder=6)
    p2 = bx.plot(gl_2, gb_2, linewidth=0.8, color='gray', zorder=6)
    spch_1 = patches.PathPatch(sPath_1, facecolor='k', edgecolor='none', alpha=0.1, zorder=1,
                               label='Observable sky from the MWA')
    spch_2 = patches.PathPatch(sPath_2, facecolor='gray', edgecolor='none', alpha=0.5, zorder=0,
                               label='Exclusive sky of the MWA at 80-300 MHz')
    bx.add_patch(spch_1)
    bx.add_patch(spch_2)

    handles, labels = bx.get_legend_handles_labels()
    handles = handles[::-1]
    labels = labels[::-1]
    bx.legend(handles, labels, bbox_to_anchor=(0.65, 1.02, 0.34, 0.2), loc=3, scatterpoints=1  #, numpoints=1
              , ncol=1, mode="expand", borderaxespad=0., fontsize=6, handlelength=3)

    #xtick_labels = ['10h', '8h', '6h', '4h', '2h', '0h', '22h', '20h', '18h', '16h', '14h']
    d = u'\N{DEGREE SIGN}'
    xtick_labels = ['150'+d, '120'+d, '90'+d, '60'+d, '30'+d, '0'+d, '330'+d, '300'+d, '270'+d, '240'+d, '210'+d]
    #bx.set_xticklabels(xtick_labels)
    bx.set_xticklabels([], fontsize='xx-small')
    bx.annotate('0'+d, xy=(0.005, 1.05), xycoords='data', fontsize=8)
    bx.annotate('300'+d, xy=(0.93, 1.05), xycoords='data', fontsize=8)
    bx.annotate('60'+d, xy=(-1.051, 1.05), xycoords='data', fontsize=8)
    bx.annotate('240'+d, xy=(2.0, 1.05), xycoords='data', fontsize=8)
    bx.annotate('120'+d, xy=(-2.095, 1.05), xycoords='data', fontsize=8)
    #bx.set_yticklabels(bx.get_yticklabels(), fontsize='xx-small')
    bx.set_xlabel('l', fontstyle='italic')
    bx.set_ylabel('b', fontstyle='italic')
    plt.grid(True, color='gray', lw=0.5)
    plt.rcParams['font.size'] = 8
    plt.rcParams['legend.fontsize'] = 4
    print(plt.rcParams.keys())
    print(plt.rcParams['font.size'])
    plt.savefig('gal_skycover_201801.png', dpi=600)  # the figure size is set at figure creation, not in savefig
    #plt.savefig('gal_skycover.ps')
    #plt.show()
'use strict'; /** * @format * @flow */ import React from 'react'; import {StyleSheet} from 'react-native'; import { type ViewStyleProp, type TextStyleProp, type ImageStyleProp, } from 'react-native/Libraries/StyleSheet/StyleSheet'; interface Styles { container: ViewStyleProp, shadow: ViewStyleProp, nav: ViewStyleProp, leftIcon: TextStyleProp, title: TextStyleProp, rightIcon: ViewStyleProp, scrollContainer: ViewStyleProp, playlistsWrap: ViewStyleProp, addButton: ViewStyleProp, addButtonText: TextStyleProp, footer: ViewStyleProp, loadingGif: ImageStyleProp, }; const styles: Styles = StyleSheet.create({ container: { flex: 1, flexDirection: 'column', backgroundColor: '#1b1b1e', justifyContent: 'center', }, shadow: { position: 'absolute', top: 0, left: 0, right: 0, height: 65, backgroundColor: '#1b1b1e', shadowColor: '#101010', shadowOffset: {width: 0, height: 8}, shadowRadius: 5, paddingTop: 10, paddingHorizontal: 15, zIndex: 2, alignSelf: 'stretch', }, nav: { flex: 1, alignItems: 'center', flexDirection: 'row', paddingTop: 15, }, leftIcon: { flex: 1, height: 45, fontSize: 30, paddingTop: 7.5, color: '#fefefe', backgroundColor: 'transparent', alignSelf: 'center', }, title: { flex: 6, color: '#fefefe', fontSize: 20, fontFamily: 'Muli', fontWeight: '800', lineHeight: 24, textAlign: 'center', alignSelf: 'center', backgroundColor: 'transparent', }, rightIcon: { flex: 1, alignSelf: 'center', }, scrollContainer: { flex: 1, backgroundColor: '#1b1b1e', marginTop: 65, }, playlistsWrap: { flex: 1, zIndex: -1, backgroundColor: 'transparent', marginTop: 65, }, addButton: { padding: 3, height: 45, backgroundColor: '#2b6dc0', justifyContent: 'center', alignItems: 'center', marginHorizontal: 15, marginTop: 5, marginBottom: 10, borderRadius: 50, shadowColor: '#101010', shadowOffset: { width: 0, height: 8 }, shadowOpacity: 0.25, shadowRadius: 5, }, addButtonText: { fontFamily: 'Muli', fontWeight: '600', fontSize: 18, lineHeight: 23.4, color: '#fefefe', backgroundColor: 'transparent', }, footer: { justifyContent: 'center', alignItems: 'center', paddingVertical: 20, }, loadingGif: { width: 60, height: 72.6, backgroundColor: '#1b1b1e', shadowColor: '#101010', shadowOffset: {width: 0, height: 4}, shadowRadius: 5, shadowOpacity: 0.7, }, }); export default styles;
/** @jsx React.DOM */ var React = require('react') var BS = require('../cjs') var Button = BS.Button var DropdownButton = BS.DropdownButton var MenuItem = BS.MenuItem var Accordion = BS.Accordion var Panel = BS.Panel var ButtonToolbar = BS.ButtonToolbar var OverlayTrigger = BS.OverlayTrigger var Tooltip = BS.Tooltip var Alert = BS.Alert var TabbedArea = BS.TabbedArea var TabPane = BS.TabPane var Modal = BS.Modal var OverlayMixin = BS.OverlayMixin var dropdownInstance = ( <DropdownButton title="Dropdown"> <MenuItem key="1">Item 1</MenuItem> <MenuItem key="2">Item 2</MenuItem> </DropdownButton> ) var accordionInstance = ( <Accordion> <Panel header="Collapsible Group Item #1" key={1}> Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS. </Panel> <Panel header="Collapsible Group Item #2" key={2}> Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS. </Panel> <Panel header="Collapsible Group Item #3" key={3}> Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS. 
      </Panel>
  </Accordion>
);

var positionerInstance = (
    <ButtonToolbar>
      <OverlayTrigger placement="left" overlay={<Tooltip><strong>Holy guacamole!</strong> Check this info.</Tooltip>}>
        <Button bsStyle="default">Holy guacamole!</Button>
      </OverlayTrigger>
      <OverlayTrigger placement="top" overlay={<Tooltip><strong>Holy guacamole!</strong> Check this info.</Tooltip>}>
        <Button bsStyle="default">Holy guacamole!</Button>
      </OverlayTrigger>
      <OverlayTrigger placement="bottom" overlay={<Tooltip><strong>Holy guacamole!</strong> Check this info.</Tooltip>}>
        <Button bsStyle="default">Holy guacamole!</Button>
      </OverlayTrigger>
      <OverlayTrigger placement="right" overlay={<Tooltip><strong>Holy guacamole!</strong> Check this info.</Tooltip>}>
        <Button bsStyle="default">Holy guacamole!</Button>
      </OverlayTrigger>
    </ButtonToolbar>
);

var tabbedAreaInstance = (
    <TabbedArea defaultActiveKey={2}>
      <TabPane key={1} tab="Tab 1">TabPane 1 content</TabPane>
      <TabPane key={2} tab="Tab 2">TabPane 2 content</TabPane>
    </TabbedArea>
);

var AlertAutoDismissable = React.createClass({
  getInitialState: function() {
    return {
      alertVisible: false
    };
  },

  render: function() {
    if (this.state.alertVisible) {
      return (
        <Alert bsStyle="danger" onDismiss={this.handleAlertDismiss} dismissAfter={2000}>
          <h4>Oh snap! You got an error!</h4>
          <p>But this will hide after 2 seconds.</p>
        </Alert>
      );
    }

    return (
      <Button onClick={this.handleAlertShow}>Show Alert</Button>
    );
  },

  handleAlertDismiss: function() {
    this.setState({alertVisible: false});
  },

  handleAlertShow: function() {
    this.setState({alertVisible: true});
  }
});

var CustomModalTrigger = React.createClass({
  mixins: [OverlayMixin],

  getInitialState: function () {
    return {
      isModalOpen: false
    };
  },

  handleToggle: function () {
    this.setState({
      isModalOpen: !this.state.isModalOpen
    });
  },

  render: function () {
    return (
      <Button onClick={this.handleToggle} bsStyle="primary">Launch</Button>
    );
  },

  // This is called by the `OverlayMixin` when this component
  // is mounted or updated and the return value is appended to the body.
  renderOverlay: function () {
    if (!this.state.isModalOpen) {
      return <span/>;
    }

    return (
      <Modal title="Modal heading" onRequestHide={this.handleToggle}>
        <div className="modal-body">
          This modal is controlled by our custom trigger component.
        </div>
        <div className="modal-footer">
          <Button onClick={this.handleToggle}>Close</Button>
        </div>
      </Modal>
    );
  }
});

var allTests = (
  <div>
    <h2>Dropdown</h2>
    {dropdownInstance}
    <h2>Accordion</h2>
    {accordionInstance}
    <h2>Tooltips</h2>
    {positionerInstance}
    <h2>Tabs</h2>
    {tabbedAreaInstance}
    <h2>Alert</h2>
    <AlertAutoDismissable />
    <h2>Modal</h2>
    <CustomModalTrigger />
  </div>
)

React.renderComponent(allTests, document.body)
import React from 'react'; import createSvgIcon from './utils/createSvgIcon'; export default createSvgIcon( <React.Fragment><path fill="none" d="M0 0h24v24H0V0z" /><path d="M15 11V4H4v8.17L5.17 11H6z" opacity=".3" /><path d="M16 13c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1H3c-.55 0-1 .45-1 1v14l4-4h10zm-12-.83V4h11v7H5.17L4 12.17zM22 7c0-.55-.45-1-1-1h-2v9H6v2c0 .55.45 1 1 1h11l4 4V7z" /></React.Fragment> , 'ForumTwoTone');
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * @flow */ import React, {useState} from 'react'; export function Component() { const [count, setCount] = useState(0); return ( <div> <p>You clicked {count} times</p> <button onClick={() => setCount(count + 1)}>Click me</button> </div> ); }
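// A variant sketch (not part of the original file): the functional-updater
// form of setCount avoids reading a stale `count` when several updates are
// batched within one event; this is standard React useState behavior.
export function CounterWithUpdater() {
  const [count, setCount] = useState(0);
  return (
    <button onClick={() => setCount(c => c + 1)}>
      Clicked {count} times
    </button>
  );
}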
import { createStore } from 'test/support/Helpers'
import Model from 'app/model/Model'

describe('Feature – Retrieve', () => {
  class User extends Model {
    static entity = 'users'

    static fields () {
      return {
        id: this.attr(null)
      }
    }
  }

  it('can retrieve all records as a model instance by all method', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const expected = [
      { $id: '1', id: 1 },
      { $id: '2', id: 2 }
    ]

    const users = store.getters['entities/users/all']()

    expect(users.length).toBe(2)
    expect(users[0]).toBeInstanceOf(User)
    expect(users[1]).toBeInstanceOf(User)
    expect(users).toEqual(expected)
  })

  it('can retrieve all records by chained all method', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const expected = [
      { $id: '1', id: 1 },
      { $id: '2', id: 2 }
    ]

    const users = store.getters['entities/users/query']().all()

    expect(users.length).toBe(2)
    expect(users[0]).toBeInstanceOf(User)
    expect(users[1]).toBeInstanceOf(User)
    expect(users).toEqual(expected)
  })

  it('can retrieve all records as a model instance by get method', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const expected = [
      { $id: '1', id: 1 },
      { $id: '2', id: 2 }
    ]

    const users = store.getters['entities/users/query']().get()

    expect(users.length).toBe(2)
    expect(users[0]).toBeInstanceOf(User)
    expect(users[1]).toBeInstanceOf(User)
    expect(users).toEqual(expected)
  })

  it('returns an empty array when no matching records can be found', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const users = store.getters['entities/users/query']().where('id', 3).get()

    expect(users).toEqual([])
  })

  it('can retrieve a single item by id', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const user = store.getters['entities/users/find'](2)

    expect(user.id).toBe(2)
  })

  it('can retrieve an array of items by their ids', async () => {
    const store = createStore([{ model: User }])

    await User.insert({
      data: [{ id: 1 }, { id: 2 }, { id: 3 }]
    })

    const expected = [
      { $id: '1', id: 1 },
      { $id: '3', id: 3 }
    ]

    const users1 = store.getters['entities/users/findIn']([1, 3])

    expect(users1.length).toBe(2)
    expect(users1[0]).toBeInstanceOf(User)
    expect(users1[1]).toBeInstanceOf(User)
    expect(users1).toEqual(expected)

    const users2 = store.getters['entities/users/findIn']([4, 5])

    expect(users2).toEqual([])
  })

  it('can retrieve a single item by chained find method', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const expected = { $id: '2', id: 2 }

    const user = store.getters['entities/users/query']().find(2)

    expect(user).toEqual(expected)
  })

  describe('#first', () => {
    it('can retrieve the first item from the store', async () => {
      createStore([{ model: User }])

      await User.insert({
        data: [{ id: 1 }, { id: 2 }]
      })

      const expected = { $id: '1', id: 1 }

      const user = User.query().first()

      expect(user).toEqual(expected)
    })

    it('returns `null` if it cannot find any record', () => {
      createStore([{ model: User }])

      const user = User.query().first()

      expect(user).toBe(null)
    })
  })

  describe('#last', () => {
    it('can retrieve the last item from the store', async () => {
      createStore([{ model: User }])

      await User.insert({
        data: [{ id: 1 }, { id: 2 }]
      })

      const expected = { $id: '2', id: 2 }

      const user = User.query().last()

      expect(user).toEqual(expected)
    })

    it('returns `null` if it cannot find any record', () => {
      createStore([{ model: User }])

      const user = User.query().last()

      expect(user).toBe(null)
    })
  })

  it('returns null when a single item cannot be found', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const user = store.getters['entities/users/find'](3)

    expect(user).toBe(null)
  })

  it('returns null when passing `undefined` to the `find` method', () => {
    const store = createStore([{ model: User }])

    store.dispatch('entities/users/create', {
      data: [{ id: 1 }, { id: 2 }]
    })

    const user = store.getters['entities/users/find']()

    expect(user).toBe(null)
  })
})
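// A further sketch (not part of the original suite): query constraints can be
// combined, e.g. `where` with `first`. The `Account` model is hypothetical;
// the query API usage mirrors the tests above.
describe('Feature – Retrieve (sketch)', () => {
  class Account extends Model {
    static entity = 'accounts'

    static fields () {
      return {
        id: this.attr(null)
      }
    }
  }

  it('can retrieve the first matching record with a where clause', async () => {
    createStore([{ model: Account }])

    await Account.insert({ data: [{ id: 1 }, { id: 2 }] })

    const account = Account.query().where('id', 2).first()

    expect(account.id).toBe(2)
  })
})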
const Event = require('../models/event')

Event.findByIdAndRemove('5b865df4d6c3162d4d24a439')
  .then((document) => {
    console.log(document)
  }).catch((err) => {
    // Surface failures instead of swallowing them silently.
    console.error(err)
  })
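// Equivalent sketch using async/await (same model and hard-coded id as above);
// behaviorally identical to the promise chain, with the error surfaced.
async function removeEvent(id) {
  try {
    const document = await Event.findByIdAndRemove(id)
    console.log(document)
  } catch (err) {
    console.error(err)
  }
}

removeEvent('5b865df4d6c3162d4d24a439')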
/* * Copyright (c) 2014 haramanai. * integer * version 0.2. * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ this.easelSif.param = this.easelSif.param||{}; (function() { var integer = {}; integer._setConvert = function (layer, param, wanted_type, is_type) { var type = easelSif.param.integer; if (wanted_type === is_type) { param.getValue = type.getValue; param.setValue = type.setValue; } else if (type[is_type]){ param.getValue = type[is_type]; } else { alert('no convert for integer to ' + is_type); } } integer.setValue = function (v) { this.value = v; } integer.getValue = function () { return this.value; } integer.add = function () { return ( this.add.lhs.getValue() + this.add.rhs.getValue() ) * this.add.scalar.getValue(); } integer.subtract = function () { return ( this.subtract.lhs.getValue() - this.subtract.rhs.getValue() ) * this.subtract.scalar.getValue(); } integer.scale = function () { return this.scale.link.getValue() * this.scale.scalar.getValue(); } easelSif.param.integer = integer; }());
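// Usage sketch: a value node whose getValue() is computed from two linked
// sub-parameters, mirroring how the `add` converter above reads
// (lhs + rhs) * scalar. The literal param objects below are hypothetical
// stand-ins for what easelSif would normally build from a .sif file.
var lhs = { value: 2, getValue: easelSif.param.integer.getValue };
var rhs = { value: 3, getValue: easelSif.param.integer.getValue };
var scalar = { value: 10, getValue: easelSif.param.integer.getValue };

var node = { add: { lhs: lhs, rhs: rhs, scalar: scalar } };
node.getValue = easelSif.param.integer.add;
console.log(node.getValue()); // (2 + 3) * 10 = 50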
import md5 from 'md5'
import Credentials from '@/config/Credentials'
import TimeStamp from '@/modules/TimeStamp'

// MD5 digest of timestamp + private key + public key, the request-signing
// scheme used by APIs such as Marvel's developer API.
const HashCreate = {
  hash: md5(
    TimeStamp.now + Credentials.privateKey + Credentials.publicKey
  )
}

export default HashCreate
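// Usage sketch (the request parameter names here are hypothetical): the
// digest is typically sent alongside the public key and the same timestamp
// it was derived from.
export const authParams = {
  ts: TimeStamp.now,
  apikey: Credentials.publicKey,
  hash: HashCreate.hash
}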
import DS from 'ember-data'; import validatorFactory from '../validators/factories'; export default DS.Model.extend({ position: DS.attr('number'), rule: DS.belongsTo('rule'), field: DS.belongsTo('field', {async:true}), operator: DS.attr('string'), //value_type: DS.attr('string'), value: DS.attr('json'), init() { this._super(...arguments); this.validator = validatorFactory.createRuleValidator(this); } });
/* jshint undef: true, unused: true */
/* jshint strict: false */
/* global global, require, console, handlebars, $ */
/* global showAlert, showCriticalAlert, showInfoMessage, package_loader */
/* global MODULE_LOADER, MODULE_CHROME, TASK_LOADER */
/* global KEYBOARD_EVENT_HANDLER, module_manager */

// Flags
/* global DISABLE_AUTO_CLEAR_CONFIG_BINDINGS */

/**
 * Event driven framework for easy module construction.
 *
 * @author: Chris Johnson (LabJack, 2014)
 * @author: Sam Pottinger (LabJack, 2014)
**/

var EventEmitter = require('events').EventEmitter;
var util = require('util');

var async = require('async');
var dict = require('dict');
var q = require('q');
var sprintf = require('sprintf-js').sprintf;

var ljmmm_parse = null;
try {
    ljmmm_parse = require('ljmmm-parse');
    ljmmm_parse.expandLJMMMNameSync = function (name) {
        return ljmmm_parse.expandLJMMMEntrySync(
            {name: name, address: 0, type: 'FLOAT32'}
        ).map(function (entry) { return entry.name; });
    };
} catch (err) {
    console.error('error loading ljmmm_parse');
}

var fs_facade = null;
try {
    fs_facade = global.require('fs_facade');
} catch (err) {
    console.error('error loading fs_facade presenter_framework', err, err.stack);
}

var gns;
var io_manager;
var driver_const;
var modbus_map;
var gui;
try {
    gns = package_loader.getNameSpace();
    io_manager = global.require.main.require('ljswitchboard-io_manager');
    driver_const = global.require('ljswitchboard-ljm_driver_constants');
    modbus_map = global.require('ljswitchboard-modbus_map').getConstants();
    gui = require('nw.gui');
} catch(err) {
    gns = package_loader.getNameSpace();
    io_manager = require.main.require('ljswitchboard-io_manager');
    driver_const = require.main.require('ljswitchboard-ljm_driver_constants');
    modbus_map = require('ljswitchboard-modbus_map').getConstants();
    gui = global.require('gui');
}

var FADE_DURATION = 400;
var DEFAULT_REFRESH_RATE = 1000;
var CONFIGURING_DEVICE_TARGET = '#sd-ramework-configuring-device-display';
var DEVICE_VIEW_TARGET = '#device-view';
var CALLBACK_STRING_CONST = '-callback';

/**
 * Creates a new binding info object with the metadata copied from another.
 *
 * Creates a new binding info object, a structure with all of the information
 * necessary to bind a piece of the module GUI to a register / registers on
 * a LabJack device. This will copy the "metadata" from an existing binding
 * into a new one. Namely, it will re-use the original's direction, event, and
 * format attributes but add in new bindingClass, binding, and template values.
 *
 * @param {Object} original The object with the original binding information.
 * @param {String} bindingClass The bindingClass value to assign to the new
 *      binding.
 * @param {String} binding The register name to bind the GUI element(s) to.
 *      If given an LJMMM string, it will be expanded and all registers named
 *      by the expansion will be bound to the GUI. Note that this expansion
 *      is executed later in the framework and only a single binding will be
 *      returned from this function.
 * @param {String} template The template for the GUI element ID to bind. This
 *      should correspond to an HTML element ID. May contain LJMMM and, if
 *      given an LJMMM string, it will be expanded and matched to the registers
 *      listed in the binding parameter. Note that this expansion
 *      is executed later in the framework and only a single binding will be
 *      returned from this function.
 * @return {Object} New binding.
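 *
 * @example
 * // Sketch (illustrative values, not from the framework): re-use the
 * // metadata of an existing read binding while retargeting it.
 * var copy = cloneBindingInfo(
 *     {direction: 'read', event: undefined, format: '%.4f'},
 *     'ainDisplays', 'AIN0', 'analog-input-0'
 * );
 * // copy.binding === 'AIN0'; copy.template === 'analog-input-0';
 * // copy.direction === 'read' (carried over from the original)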
**/
function cloneBindingInfo (original, bindingClass, binding, template) {
    var retVar = {};
    try {
        retVar = {
            bindingClass: bindingClass,
            template: template,
            binding: binding,
            direction: original.direction,
            event: original.event,
            format: original.format,
            customFormatFunc: original.customFormatFunc,
            currentDelay: original.currentDelay,
            iterationDelay: original.iterationDelay,
            execCallback: original.execCallback,
            callback: original.callback
        };
    } catch (err) {
        console.error('ERROR: ', err);
        retVar = {};
    }
    return retVar;
}

/**
 * Expands the LJMMM in the bindingClass, binding, and template names.
 *
 * Each binding info object has a binding attribute with the name of the
 * register on the device to bind from as well as a template attribute that
 * specifies the ID of the HTML element to bind to. So, binding AIN0 and
 * template analog-input-0 would bind the device register for AIN0 to
 * the HTML element with the id analog-input-0. This function will expand
 * LJMMM names found in either the template or binding attributes. Binding
 * AIN#(0:1) will expand to [AIN0, AIN1] and analog-input-#(0:1) will expand
 * to [analog-input-0, analog-input-1].
 *
 * @param {Object} bindingInfo The object with info about the binding to
 *      expand.
 * @return {Array} Array containing all of the binding info objects that
 *      resulted from expanding the LJMMM found in the original binding info
 *      object's binding and template attributes. If no LJMMM was in the
 *      original binding info object's binding or template attributes, an
 *      Array with a single binding info object will be returned.
**/
function expandBindingInfo (bindingInfo) {
    var expandedBindingClasses = ljmmm_parse.expandLJMMMName(bindingInfo.bindingClass);
    var expandedBindings = ljmmm_parse.expandLJMMMName(bindingInfo.binding);
    var expandedTemplates = ljmmm_parse.expandLJMMMName(bindingInfo.template);

    if (expandedBindings.length != expandedTemplates.length) {
        throw new Error('Unexpected ljmmm expansion mismatch.');
    }

    var newBindingsInfo = [];
    var numBindings = expandedBindings.length;
    for (var i=0; i<numBindings; i++) {
        var clone = cloneBindingInfo(
            bindingInfo,
            expandedBindingClasses[i],
            expandedBindings[i],
            expandedTemplates[i]
        );
        newBindingsInfo.push(clone);
    }

    return newBindingsInfo;
}

function cloneSetupBindingInfo (original, bindingClass, binding) {
    var retVar = {};
    try {
        retVar = {
            bindingClass: bindingClass,
            binding: binding,
            direction: original.direction,
            defaultVal: original.defaultVal,
            execCallback: original.execCallback,
            callback: original.callback
        };
    } catch (err) {
        console.error('ERROR: ', err);
        retVar = {};
    }
    return retVar;
}

function expandSetupBindingInfo (bindingInfo) {
    var expandedBindingClasses = ljmmm_parse.expandLJMMMName(bindingInfo.bindingClass);
    var expandedBindings = ljmmm_parse.expandLJMMMName(bindingInfo.binding);

    if (expandedBindingClasses.length != expandedBindings.length) {
        throw new Error('Unexpected ljmmm expansion mismatch.');
    }

    var newBindingsInfo = [];
    var numBindings = expandedBindings.length;
    for (var i=0; i<numBindings; i++) {
        var clone = cloneSetupBindingInfo(
            bindingInfo,
            expandedBindingClasses[i],
            expandedBindings[i]
        );
        newBindingsInfo.push(clone);
    }

    return newBindingsInfo;
}

/**
 * Force a redraw on the rendering engine.
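 *
 * (The implementation below hides <body>, reads offsetHeight to force a
 * synchronous layout pass, then shows <body> again: a common reflow trick.)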
**/
function runRedraw() {
    document.body.style.display='none';
    var h = document.body.offsetHeight; // no need to store this anywhere, the reference is enough
    document.body.style.display='block';
}
function qRunRedraw() {
    var innerDeferred = q.defer();
    runRedraw();
    innerDeferred.resolve();
    return innerDeferred.promise;
}

/**
 * Function to render device error data
 */
function extrapolateDeviceErrorData(data) {
    // Get the error description (if it exists).
    var description = modbus_map.getErrorInfo(data.code).description;
    data.description = description;

    // Format the "caller" information.
    var callInfo = [];
    var callKeys = Object.keys(data.data);
    callKeys.forEach(function(callKey) {
        callInfo.push(
            // '"' + callKey + '": ' + data.data[callKey].toString()
            callKey + ': ' + JSON.stringify(data.data[callKey])
        );
    });
    callInfo = callInfo.join(', ');
    data.callInfo = callInfo;
    return data;
}

/**
 * Object that manages the modules using the Kipling Module Framework.
**/
function Framework() {

    // List of events that the framework handles
    var eventListener = dict({
        verifyStartupData: null,
        onModuleLoaded: null,
        onDevicesSelected: null,
        onDeviceSelected: null,
        onDevicesConfigured: null,
        onDeviceConfigured: null,
        onTemplateLoaded: null,
        onTemplateDisplayed: null,
        onRegisterWrite: null,
        onRegisterWritten: null,
        onRefresh: null,
        onRefreshed: null,
        onCloseDevice: null,
        onUnloadModule: null,
        onLoadError: null,
        onWriteError: null,
        onRefreshError: null,
        onExecutionError: function (params) { throw params; }
    });
    this.eventListener = eventListener;

    // io_manager object references.
    var io_interface = io_manager.io_interface();
    var driver_controller = io_interface.getDriverController();
    var device_controller = io_interface.getDeviceController();
    this.driver_controller = driver_controller;
    this.device_controller = device_controller;

    var frameworkType = 'singleDevice';
    this.frameworkType = frameworkType;

    var deviceSelectionListenersAttached = false;
    var jquery = null;
    var refreshRate = DEFAULT_REFRESH_RATE;
    var pausedRefreshRate = 100;
    var errorRefreshRate = 1000; // Default to 1sec
    var connectedDevicesRefreshRate = 1000;
    var configControls = [];

    var bindings = dict({});
    var readBindings = dict({});
    var writeBindings = dict({});
    var smartBindings = dict({});

    var setupBindings = dict({});
    var readSetupBindings = dict({});
    var writeSetupBindings = dict({});

    var activeDevices = [];
    var selectedDevices = [];
    var activeDevice;
    var deviceErrorLog = {};
    var userViewFile = '';
    var moduleTemplateBindings = {};
    var moduleTemplateSetupBindings = {};
    var moduleJsonFiles = [];
    var moduleInfoObj;
    var moduleConstants;
    var module;
    var moduleData;
    var deviceErrorCompiledTemplate;
    var printableDeviceErrorCompiledTemplate;

    //Constants for auto-debugging on slow DAQ loops
    var iterationTime;
    var ljmDriverLogEnabled = false;

    // Framework Deletion constant
    var frameworkActive = true;

    this.frameworkActive = frameworkActive;
    this.deviceSelectionListenersAttached = deviceSelectionListenersAttached;
    this.jquery = jquery;
    this.refreshRate = refreshRate;
    this.pausedRefreshRate = pausedRefreshRate;
    this.errorRefreshRate = errorRefreshRate;
    this.connectedDevicesRefreshRate = connectedDevicesRefreshRate;
    this.configControls = configControls;
    this.bindings = bindings;
    this.readBindings = readBindings;
    this.writeBindings = writeBindings;
    this.smartBindings = smartBindings;
    this.setupBindings = setupBindings;
    this.readSetupBindings = readSetupBindings;
    this.writeSetupBindings = writeSetupBindings;
    this.activeDevices = activeDevices;
    this.activeDevice = activeDevice;
    this.selectedDevices = selectedDevices;
    this.deviceErrorLog = deviceErrorLog;
    this.runLoop = false;
    this.userViewFile = userViewFile;
    this.moduleTemplateBindings = moduleTemplateBindings;
    this.moduleTemplateSetupBindings = moduleTemplateSetupBindings;
    this.moduleName = '';
    this.moduleJsonFiles = moduleJsonFiles;
    this.moduleInfoObj = moduleInfoObj;
    this.moduleConstants = moduleConstants;
    this.module = module;
    this.moduleData = moduleData;
    this.flags = {
        'debug_startup': false,
    };
    this.deviceErrorCompiledTemplate = deviceErrorCompiledTemplate;
    this.printableDeviceErrorCompiledTemplate = printableDeviceErrorCompiledTemplate;

    this.uniqueTabID = '';

    //Constants for auto-debugging on slow DAQ loops
    var moduleStartTimeObj = new Date();
    this.iterationTime = moduleStartTimeObj.valueOf();
    this.ljmDriverLogEnabled = ljmDriverLogEnabled;
    this.sdFrameworkDebug = true;
    this.sdFrameworkDebugLoopErrors = true;
    this.sdFrameworkDebugTiming = true;
    this.sdFrameworkDebugDAQLoopMonitor = true;
    this.sdFrameworkDebugDAQLoopMonitorInfo = true;
    this.numContinuousRegLoopIterations = 0;
    this.loopErrorEncountered = false;
    this.loopErrors = [];
    this.daqLoopFinished = false;
    this.daqLoopMonitorTimer = null;
    this.daqLoopStatus = 'un-initialized';
    this.pauseDAQLoop = false;
    this.isDAQLoopPaused = false;
    this.hasNotifiedUserOfPause = false;
    this.allowModuleExecution = true;
    this.isModuleLoaded = false;
    this.isDeviceOpen = false;
    this.startupData = undefined;
    this.isStartupDataValid = false;
    this.DEBUG_STARTUP_DATA = false;
    var self = this;

    this.reportSyntaxError = function(location, err) {
        console.error('Error in:', location);
        console.error('Error obj', err, err.message);
        console.error('Error Str', err.toString());
        var stackSplit = err.stack.split('\n');
        var caller_line = err.stack.split('\n')[4];
        var index = caller_line.indexOf('at ');
        var clean = caller_line.slice(index+2, caller_line.length);
        // console.error('other output:',err.stack);
        console.error('stackSplit', stackSplit);
        // console.error('Stack Dump');
        // stackSplit.forEach(function(stackStr,i){
        //     var str = "";
        //     str += stackStr
        //     console.error(i.toString()+'.',str);
        // });
        // console.error('line:',err.line);
        // console.error('caller',caller_line);
        // console.error('index',index);
        // console.error('clean',clean);
        showCriticalAlert(err.toString());
    };
    this.enableLoopTimingAnalysis = function() {
        self.sdFrameworkDebugTiming = true;
    };
    this.disableLoopTimingAnalysis = function() {
        self.sdFrameworkDebugTiming = false;
    };
    this.enableLoopErrorAnalysis = function() {
        self.sdFrameworkDebugLoopErrors = true;
    };
    this.disableLoopErrorAnalysis = function() {
        self.sdFrameworkDebugLoopErrors = false;
    };
    this.enableLoopMonitorAnalysis = function() {
        self.sdFrameworkDebugDAQLoopMonitor = true;
    };
    this.disableLoopMonitorAnalysis = function() {
        self.sdFrameworkDebugDAQLoopMonitor = false;
    };
    this.print = function(functionName, info, errName) {
        if(typeof(errName) === 'undefined') {
            errName = 'sdFrameworkDebug';
        }
        if(self.sdFrameworkDebug) {
            var fnDefined = (typeof(functionName) !== 'undefined');
            var infoDefined = (typeof(info) !== 'undefined');
            if(fnDefined && infoDefined) {
                console.log(errName, self.moduleName, functionName, info);
            } else if (!fnDefined && infoDefined) {
                console.log(errName, self.moduleName, info);
            } else if (fnDefined && !infoDefined) {
                console.log(errName, self.moduleName, functionName);
            } else {
                console.log(errName, self.moduleName);
            }
        }
    };
    this.printDAQLoopInfo = function(functionName, info) {
if(self.sdFrameworkDebugDAQLoopMonitor) { self.print(functionName,info,'sdFrameworkDebugDAQLoopInfo'); } }; this.printDAQLoopMonitorInfo = function(functionName,info) { if(self.sdFrameworkDebugDAQLoopMonitorInfo) { self.print(functionName,info,'sdFrameworkDebugDAQLoopMonitorInfo'); } }; this.printTimingInfo = function(functionName,info) { if(self.sdFrameworkDebugTiming) { self.print(functionName,info,'sdFrameworkDebugTiming'); } }; this.printLoopErrors = function(functionName,info) { if(self.sdFrameworkDebugLoopErrors) { self.print(functionName,info,'sdFrameworkDebugLoopErrors'); } }; this.setStartupMessage = function(message) { var idString = '#single-device-framework-loading-message'; $(idString).text(message); }; this._SetJQuery = function(newJQuery) { jquery = newJQuery; this.jquery = newJQuery; }; this._SetSelectedDevices = function(newSelectedDevices) { // Initialize the selectedDevices arra selectedDevices = []; newSelectedDevices.forEach(function(device) { var savedAttributes = device.savedAttributes; if(savedAttributes['isSelected-CheckBox']) { selectedDevices.push(device); } }); self.selectedDevices = selectedDevices; }; var _SetSelectedDevices = this._SetSelectedDevices; var getConnectedDeviceInfoJquerySelector = function(serialNumber, extra) { var selector = [ '.SERIAL_NUMBER_' + serialNumber.toString(), ].join(' '); if(extra) { if(Array.isArray(extra)) { var extraData = extra.join(' '); selector += ' '+ extraData; } else { selector += ' '+ extra; } } return selector; }; var getDeviceErrorJquerySelector = function(serialNumber, extra) { var selector = getConnectedDeviceInfoJquerySelector( serialNumber, '.device-selector-table-status' ); if(extra) { if(Array.isArray(extra)) { var extraData = extra.join(' '); selector += ' '+ extraData; } else { selector += ' '+ extra; } } return selector; }; this.getDeviceErrorElements = function(serialNumber) { var elements = {}; elements.statusIcon = undefined; var deviceInfoSelector = getConnectedDeviceInfoJquerySelector( serialNumber ); var statusIconSelector = getConnectedDeviceInfoJquerySelector( serialNumber, ['.connection-status-icon'] ); var badgeSelector = getDeviceErrorJquerySelector( serialNumber, ['.module-chrome-tab-badge', '.badge'] ); var messagesSelector = getDeviceErrorJquerySelector( serialNumber, ['.dropdown-menu-errors'] ); var clearMessagesButtonSelector = getDeviceErrorJquerySelector( serialNumber, ['.clear-error-messages'] ); var copyMessagesButtonSelector = getDeviceErrorJquerySelector( serialNumber, ['.copy-to-clipboard'] ); var deviceVersionNumberSelector = getConnectedDeviceInfoJquerySelector( serialNumber, ['.device-firmware-version-holder'] ); var deviceNameSelector = getConnectedDeviceInfoJquerySelector( serialNumber, ['.device-name-holder'] ); elements.deviceInfo = $(deviceInfoSelector); elements.statusIcon = $(statusIconSelector); elements.badge = $(badgeSelector); elements.messagesSelector = $(messagesSelector); elements.clearErrorsButton = $(clearMessagesButtonSelector); elements.copyMessagesButtonSelector = $(copyMessagesButtonSelector); elements.firmwareVersion = $(deviceVersionNumberSelector); elements.deviceName = $(deviceNameSelector); return elements; }; this.deviceErrorControls = {}; var buildDeviceErrorControls = function(serialNumber) { var elements = self.getDeviceErrorElements(serialNumber); var isFlashing = false; var flashBadge = function() { if(!isFlashing) { isFlashing = true; elements.badge.addClass('badge-important'); setTimeout(function() { elements.badge.removeClass('badge-important'); 
isFlashing = false; }, 500); } }; var saveMessage = function(message) { var currentMessages = elements.messagesSelector.find('.error-message'); var numMessages = currentMessages.length; var numErrors = 0; var badgeText = elements.badge.text(); if(isNaN(badgeText)) { numErrors = 0; } else { numErrors = parseInt(badgeText); } if(numErrors === 0) { // If there were n elements.messagesSelector.find('.error-message').remove(); currentMessages = elements.messagesSelector.find('.error-message'); numMessages = currentMessages.length; } var newItem = '<li class="error-message">' + message.toString() + '</li>'; // if(numMessages < 2) { // elements.messagesSelector.css('height','inherit'); // } else { // elements.messagesSelector.css('height','200px'); // } if(numMessages < 5) { // Simply add the message to the top of the list elements.messagesSelector.prepend(newItem); numMessages += 1; } else { // Remove the bottom messages (fixes any previous over-5 limits) for(var i = 4; i < numMessages; i++) { currentMessages.eq(i).remove(); } // Add the new message elements.messagesSelector.prepend(newItem); } // Update the num-messages badge. numErrors += 1; var numText = numErrors.toString(); elements.badge.text(numText); flashBadge(); }; var saveMessages = function(messages) { messages.forEach(saveMessage); }; var clearMessages = function() { elements.messagesSelector.find('.error-message').remove(); var newItem = '<li class="error-message">No Errors</li>'; // elements.messagesSelector.css('height','inherit'); elements.messagesSelector.prepend(newItem); elements.badge.text('0'); self.device_controller.getDevice({'serialNumber': serialNumber}) .then(function(device) { device.clearDeviceErrors(); }); }; var copyErrorDataToClipboard = function(errorData) { try { var errorsArray = []; errorData.errors.forEach(function(err, i) { var extrapolatedData = extrapolateDeviceErrorData(err); extrapolatedData.errNum = i + 1; errorsArray.push(extrapolatedData); }); errorData.errors = errorsArray; var outputText = self.printableDeviceErrorCompiledTemplate( errorData ); var clipboard = gui.Clipboard.get(); clipboard.set(outputText, 'text'); } catch(err) { console.error('Error Copying data to clipboard', err); } }; var controls = { 'saveMessage': saveMessage, 'saveMessages': saveMessages, 'clearMessages': clearMessages, 'setMessages': function(messages) { // Empty the current list of messages clearMessages(); // Set the number of events displayed on the badge var num = messages.length; var numText = num.toString(); elements.badge.text(numText); var items = []; messages.forEach(function(message) { items.push('<li class="error-message">' + message.toString() + '</li>'); }); items.forEach(function(item) { elements.messagesSelector.prepend(item); }); flashBadge(); }, 'flashBadge': flashBadge, 'updateConnectionStatus': function(isConnected) { if(isConnected) { elements.deviceInfo.removeClass('text-error'); elements.statusIcon.hide(); } else { elements.deviceInfo.addClass('text-error'); elements.statusIcon.show(); } }, 'updateSharingStatus': function(isShared, data) { console.log('in presenter_framework updateSharingStatus', isShared, data); var pt = data.attrs.productType; var sn = data.attrs.serialNumber.toString(); var appName = data.attrs.sharedAppName; var isConnected = data.attrs.isConnected; var msg = ''; if(isShared) { msg = 'The ' + pt + ' with the SN: ' + sn; msg += ' has been opened in ' + appName +'.'; // if(isConnected) { msg += ' Please exit ' + appName + ' to continue using the device.'; // } } else { msg += appName 
+ ' has been closed and the ' + pt; msg += ' with the SN: ' + sn + ' has been re-opened in Kipling'; } showInfoMessage(msg); }, 'copyErrors': function() { self.device_controller.getDevice({'serialNumber': serialNumber}) .then(function(device) { device.getLatestDeviceErrors() .then(copyErrorDataToClipboard); }); }, 'setDeviceFirmwareVersion': function(newVersion) { elements.firmwareVersion.text(newVersion); elements.firmwareVersion.attr( 'title', 'Device Name (' + newVersion + ')' ); }, 'setDeviceName': function(newName) { elements.deviceName.text(newName); elements.deviceName.attr( 'title', 'Device Name (' + newName + ')' ); }, 'elements': elements }; self.deviceErrorControls[serialNumber] = controls; }; function deviceReleasedEventListener(data) { // Device has been shared to an external application. console.warn('in deviceReleasedEventListener', data); self.emit('FRAMEWORK_HANDLED_DEVICE_RELEASED', data); if(data.attrs.serialNumber) { var sn = data.attrs.serialNumber; if(self.deviceErrorControls[sn]) { // Indicate that the device is no longer connected self.deviceErrorControls[sn].updateSharingStatus(true, data); } } } function deviceAcquiredEventListener(data) { // Device released from the external application. console.warn('in deviceAcquiredEventListener', data); self.emit('FRAMEWORK_HANDLED_DEVICE_ACQUIRED', data); if(data.attrs.serialNumber) { var sn = data.attrs.serialNumber; if(self.deviceErrorControls[sn]) { // Indicate that the device is no longer connected self.deviceErrorControls[sn].updateSharingStatus(false, data); } } } var deviceDisconnectedEventListener = function(data) { // console.warn('Device Disconnected', data); self.emit('FRAMEWORK_HANDLED_DEVICE_DISCONNECTED', data); if(data.serialNumber) { var sn = data.serialNumber; if(self.deviceErrorControls[sn]) { // Indicate that the device is no longer connected self.deviceErrorControls[sn].updateConnectionStatus(false); } } }; var deviceReconnectedEventListener = function(data) { // console.warn('Device Reconnected', data); self.emit('FRAMEWORK_HANDLED_DEVICE_RECONNECTED', data); if(data.serialNumber) { var sn = data.serialNumber; if(self.deviceErrorControls[sn]) { // Indicate that the device is connected self.deviceErrorControls[sn].updateConnectionStatus(true); } } }; var deviceErrorEventListener = function(data) { // console.warn('Device Error', data); try { self.emit('FRAMEWORK_HANDLED_DEVICE_ERROR', data); if(data.deviceInfo) { var sn = data.deviceInfo.serialNumber; if(self.deviceErrorControls[sn]) { // Expand on & compile the error message data var compiledMessage = self.deviceErrorCompiledTemplate( extrapolateDeviceErrorData(data) ); // Display the error message self.deviceErrorControls[sn].saveMessage(compiledMessage); } } } catch(err) { console.error('Error Handling Error', err); } }; var deviceReconnectingEventListener = function(data) { // console.warn('Device Reconnecting', data); try { if(data.serialNumber) { var sn = data.serialNumber; if(self.deviceErrorControls[sn]) { // Flash the device error badge // console.log('Flashing badge'); self.deviceErrorControls[sn].flashBadge(); } } } catch(err) { console.error('Error Handling Reconnecting message', err); } }; var deviceAttributesChangedEventListener = function(data) { try { console.log('in DEVICE_ATTRIBUTES_CHANGED event', data); if(data.serialNumber) { var sn = data.serialNumber; if(self.deviceErrorControls[sn]) { // Update the displayed device name var newName = data.DEVICE_NAME_DEFAULT; self.deviceErrorControls[sn].setDeviceName(newName); // Update the 
displayed firmware version var newFirmwareVersion = data.FIRMWARE_VERSION; self.deviceErrorControls[sn].setDeviceFirmwareVersion( newFirmwareVersion ); } } // Relay-information to the device_updater_service var device_updater_service = TASK_LOADER.tasks.device_updater_service; var deviceUpdaterService = device_updater_service.deviceUpdaterService; var newData = self.activeDevices.map(function(dev) { return dev.savedAttributes; }); deviceUpdaterService.updatedDeviceList(newData); } catch(err) { console.error('Error Handling Device Attributes changed message', err); } }; var attachDeviceStatusListeners = function() { self.activeDevices.forEach(function(device) { device.on('DEVICE_DISCONNECTED', deviceDisconnectedEventListener); device.on('DEVICE_RECONNECTED', deviceReconnectedEventListener); device.on('DEVICE_ERROR', deviceErrorEventListener); device.on('DEVICE_RECONNECTING', deviceReconnectingEventListener); device.on('DEVICE_ATTRIBUTES_CHANGED', deviceAttributesChangedEventListener); device.on('DEVICE_RELEASED', deviceReleasedEventListener); device.on('DEVICE_ACQUIRED', deviceAcquiredEventListener); // Attach to the "clear errors" button handlers var sn = device.savedAttributes.serialNumber; self.deviceErrorControls[sn].elements.clearErrorsButton.on( 'click', self.deviceErrorControls[sn].clearMessages ); // Attach to the "copy errors" button handlers self.deviceErrorControls[sn].elements.copyMessagesButtonSelector.on( 'click', self.deviceErrorControls[sn].copyErrors ); }); }; var detachDeviceStatusListeners = function() { self.activeDevices.forEach(function(device) { device.removeListener('DEVICE_DISCONNECTED', deviceDisconnectedEventListener); device.removeListener('DEVICE_RECONNECTED', deviceReconnectedEventListener); device.removeListener('DEVICE_ERROR', deviceErrorEventListener); device.removeListener('DEVICE_RECONNECTING', deviceReconnectingEventListener); device.removeListener('DEVICE_ATTRIBUTES_CHANGED', deviceAttributesChangedEventListener); device.removeListener('DEVICE_RELEASED', deviceReleasedEventListener); device.removeListener('DEVICE_ACQUIRED', deviceAcquiredEventListener); // Turn off "clear errors" button click handlers var sn = device.savedAttributes.serialNumber; self.deviceErrorControls[sn].elements.copyMessagesButtonSelector.off( 'click' ); self.deviceErrorControls[sn].elements.clearErrorsButton.off( 'click' ); }); }; this._SetActiveDevices = function(newActiveDevices) { activeDevices = newActiveDevices; self.activeDevices = newActiveDevices; // Initialize a new error-log self.deviceErrorLog = {}; newActiveDevices.forEach(function(device) { var sn = device.savedAttributes.serialNumber; var controls = buildDeviceErrorControls(sn); // Initialize the error log var messages = []; if(!device.savedAttributes.isConnected) { messages.push('Device Not Connected'); } self.deviceErrorLog[sn] = { 'messages': messages, 'isConnected': device.savedAttributes.isConnected, }; }); // Attach Device Listeners attachDeviceStatusListeners(); }; var _SetActiveDevices = this._SetActiveDevices; this.getActiveDevices = function() { return self.activeDevices; }; var getActiveDevices = this.getActiveDevices; this._SetActiveDevice = function(newActiveDevice) { activeDevice = newActiveDevice; self.activeDevice = newActiveDevice; }; var _SetActiveDevice = this._SetActiveDevice; this.getActiveDevice = function() { return self.activeDevice; }; var getActiveDevice = this.getActiveDevice; var updateDisplayedDeviceErrors = function() { var serialNumbers = Object.keys(self.deviceErrorLog); 
        serialNumbers.forEach(function(serialNumber) {
            // No-op: per-serial-number rendering is not implemented here.
        });
    };
    this.reportDeviceError = function(message, serialNumber) {
        if(typeof(serialNumber) === 'undefined') {
            var activeDevice = getActiveDevice();
            serialNumber = activeDevice.savedAttributes.serialNumber;
        }

        if(self.deviceErrorLog[serialNumber]) {
            // Each log entry is an object; append to its messages array.
            self.deviceErrorLog[serialNumber].messages.push(message);
        } else {
            var errStr = [
                '<h3>Reporting Device Error:</h3>',
                '<ul>',
                '<li>Serial Number: ' + JSON.stringify(serialNumber) + '</li>',
                '<li>Message: ' + JSON.stringify(message) + '</li>',
                '</ul>',
            ].join('');
            console.warn(
                'Showing Error (device sn not valid)',
                message,
                serialNumber
            );
            showAlert(errStr);
        }
    };
    this.reportError = function(data) {
        try {
            // determine where the error should be displayed
            if(data.message) {
                // If the data object has a serial number then it is a device error
                if(data.serialNumber) {
                    self.reportDeviceError(data.message, data.serialNumber);
                } else {
                    console.warn('Showing Error', data, data.message);
                    showAlert(data.message);
                }
            } else {
                console.warn('Showing Error', data);
                var errorStr = '<h3>Program Error:</h3><pre>';
                errorStr += JSON.stringify(data, null, '2');
                errorStr += '</pre>';
                showAlert(errorStr);
            }
        } catch(err) {
            console.error('Error reporting error', err, err.stack);
            showCriticalAlert('Error reporting error (presenter_framework.js)');
        }
    };

    /**
     * Set the callback that should be called for an event.
     *
     * Indicate which function (callback) should be called when the framework
     * encounters each event. Note that previous event listeners for that event
     * will be cleared by calling this.
     *
     * @param {String} name The name of the event to register a callback for.
     * @param {function} listener The function to call when that event is
     *      encountered. Should take a single argument: an object whose
     *      attributes are parameters supplied to the event.
    **/
    this.on = function (name, listener) {
        if (!eventListener.has(name)) {
            self.fire(
                'onLoadError',
                [ 'No such framework event: ' + name ],
                function (shouldContinue) { self.runLoop = shouldContinue; }
            );
            return;
        }
        eventListener.set(name, listener);
    };
    var on = this.on;

    /**
     * Force-cause an event to occur through the framework.
     *
     * @param {String} name The name of the event to fire.
     * @param {Object} params Object whose attributes should be used as
     *      parameters for the event.
     * @param {function} onErr Function to call if an error was encountered
     *      while running event listeners. Optional.
     * @param {function} onSuccess Function to call after the event listeners
     *      finish running. Optional.
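     *
     * @example
     * // Sketch (listener and argument names are illustrative): listeners
     * // receive (framework, params..., onErr, onSuccess).
     * self.fire('onRefresh', [bindingsToRefresh], reportRefreshError, onRefreshed);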
**/ this.fire = function (name, params, onErr, onSuccess) { var noop = function () {}; if (!params) params = []; if (!onSuccess) onSuccess = noop; if (!onErr) onErr = noop; if (!eventListener.has(name)) { onSuccess(); return; } var isValidCall = true; if (self.moduleName !== self.getActiveTabID()) { console.error('Should Skip Call',name, self.moduleName, self.getActiveTabID()); self.allowModuleExecution = false; } var listener = eventListener.get(name); var isValidListener = false; if(listener !== null) { isValidListener = true; } if (isValidCall && isValidListener) { var passParams = []; passParams.push(self); passParams.push.apply(passParams, params); passParams.push(onErr); passParams.push(onSuccess); try{ if(self.flags.debug_runtime) { // console.info('executing module function', name); } listener.apply(null, passParams); } catch (err) { if(self.flags.debug_startup) { console.error('error executing module function', name); } console.error( 'Error firing: '+name, 'moduleData: ', self.moduleData, 'typeof sdModule: ', typeof(sdModule), 'typeof sdFramework: ', typeof(sdFramework), 'frameworkActive', self.frameworkActive, ' Error caught is: ',err.name, 'message: ',err.message,err.stack); try{ var isHandled = false; if (err.name === 'Driver Operation Error') { isHandled = self.manageLJMError(err.message); } onErr(err); } catch (newError) { // Not an LJM error... showCriticalAlert( 'Error Firing: '+name+ '<br>--Error Type: '+err.name+ '<br>--Error Message: '+err.message); onErr(err); } } } else { onSuccess(); } }; var fire = this.fire; /** * Function deletes various 'window.' objects that need to be removed in * order for module to behave properly when switching tabs. * @param {Array} moduleLibraries Array of string "window.xxx" calls that * need to be deleted, (delete window.xxx) when a module gets unloaded. 
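     *
     * @example
     * // Sketch: a module that attached window.myModuleHelpers (a hypothetical
     * // name) would list ['window.myModuleHelpers'] in its
     * // third_party_code_unload array.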
*/ this.unloadModuleLibraries = function(moduleLibraries) { if(moduleLibraries !== undefined) { moduleLibraries.forEach(function(element, index, array){ var delStr = 'delete ' + element; try{ eval(delStr); } catch(err) { console.error('presenter_framework Error Deleting Element',element); } }); } else { // console.warn('presenter_framework, "third_party_code_unload" undefined'); } }; var unloadModuleLibraries = this.unloadModuleLibraries; this.getExitFuncs = function(curState) { var exitPath = q.defer(); var exitErrHandle = function() { var exitDeferred = q.defer(); exitDeferred.resolve(); return exitDeferred.promise; }; if(curState === 'onModuleLoaded') { self.qExecOnUnloadModule() .then(exitPath.resolve,exitPath.reject); } else if(curState === 'onDeviceSelected') { self.qExecOnCloseDevice() .then(self.qExecOnUnloadModule,self.qExecOnUnloadModule) .then(exitPath.resolve,exitPath.reject); } else if(curState === 'onLoadError') { exitPath.reject(); } else { self.qExecOnCloseDevice() .then(self.qExecOnUnloadModule,self.qExecOnUnloadModule) .then(exitPath.resolve,exitPath.reject); } return exitPath.promise; }; var getExitFuncs = this.getExitFuncs; this.convertBindingsToDict = function() { return self.moduleTemplateBindings; }; var convertBindingsToDict = this.convertBindingsToDict; this.qHideUserTemplate = function() { var innerDeferred = q.defer(); self.jquery.fadeOut( DEVICE_VIEW_TARGET, FADE_DURATION, function(){ self.jquery.fadeIn( CONFIGURING_DEVICE_TARGET, FADE_DURATION, innerDeferred.resolve ); } ); return innerDeferred.promise; }; var qHideUserTemplate = this.qHideUserTemplate; this.qShowUserTemplate = function() { var innerDeferred = q.defer(); self.jquery.fadeOut( CONFIGURING_DEVICE_TARGET, FADE_DURATION, function(){ self.jquery.fadeIn( DEVICE_VIEW_TARGET, FADE_DURATION, innerDeferred.resolve ); } ); return innerDeferred.promise; }; var qShowUserTemplate = this.qShowUserTemplate; this.saveModuleStartupDataReference = function(newStartupData) { var defered = q.defer(); self.saveStartupDataReference(newStartupData); defered.resolve(); return defered.promise; }; this.resetModuleStartupData = function() { var defered = q.defer(); var moduleName = self.currentModuleName; if(self.DEBUG_STARTUP_DATA) { console.info('presenter_framework: resetModuleStartupData'); } module_manager.getModulesList() .then(function(moduleSections) { var humanName = ''; var i,j; var moduleSectionKeys = Object.keys(moduleSections); for(i = 0; i < moduleSectionKeys.length; i++) { var moduleSectionKey = moduleSectionKeys[i]; var moduleSection = moduleSections[moduleSectionKey]; for(j = 0; j < moduleSection.length; j++) { var moduleInfo = moduleSection[j]; if(moduleInfo.name === moduleName) { humanName = moduleInfo.humanName; break; } } } if(humanName) { showInfoMessage('Resetting persistent data for: ' + humanName); } else { showAlert('Resetting persistent data for: ' + moduleName); } }); module_manager.revertModuleStartupData(moduleName) .then(module_manager.getModuleStartupData) .then(self.saveModuleStartupDataReference) .then(defered.resolve, defered.reject); return defered.promise; }; this.saveModuleStartupData = function(callerInfo) { var defered = q.defer(); var moduleName = self.currentModuleName; var dataToSave; var isDataValid = self.isStartupDataValid; var saveReasons = []; var saveData = false; try { if(isDataValid) { dataToSave = JSON.parse(JSON.stringify(self.startupData)); saveData = true; saveReasons.push('Data is valid'); } } catch(err) { console.error('presenter_framework: Failed to save 
moduleStartupData'); saveReasons.push('Error while parsing data'); } try { if(saveData) { var keys = Object.keys(dataToSave); if(keys.length > 0) { saveReasons.push('Data has keys'); saveData = true; } else { saveReasons.push('Data does not have any keys'); saveData = false; } } } catch(err) { saveData = false; } if(saveData) { if(self.DEBUG_STARTUP_DATA) { console.info( 'presenter_framework: saving startupData:', callerInfo, moduleName, saveReasons ); } var innerSaveStartupData = function() { return module_manager.saveModuleStartupData( moduleName, dataToSave ); }; var reportSavedStartupData = function() { var defered = q.defer(); defered.resolve(); return defered.promise; }; self.qExecVerifyStartupData('saveStartupData') .then(innerSaveStartupData) .then(reportSavedStartupData) .then(defered.resolve, defered.reject); } else { if(self.DEBUG_STARTUP_DATA) { console.info( 'presenter_framework: not saving startupData', callerInfo, moduleName, saveReasons ); } defered.resolve(); } return defered.promise; }; this.qExecVerifyStartupData = function(callerData) { var innerDeferred = q.defer(); if(self.DEBUG_STARTUP_DATA) { console.info( 'presenter_framework: in verifyStartupData', callerData, self.currentModuleName ); } var finishWithError = function(errData) { console.warn('presenter_framework: verifyStartupData, finishWithError', errData); self.resetModuleStartupData() .then(innerDeferred.resolve, innerDeferred.reject); }; var finishSuccessfully = function() { innerDeferred.resolve(); }; if(self.allowModuleExecution) { self.fire( 'verifyStartupData', [self.startupData], finishWithError, finishSuccessfully ); } else { console.log('allowModuleExecution == false', self.allowModuleExecution); self.getExitFuncs('onModuleLoaded') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; this.initializeStartupData = function() { var defered = q.defer(); var moduleName = self.currentModuleName; var executeVerifyStartupData = function() { return self.qExecVerifyStartupData('initializeStartupData'); }; module_manager.getModuleStartupData(moduleName) .then(self.saveModuleStartupDataReference) .then(executeVerifyStartupData) .then(defered.resolve, defered.reject); return defered.promise; }; this.qExecOnModuleLoaded = function() { var innerDeferred = q.defer(); if(self.allowModuleExecution) { //Save module info if not already defined // if(self.moduleInfoObj === undefined) { // self.moduleInfoObj = LOADED_MODULE_INFO_OBJECT; // moduleInfoObj = LOADED_MODULE_INFO_OBJECT; // } //Fire onModuleLoaded function self.fire( 'onModuleLoaded', [], innerDeferred.reject, function() { self.isModuleLoaded = true; innerDeferred.resolve(); } ); } else { self.getExitFuncs('onModuleLoaded') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnModuleLoaded = this.qExecOnModuleLoaded; this.qExecOnDeviceSelected = function() { var innerDeferred = q.defer(); if(self.allowModuleExecution) { self.fire( 'onDeviceSelected', [self.smartGetSelectedDevices()], innerDeferred.reject, function() { self.isDeviceOpen = true; innerDeferred.resolve(); } ); } else { self.getExitFuncs('onDeviceSelected') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnDeviceSelected = this.qExecOnDeviceSelected; this.qExecOnDeviceConfigured = function(data) { var innerDeferred = q.defer(); if(self.allowModuleExecution) { self.fire( 'onDeviceConfigured', [self.smartGetSelectedDevices(), data], innerDeferred.reject, innerDeferred.resolve ); } else 
{ self.getExitFuncs('onDeviceConfigured') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnDeviceConfigured = this.qExecOnDeviceConfigured; this.qExecOnTemplateDisplayed = function() { var innerDeferred = q.defer(); if(self.allowModuleExecution) { var rejectFunc = function(data) { innerDeferred.reject(data); }; var resolveFunc = function(data) { KEYBOARD_EVENT_HANDLER.initInputListeners(); innerDeferred.resolve(data); }; try{ self.fire( 'onTemplateDisplayed', [], rejectFunc, resolveFunc ); } catch (err) { if(err.name === 'SyntaxError') { console.error('Syntax Error captured'); } console.error('Error caught in qExecOnTemplateDisplayed',err); } } else { self.getExitFuncs('onTemplateDisplayed') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnTemplateDisplayed = this.qExecOnTemplateDisplayed; this.qExecOnTemplateLoaded = function() { var innerDeferred = q.defer(); if(self.allowModuleExecution) { var rejectFunc = function(data) { innerDeferred.reject(data); }; var resolveFunc = function(data) { innerDeferred.resolve(data); }; try{ self.fire( 'onTemplateLoaded', [], rejectFunc, resolveFunc ); } catch (err) { if(err.name === 'SyntaxError') { console.error('Syntax Error captured'); } console.error('Error caught in qExecOnTemplateLoaded',err); } } else { self.getExitFuncs('onTemplateLoaded') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnTemplateLoaded = this.qExecOnTemplateLoaded; this.qExecOnCloseDevice = function() { var device = self.smartGetSelectedDevices(); // Detach device event emitters detachDeviceStatusListeners(); var innerDeferred = q.defer(); var finishSuccess = function(data) { var continueExec = function() { innerDeferred.resolve(data); }; self.saveModuleStartupData('qExecOnCloseDevice-suc') .then(continueExec, continueExec); }; var finishError = function(data) { var continueExec = function() { innerDeferred.reject(data); }; self.saveModuleStartupData('qExecOnCloseDevice-err') .then(continueExec, continueExec); }; if(self.isDeviceOpen) { if(self.allowModuleExecution) { self.fire( 'onCloseDevice', [device], finishError, finishSuccess ); } else { var finishExecution = function() { self.isDeviceOpen = false; self.qExecOnUnloadModule() .then(finishError,finishError); }; self.fire( 'onCloseDevice', [device], finishExecution, finishExecution ); } } else { console.error('in qExecOnCloseDevice, device is not open'); innerDeferred.reject(); } return innerDeferred.promise; }; var qExecOnCloseDevice = this.qExecOnCloseDevice; this.qExecOnLoadError = function(err) { var innerDeferred = q.defer(); if(self.allowModuleExecution) { self.fire( 'onLoadError', [ [ err ], function (shouldContinue) { self.runLoop = shouldContinue; innerDeferred.resolve(); } ] ); } else { self.getExitFuncs('onLoadError') .then(innerDeferred.reject,innerDeferred.reject); } return innerDeferred.promise; }; var qExecOnLoadError = this.qExecOnLoadError; this.qExecOnUnloadModule = function() { var innerDeferred = q.defer(); if(self.isModuleLoaded) { var finishError = function(data) { var continueExec = function() { innerDeferred.reject(data); }; self.saveModuleStartupData('qExecOnUnloadModule-err') .then(continueExec, continueExec); }; //Halt the daq loop self.stopLoop(); //clean up module's third party libraries self.unloadModuleLibraries(self.moduleInfoObj.third_party_code_unload); // Ensure the pgm exit listener has been removed. 
removeProgramExitListener(); // clear any "ModuleWindowResizeListeners" window resize listeners // clearModuleWindowResizeListners(); //If LJM's debug log was enabled, disable it if(self.ljmDriverLogEnabled) { console.info('disabling LJM-log'); console.info('File:',self.ljmDriver.readLibrarySync('LJM_DEBUG_LOG_FILE')); self.ljmDriver.writeLibrarySync('LJM_DEBUG_LOG_MODE',1); self.ljmDriver.writeLibrarySync('LJM_DEBUG_LOG_LEVEL',10); self.ljmDriverLogEnabled = false; } //Inform the module that it has been unloaded. self.fire( 'onUnloadModule', [], finishError, function() { // Detach Device Listeners detachDeviceStatusListeners(); self.isModuleLoaded = false; self.saveModuleStartupData('qExecOnUnloadModule-suc') .then(innerDeferred.resolve, innerDeferred.resolve); } ); } else { innerDeferred.resolve(); } return innerDeferred.promise; }; var qExecOnUnloadModule = this.qExecOnUnloadModule; this.qRenderModuleTemplate = function() { var innerDeferred = q.defer(); self.setDeviceView( self.userViewFile, self.moduleJsonFiles, self.convertBindingsToDict() ) .then(innerDeferred.resolve, innerDeferred.reject); return innerDeferred.promise; }; var qRenderModuleTemplate = this.qRenderModuleTemplate; var innerUpdateActiveDevice = function(data) { var defered = q.defer(); device_controller.getSelectedDevice() .then(function(activeDevice) { data.activeDevice = activeDevice; defered.resolve(data); }); return defered.promise; }; var innerGetDeviceListing = function(data) { var defered = q.defer(); var filters; if(self.moduleData.data.supportedDevices) { filters = self.moduleData.data.supportedDevices; } device_controller.getDeviceListing(filters) .then(function(deviceListing) { data.deviceListing = deviceListing; defered.resolve(data); }); return defered.promise; }; var innerGetDeviceObjects = function(data) { var defered = q.defer(); var filters; if(self.moduleData.data.supportedDevices) { filters = self.moduleData.data.supportedDevices; } device_controller.getDevices(filters) .then(function(devices) { data.activeDevices = devices; defered.resolve(data); }); return defered.promise; }; this.getDeviceSelectorClass = function() { var key = '.device-selection-radio'; if(self.frameworkType === 'singleDevice') { key = '.device-selection-radio'; } else if(self.frameworkType === 'multipleDevices') { key = '.device-selection-checkbox'; } return key; }; var updateSelectedDeviceList = function(data) { var defered = q.defer(); // Save device references self._SetActiveDevices(data.activeDevices); self._SetActiveDevice(data.activeDevice); self._SetSelectedDevices(data.activeDevices); var devs = self.jquery.get(self.getDeviceSelectorClass()); // console.log('What things are chedked?'); // for(var i = 0; i < devs.length; i++) { // console.log('sn',devs.eq(i).prop('value'),devs.eq(i).prop('checked')); // } var foundActiveDevice = false; var activeDevices = self.getSelectedDevices(); activeDevices.forEach(function(activeDev) { var activeSN = activeDev.savedAttributes.serialNumber; for(var i = 0; i < devs.length; i++) { if (activeSN == devs.eq(i).val()) { devs.eq(i).prop('checked', true); foundActiveDevice = true; } } }); // Sloppy code warning... 
        function finishFunc() {
            var selectorKey = '.device-selector-table-device-selector .radio';
            if(self.frameworkType === 'multipleDevices') {
                selectorKey = '.device-selector-table-device-selector .checkbox';
            }
            var selectors = self.jquery.get(selectorKey);
            selectors.show();
            defered.resolve();
        }
        function repeatCallOnSuccess(data) {
            console.log('Repeating Execution of getting active device info...');
            self.qUpdateActiveDevice()
            .then(finishFunc, finishFunc);
        }
        function onSuccess(data) {
            finishFunc();
        }
        function onError(err) {
            finishFunc();
        }
        // End of sloppy code warning...

        if(!foundActiveDevice) {
            // Did not find an active device
            if(self.frameworkType === 'multipleDevices') {
                console.warn('Not sure what to do... presenter_framework.js - updateSelectedDeviceList');
                onSuccess();
            } else {
                var activeDev = devs.eq(0); // Marking first found device
                var activeDevSN = 0;
                try {
                    activeDevSN = activeDev.prop('value');
                } catch(err) {
                    activeDevSN = 0;
                    console.error('ERROR converting SN string to a number', err);
                }
                // console.log('SN:', activeDevSN, 'snType', typeof(activeDevSN));
                // Populate the radio box with a bubble.
                activeDev.prop('checked', true);

                // Communicate with device_manager...?
                // device_controller.selectDevice(activeDevSN);
                getSmartSaveSelectedDevices(activeDevSN)().then(repeatCallOnSuccess, onError);
            }
        } else {
            onSuccess();
        }
        return defered.promise;
    };

    this.qUpdateActiveDevice = function() {
        var defered = q.defer();
        var data = {
            'activeDevice': undefined,  // is populated by func "innerUpdateActiveDevice".
            'deviceListing': undefined, // is populated by func "innerGetDeviceListing".
            'activeDevices': undefined, // is populated by func "innerGetDeviceObjects".
        };

        innerUpdateActiveDevice(data)
        .then(innerGetDeviceListing)
        .then(innerGetDeviceObjects)
        .then(updateSelectedDeviceList) // Computes based on the queried-for objects.
        .then(defered.resolve, defered.reject);
        return defered.promise;
    };
    var qUpdateActiveDevice = this.qUpdateActiveDevice;

    /**
     * Set how frequently the framework should read from the device.
     *
     * @param {int} newRefreshRate The number of milliseconds between updates.
    **/
    this.setRefreshRate = function (newRefreshRate) {
        self.refreshRate = newRefreshRate;
    };
    var setRefreshRate = this.setRefreshRate;

    /**
     * Indicate which HTML controls should cause device configured to fire.
     *
     * Indicate which HTML controls (not bound through putConfigBinding) should
     * cause a device configured event to be fired when they have an
     * event within the HTML view. This could, for example, be a button to
     * write values to a device.
     *
     * @param {Array} newConfigControls An array of Object where each element
     *      has an event attribute with the name of the event to listen for
     *      on the HTML element and a selector attribute which should be a
     *      jQuery selector for the HTML elements to bind the event listener
     *      to.
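     *
     * @example
     * // Sketch (the selector is hypothetical): re-run the device-configured
     * // flow whenever a write-all button is clicked.
     * self.setConfigControls([
     *     {selector: '#write-all-button', event: 'click'}
     * ]);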
**/
this.setConfigControls = function (newConfigControls) {
    self.configControls = newConfigControls;
};
var setConfigControls = this.setConfigControls;

this.qEstablishWriteBindings = function() {
    var innerDeferred = q.defer();
    self.establishWriteBindings(self.writeBindings);
    innerDeferred.resolve();
    return innerDeferred.promise;
};
var qEstablishWriteBindings = this.qEstablishWriteBindings;

this.establishWriteBindings = function(bindings) {
    bindings.forEach(function(binding){
        self.establishWriteBinding(binding);
    });
};
var establishWriteBindings = this.establishWriteBindings;

this.establishWriteBinding = function(binding) {
    // writeBindings.set(newBinding.template, newBinding);
    var jquerySelector = '#' + binding.template;
    jquery.unbind(jquerySelector,binding.event);
    jquery.bind(
        jquerySelector,
        binding.event,
        function (eventData) {
            self._writeToDevice(binding, eventData);
        }
    );
};
var establishWriteBinding = this.establishWriteBinding;

/**
 * Register a new configuration binding.
 *
 * Register a new configuration binding that either causes an HTML element
 * to act as a (frequently updated) display for the value of a register
 * or as an HTML element that allows the user to write the value of
 * a device register. This device binding info object should have
 * attributes:
 *
 * <ul>
 *   <li>{string} class: Description of what type of binding this is. Not
 *          used in this first release of this framework.</li>
 *   <li>{string} template: The ID of the HTML element to bind to. For
 *          example: ain-0-display or ain-#(0:1)-display.</li>
 *   <li>{string} binding: The name of the device register to bind to. For
 *          example: AIN0 or AIN#(0:1).</li>
 *   <li>{string} direction: Either "read" for displaying the value of a
 *          device register or "write" for having an HTML element set the
 *          value of a device register. May also be "hybrid" which will
 *          first read the current value of a register, display that, and
 *          then update the value of that register on subsequent updates
 *          from within the view.</li>
 *   <li>{string} event: The name of the event to bind to. Only required if
 *          write or hybrid. For example, "change" would cause the value to
 *          be written to the device each time an input box value is
 *          changed.</li>
 * </ul>
 *
 * Note that template and binding can contain LJMMM strings. If they do,
 * they will automagically be expanded and bound individually. So, template
 * of analog-#(0:1)-display and binding of AIN#(0:1) will bind
 * analog-0-display to AIN0 and analog-1-display to AIN1.
 *
 * @param {Object} newBinding The binding information object (as described
 *      above) that should be registered.
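 *
 * A minimal sketch of a read binding (the element ID is hypothetical;
 * AIN0 is a standard LabJack analog-input register):
 *
 *     self.putConfigBinding({
 *         bindingClass: 'ain-0-display',
 *         template: 'ain-0-display',
 *         binding: 'AIN0',
 *         direction: 'read',
 *         format: '%.4f'
 *     });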
**/
this.putConfigBinding = function (newBinding) {
    var onErrorHandle = function (shouldContinue) {
        self.runLoop = shouldContinue;
    };

    // ------------------------------------
    // Begin checking for potential binding object related errors

    // if bindingClass isn't defined execute onLoadError
    if (newBinding.bindingClass === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing bindingClass' ],
            onErrorHandle
        );
        return;
    }

    // if template isn't defined execute onLoadError
    if (newBinding.template === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing template' ],
            onErrorHandle
        );
        return;
    }

    // if binding isn't defined execute onLoadError
    if (newBinding.binding === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing binding' ],
            onErrorHandle
        );
        return;
    }

    // if direction isn't defined execute onLoadError
    if (newBinding.direction === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing direction' ],
            onErrorHandle
        );
        return;
    }

    // if the displayType isn't defined then use the standard one.
    if (newBinding.displayType === undefined) {
        newBinding.displayType = 'standard';
    }

    // if iterationDelay isn't defined define it as 0
    if (newBinding.iterationDelay === undefined) {
        newBinding.iterationDelay = 0;
    }

    // initially define the currentDelay as the desired iterationDelay
    if (newBinding.initialDelay === undefined) {
        newBinding.currentDelay = newBinding.iterationDelay;
    } else {
        newBinding.currentDelay = newBinding.initialDelay;
    }

    // if an output format isn't defined define the default
    if (newBinding.format === undefined) {
        newBinding.format = '%.4f';
    }

    // if a customFormatFunc isn't defined define a dummy function
    // just in case
    if (newBinding.customFormatFunc === undefined) {
        newBinding.customFormatFunc = function(rawReading){
            console.info('Here, val:',rawReading);
            var retStr = "'customFormatFunc' NotDefined";
            return retStr;
        };
    }

    if (newBinding.execCallback === undefined) {
        newBinding.execCallback = false;
    }

    // if there is supposed to be a callback but it isn't defined define one
    var isCallback = newBinding.execCallback === true;
    if(isCallback && (newBinding.callback === undefined)) {
        newBinding.callback = function(binding, data, onSuccess){
            console.info('callback, binding:',binding,', data: ', data);
            onSuccess();
        };
    }

    // if adding a write binding and the desired event is undefined execute
    // onLoadError
    var isWrite = newBinding.direction === 'write';
    if (isWrite && newBinding.event === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing event' ],
            onErrorHandle
        );
        return;
    }

    if(newBinding.dataKey === undefined) {
        newBinding.dataKey = 'res';
    }

    // Finished checking for potential binding object related errors
    // ------------------------------------

    // BEGIN:
    // Code for recursively adding configBindings:
    var expandedBindings = expandBindingInfo(newBinding);
    var numBindings = expandedBindings.length;
    if (numBindings > 1) {
        for (var i=0; i<numBindings; i++)
            putConfigBinding(expandedBindings[i]);
        return;
    }
    // END:

    // Code for adding individual bindings to the moduleTemplateBindings,
    // readBindings, writeBindings, and bindings objects
    try{
        if(self.moduleTemplateBindings[newBinding.bindingClass] === undefined) {
            self.moduleTemplateBindings[newBinding.bindingClass] = [];
        }
        self.moduleTemplateBindings[newBinding.bindingClass].push(newBinding);
    } catch (err) {
        console.error('Error in presenter_framework.js, putConfigBinding',err);
    }
    bindings.set(newBinding.template, newBinding);
    var jquerySelector = '#' + newBinding.template;
    if (newBinding.direction ===
'read') { readBindings.set(newBinding.template, newBinding); } else if (newBinding.direction === 'write') { writeBindings.set(newBinding.template, newBinding); self.establishWriteBinding(newBinding); } else { self.fire( 'onLoadError', [ 'Config binding has invalid direction' ], onErrorHandle ); } }; var putConfigBinding = this.putConfigBinding; this.putConfigBindings = function(bindings) { var numBindings = bindings.length; for(var i = 0; i < numBindings; i++) { self.putConfigBinding(bindings[i]); } }; var putConfigBindings = this.putConfigBindings; this.putSmartBinding = function(newSmartBinding) { var onErrorHandle = function(bundle) { console.error('in this.putSmartBinding, onErrorHandle', bundle); }; // if bindingName isn't defined execute onLoadError if (newSmartBinding.bindingName === undefined) { self.fire( 'onLoadError', [ 'SmartBinding binding missing bindingName' ], onErrorHandle ); return; } // if smartName isn't defined execute onLoadError if (newSmartBinding.smartName === undefined) { self.fire( 'onLoadError', [ 'SmartBinding binding missing smartName' ], onErrorHandle ); return; } // if dataKey isn't defined, define it as 'res' if(newSmartBinding.dataKey === undefined) { newSmartBinding.dataKey = 'res'; } var bindingName = newSmartBinding.bindingName; var smartName = newSmartBinding.smartName; var binding = {}; var setupBinding = {}; var isValid = false; // Add generic info to binding binding.bindingClass = bindingName; binding.template = bindingName; // Add generic info to setupBinding setupBinding.bindingClass = bindingName; if(smartName === 'clickHandler') { // Add information to binding object binding.binding = bindingName+CALLBACK_STRING_CONST; binding.direction = 'write'; binding.event = 'click'; binding.execCallback = true; binding.callback = newSmartBinding.callback; // Save binding to framework self.putConfigBinding(binding); isValid = true; } else if (smartName === 'readRegister') { // Add information to binding object binding.binding = bindingName; binding.direction = 'read'; binding.format = newSmartBinding.format; binding.customFormatFunc = newSmartBinding.customFormatFunc; binding.iterationDelay = newSmartBinding.iterationDelay; binding.initialDelay = newSmartBinding.initialDelay; binding.displayType = newSmartBinding.displayType; binding.dataKey = newSmartBinding.dataKey; if(typeof(newSmartBinding.periodicCallback) === 'function') { binding.execCallback = true; } binding.callback = newSmartBinding.periodicCallback; // Add information to setupBinding object setupBinding.binding = bindingName; setupBinding.direction = 'read'; setupBinding.callback = newSmartBinding.configCallback; setupBinding.format = newSmartBinding.format; setupBinding.formatFunc = newSmartBinding.customFormatFunc; setupBinding.dataKey = newSmartBinding.dataKey; if(typeof(newSmartBinding.configCallback) === 'function') { setupBinding.execCallback = true; } setupBinding.callback = newSmartBinding.configCallback; // Save binding to framework self.putConfigBinding(binding); self.putSetupBinding(setupBinding); isValid = true; } else if (smartName === 'setupOnlyRegister') { // Add information to setupBinding object setupBinding.binding = bindingName; setupBinding.direction = 'read'; setupBinding.callback = newSmartBinding.configCallback; setupBinding.format = newSmartBinding.format; setupBinding.formatFunc = newSmartBinding.customFormatFunc; setupBinding.dataKey = newSmartBinding.dataKey; if(typeof(newSmartBinding.configCallback) === 'function') { setupBinding.execCallback = true; } 
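        // Note: the assignment below re-assigns the configCallback already
        // set above in this branch; it is redundant but harmless.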
setupBinding.callback = newSmartBinding.configCallback;

        // Save binding to framework
        self.putSetupBinding(setupBinding);
        isValid = true;
    } else if (smartName === 'periodicFunction') {
        // Add information to binding object
        binding.binding = bindingName+CALLBACK_STRING_CONST;
        binding.direction = 'read';
        binding.format = newSmartBinding.format;
        binding.customFormatFunc = newSmartBinding.customFormatFunc;
        binding.iterationDelay = newSmartBinding.iterationDelay;
        binding.initialDelay = newSmartBinding.initialDelay;
        if(typeof(newSmartBinding.periodicCallback) === 'function') {
            binding.execCallback = true;
        }
        binding.callback = newSmartBinding.periodicCallback;

        // Save binding to framework
        self.putConfigBinding(binding);
        isValid = true;
    }

    if(isValid) {
        self.smartBindings.set(newSmartBinding.bindingName, newSmartBinding);
    }
};
var putSmartBinding = this.putSmartBinding;

this.putSmartBindings = function(newSmartBindings) {
    newSmartBindings.forEach(function(newSmartBinding) {
        self.putSmartBinding(newSmartBinding);
    });
};
var putSmartBindings = this.putSmartBindings;

this.qUpdateSmartBindings = function() {
    var deferred = q.defer();
    self.smartBindings.forEach(function(smartBinding, key) {
        self.putSmartBinding(smartBinding);
    });
    deferred.resolve();
    return deferred.promise;
};
this.printSmartBindings = function() {
    self.smartBindings.forEach(function(smartBinding, key) {
        console.log('Smart Binding Keys: ',key);
    });
};

this.deleteSmartBinding = function(bindingName) {
    if(self.smartBindings.has(bindingName)) {
        var info = self.smartBindings.get(bindingName);
        var deleteBinding = {
            'direction': 'read',
            'bindingClass': bindingName,
            'template': bindingName,
        };
        if(info.smartName === 'clickHandler') {
            deleteBinding.direction = 'write';
            deleteBinding.event = 'click';
        }
        self.deleteConfigBinding(deleteBinding);
        self.deleteSetupBinding(deleteBinding);
        self.smartBindings.delete(bindingName);
    }
};
this.deleteSmartBindings = function(bindingNames) {
    bindingNames.forEach(self.deleteSmartBinding);
};
this.deleteAllSmartBindings = function() {
    var names = [];
    self.smartBindings.forEach(function(smartBinding, key) {
        // Collect the map keys; deleteSmartBinding expects a bindingName,
        // not a smartName.
        names.push(key);
    });
    self.deleteSmartBindings(names);
    self.smartBindings = dict({});
    self.setupBindings = dict({});
};

/**
 * Function to add a single binding that gets read once upon device
 * selection.
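 *
 * A minimal sketch (the bindingClass is hypothetical; FIRMWARE_VERSION is
 * a standard LabJack register) of a binding read once at selection:
 *
 *     self.putSetupBinding({
 *         bindingClass: 'firmware-check',
 *         binding: 'FIRMWARE_VERSION',
 *         direction: 'read',
 *         format: '%.4f'
 *     });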
 * @param {[type]} binding [description]
 * @return {[type]} [description]
 */
this.putSetupBinding = function(newBinding) {
    var onErrorHandle = function (shouldContinue) {
        self.runLoop = shouldContinue;
    };

    // Check for various required binding attributes & report onLoadErrors
    // if they don't exist
    if (newBinding.bindingClass === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing bindingClass' ],
            onErrorHandle
        );
        return;
    }

    if (newBinding.binding === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing binding' ],
            onErrorHandle
        );
        return;
    }

    if (newBinding.direction === undefined) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing direction' ],
            onErrorHandle
        );
        return;
    }

    // if an output format isn't defined define the default
    if (newBinding.format === undefined) {
        newBinding.format = '%.4f';
    }

    // if a formatFunc isn't defined define a dummy function
    if (newBinding.formatFunc === undefined) {
        newBinding.formatFunc = function(rawReading){
            console.info('Here, val:',rawReading);
            var retStr = "'customFormatFunc' NotDefined";
            return retStr;
        };
    }

    if(newBinding.dataKey === undefined) {
        newBinding.dataKey = 'res';
    }

    var isWrite = newBinding.direction === 'write';
    if ( (isWrite) && (newBinding.defaultVal === undefined) ) {
        self.fire(
            'onLoadError',
            [ 'Config binding missing defaultVal' ],
            onErrorHandle
        );
        return;
    }

    if(newBinding.execCallback === undefined) {
        newBinding.execCallback = false;
    }
    if(newBinding.callback === undefined) {
        newBinding.callback = function(data, onSuccess) {
            console.info('SetupBinding requested a callback but none was defined');
            onSuccess();
        };
    }

    var expandedBindings = expandSetupBindingInfo(newBinding);
    var numBindings = expandedBindings.length;
    if (numBindings > 1) {
        for (var i=0; i<numBindings; i++)
            putSetupBinding(expandedBindings[i]);
        return;
    }

    try{
        if(self.moduleTemplateSetupBindings[newBinding.bindingClass] === undefined) {
            self.moduleTemplateSetupBindings[newBinding.bindingClass] = [];
        }
        self.moduleTemplateSetupBindings[newBinding.bindingClass].push(newBinding);
    } catch (err) {
        console.error('Error in presenter_framework.js, putSetupBinding', err);
    }
    self.setupBindings.set(newBinding.bindingClass, newBinding);

    if (newBinding.direction === 'read') {
        self.readSetupBindings.set(newBinding.bindingClass, newBinding);
    } else if (newBinding.direction === 'write') {
        self.writeSetupBindings.set(newBinding.bindingClass, newBinding);
    } else {
        self.fire(
            'onLoadError',
            [ 'Config binding has invalid direction' ],
            onErrorHandle
        );
    }
};
var putSetupBinding = this.putSetupBinding;

/**
 * Function to add multiple bindings that get read once upon device
 * selection.
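 *
 * For example, several setup bindings can be registered at once:
 *
 *     self.putSetupBindings([bindingA, bindingB]);
 *
 * where bindingA and bindingB are objects of the shape accepted by
 * putSetupBinding above.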
 * @param {[type]} binding [description]
 * @return {[type]} [description]
 */
this.putSetupBindings = function(bindings) {
    bindings.forEach(function(binding){
        self.putSetupBinding(binding);
    });
};
var putSetupBindings = this.putSetupBindings;

this.deleteSetupBinding = function(setupBinding) {
    var name = setupBinding.bindingClass;
    if(self.setupBindings.has(name)) {
        var info = self.setupBindings.get(name);
        if(info.direction === 'read') {
            if(self.readSetupBindings.has(name)) {
                self.readSetupBindings.delete(name);
            }
        } else if(info.direction === 'write') {
            if(self.writeSetupBindings.has(name)) {
                self.writeSetupBindings.delete(name);
            }
        }
        self.setupBindings.delete(name);
    }
};
this.deleteSetupBindings = function(setupBindings) {
    setupBindings.forEach(self.deleteSetupBinding);
};

this.executeSetupBindings = function() {
    var deferred = q.defer();

    var addresses = [];
    var directions = [];
    var numValues = [];
    var values = [];
    var bindingClasses = [];
    var formats = [];
    var formatFuncs = [];
    var rwManyData = {
        bindingClasses: bindingClasses,
        addresses: addresses,
        directions: directions,
        numValues: numValues,
        values: values,
        // formats and formatFuncs are filled in by saveSetupBindings below.
        formats: formats,
        formatFuncs: formatFuncs
    };
    // return this.rwMany(addresses, directions, numValues, values);

    var saveSetupBindings = function(setupInfo) {
        var innerDeferred = q.defer();
        self.setupBindings.forEach(function(binding, index){
            setupInfo.formats.push(binding.format);
            setupInfo.formatFuncs.push(binding.formatFunc);
            setupInfo.bindingClasses.push(binding.bindingClass);
            setupInfo.addresses.push(binding.binding);
            setupInfo.numValues.push(1);
            if ( binding.direction === 'read' ) {
                setupInfo.directions.push(0);
                setupInfo.values.push(-1);
            } else if ( binding.direction === 'write' ) {
                setupInfo.directions.push(1);
                setupInfo.values.push(binding.defaultVal);
            }
        });
        innerDeferred.resolve(setupInfo);
        return innerDeferred.promise;
    };

    // Function for executing user callbacks
    function executeCallback (binding, result) {
        var callbackDeferred = q.defer();
        if(binding.execCallback) {
            binding.callback(
                {
                    framework: self,
                    module: self.module,
                    device: self.getSelectedDevice(),
                    binding: binding,
                    value: result.result,
                    result: result
                },
                function() {
                    callbackDeferred.resolve();
                }
            );
        } else {
            callbackDeferred.resolve();
        }
        return callbackDeferred.promise;
    }

    // Function for saving successful write i/o attempts
    function createSuccessfulWriteFunc (ioDeferred, binding, results) {
        return function (value) {
            var result = {
                status: 'success',
                result: -1,
                formattedResult: '-1',
                address: binding.binding
            };
            results.set(binding.bindingClass, result);
            executeCallback(binding,result)
            .then(ioDeferred.resolve,ioDeferred.resolve);
        };
    }

    // Function for saving failed write i/o attempts
    function createFailedWriteFunc (ioDeferred, binding, results) {
        return function (error) {
            var result = {
                status: 'error',
                result: error,
                formattedResult: null,
                address: binding.binding
            };
            results.set(binding.bindingClass, result);
            executeCallback(binding,result)
            .then(ioDeferred.resolve,ioDeferred.resolve);
            // ioDeferred.resolve();
        };
    }

    // Function for saving successful read i/o attempts
    function createSuccessfulReadFunc (ioDeferred, binding, results) {
        return function (value) {
            // console.log('Successful Read',value);
            value = value.val;
            var formattedValue = '';
            var curFormat = binding.format;
            if(typeof(value) === 'number') {
                if(curFormat !== 'customFormat') {
                    if(isNaN(value)) {
                        formattedValue = value;
                    } else {
                        // value is already known to be a number here, so it
                        // can be formatted directly.
                        formattedValue = sprintf(curFormat, value);
                    }
                } else {
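                    // The 'customFormat' path delegates formatting to the
                    // user-supplied formatFunc instead of sprintf.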
formattedValue = binding.formatFunc({ value: value, address: binding.binding, binding: binding }); } } else { formattedValue = value; } var result = { status: 'success', result: value, formattedResult: formattedValue, address: binding.binding }; results.set(binding.bindingClass, result); executeCallback(binding,result) .then(ioDeferred.resolve,ioDeferred.resolve); // ioDeferred.resolve(); }; } // Function for saving failed read i/o attempts function createFailedReadFunc (ioDeferred, binding, results) { return function (error) { // console.log('Error on Read',error); var result = { status: 'error', result: error, formattedResult: null, address: binding.binding }; results.set(binding.bindingClass, result); executeCallback(binding,result) .then(ioDeferred.resolve,ioDeferred.resolve); // ioDeferred.resolve(); }; } // Function that creates future device I/O operations to be executed function createFutureDeviceIOOperation (binding, results) { return function() { //Create execution queue var innerDeferred = q.defer(); var device = self.getSelectedDevice(); //Create various read/write functions var successfulWriteFunc = createSuccessfulWriteFunc( innerDeferred, binding, results ); var failedWriteFunc = createFailedWriteFunc( innerDeferred, binding, results ); var successfulReadFunc = createSuccessfulReadFunc( innerDeferred, binding, results ); var failedReadFunc = createFailedReadFunc( innerDeferred, binding, results ); // console.log('Executing IO Operation', binding.binding); //Link various function calls based off read/write property if(binding.direction === 'write') { //Define write I/O procedure device.qWrite(binding.binding, binding.defaultVal) .then(successfulWriteFunc, failedWriteFunc); } else if (binding.direction === 'read') { //Define read I/O procedure device.sRead(binding.binding) .then(successfulReadFunc, failedReadFunc); } else { console.warn('invalid binding.direction', binding.direction); } //Return execution queue reference return innerDeferred.promise; }; } // Function that creates the IO execution queue function createDeviceIOExecutionQueue (bindings, results) { // Execution queue var bindingList = []; // Populating the execution queue bindings.forEach(function (binding, key) { bindingList.push(createFutureDeviceIOOperation( binding, results )); }); return bindingList; } // Function that executes the device setup commands function executeDeviceSetupQueue (bindings) { var deferred = q.defer(); var results = dict({}); var executionQueue = createDeviceIOExecutionQueue( bindings, results ); //Execute the created execution queue of device IO commands async.eachSeries( executionQueue, function (request, callback) { var successFunc = function() { // console.log('eachSeries Success') callback(); }; var errorFunc = function(err) { // console.log('eachSeries Err',err); callback(err); }; request().then(successFunc,errorFunc); }, function (err) { // console.log('eachSeries Callback',err); deferred.resolve(results); }); return deferred.promise; } var performDeviceWrites = function(setupInfo) { var innerDeferred = q.defer(); var device; var addresses = []; var directions = []; var numValues = []; var values = []; device = self.getSelectedDevice(); addresses = setupInfo.addresses; directions = setupInfo.directions; numValues = setupInfo.numValues; values = setupInfo.values; try{ device.rwMany( addresses, directions, numValues, values ).then( function(results) { var configResults = dict({}); if(results.length != self.setupBindings.size) { console.error('presenter_framework setupBindings 
ERROR!!');
                    console.error('resultsLength',results.length);
                    console.error('setupBindings length', self.setupBindings.size);
                } else {
                    var i = 0;
                    self.setupBindings.forEach(function(binding, key){
                        configResults.set(
                            key,
                            {
                                binding: binding,
                                address: addresses[i],
                                result: results[i]
                            });
                        i += 1;
                    });
                }
                innerDeferred.resolve(configResults);
            }, function(err) {
                innerDeferred.reject(err);
            });
        } catch(err) {
            console.error('performDeviceWrites err',err);
            innerDeferred.reject(err);
        }
        return innerDeferred.promise;
    };

    // Save the setup information
    // Code for executing requests in a single rwMany request:
    // saveSetupBindings(rwManyData)
    // .then(performDeviceWrites,self.qExecOnLoadError)

    // Code for executing requests one at a time
    executeDeviceSetupQueue(self.setupBindings)
    .then(deferred.resolve,deferred.reject);
    return deferred.promise;
};

this._writeToDevice = function (bindingInfo, eventData) {
    var jquerySelector = '#' + bindingInfo.template;
    var newVal = self.jquery.val(jquerySelector);

    var alertRegisterWrite = function () {
        var innerDeferred = q.defer();
        self.fire(
            'onRegisterWrite',
            [
                bindingInfo,
                newVal
            ],
            innerDeferred.reject,
            innerDeferred.resolve
        );
        return innerDeferred.promise;
    };

    var performCallbacks = function(skipWrite) {
        var innerDeferred = q.defer();
        var callbackString = CALLBACK_STRING_CONST;
        var baseStr = bindingInfo.binding;
        var searchIndex = baseStr.search(callbackString);
        if( searchIndex >= 0) {
            if((baseStr.length - searchIndex - callbackString.length) === 0) {
                if(bindingInfo.execCallback) {
                    try {
                        bindingInfo.callback(
                            {
                                framework: self,
                                module: self.module,
                                device: self.getSelectedDevice(),
                                binding: bindingInfo,
                                eventData: eventData,
                                value: newVal,
                            },
                            function(err) {
                                innerDeferred.resolve(skipWrite, true);
                            });
                    } catch (e) {
                        self.reportSyntaxError(
                            {
                                'location':'_writeToDevice.performCallbacks',
                                data: {binding: bindingInfo, eventData: eventData}
                            },e);
                        innerDeferred.resolve(skipWrite, true);
                    }
                    return innerDeferred.promise;
                } else {
                    innerDeferred.resolve(skipWrite, false);
                }
            }
        } else {
            if(bindingInfo.execCallback) {
                try {
                    bindingInfo.callback(
                        {
                            framework: self,
                            module: self.module,
                            device: self.getSelectedDevice(),
                            binding: bindingInfo,
                            eventData: eventData,
                            value: newVal,
                        },
                        function() {
                            innerDeferred.resolve(skipWrite, false);
                        });
                } catch (e) {
                    self.reportSyntaxError(
                        {
                            'location':'_writeToDevice.performCallbacks(2)',
                            data: {binding: bindingInfo, eventData: eventData}
                        },e);
                    innerDeferred.resolve(skipWrite, false);
                }
                return innerDeferred.promise;
            } else {
                innerDeferred.resolve(skipWrite, false);
            }
        }
        return innerDeferred.promise;
    };

    var writeToDevice = function (skipWrite, skip) {
        var innerDeferred = q.defer();
        if(skip) {
            var device = self.getSelectedDevice();
            var invalidString = '-invalid';
            var baseStr = bindingInfo.binding;
            var searchIndex = baseStr.search(invalidString);
            if( searchIndex >= 0) {
                if((baseStr.length - searchIndex - invalidString.length) === 0) {
                    innerDeferred.resolve(false);
                    return innerDeferred.promise;
                }
            }
            if(typeof(skipWrite) === 'undefined') {
                device.write(bindingInfo.binding, newVal);
                innerDeferred.resolve(true);
            } else if(typeof(skipWrite) === "boolean") {
                if(skipWrite === false) {
                    device.write(bindingInfo.binding, newVal);
                    innerDeferred.resolve(true);
                } else {
                    innerDeferred.resolve(false);
                }
            } else {
                innerDeferred.resolve(false);
            }
        } else {
            innerDeferred.resolve(false);
        }
        return innerDeferred.promise;
    };

    var alertRegisterWritten = function (wasNotHandledExternally) {
        if(wasNotHandledExternally) {
            var innerDeferred = q.defer();
            self.fire(
                'onRegisterWritten',
                [
bindingInfo.binding, newVal ], innerDeferred.reject, innerDeferred.resolve ); return innerDeferred.promise; } }; var deferred = q.defer(); // Alert to module that a write is about to happen alertRegisterWrite() // Perform callback if necessary .then(performCallbacks, deferred.reject) // Perform un-handled device IO .then(writeToDevice, deferred.reject) // Notify module that the write has finished .then(alertRegisterWritten, deferred.reject) // Re-draw the window to prevent crazy-window issues .then(qRunRedraw, deferred.reject) .then(deferred.resolve, deferred.reject); return deferred.promise; }; /** * Delete a previously added configuration binding. * * @param {String} bindingName The name of the binding (the binding info * object's original "template" attribute) to delete. **/ this.deleteConfigBinding = function (binding) { var bindingName = binding.bindingClass; var expandedBindings = ljmmm_parse.expandLJMMMName(bindingName); var numBindings = expandedBindings.length; if (numBindings > 1) { for (var i=0; i<numBindings; i++) { deleteConfigBinding(expandedBindings[i]); } return; } if (!self.bindings.has(bindingName)) { self.fire( 'onLoadError', [ 'No binding for ' + bindingName ], function (shouldContinue) { self.runLoop = shouldContinue; } ); return; } var bindingInfo = self.bindings.get(bindingName); self.bindings.delete(bindingName); if (bindingInfo.direction === 'read') { self.readBindings.delete(bindingName); } else if (bindingInfo.direction === 'write') { self.writeBindings.delete(bindingName); var jquerySelector = '#' + bindingInfo.template; jquery.off(jquerySelector, bindingInfo.event); } else { self.fire( 'onLoadError', [ 'Config binding has invalid direction' ], function (shouldContinue) { self.runLoop = shouldContinue; } ); } }; var deleteConfigBinding = this.deleteConfigBinding; this.deleteConfigBindings = function(bindings) { bindings.forEach(function(binding){ self.deleteConfigBinding(binding); }); }; this.clearConfigBindings = function() { bindings = dict({}); readBindings = dict({}); writeBindings = dict({}); moduleTemplateBindings = {}; self.bindings = bindings; self.readBindings = readBindings; self.writeBindings = writeBindings; self.moduleTemplateBindings = moduleTemplateBindings; }; this.qClearConfigBindings = function() { var deferred = q.defer(); var clearBindings = true; if(typeof(DISABLE_AUTO_CLEAR_CONFIG_BINDINGS) === 'boolean') { if(DISABLE_AUTO_CLEAR_CONFIG_BINDINGS) { clearBindings = false; } } if(clearBindings) { self.clearConfigBindings(); } deferred.resolve(); return deferred.promise; }; var qClearConfigBindings = this.qClearConfigBindings; var deleteConfigBindings = this.deleteConfigBindings; /** * Render the HTML view to use for the current module. * * @param {str} templateLoc Path to the HTML template to use to render this * module's view. Will be rendered as a handlebars template. * @param {Array} jsonFiles String paths to the JSON files to use when * rendering this view. Will be provided to the template as an * attribute "json" on the rendering context. Namely, context.json will * be set to an object where the attribute is the name of the JSON file * and the value is the JSON loaded from that file. **/ this.setDeviceView = function (loc, jsonFiles, context) { var defered = q.defer(); if (jsonFiles === undefined) jsonFiles = []; if (context === undefined) context = {}; // Append the selected devices array to the context to allow templates // to adjust their displayed content based on what devices are available. 
context.devices = self.getSelectedDevices();

    var onErr = function(data) {
        console.error('in this.setDeviceView, onErr', data);
    };
    // console.log('context (analogInputs)', context);
    // console.log('moduleTemplateBindings:', self.moduleTemplateBindings);

    // Create an error handler
    var reportLoadError = function (details) {
        console.error('reporting load error', details);
        onErr({'msg': details});
        self.fire(
            'onLoadError',
            [ details ],
            function (shouldContinue) {
                self.runLoop = shouldContinue;
            }
        );
    };

    // Load the supporting JSON files for use in the template
    var jsonTemplateVals = {};
    var loadJSONFiles = function () {
        var innerDefered = q.defer();
        // Set the jsonTemplateVals object equal to the already-loaded .json
        // module data object.
        jsonTemplateVals = self.moduleData.json;
        innerDefered.resolve();
        return innerDefered.promise;
    };

    // Load the HTML view template and render
    var prepareHTMLTemplate = function () {
        var innerDefered = q.defer();
        var cacheKey = '';
        var i;
        for(i = 0; i < self.moduleData.html.length; i++) {
            if(self.moduleData.html[i].fileName === 'view.html') {
                cacheKey = self.moduleData.html[i].filePath;
                break;
            }
        }
        context.json = jsonTemplateVals;
        // console.log('in prepareHTMLTemplate', self.getSelectedDevices());
        fs_facade.renderCachedTemplateData(
            cacheKey,
            self.moduleData.htmlFiles.view,
            context
        )
        .then(innerDefered.resolve, innerDefered.reject);
        return innerDefered.promise;
    };

    var introduceDelay = function() {
        var innerDefered = q.defer();
        setTimeout(innerDefered.resolve, 200);
        return innerDefered.promise;
    };

    var forceRefresh = function() {
        var innerDefered = q.defer();
        runRedraw();
        innerDefered.resolve();
        return innerDefered.promise;
    };

    var injectHTMLTemplate = function (htmlContents) {
        var innerDefered = q.defer();
        // var moduleDiv = $(DEVICE_VIEW_TARGET);
        // moduleDiv.html(htmlContents);
        htmlContents = '<div class="framework-template">' + htmlContents + '</div>';
        self.jquery.html(DEVICE_VIEW_TARGET, htmlContents);
        $('.framework-template').ready(runRedraw);
        innerDefered.resolve();
        return innerDefered.promise;
    };

    var attachListeners = function () {
        var innerDefered = q.defer();
        if(self.deviceSelectionListenersAttached === false) {
            self.jquery.on(
                self.getDeviceSelectorClass(),
                'click',
                self._changeSelectedDeviceUI
            );
            self.deviceSelectionListenersAttached = true;
            deviceSelectionListenersAttached = true;
        }
        innerDefered.resolve();
        return innerDefered.promise;
    };

    loadJSONFiles()
    .then(prepareHTMLTemplate, reportLoadError)
    .then(injectHTMLTemplate, reportLoadError)
    //.then(introduceDelay, reportLoadError)
    //.then(forceRefresh, reportLoadError)
    .then(attachListeners, reportLoadError)
    .then(defered.resolve, defered.reject);
    return defered.promise;
};
var setDeviceView = self.setDeviceView;

var getSaveSelectedDevice = function(serialNumber) {
    var saveSelectedDevice = function() {
        var defered = q.defer();
        device_controller.selectDevice(serialNumber)
        .then(defered.resolve, defered.reject);
        return defered.promise;
    };
    return saveSelectedDevice;
};

var getSaveSelectedDevices = function(serialNumbers) {
    var saveSelectedDevices = function() {
        var defered = q.defer();
        device_controller.selectDevices(serialNumbers)
        .then(defered.resolve, defered.reject);
        return defered.promise;
    };
    return saveSelectedDevices;
};

var getSmartSaveSelectedDevices = function(serialNumberData) {
    // If the data is an array, call and return the saveDevices function
    if(Array.isArray(serialNumberData)) {
        return getSaveSelectedDevices(serialNumberData);
    } else {
        return getSaveSelectedDevice(serialNumberData);
    }
};
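// A minimal usage sketch (the serial numbers are hypothetical): both forms
// return a function that kicks off the save and yields a promise, which
// makes them suitable for use inside a .then() chain.
//
//     getSmartSaveSelectedDevices(470010000)();
//     getSmartSaveSelectedDevices([470010000, 470010001])();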
this._changeSelectedDeviceUI = function (data) {
    // var serialNumber = self.jquery.get(
    //     '.device-selection-radio:checked'
    // );
    var serialNumber;
    var serialNumbers = [];
    var selectDevicesData;
    try {
        var elements = self.jquery.get(
            self.getDeviceSelectorClass() + ':checked'
        );
        if(self.frameworkType === 'singleDevice') {
            serialNumber = elements.val();
            selectDevicesData = serialNumber;
        } else if(self.frameworkType === 'multipleDevices') {
            var numEle = elements.length;
            for(var i = 0; i < numEle; i++) {
                serialNumbers.push(elements.eq(i).val());
            }
            selectDevicesData = serialNumbers;
        } else {
            console.warn('Wrong frameworkType', self.frameworkType);
            serialNumber = elements.val();
            selectDevicesData = serialNumber;
        }
    } catch(err) {
        console.error('error in _changeSelectedDeviceUI', err);
    }
    // console.log('in _changeSelectedDeviceUI', selectDevicesData);

    // Perform necessary actions:
    setTimeout(function() {
        // Stop the DAQ loop
        self.qStopLoop()
        // Report that the device has been closed
        .then(self.qExecOnCloseDevice, self.qExecOnLoadError)
        // Hide the module's template
        .then(self.qHideUserTemplate, self.qExecOnLoadError)
        // Save the selected device (to allow for device switching w/o module re-loading)
        .then(getSmartSaveSelectedDevices(selectDevicesData), self.qExecOnLoadError)
        // Update the currently-active device (This will force a valid device to be selected).
        .then(self.qUpdateActiveDevice, self.qExecOnLoadError)
        // Report that a new device has been selected
        .then(self.qExecOnDeviceSelected, self.qExecOnLoadError)
        // Clear all config-bindings (if not disabled)
        .then(self.qClearConfigBindings, self.qExecOnLoadError)
        // Re-configure any smartBindings
        .then(self.qUpdateSmartBindings, self.qExecOnLoadError)
        // Configure the device
        .then(self.executeSetupBindings, self.qExecOnLoadError)
        // Report that the device has been configured
        .then(self.qExecOnDeviceConfigured, self.qExecOnLoadError)
        // Render the module's template
        .then(self.qRenderModuleTemplate, self.qExecOnLoadError)
        // Connect any established writeBindings to jquery events
        .then(self.qEstablishWriteBindings, self.qExecOnLoadError)
        // Report that the module's template has been loaded
        .then(self.qExecOnTemplateLoaded, self.qExecOnLoadError)
        // Start the DAQ loop
        .then(self.qStartLoop, self.qExecOnLoadError)
        // Display the module's template
        .then(self.qShowUserTemplate, self.qExecOnLoadError)
        // Report that the module's template has been displayed
        .then(self.qExecOnTemplateDisplayed, self.qExecOnLoadError)
        // Re-draw the window to prevent window-disappearing issues
        .then(qRunRedraw, self.qExecOnLoadError)
        .done();
    },5);
};

/**
 * Get the currently selected device.
 *
 * @return {presenter.Device} The device selected as the "active" device.
**/
this.getSelectedDevice = function () {
    return self.activeDevice;
};
var getSelectedDevice = this.getSelectedDevice;

this.getSelectedDevices = function() {
    if(self.frameworkType === 'singleDevice') {
        return [self.activeDevice];
    } else if(self.frameworkType === 'multipleDevices') {
        return self.selectedDevices;
    } else {
        return [self.activeDevice];
    }
};

this.smartGetSelectedDevices = function() {
    if(self.frameworkType === 'singleDevice') {
        return self.getSelectedDevice();
    } else if(self.frameworkType === 'multipleDevices') {
        return self.getSelectedDevices();
    } else {
        return self.getSelectedDevice();
    }
};

/**
 * Function that should be called after all of the bindings have been added.
* * Function that should be called after all of the config bindings have been * added and all of the config controls have been set. **/ this.establishConfigControlBindings = function () { var listener = self._OnConfigControlEvent; var jquery = self.jquery; self.configControls.forEach(function (value) { jquery.on(value.selector, value.event, listener); }); }; var establishConfigControlBindings = this.establishConfigControlBindings; /** * Stop the module's refresh loop. **/ this.stopLoop = function () { self.runLoop = false; }; var stopLoop = this.stopLoop; this.qStopLoop = function() { var defered = q.defer(); self.runLoop = false; if(self.frameworkLoopProcessing) { // console.log('framework loop is currently processing (qStopLoop)...'); clearTimeout(self.frameworkLoopReference); var num = 0; var checkDAQLoop = function() { // console.log('Delaying (qStopLoop)....', num); num += 1; if(self.isDAQLoopActive) { setTimeout(checkDAQLoop, 100); } else { defered.resolve(); } }; var daqLoopChecker = setTimeout(checkDAQLoop, 100); } else { defered.resolve(); } return defered.promise; }; /** * Start the module's refresh loop. **/ this.isDAQLoopActive = false; this.startLoop = function () { self.runLoop = true; self.isDAQLoopActive = true; // self.loopIteration(); self.runLoopIteration(); }; var startLoop = this.startLoop; self.qStartLoop = function() { var defered = q.defer(); self.startLoop(); defered.resolve(); return defered.promise; }; this.printCurTime = function(message) { var d = new Date(); console.log(message,d.valueOf() - self.iterationTime - self.refreshRate); }; this.daqMonitor = function() { if(!self.daqLoopFinished) { self.printDAQLoopMonitorInfo('DAQ-Loop-Lock-Detected',self.daqLoopStatus); } }; this.qConfigureTimer = function() { var innerDeferred = q.defer(); if(!self.frameworkActive) { innerDeferred.reject(); return innerDeferred.promise; } var d = new Date(); var curTime = d.valueOf(); var elapsedTime = curTime - self.iterationTime - self.refreshRate; self.printTimingInfo('elapsedTime',elapsedTime); var delayTime = self.refreshRate; if ((errorRefreshRate - elapsedTime) < 0) { if(self.loopErrorEncountered) { self.printDAQLoopInfo('sdFramework DAQ Loop is slow (Due to error)...',elapsedTime); } else { self.printDAQLoopInfo('sdFramework DAQ Loop is slow (Not due to error)...',elapsedTime); } device_controller.ljm_driver.readLibrarySync('LJM_DEBUG_LOG_MODE'); if(!self.ljmDriverLogEnabled) { console.info('enabling LJM-log', elapsedTime); self.ljmDriver.writeLibrarySync('LJM_DEBUG_LOG_MODE',2); self.ljmDriver.writeLibrarySync('LJM_DEBUG_LOG_LEVEL',2); var confTimeout = self.ljmDriver.readLibrarySync('LJM_SEND_RECEIVE_TIMEOUT_MS'); self.ljmDriver.logSSync(2,'initDebug: Slow DAQ Loop: '+elapsedTime.toString()); self.ljmDriver.logSSync(2,self.moduleName); self.ljmDriver.logSSync(2,'TCP_SEND_RECEIVE_TIMEOUT: '+confTimeout.toString()); self.ljmDriverLogEnabled = true; self.numContinuousRegLoopIterations = 0; } else { self.ljmDriver.logSSync(2,'Slow DAQ Loop: '+elapsedTime.toString()); self.numContinuousRegLoopIterations = 0; } delayTime = 10; } else { if(self.ljmDriverLogEnabled) { console.info('sdFramework DAQ Loop is running normally...',elapsedTime); if(self.numContinuousRegLoopIterations > 5) { var numIt = self.numContinuousRegLoopIterations; console.info('disabling LJM-log, loop is running smoothly again'); self.ljmDriver.logSSync(2,'Slow DAQ Loop (RESOLVED) after: '+numIt.toString()); self.ljmDriver.writeLibrarySync('LJM_DEBUG_LOG_MODE',1); self.numContinuousRegLoopIterations = 0; 
self.ljmDriverLogEnabled = false;
            } else {
                self.numContinuousRegLoopIterations += 1;
            }
        }
    }
    self.iterationTime = curTime;

    if(self.loopErrorEncountered) {
        self.loopErrors.forEach(function(error){
            self.printLoopErrors('Loop Errors',error);
        });
    }

    // Clear loop errors
    self.loopErrorEncountered = false;
    self.loopErrors = [];
    self.daqLoopStatus = 'timerConfigured';
    if((typeof(sdModule) !== 'undefined') && (typeof(sdFramework) !== 'undefined')) {
        setTimeout(self.runLoopIteration, self.refreshRate);
    } else {
        console.info('sdModule or sdFramework not defined!!');
    }
    innerDeferred.resolve();
    return innerDeferred.promise;
};
var qConfigureTimer = this.qConfigureTimer;

this.unpauseFramework = function() {
    self.isDAQLoopPaused = false;
};
var unpauseFramework = this.unpauseFramework;

this.pauseFramework = function(pauseNotification) {
    self.pauseDAQLoop = true;
    self.isPausedListenerFunc = pauseNotification;
    return function() {
        self.isDAQLoopPaused = false;
    };
};
var pauseFramework = this.pauseFramework;

this.testPauseFramework = function() {
    self.pauseFramework(
        function() {
            console.info('Framework is paused!');
            self.unpauseFramework();
        }
    );
};

var reportStartingDaqLoop = function() {
    var defered = q.defer();
    self.daqLoopFinished = false;
    self.daqLoopStatus = 'startingLoop';
    defered.resolve();
    return defered.promise;
};

var reportFinishedDaqLoop = function(data) {
    var defered = q.defer();
    var finishedFunc = function() {
        self.daqLoopFinished = true;
        self.daqLoopStatus = 'finishedDaqLoop';
        defered.resolve();
    };
    var executeFinishedFunc = true;
    if(data) {
        // console.log('DAQ Loop Finished', data);
        if(data === 'delay') {
            executeFinishedFunc = false;
            try {
                triggerModuleOnRefreshed([])
                .then(finishedFunc, finishedFunc);
            } catch(err) {
                console.error('Error in reportFinishedDaqLoop', err);
                finishedFunc();
            }
        } else {
            // console.log('DAQ Loop Finished', data);
        }
    }
    if(executeFinishedFunc) {
        finishedFunc();
    }
    return defered.promise;
};

var getNeededAddresses = function () {
    var defered = q.defer();
    self.daqLoopStatus = 'getNeededAddresses';
    var addresses = [];
    var formats = [];
    var customFormatFuncs = [];
    var bindings = [];

    // Loop through all registered bindings and determine what should be
    // done.
    self.readBindings.forEach(function (value, key) {
        // For each binding check to see if it should be executed by
        // checking its currentDelay. If it equals zero then it needs
        // to be executed.
        if(value.currentDelay <= 0) {
            // Search bindings for custom bindings
            var callbackString = CALLBACK_STRING_CONST;
            var baseStr = value.binding;
            var searchIndex = baseStr.search(callbackString);
            if( searchIndex < 0) {
                // if the CALLBACK_STRING_CONST tag wasn't found then
                // add the binding to the list of registers that need
                // to be queried from the device.
addresses.push(value.binding);
                formats.push(value.format);
                customFormatFuncs.push(value.customFormatFunc);
            }
            bindings.push(value);

            // Reset the binding's delay with the new delay
            value.currentDelay = value.iterationDelay;
            self.readBindings.set(key, value);
        } else {
            // Decrement the binding's delay
            value.currentDelay = value.currentDelay - 1;
            self.readBindings.set(key, value);
        }
    });
    if(addresses.length > 0) {
        defered.resolve({
            addresses: addresses,
            formats: formats,
            customFormatFuncs: customFormatFuncs,
            bindings: bindings
        });
    } else {
        defered.reject('delay');
    }
    return defered.promise;
};

var triggerModuleOnRefresh = function (bindingsInfo) {
    var innerDeferred = q.defer();
    self.daqLoopStatus = 'triggerModuleOnRefresh';
    self.fire(
        'onRefresh',
        [ bindingsInfo ],
        function() {
            innerDeferred.reject();
        },
        function () {
            innerDeferred.resolve(bindingsInfo);
        }
    );
    return innerDeferred.promise;
};

var requestDeviceValues = function (bindingsInfo) {
    var innerDeferred = q.defer();
    self.daqLoopStatus = 'requestDeviceValues';
    var device = self.getSelectedDevice();
    var addresses = bindingsInfo.addresses;
    var formats = bindingsInfo.formats;
    var customFormatFuncs = bindingsInfo.customFormatFuncs;
    var bindings = bindingsInfo.bindings;

    if (addresses.length === 0) {
        innerDeferred.resolve({
            values: [],
            addresses: [],
            formats: [],
            customFormatFuncs: []
        });
        return innerDeferred.promise;
    }

    device.sReadMany(addresses)
    .then(
        function (values) {
            innerDeferred.resolve({
                values: values,
                addresses: addresses,
                formats: formats,
                customFormatFuncs: customFormatFuncs,
                bindings: bindings
            });
        },
        innerDeferred.reject
    );
    return innerDeferred.promise;
};

var processDeviceValues = function (valuesInfo) {
    var innerDeferred = q.defer();
    self.daqLoopStatus = 'processDeviceValues';
    var values = valuesInfo.values;
    var addresses = valuesInfo.addresses;
    var formats = valuesInfo.formats;
    var customFormatFuncs = valuesInfo.customFormatFuncs;
    var numAddresses = addresses.length;
    var bindings = valuesInfo.bindings;
    var retDict = dict({});

    // Iterate through the bindings and execute them using the async library
    var curDeviceIOIndex = 0;

    var innerProcessSingleDeviceValue = function(binding, nextStep) {
        // Executed for each binding.
        // Search the binding string for the callback-bindings tag
        var callbackString = CALLBACK_STRING_CONST;
        var baseStr = binding.binding;
        var searchIndex = baseStr.search(callbackString);
        var dataKey = binding.dataKey;
        var index = curDeviceIOIndex;
        var curResult = values[index];
        var curAddress = addresses[index];
        var stringVal;
        var curValue;
        var curVal;

        // Periodic bindings (ones that don't perform any device IO)
        // will have an undefined curResult value.
if(curResult) {
            if(dataKey === '') {
                curValue = curResult;
            } else {
                if(typeof(curResult[dataKey]) !== 'undefined') {
                    curValue = curResult[dataKey];
                } else {
                    curValue = curResult.val;
                }
            }
            curVal = curResult.res;
        }

        if( searchIndex < 0) {
            // If the tag was not found then perform auto-formatting
            var curFormat = formats[index];
            var curCustomFormatFunc = customFormatFuncs[index];

            if(curFormat !== 'customFormat') {
                if(isNaN(curVal)){
                    stringVal = curVal;
                } else {
                    if(typeof(curVal) === 'number') {
                        stringVal = sprintf(curFormat, curVal);
                    } else {
                        if(curResult.str === '-Infinity') {
                            stringVal = (NaN).toString();
                        } else if((curResult.str === '+Infinity') || (curResult.str === 'Infinity')) {
                            stringVal = (NaN).toString();
                        } else {
                            console.warn('Replacing a non-value in processDeviceValues', curVal, curFormat, baseStr, curResult);
                            stringVal = (0).toString();
                        }
                    }
                    // stringVal = curVal.toString();
                }
            } else {
                stringVal = curCustomFormatFunc({
                    value: curVal,
                    address: curAddress,
                    binding: binding
                });
            }
            retDict.set(
                curAddress.toString(),
                stringVal
            );

            // Increment current index
            curDeviceIOIndex += 1;
        } else {
            if(binding.execCallback === false) {
                console.warn('Warning, PeriodicFunction found but not executing',binding);
            }
        }

        // If the current binding has a callback that needs to be
        // executed, execute it now
        if(binding.execCallback) {
            // Execute read-binding function callback
            try {
                binding.callback(
                    {
                        // Data to be passed to callback function
                        framework: self,
                        module: self.module,
                        device: self.getSelectedDevice(),
                        binding: binding,
                        value: curValue,
                        stringVal: stringVal
                    },
                    function executeNextStep() {
                        // Instruct async to perform the next step
                        nextStep();
                    });
            } catch (e) {
                self.reportSyntaxError(
                    {
                        'location':'loopIteration.processDeviceValues',
                        data: {binding: binding,value:curValue,stringVal:stringVal}
                    },e);
                nextStep();
            }
        } else {
            // Instruct async to perform the next step
            nextStep();
        }
    };

    async.eachSeries(
        bindings,
        function processSingleDeviceValue (binding, nextStep) {
            try {
                innerProcessSingleDeviceValue(binding, nextStep);
            } catch(err) {
                console.warn('Caught error while in processSingleDeviceValue', err);
                nextStep();
            }
        },
        function(err) {
            // Executed when all bindings have been executed
            innerDeferred.resolve(retDict);
        });
    return innerDeferred.promise;
};

var displayDeviceValues = function (valuesDict) {
    var innerDeferred = q.defer();
    self.daqLoopStatus = 'displayDeviceValues';
    self._OnRead(valuesDict);
    innerDeferred.resolve(valuesDict);
    return innerDeferred.promise;
};

var triggerModuleOnRefreshed = function (valuesDict) {
    var innerDeferred = q.defer();
    self.daqLoopStatus = 'triggerModuleOnRefreshed';
    self.fire(
        'onRefreshed',
        [ valuesDict ],
        innerDeferred.reject,
        function () {
            innerDeferred.resolve();
        }
    );
    return innerDeferred.promise;
};

var verifyFrameworkIsActive = function(bundle) {
    var defered = q.defer();
    // Make sure that this framework instance is active.
    if(!self.frameworkActive) {
        self.isDAQLoopActive = false;
        defered.reject('stoppingLoop');
        return defered.promise;
    }

    // Make sure that the loop should be executing.
    if (!self.runLoop) {
        self.isDAQLoopActive = false;
        defered.reject('stoppingLoop');
        return defered.promise;
    }

    defered.resolve(bundle);
    return defered.promise;
};

var reportLoopError = function (details) {
    var defered = q.defer();
    if(details !== 'delay') {
        if(details === 'stoppingLoop') {
            defered.reject(details);
            return defered.promise;
        } else {
            self.daqLoopStatus = 'reportError';
            // TODO: Get register names from readBindings.
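            // Fire 'onRefreshError' and let the module decide (via the
            // shouldContinue callback below) whether the loop keeps running.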
self.fire( 'onRefreshError', [ self.readBindings , details ], function (shouldContinue) { self.loopErrorEncountered = true; self.loopErrors.push({details:details,func:'reportError'}); self.runLoop = shouldContinue; if(shouldContinue) { self.printLoopErrors( 'onRefreshError b/c loopIteration.reportError', details ); defered.reject('delay'); } else { defered.reject('stoppingLoop'); } } ); } } else { defered.reject(details); } return defered.promise; }; var innerRunDAQLoop = function(deviceAttributes) { var defered = q.defer(); // Only run the DAQ loop if the active device is connected. // if(deviceAttributes.isConnected) { if(deviceAttributes.isConnected) { reportStartingDaqLoop() // Get the current list of bindings .then(getNeededAddresses, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Inform the active module that data is being refreshed .then(triggerModuleOnRefresh, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Collect data from the active device .then(requestDeviceValues, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Process the collected device data .then(processDeviceValues, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Render collected data/perform DOM manipulations .then(displayDeviceValues, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Inform the active module that data was refreshed .then(triggerModuleOnRefreshed, reportLoopError) .then(verifyFrameworkIsActive, reportLoopError) // Report that the DAQ loop has finished executing .then(reportFinishedDaqLoop, reportFinishedDaqLoop) .then(defered.resolve, defered.reject); } else { // Resolve & wait for the next iteration. triggerModuleOnRefreshed([]) .then(defered.resolve, defered.reject); } return defered.promise; }; this.qRunDAQLoop = function() { var defered = q.defer(); innerRunDAQLoop(self.activeDevice.savedAttributes) .then(defered.resolve, defered.reject); return defered.promise; }; this.runDAQLoop = function(onSuccess) { innerRunDAQLoop(self.activeDevice.savedAttributes) .then(onSuccess); }; this.runDAQMonitor = function() { var defered = q.defer(); defered.resolve(); return defered.promise; }; var configureLoopTimer = function() { // var defered = q.defer(); self.frameworkLoopProcessing = false; self.frameworkLoopReference = setTimeout( self.runLoopIteration, self.refreshRate ); // defered.resolve(); // return defered.promise; }; var configurePausedLoopTimer = function() { var defered = q.defer(); self.frameworkLoopProcessing = false; self.frameworkLoopReference = setTimeout( self.runLoopIteration, self.pausedRefreshRate ); defered.resolve(); return defered.promise; }; this.frameworkLoopProcessing = false; this.frameworkLoopReference = undefined; this.runLoopIteration = function() { // var defered = q.defer(); self.frameworkLoopProcessing = true; // Make sure that this framework instance is active. if(!self.frameworkActive) { self.isDAQLoopActive = false; // defered.reject(); // return defered.promise; } // Make sure that the loop should be executing. if (!self.runLoop) { self.isDAQLoopActive = false; // defered.reject('Loop not running.'); // return defered.promise; } if(self.isDAQLoopPaused) { // If the framework is paused then don't execute any of the update // code and configure a shorter timer. 
verifyFrameworkIsActive() .then(configurePausedLoopTimer) // .then(configurePausedLoopTimer, defered.reject) // .then(defered.resolve, defered.reject); } else { if(true) { var promises = []; promises.push(self.qRunDAQLoop()); // promises.push(self.runDAQMonitor()); q.allSettled(promises) .then(function(){ verifyFrameworkIsActive() .then(configureLoopTimer) // .then(configureLoopTimer, defered.reject) // .then(defered.resolve, defered.reject); }); } else { self.runDAQLoop(configureLoopTimer); } } // return defered.promise; }; /** * Function to run a single iteration of the module's refresh loop. * * @return {q.promise} Promise that resolves after the iteration of the * refresh loop finishes running. Rejects if an error was encountered * during the loop iteration. **/ this.loopIteration = function () { // !!! DEPRECATED !!! Now uses "this.runLoopIteration" !!!! var deferred = q.defer(); if(!self.frameworkActive) { deferred.reject(); return deferred.promise; } self.daqLoopFinished = false; self.daqLoopStatus = 'startingLoop'; var getIsPausedChecker = function(unPauseLoop) { var isPausedChecker = function() { if(self.isDAQLoopPaused) { if(!self.hasNotifiedUserOfPause) { self.isPausedListenerFunc(); self.hasNotifiedUserOfPause = true; } console.info('DAQ Loop is still paused'); setTimeout(isPausedChecker,100); } else { self.pauseDAQLoop = false; self.hasNotifiedUserOfPause = false; console.info('Resuming DAQ Loop'); self.daqLoopStatus = 'loopResuming'; unPauseLoop(); } }; return isPausedChecker; }; var pauseLoop = function() { var innerDeferred = q.defer(); if (self.pauseDAQLoop) { // DAQ loop is paused self.isDAQLoopPaused = true; self.daqLoopStatus = 'loopPaused'; setTimeout(getIsPausedChecker(innerDeferred.resolve),100); } else { innerDeferred.resolve(); } return innerDeferred.promise; }; var initLoopTimer = function() { var innerDeferred = q.defer(); self.daqLoopStatus = 'startingLoopMonitorTimer'; clearTimeout(self.daqLoopMonitorTimer); self.daqLoopMonitorTimer = setTimeout(self.daqMonitor, 1000); innerDeferred.resolve(); return innerDeferred.promise; }; if (!self.runLoop) { deferred.reject('Loop not running.'); return deferred.promise; } var checkModuleStatus = function(bindingsInfo) { var innerDeferred = q.defer(); self.daqLoopStatus = 'checkModuleStatus'; self.daqLoopFinished = true; clearTimeout(self.daqLoopMonitorTimer); if(self.moduleName === getActiveTabID()) { innerDeferred.resolve(bindingsInfo); } else { innerDeferred.reject(bindingsInfo); } return innerDeferred.promise; }; var handleDelayErr = function (details) { var innerDeferred = q.defer(); self.daqLoopStatus = 'handleDelayErr'; if(details === 'delay') { innerDeferred.resolve(); } else { innerDeferred.resolve(); } return innerDeferred.promise; }; var reportError = function(bundle) { var innerDefered = q.defer(); console.error('in reportError (presenter_framework.js)'); innerDefered.reject(bundle); return innerDefered.promise; }; var alertRefresh = function(bundle) { var innerDefered = q.defer(); console.log('in alertRefresh (presenter_framework.js)'); innerDefered.resolve(bundle); return innerDefered.promise; }; var alertRefreshed = function(bundle) { var innerDefered = q.defer(); console.log('in alertRefreshed (presenter_framework.js)'); innerDefered.resolve(bundle); return innerDefered.promise; }; var alertOn = function(bundle) { var innerDefered = q.defer(); console.log('in alertOn (presenter_framework.js)'); innerDefered.resolve(bundle); return innerDefered.promise; }; var handleIOError = function(bundle) { var 
innerDefered = q.defer();
        console.error('in handleIOError (presenter_framework.js)');
        innerDefered.reject(bundle);
        return innerDefered.promise;
    };
    // var setTimeout = function () {
    // };

    //checkModuleStatus()
    pauseLoop()
    .then(initLoopTimer, reportError)
    .then(getNeededAddresses, reportError)
    .then(alertRefresh, reportError)
    .then(requestDeviceValues, handleIOError)
    .then(processDeviceValues, reportError)
    .then(alertOn, reportError)
    .then(alertRefreshed, reportError)
    .then(checkModuleStatus, handleDelayErr)
    .then(self.qConfigureTimer, self.qExecOnUnloadModule)
    .then(deferred.resolve, deferred.reject);
    return deferred.promise;
};
var loopIteration = this.loopIteration;

/**
 * Determine how many bindings have been registered for the module.
 *
 * @return {int} The number of bindings registered for this module.
**/
this.numBindings = function () {
    return self.bindings.size();
};
var numBindings = this.numBindings;

this._OnRead = function (valueReadFromDevice) {
    var jquery = self.jquery;
    if(valueReadFromDevice !== undefined) {
        self.readBindings.forEach(function updateEachValue(bindingInfo, template) {
            try {
                var bindingName = bindingInfo.binding;
                var valRead = valueReadFromDevice.get(bindingName.toString());
                if (valRead !== undefined) {
                    var jquerySelector = '#' + bindingInfo.template;
                    if (bindingInfo.displayType === 'standard') {
                        var vals = jquery.html(jquerySelector, valRead.replace(' ','&nbsp;'));
                        if (vals.length === 0) {
                            jquerySelector = '.' + bindingInfo.template;
                            jquery.html(jquerySelector, valRead.replace(' ','&nbsp;'));
                        }
                    } else if (bindingInfo.displayType === 'input') {
                        if (!jquery.is(jquerySelector, ':focus')) {
                            jquery.val(jquerySelector, valRead.toString());
                        }
                    } else {
                    }
                }
            } catch(err) {
                console.warn('Error in FW updateEachValue', bindingName, err);
            }
        });
    }
};
var _OnRead = this._OnRead;

this._OnConfigControlEvent = function (event) {
    self.fire('onRegisterWrite', [event]);
    self.fire('onRegisterWritten', [event]);
};
var _OnConfigControlEvent = this._OnConfigControlEvent;

this.numModuleReloads = 0;
this.currentModuleName = '';
this.preConfiguredModuleName = '';
this.getActiveTabID = function() {
    var integerModuleName = '';
    integerModuleName += self.currentModuleName;
    integerModuleName += '-';
    integerModuleName += self.numModuleReloads.toString();
    return integerModuleName;
};
this.configureActiveTabID = function(tabID) {
    self.uniqueTabID = tabID;
};
this.configFramework = function(viewLoc) {
    userViewFile = viewLoc;
    self.userViewFile = viewLoc; // [moduleName]/view.html
    //self.fire('onModuleLoaded')
};
this.configureFrameworkData = function(jsonDataFiles) {
    moduleJsonFiles = jsonDataFiles;
    self.moduleJsonFiles = jsonDataFiles;
};
var configureFrameworkData = this.configureFrameworkData;

this.incrementNumberOfModuleReloads = function() {
    self.numModuleReloads += 1;
};
this.saveModuleName = function() {
    self.moduleName = self.getActiveTabID();
};
var saveModuleName = this.saveModuleName;

this.setCustomContext = function(data) {
    moduleTemplateBindings.custom = data;
    self.moduleTemplateBindings.custom = data;
};
var setCustomContext = this.setCustomContext;

this.tabClickHandler = function() {
    var visibleTabs = self.jquery.get('.module-tab');
    visibleTabs.off('click.sdFramework'+self.moduleName);
    var manageDevicesLink = self.jquery.get('#manage-link');
    manageDevicesLink.off('click.sdFramework'+self.moduleName);
    self.qExecOnUnloadModule();
};
var tabClickHandler = this.tabClickHandler;

this.attachNavListeners = function() {
    var visibleTabs = self.jquery.get('.module-tab');
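    // Bind with a namespaced event ('click.sdFramework<tabID>') so that
    // tabClickHandler can unbind these listeners later without touching
    // other click handlers.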
visibleTabs.on('click.sdFramework'+self.getActiveTabID(),self.tabClickHandler); var manageDevicesLink = self.jquery.get('#manage-link'); manageDevicesLink.on('click.sdFramework'+self.getActiveTabID(),self.tabClickHandler); }; var attachNavListeners = this.attachNavListeners; this.startFramework = function() { var deferred = q.defer(); if(self.flags.debug_startup) { console.info('executing qExecOnModuleLoaded'); } self.initializeStartupData() .then(self.qExecOnModuleLoaded) // self.qExecOnModuleLoaded() // .then(self.attachNavListeners, self.qExecOnLoadError) .then(deferred.resolve, deferred.reject); return deferred.promise; }; var exitProgramListenerName = 'presenter-framework-notifier'; function programExitListener() { var defered = q.defer(); function onSucc() { console.log('Finished saving successfully'); defered.resolve(); } function onErr(err) { console.log('Got an error saving',err); defered.resolve(); } // Save module startup data & always finish successfully. self.saveModuleStartupData('programExitListener') .then(onSucc, onErr); return defered.promise; } function addProgramExitListener() { try { ADD_K3_EXIT_LISTENER(exitProgramListenerName, programExitListener); } catch(err) { console.log('presenter_framework.js addProgramExitListener err', err); } } function removeProgramExitListener() { try { DELETE_K3_EXIT_LISTENER(exitProgramListenerName); } catch(err) { console.log('presenter_framework.js removeProgramExitListener err', err); } } this.runFramework = function() { var deferred = q.defer(); var handleError = function(details) { var innerDeferred = q.defer(); console.error('Presenter_Framework, runFramework Error:', details); deferred.reject(details); innerDeferred.resolve(); return innerDeferred.promise; }; var checkFirstDevice = function() { var innerDeferred = q.defer(); // self.jquery.checkFirstDeviceRadioButton(); innerDeferred.resolve(); return innerDeferred.promise; }; var setModuleName = function() { var innerDeferred = q.defer(); self.incrementNumberOfModuleReloads(); self.saveModuleName(); innerDeferred.resolve(); return innerDeferred.promise; }; // Add exit listener. addProgramExitListener(); checkFirstDevice() // Save the module's current instance name .then(setModuleName, self.qExecOnLoadError) // Update the currently-active device (This will force a valid device to be selected). 
.then(self.qUpdateActiveDevice, self.qExecOnLoadError) // Report that a new device has been selected .then(self.qExecOnDeviceSelected, self.qExecOnLoadError) // Clear all config-bindings (if not disabled) .then(self.qClearConfigBindings, self.qExecOnLoadError) // Re-configure any smartBindings .then(self.qUpdateSmartBindings, self.qExecOnLoadError) // Configure the device .then(self.executeSetupBindings, self.qExecOnLoadError) // Report that the device has been configured .then(self.qExecOnDeviceConfigured, self.qExecOnLoadError) // Render the module's template .then(self.qRenderModuleTemplate, self.qExecOnLoadError) // Connect connect any established writeBindings to jquery events .then(self.qEstablishWriteBindings, self.qExecOnLoadError) // Report that the module's template has been loaded .then(self.qExecOnTemplateLoaded, self.qExecOnLoadError) // Start the DAQ loop .then(self.qStartLoop, self.qExecOnLoadError) // Display the module's template .then(self.qShowUserTemplate, self.qExecOnLoadError) // Report that the module's template has been displayed .then(self.qExecOnTemplateDisplayed, self.qExecOnLoadError) // Re-draw the window to prevent window-disapearing issues .then(qRunRedraw, self.qExecOnLoadError) .then(deferred.resolve, deferred.reject); return deferred.promise; }; this.manageLJMError = function(errNum) { var isHandled = false; // Error for old firmware version... if (errNum === 1307) { showAlert('Current Device Firmware Version Not Supported By This Module'); isHandled = true; } return isHandled; }; var manageLJMError = this.manageLJMError; this.manageError = function(err) { showAlert('Error: '+err.toString()); }; var manageError = this.manageError; this.saveStartupDataReference = function(newStartupData) { self.startupData = undefined; self.isStartupDataValid = false; if(newStartupData) { try { self.startupData = JSON.parse(JSON.stringify(newStartupData)); self.isStartupDataValid = true; } catch(err) { console.error( 'presenter_framework: Error Copying startupData object', err ); self.startupData = {}; self.isStartupDataValid = false; } } }; this.saveModuleInfo = function (infoObj, constantsObj, moduleObj, moduleDataObj) { self.moduleData = moduleDataObj; moduleData = moduleDataObj; self.saveModuleName(); self.moduleInfoObj = infoObj; // The module.json file obj moduleInfoObj = infoObj; self.moduleConstants = constantsObj; // The moduleConstants.json file obj moduleConstants = constantsObj; self.module = moduleObj; // A reference to the created module module = moduleObj; self.startupData = undefined; self.isStartupDataValid = false; self.frameworkType = infoObj.framework; frameworkType = infoObj.framework; if(infoObj.framework_flags) { if(infoObj.framework_flags.debug_framework) { self.flags.debug_startup = true; } } try { self.deviceErrorCompiledTemplate = handlebars.compile( moduleDataObj.htmlFiles.device_errors_template ); self.printableDeviceErrorCompiledTemplate = handlebars.compile( moduleDataObj.htmlFiles.printable_device_errors_template ); } catch(err) { console.error('Error compiling deviceErrors template', err); } }; this.killInstance = function () { self.frameworkActive = false; }; var killInstance = this.killInstance; } util.inherits(Framework, EventEmitter);
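// ---------------------------------------------------------------------------
// A minimal sketch of the deferred-pipeline pattern used by runFramework
// above, assuming only the q promise library (`npm install q`). The step
// names here are illustrative and are not part of the framework's API.
// ---------------------------------------------------------------------------
var q = require('q');

function step(name) {
    // Each stage returns a promise so the chain can sequence work and
    // route any failure into the shared error handler below.
    return function (value) {
        var deferred = q.defer();
        console.log('running step:', name);
        deferred.resolve(value);
        return deferred.promise;
    };
}

function onLoadError(err) {
    // Plays the role that qExecOnLoadError plays in the chain above.
    console.error('load error:', err);
    throw err; // keep the rejection propagating down the chain
}

step('selectDevice')()
    .then(step('configureDevice'), onLoadError)
    .then(step('renderTemplate'), onLoadError)
    .then(step('startLoop'), onLoadError)
    .catch(function (err) {
        console.error('pipeline failed:', err);
    });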
// import expect from 'expect.js' import WKTReader from 'org/locationtech/jts/io/WKTReader' const reader = new WKTReader() describe('WKTReader', function () { it('should be able to read a Polygon', function () { reader.read('POLYGON((57.722165171745836 14.202919006347656,57.71909404549173 14.21055793762207,57.71753546383143 14.212703704833984,57.71675614783365 14.212446212768555,57.715655908448745 14.212532043457031,57.71382210182487 14.21030044555664,57.71244668589343 14.20832633972168,57.71354702281898 14.205236434936523,57.712584229838065 14.202919006347656,57.71515162088769 14.201374053955078,57.71528915455559 14.196224212646484,57.71758130542645 14.192447662353516,57.72065256003978 14.196138381958008,57.72092758505232 14.199399948120117,57.72207350010876 14.201288223266602,57.722165171745836 14.202919006347656))') }) })
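// ---------------------------------------------------------------------------
// A hedged companion sketch: WKTReader.read returns a JTS Geometry, so a test
// can assert basic properties after parsing. This assumes the expect.js
// import (commented out in the file above) is re-enabled; isEmpty() is part
// of the standard JTS Geometry interface.
// ---------------------------------------------------------------------------
import expect from 'expect.js'
import WKTReader from 'org/locationtech/jts/io/WKTReader'

describe('WKTReader (sketch)', function () {
  it('parses a POINT into a non-empty geometry', function () {
    const geometry = new WKTReader().read('POINT (14.2 57.7)')
    expect(geometry).to.be.ok()
    expect(geometry.isEmpty()).to.be(false)
  })
})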
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
    PermissionsMixin


class UserManager(BaseUserManager):

    def create_user(self, email, password=None, **extra_fields):
        """Creates and saves a new User"""
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)

        return user

    def create_superuser(self, email, password):
        """Creates and saves a new superuser"""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)

        return user


class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=254, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserManager()

    USERNAME_FIELD = 'email'
import { Fragment } from 'react'
import { Link } from '@material-ui/core'
import Courseinstandteachingast from '../../../components/support/courseinstandteachingast'
import style from '../../../styles/forum/showForum'
import General from '../../../components/template/general'

const courseinstandteaching = () => {
  return (
    <Fragment>
      <General>
        <div
          style={{
            display: 'flex',
            flex: '1 1 auto',
            justifyContent: 'space-between',
          }}
        >
          <div id="nav">
            <div className="top">
              <Link href="/support">Eduroom Support</Link>
              <label style={{ marginLeft: '20px', marginRight: '20px' }}>&gt;</label>
              <Link href="/support/getstart">Getting Started</Link>
              <label style={{ marginLeft: '20px', marginRight: '20px' }}>&gt;</label>
              Course Instructors and Teaching Assistants
              <Courseinstandteachingast />
            </div>
          </div>
          <img alt="background-img" src="/images/supforumbg.svg" className="background-img" />
          <style jsx>{style}</style>
          <style jsx>
            {`
              .background-img {
                position: fixed;
                bottom: 0;
                width: 100vw;
                z-index: 0;
              }
              #nav {
                width: 100%;
                z-index: 5;
              }
              .form {
                display: flex;
                text-align: center;
              }
              .sub {
                display: flex;
                width: 100%;
                justify-content: center;
              }
              .inner {
                width: 25%;
              }
              .paper {
                margin: 5%;
              }
              .top {
                padding: 50px 70px 0px 70px;
              }
            `}
          </style>
        </div>
      </General>
    </Fragment>
  )
}

export default courseinstandteaching
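// ---------------------------------------------------------------------------
// Hypothetical refactoring sketch (not part of the page above): the inline
// breadcrumb trail could be extracted into a small reusable component. The
// Breadcrumbs name and `trail` prop are illustrative assumptions.
// ---------------------------------------------------------------------------
import { Fragment } from 'react'
import { Link } from '@material-ui/core'

const Breadcrumbs = ({ trail }) => (
  <Fragment>
    {trail.map(({ href, label }, i) => (
      <Fragment key={label}>
        {i > 0 && <label style={{ marginLeft: '20px', marginRight: '20px' }}>&gt;</label>}
        {href ? <Link href={href}>{label}</Link> : label}
      </Fragment>
    ))}
  </Fragment>
)

export default Breadcrumbs

// Usage, mirroring the trail rendered inline above:
//   <Breadcrumbs trail={[
//     { href: '/support', label: 'Eduroom Support' },
//     { href: '/support/getstart', label: 'Getting Started' },
//     { label: 'Course Instructors and Teaching Assistants' },
//   ]} />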
import { expect } from 'chai'; import { pick } from 'lodash'; import { expenseStatus } from '../../../../../server/constants'; import { payExpense } from '../../../../../server/graphql/v1/mutations/expenses.js'; import { idEncode, IDENTIFIER_TYPES } from '../../../../../server/graphql/v2/identifiers'; import { randEmail, randUrl } from '../../../../stores'; import { fakeCollective, fakeExpense, fakeExpenseItem, fakePayoutMethod, fakeTransaction, fakeUser, randStr, } from '../../../../test-helpers/fake-data'; import { graphqlQueryV2, makeRequest } from '../../../../utils'; const createExpenseMutation = ` mutation createExpense($expense: ExpenseCreateInput!, $account: AccountReferenceInput!) { createExpense(expense: $expense, account: $account) { id legacyId invoiceInfo amount payee { legacyId } payeeLocation { address country } } }`; const deleteExpenseMutation = ` mutation deleteExpense($expense: ExpenseReferenceInput!) { deleteExpense(expense: $expense) { id legacyId } }`; const editExpenseMutation = ` mutation editExpense($expense: ExpenseUpdateInput!) { editExpense(expense: $expense) { id legacyId invoiceInfo description type amount status privateMessage invoiceInfo payoutMethod { id data name type } payeeLocation { address country } items { id url amount incurredAt description } tags } }`; const processExpenseMutation = ` mutation processExpense($expenseId: Int!, $action: ExpenseProcessAction!, $paymentParams: ProcessExpensePaymentParams) { processExpense(expense: { legacyId: $expenseId }, action: $action, paymentParams: $paymentParams) { id legacyId status } }`; /** A small helper to prepare an expense item to be submitted to GQLV2 */ const convertExpenseItemId = item => { return item?.id ? { ...item, id: idEncode(item.id, IDENTIFIER_TYPES.EXPENSE_ITEM) } : item; }; describe('server/graphql/v2/mutation/ExpenseMutations', () => { describe('createExpense', () => { const getValidExpenseData = () => ({ description: 'A valid expense', type: 'INVOICE', invoiceInfo: 'This will be printed on your invoice', payoutMethod: { type: 'PAYPAL', data: { email: randEmail() } }, items: [{ description: 'A first item', amount: 4200 }], payeeLocation: { address: '123 Potatoes street', country: 'BE' }, }); it('creates the expense with the linked items', async () => { const user = await fakeUser(); const collective = await fakeCollective(); const payee = await fakeCollective({ type: 'ORGANIZATION', admin: user.collective, address: null }); const expenseData = { ...getValidExpenseData(), payee: { legacyId: payee.id } }; const result = await graphqlQueryV2( createExpenseMutation, { expense: expenseData, account: { legacyId: collective.id } }, user, ); result.errors && console.error(result.errors); expect(result.errors).to.not.exist; expect(result.data).to.exist; expect(result.data.createExpense).to.exist; const createdExpense = result.data.createExpense; expect(createdExpense.invoiceInfo).to.eq(expenseData.invoiceInfo); expect(createdExpense.amount).to.eq(4200); expect(createdExpense.payee.legacyId).to.eq(payee.id); expect(createdExpense.payeeLocation).to.deep.equal(expenseData.payeeLocation); // Updates collective location await payee.reload(); expect(payee.address).to.eq('123 Potatoes street'); expect(payee.countryISO).to.eq('BE'); }); it("use collective's location if not provided", async () => { const user = await fakeUser({}, { address: '123 Potatoes Street', countryISO: 'BE' }); const collective = await fakeCollective(); const expenseData = { ...getValidExpenseData(), payee: { legacyId: 
user.collective.id }, payeeLocation: undefined, }; const result = await graphqlQueryV2( createExpenseMutation, { expense: expenseData, account: { legacyId: collective.id } }, user, ); result.errors && console.error(result.errors); expect(result.errors).to.not.exist; const createdExpense = result.data.createExpense; expect(createdExpense.payeeLocation).to.deep.equal({ address: '123 Potatoes Street', country: 'BE', }); }); it('must be an admin to submit expense as another account', async () => { const user = await fakeUser(); const collective = await fakeCollective(); const payee = await fakeCollective({ type: 'ORGANIZATION' }); const expenseData = { ...getValidExpenseData(), payee: { legacyId: payee.id } }; const result = await graphqlQueryV2( createExpenseMutation, { expense: expenseData, account: { legacyId: collective.id } }, user, ); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You must be an admin of the account to submit an expense in its name'); }); }); describe('editExpense', () => { describe('goes back to pending if editing critical fields', () => { it('Payout', async () => { const expense2 = await fakeExpense({ status: 'APPROVED', legacyPayoutMethod: 'other' }); const newPayoutMethod = await fakePayoutMethod({ CollectiveId: expense2.User.CollectiveId }); const newExpense2Data = { id: idEncode(expense2.id, IDENTIFIER_TYPES.EXPENSE), payoutMethod: { id: idEncode(newPayoutMethod.id, IDENTIFIER_TYPES.PAYOUT_METHOD) }, }; const result2 = await graphqlQueryV2(editExpenseMutation, { expense: newExpense2Data }, expense2.User); expect(result2.errors).to.not.exist; expect(result2.data.editExpense.status).to.equal('PENDING'); }); it('Item(s)', async () => { const expense = await fakeExpense({ status: 'APPROVED' }); const newExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), items: { url: randUrl(), amount: 2000, description: randStr() }, }; const result = await graphqlQueryV2(editExpenseMutation, { expense: newExpenseData }, expense.User); expect(result.errors).to.not.exist; expect(result.data.editExpense.status).to.equal('PENDING'); }); it('Description => should not change status', async () => { const expense = await fakeExpense({ status: 'APPROVED' }); const newExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), description: randStr() }; const result = await graphqlQueryV2(editExpenseMutation, { expense: newExpenseData }, expense.User); expect(result.errors).to.not.exist; expect(result.data.editExpense.status).to.equal('APPROVED'); expect(result.data.editExpense.amount).to.equal(expense.amount); }); }); it('replaces expense items', async () => { const expense = await fakeExpense({ amount: 3000 }); const expenseUpdateData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), items: [ { amount: 800, description: 'Burger', url: randUrl(), }, { amount: 200, description: 'French Fries', url: randUrl(), }, ], }; const result = await graphqlQueryV2(editExpenseMutation, { expense: expenseUpdateData }, expense.User); const itemsFromAPI = result.data.editExpense.items; expect(result.data.editExpense.amount).to.equal(1000); expect(itemsFromAPI.length).to.equal(2); expenseUpdateData.items.forEach(item => { const itemFromAPI = itemsFromAPI.find(a => a.description === item.description); expect(itemFromAPI).to.exist; expect(itemFromAPI.url).to.equal(item.url); expect(itemFromAPI.amount).to.equal(item.amount); }); }); it('updates the items', async () => { const expense = await fakeExpense({ amount: 10000, items: [] }); const items = ( await 
Promise.all([ fakeExpenseItem({ ExpenseId: expense.id, amount: 2000 }), fakeExpenseItem({ ExpenseId: expense.id, amount: 3000 }), fakeExpenseItem({ ExpenseId: expense.id, amount: 5000 }), ]) ).map(convertExpenseItemId); const updatedExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), items: [ pick(items[0], ['id', 'url', 'amount']), // Don't change the first one (value=2000) { ...pick(items[1], ['id', 'url']), amount: 7000 }, // Update amount for the second one { amount: 1000, url: randUrl() }, // Remove the third one and create another instead ], }; const result = await graphqlQueryV2(editExpenseMutation, { expense: updatedExpenseData }, expense.User); expect(result.errors).to.not.exist; const returnedItems = result.data.editExpense.items; const sumItems = returnedItems.reduce((total, item) => total + item.amount, 0); expect(sumItems).to.equal(10000); expect(returnedItems.find(a => a.id === items[0].id)).to.exist; expect(returnedItems.find(a => a.id === items[1].id)).to.exist; expect(returnedItems.find(a => a.id === items[2].id)).to.not.exist; expect(returnedItems.find(a => a.id === items[1].id).amount).to.equal(7000); }); it('can edit only one field without impacting the others', async () => { const expense = await fakeExpense({ privateMessage: randStr(), description: randStr() }); const updatedExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), privateMessage: randStr() }; const result = await graphqlQueryV2(editExpenseMutation, { expense: updatedExpenseData }, expense.User); expect(result.data.editExpense.privateMessage).to.equal(updatedExpenseData.privateMessage); expect(result.data.editExpense.description).to.equal(expense.description); }); it('updates the tags', async () => { const expense = await fakeExpense({ tags: [randStr()] }); const updatedExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), tags: ['fake', 'tags'] }; const result = await graphqlQueryV2(editExpenseMutation, { expense: updatedExpenseData }, expense.User); expect(result.data.editExpense.tags).to.deep.equal(updatedExpenseData.tags); }); it('updates the location', async () => { const expense = await fakeExpense({ payeeLocation: { address: 'Base address', country: 'FR' } }); const newLocation = { address: 'New address', country: 'BE' }; const updatedExpenseData = { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE), payeeLocation: newLocation }; const result = await graphqlQueryV2(editExpenseMutation, { expense: updatedExpenseData }, expense.User); result.errors && console.error(result.errors); expect(result.data.editExpense.payeeLocation).to.deep.equal(updatedExpenseData.payeeLocation); }); }); describe('deleteExpense', () => { const prepareGQLParams = expense => ({ expense: { id: idEncode(expense.id, IDENTIFIER_TYPES.EXPENSE) } }); describe('can delete rejected expenses', () => { it('if owner', async () => { const expense = await fakeExpense({ status: expenseStatus.REJECTED }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense), expense.User); expect(result.data.deleteExpense.legacyId).to.eq(expense.id); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.exist; }); it('if collective admin', async () => { const collectiveAdminUser = await fakeUser(); const collective = await fakeCollective({ admin: collectiveAdminUser.collective }); const expense = await fakeExpense({ status: expenseStatus.REJECTED, CollectiveId: collective.id }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense), 
collectiveAdminUser); expect(result.data.deleteExpense.legacyId).to.eq(expense.id); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.exist; }); it('if host admin', async () => { const hostAdminUser = await fakeUser(); const host = await fakeCollective({ admin: hostAdminUser.collective }); const collective = await fakeCollective({ HostCollectiveId: host.id }); const expense = await fakeExpense({ status: expenseStatus.REJECTED, CollectiveId: collective.id }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense), hostAdminUser); expect(result.data.deleteExpense.legacyId).to.eq(expense.id); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.exist; }); }); describe('cannot delete', () => { it('if backer', async () => { const collectiveBackerUser = await fakeUser(); const collective = await fakeCollective(); await collective.addUserWithRole(collectiveBackerUser, 'BACKER'); const expense = await fakeExpense({ status: expenseStatus.REJECTED, CollectiveId: collective.id }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense), collectiveBackerUser); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.not.exist; expect(result.errors).to.exist; expect(result.errors[0].message).to.eq( "You don't have permission to delete this expense or it needs to be rejected before being deleted", ); }); it('if unauthenticated', async () => { const expense = await fakeExpense({ status: expenseStatus.REJECTED }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense)); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.not.exist; expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('if not rejected', async () => { const expense = await fakeExpense({ status: expenseStatus.APPROVED }); const result = await graphqlQueryV2(deleteExpenseMutation, prepareGQLParams(expense), expense.User); await expense.reload({ paranoid: false }); expect(expense.deletedAt).to.not.exist; expect(result.errors).to.exist; expect(result.errors[0].message).to.eq( "You don't have permission to delete this expense or it needs to be rejected before being deleted", ); }); }); }); describe('processExpense', () => { let collective, collectiveAdmin, hostAdmin; before(async () => { hostAdmin = await fakeUser(); collectiveAdmin = await fakeUser(); const host = await fakeCollective({ admin: hostAdmin.collective }); collective = await fakeCollective({ HostCollectiveId: host.id, admin: collectiveAdmin.collective }); await hostAdmin.populateRoles(); }); describe('APPROVE', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot approve their own expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform 
this action'); }); it('Approves the expense', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('APPROVED'); }); it('Expense needs to be pending', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform this action'); }); it("Doesn't crash for already-approved expenses", async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('APPROVED'); }); }); describe('UNAPPROVE', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'UNAPPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot unapprove their own expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'UNAPPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform this action'); }); it('Unapproves the expense', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'UNAPPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('PENDING'); }); it('Expense needs to be approved', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'APPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform this action'); }); it("Doesn't crash for already-pending expenses", async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'UNAPPROVE' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('PENDING'); }); }); describe('REJECT', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'REJECT' }; const result = await graphqlQueryV2(processExpenseMutation, 
mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot reject their own expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'REJECT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform this action'); }); it('Rejects the expense', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PENDING' }); const mutationParams = { expenseId: expense.id, action: 'REJECT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('REJECTED'); }); it('Expense needs to be pending', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'REJECT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You are authenticated but forbidden to perform this action'); }); it("Doesn't crash for already-rejected expenses", async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'REJECTED' }); const mutationParams = { expenseId: expense.id, action: 'REJECT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.data.processExpense.status).to.eq('REJECTED'); }); }); describe('PAY', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot pay their own expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You don't have permission to pay this expense"); }); it('Collective admins cannot pay expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You don't have permission to pay this expense"); }); it('Expense needs to be approved', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'REJECTED' }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq( 'Expense needs to be approved. 
Current status of the expense: REJECTED.', ); }); it('Pays the expense', async () => { const payoutMethod = await fakePayoutMethod({ type: 'OTHER' }); const expense = await fakeExpense({ amount: 1000, CollectiveId: collective.id, status: 'APPROVED', PayoutMethodId: payoutMethod.id, }); // Updates the collective balance and pay the expense await fakeTransaction({ type: 'CREDIT', CollectiveId: collective.id, amount: expense.amount }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.data.processExpense.status).to.eq('PAID'); }); it('Cannot double-pay', async () => { const payoutMethod = await fakePayoutMethod({ type: 'OTHER' }); const expense = await fakeExpense({ amount: 1000, CollectiveId: collective.id, status: 'APPROVED', PayoutMethodId: payoutMethod.id, }); // Updates the collective balance and pay the expense await fakeTransaction({ type: 'CREDIT', CollectiveId: collective.id, amount: expense.amount }); const mutationParams = { expenseId: expense.id, action: 'PAY' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); const result2 = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.data.processExpense.status).to.eq('PAID'); expect(result2.errors).to.exist; expect(result2.errors[0].message).to.eq('Expense has already been paid'); }); }); describe('MARK_AS_UNPAID', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot mark as unpaid their own expenses', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You don't have permission to mark this expense as unpaid"); }); it('Collective admins cannot mark expenses as unpaid', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'PAID' }); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You don't have permission to mark this expense as unpaid"); }); it('Marks the expense as unpaid (with PayPal)', async () => { const payoutMethod = await fakePayoutMethod({ type: 'PAYPAL' }); const expense = await fakeExpense({ amount: 1000, CollectiveId: collective.id, status: 'APPROVED', PayoutMethodId: payoutMethod.id, }); // Updates the collective balance and pay the expense await fakeTransaction({ type: 'CREDIT', CollectiveId: collective.id, amount: expense.amount }); await payExpense(makeRequest(hostAdmin), { id: expense.id, forceManual: true }); expect(await collective.getBalance()).to.eq(0); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); 
expect(result.data.processExpense.status).to.eq('APPROVED'); expect(await collective.getBalance()).to.eq(expense.amount); await payExpense(makeRequest(hostAdmin), { id: expense.id, forceManual: true }); expect(await collective.getBalance()).to.eq(0); }); it('Marks the expense as unpaid', async () => { const payoutMethod = await fakePayoutMethod({ type: 'OTHER' }); const expense = await fakeExpense({ amount: 1000, CollectiveId: collective.id, status: 'APPROVED', PayoutMethodId: payoutMethod.id, }); // Updates the collective balance and pay the expense await fakeTransaction({ type: 'CREDIT', CollectiveId: collective.id, amount: expense.amount }); await payExpense(makeRequest(hostAdmin), { id: expense.id }); expect(await collective.getBalance()).to.eq(0); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.data.processExpense.status).to.eq('APPROVED'); expect(await collective.getBalance()).to.eq(expense.amount); }); it('Expense needs to be paid', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'REJECTED' }); const mutationParams = { expenseId: expense.id, action: 'MARK_AS_UNPAID' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You don't have permission to mark this expense as unpaid"); }); }); describe('SCHEDULE_FOR_PAYMENT', () => { it('Needs to be authenticated', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq('You need to be authenticated to perform this action'); }); it('User cannot schedule their own expenses for payment', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, expense.User); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You're authenticated but you can't schedule this expense for payment"); }); it('Collective admins cannot schedule expenses for payment', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'APPROVED' }); const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, collectiveAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You're authenticated but you can't schedule this expense for payment"); }); it('Expense needs to be approved', async () => { const expense = await fakeExpense({ CollectiveId: collective.id, status: 'REJECTED' }); const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' }; const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin); expect(result.errors).to.exist; expect(result.errors[0].message).to.eq("You're authenticated but you can't schedule this expense for payment"); }); it('Schedules the expense for payment', async () => { const payoutMethod = await fakePayoutMethod({ type: 'OTHER' }); const expense = await fakeExpense({ amount: 1000, 
        CollectiveId: collective.id,
        status: 'APPROVED',
        PayoutMethodId: payoutMethod.id,
      });
      const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' };
      const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin);
      expect(result.data.processExpense.status).to.eq('SCHEDULED_FOR_PAYMENT');
    });

    it('Cannot schedule for payment twice', async () => {
      const payoutMethod = await fakePayoutMethod({ type: 'OTHER' });
      const expense = await fakeExpense({
        amount: 1000,
        CollectiveId: collective.id,
        status: 'SCHEDULED_FOR_PAYMENT',
        PayoutMethodId: payoutMethod.id,
      });
      const mutationParams = { expenseId: expense.id, action: 'SCHEDULE_FOR_PAYMENT' };
      const result = await graphqlQueryV2(processExpenseMutation, mutationParams, hostAdmin);
      expect(result.errors).to.exist;
      expect(result.errors[0].message).to.eq('Expense is already scheduled for payment');
    });
  });
});
});
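// ---------------------------------------------------------------------------
// Hypothetical test helper (not part of the suite above): the processExpense
// cases all repeat the same build-params/run-mutation shape, which could be
// collapsed into one function. It relies only on graphqlQueryV2 and the
// processExpenseMutation string already defined in this file.
// ---------------------------------------------------------------------------
const processExpenseAs = (user, expense, action, paymentParams) => {
  const mutationParams = { expenseId: expense.id, action, paymentParams };
  return graphqlQueryV2(processExpenseMutation, mutationParams, user);
};

// Example rewrite of one case:
//   const result = await processExpenseAs(collectiveAdmin, expense, 'APPROVE');
//   expect(result.data.processExpense.status).to.eq('APPROVED');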
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __param = (this && this.__param) || function (paramIndex, decorator) { return function (target, key) { decorator(target, key, paramIndex); } }; import { RawContextKey, IContextKeyService } from '../../../platform/contextkey/common/contextkey.js'; import { createDecorator } from '../../../platform/instantiation/common/instantiation.js'; import { registerSingleton } from '../../../platform/instantiation/common/extensions.js'; import { KeybindingsRegistry } from '../../../platform/keybinding/common/keybindingsRegistry.js'; import { registerEditorCommand, EditorCommand } from '../../browser/editorExtensions.js'; import { ICodeEditorService } from '../../browser/services/codeEditorService.js'; import { Range } from '../../common/core/range.js'; import { dispose, combinedDisposable, DisposableStore } from '../../../base/common/lifecycle.js'; import { Emitter } from '../../../base/common/event.js'; import { localize } from '../../../nls.js'; import { IKeybindingService } from '../../../platform/keybinding/common/keybinding.js'; import { INotificationService } from '../../../platform/notification/common/notification.js'; import { isEqual } from '../../../base/common/resources.js'; export const ctxHasSymbols = new RawContextKey('hasSymbols', false); export const ISymbolNavigationService = createDecorator('ISymbolNavigationService'); let SymbolNavigationService = class SymbolNavigationService { constructor(contextKeyService, _editorService, _notificationService, _keybindingService) { this._editorService = _editorService; this._notificationService = _notificationService; this._keybindingService = _keybindingService; this._currentModel = undefined; this._currentIdx = -1; this._ignoreEditorChange = false; this._ctxHasSymbols = ctxHasSymbols.bindTo(contextKeyService); } reset() { var _a, _b; this._ctxHasSymbols.reset(); (_a = this._currentState) === null || _a === void 0 ? void 0 : _a.dispose(); (_b = this._currentMessage) === null || _b === void 0 ? 
void 0 : _b.dispose(); this._currentModel = undefined; this._currentIdx = -1; } put(anchor) { const refModel = anchor.parent.parent; if (refModel.references.length <= 1) { this.reset(); return; } this._currentModel = refModel; this._currentIdx = refModel.references.indexOf(anchor); this._ctxHasSymbols.set(true); this._showMessage(); const editorState = new EditorState(this._editorService); const listener = editorState.onDidChange(_ => { if (this._ignoreEditorChange) { return; } const editor = this._editorService.getActiveCodeEditor(); if (!editor) { return; } const model = editor.getModel(); const position = editor.getPosition(); if (!model || !position) { return; } let seenUri = false; let seenPosition = false; for (const reference of refModel.references) { if (isEqual(reference.uri, model.uri)) { seenUri = true; seenPosition = seenPosition || Range.containsPosition(reference.range, position); } else if (seenUri) { break; } } if (!seenUri || !seenPosition) { this.reset(); } }); this._currentState = combinedDisposable(editorState, listener); } revealNext(source) { if (!this._currentModel) { return Promise.resolve(); } // get next result and advance this._currentIdx += 1; this._currentIdx %= this._currentModel.references.length; const reference = this._currentModel.references[this._currentIdx]; // status this._showMessage(); // open editor, ignore events while that happens this._ignoreEditorChange = true; return this._editorService.openCodeEditor({ resource: reference.uri, options: { selection: Range.collapseToStart(reference.range), selectionRevealType: 3 /* NearTopIfOutsideViewport */ } }, source).finally(() => { this._ignoreEditorChange = false; }); } _showMessage() { var _a; (_a = this._currentMessage) === null || _a === void 0 ? void 0 : _a.dispose(); const kb = this._keybindingService.lookupKeybinding('editor.gotoNextSymbolFromResult'); const message = kb ? 
localize('location.kb', "Symbol {0} of {1}, {2} for next", this._currentIdx + 1, this._currentModel.references.length, kb.getLabel()) : localize('location', "Symbol {0} of {1}", this._currentIdx + 1, this._currentModel.references.length); this._currentMessage = this._notificationService.status(message); } }; SymbolNavigationService = __decorate([ __param(0, IContextKeyService), __param(1, ICodeEditorService), __param(2, INotificationService), __param(3, IKeybindingService) ], SymbolNavigationService); registerSingleton(ISymbolNavigationService, SymbolNavigationService, true); registerEditorCommand(new class extends EditorCommand { constructor() { super({ id: 'editor.gotoNextSymbolFromResult', precondition: ctxHasSymbols, kbOpts: { weight: 100 /* EditorContrib */, primary: 70 /* F12 */ } }); } runEditorCommand(accessor, editor) { return accessor.get(ISymbolNavigationService).revealNext(editor); } }); KeybindingsRegistry.registerCommandAndKeybindingRule({ id: 'editor.gotoNextSymbolFromResult.cancel', weight: 100 /* EditorContrib */, when: ctxHasSymbols, primary: 9 /* Escape */, handler(accessor) { accessor.get(ISymbolNavigationService).reset(); } }); // let EditorState = class EditorState { constructor(editorService) { this._listener = new Map(); this._disposables = new DisposableStore(); this._onDidChange = new Emitter(); this.onDidChange = this._onDidChange.event; this._disposables.add(editorService.onCodeEditorRemove(this._onDidRemoveEditor, this)); this._disposables.add(editorService.onCodeEditorAdd(this._onDidAddEditor, this)); editorService.listCodeEditors().forEach(this._onDidAddEditor, this); } dispose() { this._disposables.dispose(); this._onDidChange.dispose(); dispose(this._listener.values()); } _onDidAddEditor(editor) { this._listener.set(editor, combinedDisposable(editor.onDidChangeCursorPosition(_ => this._onDidChange.fire({ editor })), editor.onDidChangeModelContent(_ => this._onDidChange.fire({ editor })))); } _onDidRemoveEditor(editor) { var _a; (_a = this._listener.get(editor)) === null || _a === void 0 ? void 0 : _a.dispose(); this._listener.delete(editor); } }; EditorState = __decorate([ __param(0, ICodeEditorService) ], EditorState);
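// ---------------------------------------------------------------------------
// Sketch of the wrap-around indexing used by revealNext above: the service
// starts _currentIdx at -1, and each call increments and wraps modulo the
// reference count, so repeated invocations cycle 0, 1, ..., n-1, 0, ...
// ---------------------------------------------------------------------------
function nextIndex(current, length) {
    return (current + 1) % length;
}

// With three references: nextIndex(-1, 3) === 0, nextIndex(0, 3) === 1,
// nextIndex(1, 3) === 2, nextIndex(2, 3) === 0.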
# -*- coding: utf-8 -*- """ Examples of simplicial complexes There are two main types: manifolds and examples related to graph theory. For manifolds, there are functions defining the `n`-sphere for any `n`, the torus, `n`-dimensional real projective space for any `n`, the complex projective plane, surfaces of arbitrary genus, and some other manifolds, all as simplicial complexes. Aside from surfaces, this file also provides functions for constructing some other simplicial complexes: the simplicial complex of not-`i`-connected graphs on `n` vertices, the matching complex on n vertices, the chessboard complex for an `n` by `i` chessboard, and others. These provide examples of large simplicial complexes; for example, ``simplicial_complexes.NotIConnectedGraphs(7,2)`` has over a million simplices. All of these examples are accessible by typing ``simplicial_complexes.NAME``, where ``NAME`` is the name of the example. - :func:`BarnetteSphere` - :func:`BrucknerGrunbaumSphere` - :func:`ChessboardComplex` - :func:`ComplexProjectivePlane` - :func:`DunceHat` - :func:`K3Surface` - :func:`KleinBottle` - :func:`MatchingComplex` - :func:`MooreSpace` - :func:`NotIConnectedGraphs` - :func:`PoincareHomologyThreeSphere` - :func:`PseudoQuaternionicProjectivePlane` - :func:`RandomComplex` - :func:`RandomTwoSphere` - :func:`RealProjectivePlane` - :func:`RealProjectiveSpace` - :func:`RudinBall` - :func:`ShiftedComplex` - :func:`Simplex` - :func:`Sphere` - :func:`SumComplex` - :func:`SurfaceOfGenus` - :func:`Torus` - :func:`ZieglerBall` You can also get a list by typing ``simplicial_complexes.`` and hitting the TAB key. EXAMPLES:: sage: S = simplicial_complexes.Sphere(2) # the 2-sphere sage: S.homology() {0: 0, 1: 0, 2: Z} sage: simplicial_complexes.SurfaceOfGenus(3) Triangulation of an orientable surface of genus 3 sage: M4 = simplicial_complexes.MooreSpace(4) sage: M4.homology() {0: 0, 1: C4, 2: 0} sage: simplicial_complexes.MatchingComplex(6).homology() {0: 0, 1: Z^16, 2: 0} """ from six import iteritems from functools import reduce from sage.homology.simplicial_complex import SimplicialComplex from sage.structure.unique_representation import UniqueRepresentation # Below we define a function Simplex to construct a simplex as a # simplicial complex. We also need to use actual simplices as # simplices, hence: from sage.homology.simplicial_complex import Simplex as TrueSimplex from sage.sets.set import Set from sage.misc.functional import is_even from sage.misc.misc import union from sage.combinat.subset import Subsets import sage.misc.prandom as random # Miscellaneous utility functions. # The following two functions can be used to generate the facets for # the corresponding examples in sage.homology.examples. These take a # few seconds to run, so the actual examples have the facets # hard-coded. Thus the following functions are not currently used in # the Sage library. def facets_for_RP4(): """ Return the list of facets for a minimal triangulation of 4-dimensional real projective space. We use vertices numbered 1 through 16, define two facets, and define a certain subgroup `G` of the symmetric group `S_{16}`. Then the set of all facets is the `G`-orbit of the two given facets. See the description in Example 3.12 in Datta [Dat2007]_. 
EXAMPLES:: sage: from sage.homology.examples import facets_for_RP4 sage: A = facets_for_RP4() # long time (1 or 2 seconds) sage: SimplicialComplex(A) == simplicial_complexes.RealProjectiveSpace(4) # long time True """ # Define the group: from sage.groups.perm_gps.permgroup import PermutationGroup g1 = '(2,7)(4,10)(5,6)(11,12)' g2 = '(1, 2, 3, 4, 5, 10)(6, 8, 9)(11, 12, 13, 14, 15, 16)' G = PermutationGroup([g1, g2]) # Define the two simplices: t1 = (1, 2, 4, 5, 11) t2 = (1, 2, 4, 11, 13) # Apply the group elements to the simplices: facets = [] for g in G: d = g.dict() for t in [t1, t2]: new = tuple([d[j] for j in t]) if new not in facets: facets.append(new) return facets def facets_for_K3(): """ Returns the facets for a minimal triangulation of the K3 surface. This is a pure simplicial complex of dimension 4 with 16 vertices and 288 facets. The facets are obtained by constructing a few facets and a permutation group `G`, and then computing the `G`-orbit of those facets. See Casella and Kühnel in [CK2001]_ and Spreer and Kühnel [SK2011]_; the construction here uses the labeling from Spreer and Kühnel. EXAMPLES:: sage: from sage.homology.examples import facets_for_K3 sage: A = facets_for_K3() # long time (a few seconds) sage: SimplicialComplex(A) == simplicial_complexes.K3Surface() # long time True """ from sage.groups.perm_gps.permgroup import PermutationGroup G = PermutationGroup([[(1,3,8,4,9,16,15,2,14,12,6,7,13,5,10)], [(1,11,16),(2,10,14),(3,12,13),(4,9,15),(5,7,8)]]) return ([tuple([g(i) for i in (1,2,3,8,12)]) for g in G] +[tuple([g(i) for i in (1,2,5,8,14)]) for g in G]) def matching(A, B): r""" List of maximal matchings between the sets ``A`` and ``B``. A matching is a set of pairs `(a,b) \in A \times B` where each `a` and `b` appears in at most one pair. A maximal matching is one which is maximal with respect to inclusion of subsets of `A \times B`. INPUT: - ``A``, ``B`` -- list, tuple, or indeed anything which can be converted to a set. EXAMPLES:: sage: from sage.homology.examples import matching sage: matching([1,2], [3,4]) [{(1, 3), (2, 4)}, {(1, 4), (2, 3)}] sage: matching([0,2], [0]) [{(0, 0)}, {(2, 0)}] """ answer = [] if len(A) == 0 or len(B) == 0: return [set([])] for v in A: for w in B: for M in matching(set(A).difference([v]), set(B).difference([w])): new = M.union([(v,w)]) if new not in answer: answer.append(new) return answer class UniqueSimplicialComplex(SimplicialComplex, UniqueRepresentation): """ This combines :class:`SimplicialComplex` and :class:`UniqueRepresentation`. It is intended to be used to make standard examples of simplicial complexes unique. See :trac:`13566`. INPUT: - the inputs are the same as for a :class:`SimplicialComplex`, with one addition and two exceptions. The exceptions are that ``is_mutable`` and ``is_immutable`` are ignored: all instances of this class are immutable. The addition: - ``name`` -- string (optional), the string representation for this complex. 
EXAMPLES:: sage: from sage.homology.examples import UniqueSimplicialComplex sage: SimplicialComplex([[0,1]]) is SimplicialComplex([[0,1]]) False sage: UniqueSimplicialComplex([[0,1]]) is UniqueSimplicialComplex([[0,1]]) True sage: UniqueSimplicialComplex([[0,1]]) Simplicial complex with vertex set (0, 1) and facets {(0, 1)} sage: UniqueSimplicialComplex([[0,1]], name='The 1-simplex') The 1-simplex """ @staticmethod def __classcall__(self, maximal_faces=None, name=None, **kwds): """ TESTS:: sage: from sage.homology.examples import UniqueSimplicialComplex sage: UniqueSimplicialComplex([[1,2,3], [0,1,3]]) is UniqueSimplicialComplex([(1,2,3), (0,1,3)]) True sage: X = UniqueSimplicialComplex([[1,2,3], [0,1,3]]) sage: X is UniqueSimplicialComplex(X) True Testing ``from_characteristic_function``:: sage: UniqueSimplicialComplex(from_characteristic_function=(lambda x:sum(x)<=4, range(5))) Simplicial complex with vertex set (0, 1, 2, 3, 4) and facets {(0, 4), (0, 1, 2), (0, 1, 3)} """ char_fcn = kwds.get('from_characteristic_function', None) if char_fcn: kwds['from_characteristic_function'] = (char_fcn[0], tuple(char_fcn[1])) if maximal_faces: # Test to see if maximal_faces is a cell complex or another # object which can be converted to a simplicial complex: C = None if isinstance(maximal_faces, SimplicialComplex): C = maximal_faces else: try: C = maximal_faces._simplicial_() except AttributeError: if not isinstance(maximal_faces, (list, tuple, Simplex)): # Convert it into a list (in case it is an iterable) maximal_faces = list(maximal_faces) if len(maximal_faces) != 0: vertex_set = reduce(union, maximal_faces) if C is not None: maximal_faces = C.facets() # Now convert maximal_faces to a tuple of tuples, so that it is hashable. maximal_faces = tuple([tuple(_) for _ in maximal_faces]) return super(UniqueSimplicialComplex, self).__classcall__(self, maximal_faces, name=name, **kwds) def __init__(self, maximal_faces=None, name=None, **kwds): """ TESTS:: sage: from sage.homology.examples import UniqueSimplicialComplex sage: UniqueSimplicialComplex([[1,2,3], [0,1,3]], is_mutable=True).is_mutable() False """ if 'is_mutable' in kwds: del kwds['is_mutable'] if 'is_immutable' in kwds: del kwds['is_immutable'] self._name = name SimplicialComplex.__init__(self, maximal_faces=maximal_faces, is_mutable=False, **kwds) def _repr_(self): """ Print representation If the argument ``name`` was specified when defining the complex, use that. Otherwise, use the print representation from the class :class:`SimplicialComplex`. TESTS:: sage: from sage.homology.examples import UniqueSimplicialComplex sage: UniqueSimplicialComplex([[0,1]]) Simplicial complex with vertex set (0, 1) and facets {(0, 1)} sage: UniqueSimplicialComplex([[0,1]], name='Joe') Joe """ if self._name: return self._name return SimplicialComplex._repr_(self) # Now the functions that produce the actual examples... def Sphere(n): """ A minimal triangulation of the `n`-dimensional sphere. 
INPUT: - ``n`` -- positive integer EXAMPLES:: sage: simplicial_complexes.Sphere(2) Minimal triangulation of the 2-sphere sage: simplicial_complexes.Sphere(5).homology() {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: Z} sage: [simplicial_complexes.Sphere(n).euler_characteristic() for n in range(6)] [2, 0, 2, 0, 2, 0] sage: [simplicial_complexes.Sphere(n).f_vector() for n in range(6)] [[1, 2], [1, 3, 3], [1, 4, 6, 4], [1, 5, 10, 10, 5], [1, 6, 15, 20, 15, 6], [1, 7, 21, 35, 35, 21, 7]] """ S = TrueSimplex(n+1) facets = tuple(S.faces()) return UniqueSimplicialComplex(facets, name='Minimal triangulation of the {}-sphere'.format(n)) def Simplex(n): """ An `n`-dimensional simplex, as a simplicial complex. INPUT: - ``n`` -- a non-negative integer OUTPUT: the simplicial complex consisting of the `n`-simplex on vertices `(0, 1, ..., n)` and all of its faces. EXAMPLES:: sage: simplicial_complexes.Simplex(3) The 3-simplex sage: simplicial_complexes.Simplex(5).euler_characteristic() 1 """ return UniqueSimplicialComplex([TrueSimplex(n)], name='The {}-simplex'.format(n)) def Torus(): r""" A minimal triangulation of the torus. This is a simplicial complex with 7 vertices, 21 edges and 14 faces. It is the unique triangulation of the torus with 7 vertices, and has been found by Möbius in 1861. This is also the combinatorial structure of the Császár polyhedron (see :wikipedia:`Császár_polyhedron`). EXAMPLES:: sage: T = simplicial_complexes.Torus(); T.homology(1) Z x Z sage: T.f_vector() [1, 7, 21, 14] TESTS:: sage: T.flip_graph().is_isomorphic(graphs.HeawoodGraph()) True REFERENCES: - [Lut2002]_ """ return UniqueSimplicialComplex([[0,1,2], [1,2,4], [1,3,4], [1,3,6], [0,1,5], [1,5,6], [2,3,5], [2,4,5], [2,3,6], [0,2,6], [0,3,4], [0,3,5], [4,5,6], [0,4,6]], name='Minimal triangulation of the torus') def RealProjectivePlane(): """ A minimal triangulation of the real projective plane. EXAMPLES:: sage: P = simplicial_complexes.RealProjectivePlane() sage: Q = simplicial_complexes.ProjectivePlane() sage: P == Q True sage: P.cohomology(1) 0 sage: P.cohomology(2) C2 sage: P.cohomology(1, base_ring=GF(2)) Vector space of dimension 1 over Finite Field of size 2 sage: P.cohomology(2, base_ring=GF(2)) Vector space of dimension 1 over Finite Field of size 2 """ return UniqueSimplicialComplex([[0,1,2], [0,2,3], [0,1,5], [0,4,5], [0,3,4], [1,2,4], [1,3,4], [1,3,5], [2,3,5], [2,4,5]], name='Minimal triangulation of the real projective plane') ProjectivePlane = RealProjectivePlane def KleinBottle(): """ A minimal triangulation of the Klein bottle, as presented for example in Davide Cervone's thesis [Cer1994]_. EXAMPLES:: sage: simplicial_complexes.KleinBottle() Minimal triangulation of the Klein bottle """ return UniqueSimplicialComplex([[2,3,7], [1,2,3], [1,3,5], [1,5,7], [1,4,7], [2,4,6], [1,2,6], [1,6,0], [1,4,0], [2,4,0], [3,4,7], [3,4,6], [3,5,6], [5,6,0], [2,5,0], [2,5,7]], name='Minimal triangulation of the Klein bottle') def SurfaceOfGenus(g, orientable=True): """ A surface of genus `g`. INPUT: - ``g`` -- a non-negative integer. The desired genus - ``orientable`` -- boolean (optional, default ``True``). If ``True``, return an orientable surface, and if ``False``, return a non-orientable surface. In the orientable case, return a sphere if `g` is zero, and otherwise return a `g`-fold connected sum of a torus with itself. In the non-orientable case, raise an error if `g` is zero. If `g` is positive, return a `g`-fold connected sum of a real projective plane with itself. 
EXAMPLES:: sage: simplicial_complexes.SurfaceOfGenus(2) Triangulation of an orientable surface of genus 2 sage: simplicial_complexes.SurfaceOfGenus(1, orientable=False) Triangulation of a non-orientable surface of genus 1 """ if g == 0: if not orientable: raise ValueError("No non-orientable surface of genus zero.") else: return Sphere(2) if orientable: T = Torus() else: T = RealProjectivePlane() S = T for i in range(g-1): S = S.connected_sum(T) if orientable: orient_str = 'n orientable' else: orient_str = ' non-orientable' return UniqueSimplicialComplex(S, name='Triangulation of a{} surface of genus {}'.format(orient_str, g)) def MooreSpace(q): """ Triangulation of the mod `q` Moore space. INPUT: - ``q`` -- integer, at least 2 This is a simplicial complex with simplices of dimension 0, 1, and 2, such that its reduced homology is isomorphic to `\\ZZ/q\\ZZ` in dimension 1, zero otherwise. If `q=2`, this is the real projective plane. If `q>2`, then construct it as follows: start with a triangle with vertices 1, 2, 3. We take a `3q`-gon forming a `q`-fold cover of the triangle, and we form the resulting complex as an identification space of the `3q`-gon. To triangulate this identification space, put `q` vertices `A_0`, ..., `A_{q-1}`, in the interior, each of which is connected to 1, 2, 3 (two facets each: `[1, 2, A_i]`, `[2, 3, A_i]`). Put `q` more vertices in the interior: `B_0`, ..., `B_{q-1}`, with facets `[3, 1, B_i]`, `[3, B_i, A_i]`, `[1, B_i, A_{i+1}]`, `[B_i, A_i, A_{i+1}]`. Then triangulate the interior polygon with vertices `A_0`, `A_1`, ..., `A_{q-1}`. EXAMPLES:: sage: simplicial_complexes.MooreSpace(2) Minimal triangulation of the real projective plane sage: simplicial_complexes.MooreSpace(3).homology()[1] C3 sage: simplicial_complexes.MooreSpace(4).suspension().homology()[2] C4 sage: simplicial_complexes.MooreSpace(8) Triangulation of the mod 8 Moore space """ if q <= 1: raise ValueError("The mod q Moore space is only defined if q is at least 2") if q == 2: return RealProjectivePlane() facets = [] for i in range(q): Ai = "A" + str(i) Aiplus = "A" + str((i+1)%q) Bi = "B" + str(i) facets.append([1, 2, Ai]) facets.append([2, 3, Ai]) facets.append([3, 1, Bi]) facets.append([3, Bi, Ai]) facets.append([1, Bi, Aiplus]) facets.append([Bi, Ai, Aiplus]) for i in range(1, q-1): Ai = "A" + str(i) Aiplus = "A" + str((i+1)%q) facets.append(["A0", Ai, Aiplus]) return UniqueSimplicialComplex(facets, name='Triangulation of the mod {} Moore space'.format(q)) def ComplexProjectivePlane(): """ A minimal triangulation of the complex projective plane. This was constructed by Kühnel and Banchoff [KB1983]_.
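It has 9 vertices, the smallest number possible.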
EXAMPLES:: sage: C = simplicial_complexes.ComplexProjectivePlane() sage: C.f_vector() [1, 9, 36, 84, 90, 36] sage: C.homology(2) Z sage: C.homology(4) Z """ return UniqueSimplicialComplex( [[1, 2, 4, 5, 6], [2, 3, 5, 6, 4], [3, 1, 6, 4, 5], [1, 2, 4, 5, 9], [2, 3, 5, 6, 7], [3, 1, 6, 4, 8], [2, 3, 6, 4, 9], [3, 1, 4, 5, 7], [1, 2, 5, 6, 8], [3, 1, 5, 6, 9], [1, 2, 6, 4, 7], [2, 3, 4, 5, 8], [4, 5, 7, 8, 9], [5, 6, 8, 9, 7], [6, 4, 9, 7, 8], [4, 5, 7, 8, 3], [5, 6, 8, 9, 1], [6, 4, 9, 7, 2], [5, 6, 9, 7, 3], [6, 4, 7, 8, 1], [4, 5, 8, 9, 2], [6, 4, 8, 9, 3], [4, 5, 9, 7, 1], [5, 6, 7, 8, 2], [7, 8, 1, 2, 3], [8, 9, 2, 3, 1], [9, 7, 3, 1, 2], [7, 8, 1, 2, 6], [8, 9, 2, 3, 4], [9, 7, 3, 1, 5], [8, 9, 3, 1, 6], [9, 7, 1, 2, 4], [7, 8, 2, 3, 5], [9, 7, 2, 3, 6], [7, 8, 3, 1, 4], [8, 9, 1, 2, 5]], name='Minimal triangulation of the complex projective plane') def PseudoQuaternionicProjectivePlane(): r""" Returns a pure simplicial complex of dimension 8 with 490 facets. .. WARNING:: This is expected to be a triangulation of the projective plane `HP^2` over the ring of quaternions, but this has not been proved yet. This simplicial complex has the same homology as `HP^2`. Its automorphism group is isomorphic to the alternating group `A_5` and acts transitively on vertices. This is defined here using the description in [BK1992]_. This article deals with three different triangulations. This procedure returns the only one which has a transitive group of automorphisms. EXAMPLES:: sage: HP2 = simplicial_complexes.PseudoQuaternionicProjectivePlane() ; HP2 Simplicial complex with 15 vertices and 490 facets sage: HP2.f_vector() [1, 15, 105, 455, 1365, 3003, 4515, 4230, 2205, 490] Checking its automorphism group:: sage: HP2.automorphism_group().is_isomorphic(AlternatingGroup(5)) True """ from sage.groups.perm_gps.permgroup import PermutationGroup P = [(1,2,3,4,5),(6,7,8,9,10),(11,12,13,14,15)] S = [(1,6,11),(2,15,14),(3,13,8),(4,7,5),(9,12,10)] start_list = [ (1,2,3,6,8,11,13,14,15), # A (1,3,6,8,9,10,11,12,13), # B (1,2,6,9,10,11,12,14,15), # C (1,2,3,4,7,9,12,14,15), # D (1,2,4,7,9,10,12,13,14), # E (1,2,6,8,9,10,11,14,15), # F (1,2,3,4,5,6,9,11,13), # G (1,3,5,6,8,9,10,11,12), # H (1,3,5,6,7,8,9,10,11), # I (1,2,3,4,5,7,10,12,15), # J (1,2,3,7,8,10,12,13,14), # K (2,5,6,7,8,9,10,13,14), # M (3,4,6,7,11,12,13,14,15), # L (3,4,6,7,10,12,13,14,15)] # N return UniqueSimplicialComplex([ [g(index) for index in tuple] for tuple in start_list for g in PermutationGroup([P,S]) ]) def PoincareHomologyThreeSphere(): """ A triangulation of the Poincaré homology 3-sphere. This is a manifold whose integral homology is identical to the ordinary 3-sphere, but it is not simply connected. In particular, its fundamental group is the binary icosahedral group, which has order 120. The triangulation given here has 16 vertices and is due to Björner and Lutz [BL2000]_. 
EXAMPLES:: sage: S3 = simplicial_complexes.Sphere(3) sage: Sigma3 = simplicial_complexes.PoincareHomologyThreeSphere() sage: S3.homology() == Sigma3.homology() True sage: Sigma3.fundamental_group().cardinality() # long time 120 """ return UniqueSimplicialComplex( [[1, 2, 4, 9], [1, 2, 4, 15], [1, 2, 6, 14], [1, 2, 6, 15], [1, 2, 9, 14], [1, 3, 4, 12], [1, 3, 4, 15], [1, 3, 7, 10], [1, 3, 7, 12], [1, 3, 10, 15], [1, 4, 9, 12], [1, 5, 6, 13], [1, 5, 6, 14], [1, 5, 8, 11], [1, 5, 8, 13], [1, 5, 11, 14], [1, 6, 13, 15], [1, 7, 8, 10], [1, 7, 8, 11], [1, 7, 11, 12], [1, 8, 10, 13], [1, 9, 11, 12], [1, 9, 11, 14], [1, 10, 13, 15], [2, 3, 5, 10], [2, 3, 5, 11], [2, 3, 7, 10], [2, 3, 7, 13], [2, 3, 11, 13], [2, 4, 9, 13], [2, 4, 11, 13], [2, 4, 11, 15], [2, 5, 8, 11], [2, 5, 8, 12], [2, 5, 10, 12], [2, 6, 10, 12], [2, 6, 10, 14], [2, 6, 12, 15], [2, 7, 9, 13], [2, 7, 9, 14], [2, 7, 10, 14], [2, 8, 11, 15], [2, 8, 12, 15], [3, 4, 5, 14], [3, 4, 5, 15], [3, 4, 12, 14], [3, 5, 10, 15], [3, 5, 11, 14], [3, 7, 12, 13], [3, 11, 13, 14], [3, 12, 13, 14], [4, 5, 6, 7], [4, 5, 6, 14], [4, 5, 7, 15], [4, 6, 7, 11], [4, 6, 10, 11], [4, 6, 10, 14], [4, 7, 11, 15], [4, 8, 9, 12], [4, 8, 9, 13], [4, 8, 10, 13], [4, 8, 10, 14], [4, 8, 12, 14], [4, 10, 11, 13], [5, 6, 7, 13], [5, 7, 9, 13], [5, 7, 9, 15], [5, 8, 9, 12], [5, 8, 9, 13], [5, 9, 10, 12], [5, 9, 10, 15], [6, 7, 11, 12], [6, 7, 12, 13], [6, 10, 11, 12], [6, 12, 13, 15], [7, 8, 10, 14], [7, 8, 11, 15], [7, 8, 14, 15], [7, 9, 14, 15], [8, 12, 14, 15], [9, 10, 11, 12], [9, 10, 11, 16], [9, 10, 15, 16], [9, 11, 14, 16], [9, 14, 15, 16], [10, 11, 13, 16], [10, 13, 15, 16], [11, 13, 14, 16], [12, 13, 14, 15], [13, 14, 15, 16]], name='Triangulation of the Poincare homology 3-sphere') def RealProjectiveSpace(n): r""" A triangulation of `\Bold{R}P^n` for any `n \geq 0`. INPUT: - ``n`` -- integer, the dimension of the real projective space to construct The first few cases are pretty trivial: - `\Bold{R}P^0` is a point. - `\Bold{R}P^1` is a circle, triangulated as the boundary of a single 2-simplex. - `\Bold{R}P^2` is the real projective plane, here given its minimal triangulation with 6 vertices, 15 edges, and 10 triangles. - `\Bold{R}P^3`: any triangulation has at least 11 vertices by a result of Walkup [Wal1970]_; this function returns a triangulation with 11 vertices, as given by Lutz [Lut2005]_. - `\Bold{R}P^4`: any triangulation has at least 16 vertices by a result of Walkup; this function returns a triangulation with 16 vertices as given by Lutz; see also Datta [Dat2007]_, Example 3.12. - `\Bold{R}P^n`: Lutz has found a triangulation of `\Bold{R}P^5` with 24 vertices, but it does not seem to have been published. Kühnel [Kuh1987]_ has described a triangulation of `\Bold{R}P^n`, in general, with `2^{n+1}-1` vertices; see also Datta, Example 3.21. This triangulation is presumably not minimal, but it seems to be the best in the published literature as of this writing. So this function returns it when `n > 4`. ALGORITHM: For `n < 4`, these are constructed explicitly by listing the facets. For `n = 4`, this is constructed by specifying 16 vertices, two facets, and a certain subgroup `G` of the symmetric group `S_{16}`. Then the set of all facets is the `G`-orbit of the two given facets. This is implemented here by explicitly listing all of the facets; the facets can be computed by the function :func:`~sage.homology.simplicial_complex.facets_for_RP4`, but running the function takes a few seconds. 
For `n > 4`, the construction is as follows: let `S` denote the simplicial complex structure on the `n`-sphere given by the first barycentric subdivision of the boundary of an `(n+1)`-simplex. This has a simplicial antipodal action: if `V` denotes the vertices in the boundary of the simplex, then the vertices in its barycentric subdivision `S` correspond to nonempty proper subsets `U` of `V`, and the antipodal action sends any subset `U` to its complement. One can show that modding out by this action results in a triangulation for `\Bold{R}P^n`. To find the facets in this triangulation, find the facets in `S`. These are identified in pairs to form `\Bold{R}P^n`, so choose a representative from each pair: for each facet in `S`, replace any vertex in `S` containing 0 with its complement. Of course these complexes increase in size pretty quickly as `n` increases. EXAMPLES:: sage: P3 = simplicial_complexes.RealProjectiveSpace(3) sage: P3.f_vector() [1, 11, 51, 80, 40] sage: P3.homology() {0: 0, 1: C2, 2: 0, 3: Z} sage: P4 = simplicial_complexes.RealProjectiveSpace(4) sage: P4.f_vector() [1, 16, 120, 330, 375, 150] sage: P4.homology() # long time {0: 0, 1: C2, 2: 0, 3: C2, 4: 0} sage: P5 = simplicial_complexes.RealProjectiveSpace(5) # long time (44s on sage.math, 2012) sage: P5.f_vector() # long time [1, 63, 903, 4200, 8400, 7560, 2520] The following computation can take a long time -- over half an hour -- with Sage's default computation of homology groups, but if you have CHomP installed, Sage will use that and the computation should only take a second or two. (You can download CHomP from http://chomp.rutgers.edu/, or you can install it as a Sage package using ``sage -i chomp``). :: sage: P5.homology() # long time # optional - CHomP {0: 0, 1: C2, 2: 0, 3: C2, 4: 0, 5: Z} sage: simplicial_complexes.RealProjectiveSpace(2).dimension() 2 sage: P3.dimension() 3 sage: P4.dimension() # long time 4 sage: P5.dimension() # long time 5 """ if n == 0: return Simplex(0) if n == 1: return Sphere(1) if n == 2: return RealProjectivePlane() if n == 3: # Minimal triangulation found by Walkup and given # explicitly by Lutz return UniqueSimplicialComplex( [[1, 2, 3, 7], [1, 4, 7, 9], [2, 3, 4, 8], [2, 5, 8, 10], [3, 6, 7, 10], [1, 2, 3, 11], [1, 4, 7, 10], [2, 3, 4, 11], [2, 5, 9, 10], [3, 6, 8, 9], [1, 2, 6, 9], [1, 4, 8, 9], [2, 3, 7, 8], [2, 6, 9, 10], [3, 6, 9, 10], [1, 2, 6, 11], [1, 4, 8, 10], [2, 4, 6, 10], [3, 4, 5, 9], [4, 5, 6, 7], [1, 2, 7, 9], [1, 5, 6, 8], [2, 4, 6, 11], [3, 4, 5, 11], [4, 5, 6, 11], [1, 3, 5, 10], [1, 5, 6, 11], [2, 4, 8, 10], [3, 4, 8, 9], [4, 5, 7, 9], [1, 3, 5, 11], [1, 5, 8, 10], [2, 5, 7, 8], [3, 5, 9, 10], [4, 6, 7, 10], [1, 3, 7, 10], [1, 6, 8, 9], [2, 5, 7, 9], [3, 6, 7, 8], [5, 6, 7, 8]], name='Minimal triangulation of RP^3') if n == 4: return UniqueSimplicialComplex( [(1, 3, 8, 12, 13), (2, 7, 8, 13, 16), (4, 8, 9, 12, 14), (2, 6, 10, 12, 16), (5, 7, 9, 10, 13), (1, 2, 7, 8, 15), (1, 3, 9, 11, 16), (5, 6, 8, 13, 16), (1, 3, 8, 11, 13), (3, 4, 10, 13, 15), (4, 6, 9, 12, 15), (2, 4, 6, 11, 13), (2, 3, 9, 12, 16), (1, 6, 9, 12, 15), (2, 5, 10, 11, 12), (1, 7, 8, 12, 15), (2, 6, 9, 13, 16), (1, 5, 9, 11, 15), (4, 9, 10, 13, 14), (2, 7, 8, 15, 16), (2, 3, 9, 12, 14), (1, 6, 7, 10, 14), (2, 5, 10, 11, 15), (1, 2, 4, 13, 14), (1, 6, 10, 14, 16), (2, 6, 9, 12, 16), (1, 3, 9, 12, 16), (4, 5, 7, 11, 16), (5, 9, 10, 11, 15), (3, 5, 8, 12, 14), (5, 6, 9, 13, 16), (5, 6, 9, 13, 15), (1, 3, 4, 10, 16), (1, 6, 10, 12, 16), (2, 4, 6, 9, 13), (2, 4, 6, 9, 12), (1, 2, 4, 11, 13), (7, 9, 10, 
13, 14), (1, 7, 8, 12, 13), (4, 6, 7, 11, 12), (3, 4, 6, 11, 13), (1, 5, 6, 9, 15), (1, 6, 7, 14, 15), (2, 3, 7, 14, 15), (2, 6, 10, 11, 12), (5, 7, 9, 10, 11), (1, 2, 4, 5, 14), (3, 5, 10, 13, 15), (3, 8, 9, 12, 14), (5, 9, 10, 13, 15), (2, 6, 8, 13, 16), (1, 2, 7, 13, 14), (1, 7, 10, 12, 13), (3, 4, 6, 13, 15), (4, 9, 10, 13, 15), (2, 3, 10, 12, 16), (1, 2, 5, 14, 15), (2, 6, 8, 10, 11), (1, 3, 10, 12, 13), (4, 8, 9, 12, 15), (1, 3, 8, 9, 11), (4, 6, 7, 12, 15), (1, 8, 9, 11, 15), (4, 5, 8, 14, 16), (1, 2, 8, 11, 13), (3, 6, 8, 11, 13), (3, 6, 8, 11, 14), (3, 5, 8, 12, 13), (3, 7, 9, 11, 14), (4, 6, 9, 13, 15), (2, 3, 5, 10, 12), (4, 7, 8, 15, 16), (1, 2, 7, 14, 15), (3, 7, 9, 11, 16), (3, 6, 7, 14, 15), (2, 6, 8, 11, 13), (4, 8, 9, 10, 14), (1, 4, 10, 13, 14), (4, 8, 9, 10, 15), (2, 7, 9, 13, 16), (1, 6, 9, 12, 16), (2, 3, 7, 9, 14), (4, 8, 10, 15, 16), (1, 5, 9, 11, 16), (1, 5, 6, 14, 15), (5, 7, 9, 11, 16), (4, 5, 7, 11, 12), (5, 7, 10, 11, 12), (2, 3, 10, 15, 16), (1, 2, 7, 8, 13), (1, 6, 7, 10, 12), (1, 3, 10, 12, 16), (7, 9, 10, 11, 14), (1, 7, 10, 13, 14), (1, 2, 4, 5, 11), (3, 4, 6, 7, 11), (1, 6, 7, 12, 15), (1, 3, 4, 10, 13), (1, 4, 10, 14, 16), (2, 4, 6, 11, 12), (5, 6, 8, 14, 16), (3, 5, 6, 8, 13), (3, 5, 6, 8, 14), (1, 2, 8, 11, 15), (1, 4, 5, 14, 16), (2, 3, 7, 15, 16), (8, 9, 10, 11, 14), (1, 3, 4, 11, 16), (6, 8, 10, 14, 16), (8, 9, 10, 11, 15), (1, 3, 4, 11, 13), (2, 4, 5, 12, 14), (2, 4, 9, 13, 14), (3, 4, 7, 11, 16), (3, 6, 7, 11, 14), (3, 8, 9, 11, 14), (2, 8, 10, 11, 15), (1, 3, 8, 9, 12), (4, 5, 7, 8, 16), (4, 5, 8, 12, 14), (2, 4, 9, 12, 14), (6, 8, 10, 11, 14), (3, 5, 6, 13, 15), (1, 4, 5, 11, 16), (3, 5, 6, 14, 15), (2, 4, 5, 11, 12), (4, 5, 7, 8, 12), (1, 8, 9, 12, 15), (5, 7, 8, 13, 16), (2, 3, 5, 12, 14), (3, 5, 10, 12, 13), (6, 7, 10, 11, 12), (5, 7, 9, 13, 16), (6, 7, 10, 11, 14), (5, 7, 10, 12, 13), (1, 2, 5, 11, 15), (1, 5, 6, 9, 16), (5, 7, 8, 12, 13), (4, 7, 8, 12, 15), (2, 3, 5, 10, 15), (2, 6, 8, 10, 16), (3, 4, 10, 15, 16), (1, 5, 6, 14, 16), (2, 3, 5, 14, 15), (2, 3, 7, 9, 16), (2, 7, 9, 13, 14), (3, 4, 6, 7, 15), (4, 8, 10, 14, 16), (3, 4, 7, 15, 16), (2, 8, 10, 15, 16)], name='Minimal triangulation of RP^4') if n >= 5: # Use the construction given by Datta in Example 3.21. V = set(range(0, n+2)) S = Sphere(n).barycentric_subdivision() X = S.facets() facets = set([]) for f in X: new = [] for v in f: if 0 in v: new.append(tuple(V.difference(v))) else: new.append(v) facets.add(tuple(new)) return UniqueSimplicialComplex(list(facets), name='Triangulation of RP^{}'.format(n)) def K3Surface(): """ Returns a minimal triangulation of the K3 surface. This is a pure simplicial complex of dimension 4 with 16 vertices and 288 facets. It was constructed by Casella and Kühnel in [CK2001]_. The construction here uses the labeling from Spreer and Kühnel [SK2011]_. EXAMPLES:: sage: K3=simplicial_complexes.K3Surface() ; K3 Minimal triangulation of the K3 surface sage: K3.f_vector() [1, 16, 120, 560, 720, 288] This simplicial complex is implemented just by listing all 288 facets. The list of facets can be computed by the function :func:`~sage.homology.simplicial_complex.facets_for_K3`, but running the function takes a few seconds. 
""" return UniqueSimplicialComplex( [(2, 10, 13, 15, 16), (2, 8, 11, 15, 16), (2, 5, 7, 8, 10), (1, 9, 11, 13, 14), (1, 2, 8, 10, 12), (1, 3, 5, 6, 11), (1, 5, 6, 9, 12), (1, 2, 6, 13, 16), (1, 4, 10, 13, 14), (1, 9, 10, 14, 15), (2, 4, 7, 8, 12), (3, 4, 6, 10, 12), (1, 6, 7, 8, 9), (3, 4, 5, 7, 15), (1, 7, 12, 15, 16), (4, 5, 7, 13, 16), (5, 8, 11, 12, 15), (2, 4, 7, 12, 14), (1, 4, 5, 14, 16), (2, 5, 6, 10, 11), (1, 6, 8, 12, 14), (5, 8, 9, 14, 16), (5, 10, 11, 12, 13), (2, 4, 8, 9, 12), (7, 9, 12, 15, 16), (1, 2, 6, 9, 15), (1, 5, 14, 15, 16), (2, 3, 4, 5, 9), (6, 8, 10, 11, 15), (1, 5, 8, 10, 12), (1, 3, 7, 9, 10), (6, 7, 8, 9, 13), (1, 2, 9, 11, 15), (2, 8, 11, 14, 16), (2, 4, 5, 13, 16), (1, 4, 8, 13, 15), (4, 7, 8, 10, 11), (2, 3, 9, 11, 14), (2, 3, 4, 9, 13), (2, 8, 10, 12, 13), (1, 2, 4, 11, 15), (2, 3, 9, 11, 15), (3, 5, 10, 13, 15), (3, 4, 5, 9, 11), (6, 10, 13, 15, 16), (8, 10, 11, 15, 16), (6, 7, 11, 13, 15), (1, 5, 7, 15, 16), (4, 5, 7, 9, 15), (3, 4, 6, 7, 16), (2, 3, 11, 14, 16), (3, 4, 9, 11, 13), (1, 2, 5, 14, 15), (2, 3, 9, 13, 14), (1, 2, 5, 13, 16), (2, 3, 7, 8, 12), (2, 9, 11, 12, 14), (1, 9, 11, 15, 16), (4, 6, 9, 14, 16), (1, 4, 9, 13, 14), (1, 2, 3, 12, 16), (8, 11, 12, 14, 15), (2, 4, 11, 12, 14), (1, 4, 10, 12, 13), (1, 2, 6, 7, 13), (1, 3, 6, 10, 11), (1, 6, 8, 9, 12), (1, 4, 5, 6, 14), (3, 9, 10, 12, 15), (5, 8, 11, 12, 16), (5, 9, 10, 14, 15), (3, 9, 12, 15, 16), (3, 6, 8, 14, 15), (2, 4, 9, 10, 16), (5, 8, 9, 13, 15), (2, 3, 6, 9, 15), (6, 11, 12, 14, 16), (2, 3, 10, 13, 15), (2, 8, 9, 10, 13), (3, 4, 8, 11, 13), (3, 4, 5, 7, 13), (5, 7, 8, 10, 14), (4, 12, 13, 14, 15), (6, 7, 10, 14, 16), (5, 10, 11, 13, 14), (3, 4, 7, 13, 16), (6, 8, 9, 12, 13), (1, 3, 4, 10, 14), (2, 4, 6, 11, 12), (1, 7, 9, 10, 14), (4, 6, 8, 13, 14), (4, 9, 10, 11, 16), (3, 7, 8, 10, 16), (5, 7, 9, 15, 16), (1, 7, 9, 11, 14), (6, 8, 10, 15, 16), (5, 8, 9, 10, 14), (7, 8, 10, 14, 16), (2, 6, 7, 9, 11), (7, 9, 10, 13, 15), (3, 6, 7, 10, 12), (2, 4, 6, 10, 11), (4, 5, 8, 9, 11), (1, 2, 3, 8, 16), (3, 7, 9, 10, 12), (1, 2, 6, 8, 14), (3, 5, 6, 13, 15), (1, 5, 6, 12, 14), (2, 5, 7, 14, 15), (1, 5, 10, 11, 12), (3, 7, 8, 10, 11), (1, 2, 6, 14, 15), (1, 2, 6, 8, 16), (7, 9, 10, 12, 15), (3, 4, 6, 8, 14), (3, 7, 13, 14, 16), (2, 5, 7, 8, 14), (6, 7, 9, 10, 14), (2, 3, 7, 12, 14), (4, 10, 12, 13, 14), (2, 5, 6, 11, 13), (4, 5, 6, 7, 16), (1, 3, 12, 13, 16), (1, 4, 11, 15, 16), (1, 3, 4, 6, 10), (1, 10, 11, 12, 13), (6, 9, 11, 12, 14), (1, 4, 7, 8, 15), (5, 8, 9, 10, 13), (1, 2, 5, 7, 15), (1, 7, 12, 13, 16), (3, 11, 13, 14, 16), (1, 2, 5, 7, 13), (4, 7, 8, 9, 15), (1, 5, 6, 10, 11), (6, 7, 10, 13, 15), (3, 4, 7, 14, 15), (7, 11, 13, 14, 16), (3, 4, 10, 12, 14), (3, 6, 8, 10, 16), (2, 7, 8, 14, 16), (2, 3, 4, 5, 13), (5, 8, 12, 13, 15), (4, 6, 9, 13, 14), (2, 4, 5, 6, 12), (1, 3, 7, 8, 9), (8, 11, 12, 14, 16), (1, 7, 12, 13, 15), (8, 12, 13, 14, 15), (2, 8, 9, 12, 13), (4, 6, 10, 12, 15), (2, 8, 11, 14, 15), (2, 6, 9, 11, 12), (8, 9, 10, 11, 16), (2, 3, 6, 13, 15), (2, 3, 12, 15, 16), (1, 3, 5, 9, 12), (2, 5, 6, 9, 12), (2, 10, 12, 13, 14), (2, 6, 13, 15, 16), (2, 3, 11, 15, 16), (3, 5, 6, 8, 15), (2, 4, 5, 9, 12), (5, 6, 8, 11, 15), (6, 8, 12, 13, 14), (1, 2, 3, 8, 12), (1, 4, 7, 8, 11), (3, 5, 7, 14, 15), (3, 5, 7, 13, 14), (1, 7, 10, 11, 14), (6, 7, 11, 12, 15), (3, 4, 6, 7, 12), (1, 2, 4, 7, 11), (6, 9, 10, 14, 16), (4, 10, 12, 15, 16), (5, 6, 7, 12, 16), (3, 9, 11, 13, 14), (5, 9, 14, 15, 16), (4, 5, 6, 7, 12), (1, 3, 9, 10, 15), (4, 7, 8, 9, 12), (5, 9, 10, 13, 15), (1, 3, 8, 13, 16), (2, 9, 
12, 13, 14), (6, 7, 10, 12, 15), (2, 6, 8, 14, 15), (3, 5, 6, 8, 11), (3, 4, 7, 12, 14), (1, 3, 10, 14, 15), (7, 11, 12, 13, 16), (3, 11, 12, 13, 16), (3, 4, 5, 8, 15), (2, 4, 7, 8, 10), (2, 4, 7, 14, 15), (1, 2, 10, 12, 16), (1, 6, 8, 13, 16), (1, 7, 8, 13, 15), (3, 9, 11, 15, 16), (4, 6, 10, 11, 15), (2, 4, 11, 14, 15), (1, 3, 8, 9, 12), (1, 3, 6, 14, 15), (2, 4, 5, 6, 10), (1, 4, 9, 14, 16), (5, 7, 9, 12, 16), (1, 3, 7, 10, 11), (7, 8, 9, 13, 15), (3, 5, 10, 14, 15), (1, 4, 10, 12, 16), (3, 4, 5, 8, 11), (1, 2, 6, 7, 9), (1, 3, 11, 12, 13), (1, 5, 7, 13, 16), (5, 7, 10, 11, 14), (2, 10, 12, 15, 16), (3, 6, 7, 10, 16), (1, 2, 5, 8, 10), (4, 10, 11, 15, 16), (5, 8, 10, 12, 13), (3, 6, 8, 10, 11), (4, 5, 7, 9, 12), (6, 7, 11, 12, 16), (3, 5, 9, 11, 16), (8, 9, 10, 14, 16), (3, 4, 6, 8, 16), (1, 10, 11, 13, 14), (2, 9, 10, 13, 16), (1, 2, 5, 8, 14), (2, 4, 5, 10, 16), (1, 2, 7, 9, 11), (1, 3, 5, 6, 9), (5, 7, 11, 13, 14), (3, 5, 10, 13, 14), (2, 4, 8, 9, 10), (4, 11, 12, 14, 15), (2, 3, 7, 14, 16), (3, 4, 8, 13, 16), (6, 7, 9, 11, 14), (5, 6, 11, 13, 15), (4, 5, 6, 14, 16), (3, 4, 8, 14, 15), (4, 5, 8, 9, 15), (1, 4, 8, 11, 13), (5, 6, 12, 14, 16), (2, 3, 10, 12, 14), (1, 2, 5, 10, 16), (2, 5, 7, 10, 11), (2, 6, 7, 11, 13), (1, 4, 5, 10, 16), (2, 6, 8, 15, 16), (2, 3, 10, 12, 15), (7, 11, 12, 13, 15), (1, 3, 8, 11, 13), (4, 8, 9, 10, 11), (1, 9, 14, 15, 16), (1, 3, 6, 9, 15), (6, 9, 12, 13, 14), (2, 3, 10, 13, 14), (2, 5, 7, 11, 13), (2, 3, 5, 6, 13), (4, 6, 8, 13, 16), (6, 7, 9, 10, 13), (5, 8, 12, 14, 16), (4, 6, 9, 13, 16), (5, 8, 9, 11, 16), (2, 3, 5, 6, 9), (1, 3, 5, 11, 12), (3, 7, 8, 9, 12), (4, 6, 11, 12, 15), (3, 5, 9, 12, 16), (5, 11, 12, 13, 15), (1, 3, 4, 6, 14), (3, 5, 11, 12, 16), (1, 5, 8, 12, 14), (4, 8, 13, 14, 15), (1, 3, 7, 8, 11), (6, 9, 10, 13, 16), (2, 4, 9, 13, 16), (1, 6, 7, 8, 13), (1, 4, 12, 13, 15), (2, 4, 7, 10, 11), (1, 4, 9, 11, 13), (6, 7, 11, 14, 16), (1, 4, 9, 11, 16), (1, 4, 12, 15, 16), (1, 2, 4, 7, 15), (2, 3, 7, 8, 16), (1, 4, 5, 6, 10)], name='Minimal triangulation of the K3 surface') def BarnetteSphere(): r""" Returns Barnette's triangulation of the 3-sphere. This is a pure simplicial complex of dimension 3 with 8 vertices and 19 facets, which is a non-polytopal triangulation of the 3-sphere. It was constructed by Barnette in [Bar1970]_. The construction here uses the labeling from De Loera, Rambau and Santos [DLRS2010]_. Another reference is chapter III.4 of Ewald [Ewa1996]_. EXAMPLES:: sage: BS = simplicial_complexes.BarnetteSphere() ; BS Barnette's triangulation of the 3-sphere sage: BS.f_vector() [1, 8, 27, 38, 19] TESTS: Checks that this is indeed the same Barnette Sphere as the one given on page 87 of [Ewa1996]_.:: sage: BS2 = SimplicialComplex([[1,2,3,4],[3,4,5,6],[1,2,5,6], ....: [1,2,4,7],[1,3,4,7],[3,4,6,7], ....: [3,5,6,7],[1,2,5,7],[2,5,6,7], ....: [2,4,6,7],[1,2,3,8],[2,3,4,8], ....: [3,4,5,8],[4,5,6,8],[1,2,6,8], ....: [1,5,6,8],[1,3,5,8],[2,4,6,8], ....: [1,3,5,7]]) sage: BS.is_isomorphic(BS2) True """ return UniqueSimplicialComplex([ (1,2,4,5),(2,3,5,6),(1,3,4,6),(1,2,3,7),(4,5,6,7),(1,2,4,7), (2,4,5,7),(2,3,5,7),(3,5,6,7),(3,1,6,7),(1,6,4,7),(1,2,3,8), (4,5,6,8),(1,2,5,8),(1,4,5,8),(2,3,6,8),(2,5,6,8),(3,1,4,8), (3,6,4,8)], name="Barnette's triangulation of the 3-sphere") def BrucknerGrunbaumSphere(): r""" Returns Bruckner and Grunbaum's triangulation of the 3-sphere. This is a pure simplicial complex of dimension 3 with 8 vertices and 20 facets, which is a non-polytopal triangulation of the 3-sphere. 
It appeared first in [Br1910]_ and was studied in [GrS1967]_. It is defined here as the link of any vertex in the unique minimal triangulation of the complex projective plane, see chapter 4 of [Kuh1995]_. EXAMPLES:: sage: BGS = simplicial_complexes.BrucknerGrunbaumSphere() ; BGS Bruckner and Grunbaum's triangulation of the 3-sphere sage: BGS.f_vector() [1, 8, 28, 40, 20] """ # X = ComplexProjectivePlane().link([9]) # return UniqueSimplicialComplex(X.facets(), # name="Bruckner and Grunbaum's triangulation of the 3-sphere") return UniqueSimplicialComplex(ComplexProjectivePlane().link([9]), name="Bruckner and Grunbaum's triangulation of the 3-sphere") ############################################################### # examples from graph theory: def NotIConnectedGraphs(n, i): """ The simplicial complex of all graphs on `n` vertices which are not `i`-connected. Fix an integer `n>0` and consider the set of graphs on `n` vertices. View each graph as its set of edges, so it is a subset of a set of size `n` choose 2. A graph is `i`-connected if, for any `j<i`, if any `j` vertices are removed along with the edges emanating from them, then the graph remains connected. Now fix `i`: it is clear that if `G` is not `i`-connected, then the same is true for any graph obtained from `G` by deleting edges. Thus the set of all graphs which are not `i`-connected, viewed as a set of subsets of the `n` choose 2 possible edges, is closed under taking subsets, and thus forms a simplicial complex. This function produces that simplicial complex. INPUT: - ``n``, ``i`` -- non-negative integers with `i` at most `n` See Dumas et al. [DHSW2003]_ for information on computing its homology by computer, and see Babson et al. [BBLSW1999]_ for theory. For example, Babson et al. show that when `i=2`, the reduced homology of this complex is nonzero only in dimension `2n-5`, where it is free abelian of rank `(n-2)!`. EXAMPLES:: sage: simplicial_complexes.NotIConnectedGraphs(5,2).f_vector() [1, 10, 45, 120, 210, 240, 140, 20] sage: simplicial_complexes.NotIConnectedGraphs(5,2).homology(5).ngens() 6 """ G_list = range(1,n+1) G_vertices = Set(G_list) E_list = [] for w in G_list: for v in range(1,w): E_list.append((v,w)) E = Set(E_list) facets = [] i_minus_one_sets = list(G_vertices.subsets(size=i-1)) for A in i_minus_one_sets: G_minus_A = G_vertices.difference(A) for B in G_minus_A.subsets(): if len(B) > 0 and len(B) < len(G_minus_A): C = G_minus_A.difference(B) facet = E for v in B: for w in C: bad_edge = (min(v,w), max(v,w)) facet = facet.difference(Set([bad_edge])) facets.append(facet) return UniqueSimplicialComplex(facets, name='Simplicial complex of not {}-connected graphs on {} vertices'.format(i, n)) def MatchingComplex(n): """ The matching complex of graphs on `n` vertices. Fix an integer `n>0` and consider a set `V` of `n` vertices. A 'partial matching' on `V` is a graph formed by edges so that each vertex is in at most one edge. If `G` is a partial matching, then so is any graph obtained by deleting edges from `G`. Thus the set of all partial matchings on `n` vertices, viewed as a set of subsets of the `n` choose 2 possible edges, is closed under taking subsets, and thus forms a simplicial complex called the 'matching complex'. This function produces that simplicial complex. INPUT: - ``n`` -- positive integer. See Dumas et al. [DHSW2003]_ for information on computing its homology by computer, and see Wachs [Wac2003]_ for an expository article about the theory. 
For example, the homology of these complexes seems to have only mod 3 torsion, and this has been proved for the bottom non-vanishing homology group for the matching complex `M_n`. EXAMPLES:: sage: M = simplicial_complexes.MatchingComplex(7) sage: H = M.homology() sage: H {0: 0, 1: C3, 2: Z^20} sage: H[2].ngens() 20 sage: simplicial_complexes.MatchingComplex(8).homology(2) # long time (6s on sage.math, 2012) Z^132 """ G_vertices = Set(range(1,n+1)) facets = [] if is_even(n): half = int(n/2) half_n_sets = list(G_vertices.subsets(size=half)) else: half = int((n-1)/2) half_n_sets = list(G_vertices.subsets(size=half)) for X in half_n_sets: Xcomp = G_vertices.difference(X) if is_even(n): if 1 in X: A = X B = Xcomp else: A = Xcomp B = X for M in matching(A, B): facet = [] for pair in M: facet.append(tuple(sorted(pair))) facets.append(facet) else: for w in Xcomp: if 1 in X or (w == 1 and 2 in X): A = X B = Xcomp.difference([w]) else: B = X A = Xcomp.difference([w]) for M in matching(A, B): facet = [] for pair in M: facet.append(tuple(sorted(pair))) facets.append(facet) return UniqueSimplicialComplex(facets, name='Matching complex on {} vertices'.format(n)) def ChessboardComplex(n, i): r""" The chessboard complex for an `n \times i` chessboard. Fix integers `n, i > 0` and consider sets `V` of `n` vertices and `W` of `i` vertices. A 'partial matching' between `V` and `W` is a graph formed by edges `(v,w)` with `v \in V` and `w \in W` so that each vertex is in at most one edge. If `G` is a partial matching, then so is any graph obtained by deleting edges from `G`. Thus the set of all partial matchings on `V` and `W`, viewed as a set of subsets of the `n+i` choose 2 possible edges, is closed under taking subsets, and thus forms a simplicial complex called the 'chessboard complex'. This function produces that simplicial complex. (It is called the chessboard complex because such graphs also correspond to ways of placing rooks on an `n` by `i` chessboard so that none of them are attacking each other.) INPUT: - ``n, i`` -- positive integers. See Dumas et al. [DHSW2003]_ for information on computing its homology by computer, and see Wachs [Wac2003]_ for an expository article about the theory. EXAMPLES:: sage: C = simplicial_complexes.ChessboardComplex(5,5) sage: C.f_vector() [1, 25, 200, 600, 600, 120] sage: simplicial_complexes.ChessboardComplex(3,3).homology() {0: 0, 1: Z x Z x Z x Z, 2: 0} """ A = range(n) B = range(i) E_dict = {} index = 0 for v in A: for w in B: E_dict[(v,w)] = index index += 1 facets = [] for M in matching(A, B): facet = [] for pair in M: facet.append(E_dict[pair]) facets.append(facet) return UniqueSimplicialComplex(facets, name='Chessboard complex for an {}x{} chessboard'.format(n, i)) def RandomComplex(n, d, p=0.5): """ A random ``d``-dimensional simplicial complex on ``n`` vertices. INPUT: - ``n`` -- number of vertices - ``d`` -- dimension of the complex - ``p`` -- floating point number between 0 and 1 (optional, default 0.5) A random `d`-dimensional simplicial complex on `n` vertices, as defined for example by Meshulam and Wallach [MW2009]_, is constructed as follows: take `n` vertices and include all of the simplices of dimension strictly less than `d`, and then for each possible simplex of dimension `d`, include it with probability `p`. 
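For example, with `n = 6` and `d = 2`, the complex always contains all 15 edges on the 6 vertices, and each of the 20 possible triangles is included independently with probability `p`.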
EXAMPLES:: sage: X = simplicial_complexes.RandomComplex(6, 2); X Random 2-dimensional simplicial complex on 6 vertices sage: len(list(X.vertices())) 6 If `d` is too large (if `d+1 > n`, so that there are no `d`-dimensional simplices), then return the simplicial complex with a single `(n-1)`-dimensional simplex:: sage: simplicial_complexes.RandomComplex(6, 12) The 5-simplex """ if d+1 > n: return Simplex(n-1) else: vertices = range(n) facets = Subsets(vertices, d).list() maybe = Subsets(vertices, d+1) facets.extend([f for f in maybe if random.random() <= p]) return UniqueSimplicialComplex(facets, name='Random {}-dimensional simplicial complex on {} vertices'.format(d,n)) def SumComplex(n, A): r""" The sum complexes of Linial, Meshulam, and Rosenthal [LMR2010]_. If `k+1` is the cardinality of `A`, then this returns a `k`-dimensional simplicial complex `X_A` with vertices `\ZZ/(n)`, and facets given by all `k+1`-tuples `(x_0, x_1, ..., x_k)` such that the sum `\sum x_i` is in `A`. See the paper by Linial, Meshulam, and Rosenthal [LMR2010]_, in which they prove various results about these complexes; for example, if `n` is prime, then `X_A` is rationally acyclic, and if in addition `A` forms an arithmetic progression in `\ZZ/(n)`, then `X_A` is `\ZZ`-acyclic. Throughout their paper, they assume that `n` and `k` are relatively prime, but the construction makes sense in general. In addition to the results from the cited paper, these complexes can have large torsion, given the number of vertices; for example, if `n=10`, and `A=\{0,1,2,3,6\}`, then `H_3(X_A)` is cyclic of order 2728, and there is a 4-dimensional complex on 13 vertices with `H_3` having a cyclic summand of order .. MATH:: 706565607945 = 3 \cdot 5 \cdot 53 \cdot 79 \cdot 131 \cdot 157 \cdot 547. See the examples.
INPUT: - ``n`` -- a positive integer - ``A`` -- a subset of `\ZZ/(n)` EXAMPLES:: sage: S = simplicial_complexes.SumComplex(10, [0,1,2,3,6]); S Sum complex on vertices Z/10Z associated to {0, 1, 2, 3, 6} sage: S.homology() {0: 0, 1: 0, 2: 0, 3: C2728, 4: 0} sage: factor(2728) 2^3 * 11 * 31 sage: S = simplicial_complexes.SumComplex(11, [0, 1, 3]); S Sum complex on vertices Z/11Z associated to {0, 1, 3} sage: S.homology(1) C23 sage: S = simplicial_complexes.SumComplex(11, [0,1,2,3,4,7]); S Sum complex on vertices Z/11Z associated to {0, 1, 2, 3, 4, 7} sage: S.homology(algorithm='no_chomp') # long time {0: 0, 1: 0, 2: 0, 3: 0, 4: C645679, 5: 0} sage: factor(645679) 23 * 67 * 419 sage: S = simplicial_complexes.SumComplex(13, [0, 1, 3]); S Sum complex on vertices Z/13Z associated to {0, 1, 3} sage: S.homology(1) C159 sage: factor(159) 3 * 53 sage: S = simplicial_complexes.SumComplex(13, [0,1,2,5]); S Sum complex on vertices Z/13Z associated to {0, 1, 2, 5} sage: S.homology(algorithm='no_chomp') # long time {0: 0, 1: 0, 2: C146989209, 3: 0} sage: factor(1648910295) 3^2 * 5 * 53 * 521 * 1327 sage: S = simplicial_complexes.SumComplex(13, [0,1,2,3,5]); S Sum complex on vertices Z/13Z associated to {0, 1, 2, 3, 5} sage: S.homology(algorithm='no_chomp') # long time {0: 0, 1: 0, 2: 0, 3: C3 x C237 x C706565607945, 4: 0} sage: factor(706565607945) 3 * 5 * 53 * 79 * 131 * 157 * 547 sage: S = simplicial_complexes.SumComplex(17, [0, 1, 4]); S Sum complex on vertices Z/17Z associated to {0, 1, 4} sage: S.homology(1, algorithm='no_chomp') C140183 sage: factor(140183) 103 * 1361 sage: S = simplicial_complexes.SumComplex(19, [0, 1, 4]); S Sum complex on vertices Z/19Z associated to {0, 1, 4} sage: S.homology(1,algorithm='no_chomp') C5670599 sage: factor(5670599) 11 * 191 * 2699 sage: S = simplicial_complexes.SumComplex(31, [0, 1, 4]); S Sum complex on vertices Z/31Z associated to {0, 1, 4} sage: S.homology(1,algorithm='no_chomp') # long time C5 x C5 x C5 x C5 x C26951480558170926865 sage: factor(26951480558170926865) 5 * 311 * 683 * 1117 * 11657 * 1948909 """ from sage.rings.all import Integers Zn = Integers(n) A = frozenset([Zn(x) for x in A]) facets = [] for f in Set(Zn).subsets(len(A)): if sum(f) in A: facets.append(tuple(f)) return UniqueSimplicialComplex(facets, name='Sum complex on vertices Z/{}Z associated to {}'.format(n, Set(A))) def RandomTwoSphere(n): r""" Return a random triangulation of the 2-dimensional sphere with `n` vertices. INPUT: `n` -- an integer OUTPUT: A random triangulation of the sphere chosen uniformly among the *rooted* triangulations on `n` vertices. Because some triangulations have nontrivial automorphism groups, this may not be equal to the uniform distribution among unrooted triangulations. ALGORITHM: The algorithm is taken from [PS2006]_, section 2.1. Starting from a planar tree (represented by its contour as a sequence of vertices), one first performs local closures, until no one is possible. A local closure amounts to replace in the cyclic contour word a sequence ``in1,in2,in3,lf,in3`` by ``in1,in3``. After all local closures are done, one has reached the partial closure, as in [PS2006]_, figure 5 (a). Then one has to perform complete closure by adding two more vertices, in order to reach the situation of [PS2006]_, figure 5 (b). For this, it is necessary to find inside the final contour one of the two subsequences ``lf,in,lf``. At every step of the algorithm, newly created triangles are added in a simplicial complex. 
This algorithm is implemented in :meth:`~sage.graphs.generators.random.RandomTriangulation`, which creates an embedded graph. The triangles of the simplicial complex are recovered from this embedded graph. EXAMPLES:: sage: G = simplicial_complexes.RandomTwoSphere(6); G Simplicial complex with vertex set (0, 1, 2, 3, 'a', 'b') and 8 facets sage: G.homology() {0: 0, 1: 0, 2: Z} sage: G.is_pure() True sage: fg = G.flip_graph(); fg Graph on 8 vertices sage: fg.is_planar() and fg.is_regular(3) True """ from sage.graphs.generators.random import RandomTriangulation graph = RandomTriangulation(n) graph = graph.relabel(inplace=False) triangles = [(u, v, w) for u, L in iteritems(graph._embedding) for v, w in zip(L, L[1:] + [L[0]]) if u < v and u < w] return SimplicialComplex(triangles, maximality_check=False) def ShiftedComplex(generators): r""" Return the smallest shifted simplicial complex containing ``generators`` as faces. Let `V` be a set of vertices equipped with a total order. The 'componentwise partial ordering' on k-subsets of `V` is defined as follows: if `A = \{a_1 < \cdots < a_k\}` and `B = \{b_1 < \cdots < b_k\}`, then `A \leq_C B` iff `a_i \leq b_i` for all `i`. A simplicial complex `X` on vertex set `[n]` is *shifted* if its faces form an order ideal under the componentwise partial ordering, i.e., if `B \in X` and `A \leq_C B` then `A \in X`. Shifted complexes of dimension 1 are also known as threshold graphs. .. NOTE:: This method assumes that `V` consists of positive integers with the natural ordering. INPUT: - ``generators`` -- a list of generators of the order ideal, which may be lists, tuples or simplices EXAMPLES:: sage: X = simplicial_complexes.ShiftedComplex([ Simplex([1,6]), (2,4), [8] ]) sage: sorted(X.facets()) [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 3), (2, 4), (7,), (8,)] sage: X = simplicial_complexes.ShiftedComplex([ [2,3,5] ]) sage: sorted(X.facets()) [(1, 2, 3), (1, 2, 4), (1, 2, 5), (1, 3, 4), (1, 3, 5), (2, 3, 4), (2, 3, 5)] sage: X = simplicial_complexes.ShiftedComplex([ [1,3,5], [2,6] ]) sage: sorted(X.facets()) [(1, 2, 3), (1, 2, 4), (1, 2, 5), (1, 3, 4), (1, 3, 5), (1, 6), (2, 6)] """ from sage.combinat.partition import Partitions Facets = [] for G in generators: G = list(reversed(sorted(G))) L = len(G) for k in range(L * (L+1) // 2, sum(G) + 1): for P in Partitions(k, length=L, max_slope=-1, outer=G): Facets.append( list(reversed(P)) ) return SimplicialComplex(Facets) def RudinBall(): r""" Return the non-shellable ball constructed by Rudin. This complex is a non-shellable triangulation of the 3-ball with 14 vertices and 41 facets, constructed by Rudin in [Rud1958]_. EXAMPLES:: sage: R = simplicial_complexes.RudinBall(); R Rudin ball sage: R.f_vector() [1, 14, 66, 94, 41] sage: R.homology() {0: 0, 1: 0, 2: 0, 3: 0} sage: R.is_cohen_macaulay() True """ return UniqueSimplicialComplex( [[1,9,2,5], [1,10,2,5], [1,10,5,11], [1,10,7,11], [1,13,5,11], [1,13,7,11], [2,10,3,6], [2,11,3,6], [2,11,6,12], [2,11,8,12], [2,14,6,12], [2,14,8,12], [3,11,4,7], [3,12,4,7], [3,12,5,9], [3,12,7,9], [3,13,5,9], [3,13,7,9], [4,9,1,8], [4,9,6,10], [4,9,8,10], [4,12,1,8], [4,14,6,10], [4,14,8,10], [9,10,2,5], [9,10,2,6], [9,10,5,11], [9,10,11,12], [9,13,5,11], [10,11,3,6], [10,11,3,7], [10,11,6,12], [10,14,6,12], [11,12,4,7], [11,12,4,8], [11,12,7,9], [11,13,7,9], [12,9,1,5], [12,9,1,8], [12,9,8,10], [12,14,8,10]], name="Rudin ball" ) def ZieglerBall(): r""" Return the non-shellable ball constructed by Ziegler. 
This complex is a non-shellable triangulation of the 3-ball with 10 vertices and 21 facets, constructed by Ziegler in [Zie1998]_ and the smallest such complex known. EXAMPLES:: sage: Z = simplicial_complexes.ZieglerBall(); Z Ziegler ball sage: Z.f_vector() [1, 10, 38, 50, 21] sage: Z.homology() {0: 0, 1: 0, 2: 0, 3: 0} sage: Z.is_cohen_macaulay() True """ return UniqueSimplicialComplex( [[1,2,3,4], [1,2,5,6], [1,5,6,9], [2,5,6,0], [3,6,7,8], [4,5,7,8], [2,3,6,7], [1,6,2,9], [2,6,7,0], [3,2,4,8], [4,1,3,7], [3,4,7,8], [1,2,4,9], [2,7,3,0], [3,2,6,8], [4,1,5,7], [4,1,8,5], [1,4,8,9], [2,3,1,0], [1,8,5,9], [2,1,5,0]], name="Ziegler ball" ) def DunceHat(): r""" Return the minimal triangulation of the dunce hat given by Hachimori [Hac2016]_. This is a standard example of a space that is contractible but not collapsible. EXAMPLES:: sage: D = simplicial_complexes.DunceHat(); D Minimal triangulation of the dunce hat sage: D.f_vector() [1, 8, 24, 17] sage: D.homology() {0: 0, 1: 0, 2: 0} sage: D.is_cohen_macaulay() True """ return UniqueSimplicialComplex( [[1,3,5], [2,3,5], [2,4,5], [1,2,4], [1,3,4], [3,4,8], [1,2,8], [1,7,8], [1,2,7], [2,3,7], [3,6,7], [1,3,6], [1,5,6], [4,5,6], [4,6,8], [6,7,8], [2,3,8]], name="Minimal triangulation of the dunce hat" )
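# ---------------------------------------------------------------------------
# Side note on the ``matching(A, B)`` generator used by ``MatchingComplex``
# and ``ChessboardComplex`` above (its definition lives elsewhere in this
# module).  The sketch below, with the hypothetical name
# ``_all_maximal_matchings_sketch``, is not the module's actual
# implementation; it only illustrates the idea those functions rely on: in a
# complete bipartite graph a partial matching is maximal exactly when it
# saturates the smaller side, so the maximal matchings correspond to the
# injections of the smaller vertex set into the larger one.

from itertools import permutations

def _all_maximal_matchings_sketch(A, B):
    """Yield each maximal set of disjoint pairs ``(a, b)``, with ``a`` in
    ``A`` and ``b`` in ``B``, as a frozenset of pairs."""
    A = list(A)
    B = list(B)
    if len(A) <= len(B):
        # Every maximal matching pairs all of A with a distinct element of B.
        for image in permutations(B, len(A)):
            yield frozenset(zip(A, image))
    else:
        # Symmetric case: all of B gets matched into A.
        for image in permutations(A, len(B)):
            yield frozenset((a, b) for b, a in zip(B, image))

# Sanity check against the doctest above: ChessboardComplex(5, 5) has 120
# facets, one for each of the 5! = 120 maximal matchings:
#     len(list(_all_maximal_matchings_sketch(range(5), range(5)))) == 120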
''' Created on Jan 26, 2014 @author: Chris ''' import unittest from gooey.gui.option_reader import OptionReader class FakeClassWithoutImplementation(OptionReader): def __init__(self): pass class FakeClassWithImplementation(OptionReader): def __init__(self): pass def GetOptions(self): pass class Test(unittest.TestCase): def test_mixin_classes_throws_typeerror_without_implementation(self): with self.assertRaises(TypeError): fake_class = FakeClassWithoutImplementation() def test_mixin_classes_passes_with_implementation(self): fc = FakeClassWithImplementation() if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
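# ---------------------------------------------------------------------------
# A minimal sketch (not Gooey's actual source) of the contract the tests
# above exercise.  Assumption: gooey.gui.option_reader.OptionReader declares
# GetOptions as an abstract method, which is why instantiating a subclass
# that lacks an implementation raises TypeError.  Python 3 syntax is used
# here for a self-contained, runnable illustration.

from abc import ABCMeta, abstractmethod


class OptionReaderSketch(metaclass=ABCMeta):
    """Mixin for widgets that can report the options a user selected."""

    @abstractmethod
    def GetOptions(self):
        """Return the user-selected options."""


class MissingImplementation(OptionReaderSketch):
    pass


class WithImplementation(OptionReaderSketch):
    def GetOptions(self):
        return {}


if __name__ == "__main__":
    try:
        MissingImplementation()  # abstract GetOptions not provided
    except TypeError as err:
        print("TypeError, as the first test expects:", err)
    WithImplementation()  # instantiates cleanly, as the second test expects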
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Module containing transpiler synthesize.""" from .graysynth import graysynth, cnot_synth
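# A minimal usage sketch for the helpers re-exported above.  Assumptions (not
# stated in this file): ``cnot_synth`` performs Patel-Markov-Hayes style
# synthesis, takes an n x n invertible binary matrix describing a linear
# reversible function on n qubits, and returns a QuantumCircuit built from
# CNOT gates; consult the graysynth module's docstrings for the exact
# signature before relying on this.
#
#     import numpy as np
#     from qiskit.transpiler.synthesis import cnot_synth
#
#     # x0 <- x0 XOR x1, other wires unchanged; invertible over GF(2).
#     state = np.array([[1, 1, 0],
#                       [0, 1, 0],
#                       [0, 0, 1]])
#     print(cnot_synth(state))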
import contextlib import gc import sys import io import math import tempfile import time import threading import unittest import warnings from copy import deepcopy from collections import OrderedDict from itertools import product from operator import mul from functools import reduce import torch import json # TODO: remove this global setting # Autograd tests use double as the default dtype torch.set_default_dtype(torch.double) from torch import nn from torch._six import inf, nan, istuple from torch.autograd.gradcheck import gradgradcheck, gradcheck from torch.autograd.function import once_differentiable from torch.autograd.profiler import (profile, format_time, EventList, FunctionEvent, FunctionEventAvg, record_function, emit_nvtx) import torch.autograd.functional as autogradF from torch.utils.checkpoint import checkpoint from torch.testing._internal.common_utils import (TEST_MKL, TEST_WITH_ROCM, TestCase, run_tests, skipIfNoLapack, suppress_warnings, slowTest, load_tests, random_symmetric_pd_matrix, random_symmetric_matrix, IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck, skipIfRocm) from torch.autograd import Variable, Function, detect_anomaly from torch.autograd.function import InplaceFunction from torch.testing import randn_like from torch.testing._internal.common_methods_invocations import (method_tests, create_input, unpack_variables, EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK, EXCLUDE_GRADGRADCHECK, EXCLUDE_GRADGRADCHECK_BY_TEST_NAME, exclude_tensor_method, mask_not_all_zeros, S) from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm, onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA, deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan, skipCUDAIf) # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. 
This line silences flake warnings load_tests = load_tests import pickle PRECISION = 1e-4 @contextlib.contextmanager def backward_engine(engine): _prev_engine = Variable._execution_engine Variable._execution_engine = engine() try: yield finally: Variable._execution_engine = _prev_engine def graph_desc(fn): if fn is None: return 'None' result = type(fn).__name__ + '(' next_functions = fn.next_functions for next_fn, _ in next_functions: result += graph_desc(next_fn) result += ', ' if next_functions: result = result[:-2] return result + ')' class TestAutograd(TestCase): def _function_test(self, cls): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) result = cls.apply(x, 2, y) go = torch.ones((), requires_grad=True) result.sum().backward(go, create_graph=True) self.assertEqual(x.grad, y + torch.ones(5, 5)) self.assertEqual(y.grad, x + torch.ones(5, 5) * 2) self.assertIsNotNone(x.grad.grad_fn) self.assertIsNotNone(y.grad.grad_fn) return x, y def test_function(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod def backward(ctx, grad_output): var1, var2 = ctx.saved_tensors # NOTE: self is the test case here self.assertIsInstance(var1, torch.Tensor) self.assertIsInstance(var2, torch.Tensor) self.assertIsInstance(grad_output, torch.Tensor) return (grad_output + grad_output * var2, None, grad_output * ctx.pyscalar + grad_output * var1) x, y = self._function_test(MyFunction) x_grad_desc = graph_desc(x.grad.grad_fn) y_grad_desc = graph_desc(y.grad.grad_fn) self.assertExpected(x_grad_desc, "x_grad_desc") self.assertExpected(y_grad_desc, "y_grad_desc") def test_once_differentiable(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod @once_differentiable def backward(ctx, grad_output): self.assertFalse(torch.is_grad_enabled()) t1, t2 = ctx.saved_tensors return (grad_output + grad_output * t2, None, grad_output * ctx.pyscalar + grad_output * t1) x, y = self._function_test(MyFunction) self.assertEqual(graph_desc(x.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') self.assertEqual(graph_desc(y.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') def test_function_returns_input(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): return grad * 2 for shape in [(1,), ()]: v = torch.ones(shape, requires_grad=True) MyFunction.apply(v).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) with torch.no_grad(): v.grad.zero_() MyFunction.apply(v.clone()).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) def test_function_returns_undefined_tensor(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad): return None # Test that undefined tensors returned from custom backward function # are propagated as undefined and not tensor full of zeroes x = torch.ones(1, requires_grad=True) MyFunction.apply(x).backward() self.assertIsNone(x.grad) MyFunction.apply(x ** 2).backward() self.assertIsNone(x.grad) MyFunction.apply(x).sum().backward() self.assertIsNone(x.grad) self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, 
allow_unused=True)[0]) def test_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): self.assertEqual(grad, torch.zeros(1)) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_dont_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): ctx.set_materialize_grads(False) return x @staticmethod def backward(ctx, grad): self.assertIsNone(grad) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_legacy_function_deprecation_exception(self): # Trigger exception class MyFunction(Function): def forward(self, x): return x def backward(self, grad_output): return grad_output # Check exception occurs with self.assertRaisesRegex( RuntimeError, 'Legacy autograd function with non-static forward method is deprecated'): MyFunction()(torch.randn(3, 4)) class SimulateBackwardError(Function): @staticmethod def forward(ctx, input): return input.clone() @staticmethod @once_differentiable def backward(ctx, input): raise Exception("Simulate error on backward pass") def test_custom_function_exception(self): t1 = torch.rand((3, 3), requires_grad=True) t2 = torch.rand((3, 3), requires_grad=True) tmp = (t1 + t2) * (t1 + t2) t3 = TestAutograd.SimulateBackwardError.apply(tmp) with self.assertRaisesRegex(Exception, "Simulate error on backward pass"): t3.sum().backward() def test_invalid_gradients(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad_output): return torch.randn(10, dtype=torch.float) with self.assertRaisesRegex(RuntimeError, 'expected shape'): input = torch.randn(5, 5, dtype=torch.float, requires_grad=True) MyFunction.apply(input).sum().backward() def test_accumulate_grad(self): grad_output = torch.ones(5, 5) def compute_grad(create_graph): x = torch.randn(5, 5, requires_grad=True) y = x + 2 y.backward(grad_output, retain_graph=True) x_grad = x.grad x_grad_clone = x.grad.clone() y.backward(grad_output, create_graph=create_graph) return x_grad, x_grad_clone # Accumulate in-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=False) self.assertEqual(x_grad, x_grad_clone * 2) # Accumulate out-of-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=True) self.assertEqual(x_grad, x_grad_clone) def test_accumulate_grad_tensor_reference(self): def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph): params = torch.tensor([1.5, 1.5]).requires_grad_() params.grad = params_grad_tensor grad_saved = params.grad params.backward(backward_grad_tensor, create_graph=create_graph) self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference) for create_graph in (False, True): # Accumulate dense gradient to sparse gradient will change the `params.grad` reference _test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.tensor([1.5, 1.5]), False, # never accumulates in-place create_graph) # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference, # but only if create_graph=False. _test_grad_tensor( torch.tensor([1.5, 1.5]), torch.tensor([1.5, 1.5]), not create_graph, create_graph) # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference, # but only if create_graph=False. 
_test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), not create_graph, create_graph) @skipIfNoLapack def test_slogdet_sign(self): a = torch.randn(3, 3, requires_grad=True) s, logdet = a.slogdet() # test that sign should not require grad self.assertFalse(s.requires_grad) # test that backward through computation involving sign works def sign_mul_logdet(mat): s, logdet = mat.slogdet() return s * logdet u, s, v = a.detach().svd() s.abs_().clamp_(0.0001) for sign in (-1, 1): s[-1] = sign mat = torch.chain_matmul(u, s.diag(), v.t()).requires_grad_() gradcheck(sign_mul_logdet, mat) gradgradcheck(sign_mul_logdet, mat) def test_sum_to_with_empty_dim_grad(self): a = torch.rand(4, 0, requires_grad=True) b = torch.rand(4, 1, requires_grad=True) c = a + b assert c.shape == (4, 0) c.sum().backward() self.assertEqual(b.grad, torch.zeros(4, 1)) self.assertEqual(a.grad, torch.zeros(4, 0)) def test_hessian_vector(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) with torch.no_grad(): x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad grad_sum.backward(torch.ones(2, 2)) x_hv = torch.ones(2, 2) * 5 y_hv = torch.ones(2, 2) * 4 self.assertEqual(x.grad, x_grad + x_hv) self.assertEqual(y.grad, y_grad + y_hv) def test_grad(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad x_hv = torch.autograd.grad( outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)], inputs=[x], create_graph=True) expected_x_hv = torch.ones(2, 2) * 5 expected_y_hv = torch.ones(2, 2) * 4 self.assertEqual(x_hv[0], expected_x_hv) self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) # Test that grad_outputs and outputs have the same shape grad_out = torch.ones(2) try: torch.autograd.grad( outputs=[grad_sum], grad_outputs=[grad_out], inputs=[x], create_graph=True) self.assertFail() except RuntimeError as error: self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of " + str(grad_out.shape) + " and output[0] has a shape of " + str(grad_sum.shape) + ".") def test_grad_nonleaf(self): x_init = torch.randn(2, 2, requires_grad=True) x = x_init y = torch.randn(2, 2, requires_grad=True) grad_output = torch.ones(2, 2) def fn(x): return x ** 2 + y * x + y ** 2 for _ in range(5): grad_x, = torch.autograd.grad( fn(x), x, grad_outputs=grad_output, create_graph=True) grad_x_expected = 2 * x + y self.assertIsNone(y.grad) self.assertIsNone(x.grad) self.assertEqual(grad_x, grad_x_expected) x = x + 0.05 * grad_x val_init = fn(x_init).sum() val_final = fn(x).sum() self.assertGreater(val_final, val_init) x.backward(grad_output) self.assertIsNotNone(y.grad) self.assertIsNotNone(x_init.grad) def test_grad_nonleaf_many_outputs(self): # This checks an edge case for function callbacks # We want to capture two grads of a function, but can only # register a single callback. 
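# Since x itself is not among the inputs passed to autograd.grad below, backward never reaches its accumulator, so the hook registered on x must stay silent; the assertions at the end of this test verify exactly that.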
x = torch.randn(4, 2, requires_grad=True) a, b = x.chunk(2) def hook(*grads): hook_called[0] = True hook_called = [False] x.register_hook(hook) go = torch.randn(2, 2) grad_a, grad_b = torch.autograd.grad( (a + 2 * b), [a, b], grad_outputs=go, create_graph=True) self.assertEqual(grad_a, go) self.assertEqual(grad_b, go * 2) self.assertFalse(hook_called[0]) self.assertIsNone(x.grad) def test_grad_nonleaf_register_hook(self): # This checks an edge case for register_hook. # We want to capture grad of a nonleaf tensor, # but avoid segfault during backward of other nonleaf tensors x = torch.randn(5, requires_grad=True) x_list = x.unbind() x0 = x_list[0] hook_results = [None] def hook(grad): hook_results[0] = grad x0.register_hook(hook) x_list[0].backward() self.assertEqual(hook_results[0], torch.tensor(1.)) expected_grad = torch.tensor([1., 0, 0, 0, 0]) self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[0].grad) for i in range(1, 5, 1): x_list[i].backward() self.assertEqual(hook_results[0], None) expected_grad[i] = 1.0 self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[i].grad) def test_hook_with_no_name(self): # Create a hook that do not have a __name__ attribute class MyHookClass: def __call__(self, grad): return grad.clone() x = torch.randn(5, requires_grad=True).clone() x.register_hook(MyHookClass()) x.sum().backward() # Should run fine def test_sharded_grad(self): leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)] intermediates = [l * i + l * l for i, l in enumerate(leaves)] loss = sum(v * i for i, v in enumerate(intermediates)).sum() # define a helper for dividing intermediates into groups def group(l, group_size): return (l[i:i + group_size] for i in range(0, len(l), group_size)) # Compute the d loss / d intermediates in chunks of shard_size shard_size = 2 d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size) for d_i in torch.autograd.grad(loss, intermediates_batch)] # Compute rest of backward pass torch.autograd.backward(intermediates, d_intermediates) for i, l in enumerate(leaves): self.assertEqual(l.grad, i * i * (1 + l)) def test_backward_badcalls(self): x = torch.ones(1) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): x.backward() def test_grad_badcalls(self): x = torch.ones(1) y = x ** 2 with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(x, y) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(y, x) x = torch.ones(1, requires_grad=True) y = x ** 2 torch.autograd.grad(y, x) # this should succeed now def test_grad_fn_badcalls(self): error_regex = 'expected .* arguments, got .* instead' x = torch.ones(1, requires_grad=True) y = x ** 2 with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn(x.detach(), x.detach()) # too many with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn() # too few y.grad_fn(x.detach()) # this should succeed def test_grad_unreachable(self): x = torch.ones(1, requires_grad=True) y = torch.ones(1, requires_grad=True) # Make sure x and y have grad accumulators allocated z = x * 2 w = y * 2 grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_y) # This is slightly different than the case above, because z doesn't even # have a grad accumulator allocated. 
z = torch.ones(1, requires_grad=True) grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_z) # allow_unused=False, but grads contains None inside, should throw with self.assertRaisesRegex(RuntimeError, "Set allow_unused=True"): grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False) def test_hooks(self): x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5) * 4 y.requires_grad_(True) counter = [0] def bw_hook(inc, grad): self.assertIsInstance(grad, torch.Tensor) counter[0] += inc z = x ** 2 + x * 2 + x * y + y x.register_hook(lambda *args: bw_hook(0, *args)) test = z.register_hook(lambda *args: bw_hook(1, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 1) test2 = z.register_hook(lambda *args: bw_hook(2, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 4) test2.remove() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 5) def bw_hook_modify(grad): return grad.mul(2) test.remove() z.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(y.grad, (x + 1) * 2) y.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5)) self.assertEqual(y.grad, (x + 1) * 4) def test_hooks_cpp(self): # Tests hooks for autograd function implemented in C++ bn = torch.nn.BatchNorm1d(5, affine=False) bn.eval() counter = [0] def bw_hook(grad): counter[0] += 1 return grad * 2 x = torch.ones(5, 5, requires_grad=True) z = bn(x) z.register_hook(bw_hook) z.sum().backward() self.assertEqual(counter[0], 1, msg='bw_hook not called') self.assertEqual(x.grad, torch.ones(5, 5) * 2, atol=1e-5, rtol=0) def test_hook_none(self): # WARNING: this is a test for autograd internals. # You should never have to use such things in your code. 
        class NoneGradientFunction(Function):
            @staticmethod
            def forward(ctx, x, y):
                assert ctx.needs_input_grad[0]
                assert not ctx.needs_input_grad[1]
                return x, y

            @staticmethod
            def backward(ctx, grad_x, grad_y):
                return grad_x, None

        was_called = [False]

        def hook(grad):
            self.assertIsNotNone(grad)
            was_called[0] = True

        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5)
        rx, ry = NoneGradientFunction.apply(x, y)
        rx.register_hook(hook)
        ry.register_hook(hook)
        sum(rx, ry).sum().backward()
        self.assertTrue(was_called[0])

    def test_retain_grad(self):
        input = torch.rand(1, 3, requires_grad=True)
        h1 = input * 3
        out = (h1 * h1).sum()

        # It should be possible to call retain_grad() multiple times
        h1.retain_grad()
        h1.retain_grad()

        # Gradient should be accumulated
        out.backward(retain_graph=True)
        self.assertEqual(h1 * 2, h1.grad)
        out.backward(retain_graph=True)
        self.assertEqual(h1 * 4, h1.grad)

        with torch.no_grad():
            input.grad.zero_()
        # It should be a no-op for leaves
        input.retain_grad()
        input.retain_grad()
        out.backward()
        self.assertEqual(input * 18, input.grad)

    def test_retain_grad_cycle(self):
        import gc
        import weakref
        counter = [0]
        refs = [None]

        x = torch.ones(5, 5, requires_grad=True)

        def run_test():
            y = x * 2
            y.retain_grad()

            def inc(*args):
                counter[0] += 1
            refs[0] = weakref.ref(y, inc)
            return y / 2

        z = run_test()
        gc.collect()
        self.assertIsNone(refs[0]())
        self.assertEqual(counter[0], 1)
        z.sum().backward()

    def test_backward(self):
        v = torch.randn(5, 5, requires_grad=True)
        x = torch.randn(5, 5, requires_grad=True)
        y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
        z = torch.randn(5, 5, requires_grad=True)
        grad_output = torch.randn(5, 5)

        v.backward(grad_output)
        self.assertEqual(v.grad, grad_output)

        a = x + (y * z) + 4 * z ** 2 * x / y
        a.backward(grad_output)
        x_grad = 4 * z.pow(2) / y + 1
        y_grad = z - 4 * x * z.pow(2) / y.pow(2)
        z_grad = 8 * x * z / y + y
        self.assertEqual(x.grad, x_grad * grad_output)
        self.assertEqual(y.grad, y_grad * grad_output)
        self.assertEqual(z.grad, z_grad * grad_output)

    def test_sparse_backward(self):
        class FixedGradientFunction(Function):
            @staticmethod
            def forward(ctx, x, grad_x):
                ctx.save_for_backward(grad_x)
                return x

            @staticmethod
            def backward(ctx, grad_x):
                saved_grad_x, = ctx.saved_tensors
                return saved_grad_x, None

        size = torch.Size([6, 3, 2])
        i1 = torch.LongTensor([
            [0, 3, 4],
            [0, 2, 2],
        ])
        v1 = torch.DoubleTensor([[1, 2], [4, 5], [7, 8]])
        sparse_grad1 = torch.sparse.DoubleTensor(i1, v1, size)
        i2 = torch.LongTensor([
            [0, 1, 3, 4],
            [0, 1, 2, 2],
        ])
        v2 = torch.DoubleTensor([[1, 2], [4, 3], [4, 5], [7, 8]])
        sparse_grad2 = torch.sparse.DoubleTensor(i2, v2, size)
        dense_grad = torch.rand(size).double()
        fn = FixedGradientFunction

        # sparse first
        x = torch.randn(size, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # dense first
        x = torch.randn(size, requires_grad=True)
        (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # sparse only
        x = torch.randn(size, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)

    def test_sparse_mm_backward(self):
        size = (3, 3)
        sparse = torch.sparse_coo_tensor(size, requires_grad=True)
        dense = torch.randn(size, requires_grad=True)

        z = sparse.mm(dense)
        with self.assertRaisesRegex(RuntimeError,
                                    "calculating the gradient of a sparse Tensor argument to mm is not supported."):
            z.sum().backward()

        z = dense.addmm(sparse, dense)
        with self.assertRaisesRegex(RuntimeError,
                                    "calculating the gradient of a sparse Tensor argument to mm is not supported."):
            z.sum().backward()

    def test_multi_backward(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        q = torch.randn(5, 5, requires_grad=True)

        a = torch.randn(5, 5, requires_grad=True)
        b = torch.randn(5, 5, requires_grad=True)

        q2 = q * 2
        z = x + y + q2
        c = a * b + q2
        grad_z = torch.randn(5, 5)
        grad_c = torch.randn(5, 5)
        torch.autograd.backward([z, c], [grad_z, grad_c])

        self.assertEqual(x.grad, grad_z)
        self.assertEqual(y.grad, grad_z)
        self.assertEqual(a.grad, grad_c * b)
        self.assertEqual(b.grad, grad_c * a)
        self.assertEqual(q.grad, (grad_c + grad_z) * 2)

    def test_multi_backward_no_grad(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=False)

        z = x + y
        q = y * 2

        # NB: we currently raise an exception if any arguments to backwards
        # have requires_grad=False and don't have a grad_fn. We may want to
        # relax that check to a warning.
        def call_backwards():
            torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
        self.assertRaises(RuntimeError, call_backwards)

    def test_dependent_backward(self):
        x = torch.randn(10, requires_grad=True)
        y = x ** 2
        z = y ** 3

        go_y = torch.randn(10)
        go_z = torch.randn(10)
        torch.autograd.backward([y, z], [go_y, go_z])

        xd = x
        self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)

    def test_save_output_nr(self):
        x = torch.randn(10, requires_grad=True)

        class MultiOutputFn(Function):
            @staticmethod
            def forward(ctx, x):
                return x[:5], x[5:]

            @staticmethod
            def backward(ctx, *grad):
                return torch.cat(grad)

        a, b = MultiOutputFn.apply(x)
        self.assertEqual(b.output_nr, 1)

        class TestFn(Function):
            @staticmethod
            def forward(ctx, b):
                ctx.save_for_backward(b)
                return b * 2

            @staticmethod
            def backward(ctx, grad_b):
                b, = ctx.saved_tensors
                self.assertEqual(b.output_nr, 1)

        TestFn.apply(b).sum().backward()

    def test_free_deep_graph(self):
        def scope():
            depth = 150000
            x = torch.randn(1, requires_grad=True)
            y = x.clone()

            # build a "chain" computation graph
            for _ in range(depth):
                y = y + y * 0.000001

            # graph deletion occurs when the above locals go out of scope.
            # In this case `del y` will trigger it but it's easier to leave
            # it to Python to delete the locals.

        # Should not stack overflow
        scope()

    def test_free_deep_graph_complicated(self):
        def scope():
            depth = 100000
            randchoice = torch.randint(2, [depth, 2])
            x = torch.randn(1, requires_grad=True)
            y = x.clone()

            # Hold the two previous values
            prev_values = [None, None]

            # Build a "chain with skip connections" graph
            for _ in range(depth):
                prev_tensors = [tensor for tensor in prev_values[:-1]
                                if tensor is not None]
                prev_values.append(y)
                prev_values.pop(0)

                # Definitely pick one tensor to add
                y += y * 0.000001

                # Possibly add other tensors
                nprev = len(prev_tensors)
                if nprev == 2:
                    y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()

            # graph deletion occurs when the above locals go out of scope.
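            # (Destroying such a graph is effectively a deep recursive
            # traversal; this guards against the destructor overflowing
            # the C stack.)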
        # Should not stack overflow
        scope()

    def test_free_deep_graph_pyfunction(self):
        class MyOp(Function):
            @staticmethod
            def forward(ctx, tensor1, tensor2):
                return tensor1 + tensor2

            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, grad_output

        def scope():
            depth = 150000
            x = torch.randn(1, requires_grad=True)
            y = x.clone()

            # build deeply nested computation graph
            for _ in range(depth):
                y = MyOp.apply(y, y)

            # graph deletion occurs when the above locals go out of scope.

        # Should not stack overflow
        scope()

    def test_no_unnecessary_save(self):
        # If we kept x in the derivative Function of x * 2 we would
        # get an error in the backward that would complain that we've
        # modified x, which was needed for gradient computation.
        # Since we should elide unnecessary saves, this test should pass.
        mu = torch.ones(1, requires_grad=True)
        x = torch.empty(1)
        loss = 0
        for i in range(3):
            x.detach_()
            x.copy_(mu + i)
            ft = torch.tensor([float(i)])
            multiplied = x * ft
            s = multiplied.sum()
            loss += s
        loss.backward()

    def test_no_grad(self):
        x = torch.ones(5, 5, requires_grad=True)
        y = torch.ones(5, 5) * 4
        with torch.no_grad():
            w = x + y

        @torch.no_grad()
        def adder(x, y):
            return x + y

        z = adder(x, y)

        self.assertFalse(w.requires_grad)
        self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
        self.assertIsNone(w.grad_fn)
        self.assertFalse(z.requires_grad)
        self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
        self.assertIsNone(z.grad_fn)

        # test nested decorator and with-statement on no_grad
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            w = adder(x, y)
            self.assertFalse(torch.is_grad_enabled())

    def test_set_grad_generator_functions(self):
        @torch.no_grad()
        def gen_no_grad():
            for i in range(10):
                self.assertEqual(torch.is_grad_enabled(), False)
                yield i

        with torch.enable_grad():
            for _ in gen_no_grad():
                self.assertEqual(torch.is_grad_enabled(), True)

        @torch.enable_grad()
        def gen_enable_grad():
            for i in range(10):
                self.assertEqual(torch.is_grad_enabled(), True)
                yield i

        with torch.no_grad():
            for _ in gen_enable_grad():
                self.assertEqual(torch.is_grad_enabled(), False)

    def test_no_grad_python_function(self):
        """Python Functions should respect grad mode."""
        x = torch.ones(5, 5, requires_grad=True)

        class MyOp(Function):
            @staticmethod
            def forward(self, x):
                return x + 1

            @staticmethod
            def backward(self, dy):
                return dy

        with torch.no_grad():
            y = MyOp.apply(x)
        self.assertFalse(y.requires_grad)

    def test_indexing(self):
        x = torch.arange(1., 17).view(4, 4)
        y = Variable(x, requires_grad=True)

        def compare(x, y, idx, indexed_tensor, indexed_var):
            indexed_var_t = indexed_var.data
            if not isinstance(indexed_tensor, torch.Tensor):
                indexed_var_t = indexed_var_t[0]
            self.assertEqual(indexed_tensor, indexed_var_t)

            indexed_var.sum().backward()
            expected_grad = torch.Tensor(x.size()).fill_(0)
            expected_grad[idx] = 1
            self.assertEqual(y.grad, expected_grad)

        def check_index(x, y, idx):
            if y.grad is not None:
                with torch.no_grad():
                    y.grad.zero_()
            indexed_tensor = x[idx]
            indexed_var = y[idx]
            compare(x, y, idx, indexed_tensor, indexed_var)

        check_index(x, y, 1)
        check_index(x, y, (1, 1))
        check_index(x, y, slice(1, None))
        check_index(x, y, slice(None, 2))
        check_index(x, y, (slice(None, 2), 2))
        check_index(x, y, (slice(1, 2), 2))
        check_index(x, y, (1, slice(2, None)))
        check_index(x, y, (slice(None, None), slice(2, None)))
        check_index(x, y, torch.LongTensor([0, 2]))
        check_index(x, y, torch.rand(4, 4).bernoulli().bool())
        check_index(x, y, (Ellipsis, slice(2, None)))
        check_index(x, y, ([0], [0]))
        check_index(x, y, ([1, 2, 3], [0]))
        check_index(x, y, ([1, 2], [2, 1]))
        check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
        check_index(x, y, ([slice(None), [2, 3]]))
        check_index(x, y, ([[2, 3], slice(None)]))

        # advanced indexing, with less dim, or ellipsis
        check_index(x, y, ([0]))
        check_index(x, y, ([0], ))

        x = torch.arange(1., 49).view(4, 3, 4)
        y = Variable(x, requires_grad=True)

        check_index(x, y, (slice(None), [0], [0]))
        check_index(x, y, ([0], [0], slice(None)))
        check_index(x, y, (slice(None), [0, 1, 2], [0]))
        check_index(x, y, ([0, 1, 2], [0], slice(None)))
        check_index(x, y, (slice(None), [1, 2], [2, 1]))
        check_index(x, y, ([1, 2], [2, 1], slice(None)))
        check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
        check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
        check_index(x, y, (slice(None), slice(None), [2, 1]))
        check_index(x, y, (slice(None), [2, 1], slice(None)))
        check_index(x, y, ([2, 1], slice(None), slice(None)))

        # advanced indexing, with less dim, or ellipsis
        check_index(x, y, ([0], ))
        check_index(x, y, ([0], slice(None)))
        check_index(x, y, ([0], Ellipsis))
        check_index(x, y, ([1, 2], [0, 1]))
        check_index(x, y, ([1, 2], [0, 1], Ellipsis))
        check_index(x, y, (Ellipsis, [1, 2], [0, 1]))

        # advanced indexing, with a tensor wrapped in a variable
        z = torch.LongTensor([0, 1])
        zv = Variable(z, requires_grad=False)
        seq = [z, Ellipsis]
        seqv = [zv, Ellipsis]

        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[seq]
        indexed_var = y[seqv]
        compare(x, y, seq, indexed_tensor, indexed_var)

    def test_indexing_duplicates(self):
        x = torch.arange(1., 17).view(4, 4)
        y = Variable(x, requires_grad=True)

        idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
        y[idx].sum().backward()
        expected_grad = torch.zeros(4, 4)
        for i in idx:
            expected_grad[i] += 1
        self.assertEqual(y.grad, expected_grad)

        # with advanced indexing
        x = torch.arange(1., 17).view(4, 4)
        y = Variable(x, requires_grad=True)

        idx = [[1, 1, 3, 2, 1, 2], [0]]
        y[idx].sum().backward()
        expected_grad = torch.zeros(4, 4)
        for i in idx[0]:
            for j in idx[1]:
                expected_grad[i][j] += 1
        self.assertEqual(y.grad, expected_grad)

        x = torch.arange(1., 17).view(4, 4)
        y = Variable(x, requires_grad=True)
        idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
        y[idx].sum().backward()
        expected_grad = torch.Tensor([[0, 2, 0, 0],
                                      [1, 0, 0, 0],
                                      [0, 1, 0, 0],
                                      [0, 0, 0, 0]])
        self.assertEqual(y.grad, expected_grad)

        x = torch.arange(1., 65).view(4, 4, 4)
        y = Variable(x, requires_grad=True)

        idx = [[1, 1, 1], slice(None), slice(None)]
        y[idx].sum().backward()
        expected_grad = torch.Tensor(4, 4, 4).zero_()
        expected_grad[1].fill_(3)
        self.assertEqual(y.grad, expected_grad)

    def test_index_backward_does_not_save_tensor(self):
        # Example from https://github.com/pytorch/pytorch/issues/24853.
        # if `index(tensor, indices)` saves `tensor` for backwards, then it will
        # trigger a version check on `tensor` during the backward pass, which
        # will cause the following code to error because `tensor` gets modified
        # by the indexing line.
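        # The backward() call below therefore succeeds only if index() saves
        # the indices but not `tensor` itself.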
        a = torch.tensor([1., 0, 0])
        b = torch.zeros(3, requires_grad=True)
        tensor = b + 0
        tensor[a != 0] = tensor[a != 0]
        tensor.backward(torch.zeros_like(tensor))

    def test_volatile_deprecated(self):
        v = torch.autograd.torch.randn(3, 3)
        with warnings.catch_warnings(record=True) as w:
            self.assertFalse(v.volatile)
        self.assertIn('volatile', str(w[0].message))

    def test_saved_variables_deprecated(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, tensor1, tensor2):
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + tensor2

            @staticmethod
            def backward(ctx, grad_output):
                var1, var2 = ctx.saved_variables
                return (grad_output, grad_output)

        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter("always")
            x = torch.randn((3, 3), requires_grad=True)
            y = torch.randn((3, 3), requires_grad=True)
            model = MyFunction()
            model.apply(x, y).sum().backward()

            has_deprecated = map(lambda warn:
                                 'deprecated' in str(warn) and
                                 'saved_variables' in str(warn),
                                 warns)
            has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
            self.assertTrue(has_deprecated)

    def test_requires_grad(self):
        x = torch.randn(5, 5)
        y = torch.randn(5, 5)
        z = torch.randn(5, 5, requires_grad=True)
        a = x + y
        self.assertFalse(a.requires_grad)
        b = a + z
        self.assertTrue(b.requires_grad)

        def error():
            raise RuntimeError
        # Make sure backward isn't called on these
        a._backward_hooks = OrderedDict()
        x._backward_hooks = OrderedDict()
        y._backward_hooks = OrderedDict()
        a._backward_hooks['test'] = error
        x._backward_hooks['test'] = error
        y._backward_hooks['test'] = error
        b.backward(torch.ones(5, 5))

    def test_requires_grad_(self):
        x = torch.randn(5, 5)
        y = torch.randn(5, 5, requires_grad=True)
        self.assertIs(x, x.requires_grad_())
        self.assertTrue(x.requires_grad)
        self.assertIs(y, y.requires_grad_())
        self.assertTrue(y.requires_grad)
        self.assertIs(x, x.requires_grad_(True))
        self.assertTrue(x.requires_grad)
        self.assertIs(y, y.requires_grad_(True))
        self.assertTrue(y.requires_grad)
        z = x * y
        self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
        self.assertIs(z, z.requires_grad_())
        self.assertTrue(z.requires_grad)
        self.assertIs(z, z.requires_grad_(True))
        self.assertTrue(z.requires_grad)

        self.assertIs(x, x.requires_grad_(False))
        self.assertFalse(x.requires_grad)
        self.assertIs(y, y.requires_grad_(False))
        self.assertFalse(y.requires_grad)

    def test_requires_grad_inplace(self):
        a = torch.randn(5, 5)
        b = torch.randn(5, 5, requires_grad=True)
        a += b
        self.assertTrue(a.requires_grad)

        # non-leaf
        a = torch.randn(5, 5) + 0
        b = torch.randn(5, 5, requires_grad=True)
        a += b
        self.assertTrue(a.requires_grad)

    def test_no_requires_grad_inplace(self):
        # basic case, should be able to modify inplace while requires_grad is False
        a = torch.randn(2, 3)
        a.add_(5)
        a.requires_grad = True
        a.sum().backward()
        self.assertEqual(a.grad, torch.ones(2, 3))

        # same but with a view
        a = torch.randn(2, 3)
        b = a[:]
        b.add_(5)
        a.requires_grad = True
        a.sum().backward()
        self.assertEqual(a.grad, torch.ones(2, 3))

        # should fail if requires_grad = True when we modify inplace
        a = torch.randn(2, 3)
        b = a[:]
        a.requires_grad = True
        with self.assertRaises(RuntimeError):
            a.add_(5)
        with self.assertRaises(RuntimeError):
            b.add_(5)

    def test_attribute_deletion(self):
        x = torch.randn((5, 5), requires_grad=True)
        del x.grad
        self.assertIsNone(x.grad)
        with self.assertRaises(RuntimeError):
            del x.data
        with self.assertRaises(TypeError):
            x.data = None
        with self.assertRaises(RuntimeError):
            del x.requires_grad
        with self.assertRaises(RuntimeError):
            del x._grad_fn
        with self.assertRaises(RuntimeError):
            del x._backward_hooks

    def test_duplicate_backward_root(self):
        a = torch.randn(5, 5, requires_grad=True)
        b = torch.randn(5, 5, requires_grad=True)

        x = a * b
        grad_output = torch.randn_like(x)
        torch.autograd.backward([x, x], [grad_output, grad_output])

        self.assertEqual(a.grad, b * grad_output * 2)
        self.assertEqual(b.grad, a * grad_output * 2)

    def test_backward_no_grad(self):
        a = torch.randn(5, 5, requires_grad=True)
        b = a + 2
        with self.assertRaises(RuntimeError):
            torch.autograd.backward([b], [None])

    def test_backward_twice_with_saved_values(self):
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = torch.zeros(3, dtype=torch.double)
        c[[1, 2]] = b[[1, 1]]
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
        self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
                               lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))

    def test_backward_twice_retained_graph_with_saved_values(self):
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = torch.zeros(3, dtype=torch.double)
        c[[1, 2]] = b[[1, 1]]
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double))

    def test_backward_twice_without_saved_values(self):
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = b + 1
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double))

    def test_backward_twice_retained_graph_without_saved_values(self):
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = torch.zeros(3, dtype=torch.double)
        c[[1, 2]] = b[[1, 1]]
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
        c.backward(torch.tensor([1, 1, 1], dtype=torch.double))

    def test_next_functions(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        a = x + y
        self.assertIsNotNone(a.grad_fn)
        next_functions = a.grad_fn.next_functions
        self.assertEqual(len(next_functions), 2)
        self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
        self.assertEqual(next_functions[0][1], 0)
        self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
        self.assertEqual(next_functions[1][1], 0)

        b = a + 5
        next_functions = b.grad_fn.next_functions
        self.assertEqual(len(next_functions), 2)
        self.assertIs(next_functions[0][0], a.grad_fn)
        self.assertIs(next_functions[1][0], None)

    def test_inplace(self):
        x = torch.ones(5, 5, requires_grad=True)
        y = Variable(torch.ones(5, 5) * 4, requires_grad=True)

        z = x * y
        q = z + y
        w = z * y
        z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
        q.backward(torch.ones(5, 5), retain_graph=True)
        # Mul saves both inputs in forward, so it should raise
        self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))

        z = x * y
        q = z * y
        r = z + y
        w = z.add_(y)
        # w is the last expression, so this should succeed
        w.backward(torch.ones(5, 5), retain_graph=True)
        # r doesn't use the modified value in backward, so it should succeed
        r.backward(torch.ones(5, 5), retain_graph=True)
        # q uses dirty z, so it should raise
        self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

        with torch.no_grad():
            x.grad.zero_()
        m = x / 2
        z = m + y / 8
        q = z * y
        r = z + y
        prev_version = z._version
        w = z.exp_()
        self.assertNotEqual(z._version, prev_version)
        r.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(x.grad, torch.ones(5, 5) / 2)
        w.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(x.grad, torch.Tensor(5, 5).fill_((1 + math.e) / 2))
        self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

        leaf = torch.ones(5, 5, requires_grad=True)
        x = leaf.clone()
        x.add_(10)
        self.assertEqual(x, torch.ones(5, 5) * 11)
        # x should be still usable
        y = x + 2
        y.backward(torch.ones(5, 5))
        self.assertEqual(leaf.grad, torch.ones(5, 5))
        z = x * y
        x.add_(2)
        self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))

    def test_mark_non_differentiable(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                output = input > 0
                ctx.mark_non_differentiable(output)
                return output

            @staticmethod
            def backward(ctx, grad_output):
                return (grad_output * 0).to(torch.double)

        x = torch.randn(5, 5, requires_grad=True)
        mask = MyFunction.apply(x)
        self.assertFalse(mask.requires_grad)
        y = x.masked_fill(mask, 0)
        y.sum().backward()

    def test_mark_non_differentiable_mixed(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                a = input + 1
                b = input + 2
                ctx.mark_non_differentiable(a)
                return a, b

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                self.assertTrue((grad_a == 0).all())
                self.assertTrue((grad_b == 1).all())
                return grad_b

        x = torch.randn(5, 5, requires_grad=True)
        a, b = MyFunction.apply(x)
        self.assertFalse(a.requires_grad)
        self.assertTrue(b.requires_grad)
        b.sum().backward()
        self.assertEqual(x.grad, torch.ones(5, 5))

    def test_mark_non_differentiable_none(self):
        # This used to segfault because MyFunction would send back null
        # gradients to MulBackward, which is implemented in C++. C++
        # implemented functions expect incoming grad_outputs to be non-null.
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                output = input.clone()
                ctx.mark_non_differentiable(output)
                return output

            @staticmethod
            def backward(ctx, grad_output):
                return None

        x = torch.randn(5, 5, requires_grad=True)
        r = MyFunction.apply(x * x)
        (r * x).sum().backward()

    def test_return_duplicate(self):
        class DoubleDuplicate(Function):
            @staticmethod
            def forward(ctx, x):
                output = x * 2
                return output, output

            @staticmethod
            def backward(ctx, grad1, grad2):
                return grad1 * 2 + grad2 * 2

        def fn(x):
            a, b = DoubleDuplicate.apply(x)
            self.assertIs(a, b)
            return a + b

        x = torch.randn(5, 5, requires_grad=True)
        gradcheck(fn, [x])
        gradgradcheck(fn, [x])

    def test_return_duplicate_inplace(self):
        class DoubleInplace(Function):
            @staticmethod
            def forward(ctx, x):
                x.mul_(2)
                ctx.mark_dirty(x)
                return x, x

            @staticmethod
            def backward(ctx, grad1, grad2):
                return grad1 * 2 + grad2 * 2

        def inplace_fn(x):
            a, b = DoubleInplace.apply(x.clone())
            self.assertIs(a, b)
            return a + b

        x = torch.randn(5, 5, requires_grad=True)
        gradcheck(inplace_fn, [x])
        gradgradcheck(inplace_fn, [x])

        # Can't modify leaf variables in-place
        self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
        # Functions which modify views in-place must return only one output
        self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))

    @suppress_warnings
    def test_resize(self):
        x = torch.ones(2, 3)
        self.assertTrue(x.resize(3, 2).size() == (3, 2))

    def _test_setitem(self, size, index):
        x = torch.ones(*size, requires_grad=True)
        y = x + 2
        y_version = y._version
        y[index] = 2
        self.assertNotEqual(y._version, y_version)
        y.backward(torch.ones(*size))
        expected_grad = torch.ones(*size)
        expected_grad[index] = 0
        self.assertEqual(x.grad, expected_grad)

    def _test_setitem_tensor(self, size, index):
        x = torch.ones(*size, requires_grad=True)
        y = x + 2
        y_version = y._version
        value = x.new(x[index].size()).fill_(7)
        value.requires_grad = True
        y[index] = value
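        # Assigning through an index is an in-place op on y, so it must bump
        # y's version counter just like any other mutation.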
        self.assertNotEqual(y._version, y_version)
        y.backward(torch.ones(*size))
        expected_grad_input = torch.ones(*size)
        expected_grad_input[index] = 0
        self.assertEqual(x.grad, expected_grad_input)
        self.assertEqual(value.grad, torch.ones_like(value))

        # case when x is broadcast to y[1]
        x = torch.randn(4, requires_grad=True)
        y = torch.zeros(2, 3, 4)
        y[1] = x
        y.backward(torch.randn(2, 3, 4))
        self.assertEqual(x.size(), x.grad.size())

    def test_setitem(self):
        self._test_setitem((5, 5), 1)
        self._test_setitem((5,), 1)
        self._test_setitem((1,), 0)
        self._test_setitem((10,), [[0, 4, 2]])
        self._test_setitem((5, 5), [[0, 4], [2, 2]])
        self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
        self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
        self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
        self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
        self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
        self._test_setitem_tensor((5, 5), 3)
        self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
        self._test_setitem_tensor((5,), 3)
        self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
        self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
        self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
        self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
        self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
        self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
        self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
        self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1, 3]),
                                              requires_grad=False), [2, 4], slice(None)])

    def test_setitem_mask(self):
        mask = torch.BoolTensor(5, 5).bernoulli_()
        self._test_setitem((5, 5), Variable(mask))
        self._test_setitem((5,), Variable(mask[0]))
        self._test_setitem((1,), Variable(mask[0, 0:1]))
        self._test_setitem_tensor((5, 5), Variable(mask))
        self._test_setitem_tensor((5,), Variable(mask[0]))

    def test_select_sum(self):
        # both select and sum return Scalars in ATen; ensure they work together.
        x = torch.randn(10, requires_grad=True)

        def func(x):
            return x.select(0, 1).sum()

        gradcheck(func, [x])
        gradgradcheck(func, [x])

    def test_stack(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = torch.randn(10, 10, requires_grad=True)
        stacked = torch.stack([x, y, z], 0)
        grad = torch.randn(3, 10, 10)
        stacked.backward(grad)
        self.assertEqual(x.grad, grad[0])
        self.assertEqual(y.grad, grad[1])
        self.assertEqual(z.grad, grad[2])

    def test_hstack(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = torch.randn(10, 10, requires_grad=True)
        stacked = torch.hstack([x, y, z])
        grad = torch.randn(10, 30)
        stacked.backward(grad)
        self.assertEqual(x.grad, grad[:, 0:10])
        self.assertEqual(y.grad, grad[:, 10:20])
        self.assertEqual(z.grad, grad[:, 20:30])

        x = torch.randn(10, requires_grad=True)
        y = torch.randn(10, requires_grad=True)
        z = torch.randn(10, requires_grad=True)
        stacked = torch.hstack([x, y, z])
        grad = torch.randn(30)
        stacked.backward(grad)
        self.assertEqual(x.grad, grad[0:10])
        self.assertEqual(y.grad, grad[10:20])
        self.assertEqual(z.grad, grad[20:30])

    def test_vstack(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = torch.randn(10, 10, requires_grad=True)
        stacked = torch.vstack([x, y, z])
        grad = torch.randn(30, 10)
        stacked.backward(grad)
        self.assertEqual(x.grad, grad[0:10])
        self.assertEqual(y.grad, grad[10:20])
        self.assertEqual(z.grad, grad[20:30])

    def test_dstack(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = torch.randn(10, 10, requires_grad=True)
        stacked = torch.dstack([x, y, z])
        grad = torch.randn(10, 10, 3)
        stacked.backward(grad)
        self.assertEqual(x.grad, grad[:, :, 0])
        self.assertEqual(y.grad, grad[:, :, 1])
        self.assertEqual(z.grad, grad[:, :, 2])

    def test_unbind(self):
        stacked = torch.randn(3, 10, 10, requires_grad=True)
        x, y, z = stacked.unbind()
        grad = torch.randn(3, 10, 10)
        torch.autograd.backward([x, y, z], grad.unbind())
        self.assertEqual(stacked.grad, grad)
        # check that it works with only one gradient provided (#9977)
        for i in range(3):
            stacked = torch.randn(3, 10, 10, requires_grad=True)
            outs = stacked.unbind()
            gi = grad.unbind()[i]
            g, = torch.autograd.grad(outs[i], stacked, gi)
            g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
                                      for j in range(3)], dim=0)
            self.assertEqual(g, g_expected)

    def test_put(self):
        root = torch.randn(4, 5, requires_grad=True)
        values = torch.randn(6, requires_grad=True)
        idx = Variable(torch.LongTensor([1, 2, 3, -1, -2, -3]))

        def func(root, values):
            x = root.clone()
            x.put_(idx, values)
            return x

        gradcheck(func, [root, values])
        gradgradcheck(func, [root, values])

    def test_put_accumulate(self):
        root = torch.randn(4, 5, requires_grad=True)
        values = torch.randn(6, requires_grad=True)
        idx = Variable(torch.LongTensor([1, 2, 3, 1, 2, 3]))

        def func(root, values):
            x = root.clone()
            x.put_(idx, values, accumulate=True)
            return x

        gradcheck(func, [root, values])
        gradgradcheck(func, [root, values])

    def test_fill(self):
        root = torch.randn(4, 5, requires_grad=True)

        def func(root):
            x = root.clone()
            x.fill_(2)
            return x

        gradcheck(func, [root])
        gradgradcheck(func, [root])

    def test_unused_output(self):
        x = torch.randn(10, 10, requires_grad=True)
        outputs = x.chunk(5)
        o = outputs[2]
        o = o * 4 + 2
        o.sum().backward()
        expected_grad = torch.zeros(10, 10)
        expected_grad[4:6] = 4
        self.assertEqual(x.grad, expected_grad)

        with torch.no_grad():
            x.grad.zero_()
        grad_output = torch.randn(2, 10)
        outputs = x.chunk(5)
        outputs[0].backward(grad_output)
        expected_grad = torch.zeros(10, 10)
        expected_grad[:2] = grad_output
        self.assertEqual(x.grad, expected_grad)

    def _test_sparse_gather(self, size_x, size_ind, dim):
        x = torch.randn(size_x, requires_grad=True)
        if len(size_ind) > 0 and len(size_x) > 0:
            ind = torch.randint(x.size(dim), size_ind)
        else:
            ind = torch.zeros(size_ind, dtype=torch.int64)
        out = torch.gather(x, dim, ind, sparse_grad=False)
        grad = torch.rand_like(out)
        out.backward(grad)
        grad_dense = x.grad.clone()
        x.grad = None
        out = torch.gather(x, dim, ind, sparse_grad=True)
        out.backward(grad)
        self.assertEqual(grad_dense, x.grad.to_dense())

    def test_sparse_gather_dim0(self):
        self._test_sparse_gather((10, 10), (5, 10), 0)

    def test_sparse_gather_dim1(self):
        self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)

    def test_sparse_gather_dim_neg(self):
        self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)

    def test_sparse_gather_ind_scalar(self):
        self._test_sparse_gather((10,), (), 0)

    def test_sparse_gather_x_scalar(self):
        self._test_sparse_gather((), (2,), 0)

    def test_sparse_gather_both_scalar(self):
        self._test_sparse_gather((), (), 0)

    def test_gc_in_destructor(self):
        """
        Previously, if a Function destructor triggered a garbage collection,
        the Variable's tp_dealloc handler would get called twice leading to a
        segfault.
        """
        class CollectOnDelete(Function):
            def forward(self, x):
                return x

            def backward(self, grad_output):
                return grad_output

            def __del__(self):
                gc.collect()

        for _ in range(10):
            CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()

    # Delete this test when legacy custom autograd functions are deleted.
    def test_naughty_legacy_variable_grad_fn(self):
        class Id(Function):
            def forward(self, x):
                return x

            def backward(self, grad_x):
                return grad_x

        self.assertRaises(RuntimeError, lambda: Variable(torch.zeros(1), _grad_fn=Id()))

    # Delete this test when legacy custom autograd functions are deleted.
    def test_naughty_legacy_function_backward_before_forward(self):
        class Id(Function):
            def forward(self, x):
                return x

            def backward(self, grad_x):
                return grad_x

        f = Id()
        self.assertRaises(RuntimeError, lambda: f._do_backward((torch.zeros(0), ), False))

    # Delete this test when legacy custom autograd functions are deleted.
    def test_naughty_legacy_function_early_access(self):
        class Id(Function):
            def forward(self, x):
                return x

            def backward(self, grad_x):
                return grad_x

        f = Id()
        # A legacy autograd function is not fully initialized until you actually
        # apply it.  That means a lot of accessors on them don't actually work.
        # Test that we properly error in this case.
        self.assertRaises(RuntimeError, lambda: f.register_hook(lambda x, y: None))
        self.assertRaises(RuntimeError, lambda: f.next_functions)
        self.assertRaises(RuntimeError, lambda: f.metadata)

    @unittest.expectedFailure
    def test_naughty_anomaly_access(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, g):
                return g

        x = torch.zeros(1, requires_grad=True)
        y = MyFunction.apply(x)
        y.backward()
        y.grad_fn.metadata
        g = y.grad_fn
        del y
        g.metadata  # this currently fails, but shouldn't

    def test_naughty_autograd_function_stashing_ctx(self):
        saved_ctx = []

        class Id(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.save_for_backward(x)
                return x

            @staticmethod
            def backward(ctx, grad_x):
                saved_ctx.append(ctx)
                return ctx.saved_tensors

        p = torch.zeros(1, requires_grad=True)
        loss = Id.apply(p)
        loss.backward(retain_graph=True)
        del loss
        # At this point in time, it complains that the graph has been freed
        # (which is indeed true, although a somewhat indirect way of stating
        # the problem).
        self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)

    def test_custom_autograd_repeated_grad_grad(self):
        # This test failed the equality check in PR #22983; it's an interesting
        # and different test case worth enshrining.  mult1 is not testing
        # anything that interesting, but mult2 is the interesting case.
        def mult1(x):
            return x.prod(dim=-1).prod(dim=-1)

        class Mult(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                y = mult1(x)
                ctx.save_for_backward(x, y)
                return y

            @staticmethod
            def backward(ctx, grad_output):
                x, y = ctx.saved_tensors
                return (grad_output * y)[:, None, None] / x

        mult2 = Mult.apply

        def check_gradgrad_repeated(x, y):
            gy, = torch.autograd.grad(y[0], x, create_graph=True)
            ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
            gy, = torch.autograd.grad(y[0], x, create_graph=True)
            ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
            self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])

        x = torch.ones(2, 4, 4).requires_grad_()
        check_gradgrad_repeated(x, mult1(x))
        check_gradgrad_repeated(x, mult2(x))

    def test_custom_autograd_no_early_free(self):
        # This test failed complaining that buffers had already been freed
        # prior to #22983.  Also pretty interesting test case.
        class Double(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                y = x ** 2
                ctx.save_for_backward(x, y)
                return y

            @staticmethod
            def backward(ctx, grad_output):
                x, _ = ctx.saved_tensors
                return grad_output * 2 * x

        # this is equivalent, but uses the output of .forward() in .backward()
        class Double2(Double):
            @staticmethod
            def backward(ctx, grad_output):
                x, y = ctx.saved_tensors
                return grad_output * 2 * y / x

        double = Double.apply
        double2 = Double2.apply

        x = torch.tensor(2).double().requires_grad_()

        self.assertTrue(torch.autograd.gradcheck(double, x))
        self.assertTrue(torch.autograd.gradgradcheck(double, x))
        self.assertTrue(torch.autograd.gradcheck(double2, x))
        self.assertTrue(torch.autograd.gradgradcheck(double2, x))

        y = double(x)
        torch.autograd.grad(y, x, create_graph=True)
        torch.autograd.grad(y, x)

        y = double2(x)
        torch.autograd.grad(y, x, create_graph=True)
        torch.autograd.grad(y, x)  # should not error!
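    # detach() produces a tensor that shares storage with its source but is
    # treated as a constant by autograd; the tests below pin down both the
    # out-of-place and in-place (detach_) semantics, including views.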
    def test_detach(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = x + 2
        y = y.detach()
        z = y * 4 + 2
        self.assertFalse(y.requires_grad)
        self.assertFalse(z.requires_grad)

        x = torch.randn(10, 10, requires_grad=True)
        y = x * 2
        y = y.detach()
        self.assertFalse(y.requires_grad)
        self.assertIsNone(y.grad_fn)
        z = x + y
        z.sum().backward()
        # This is an incorrect gradient, but we assume that's what the user
        # wanted. detach() is an advanced option.
        self.assertEqual(x.grad, torch.ones(10, 10))

        # in-place detach
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        a = x * 2
        (y + a).sum().backward(retain_graph=True)
        a.detach_()
        self.assertFalse(a.requires_grad)
        (y + a).sum().backward()  # this won't backprop to x
        self.assertEqual(x.grad, torch.ones(10, 10) * 2)
        self.assertEqual(y.grad, torch.ones(10, 10) * 2)

        # in-place detach on a view raises an exception
        view = x.narrow(0, 1, 4)
        self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())

    def test_detach_base(self):
        "detaching base does not detach view"
        x = torch.randn(10, 10, requires_grad=True)
        view = x.narrow(0, 1, 4)
        x.detach_()
        self.assertFalse(x.requires_grad)
        self.assertTrue(view.requires_grad)
        self.assertIsNotNone(view.grad_fn)
        self.assertIs(view._base, x)

    def _test_type_conversion_backward(self, t, ):
        fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
        fvar.double().sum().backward()
        self.assertEqual(fvar.grad, torch.ones_like(fvar))
        self.assertEqual(type(fvar.grad), type(fvar))
        dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
        dvar.float().sum().backward()
        self.assertEqual(dvar.grad, torch.ones_like(dvar))
        self.assertEqual(type(dvar.grad), type(dvar))

    def test_type_conversions(self):
        x = torch.randn(5, 5)
        self.assertIsInstance(x.float(), torch.FloatTensor)
        self.assertIsInstance(x.int(), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
            self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
            self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
            if torch.cuda.device_count() >= 2:
                x2 = x.float().cuda(1)
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                y = Variable(torch.randn(5).cuda(1), requires_grad=True)
                y.cpu().sum().backward()
                self.assertIs(y.grad.get_device(), 1)
                self.assertIs(y.long().get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            for y_var in (True, False):
                y = torch.randint(5, (5, 5), dtype=t.dtype)
                y = Variable(y) if y_var else y
                self.assertIsInstance(x.type(t), t)
                self.assertIsInstance(x.type_as(y), t)
                # TODO: t.dtype should work
                t_dtype = t().dtype
                self.assertIsInstance(x.type(t_dtype), t)
                self.assertIs(t_dtype, x.type(t_dtype).dtype)
                self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
                if torch.cuda.is_available():
                    for x_cuda in (True, False):
                        for y_cuda in (True, False):
                            x_c = x.cuda() if x_cuda else x
                            y_c = y.cuda() if y_cuda else y
                            _, y_type = y_c.type().rsplit('.', 1)
                            y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                            self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                            self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                            self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())

        self._test_type_conversion_backward(lambda x: x)
        if torch.cuda.is_available():
            self._test_type_conversion_backward(lambda x: x.cuda())
            if torch.cuda.device_count() >= 2:
                # one of these has to be the non-default device
                self._test_type_conversion_backward(lambda x: x.cuda(0))
                self._test_type_conversion_backward(lambda x: x.cuda(1))

    def test_isolated_node(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        a = x + y
        b = torch.max(a, 1, True)[1].repeat(1, 5).double()
        o = (b + a).sum()
        o.backward()

    def test_shape(self):
        x = torch.randn(3, 4)
        self.assertEqual(2, len(x.shape))
        self.assertEqual(x.shape[0], 3)
        self.assertEqual(x.shape[1], 4)

    def test_numpy_requires_grad(self):
        x = torch.randn(2, 2, requires_grad=True)
        err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
        with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
            x.numpy()

        with torch.no_grad():
            x.numpy()

        x = torch.randn(2, 2)
        x.numpy()

        with torch.no_grad():
            x.numpy()

    def test_return_leaf(self):
        class Identity(Function):
            @staticmethod
            def forward(ctx, a, b):
                return a, a + b

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                return grad_a + grad_b, grad_b

        hook_called = [False]
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        q, p = Identity.apply(x, y)

        # Make sure hooks only receive grad from usage of q, not x.
        def hook(grad):
            hook_called[0] = True
            self.assertEqual(grad, torch.ones(5, 5))

        q.register_hook(hook)
        (q + p + x).sum().backward()
        self.assertEqual(x.grad, torch.ones(5, 5) * 3)
        self.assertEqual(y.grad, torch.ones(5, 5))
        self.assertTrue(hook_called[0])

    def test_return_leaf_inplace(self):
        class Inplace(InplaceFunction):
            @staticmethod
            def forward(ctx, a, b):
                ctx.mark_dirty(a)
                return a.add_(b), b + 2

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                return grad_a, grad_a + grad_b

        x = torch.randn(5, 5)
        y = torch.randn(5, 5, requires_grad=True)

        fn = Inplace(True)
        q, p = fn.apply(x, y)
        self.assertIs(q, x)
        self.assertIs(q.grad_fn.__class__, fn._backward_cls)
        self.assertTrue(q.requires_grad)
        q.sum().backward()
        self.assertEqual(y.grad, torch.ones(5, 5))

    def test_leaf_assignment(self):
        x = torch.randn(5, 5)
        y = torch.randn(5, requires_grad=True)
        z = torch.randn(5, requires_grad=True)

        x[0] = y
        x[1] = 2 * z
        self.assertTrue(x.requires_grad)
        self.assertIsNot(x.grad_fn, None)
        x.sum().backward()
        self.assertEqual(y.grad, torch.ones(5))
        self.assertEqual(z.grad, torch.ones(5) * 2)

    def test_no_grad_assignment(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5)
        with torch.no_grad():
            x[0] = y

        self.assertTrue(x.requires_grad)
        self.assertIsNone(x.grad_fn)

    def test_no_grad_modifies_version(self):
        x = torch.randn(5, requires_grad=True)
        y = torch.randn(5, requires_grad=True)
        z = (x * y).sum()
        with torch.no_grad():
            x *= 2
        self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
                               lambda: z.backward())

    def test_no_grad_input(self):
        class MyFunction(Function):
            @staticmethod
            def forward(self, x):
                return x

            @staticmethod
            def backward(self, grad_output):
                return grad_output

        x = torch.randn(5, requires_grad=True)
        with torch.no_grad():
            y = MyFunction.apply(x)

        self.assertTrue(x.requires_grad)
        self.assertIsNone(y.grad_fn)

    def test_backward_copy(self):
        # This test checks the backward engine for a very subtle bug that appeared
        # in one of the initial versions of autograd. Gradient tensors were
        # simply stored in lists while the function waited for all its gradients
        # to be computed. However, sometimes an output was used multiple times,
        # so the gradients needed to be summed. Engine used to keep a need_copy
        # set of tensors that will need a clone upon next addition and removed
        # them from the set as soon as the clone was performed. However, this
        # could lead to incorrect results if the same gradient tensor was
        # buffered in three places in the graph:
        # 1. When accumulating gradients in one of these places it was cloned
        #    and removed from need_copy set.
        # 2. When accumulating in second place, it wasn't in the need_copy set,
        #    so the gradients were simply accumulated in-place (which already
        #    modified the grad in 3rd place)
        # 3. When accumulating in the third place, it wasn't in the need_copy set
        #    either, so the incoming gradient was summed in-place, yielding
        #    incorrect results in all functions, except the first one.
        x = torch.ones(5, 5, requires_grad=True)
        y = torch.ones(5, 5, requires_grad=True)
        # Simulate that we're in the middle of the graph
        a = x + 2
        b = y + 2
        c = x + 2
        # This op will just return grad_output two times in backward
        add1 = a + b
        add2 = add1 + c
        # Simulate a long branch, so grad_output will get buffered.
        for _ in range(4):
            a = a * 2
            b = b * 2
            c = c * 2
        branch = a + b + c
        out = add2 + branch
        # expected gradients are:
        # for x: 34 (16 from final a, 16 from final c, 2 from add2)
        # for y: 17 (16 from final b, 1 from add2)
        grad_output = torch.ones(5, 5)
        out.backward(grad_output)
        self.assertEqual(x.grad, torch.ones(5, 5) * 34)
        self.assertEqual(y.grad, torch.ones(5, 5) * 17)

    def test_save_none_for_backward(self):
        test_case = self

        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(None, input, None)
                return input * input

            @staticmethod
            def backward(ctx, grad_output):
                n1, input, n2 = ctx.saved_tensors
                test_case.assertIsNone(n1)
                test_case.assertIsNone(n2)
                return 2 * input * grad_output

        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        self.assertEqual(x.grad, 2 * x)

    def test_too_many_grads(self):
        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, None, None

        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        self.assertEqual(x.grad, torch.ones_like(x))

    def test_pickle(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=False)

        def assert_strict_equal(var1, var2):
            self.assertEqual(var1, var2)
            self.assertEqual(var1.requires_grad, var2.requires_grad)

        serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
        for dump in serialized:
            xc, yc = pickle.loads(dump)
            assert_strict_equal(xc, x)
            assert_strict_equal(yc, y)

    def test_dep_nograd(self):
        class F1(Function):
            @staticmethod
            def forward(ctx, input):
                out = torch.randn(input.size())
                ctx.mark_non_differentiable(out)
                return input, out

            @staticmethod
            def backward(ctx, grad_output, ignored):
                return grad_output

        class F2(Function):
            @staticmethod
            def forward(ctx, input, ignored):
                return input

            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, None

        x = torch.randn(5, requires_grad=True)
        a, b = F1.apply(x)
        b = b + 1  # separate F1 from F2 by another op
        self.assertTrue(a.requires_grad)
        self.assertFalse(b.requires_grad)
        c = F2.apply(a, b)
        c.backward(torch.ones(c.size()))
        self.assertEqual(x.grad, torch.ones(x.size()))

    def test_set_grad_enabled(self):
        x = torch.tensor([1.], requires_grad=True)
        with torch.set_grad_enabled(False):
            y = x * 2
        self.assertFalse(y.requires_grad)
        with torch.set_grad_enabled(True):
            y = x * 2
        self.assertTrue(y.requires_grad)
        with torch.set_grad_enabled(False):
            torch.set_grad_enabled(True)
            y = x * 2
        self.assertTrue(y.requires_grad)

    def test_simple_reentrant(self):
        y_data = torch.randn(2, 2)

        class Reenter(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x, requires_grad=True)
                    ctx.y = Variable(y_data, requires_grad=True)
                    ctx.output_var = ctx.x * ctx.y
                return ctx.output_var.detach()

            @staticmethod
            def backward(ctx, grad_output):
                with torch.enable_grad():
                    ctx.output_var.sum().backward()
                return ctx.x.grad * grad_output

        # Reentrant starts on CPU thread, finishes on GPU thread
        x = torch.randn(2, 2, requires_grad=True)
        out = Reenter.apply(x)
        out.sum().backward()
        self.assertEqual(x.grad, y_data)

    def test_reentrant_child_error(self):
        # Parent graph.
        a = torch.rand(3, 3, requires_grad=True)
        c = a * a

        # Reentrant child graph.
        b = torch.rand(3, 3, requires_grad=True)
        e = b * b
        f = TestAutograd.SimulateBackwardError.apply(e)
        reentrant_root = f.sum()

        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()

            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will throw an error.
                reentrant_root.backward()
                return grad

        d = ReentrantFunc.apply(c)
        with self.assertRaisesRegex(Exception, 'Simulate error'):
            d.sum().backward()

    def test_broadcast_tensors(self):
        f_args_variable = (torch.randn(3, requires_grad=True),
                           torch.randn(1, 2, 1, requires_grad=True),
                           torch.randn(1, 1, requires_grad=True),
                           torch.randn(5, 1, 1, requires_grad=True))
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_broadcast_tensors", "broadcast",
                              lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
                              True, f_args_variable, f_args_tensor)

    def test_block_diag(self):
        f_args_variable = (torch.randn(1, S, requires_grad=True),
                           torch.randn(2, S, requires_grad=True),
                           torch.randn(3, S, requires_grad=True))
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_block_diag", "block_diag",
                              lambda a, b, c: torch.block_diag(a, b, c),
                              True, f_args_variable, f_args_tensor)

    def test_cat(self):
        f_args_variable = (torch.randn(1, S, S, requires_grad=True),
                           torch.randn(2, S, S, requires_grad=True),
                           torch.randn(3, S, S, requires_grad=True),
                           0)
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_cat", "cat",
                              lambda a, b, c, dim: torch.cat((a, b, c), dim),
                              True, f_args_variable, f_args_tensor)

    def test_cat_negdim_1(self):
        f_args_variable = (torch.randn(S, S, 1, requires_grad=True),
                           torch.randn(S, S, 2, requires_grad=True),
                           torch.randn(S, S, 3, requires_grad=True),
                           -1)
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_cat_negdim_1", "cat",
                              lambda a, b, c, dim: torch.cat((a, b, c), dim),
                              True, f_args_variable, f_args_tensor)

    def test_cat_negdim_2(self):
        f_args_variable = (torch.randn(S, 1, S, requires_grad=True),
                           torch.randn(S, 2, S, requires_grad=True),
                           torch.randn(S, 3, S, requires_grad=True),
                           -2)
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_cat_negdim_2", "cat",
                              lambda a, b, c, dim: torch.cat((a, b, c), dim),
                              True, f_args_variable, f_args_tensor)

    def test_cat_empty_legacy(self):
        f_args_variable = (torch.randn(0, requires_grad=True),
                           torch.randn(S, S, requires_grad=True))
        # gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
        # hence False passed below, but gradcheck checked explicitly.
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_cat_empty_legacy", "cat",
                              lambda a, b: torch.cat((a, b)),
                              False, f_args_variable, f_args_tensor)
        self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))

    def test_cat_empty(self):
        f_args_variable = (torch.randn(0, S, requires_grad=True),
                           torch.randn(S, S, requires_grad=True))
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_cat_empty", "cat",
                              lambda a, b: torch.cat((a, b)),
                              True, f_args_variable, f_args_tensor)

    def test_trapz(self):
        f_args_variable = (torch.randn(2, 3, requires_grad=True),
                           torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]], requires_grad=True))
        f_args_tensor = deepcopy(unpack_variables(f_args_variable))
        run_functional_checks(self, "test_trapz", "trapz",
                              lambda y, x: torch.trapz(y, x),
                              True, f_args_variable, f_args_tensor)

    def test_var_mean_differentiable(self):
        dim = [2, 4]
        keepdim = False
        input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
        input2 = deepcopy(input1)
        var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
        var2 = input2.var(dim=dim, keepdim=keepdim)
        mean2 = input2.mean(dim=dim, keepdim=keepdim)
        grad = torch.randn(3, 4, 6, 3, requires_grad=True)

        r1 = var1 * var1 * mean1 * mean1
        r2 = var2 * var2 * mean2 * mean2
        self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))

        torch.autograd.backward(r1, grad)
        torch.autograd.backward(r2, grad)
        self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))

    @skipIfNoLapack
    def test_cholesky(self):
        def func(root, upper):
            x = torch.matmul(root, root.transpose(-1, -2)) + 1e-05
            return torch.cholesky(x, upper)

        def run_test(upper, dims):
            root = torch.rand(*dims, requires_grad=True)

            gradcheck(func, [root, upper])
            gradgradcheck(func, [root, upper])

            root = random_symmetric_pd_matrix(dims[-1], *dims[:-2]).requires_grad_()
            root.cholesky().sum().backward()
            self.assertEqual(root.grad, root.grad.transpose(-1, -2))  # Check the gradient is symmetric

        for upper, dims in product([True, False], [(3, 3), (4, 3, 2, 2)]):
            run_test(upper, dims)

    @skipIfNoLapack
    def test_cholesky_solve(self):
        def _test_with_size(A_dims, B_dims, upper):
            root = torch.rand(*A_dims).requires_grad_()
            b = torch.rand(*B_dims).requires_grad_()

            def func(root, b, upper):
                if upper:
                    A = root.triu()
                else:
                    A = root.tril()
                return torch.cholesky_solve(b, A, upper)

            gradcheck(func, [root, b, upper])
            gradgradcheck(func, [root, b, upper])

        for (a_size, b_size), upper in product([((3, 3), (3, 4)), ((3, 3), (3, 2)),
                                                ((2, 3, 3), (2, 3, 4)), ((2, 3, 3), (2, 3, 2))],
                                               [True, False]):
            _test_with_size(a_size, b_size, upper)

    @skipIfNoLapack
    def test_eig(self):
        def func(B):
            return torch.eig(B, eigenvectors=True)

        def func_eigvals(B):
            return torch.eig(B, eigenvectors=True)[0]

        def func_eigvecs(B):
            return torch.eig(B, eigenvectors=True)[1]

        def run_test(dims):
            # The backward operation for eig only works for real eigenvalues,
            # so the matrix should be B = U^{-1}*A*U where A is a random
            # symmetric matrix and U is a random full-rank matrix.
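            # (B = U^{-1}*A*U is similar to A, so it keeps A's real spectrum.)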
            # A slight change to the matrix should not make the eigenvalues
            # complex, so we apply requires_grad_ to B, not A and U
            A = random_symmetric_matrix(dims[-1], *dims[:-2])
            U = torch.rand(*dims)
            Uinv = torch.inverse(U)
            B = torch.matmul(Uinv, torch.matmul(A, U)).requires_grad_()
            gradcheck(func, [B])
            gradgradcheck(func, [B])
            gradcheck(func_eigvals, [B])
            gradgradcheck(func_eigvals, [B])
            gradcheck(func_eigvecs, [B])
            gradgradcheck(func_eigvecs, [B])

        for dims in [(3, 3), (5, 5)]:
            run_test(dims)

    @skipIfNoLapack
    def test_symeig(self):
        def func(root, upper):
            x = 0.5 * (root + root.transpose(-2, -1))
            return torch.symeig(x, eigenvectors=True, upper=upper)

        def run_test(upper, dims):
            root = torch.rand(*dims, requires_grad=True)

            gradcheck(func, [root, upper])
            gradgradcheck(func, [root, upper])

            root = random_symmetric_matrix(dims[-1], *dims[:-2]).requires_grad_()
            w, v = root.symeig(eigenvectors=True)
            (w.sum() + v.sum()).backward()
            self.assertEqual(root.grad, root.grad.transpose(-1, -2))  # Check the gradient is symmetric

        for upper, dims in product([True, False], [(3, 3), (5, 3, 3), (4, 3, 2, 2)]):
            run_test(upper, dims)

    @skipIfNoLapack
    def test_cholesky_inverse(self):
        def _test_with_size(upper, dims):
            # We need to create a Cholesky factor, which requires that the diagonal elements be positive.
            # Initializing the diagonal with values that are too small could cause issues when it is perturbed
            # to obtain the numerical Jacobian, thereby leading to an inconsistent gradcheck
            A = torch.randn(*dims)
            A.diagonal().uniform_(0.1, 5.0)
            A.requires_grad_()

            def func(A, upper):
                if upper:
                    root = A.triu()
                else:
                    root = A.tril()
                return torch.cholesky_inverse(root, upper)

            gradcheck(func, [A, upper])
            gradgradcheck(func, [A, upper])

        for upper, dims in product([True, False], [(3, 3), (5, 5)]):
            _test_with_size(upper, dims)

    @skipIfNoLapack
    def test_triangular_solve(self):
        def _test_with_size(A_dims, B_dims):
            A = torch.rand(*A_dims).requires_grad_()
            b = torch.rand(*B_dims).requires_grad_()

            for upper, transpose, unitriangular in product((True, False), repeat=3):
                def func(A, b):
                    return torch.triangular_solve(b, A, upper, transpose, unitriangular)

                gradcheck(func, [A, b])
                gradgradcheck(func, [A, b])

        _test_with_size((3, 3), (3, 4))
        _test_with_size((3, 3), (3, 2))
        _test_with_size((2, 3, 3), (2, 3, 4))
        _test_with_size((2, 3, 3), (2, 3, 2))

    @unittest.skipIf(not TEST_MKL, "PyTorch is built without MKL support")
    def test_fft_ifft_rfft_irfft(self):
        def _test_complex(sizes, signal_ndim):
            x = torch.randn(sizes, requires_grad=True, dtype=torch.double)

            for normalized in (True, False):
                def fft(x):
                    return x.fft(signal_ndim, normalized=normalized)

                gradcheck(fft, [x])
                gradgradcheck(fft, [x], gen_non_contig_grad_outputs=True)

                def ifft(fx):
                    return fx.ifft(signal_ndim, normalized=normalized)

                # Use output of fft(x) for inverse fft, due to symmetry requirements
                fx = fft(x).detach()
                fx.requires_grad = True
                gradcheck(ifft, [fx])
                gradgradcheck(ifft, [fx], gen_non_contig_grad_outputs=True)

        def _test_real(sizes, signal_ndim):
            x = torch.randn(sizes, requires_grad=True, dtype=torch.double)
            if x.dim() == signal_ndim:
                start_dim = 0
            else:
                start_dim = 1
            signal_sizes = x.size()[start_dim:start_dim + signal_ndim]

            for normalized, onesided in product((True, False), repeat=2):
                def rfft(x):
                    return x.rfft(signal_ndim, normalized=normalized, onesided=onesided)

                gradcheck(rfft, [x])
                gradgradcheck(rfft, [x], gen_non_contig_grad_outputs=True)

                # Generally speaking, irfft itself won't and can't pass the
                # current gradcheck as it assumes the input follows conjugate
                # symmetry, a requirement that is never true with our pointwise
                # numerical Jacobian estimate. Without input symmetry, irfft's
                # behavior is undefined.
                #
                # Even onesided results can't remove all redundancy. For
                # example, consider the .select(last_signal_dim, 0) slice.
                # It is entirely represented in the onesided results (except
                # for 1D), and will be reflected onto itself!
                #
                # So only 1D onesided irfft should pass grad check as it is
                # guaranteed that the input has no symmetrical values.
                #
                # In other cases, we test a function that first uses rfft to
                # generate a tensor that follows the conjugate symmetry irfft
                # expects, and then feeds it into irfft. Since rfft is already
                # tested above, we thereby verify the correctness of irfft.
                if signal_ndim == 1 and onesided:
                    def irfft(fx):
                        return fx.irfft(signal_ndim, normalized=normalized,
                                        onesided=onesided, signal_sizes=signal_sizes)

                    # Use output of rfft(x) for inverse rfft, due to symmetry requirements
                    fx = rfft(x).detach()
                    fx.requires_grad = True
                    gradcheck(irfft, [fx])
                    gradgradcheck(irfft, [fx], gen_non_contig_grad_outputs=True)
                else:
                    # Test this function: f(x) = ifft(rfft(x) + rfft(z)), where
                    # z is some fixed tensor of same size as x. rfft(z) term is
                    # needed because otherwise f becomes identity.
                    z = torch.randn(sizes, dtype=torch.double)
                    fz = z.rfft(signal_ndim, normalized=normalized, onesided=onesided)

                    def rfft_irfft(x):
                        fx = x.rfft(signal_ndim, normalized=normalized, onesided=onesided)
                        y = fx + fz
                        return y.irfft(signal_ndim, normalized=normalized,
                                       onesided=onesided, signal_sizes=signal_sizes)

                    gradcheck(rfft_irfft, [x])
                    gradgradcheck(rfft_irfft, [x], gen_non_contig_grad_outputs=True)

        _test_real((2, 10), 1)
        _test_real((2, 3, 4), 2)
        _test_real((2, 3, 4, 3), 3)

        _test_complex((2, 2, 10, 2), 1)
        _test_complex((1, 2, 3, 4, 2), 2)
        _test_complex((2, 1, 3, 4, 3, 2), 3)

    def test_gradcheck_fail_when_no_differentiable_outputs_and_num_grad_not_zero(self):
        def autograd_fn(input):
            output = torch.detach(input)
            self.assertFalse(output.requires_grad)
            return output

        f_args_variable = torch.ones(S, S, requires_grad=True)
        self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero',
                               lambda: gradcheck(autograd_fn, f_args_variable, eps=1e-6, atol=PRECISION))

    def test_variable_traverse(self):
        def get_out_and_unrefed_cycle():
            inp = torch.randn(10, requires_grad=True)
            tmp = inp.view(10, 1)
            out = tmp.view(10)

            # Create a reference cycle that contains an
            # intermediary Variable in the graph
            my_list = []
            my_list.append(tmp)
            my_list.append(my_list)

            return out

        out = get_out_and_unrefed_cycle()
        gc.collect()
        # This will segfault if things have been erroneously released
        out.backward(torch.randn(out.size()))

    def test_norm_subgradient(self):
        def run_test(input_size, norm_deg):
            input = torch.zeros(*input_size, requires_grad=True)
            input.norm(norm_deg).backward()
            self.assertEqual(input.grad.abs().sum(), 0)

        run_test((10,), 2)
        run_test((10, 10), 2)
        run_test((10,), 3)
        run_test((10,), 1)
        run_test((10,), 1.5)

    def test_pow_zero_tensor_gradient(self):
        def run_test(input_size, exponent):
            input = torch.zeros(*input_size, requires_grad=True)
            input.pow(exponent).sum().backward()
            self.assertEqual(input.grad.abs().sum(), 0)

        run_test((10,), torch.zeros(10))
        run_test((10, 10), torch.zeros(10, 10))
        run_test((10,), 0)

    def test_pow_scalar_base(self):
        a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
        gradcheck(lambda a: torch.pow(2, a), (a,))

    @skipIfNoLapack
    def test_pinverse(self):
        # Why is pinverse tested this way, and not ordinarily like other linear algebra methods?
        # 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
        # 2. Derivatives for pseudo-inverses exist typically for constant rank (Golub et al, 1973)
        # 3. This method creates two orthogonal matrices, and constructs a test case with large
        #    singular values (given by x to the function).
        # 4. This will ensure that small perturbations don't affect the rank of the matrix, in which case
        #    a derivative exists.
        # 5. This test exists since pinverse is implemented using SVD, and hence supports backpropagation
        m, n = 5, 10
        U = torch.randn(n, m).qr()[0].t()  # Orthogonal with dimensions m x n
        V = torch.randn(n, m).qr()[0].t()  # Orthogonal with dimensions m x n

        def func(x):
            S = torch.cat([x, torch.zeros(n - m)], 0)
            M = U.mm(torch.diag(S)).mm(V.t())
            return M.pinverse()

        gradcheck(func, [torch.rand(m).add_(1).requires_grad_()])
        gradcheck(func, [torch.rand(m).add_(10).requires_grad_()])
        gradgradcheck(func, [torch.rand(m).add_(1).requires_grad_()])
        gradgradcheck(func, [torch.rand(m).add_(10).requires_grad_()])

    def test_chain_matmul(self):
        def gen_matrices(p):
            matrices = []
            for (pi, pi_1) in zip(p[:-1], p[1:]):
                matrices.append(torch.randn(pi, pi_1).requires_grad_())
            return matrices

        gradcheck(torch.chain_matmul, gen_matrices([5, 10, 15, 5]))
        gradcheck(torch.chain_matmul, gen_matrices([3, 5, 2, 6]))
        gradcheck(torch.chain_matmul, gen_matrices([6, 2, 4, 8, 10]))
        gradgradcheck(torch.chain_matmul, gen_matrices([5, 10, 15, 5]))
        gradgradcheck(torch.chain_matmul, gen_matrices([3, 5, 2, 6]))
        gradgradcheck(torch.chain_matmul, gen_matrices([6, 2, 4, 8, 10]))

    @unittest.skipIf(IS_WINDOWS, """File open permission error on Windows,
        https://github.com/pytorch/pytorch/issues/34086""")
    def test_profiler_tracing(self):
        t1, t2 = torch.ones(1), torch.ones(1)
        with torch.autograd.profiler.profile() as prof:
            torch.add(t1, t2)

        with tempfile.NamedTemporaryFile(mode="w+") as f:
            prof.export_chrome_trace(f.name)
            # read the trace and expect valid json
            # if the JSON generated by export_chrome_trace is not valid, this will throw and fail the test.
            json.load(f)

        # Same test but for cuda.
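        # A minimal illustrative sketch of the same validity check outside a
        # test (hypothetical file name; assumes a finished profile `prof`):
        #
        #     prof.export_chrome_trace("trace.json")
        #     with open("trace.json") as f:
        #         json.load(f)  # raises if the emitted JSON is malformed
        #
        # The CUDA variant below repeats this with use_cuda=True so kernel
        # events are recorded as well.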
if not torch.cuda.is_available(): return device = torch.device("cuda:0") t1, t2 = torch.ones(1, device=device), torch.ones(1, device=device) with torch.autograd.profiler.profile(use_cuda=True) as prof: torch.add(t1, t2) with tempfile.NamedTemporaryFile(mode="w+") as f: prof.export_chrome_trace(f.name) # Now validate the json json.load(f) def test_profiler(self): x = torch.randn(10, 10) with profile() as p: self.assertTrue(torch.autograd._profiler_enabled()) y = x * 2 + 4 self.assertFalse(torch.autograd._profiler_enabled()) last_end = 0 names = ['aten::mul', 'aten::to', 'aten::empty_strided', 'aten::copy_', 'aten::empty', 'aten::add', 'aten::to', 'aten::empty_strided', 'aten::copy_', 'aten::empty'] top_level_names = ['aten::mul', 'aten::add'] top_level_iter = iter(top_level_names) self.assertEqual(len(p.function_events), len(names)) for info, expected_name in zip(p.function_events, names): if info.cpu_interval.start > last_end: top_level_name_expected = next(top_level_iter) self.assertEqual(info.name, top_level_name_expected) last_end = info.cpu_interval.end self.assertEqual(info.name, expected_name) def test_profiler_seq_nr(self): with profile() as p: x = torch.randn(10, 10, requires_grad=True) y = torch.randn(10, 10, requires_grad=True) z = x + y s = z.sum() s.backward() # expecting aten::add, aten::sum to have the sequence numbers, # expecting the corresponding backward nodes to have the same numbers # as the forward ops add_seq_nr = -1 sum_seq_nr = -1 found_add = found_sum = False found_bwd_add = found_bwd_sum = False found_empty = False for e in p.function_events: if e.name == "aten::add": add_seq_nr = e.sequence_nr self.assertFalse(found_add) found_add = True elif e.name == "aten::sum": sum_seq_nr = e.sequence_nr self.assertFalse(found_sum) found_sum = True elif "Add" in e.name and "Backward" in e.name: self.assertEqual(e.sequence_nr, add_seq_nr) self.assertFalse(found_bwd_add) found_bwd_add = True elif "Sum" in e.name and "Backward" in e.name: self.assertEqual(e.sequence_nr, sum_seq_nr) self.assertFalse(found_bwd_sum) found_bwd_sum = True # check that nested ops (e.g. 
empty) don't have # sequence number if e.name == "aten::empty": self.assertEqual(e.sequence_nr, -1) found_empty = True self.assertGreaterEqual(add_seq_nr, 0) self.assertGreaterEqual(sum_seq_nr, 0) self.assertNotEqual(add_seq_nr, sum_seq_nr) self.assertTrue(found_add) self.assertTrue(found_sum) self.assertTrue(found_bwd_add) self.assertTrue(found_bwd_sum) self.assertTrue(found_empty) def test_profiler_unboxed_only(self): x = torch.rand(3, 4) with torch.autograd.profiler.profile() as prof: x.resize_([3, 2]) @skipIfRocm def test_profiler_custom_op(self): inst = torch.classes._TorchScriptTesting._PickleTester([3, 4]) with torch.autograd.profiler.profile() as prof: torch.ops._TorchScriptTesting.take_an_instance(inst) found_event = False for e in prof.function_events: if e.name == '_TorchScriptTesting::take_an_instance': found_event = True self.assertTrue(found_event) def test_profiler_propagation(self): def foo(x): with record_function("in_foo") as rf: return x * 2 x = torch.rand(3, 4) traced_foo = torch.jit.trace(foo, x) def bar(x): with record_function("in_bar") as rf: # we expect that profiler will be able # propagate across fork fut = torch.jit._fork(traced_foo, x) y = torch.jit._wait(fut) # note: continuation (and rf's end) can # be executed in a different thread with record_function("in_bar_after_wait") as rf2: y = y * 2 return y traced_bar = torch.jit.trace(bar, x) with profile() as p: traced_bar(x) found_foo = False found_bar = False found_bar_after_wait = False for info in p.function_events: if info.name == "in_foo": self.assertFalse(found_foo) found_foo = True elif info.name == "in_bar": self.assertFalse(found_bar) found_bar = True elif info.name == "in_bar_after_wait": self.assertFalse(found_bar_after_wait) found_bar_after_wait = True self.assertTrue(found_foo) self.assertTrue(found_bar) self.assertTrue(found_bar_after_wait) def test_record_function_callbacks(self): x = torch.randn(10, 10) with profile() as p: with record_function("foo"): y = x * 2 + 4 function_events = p.function_events foo_event = [event for event in function_events if "foo" in event.name][0] self.assertEqual(foo_event.count, 1) def test_profiler_aggregation_fake(self): events = EventList() id = [0] def get_id(): id[0] = id[0] + 1 return id[0] # [[thread_id, [(start, end, id), ....]], ...] # Using list instead of a dict so order is guaranteed for any Python # version threads = [ [1, [(0, 1, get_id()), (1, 2, get_id())]], [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]], ] for thread, ranges in threads: for range in ranges: assert(len(range) == 3) events.append( FunctionEvent( id=range[2], node_id=0, name="", thread=thread, cpu_start=range[0], cpu_end=range[1], ) ) events.populate_cpu_children() # Note that [1, 3] pushes out [0, 2] first. 
Then we record [1, 2] # as a child of [1, 3] res = [[], [], [], [], [4]] def get_children_ids(event): return [child.id for child in event.cpu_children] assert([get_children_ids(event) for event in events] == res) def test_profiler_aggregation_table(self): """ Test if the profiling result is aggregated for `str(prof)` See: https://github.com/pytorch/pytorch/issues/37500 """ x = torch.randn(1024) with torch.autograd.profiler.profile() as prof: torch.einsum("i->", x) prof_str = str(prof) prof_table = prof.table() self.assertEqual(prof_table, prof_str) def test_profiler_function_event_avg(self): avg = FunctionEventAvg() avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, cpu_start=10, cpu_end=15)) avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, cpu_start=20, cpu_end=30)) avg.add(avg) self.assertEqual(avg.key, "foo") # aggregate stats self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) self.assertEqual(avg.cuda_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) self.assertEqual(avg.cuda_time_total, 0) def test_profiler_shapes(self): print("") layer1 = torch.nn.Linear(20, 30) layer2 = torch.nn.Linear(30, 40) input = torch.randn(128, 20) with profile(record_shapes=True) as prof: layer2(layer1(input)) print(prof.function_events) top_level_expected_events_and_shapes = [ (None, [[30, 20]]), ('aten::addmm', [[30], [128, 20], [20, 30], [], []]), (None, [[40, 30]]), ('aten::addmm', [[40], [128, 30], [30, 40], [], []]) ] expected_iter = iter(top_level_expected_events_and_shapes) last_end = 0 for event in prof.function_events: if event.cpu_interval.start > last_end: name_expected, input_shape_expected = next(expected_iter) if name_expected is not None: self.assertEqual(event.name, name_expected) self.assertEqual(event.input_shapes, input_shape_expected) last_end = event.cpu_interval.end def test_profiler_no_cuda(self): print("") layer = torch.nn.Linear(20, 30) x = torch.randn(128, 20) with profile(use_cuda=False) as prof: layer(x) prof_str = str(prof) print(prof_str) self.assertTrue('cpu' in prof_str.lower()) self.assertTrue('cuda' not in prof_str.lower()) def test_profiler_aggregation_lstm(self): print("") rnn = torch.nn.LSTM(10, 20, 2) total_time_s = 0 with profile(record_shapes=True) as prof: for i in range(20): input = torch.randn(5, 3, 10) h = torch.randn(2, 3, 20) c = torch.randn(2, 3, 20) start = time.time() rnn(input, (h, c)) end = time.time() total_time_s += end - start print(prof.table( sort_by="self_cpu_time_total", row_limit=10, header="TEST")) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10)) print(prof.table( sort_by="self_cpu_time_total", row_limit=10, header="TEST", top_level_events_only=True)) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True)) total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default print( "Total time based on python measurements: ", format_time(total_time_us) ) print( "CPU time measurement python side overhead: {:.2f}%".format( (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0 ) ) if sys.platform != "win32": with tempfile.NamedTemporaryFile() as trace_file: prof.export_chrome_trace(trace_file.name) def test_memory_profiler(self): def run_profiler(tensor_creation_fn, metric): # collecting allocs / deallocs with profile(profile_memory=True, record_shapes=True) as prof: x = None with 
record_function("test_user_scope_alloc"): x = tensor_creation_fn() with record_function("test_user_scope_dealloc"): del x stats = prof.key_averages(group_by_input_shape=True) print(stats.table(sort_by=metric)) return stats def check_metrics(stats, metric, allocs=None, deallocs=None): stat_metrics = {} for stat in stats: stat_metrics[stat.key] = getattr(stat, metric) if allocs is not None: for alloc_fn in allocs: self.assertTrue(alloc_fn in stat_metrics) self.assertTrue(stat_metrics[alloc_fn] > 0) if deallocs is not None: for dealloc_fn in deallocs: self.assertTrue(dealloc_fn in stat_metrics) self.assertTrue(stat_metrics[dealloc_fn] < 0) def create_cpu_tensor(): return torch.rand(10, 10) def create_cuda_tensor(): return torch.rand(10, 10).cuda() def create_mkldnn_tensor(): return torch.rand(10, 10, dtype=torch.float32).to_mkldnn() print("Running CPU test") stats = run_profiler(create_cpu_tensor, "cpu_memory_usage") check_metrics( stats, "cpu_memory_usage", allocs=[ "aten::empty", "aten::rand", "test_user_scope_alloc", ], deallocs=[ "test_user_scope_dealloc", ] ) if torch.cuda.is_available(): create_cuda_tensor() print("Running CUDA test") stats = run_profiler(create_cuda_tensor, "cuda_memory_usage") check_metrics( stats, "cuda_memory_usage", allocs=[ "test_user_scope_alloc", "aten::to", "aten::empty_strided", ], deallocs=[ "test_user_scope_dealloc", ] ) check_metrics( stats, "cpu_memory_usage", allocs=[ "aten::rand", "aten::empty", ] ) if torch._C.has_mkldnn: create_mkldnn_tensor() print("Running MKLDNN test") stats = run_profiler(create_mkldnn_tensor, "cpu_memory_usage") check_metrics( stats, "cpu_memory_usage", allocs=[ "test_user_scope_alloc", "aten::rand", "aten::empty", "aten::to_mkldnn", ], deallocs=[ "test_user_scope_dealloc", ] ) # check partial overlap of tensor allocation with memory profiler x = torch.rand(10, 10) with profile(profile_memory=True, record_shapes=True) as prof: del x x = torch.rand(10, 10) del x stats = prof.key_averages(group_by_input_shape=True) check_metrics( stats, "cpu_memory_usage", allocs=[ "aten::rand", "aten::empty", ] ) def test_record_function(self): x = torch.randn(10, 10) def forward(x): with record_function("outer"): y = x * 2 + 4 with record_function("inner"): y = y - 1 y = y / 1 forward(x) with profile() as p: forward(x) events = p.function_events important_events = [ 'outer', 'aten::mul', 'aten::add', 'inner', 'aten::sub', 'aten::div' ] idx = 0 for info in events: if info.name == important_events[idx]: idx = idx + 1 if idx == len(important_events): break self.assertEqual(idx, len(important_events)) # We can also use record_function to decorate arbitrary function @record_function('my_func') def f(x, y): return x + y with profile() as p: f(1, 2) self.assertTrue('my_func' in str(p)) def test_record_function_multithreaded(self): rf = record_function("outer") rf.__enter__() with record_function("inner"): # test that exiting the record function after starting another one # doesn't throw. rf.__exit__() with record_function("inner"): rf.__enter__() # test that exiting the record function after ending another one # doesn't throw. rf.__exit__() def test_dir(self): x = torch.randn(10, 10) keys = dir(x) self.assertIn('shape', keys) # real and imag are only implemented for complex tensors. 
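        # e.g. `x.real` on the float tensor raises RuntimeError here, while
        # `y.real` on the complex tensor below is supported.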
y = torch.randn(10, 10, dtype=torch.cfloat) for key in ['real', 'imag']: self.assertRaises(RuntimeError, lambda: hasattr(x, key)) self.assertTrue(hasattr(y, key)) keys.remove(key) for key in keys: self.assertTrue(hasattr(x, key)) def test_as_strided(self): def test(x, prepro_fn, size, strides, offset=None): x = x.to(torch.double).detach().requires_grad_() # Check that forward will **not** resize storage because it may # cause NaN in output and fail numerical Jacobian check consequently with torch.no_grad(): y = prepro_fn(x) if prepro_fn is not None else x max_offset = sum((si - 1) * st for si, st in zip(size, strides)) max_offset += offset if offset is not None else y.storage_offset() assert max_offset < len(y.storage()), "test case resizes storage" def closure(x): if prepro_fn is not None: x = prepro_fn(x) return x.as_strided(size, strides, offset) gradcheck(closure, [x]) gradgradcheck(closure, [x]) # test test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2) # test crazy stride at dim with size 1 case test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2) # test expand case test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2) test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4) test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0) # test non-expand overlapping case test(torch.randn(35), None, [6, 6], [5, 1], 2) test(torch.randn(15), None, [3, 2], [3, 6], 2) # test transpose case test(torch.randn(3, 4), None, [4, 3], [1, 4]) # test "getting things outside the input" case x = torch.randn(6, 2) test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3]) # test select on expanded input case test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0) def _test_lerp_tensor_weights(self, cast): def construct_inputs(*shapes): start = cast(torch.randn(shapes[0])).requires_grad_() end = cast(torch.randn(shapes[1])).requires_grad_() weight = cast(torch.randn(shapes[2])).requires_grad_() return [start, end, weight] all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting ((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1 ((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1 ((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1 ((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2 ((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2 ((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2 ((3, 3), (3, 3, 3), (3,))] # all broadcasting for shapes in all_test_shapes: cur_inputs = construct_inputs(*shapes) gradcheck(torch.lerp, cur_inputs) gradgradcheck(torch.lerp, cur_inputs) def test_lerp_tensor_weights(self): self._test_lerp_tensor_weights(lambda t: t) def test_reduce_dtype(self): def test_reduction(op, has_no_dim, takes_dtype=True): x = torch.randn(3, 3, dtype=torch.float, requires_grad=True) if has_no_dim: grad1, = torch.autograd.grad([op(x)], [x]) grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x]) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) gi = torch.randn(op(x, dim=0).shape, dtype=torch.float) grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi) if takes_dtype: grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double()) else: grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double()) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) test_reduction(torch.sum, True) test_reduction(torch.prod, True) test_reduction(torch.cumsum, False) test_reduction(torch.cumprod, False) 
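        # takes_dtype=False exercises the branch above that casts the input
        # itself (op(x.double(), dim=0)) instead of passing dtype= to the op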
test_reduction(torch.logcumsumexp, False, takes_dtype=False) def test_inplace_view_saved_output(self): # Test an in-place operation on a view in which the in-place op saves # its output. Previously, this created a reference cycle. dealloc = [0] class IncrementOnDelete(object): def __del__(self): dealloc[0] += 1 def test(): root = torch.randn(3, 3, requires_grad=True) copy = root.clone() copy.grad_fn.register_hook(IncrementOnDelete()) view = copy.view(9) torch.nn.functional.relu(view, inplace=True) test() self.assertEqual(dealloc[0], 1) def test_inplace_view_backward(self): # Issue #10532: Make sure that this does not raise RuntimeError. net = nn.Sequential( nn.InstanceNorm2d(2), nn.ReLU(True) ) x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True) g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True) torch.autograd.grad(g.sum(), [x]) self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]])) # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8 inputs = torch.ones((1, 3, 256, 256), requires_grad=True) tmp1 = (inputs + 1).view_as(inputs) tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True) prob_interpolated = torch.sigmoid(tmp2) gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs, grad_outputs=torch.ones(prob_interpolated.size()), create_graph=True, retain_graph=True)[0] gradient_penalty = gradients.sum() gradient_penalty.backward() fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0] self.assertEqual(fn.name(), "ThresholdBackwardBackward") def test_inplace_view_weak_grad_fn(self): # Issue 23502: Test that b's grad_fn is preserved. a = torch.arange(10.0, requires_grad=True) b = a.narrow(0, 0, 2).clone().view(-1) b.relu_() c = b.clone() del b gc.collect() s = c.sum() s.backward() self.assertEqual(s, torch.tensor(1.0)) # Issue 23502: Ensure RuntimeError for modification of SavedVariable. a = torch.rand(10, requires_grad=True).narrow(0, 0, 10) b = a.relu_() c = b.add_(100) del b with self.assertRaises(RuntimeError): c.sum().backward(torch.ones(1, requires_grad=True)) def test_mul_out(self): a = torch.randn(2, 2, requires_grad=True) b = torch.randn(2, 2, requires_grad=True) x = torch.zeros_like(a) # out=... 
functions don't support automatic differentiation currently
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))

        # the inputs can require grad if we're in no_grad() mode
        with torch.no_grad():
            torch.mul(a, b, out=x)
            self.assertEqual(x, a * b)

    def test_mul_out_result_requires_grad(self):
        a = torch.randn(2, 2)
        b = torch.randn(2, 2)
        x = torch.zeros(2, 2, requires_grad=True)
        # we should throw an exception if the output requires grad
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))

    def test_diagonal_derivative_requires_grad(self):
        # test that the backward requires grad
        # we do this because diagonal_backward uses inplace
        # operations and gradgradcheck does not catch whether
        # they work as expected (it will succeed even if
        # the gradient has requires_grad == False)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.diagonal(a)**2
        c = b.sum()
        d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
        self.assertTrue(d.requires_grad)

    def test_anomaly_detect_nan(self):
        size = 10

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1.sum(0, keepdim=True)

            @staticmethod
            def backward(ctx, gO):
                gI = gO.clone().expand(size)
                gI[0] = 0
                gI[0] /= 0  # Generate a nan
                if ctx.fail_0th:
                    return gI, None, None
                else:
                    return None, gI, None

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        out.backward()  # Should not fail

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out.backward()
            self.assertIn('No forward pass information', str(w[0].message))

        inp = torch.rand(size, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out = MyFunc.apply(inp, inp, False)
                    out.backward()
            self.assertIn('MyFunc.apply', str(w[0].message))

    def test_nested_anomaly_detect_nan(self):
        size = 10

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, fail_0th):
                ctx.fail_0th = fail_0th
                ctx.save_for_backward(inp1)
                return inp1.sum(0, keepdim=True)

            @staticmethod
            def backward(ctx, gO):
                inp, = ctx.saved_tensors
                fail_0th = ctx.fail_0th
                g = gO.clone().expand(size)
                gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
                return gI, None

        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1 * 2.0 + inp2

            @staticmethod
            def backward(ctx, gO):
                fail_0th = ctx.fail_0th
                g1 = gO.clone()
                g2 = gO.clone()
                g1[0] = 0
                g2[0] = 0
                # generate a nan
                if fail_0th:
                    g1[0] /= 0
                else:
                    g2[0] /= 0
                return g1, g2, None

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        gsum.backward()  # should not fail

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    gsum.backward()
        self.assertIn('No forward pass information', str(w[1].message))

        inp = torch.rand(size, requires_grad=True)
        with warnings.catch_warnings(record=True) as w:
            with
self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."): with detect_anomaly(): out = MyFunc.apply(inp, False) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) gsum = ginp.sum() gsum.backward() self.assertIn('MyFunc2.apply', str(w[1].message)) self.assertIn('MyFunc.apply', str(w[2].message)) def test_anomaly_grad_warnings(self): # PyTorch won't throw warnings if there is an error # but we'd want to at least see them in stderr class StdErrDiverter: def __enter__(self): self.stderr_orig = sys.stderr self.stderr_new = io.StringIO() sys.stderr = self.stderr_new return self def __exit__(self, *args): self.captured = self.stderr_new.getvalue() sys.stderr = self.stderr_orig # if the warnings don't throw, they will be handled as regular warnings with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 2) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', str(w[1].message)) # if the warning throws, it will be printed to sys.stderr with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): warnings.simplefilter("error") with StdErrDiverter() as s: a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 1) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', s.captured) @skipIfNoLapack def test_eig_no_eigenvectors(self): A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True) w, v = torch.eig(A, eigenvectors=False) with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_eig_complex_eigenvalues(self): A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True) w, v = torch.eig(A, eigenvectors=True) with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_symeig_no_eigenvectors(self): A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True) w, v = torch.symeig(A, eigenvectors=False) with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_svd_no_singularvectors(self): A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True) u, s, v = torch.svd(A, compute_uv=False) with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'): torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)]) def test_no_grad_copy(self): # create autograd function that saves grad pointer as class static class MyFunc(Function): static_grad_ptr = None @staticmethod def forward(ctx, inp1, inp2): return inp1 + inp2 @staticmethod def backward(ctx, grad): MyFunc.static_grad_ptr = grad.data_ptr() return grad, grad class NonContGradFunc(Function): @staticmethod def forward(ctx, 
inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.])

            @staticmethod
            def backward(ctx, grad):
                return torch.ones(1).expand(ctx.size)

        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
        self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for one of a,b
        a.grad = b.grad = None
        MyFunc.apply(a, b)[1][0].backward()
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad.data_ptr()
        p_b = b.grad.data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)

    def test_no_grad_copy_sparse(self):
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None

            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2

            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad, grad

        class NonContGradFunc(Function):
            static_grad_ptr = None

            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2

            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad

        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F

        # test case that should trigger no copy for one of a,b
        emb_matrix = MyFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)

        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)

        # non-contiguous indices and value, we should trigger a copy.
        a.grad = b.grad = None
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)

        # Run backwards multiple times to ensure accumulation works.
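        # (Each extra backward accumulates into the existing sparse .grad,
        # exercising the sparse accumulation path rather than first assignment.)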
for i in range(10): loss.backward(retain_graph=True) def test_gradcheck_single_input(self): def f(inp): return inp.mul(5) gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True)) gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True)) def test_gradcheck_sparse_input(self): def fn(sparse): return torch.sparse.sum(sparse) gradcheck(fn, torch.rand(10).to_sparse().requires_grad_(True), check_sparse_nnz=True) with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'): gradcheck(fn, torch.rand(10).to_sparse().requires_grad_(True), check_sparse_nnz=False) def test_gradcheck_nondeterministic(self): class NonDetFunc(Function): @staticmethod def forward(ctx, x, jitter=0.0): ctx._jitter = jitter return x @staticmethod def backward(ctx, grad_out): return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None inp = torch.randn(5, 5, requires_grad=True) gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp) with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'): gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp) with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'): gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp) gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5) gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5) gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5) def test_version_counter(self): x = torch.randn(1, 2) # In-place op bumps version x_saved_version = x._version x.add_(1).add_(1) self.assertTrue(x._version > x_saved_version) # Differentiable view shares version counter xz = x[:] self.assertTrue(x._version == xz._version) xz.add_(1) self.assertTrue(x._version == xz._version) # `x.data = y` preserves version counter of `x` x_saved_version = x._version x.data = torch.randn(2, 3) self.assertTrue(x._version == x_saved_version) x.add_(1) self.assertTrue(x._version > x_saved_version) # Make sure `x` is still using the same version counter it shares with `xz` self.assertTrue(x._version == xz._version) # In-place op on `xz` also updates version of `x`, # because they share the version counter xz.add_(1) self.assertTrue(x._version == xz._version) def test_set_data_tensorimpl_type(self): # Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl # of type `SparseTensorImpl`. 
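        # Roughly speaking, `x.data = y` must keep the PyObject's impl class
        # unchanged, so assigning a sparse tensor into a dense one is rejected
        # below rather than silently changing layout.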
x = torch.randn(1, 2) x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1])) with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'): x.data = x_s def test_set_data_preserve_pyobj(self): a = torch.randn(1, 2) b = torch.randn(1, 2) b_id_saved = id(b) b.data = a self.assertTrue(b_id_saved == id(b)) @unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows") def test_thread_shutdown(self): code = """import torch from torch.autograd import Function class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): return grad for shape in [(1,), ()]: v = torch.ones(shape, requires_grad=True) MyFunction.apply(v).backward() """ s = TestCase.runWithPytorchAPIUsageStderr(code) self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown") @unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941") def test_deep_reentrant(self): class DeepReentrant(Function): @staticmethod def forward(ctx, x): with torch.enable_grad(): ctx.x = Variable(x.detach(), requires_grad=True) ctx.x = ctx.x - 1 return ctx.x.detach() @staticmethod def backward(ctx, x): if ctx.x < 0: return x with torch.enable_grad(): DeepReentrant.apply(ctx.x).sum().backward() return x # Test stack overflow escape mechanism v = torch.tensor(2000.0, requires_grad=True) # This will cause stack overflow if reentrant calls are handled # in the same thread recursively DeepReentrant.apply(v).sum().backward() # Test stack overflow escape mechanism multiple times # to ensure reusing workers in the pool works fine v2 = torch.tensor(200.0, requires_grad=True) DeepReentrant.apply(v2).sum().backward() def test_reentrant_priority(self): order = [] class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, x): order.append("MyFunction") return x class Reentrant(Function): @staticmethod def forward(ctx, x): with torch.enable_grad(): ctx.x = Variable(x.detach(), requires_grad=True) ctx.x = ctx.x - 1 return ctx.x.detach() @staticmethod def backward(ctx, x): order.append("Reentrant") if ctx.x < 0: return x with torch.enable_grad(): Reentrant.apply(ctx.x).backward() return x a = MyFunction.apply(torch.tensor(6.0, requires_grad=True)) b = Reentrant.apply(torch.tensor(9.0, requires_grad=True)) v = a * b v.backward() # The tasks for the Reentrant and MyFunction backward() will be added # to the queue in the autograd engine at the same time. The backward # for Reentrant will be executed first, which will then add other # backward tasks to the queue. 
We want to ensure all the reentrant tasks # are prioritized over the MyFunction backward task regardless of their # sequence numbers self.assertEqual(len(order), 11) self.assertEqual(order.count("Reentrant"), 10) self.assertEqual(order[-1], "MyFunction") @slowTest def test_checkpointing(self): num_inp = 2000 nz_inp = 10 nz_out = 10 nz_bottleneck = 1000 # small proxy network for some complex reasoning we want to do per input module = nn.Sequential( nn.Linear(nz_inp, nz_bottleneck), nn.ReLU(), nn.Linear(nz_bottleneck, nz_inp) ) feat_combined = [] for r in range(num_inp): data_r = torch.Tensor(1, nz_inp) data_r.uniform_() data_r.requires_grad = True feat_r = checkpoint(module, data_r) feat_combined.append(feat_r) # compute mean as a proxy for some joint reasoning mean_combined = torch.stack(feat_combined).mean() mean_combined.backward() def _test_reentrant_with_callbacks(self, install_callbacks_in_depths): counter = {} counter["inner"] = 0 counter["outer"] = 0 def inc_inner_counter(): counter["inner"] += 1 def inc_outer_counter(): counter["outer"] += 1 class MyFunc(Function): @staticmethod def forward(ctx, input): return input @staticmethod @once_differentiable def backward(ctx, input): if 1 in install_callbacks_in_depths: # Add a callback to execute. Variable._execution_engine.queue_callback(inc_inner_counter) return input class MyReentrantFunc(Function): @staticmethod def forward(ctx, input): return input @staticmethod @once_differentiable def backward(ctx, input): if 0 in install_callbacks_in_depths: # Add a callback to execute. Variable._execution_engine.queue_callback(inc_outer_counter) # Reentrant backward call. tmp_inp = input.detach().requires_grad_() with torch.enable_grad(): tmp_out = (MyFunc.apply(tmp_inp)).sum() tmp_out.backward() return input t1 = torch.rand((3, 3), requires_grad=True) t2 = MyReentrantFunc.apply(t1) t3 = t2.sum() torch.autograd.backward([t3]) return counter def test_reentrant_with_callbacks_depth_0(self): # Verify callback is called only once. ret = self._test_reentrant_with_callbacks([0]) self.assertEqual(1, ret["outer"]) self.assertEqual(0, ret["inner"]) def test_reentrant_with_callbacks_depth_1(self): # Verify callback is called only once. ret = self._test_reentrant_with_callbacks([1]) self.assertEqual(0, ret["outer"]) self.assertEqual(1, ret["inner"]) def test_reentrant_with_callbacks_both_depths(self): # Verify callback is called twice. 
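        # (Depth 0 queues the outer callback from MyReentrantFunc.backward and
        # depth 1 queues the inner one from MyFunc.backward, so each counter
        # should end up at 1.)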
ret = self._test_reentrant_with_callbacks([0, 1]) self.assertEqual(1, ret["outer"]) self.assertEqual(1, ret["inner"]) def test_reentrant_with_leaf_variable_hook(self): handle = None param = torch.rand(10, requires_grad=True) def add_gradient_penalty_to_grad(grad): handle.remove() old_param_grad = grad param.grad = None # Add some sort of gradient penalty by directly updating the gradients with torch.enable_grad(): g = grad.detach().requires_grad_() new_param = param.detach().requires_grad_() out = ((g * 2) + new_param).sum() out.backward() res = g.grad + grad param.grad = old_param_grad return res handle = param.register_hook(add_gradient_penalty_to_grad) # Forward pass tmp = (param * param) loss = tmp.sum() # Compute the gradients loss.backward() def test_reentrant_with_non_leaf_variable_hook(self): handle = None param = torch.rand(10, requires_grad=True) def manual_increase_gradient(grad): handle.remove() # Add some sort of gradient penalty by directly updating the gradients with torch.enable_grad(): g = grad.detach().requires_grad_() out = ((g * 2) + 5).sum() out.backward() res = g.grad + grad return res # Forward pass tmp = (param * param) handle = tmp.register_hook(manual_increase_gradient) loss = tmp.sum() # Compute the gradients loss.backward() self.assertEqual(param.grad, 6 * param) def test_autograd_views_codegen(self): # This is not necessarily the absolute correct behavior, but this is the current # one. This test is here to make sure that any change to this behavior is detected # and not silent. The TODOs below mark the places with unexpected behavior. # Note that any change in these test will be BC-breaking and should be done carefully. # This test checks the behavior of two codegen functions (view_as and unbind) # with respect to view tracking and inplace operation on the output. def run_test(grad_mode, requires_grad, is_view, should_raise_tuple): def maybe_check_raise(fn, should_raise): self.assertTrue(should_raise is None or isinstance(should_raise, str)) if should_raise is not None: with self.assertRaisesRegex(RuntimeError, should_raise): fn() else: fn() inp = torch.rand(2, requires_grad=requires_grad).clone() with torch.set_grad_enabled(grad_mode): out = inp.view_as(inp) # Are they differentiable views? self.assertTrue(out._is_view() == is_view) # Are inplace allowed? maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0]) inp = torch.rand(2, requires_grad=requires_grad).clone() with torch.set_grad_enabled(grad_mode): out = inp.unbind() # Are they differentiable views? self.assertTrue(out[0]._is_view() == is_view) self.assertTrue(out[1]._is_view() == is_view) # Are inplace allowed? maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1]) maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2]) # should_raise contains None if it should not raise # should_raise contains a string of the error if it should raise # The 3 elements are for view_as, first output of unbind and second output of unbind run_test(grad_mode=True, requires_grad=False, is_view=True, should_raise_tuple=(None, None, None)) inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace." 
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))

    def _do_test_autograd_simple_views_python(self, dtype):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.

        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        # This indicator is used to check if the argument `ga` contains non-zero values
        ga_nz = [False]

        class IdOneOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a

            @staticmethod
            def backward(ctx, ga):
                bw_called[0] += 1
                return ga, None, None

        class IdTwoOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a, a + b

            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                if ga.eq(0).all():
                    ga_nz[0] = False
                else:
                    ga_nz[0] = True
                return ga + gab, gab, None

        err_msg_two_outputs = "Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
        err_msg_two_outputs += " This view is the output of a function that returns multiple views."

        class ViewOfTemp(Function):
            @staticmethod
            def forward(ctx, a, make_view):
                ctx.save_for_backward(a)
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                b = a.clone()
                return b.select(0, 0)

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, 0).copy_(grad)
                return res, None

        for fn_id in ["one_output", "two_output", "view_of_temp"]:
            for inplace in [True, False]:
                for make_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = (make_view or fn_id == "view_of_temp")

                    def fn(a, b):
                        # never modify a, b inplace for gradcheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, b, make_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3

                        return tmp.sum()

                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)

                    if fn_id == "two_output" and inplace and output_is_a_view:
                        with self.assertRaisesRegex(RuntimeError, err_msg_two_outputs):
                            fn(a, b)
                    else:
                        # Are the computed gradients correct?
                        if inplace and output_is_a_view:
                            with warnings.catch_warnings(record=True) as w:
                                if fn_id == "view_of_temp":
                                    # This will be fixed after the deprecation cycle and the warning becomes
                                    # an error.
                                    with self.assertRaisesRegex(RuntimeError, "Jacobian mismatch for output 0"):
                                        gradcheck(fn, (a, b))
                                else:
                                    # This works but the custom backward is not called (or called with partial)
                                    # gradients as tested below
                                    gradcheck(fn, (a, b))
                            self.assertTrue(len(w) > 0)
                        else:
                            gradcheck(fn, (a, b))

                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called
                    with warnings.catch_warnings(record=True) as w:
                        fn(a, b).backward()

                    expected_called = 1
                    expected_ga_nz = True
                    expected_warning = False

                    if output_is_a_view and inplace:
                        expected_called = 0
                        expected_warning = True

                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)
                    self.assertTrue((len(w) == 1) == expected_warning)

    def test_autograd_simple_views_python(self):
        self._do_test_autograd_simple_views_python(torch.double)
        self._do_test_autograd_simple_views_python(torch.cdouble)

    def test_autograd_complex_views_python(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This checks that multiple views in the forward are properly traced and how they
        # behave with respect to inplace operations.

        # This indicator is used to track how many times the backward function was called
        bw_called = [0]

        class ComplexView(Function):
            @staticmethod
            def forward(ctx, a, idx):
                res = a.narrow(0, idx, 1)
                res = a.select(0, idx)
                ctx.save_for_backward(a)
                ctx.idx = idx
                return res

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, ctx.idx).copy_(grad)
                return res, None

        a = torch.ones(2, requires_grad=True)
        idx = 1

        bw_called[0] = 0
        out = ComplexView.apply(a.clone(), idx)
        out.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        out = ComplexView.apply(a.clone(), idx)
        with warnings.catch_warnings(record=True) as w:
            out += 1
        self.assertEqual(len(w), 1)

    def test_autograd_inplace_views_python(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
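        # Background (illustrative sketch, not part of the test): a custom
        # Function that mutates one of its inputs in forward() must declare
        # it via ctx.mark_dirty() so autograd can rewire the graph, e.g.:
        #
        #     class InplaceScale(Function):      # hypothetical example
        #         @staticmethod
        #         def forward(ctx, x):
        #             x.mul_(2)
        #             ctx.mark_dirty(x)
        #             return x
        #
        #         @staticmethod
        #         def backward(ctx, g):
        #             return 2 * g
        #
        # The cases below probe what happens when this contract is honored,
        # violated (mark_dirty on a non-input), or mixed with extra views.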
# This test checks custom autograd.Function that perform inplace operations bw_called = [0] # I) Single output class MyAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a @staticmethod def backward(ctx, grad): bw_called[0] += 1 return grad, grad a = torch.ones(2, requires_grad=True) b = torch.ones(2, requires_grad=True) # No extra inplace c = MyAdder.apply(a.clone(), b) c.sum().backward() self.assertTrue(bw_called[0] == 1) # With extra inplace on the output bw_called[0] = 0 c = MyAdder.apply(a.clone(), b) c += 2 c.sum().backward() self.assertTrue(bw_called[0] == 1) # The input is a view bw_called[0] = 0 c = MyAdder.apply(a.clone().view_as(a), b) c.sum().backward() self.assertTrue(bw_called[0] == 1) # Should not give non-inputs to mark_dirty class MyAdderBad(Function): @staticmethod def forward(ctx, a, b): c = 3 * a c.add_(b) ctx.mark_dirty(c) return c @staticmethod def backward(ctx, grad): bw_called[0] += 1 grad = 3 * grad return grad, grad a = torch.ones(2, requires_grad=True) b = torch.ones(2, requires_grad=True) with warnings.catch_warnings(record=True) as w: MyAdderBad.apply(a.clone(), b) self.assertEqual(len(w), 1) # II) Multiple outputs class MyBadAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a, a + b @staticmethod def backward(ctx, ga, gab): bw_called[0] += 1 return ga + gab, ga + gab # No extra inplace bw_called[0] = 0 c, d = MyBadAdder.apply(a.clone(), b) (c * d).sum().backward() self.assertTrue(bw_called[0] == 1) # With extra inplace on the output bw_called[0] = 0 c, d = MyBadAdder.apply(a.clone(), b) c += 2 (c * d).sum().backward() self.assertTrue(bw_called[0] == 1) # The input is a view inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor" with self.assertRaisesRegex(RuntimeError, inplace_on_view_err): c, d = MyBadAdder.apply(a.clone().view_as(a), b) # III) Inplace + other op class MyOutPlaceAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a.clone(), a + b @staticmethod def backward(ctx, ga, gab): bw_called[0] += 1 return ga + gab, ga + 2 * gab # We don't reuse the input def fn(a, b): orig_a = a.clone().view_as(a) c, d = MyOutPlaceAdder.apply(orig_a, b) return (c * d).sum() bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output." 
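        # MyOutPlaceAdder marks `a` dirty but returns a.clone() and a + b,
        # so the dirtied tensor is not among the outputs; autograd rejects this.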
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err): fn(a, b) def test_custom_function_return_view_in_nograd(self): class Alias(Function): @staticmethod def forward(ctx, x): return x[:] @staticmethod def backward(ctx, gx): return gx inp = torch.rand(2, requires_grad=True) with torch.no_grad(): output = Alias.apply(inp) with torch.no_grad(): expected_output = inp[:] # Calling the custom function should operate as if we called an equivalent op self.assertEqual(output.requires_grad, expected_output.requires_grad) # Check that in-place modification on view throws leaf_grad_err = "A view was created in no_grad mode and is being modified inplace" with self.assertRaisesRegex(RuntimeError, leaf_grad_err): output.zero_() def test_grad_mode_restored_reentrant(self): class MyFunction(Function): @staticmethod def forward(ctx, inp): return inp.clone() @staticmethod def backward(ctx, go): original = torch._C.is_grad_enabled() with torch.enable_grad(): self.assertTrue(torch._C.is_grad_enabled()) foo = torch.rand(go.size(), requires_grad=True) grad, = torch.autograd.grad( foo ** 3, foo, grad_outputs=go ) self.assertTrue(torch._C.is_grad_enabled()) self.assertTrue(torch._C.is_grad_enabled() == original) return grad inp = torch.rand(3, requires_grad=True) # Case where original==False MyFunction.apply(inp).sum().backward() # Case where original==True MyFunction.apply(inp).sum().backward(create_graph=True) def test_power_function(self): a = torch.tensor([0., 0., 0.]) b = torch.tensor([-1., 0., 1.], requires_grad=True) c = torch.sum(a**b) c.backward() self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.])) s = 0 b = torch.tensor([-1., 0., 1.], requires_grad=True) c = torch.sum(s**b) c.backward() self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.])) def test_nansum_with_nans(self): a = torch.randn(2, 2, 2, 2) with torch.no_grad(): a[a < 0.2] = float('nan') a.requires_grad = True # No args gradcheck(lambda x: x.nansum(), a) gradgradcheck(lambda x: x.nansum(), a) # Single dim gradcheck(lambda x: x.nansum((0)), a) gradgradcheck(lambda x: x.nansum((0)), a) # Multi dim gradcheck(lambda x: x.nansum((0, 2)), a) gradgradcheck(lambda x: x.nansum((0, 2)), a) gradcheck(lambda x: x.nansum((0, -1)), a) gradgradcheck(lambda x: x.nansum((0, -1)), a) # With keep-dim gradcheck(lambda x: x.nansum((0, -1), True), a) gradgradcheck(lambda x: x.nansum((0, -1), True), a) def test_nansum_dtype(self): inp = torch.randn(2, 2, 2, 2) with torch.no_grad(): inp[inp < 0.2] = float('nan') def test(inp, inp_dtype, out_dtype): with torch.no_grad(): a = inp.to(inp_dtype) a.requires_grad = True b = torch.sum(a, dtype=out_dtype) b.backward() self.assertEqual(a.dtype, a.grad.dtype) test(inp, torch.float, torch.double) test(inp, torch.double, torch.float) def test_custom_function_error(self): class BadFw(Function): @staticmethod def backward(ctx, foo): return foo class BadBw(Function): @staticmethod def forward(ctx, foo): return foo.clone() inp = torch.rand(1, requires_grad=True) with self.assertRaisesRegex(NotImplementedError, "must implement the forward"): BadFw.apply(inp) with self.assertRaisesRegex(RuntimeError, "must implement the backward"): BadBw.apply(inp).sum().backward() def test_custom_function_local_inplace(self): class MyFn(torch.autograd.Function): @staticmethod def forward(ctx, inp, inplace): view = inp.clone()[:3] if inplace: view += 2 return view @staticmethod def backward(ctx, grad): return grad, None base = torch.rand(10, requires_grad=True) foo = MyFn.apply(base, False) self.assertEqual(foo.grad_fn.__class__.__name__, 
"MyFnBackward") foo = MyFn.apply(base, True) self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward") def test_integer_outputs(self): inp = torch.rand(4, requires_grad=True) out = inp.argmax() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) out = inp.argmin() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) out = inp.argsort() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) val = torch.rand((), requires_grad=True) out = torch.searchsorted(inp, val) self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) def index_variable(shape, max_indices): if not isinstance(shape, tuple): shape = (shape,) index = torch.rand(*shape).mul_(max_indices).floor_().long() return index def index_perm_variable(shape, max_indices): if not isinstance(shape, tuple): shape = (shape,) index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape) return index def gather_variable(shape, index_dim, max_indices, duplicate=False): assert len(shape) == 2 assert index_dim < 2 batch_dim = 1 - index_dim index = torch.LongTensor(*shape) for i in range(shape[index_dim]): index.select(index_dim, i).copy_( torch.randperm(max_indices)[:shape[batch_dim]]) if duplicate: index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) return index def bernoulli_scalar(): return torch.tensor(0, dtype=torch.uint8).bernoulli_() def gradgradcheck_method_precision_override(test_name): # these are just empirical observations, we should improve gradgradcheck_precision_override = { 'test_norm': {'atol': 2e-2, 'rtol': 1e-2}, 'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2}, 'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2}, 'test_dist': {'atol': 5e-2, 'rtol': 1e-2}, 'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2}, } non_broadcasted_test_name = test_name.split("_broadcast")[0] override = gradgradcheck_precision_override.get(non_broadcasted_test_name) if override: if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name: # errors accumulated across 1 dimension override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S} elif 'broadcast_all' in test_name: # errors accumulated across multiple dimensions override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S} return override def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable, input_variables, run_gradgradcheck=True): test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION)) if name in EXCLUDE_GRADGRADCHECK or test_name in EXCLUDE_GRADGRADCHECK_BY_TEST_NAME: return gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name) if gradgradcheck_precision_override is not None: atol = gradgradcheck_precision_override['atol'] rtol = gradgradcheck_precision_override['rtol'] test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol, gen_non_contig_grad_outputs=True)) else: test_case.assertTrue(gradgradcheck(apply_method, input_variables, gen_non_contig_grad_outputs=True)) def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks, f_args_variable, f_args_tensor): output_variable = apply_fn(*f_args_variable) if run_grad_checks: run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn, output_variable, f_args_variable) self_variable = f_args_variable[0] if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None: 
output_variable.backward(randn_like(output_variable)) test_case.assertEqualTypeString(self_variable, self_variable.grad) test_case.assertEqual(self_variable.size(), self_variable.grad.size()) # this list corresponds to ops which have separate tests defined for complex dtypes in # common_methods_invocations.py # test for these ops with 'complex' in variant should only run for complex and # the tests for these ops which do not have 'complex' in variant should not run for complex # and only run for floating point separate_complex_tests = ['log', 'log10', 'log1p', 'log2', 'reciprocal', 'tan'] # NOTE: Some non-holomorphic are separately tested in TestAutogradComplex until gradcheck works properly # for non-holomorphic functions # allow list for complex complex_list = ['t', 'view', 'reshape', 'reshape_as', 'view_as', 'zero_', 'clone', 'tril', 'triu', 'fill_', 'eq_', 'ne_', 'permute', 'squeeze', 'unsqueeze', 'chunk', 'split', 'split_with_sizes', 'resize', 'resize_as', 'sin', 'cos', '__rmul__', '__rdiv__', 'sum', 'transpose', 'round', 'add', 'roll', '__radd__', 'repeat', 'expand', 'mul', 'tanh', 'flip', 'fliplr', 'flipud', 'rot90'] + separate_complex_tests def add_test( name, self_size, args, variant_name='', check_ad=(), # only used in test_jit dim_args_idx=(), skipTestIf=(), output_process_fn=lambda x: x, kwargs=None): kwargs = kwargs if kwargs else {} basic_test_name = 'test_' + name if variant_name != '': basic_test_name += '_' + variant_name if name in separate_complex_tests and 'complex' in variant_name: run_only_complex = True else: run_only_complex = False for dtype in [torch.double, torch.cdouble]: for dim_perm in product([-1, 1], repeat=len(dim_args_idx)): test_name = basic_test_name new_args = [arg * dim_perm[dim_args_idx.index(i)] if i in dim_args_idx else arg for i, arg in enumerate(args)] test_name = basic_test_name + ''.join('_neg' + str(i) for i, idx in enumerate(dim_perm) if idx < 0) if dtype.is_complex: # TODO: remove this. this is temporary while we ramp up the complex support. if name in complex_list and 'scalar' not in test_name and 'constant' not in test_name: if name in separate_complex_tests and 'complex' not in variant_name: continue if not run_only_complex: test_name = test_name + '_complex' else: continue elif run_only_complex: continue new_args = tuple(new_args) # for-loop bodies don't define scopes, so we have to save the variables # we want to close over in some way def do_test(self, device, dtype=dtype, name=name, self_size=self_size, args=new_args, test_name=test_name, output_process_fn=output_process_fn): def check(name): is_magic_method = name[:2] == '__' and name[-2:] == '__' is_inplace = name[-1] == "_" and not is_magic_method self_variable = create_input((self_size,), dtype=dtype, device=device)[0][0] # FixMe: run grad checks on inplace self if is_inplace: self_variable.requires_grad = False # need to record this because methods can change the size (e.g. 
                    args_variable, kwargs_variable = create_input(args, requires_grad=not is_inplace,
                                                                  call_kwargs=kwargs, dtype=dtype, device=device)
                    self_tensor = deepcopy(self_variable)
                    args_tensor = deepcopy(unpack_variables(args_variable))
                    if not exclude_tensor_method(name, test_name):
                        output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
                        output_tensor = getattr(self_tensor, name)(*args_tensor, **kwargs_variable)
                        if not isinstance(output_tensor, torch.Tensor) and not istuple(output_tensor):
                            if dtype.is_complex:
                                output_tensor = torch.tensor((output_tensor, ), dtype=torch.cfloat, device=device)
                            else:
                                output_tensor = torch.tensor((output_tensor, ), dtype=torch.float, device=device)
                        self.assertEqual(unpack_variables(output_variable), output_tensor)
                        # TODO: check that both have changed after adding all inplace ops

                        def fn(*inputs):
                            output = getattr(inputs[0], name)(*inputs[1:], **kwargs)
                            return output_process_fn(output)

                        if not is_inplace and name not in EXCLUDE_GRADCHECK:
                            run_grad_and_gradgrad_checks(self, name, test_name, fn,
                                                         output_variable, (self_variable,) + args_variable)

                    # functional interface tests
                    if hasattr(torch, name) and name not in EXCLUDE_FUNCTIONAL:
                        def fn(*inputs):
                            output = getattr(torch, name)(*inputs, **kwargs)
                            return output_process_fn(output)

                        f_args_variable = (self_variable,) + args_variable
                        f_args_tensor = (self_tensor,) + args_tensor
                        # could run the gradchecks again, but skip since we did it for the methods above.
                        run_gradcheck = exclude_tensor_method(name, test_name) and not is_inplace and name not in EXCLUDE_GRADCHECK
                        run_functional_checks(self, test_name, name, fn, run_gradcheck,
                                              f_args_variable, f_args_tensor)

                    # check for correct type of input and input.grad
                    if not is_inplace:
                        self_variable = create_input((self_size,), requires_grad=True, dtype=dtype)[0][0]
                        args_variable, kwargs_variable = create_input(args, requires_grad=False,
                                                                      call_kwargs=kwargs, dtype=dtype)
                        if hasattr(self_variable, name):
                            output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
                        else:
                            self_and_args_variable = (self_variable,) + args_variable
                            output_variable = getattr(torch, name)(*self_and_args_variable, **kwargs_variable)
                        if isinstance(output_variable, torch.autograd.Variable):
                            if output_variable.is_sparse:
                                rand = randn_like(output_variable.to_dense()).to_sparse()
                            else:
                                rand = randn_like(output_variable)
                            output_variable.backward(rand)
                            self.assertTrue(type(self_variable) == type(self_variable.grad))
                            self.assertTrue(self_variable.size() == self_variable.grad.size())

                        # compare grads to inplace grads
                        inplace_name = name + '_'
                        # can't broadcast inplace to left hand side
                        skip_inplace = ('broadcast_lhs' in test_name or
                                        'broadcast_all' in test_name or
                                        'atanh' in test_name or
                                        'acosh' in test_name or
                                        'asinh' in test_name)
                        if hasattr(torch.ones(1), inplace_name) and not skip_inplace:
                            output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
                            if not isinstance(output_variable, tuple):
                                output_variable = (output_variable,)
                            inplace_self_variable = deepcopy(self_variable)
                            inplace_self_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
                                                               for i in (inplace_self_variable,))
                            inplace_args_variable = deepcopy(args_variable)
                            inplace_args_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
                                                               for i in inplace_args_variable)

                            inplace_output_variable = (
                                getattr(inplace_self_variable_copy[0], inplace_name)(*inplace_args_variable_copy,
                                                                                     **kwargs_variable))
                            if not isinstance(inplace_output_variable, tuple):
                                inplace_output_variable = (inplace_output_variable,)
                            self.assertEqual(inplace_output_variable, output_variable)
                            # Check that gradient is the same
                            for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
                                                (self_variable,) + args_variable):
                                if not isinstance(inp_i, torch.Tensor):
                                    assert not isinstance(i, torch.Tensor)
                                    continue
                                if inp_i.grad is not None:
                                    with torch.no_grad():
                                        inp_i.grad.zero_()
                                if i.grad is not None:
                                    with torch.no_grad():
                                        i.grad.zero_()
                            for i_o, o in zip(inplace_output_variable, output_variable):
                                if dtype.is_complex:
                                    grad = randn_like(i_o).to(torch.cdouble)
                                else:
                                    grad = randn_like(i_o).double()
                                i_o.backward(grad)
                                o.backward(grad)
                            for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
                                                (self_variable,) + args_variable):
                                if not isinstance(inp_i, torch.Tensor):
                                    continue
                                self.assertEqual(inp_i.grad, i.grad)

                check(name)
                inplace_name = name + '_'
                # can't broadcast inplace to left hand side
                broadcast_skip_inplace = 'broadcast_lhs' in test_name or 'broadcast_all' in test_name
                if hasattr(torch.ones(1), inplace_name) and not broadcast_skip_inplace:
                    check(inplace_name)

            assert not hasattr(TestAutograd, test_name), 'Two tests have the same name: ' + test_name

            for skip in skipTestIf:
                do_test = skip(do_test)

            setattr(TestAutogradDeviceType, test_name, do_test)


class TestAutogradComplex(TestCase):
    # remove this test after gradcheck support is added for non-holomorphic functions
    def test_real(self):
        x = torch.randn(3, 4, 5, dtype=torch.cdouble, requires_grad=True)
        x.real.sum().backward()
        self.assertEqual(x.grad, torch.ones_like(x))

    # remove this test after gradcheck support is added for non-holomorphic functions
    def test_imag(self):
        x = torch.randn(3, 4, 5, dtype=torch.cdouble, requires_grad=True)
        x.imag.sum().backward()
        self.assertEqual(x.grad, -1j * torch.ones_like(x))

    # remove this test after gradcheck support is added for non-holomorphic functions
    def test_view_as_real(self):
        x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
        torch.view_as_real(x).sum().backward()
        self.assertEqual(x.grad, torch.full_like(x, 1 - 1j))

    # remove this test after gradcheck support is added for non-holomorphic functions
    def test_view_as_complex(self):
        x = torch.randn(10, 2, dtype=torch.double, requires_grad=True)
        torch.view_as_complex(x).sum().backward()
        self.assertEqual(x.grad, torch.tensor([1, 0], dtype=torch.double).expand_as(x))

    def test_view_func_for_complex_views(self):
        # case 1: both parent and child have view_func
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        x0 = x.clone()
        x1 = torch.view_as_complex(x0)
        x2 = torch.view_as_real(x1)
        x2.mul_(2)
        x2.sum().backward()

        y0 = y.clone()
        y0.mul_(2)
        y0.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 2: parent has view_func but child does not
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a):
            b = a.clone()
            b1 = torch.view_as_complex(b)
            b2 = b1.reshape(b1.numel())
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 3: parent does not have a view_func but child does
        x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a, dim0_size=5):
            b = a.clone()
            b1 = b.reshape(dim0_size, 2)
            b2 = torch.view_as_real(b1)
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

    def as_identity(self):
        # view_as_real and view_as_complex behavior should be like an identity
        def func(z):
            z_ = torch.view_as_complex(z)
            z_select = torch.select(z_, z_.dim() - 1, 0)
            z_select_real = torch.view_as_real(z_select)
            return z_select_real.sum()

        z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
        gradcheck(func, [z])
        func(z).backward()

        z1 = z.clone().detach().requires_grad_(True)
        torch.select(z1, z1.dim() - 2, 0).sum().backward()

        self.assertEqual(z.grad, z1.grad)


class TestAutogradFunctional(TestCase):
    def _assert_same_struct(self, res, base):
        # base and res should be Tensors or tuple of Tensors with the same size
        if isinstance(base, torch.Tensor):
            self.assertTrue(isinstance(res, torch.Tensor))
            self.assertEqual(base.size(), res.size())
        elif isinstance(base, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(base), len(res))
            for el_base, el_res in zip(base, res):
                self.assertTrue(isinstance(el_base, torch.Tensor))
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertEqual(el_base.size(), el_res.size())
        else:
            # Wrong base
            raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
                               " the right structure.")

    def _assert_interleaved_struct(self, res, base1, base2):
        # base1 and base2 can be Tensors or tuples of Tensors.
        # If they are tuples, res should be a tuple as well.
        # The indexing works as follows for base1, base2 being
        # - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
        # - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
        # - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
        # - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
        if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
            self.assertTrue(isinstance(res, torch.Tensor))
            self.assertEqual(res.size(), base1.size() + base2.size())
        elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base1))
            for el_res, el_base1 in zip(res, base1):
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertTrue(isinstance(el_base1, torch.Tensor))
                self.assertEqual(el_res.size(), el_base1.size() + base2.size())
        elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base2))
            for el_res, el_base2 in zip(res, base2):
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertTrue(isinstance(el_base2, torch.Tensor))
                self.assertEqual(el_res.size(), base1.size() + el_base2.size())
        elif isinstance(base1, tuple) and isinstance(base2, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base1))
            for el_res, el_base1 in zip(res, base1):
                self.assertTrue(isinstance(el_res, tuple))
                self.assertEqual(len(res), len(base2))
                for el_el_res, el_base2 in zip(el_res, base2):
                    self.assertTrue(isinstance(el_el_res, torch.Tensor))
                    self.assertTrue(isinstance(el_base2, torch.Tensor))
                    self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
        else:
            # Wrong bases
            raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
                               " the right structure.")

    def test_vjp_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = torch.rand(4)
        v = torch.ones(3)
        with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
            res = autogradF.vjp(foo, (inp, 2), v)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
            res = autogradF.vjp(bar, inp, v)
        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
            res = autogradF.vjp(foo, inp)

        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))

        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.vjp(foo, inp, v[:2])

        res = autogradF.vjp(foo, inp, v)[1]
        self._assert_same_struct(res, inp)

    def test_vjp_err_check_strict(self):
        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vjp(foo, inp, v, strict=True)
        res = autogradF.vjp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vjp(bar, inp, v, strict=True)
        res = autogradF.vjp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()

        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
        res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1], v)

    def test_vjp_output(self):
        def reducer(x):
            return x.sum(dim=1)

        inputs = torch.rand(4, 4)
        v = torch.ones(4)
        res = autogradF.vjp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def adder(x, y):
            return 2 * x + 3 * y

        inputs = (torch.rand(2), torch.rand(2))
        v = torch.ones(2)
        out, vjp_val = autogradF.vjp(adder, inputs, v)
        self._assert_same_struct(vjp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(vjp_val[0].grad_fn)
        self.assertIsNone(vjp_val[1].grad_fn)

        def adder(x, y):
            return 2 * x + 3 * y, x + y

        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
        out, vjp_val = autogradF.vjp(adder, inputs, v)
        self._assert_same_struct(vjp_val, inputs)
        self.assertIsNone(out[0].grad_fn)
        self.assertIsNone(out[1].grad_fn)
        self.assertIsNone(vjp_val[0].grad_fn)
        self.assertIsNone(vjp_val[1].grad_fn)

    def test_vjp_scalar(self):
        def reducer(x):
            return x.sum()

        inputs = torch.rand(4, 4)
        v = torch.ones([])
        res = autogradF.vjp(reducer, inputs, v)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)

        res = autogradF.vjp(reducer, inputs)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)

        def expander(x):
            return x.unsqueeze(0).repeat(4)

        inputs = torch.rand([])
        v = torch.ones(4)
        res = autogradF.vjp(expander, inputs, v)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)

    def test_vjp_create_graph(self):
        def reducer(x):
            return x.sum(dim=1)

        inputs = torch.rand(2, 2)
        v = torch.ones(2)

        inputs.requires_grad_()
        v.requires_grad_()
        res = autogradF.vjp(reducer, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)

        gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True),
                  (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True),
                      (inputs, v))

        def adder(x, y):
            return 2 * x + 3 * y, x * y

        inputs = (torch.rand(2, requires_grad=True), torch.rand(2, requires_grad=True))
        v = (torch.tensor([1., 0.], requires_grad=True), torch.tensor([1., 0.], requires_grad=True))

        gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

        def foo(*args):
            x, y = args[:2]
            v = args[2:]

            x = x.cos()
            val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)

            return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)

    def test_jvp_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
            res = autogradF.jvp(foo, (inp, 2), v)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
            res = autogradF.jvp(bar, inp, v)

        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
            res = autogradF.jvp(foo, inp)

        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.jvp(foo, inp, (v, v))

        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.jvp(foo, inp, v[:2])

        res = autogradF.jvp(foo, inp, v)[1]
        self._assert_same_struct(res, foo(inp))

    def test_jvp_err_check_strict(self):
        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.jvp(foo, inp, v, strict=True)
        res = autogradF.jvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.jvp(bar, inp, v, strict=True)
        res = autogradF.jvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)
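        # For reference: jvp computes J @ v (a Jacobian-vector product in the
        # input direction v), while vjp above computes v^T @ J; in this API the
        # jvp is obtained with a double-backward trick, so both paths exercise
        # reverse-mode autograd and share the same strict-mode error surface.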
        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()

        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1], v)

    def test_jvp_output(self):
        def reducer(x):
            return x.sum(dim=1)

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def adder(x, y):
            return 2 * x + 3 * y

        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.ones(2), torch.ones(2))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)

        def adder(x, y):
            return 2 * x + 3 * y, x + y

        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out[0].grad_fn)
        self.assertIsNone(out[1].grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)

    def test_jvp_scalar(self):
        def reducer(x):
            return x.sum()

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[0], torch.zeros([]))
        self._assert_same_struct(res[1], res[0])

        def expander(x):
            return x.unsqueeze(0).repeat(4)

        inputs = torch.rand([])
        v = torch.ones([])
        res = autogradF.jvp(expander, inputs, v)
        self._assert_same_struct(res[0], torch.zeros(4))
        self._assert_same_struct(res[1], res[0])

        res = autogradF.jvp(expander, inputs)
        self._assert_same_struct(res[0], torch.zeros(4))
        self._assert_same_struct(res[1], res[0])

    def test_jvp_create_graph(self):
        def reducer(x):
            return x.sum(dim=1)

        inputs = torch.rand(2, 2)
        v = torch.ones(2, 2)

        inputs.requires_grad_()
        v.requires_grad_()
        res = autogradF.jvp(reducer, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)

        gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))

        def adder(x, y):
            return 2 * x + 3 * y, x * y

        inputs = (torch.rand(2, requires_grad=True), torch.rand(2, requires_grad=True))
        v = (torch.tensor([1., 0.], requires_grad=True), torch.tensor([1., 0.], requires_grad=True))

        gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

        def foo(*args):
            x, y = args[:2]
            v = args[2:]

            x = x.cos()
            val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)

            return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)

    def test_jacobian_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
            res = autogradF.jacobian(foo, (inp, 2))

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
            res = autogradF.jacobian(bar, inp)
        res = autogradF.jacobian(foo, inp)
        self._assert_interleaved_struct(res, foo(inp), inp)

        def foo(a, b):
            return b, 3 * a.narrow(0, 0, 3)

        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.jacobian(foo, inp)
        self._assert_interleaved_struct(res, foo(*inp), inp)

    def test_jacobian_err_check_strict(self):
        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.jacobian(foo, inp, strict=True)
        res = autogradF.jacobian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, foo(inp), inp)
        self.assertEqual(res.abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
            res = autogradF.jacobian(bar, inp, strict=True)
        res = autogradF.jacobian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, foo(inp), inp)
        self.assertEqual(res.abs().sum(), 0.)

        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()

        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
        res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res, torch.eye(4))

    def test_jacobian_output(self):
        def exp_reducer(x):
            return x.exp().sum(dim=1)

        inputs = torch.rand(4, 4)
        res = autogradF.jacobian(exp_reducer, inputs)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNone(res.grad_fn)

        def identity(x):
            return x.clone()

        inputs = torch.rand(4)
        res = autogradF.jacobian(identity, inputs)
        self._assert_interleaved_struct(res, identity(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        self.assertEqual(res, torch.eye(4))

        def add_exp_reducer(x, y):
            return (x + y.exp()).sum(dim=1)

        inputs = (torch.rand(4, 4), torch.rand(4, 4))
        res = autogradF.jacobian(add_exp_reducer, inputs)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

    def test_jacobian_scalar(self):
        def reducer(x):
            return x.sum()

        inputs = torch.rand(4, 4)
        res = autogradF.jacobian(reducer, inputs)
        self._assert_same_struct(res, inputs)

        def expander(x):
            return x.unsqueeze(0).repeat(4)

        inputs = torch.rand([])
        res = autogradF.jacobian(expander, inputs)
        self._assert_same_struct(res, torch.zeros(4))

    def test_jacobian_create_graph(self):
        def exp_reducer(x):
            return x.exp().sum(dim=1)

        inputs = torch.rand(4, 4, requires_grad=True)
        res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNotNone(res.grad_fn)

        gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True), inputs)
        gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True), inputs)

        def add_exp_reducer(x, y):
            return (x + y).exp().sum(dim=1)

        inputs = (torch.rand(4, 4, requires_grad=True), torch.rand(4, 4, requires_grad=True))
        res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)

        gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True), inputs)
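        # gradgradcheck differentiates through the jacobian computation itself,
        # which is only possible because create_graph=True keeps the graph alive.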
        gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True), inputs)

        def foo(x, y):
            x = x.cos()
            val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True)

            res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
            res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
            return res

        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)

    def test_hessian_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            return 3 * a.narrow(0, 0, 3)

        def bar3(a):
            return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)

        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
            res = autogradF.hessian(foo, (inp, 2))

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
            res = autogradF.hessian(bar, inp)

        err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hessian(bar2, inp)

        with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
            res = autogradF.hessian(bar3, inp)

        res = autogradF.hessian(foo, inp)
        self._assert_interleaved_struct(res, inp, inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.hessian(foo, inp)
        self._assert_interleaved_struct(res, inp, inp)

    def test_hessian_err_check_strict(self):
        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.hessian(foo, inp, strict=True)
        res = autogradF.hessian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
            res = autogradF.hessian(bar, inp, strict=True)
        res = autogradF.hessian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.hessian(bar2, inp, strict=True)
        res = autogradF.hessian(bar2, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
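
    # For context, a minimal sketch (not part of the original suite) of the
    # identity the hessian tests below rely on: the Hessian of a scalar
    # function is the Jacobian of its gradient. Names here are illustrative:
    #
    #   def hessian_by_hand(f, x):
    #       def grad_f(inp):
    #           return torch.autograd.grad(f(inp), inp, create_graph=True)[0]
    #       return autogradF.jacobian(grad_f, x)
    #
    # autogradF.hessian packages this double differentiation together with the
    # structure and strict-mode checks exercised below.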
    def test_hessian_output(self):
        def pow_reducer(x):
            return x.pow(3).sum()

        inputs = torch.rand(2, 2)
        res = autogradF.hessian(pow_reducer, inputs)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res.grad_fn)

        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()

        inputs = (torch.rand(2, 2), torch.rand(2, 2))
        res = autogradF.hessian(add_pow_reducer, inputs)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)

    def test_hessian_scalar(self):
        def reducer(x):
            return x.sum()

        inputs = torch.rand(4, 4)
        res = autogradF.hessian(reducer, inputs)
        self._assert_interleaved_struct(res, inputs, inputs)

        inputs = torch.rand([])
        res = autogradF.hessian(reducer, inputs)
        self._assert_same_struct(res, inputs)

        def bad_reducer(x):
            return x.sum().view(1, 1, 1)

        inputs = torch.rand(4, 4)
        res = autogradF.hessian(bad_reducer, inputs)
        self._assert_interleaved_struct(res, inputs, inputs)

    def test_hessian_create_graph(self):
        def pow_reducer(x):
            return x.pow(3).sum()

        inputs = torch.rand(2, 2, requires_grad=True)
        res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res.grad_fn)

        gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True), inputs)
        gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True), inputs)

        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()

        inputs = (torch.rand(2, 2, requires_grad=True), torch.rand(2, 2, requires_grad=True))
        res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)

        def flatten(inp):
            return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)

        gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True)), inputs)
        gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True)), inputs)

        def foo(x, y):
            x = x.cos()
            val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True)

            res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
            res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
            return res

        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)

    def test_vhp_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            return 3 * a.narrow(0, 0, 3)

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
            res = autogradF.vhp(foo, (inp, 2), v)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
            res = autogradF.vhp(bar, inp, v)

        err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.vhp(bar2, inp, v)

        with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
            res = autogradF.vhp(foo, inp, torch.rand(5))

        with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
            res = autogradF.vhp(foo, inp, (v, 2))

        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
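
        # With a tuple of inputs, the returned vhp mirrors the input structure:
        # one Tensor per input, each matching that input's shape.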
        inp = (torch.rand(4), torch.rand(5))
        v = (torch.rand(4), torch.rand(5))
        res = autogradF.vhp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

    def test_vhp_err_check_strict(self):
        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vhp(foo, inp, v, strict=True)
        res = autogradF.vhp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vhp(bar, inp, v, strict=True)
        res = autogradF.vhp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.vhp(bar2, inp, v, strict=True)
        res = autogradF.vhp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

    def test_vhp_output(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.vhp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (torch.rand(3), torch.rand(4))
        v = (torch.ones(3), torch.ones(4))
        out, vhp_val = autogradF.vhp(bar, inputs, v)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(vhp_val[0].grad_fn)
        self.assertIsNone(vhp_val[1].grad_fn)

    def test_vhp_scalar(self):
        def reducer(x):
            return x.sum()

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

        inputs = torch.rand([])
        v = torch.rand([])
        res = autogradF.vhp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

        res = autogradF.vhp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)

        def bad_reducer(x):
            return x.sum().view(1, 1, 1)

        inputs = torch.rand(4, 4)
        v = torch.rand(4, 4)
        res = autogradF.vhp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

    def test_vhp_create_graph(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = torch.rand(4, 4, requires_grad=True)
        v = torch.ones(4, 4, requires_grad=True)
        res = autogradF.vhp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)

        gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (torch.rand(3, requires_grad=True), torch.rand(4, requires_grad=True))
        v = (torch.ones(3, requires_grad=True), torch.ones(4, requires_grad=True))
        out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(vhp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(vhp_val[0].grad_fn)
        self.assertIsNotNone(vhp_val[1].grad_fn)

        gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1],
                  inputs + v)
        gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1],
                      inputs + v)

        def foo(*args):
            x, y = args[:2]
            v = args[2:]

            x = x.cos()
            val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)

            return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)

    def test_hvp_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        def bar2(a):
            return 3 * a.narrow(0, 0, 3)

        inp = torch.rand(4)
        v = torch.rand(4)
        res = autogradF.hvp(foo, inp, v)
        with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
            res = autogradF.hvp(foo, (inp, 2), v)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
            res = autogradF.hvp(bar, inp, v)

        err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hvp(bar2, inp, v)

        with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
            res = autogradF.hvp(foo, inp, torch.rand(5))

        with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
            res = autogradF.hvp(foo, inp, (v, 2))

        res = autogradF.hvp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

        inp = (torch.rand(4), torch.rand(5))
        v = (torch.rand(4), torch.rand(5))
        res = autogradF.hvp(foo, inp, v)
        self._assert_same_struct(res[1], inp)

    def test_hvp_err_check_strict(self):
        def foo(a):
            return a.detach().sum()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()

        def bar2(a):
            # A linear function for which the jacobian is independent of the input
            return (3 * a).sum()

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.hvp(foo, inp, v, strict=True)
        res = autogradF.hvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.hvp(bar, inp, v, strict=True)
        res = autogradF.hvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.hvp(bar2, inp, v, strict=True)
        res = autogradF.hvp(bar2, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
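
    # For context, a sketch (not the library implementation) of how an hvp can
    # be written with two plain backward passes, which is what makes the
    # create_graph tests below meaningful:
    #
    #   out = foo(inp)                                          # scalar output
    #   g, = torch.autograd.grad(out, inp, create_graph=True)   # gradient
    #   hvp, = torch.autograd.grad(g, inp, grad_outputs=v)      # H @ v
    #
    # For twice-differentiable functions the Hessian is symmetric, so hvp and
    # vhp agree; test_hessian_match_vhp_hvp below checks exactly that.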
    def test_hvp_output(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.hvp(foo, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (torch.rand(3), torch.rand(4))
        v = (torch.ones(3), torch.ones(4))
        out, hvp_val = autogradF.hvp(bar, inputs, v)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(hvp_val[0].grad_fn)
        self.assertIsNone(hvp_val[1].grad_fn)

    def test_hvp_scalar(self):
        def reducer(x):
            return x.exp().sum()

        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

        inputs = torch.rand([])
        v = torch.rand([])
        res = autogradF.hvp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

        res = autogradF.hvp(reducer, inputs)
        self._assert_same_struct(res[1], inputs)

        def bad_reducer(x):
            return x.exp().sum().view(1, 1, 1)

        inputs = torch.rand(4, 4)
        v = torch.rand(4, 4)
        res = autogradF.hvp(bad_reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)

    def test_hvp_create_graph(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = torch.rand(4, 4, requires_grad=True)
        v = torch.ones(4, 4, requires_grad=True)
        res = autogradF.hvp(foo, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)

        gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))

        def bar(a, b):
            return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

        inputs = (torch.rand(3, requires_grad=True), torch.rand(4, requires_grad=True))
        v = (torch.ones(3, requires_grad=True), torch.ones(4, requires_grad=True))
        out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
        self._assert_same_struct(hvp_val, inputs)
        self.assertIsNotNone(out.grad_fn)
        self.assertIsNotNone(hvp_val[0].grad_fn)
        self.assertIsNotNone(hvp_val[1].grad_fn)

        gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)

        def foo(*args):
            x, y = args[:2]
            v = args[2:]

            x = x.cos()
            val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)

            return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)

    def test_jacobian_match_vjp_jvp(self):
        def foo(x):
            return x ** 3 + x.sum()

        inputs = torch.rand(4)
        v = torch.rand(4)

        jac = autogradF.jacobian(foo, inputs)
        jvp = autogradF.jvp(foo, inputs, v)[1]
        vjp = autogradF.vjp(foo, inputs, v)[1]

        self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
        self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))

    def test_hessian_match_vhp_hvp(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()

        inputs = torch.rand(4)
        v = torch.rand(4)

        hes = autogradF.hessian(foo, inputs)
        hvp = autogradF.hvp(foo, inputs, v)[1]
        vhp = autogradF.vhp(foo, inputs, v)[1]

        self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
        self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))


# Generic device type autograd tests.
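# These tests take a `device` argument and are typically instantiated once per
# available device type (e.g. CPU and CUDA) by the device-generic test
# machinery (instantiate_device_type_tests), outside this excerpt.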
class TestAutogradDeviceType(TestCase):

    def test_min_max_median_backprops_to_all_values(self, device):
        for f in [torch.min, torch.max, torch.median]:
            x = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
            y = f(x)
            y.backward()
            self.assertEqual(x.grad.sum(), 1.)
            self.assertEqual((x.grad == 1 / 3).sum(), 3)

    # skip this test if running on rocm, because in cdist
    # we use __shfl_down_sync on CUDA for fast reduction
    # and it gives incorrect results on rocm platform
    @skipCUDAIfRocm
    def test_cdist(self, device):
        def _test_cdist_for_size(sizex, sizey=None):
            if sizey is None:
                sizey = sizex
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                x = torch.randn(sizex, device=device, dtype=torch.double)
                y = torch.randn(sizey, device=device, dtype=torch.double)
                eps = 1e-6  # to avoid extremum
                x = x - (((x - y) < eps).double() * 2 * eps)
                x.requires_grad = True
                y.requires_grad = True
                f_args_variable = (x, y)

                def f(a, b):
                    return torch.cdist(a, b, p)
                f_args_tensor = deepcopy(unpack_variables(f_args_variable))
                run_functional_checks(self, "test_cdist", "cdist", f,
                                      True, f_args_variable, f_args_tensor)

        def _test_euclidean_large_cdist(sizex, sizey=None):
            if sizey is None:
                sizey = sizex
            x = torch.randn(sizex, device=device, dtype=torch.float)
            y = torch.randn(sizey, device=device, dtype=torch.float)
            eps = 1e-6  # to avoid extremum
            x = x - (((x - y) < eps).float() * 2 * eps)
            x.requires_grad = True
            y.requires_grad = True
            dist = torch.cdist(x, y, p=2)
            # Do a backward pass to check that it is valid for large matrices
            loss = dist.sum()
            loss.backward()

        _test_cdist_for_size((S, S))
        _test_cdist_for_size((S, S, S))
        _test_cdist_for_size((3, 5))
        _test_cdist_for_size((2, 3, 5))
        _test_cdist_for_size((1, 2, 3))
        _test_cdist_for_size((1, 1), (S, 1))
        _test_euclidean_large_cdist((2000, 5))

    def test_cdist_same_inputs(self, device):
        # Test to detect issues in cdist gradient calculation
        # when the distances are 0
        sizex = (1, 27, 32)
        for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
            x = torch.randn(sizex, device=device, dtype=torch.float)
            dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
            y = x.clone()
            eps = 1e-6
            x.requires_grad = True
            d = torch.cdist(x, y)
            d.backward(dist_grad)
            # Check that the backward pass does not contain invalid
            # values such as nan or inf
            assert torch.isfinite(x.grad).all()

    def test_parameter_resize(self, device):
        asd = torch.nn.Parameter(torch.ones(16, device=device))

        for i in range(2):
            with torch.no_grad():
                asd.set_(asd[1:])
                asd.grad = None

            m = torch.cat((asd, asd))
            m.sum().backward()

    # NOTE: flaky on ROCm CI
    @skipCUDAIfRocm
    def test_sparse_ctor_getter_backward(self, device):
        # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
        def _test(size, sparse_dim, nnz, device):
            v_size = [nnz] + list(size[sparse_dim:])
            i = torch.rand(sparse_dim, nnz)
            i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
            i = i.to(torch.long)

            inp = torch.randn(v_size, requires_grad=True)
            other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True)[0]
            other = other.to(device)

            def fn(v):
                x = torch.sparse_coo_tensor(i, v, size, device=device)
                y = (x + other).coalesce()
                yv = y.values()
                new_v = yv.tanh()
                z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
                return z.coalesce().values()

            gradcheck(fn, (inp,))
            # FIXME: make gradgradcheck work.
            # gradgradcheck(fn, (inp,))

            # assert that _values is non-differentiable
            with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
                other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))

        for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
            sparse_size = [] if empty_i else [2, 1]
            dense_size = [1, 0, 2] if empty_v else [1, 2]
            nnz = 0 if empty_nnz else 5
            _test(sparse_size + dense_size, len(sparse_size), nnz, device)

    # autograd tests via common_method_invocations don't allow input tensors to
    # be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
    # check_sparse_nnz is set to False.)
    def test_sparse_mask_autograd(self, device):
        tensor = torch.randn(3, requires_grad=True, device=device)
        mask = torch.ones(3, device=device)
        mask[1] = 0
        mask = mask.to_sparse()
        converted = tensor.sparse_mask(mask).to_dense()
        converted.sum().backward()
        self.assertEqual(tensor.grad, mask.to_dense())

    def test_pyscalar_conversions(self, device):
        def _test_pyscalar_conversions(t, integral_conv):
            # integral -> integral
            l = t(torch.zeros(1, 1, 1, dtype=torch.long))
            pyscalar = -12345
            l[0] = pyscalar
            self.assertEqual(integral_conv(l), pyscalar)

            # floating point -> floating point
            f = Variable(t(torch.randn(1, 1)))
            pyscalar = -12345.1
            f[0] = pyscalar
            self.assertEqual(float(f), pyscalar)
            f[0] = nan
            self.assertTrue(math.isnan(float(f)))
            f[0] = inf
            self.assertEqual(float(f), inf)
            f[0] = -inf
            self.assertEqual(float(f), -inf)

            # integral -> floating point
            # check we can convert something that loses precision
            pyscalar = 1234567890123456789
            self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
            l[0] = pyscalar
            self.assertEqual(float(l), float(pyscalar))

            # floating point -> integral
            f[0] = nan
            self.assertRaises(ValueError, lambda: integral_conv(f[0]))
            f[0] = inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = -inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = sys.float_info.max
            self.assertEqual(integral_conv(f), sys.float_info.max)

            # bool, nonzero
            def test_nonzero(tensor, value, expected):
                tensor[0] = value
                self.assertEqual(expected, bool(tensor))
                self.assertEqual(expected, True if tensor else False)

            test_nonzero(l, 0, False)
            test_nonzero(l, -2, True)
            test_nonzero(f, 0.0, False)
            test_nonzero(f, sys.float_info.min, True)
            test_nonzero(f, nan, bool(nan))
            test_nonzero(f, inf, bool(inf))
            test_nonzero(f, -inf, bool(-inf))

        _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))

    @dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    @dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_set_requires_grad_only_for_floats(self, device, dtype):
        def f1():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad_()

        def f2():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad = True

        def f3():
            torch.ones(1, dtype=dtype, device=device, requires_grad=True)

        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = False  # should always work
        a.requires_grad_(False)

        for f in [f1, f2, f3]:
            if dtype.is_floating_point:
                f()
            else:
                with self.assertRaisesRegex(RuntimeError, 'floating point',
                                            msg="dt: {} device: {}".format(a.dtype, a.device)):
                    f()

    @onlyCUDA
    def test_advanced_indexing_backwards_large(self, device):
        # See https://github.com/pytorch/pytorch/issues/22843
        n = (1 << 16)
        x = torch.rand(n, 1, device=device, requires_grad=True)
        a = x[:, [0]]
        a.sum().backward()
        self.assertEqual(x.grad, torch.ones(n, 1, device=device))
    def test_advanced_indexing_backwards_memory_format(self, device):
        # See https://github.com/pytorch/pytorch/issues/36956
        shape = (2, 8, 1, 2)
        i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
        x = torch.randn(shape, requires_grad=True, device=device)
        x[i].sum().backward()

    def _test_reentrant_parent_error_on_cpu(self, device):
        t1 = torch.rand([3, 3], requires_grad=True)
        t2 = torch.rand([3, 3], device=device, requires_grad=True)
        t3 = torch.rand([3, 3], device=device, requires_grad=True)

        # Parent graph is the cpu graph.
        t4 = t1 * t1
        t5 = TestAutograd.SimulateBackwardError.apply(t4)

        # Child gpu graph (much longer than parent graph).
        prev = t2 * t2
        for i in range(10):
            prev = prev * t2
        reentrant_root = prev

        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()

            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will take much longer.
                reentrant_root.backward()
                return grad

        # Parent gpu graph.
        t6 = ReentrantFunc.apply(t3)
        t7 = t6 * t6

        # Parent graph will error out first, while child graph will continue executing.
        with self.assertRaisesRegex(Exception, "Simulate error"):
            torch.autograd.backward([t5.sum(), t7.sum()])

        # No grads should be accumulated since child graph will stop execution
        # after parent receives error.
        self.assertIsNone(t2.grad)
        self.assertIsNone(t1.grad)
        self.assertIsNone(t3.grad)

    @onlyCUDA
    def test_reentrant_parent_error_on_cpu(self, device):
        before = CudaMemoryLeakCheck.get_cuda_memory_usage()

        # Run as separate function so that gc can clean up everything when we
        # check for memory usage.
        self._test_reentrant_parent_error_on_cpu(device)

        # Wait for autograd thread to cleanup failed tasks.
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()
        start = time.time()
        while before != after and time.time() - start < 30:
            time.sleep(0.1)
            after = CudaMemoryLeakCheck.get_cuda_memory_usage()

        self.assertEqual(before, after)

    # test for backward in https://github.com/pytorch/pytorch/issues/15511
    def test_pdist_large(self, device):
        def func(x):
            return torch.pdist(x, p=2)

        # shape[0] should be able to be (roughly) arbitrarily large, but the kernel
        # is currently limited to smaller sizes (see issue above); this is just testing
        # a floor.
        shape = (1000, 1)
        x = torch.randn(shape, device=device).requires_grad_()
        output = torch.pdist(x, p=2)
        # just run a single backward, as gradcheck/gradgradcheck is expensive here
        output.sum().backward()

    def test_where_functional(self, device):
        x = torch.randn(5, 5, device=device, requires_grad=True)
        y = torch.randn(5, 5, device=device, requires_grad=True)
        cond = mask_not_all_zeros((5, 5)).to(device=device)

        def where(cond, x, y):
            return torch.where(cond, x, y)

        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])

        x = torch.randn(5, 1, 5, device=device, requires_grad=True)
        y = torch.randn(5, 5, 1, device=device, requires_grad=True)
        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])

    def test_where_scalar(self, device):
        x = torch.randn(5, 5, device=device, requires_grad=True)
        scalar = 4.
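        # torch.where accepts a Python scalar for either branch; only the
        # Tensor argument receives a gradient, masked by `cond`.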
        cond = mask_not_all_zeros((5, 5)).to(device=device)

        def where_scalar_first(cond, x):
            return torch.where(cond, scalar, x)

        def where_scalar_second(cond, x):
            return torch.where(cond, x, scalar)

        gradcheck(where_scalar_first, (cond, x))
        gradgradcheck(where_scalar_first, (cond, x))

        gradcheck(where_scalar_second, (cond, x))
        gradgradcheck(where_scalar_second, (cond, x))

    @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                         https://github.com/pytorch/pytorch/issues/34870""")
    def test_ctc_loss(self, device):
        batch_size = 64
        num_labels = 101
        target_length = 15
        gradcheck_input_size = 10

        ZERO_NONE = 0
        ZERO_SOME = 1
        ZERO_ALL = 2

        # input_length, vary_lengths, zero_lengths
        tests = [(150, False, ZERO_NONE),
                 (150, True, ZERO_NONE),
                 (50, True, ZERO_SOME),
                 (50, True, ZERO_ALL)]

        if 'cuda' in device:
            tests += [(50, False, ZERO_NONE),
                      (50, True, ZERO_NONE),
                      (150, True, ZERO_SOME),
                      (150, True, ZERO_ALL)]

        for input_length, vary_lengths, zero_mode in tests:
            targets = torch.randint(1, num_labels, (batch_size, target_length),
                                    device=device, dtype=torch.long)
            x = torch.randn(gradcheck_input_size, device=device, requires_grad=True)
            tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                       device=device)
            input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                              if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
            if zero_mode == ZERO_ALL:
                target_lengths = [0 for _ in range(batch_size)]
            else:
                target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                                   if vary_lengths else target_length) for _ in range(batch_size)]
                if zero_mode == ZERO_SOME:
                    idxes = torch.randint(0, batch_size, (10,))
                    for i in idxes:
                        target_lengths[i] = 0

            def ctc_after_softmax(x):
                x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                          .view(input_length, batch_size, num_labels))
                log_probs = torch.log_softmax(x_full, 2)
                return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

            gradcheck(ctc_after_softmax, [x])

    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_ctc_loss_cudnn(self, device):
        batch_size = 16
        input_length = 30
        num_labels = 101
        target_length = 15
        targets = torch.randint(1, num_labels, (batch_size * target_length,),
                                device='cuda', dtype=torch.long)
        log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels,
                                                  device='cuda', dtype=torch.float), 2)
        log_probs.requires_grad_()

        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
        with torch.backends.cudnn.flags(enabled=False):
            loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths,
                                                       target_lengths, reduction='none')
            grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
        loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                                  input_lengths, target_lengths, reduction='none')
        self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
        grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
        self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)

    @skipCUDAIfRocm
    def test_leaky_relu_inplace_with_neg_slope(self, device):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), -2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

    @skipCUDAIfRocm
    def test_leaky_relu_inplace_with_zero_slope(self, device):
        a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
        b.backward(torch.ones(3, device=device))
        expected = torch.tensor([0., 0., 1.], device=device)
        self.assertEqual(a.grad, expected)

    @onlyCUDA
    def test_free_unneeded_tensor(self, device):
        x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
        m = torch.randn(1, 3, 1, 1, device=device)

        z = x.sum()
        base_mem = torch.cuda.memory_allocated()
        z = ((x + 2) * m).sum()
        end_mem = torch.cuda.memory_allocated()

        # In the end the memory usage should remain equal, because neither of
        # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
        # previous allocation of z had the same size as the current one.
        self.assertEqual(base_mem, end_mem)

    @onlyCUDA
    def test_pin_memory(self, device):
        x = torch.randn(2, 2, requires_grad=True)
        self.assertEqual(x, x.pin_memory())
        self.assertIsNot(x, x.pin_memory())
        self.assertTrue(x.pin_memory().requires_grad)
        gradcheck(lambda x: x.pin_memory(), [x])
        gradgradcheck(lambda x: x.pin_memory(), [x])

    @skipCUDAIfRocm
    @onlyCUDA
    def test_profiler_emit_nvtx(self, device):
        # This test is not intended to ensure correctness of nvtx ranges.
        # That would require something a great deal more complex (you'd have to create a
        # profile in a subprocess, open it, and parse the sql somehow).
        # This test is merely intended to catch if emit_nvtx breaks on construction.
        a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
        with torch.cuda.profiler.profile():
            with emit_nvtx():
                a.add(1.0)

    @onlyCUDA
    def test_rnn_backward_to_input_but_not_parameters(self, device):
        # this checks whether it is possible to not require
        # weight parameters, but require inputs, see #7722
        l = torch.nn.LSTM(2, 3).to(device)
        for p in l.parameters():
            p.requires_grad = False
        s = torch.randn(1, 1, 2, requires_grad=True, device=device)
        out, _ = l(s)
        out.sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    @onlyCUDA
    def test_lstmcell_backward_only_one_output_grad(self, device):
        # checks that undefined gradients don't hamper the backward
        # see #11872
        l = torch.nn.LSTMCell(2, 3).to(device).double()
        s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
        for i in range(2):
            out = l(s)[i]
            out.sum().backward()
            self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    def _test_rnn_mod(self, mod, inp):
        from functools import partial

        def flatten_out(mod, inp):
            out = mod(inp)
            return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
        gradcheckfunc = partial(flatten_out, mod)
        with torch.backends.cudnn.flags(enabled=False):
            torch.autograd.gradcheck(gradcheckfunc, inp)
            torch.autograd.gradgradcheck(gradcheckfunc, inp)

    def test_LSTM_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)

    def test_GRU_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)
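
    # Note on the two RNN tests above: cudnn is disabled inside _test_rnn_mod
    # because the cudnn RNN backward is not itself differentiable, so
    # gradgradcheck has to run against the native implementation.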
@deviceCountAtLeast(1) def test_grad_assignment(self, devices): x = torch.randn(5, 5, device=devices[0]) # Tests that the wrong shape raises with self.assertRaises(RuntimeError): x.grad = torch.randn(2, 2, device=devices[0]) # Tests that the wrong dtype raises with self.assertRaises(RuntimeError): x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0]) # Tests that self-assignment raises with self.assertRaises(RuntimeError): x.grad = x # Tests device -> cpu grad assignment raises if self.device_type != 'cpu': with self.assertRaises(RuntimeError): t_cpu = torch.rand(5, 5) t_cpu.grad = torch.randn(5, 5, device=devices[0]) # Tests half type on CUDA if self.device_type == 'cuda': x = x.to(dtype=torch.half, device=devices[0]) x.grad = torch.zeros_like(x) # Tests cross-device assignment raises if len(devices) > 1: x = torch.randn(5, 5, device=devices[0]) with self.assertRaises(RuntimeError): x.grad = torch.randn(5, 5, device=devices[1]) @deviceCountAtLeast(1) @dtypes(torch.float, torch.double) def test_requires_grad_factory(self, devices, dtype): fns = [torch.ones_like, torch.testing.randn_like] x = torch.randn(2, 3, dtype=dtype, device=devices[0]) for fn in fns: for requires_grad in [True, False]: output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad) self.assertEqual(requires_grad, output.requires_grad) self.assertIs(dtype, output.dtype) self.assertEqual(devices[0], str(x.device)) @deviceCountAtLeast(2) def test_unused_output_device(self, devices): from torch.nn.parallel._functions import Broadcast x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True) outputs = Broadcast.apply(list(range(len(devices))), x) y = outputs[-1] * 2 y.sum().backward() # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2) @deviceCountAtLeast(2) def test_backward_device(self, devices): # check that current device matches the variable's device device = [None] class Identity(torch.autograd.Function): @staticmethod def forward(ctx, x): return x.clone() @staticmethod def backward(ctx, grad_output): device[0] = grad_output.device return grad_output.clone() v = torch.randn(1, device=devices[1], requires_grad=True) Identity.apply(v).backward() self.assertEqual(str(device[0]), devices[1]) @deviceCountAtLeast(2) def test_inputbuffer_add_multidevice(self, devices): input = torch.randn(1, device=devices[0], requires_grad=True) output = input.to(device=devices[1]) + input.to(device=devices[1]) output.backward() @onlyCPU def test_copy_(self, device): # At the time of writing this test, copy_ is not generated from native_functions.yaml # there was a bug that bfloat16 was not recognized as floating. 
x = torch.randn(10, device=device, requires_grad=True) floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point] for dt in floating_dt: y = torch.empty(10, device=device, dtype=dt) y.copy_(x) self.assertTrue(y.requires_grad) z = x.to(torch.bfloat16) self.assertTrue(z.requires_grad) @onlyCUDA def test_simple_reentrant_cross_device(self, device): class ReentrantFunc(Function): _cpu_mode = True @staticmethod def forward(ctx, x): return x * (x + 2) @staticmethod def backward(ctx, grad_output): with torch.enable_grad(): if ReentrantFunc._cpu_mode: new_param = torch.randn(2, 2, requires_grad=True) (new_param ** 2).sum().backward() else: new_param = torch.randn(2, 2, device=device, requires_grad=True) (new_param ** 2).sum().backward() return grad_output # Reentrant starts on GPU thread, finishs on GPU thread x = torch.randn(2, 2, device=device, requires_grad=True) out = ReentrantFunc.apply(x) out.sum().backward() # Reentrant starts on CPU thread, finishs on GPU thread x = torch.randn(2, 2, requires_grad=True) # set ReentrantFunc node to GPU to emit tasks to GPU queue ReentrantFunc._cpu_mode = False out = ReentrantFunc.apply(x) out.sum().backward() # Reentrant starts on GPU thread, finishs on CPU thread x = torch.randn(2, 2, device=device, requires_grad=True) # set ReentrantFunc node to CPU to emit tasks to CPU queue ReentrantFunc._cpu_mode = True out = ReentrantFunc.apply(x) out.sum().backward() @onlyCUDA def test_cross_device_reentrant_autograd(self, device): # Output on gpu so that this task will be associated with the gpu thread def fn_on_gpu(inp): # Artificially increase the priority of the next op to make sure it runs # as soon as we reach it before the ops of branch1. dummy = inp * 2 * 2 * 2 * 2 return inp.to(device=device) def parent_on_cpu(inp): # Slow branch of ops on gpu so that the work queue for the gpu thread # won't empty too quickly. They also have smaller priorities than the # ones created by fn_on_gpu branch1 = inp.to(device=device) branch1 = branch1 / branch1 branch1 = branch1 / branch1 branch1 = branch1 / branch1 # Perform checkpoint on cpu tensors. So the last op performed in the reentrant # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread. # So the cpu thread will notify the gpu thread with an empty NodeTask. 
branch2 = checkpoint(fn_on_gpu, inp) out = branch2 + branch1 return out inp = torch.rand(2, requires_grad=True) out = parent_on_cpu(inp) # This will segfault if the empty NodeTask is not handled properly in the # gpu thread ReadyQueue out.sum().backward() def test_inplace_view_backprop_base(self, device): # modify view and back-prop through base root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v1.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]]) def test_inplace_view_backprop_view_of_view(self, device): # modify view and backprop through view-of-view root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = x.narrow(0, 0, 1) v1.mul_(2) v2.sum().backward() self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]]) def test_inplace_view_of_view(self, device): # modify view-of-view and backprop through base root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = v1.narrow(1, 1, 1) v2.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]]) def test_inplace_view_gradcheck(self, device): # gradcheck modifications to views a = torch.randn(4, 4, device=device, requires_grad=True) b = torch.randn(2, 2, device=device, requires_grad=True) def func(root, b): x = root.clone() x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b) x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_multiple_outputs(self, device): root = torch.arange(9.).reshape(3, 3).requires_grad_() x = root.clone() v1 = x.unbind() with self.assertRaises(RuntimeError): v1[0].mul_(2) def test_inplace_view_makes_base_require_grad(self, device): # in-place modification to view makes base require grad a = torch.randn(4, 4, device=device, requires_grad=False) b = torch.randn(4, 2, device=device, requires_grad=True) def func(root, b): x = root.clone() self.assertFalse(x.requires_grad) x.narrow(1, 2, 2).mul_(b) self.assertTrue(x.requires_grad) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_backprop_view(self, device): # modify view and backprop through view a = torch.tensor([2., 5.], device=device, requires_grad=False) b = torch.tensor([3.], device=device, requires_grad=True) res = a.narrow(0, 1, 1).mul_(b) res.sum().backward() self.assertEqual(b.grad.tolist(), [5]) self.assertIsNone(a.grad) def test_inplace_view_modify_base(self, device): # Test that an in-place operation on a base that forced it to require # grad also forces any previous views to require grad and backprop # correctly r = torch.ones(1, device=device, requires_grad=True) def fn(r): x = torch.ones(5, device=device) v = x.select(0, 1) self.assertFalse(v.requires_grad) self.assertIsNone(v.grad_fn) x.add_(r) # v is now dependent on r due to the in-place op on x self.assertTrue(v.requires_grad) return v gradcheck(fn, [r]) gradgradcheck(fn, [r]) def test_inplace_view_python(self, device): # in-place modifications of Python-autograd created view a = torch.randn(4, 4, device=device, requires_grad=True) b = torch.randn(2, 2, device=device, requires_grad=True) class PyAdd(torch.autograd.Function): @staticmethod def forward(ctx, x, y): ctx.mark_dirty(x) x.add_(y) return x @staticmethod def backward(ctx, grad): return 
grad, grad def func(root, b): x = root.clone() PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b) PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_non_contig(self, device): root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = v1.narrow(1, 1, 1) v2.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]]) def test_inplace_view_multi_output_unsafe(self, device): for f in [lambda t: t.unsafe_split(1), lambda t: t.unsafe_split_with_sizes((1, 1, 1)), lambda t: t.unsafe_chunk(3)]: a = torch.randn(3, 3, device=device, requires_grad=True) b = a + a s1, s2, s3 = f(b) s1.mul_(s2) s1.sum().backward() def test_inplace_view_multi_output_safe(self, device): for f in [lambda t: t.split(1), lambda t: t.split_with_sizes((1, 1, 1)), lambda t: t.chunk(3)]: a = torch.randn(3, 3, device=device, requires_grad=True) b = a + a s1, s2, s3 = f(b) with warnings.catch_warnings(record=True) as w: s1.mul_(s2) self.assertIn('Consider using `unsafe_` version', str(w[0].message)) def test_mv_grad_stride_0(self, device): # Reference: https://github.com/pytorch/pytorch/issues/38315 mat = torch.randn(2, 2, device=device) vec = torch.randn(1, device=device).requires_grad_(True) def fn(vec): # Expand inside the function to make sure the input to # gradcheck does not have overlapping memory vec = vec.expand(2) return (mat @ vec).sum() gradcheck(fn, (vec)) gradgradcheck(fn, (vec)) def test_logcumsumexp_large_value(self, device): a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True) with torch.no_grad(): # Large Number a[0] = 10000 gradcheck(lambda x: x.logcumsumexp(0), a) gradgradcheck(lambda x: x.logcumsumexp(0), a) gradcheck(lambda x: x.logcumsumexp(1), a) gradgradcheck(lambda x: x.logcumsumexp(1), a) gradcheck(lambda x: x.logcumsumexp(2), a) gradgradcheck(lambda x: x.logcumsumexp(2), a) def test_strided_leaf_grad_layout(self, device): # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf. for fmt_a in (torch.contiguous_format, torch.channels_last): for fmt_b in (torch.contiguous_format, torch.channels_last): a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a) b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b) a.requires_grad_() b.requires_grad_() # checks (1) for broadcasted gradients a.sum().backward() self.assertEqual(a.grad.stride(), a.stride()) b.sum().backward() self.assertEqual(b.grad.stride(), b.stride()) # checks (1) for non-broadcasted gradients a.grad = None b.grad = None (a * b).sum().backward() self.assertEqual(a.grad.stride(), a.stride()) self.assertEqual(b.grad.stride(), b.stride()) # (2) If leaf isn't dense, checks that grads are rowmajor contiguous. 
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device)) c.requires_grad_() d = torch.rand((2, 2), device=device) # checks (2) for broadcasted gradients c.sum().backward() self.assertEqual(c.grad.stride(), (2, 1)) # checks (2) for non-broadcasted gradients c.grad = None (c * d).sum().backward() self.assertEqual(c.grad.stride(), (2, 1)) def test_movedim(self, device): x = torch.randn(4, 3, 2, 1, dtype=torch.double, device=device, requires_grad=True) # Positive axis gradcheck(lambda x: torch.movedim(x, (0, 1, 2, 3), (3, 2, 1, 0)), x) gradgradcheck(lambda x: torch.movedim(x, (0, 1, 2, 3), (3, 2, 1, 0)), x) # Negative axis gradcheck(lambda x: torch.movedim(x, (0, -1, -2, -3), (-3, -2, -1, -0)), x) gradgradcheck(lambda x: torch.movedim(x, (0, -1, -2, -3), (-3, -2, -1, -0)), x) def _test_atleast(self, device, torch_fn): # 0-dim s = torch.tensor(0.5, dtype=torch.double, requires_grad=True) gradcheck(lambda x: torch_fn(x), s) gradgradcheck(lambda x: torch_fn(x), s) # 1-dim a = torch.rand(4, dtype=torch.double, requires_grad=True) gradcheck(lambda x: torch_fn(x), a) gradgradcheck(lambda x: torch_fn(x), a) # 2,3,4-dim b = torch.rand(4, 3, dtype=torch.double, requires_grad=True) c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True) d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True) input_tuple = (s, a, b, c, d) gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple) gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple) def test_atleast(self, device): self._test_atleast(device, torch.atleast_1d) self._test_atleast(device, torch.atleast_2d) self._test_atleast(device, torch.atleast_3d) class TestMultithreadAutograd(TestCase): def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None): threads = [] for _ in range(num_threads): p = threading.Thread(target=fn, args=(args)) p.start() threads.append(p) for p in threads: p.join() def test_simple_backward(self): # simple multithreaded backward that create threads in the beginning of training # and everything else is training separately, i.e. inputs, operations, etc. def train_fn(): x = torch.ones(5, 5, requires_grad=True) y = (x + 3) * (x + 4) * 0.5 y.sum().backward() self.assertEqual(x.grad, x + 3.5) self._run_py_multithread_fn(train_fn) def test_simple_backward_same_input(self): # simple multithreaded backward with only shared inputs (i.e. 
This is common # for things like Hogwild multithreaded training with multiple CPU threads) def train_fn_backward(x): y = (x + 3) * (x + 4) * 0.5 y.sum().backward() x = torch.ones(5, 5, requires_grad=True) self._run_py_multithread_fn(train_fn_backward, (x,)) # Since we are calling backward from multiple threads # and all threads share the same input, when we do backward # concurrently, different backwards will all accumulate to # the same .grad for each input, and the gradients should # be equal to num_threads * gradient self.assertEqual(x.grad, 10 * (x + 3.5)) def train_fn_grad(x): y = (x + 3) * (x + 4) * 0.5 grads = torch.autograd.grad(y.sum(), x) self.assertEqual(len(grads), 1) self.assertEqual(grads[0], x + 3.5) # since we use the functional grad() api, gradients will not # be accumulated into the same .grad, so each call should return the same value self._run_py_multithread_fn(train_fn_grad, (x,)) def test_python_thread_in_middle(self): # A user might write a network that starts on one CPU thread, then runs its second half # concurrently with other threads (either via python threading or fork/join calls), # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the # bottom to output at the top. This way part of the GraphTask is shared across # different threads and we need to ensure the user specifies retain_graph=True, otherwise # error out with the correct error message # Case 1: multiple backward with python threads, retain_graph=False # should throw an error in some threads when retain_graph is not set. success_vs_raises = [0, 0] def train_fn_no_retain_graph(x): y = x + x ** 2 try: y.sum().backward() success_vs_raises[0] += 1 except RuntimeError as error: success_vs_raises[1] += 1 self.assertRegex(str(error), "Specify retain_graph=True") x_no_retain = torch.ones(5, 5, requires_grad=True) y_no_retain = x_no_retain + x_no_retain ** 2 self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5) # at least one thread will succeed in this case; all other threads should raise # an error recommending that the user specify retain_graph=True self.assertTrue(success_vs_raises[0] >= 1) # multiple backward with python threads, no error with retain_graph=True def train_fn_retain_graph(x): y = x + x ** 2 y.sum().backward(retain_graph=True) x_retain = torch.ones(5, 5, requires_grad=True) y_retain = x_retain + x_retain ** 2 self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5) # result should equal num_threads * gradient self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1)) def test_fork_join_in_middle(self): # multiple backward with jit threads (fork/join primitive) # similar to test_python_thread_in_middle, we test with retain_graph=False/True # Case 1: multiple grad() calls with jit threads, retain_graph=False # should throw an error in some threads when retain_graph is not set.
@torch.jit.script def train_fn_jit_no_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x]) @torch.jit.script def train_fn_fork_join_calls_no_retain(x): y_no_retain = (x + 3) * (x + 4) * 0.5 fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x) grad_hat = train_fn_jit_no_retain(y_no_retain, x) grad = torch.jit._wait(fut) return grad, grad_hat try: train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True)) except RuntimeError as error: self.assertRegex(str(error), "Specify retain_graph=True") # Case 2: no error with retain_graph=True @torch.jit.script def train_fn_jit_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True) @torch.jit.script def train_fn_fork_join_calls_retain(x): y_retain = (x + 3) * (x + 4) * 0.5 fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x) fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x) grad = train_fn_jit_retain(y_retain, x) grad1 = torch.jit._wait(fut1) grad2 = torch.jit._wait(fut2) return grad, grad1, grad2 grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True)) self.assertEqual(grad, grad1) self.assertEqual(grad, grad2) def test_preserve_backtrace(self): class Foo(torch.autograd.Function): @staticmethod def forward(ctx, input): return input @staticmethod def backward(ctx, *grad): raise ValueError("something") t = torch.rand(10, requires_grad=True) try: Foo.apply(t).sum().backward() except Exception: import traceback tb = sys.exc_info()[2] tb_str = "\n".join(traceback.format_tb(tb)) self.assertTrue('raise ValueError("something")' in tb_str) for test in method_tests(): add_test(*test) # e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA instantiate_device_type_tests( TestAutogradDeviceType, globals(), # Exclude ROCM for now, there are a lot of failures. See # https://github.com/pytorch/pytorch/issues/30845 except_for='cuda' if TEST_WITH_ROCM else None ) if __name__ == '__main__': run_tests()
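# --- Standalone sketch (my own example, mirroring
# TestMultithreadAutograd.test_simple_backward_same_input above): when several
# Python threads call backward() on graphs that share one leaf tensor, their
# gradients all accumulate into the same .grad, so the final value is
# num_threads * gradient.
import threading
import torch

def _sketch_hogwild_grad_accumulation(num_threads=10):
    x = torch.ones(5, 5, requires_grad=True)

    def train_fn():
        y = (x + 3) * (x + 4) * 0.5  # dy/dx = x + 3.5
        y.sum().backward()

    threads = [threading.Thread(target=train_fn) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # each thread built its own graph, so retain_graph is unnecessary, and each
    # backward added (x + 3.5) into x.grad
    assert torch.allclose(x.grad, num_threads * (x + 3.5))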
import torch import os import numpy as np import math from einops import rearrange import logging logger = logging.getLogger(__name__) import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt _GCONST_ = -0.9189385332046727 # ln(sqrt(2*pi)) def get_logp_z(z): # import pdb; pdb.set_trace() C = 2 logp = C * _GCONST_ - 0.5*torch.sum(z**2, 1) # logp = - C * 0.5 * math.log(math.pi * 2) - 0.5*torch.sum(z**2, 1) + logdet_J return logp def positionalencoding2d(D, H, W): """ :param D: dimension of the model :param H: H of the positions :param W: W of the positions :return: DxHxW position matrix """ if D % 4 != 0: raise ValueError("Cannot use sin/cos positional encoding with odd dimension (got dim={:d})".format(D)) P = torch.zeros(D, H, W) # Each dimension use half of D D = D // 2 div_term = torch.exp(torch.arange(0.0, D, 2) * -(math.log(1e4) / D)) pos_w = torch.arange(0.0, W).unsqueeze(1) pos_h = torch.arange(0.0, H).unsqueeze(1) P[0:D:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, H, 1) P[1:D:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, H, 1) P[D::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, W) P[D+1::2,:, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, W) return P @torch.no_grad() def plot(batch_id, model, potential_or_sampling_fn, args): model.eval() n_pts = args.plot_resolution range_lim = 4 # construct test points test_grid = setup_grid(range_lim, n_pts, args) # plot if args.density_matching: if args.flow == "boosted": #plt_height = max(1, int(np.ceil(np.sqrt(args.num_components + 2)))) #plt_width = max(2, int(np.ceil((args.num_components + 2) / plt_height))) plt_width = 2 plt_height = 1 fig, axs = plt.subplots(plt_height, plt_width, figsize=(12,12), subplot_kw={'aspect': 'equal'}, squeeze=False) plot_potential(potential_or_sampling_fn, axs[0, 0], test_grid, n_pts) plot_flow_samples(model, axs[0, 1], n_pts, args.batch_size, args) #plot_boosted_inv_flow_density(model, axs, test_grid, n_pts, args.batch_size, args, plt_height, plt_width) else: fig, axs = plt.subplots(1, 3, figsize=(16,8), subplot_kw={'aspect': 'equal'}, squeeze=False) plot_potential(potential_or_sampling_fn, axs[0, 0], test_grid, n_pts) plot_flow_samples(model, axs[0, 1], n_pts, args.batch_size, args) plot_inv_flow_density(model, axs[0, 2], test_grid, n_pts, args.batch_size, args) else: if args.flow == "boosted": plt_width = max(2, int(np.ceil(np.sqrt(args.num_components)))) plt_height = max(2, int(np.ceil(np.sqrt(args.num_components))) + 1) #plt_height = max(1, int(np.ceil(np.sqrt(args.num_components + 2)))) #plt_width = max(1, int(np.ceil((args.num_components + 2) / plt_height))) fig, axs = plt.subplots(plt_height, plt_width, figsize=(12,12), subplot_kw={'aspect': 'equal'}, squeeze=False) plot_samples(potential_or_sampling_fn, axs[0,0], range_lim, n_pts) total_prob = plot_boosted_fwd_flow_density(model, axs, test_grid, n_pts, args.batch_size, args) else: fig, axs = plt.subplots(1, 2, figsize=(12,12), subplot_kw={'aspect': 'equal'}) plot_samples(potential_or_sampling_fn, axs[0], range_lim, n_pts) plot_fwd_flow_density(model, axs[1], test_grid, n_pts, args.batch_size, args) # format for ax in plt.gcf().axes: format_ax(ax, range_lim) #plt.tight_layout(rect=[0, 0, 1.0, 0.95]) plt.tight_layout() title = f'{args.dataset.title()}: {args.flow.title()} Flow, cb={args.coupling_blocks}' title += f', C={args.num_components}' if args.flow == "boosted" else '' title += f', 
Reg={args.regularization_rate:.2f}' if args.flow == "boosted" and args.density_matching else '' annealing_type = f', Annealed' if args.min_beta < 1.0 else ', No Annealing' title += annealing_type if args.density_matching else '' # annealing isn't done for density sampling fig.suptitle(title, y=0.98, fontsize=20) fig.subplots_adjust(top=0.85) # save fname = f'{args.dataset}_{args.flow}_cb{args.coupling_blocks}_bs{args.batch_size}' fname += f'_C{args.num_components}_reg{int(100*args.regularization_rate):d}_{args.component_type}' if args.flow == 'boosted' else '' fname += f'_{args.coupling_network}{args.coupling_network_depth}_hsize{args.h_size}' if args.flow == 'realnvp' else '' fname += f'_hidden{args.coupling_network_depth}_hsize{args.h_size}' if args.flow == 'iaf' else '' fname += '_annealed' if args.min_beta < 1.0 else '' fname += '_lr_scheduling' if not args.no_lr_schedule else '' plt.savefig(os.path.join(args.snap_dir, fname + f'_step{batch_id:07d}.png')) plt.close() # plot densities using gaussian interpolation if args.density_matching: if args.flow == "boosted": plot_boosted_inv_flow(model, batch_id, 1000, args.batch_size, args) else: plot_inv_flow(model, batch_id, 1000, args.batch_size, args) # PLOT THE FINAL RESULT IF THIS IS THE LAST BATCH if batch_id == args.num_steps: fig, axs = plt.subplots(1, 2, figsize=(12,12), subplot_kw={'aspect': 'equal'}) if args.density_matching: plot_potential(potential_or_sampling_fn, axs[0], test_grid, n_pts) plot_flow_samples(model, axs[1], n_pts, args.batch_size, args) else: plot_samples(potential_or_sampling_fn, axs[0], range_lim, n_pts) if args.flow == "boosted": xx, yy, zz = test_grid axs[1].pcolormesh(xx, yy, total_prob, cmap=plt.cm.viridis) axs[1].set_facecolor(plt.cm.viridis(0.)) axs[1].set_title('Boosted Density - All Components', fontdict={'fontsize': 20}) else: plot_fwd_flow_density(model, axs[1], test_grid, n_pts, args.batch_size, args) for ax in plt.gcf().axes: format_ax(ax, range_lim) #plt.tight_layout(rect=[0, 0, 1.0, 0.95]) plt.tight_layout() title = f'{args.dataset.title()}: {args.flow.title()} Flow, cb={args.coupling_blocks}' title += f', Annealed' if args.min_beta < 1.0 else ', No Annealing' title += f', C={args.num_components}, Reg={args.regularization_rate:.2f}' if args.flow == "boosted" else '' fig.suptitle(title, y=0.98, fontsize=20) fig.subplots_adjust(top=0.85) # too much? 
plt.savefig(os.path.join(args.snap_dir, fname + '.png')) plt.close() def setup_grid(range_lim, n_pts, args): x = torch.linspace(-range_lim, range_lim, n_pts) xx, yy = torch.meshgrid((x, x)) zz = torch.stack((xx.flatten(), yy.flatten()), dim=1) return xx, yy, zz.to(args.device) def format_ax(ax, range_lim): ax.set_xlim(-range_lim, range_lim) ax.set_ylim(-range_lim, range_lim) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.invert_yaxis() def plot_potential(potential_fn, ax, test_grid, n_pts): xx, yy, zz = test_grid ax.pcolormesh(xx, yy, torch.exp(-1.0 * potential_fn(zz)).view(n_pts, n_pts).cpu().data, cmap=plt.cm.viridis) ax.set_title('Target Density', fontdict={'fontsize': 20}) def plot_samples(samples_fn, ax, range_lim, n_pts): samples = samples_fn(n_pts**2).numpy() ax.hist2d(samples[:,0], samples[:,1], range=[[-range_lim, range_lim], [-range_lim, range_lim]], bins=n_pts, cmap=plt.cm.viridis) ax.set_title('Target Samples', fontdict={'fontsize': 20}) def plot_flow_samples(model, ax, n_pts, batch_size, args): z = model.base_dist.sample((n_pts**2,)) if args.flow == "boosted": caption = f" - All Components" zk = torch.cat([model.flow(z_, sample_from="1:c")[0][-1] for z_ in z.split(batch_size, dim=0)], 0) else: caption = f"" zk = torch.cat([model.flow(z_)[0] for z_ in z.split(batch_size, dim=0)], 0) zk = torch.clamp(zk, min=-25.0, max=25.0) zk = zk.cpu().numpy() # plot ax.hist2d(zk[:,0], zk[:,1], bins=n_pts, cmap=plt.cm.viridis) ax.set_facecolor(plt.cm.viridis(0.)) ax.set_title('Flow Samples' + caption, fontdict={'fontsize': 20}) def plot_fwd_flow_density(model, ax, test_grid, n_pts, batch_size, args): """ plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet) """ xx, yy, zz = test_grid # compute posterior approx density zzk, logdet = [], [] B = batch_size H=1; W=1 P = args.condition_vec pos = positionalencoding2d(P, H, W).to(args.device).unsqueeze(0).repeat(B, 1, 1, 1) c_r = rearrange(pos, 'b c h w -> (b h w) c') for zz_i in zz.split(batch_size, dim=0): #zzk_i, logdet_i = model.flow(zz_i) zzk_i, logdet_i = model(zz_i, [c_r,]) zzk += [zzk_i] logdet += [logdet_i] zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0) q_log_prob = get_logp_z(zzk) / 2 log_prob = q_log_prob + logdet prob = log_prob.exp().cpu() # plot ax.pcolormesh(xx, yy, prob.view(n_pts,n_pts).data, cmap=plt.cm.viridis) ax.set_facecolor(plt.cm.viridis(0.)) ax.set_title('Flow Density', fontdict={'fontsize': 20}) def plot_boosted_fwd_flow_density(model, axs, test_grid, n_pts, batch_size, args, batch_id=None): """ plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet) """ xx, yy, zz = test_grid num_fixed_plots = 2 # every image will show the true samples and the density for the full model #plt_height = max(1, int(np.ceil(np.sqrt(args.num_components + num_fixed_plots)))) #plt_width = max(1, int(np.ceil((args.num_components + num_fixed_plots) / plt_height))) plt_width = max(2, int(np.ceil(np.sqrt(args.num_components)))) plt_height = max(2, int(np.ceil(np.sqrt(args.num_components))) + 1) total_prob = torch.zeros(n_pts, n_pts) num_components_to_plot = max(1, args.num_components if model.all_trained else model.component + 1) for c in range(num_components_to_plot): if model.rho[c] == 0.0: continue #row = int(np.floor((c + num_fixed_plots) / plt_width)) #col = int((c + num_fixed_plots) % plt_width) row = int(1 + np.floor(c / plt_width)) col = int(c % plt_width) zzk, logdet = [], [] for zz_i in zz.split(batch_size, dim=0): ZZ_i, 
_, _, logdet_i, _ = model(x=zz_i, components=c) zzk += [ZZ_i] logdet += [logdet_i] zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0) q_log_prob = model.base_dist.log_prob(zzk).sum(1) log_prob = q_log_prob + logdet prob = log_prob.exp().cpu().view(n_pts,n_pts).data # plot component c axs[row,col].pcolormesh(xx, yy, prob, cmap=plt.cm.viridis) axs[row,col].set_facecolor(plt.cm.viridis(0.)) axs[row,col].set_title(f'c={c}', fontdict={'fontsize': 20}) # save total model probs total_prob += log_prob.cpu().view(n_pts, n_pts).data * model.rho[c] # plot full model total_prob = torch.exp(total_prob / torch.sum(model.rho[0:num_components_to_plot])) axs[0,1].pcolormesh(xx, yy, total_prob, cmap=plt.cm.viridis) axs[0,1].set_facecolor(plt.cm.viridis(0.)) axs[0,1].set_title('GBF - All Components', fontdict={'fontsize': 20}) return total_prob def plot_inv_flow_density(model, ax, test_grid, n_pts, batch_size, args): """ plots transformed grid and density; where density is exp(loq_flow_base_dist - logdet) """ xx, yy, zz = test_grid # compute posterior approx density zzk, logdet = [], [] for zz_i in zz.split(batch_size, dim=0): zzk_i, logdet_i = model.flow(zz_i) zzk += [zzk_i] logdet += [logdet_i] zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0) log_q0 = model.base_dist.log_prob(zz).sum(1) log_qk = log_q0 - logdet qk = log_qk.exp().cpu() zzk = zzk.cpu() # plot ax.pcolormesh(zzk[:,0].view(n_pts,n_pts).data, zzk[:,1].view(n_pts,n_pts).data, qk.view(n_pts,n_pts).data, cmap=plt.cm.viridis) ax.set_facecolor(plt.cm.viridis(0.0)) ax.set_title('Flow Density', fontdict={'fontsize': 20}) def plot_inv_flow(model, batch_id, n_pts, batch_size, args): fname = f'{args.dataset}_{args.flow}_cb{args.coupling_blocks}_bs{args.batch_size}' fname += f'_{args.coupling_network}{args.coupling_network_depth}_hsize{args.h_size}' if args.component_type == 'realnvp' or args.flow == 'realnvp' else '' fname += f'_hidden{args.coupling_network_depth}_hsize{args.h_size}' if args.flow == 'iaf' else '' fname += '_annealed' if args.min_beta < 1.0 else '' fname += '_lr_scheduling' if not args.no_lr_schedule else '' Z = np.hstack([model.flow(torch.randn(n_pts, 2).to(args.device) * model.base_dist_var + model.base_dist_mean)[0].t().cpu().data.numpy() for _ in range(n_pts)]) H, _, _ = np.histogram2d(Z[0], Z[1], bins=(np.arange(-4, 4, 0.05), np.arange(-4, 4, 0.05))) plt.figure(figsize=(12, 12)) plt.imshow(H.T, interpolation='gaussian') plt.axis('off') plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.savefig(os.path.join(args.snap_dir, f'final_{fname}_step{batch_id:07d}.png')) def plot_boosted_inv_flow_density(model, axs, test_grid, n_pts, batch_size, args, plt_height, plt_width): """ plots transformed grid and density; where density is exp(loq_flow_base_dist - logdet) """ xx, yy, zz = test_grid num_fixed_plots = 2 # every image will show the true density and samples from the full model num_components_to_plot = args.num_components if model.all_trained else model.component + 1 for c in range(num_components_to_plot): if model.rho[c] == 0.0: continue row = int(np.floor((c + num_fixed_plots) / plt_width)) col = int((c + num_fixed_plots) % plt_width) zzk, logdet = [], [] for zz_i in zz.split(batch_size, dim=0): ZZ_i, logdet_i = model.component_forward_flow(zz_i, c) zzk += [ZZ_i[-1]] # grab K-th element logdet += [logdet_i] zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0) log_q0 = model.base_dist.log_prob(zz).sum(1) log_qk = log_q0 - logdet qk = log_qk.exp().cpu() zzk = zzk.cpu() # plot component c 
axs[row,col].pcolormesh(zzk[:,0].view(n_pts,n_pts).data, zzk[:,1].view(n_pts,n_pts).data, qk.view(n_pts,n_pts).data, cmap=plt.cm.viridis) axs[row,col].set_facecolor(plt.cm.viridis(0.0)) axs[row,col].set_title(f'Boosted Flow Density for c={c}', fontdict={'fontsize': 20}) def plot_boosted_inv_flow(model, batch_id, n_pts, batch_size, args): """ plots transformed grid and density; where density is a gaussian interpolation of the model's samples """ fname = f'{args.dataset}_{args.flow}_K{args.num_flows}_bs{args.batch_size}' fname += f'_C{args.num_components}_reg{int(100*args.regularization_rate):d}_{args.component_type}' fname += f'_{args.coupling_network}{args.coupling_network_depth}_hsize{args.h_size}' if args.component_type == 'realnvp' or args.flow == 'realnvp' else '' fname += '_annealed' if args.min_beta < 1.0 else '' fname += '_lr_scheduling' if not args.no_lr_schedule else '' Z = [] num_components_to_plot = args.num_components if model.all_trained else model.component + 1 for c in range(num_components_to_plot): zc = np.hstack([model.component_forward_flow( torch.randn(n_pts, 2).to(args.device) * model.base_dist_var + model.base_dist_mean, c)[0][-1].t().cpu().data.numpy() for _ in range(n_pts)]) num_sampled = int(np.ceil(( model.rho[c] / model.rho.sum() ) * n_pts * n_pts)) Z.append(zc[:, 0:num_sampled]) # plot component c Hc, _, _ = np.histogram2d(zc[0], zc[1], bins=(np.arange(-4, 4, 0.05), np.arange(-4, 4, 0.05))) plt.figure(figsize=(12, 12)) plt.imshow(Hc.T, interpolation='gaussian') plt.axis('off') plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.savefig(os.path.join(args.snap_dir, f'{c}_{fname}_step{batch_id:07d}.png')) if model.component == 0 and not model.all_trained: # don't bother plotting components that haven't been trained at all break # plot full model Z = np.hstack(Z) H, _, _ = np.histogram2d(Z[0], Z[1], bins=(np.arange(-4, 4, 0.05), np.arange(-4, 4, 0.05))) plt.figure(figsize=(12, 12)) plt.imshow(H.T, interpolation='gaussian') plt.axis('off') plt.subplots_adjust(left=0, right=1, bottom=0, top=1) plt.savefig(os.path.join(args.snap_dir, f'final_{fname}_step{batch_id:07d}.png')) def plot_q0_density(model, ax, test_grid, n_pts, batch_size, args): """ Plot the base distribution (some type of standard gaussian) """ xx, yy, zz = test_grid log_q0 = model.base_dist.log_prob(zz).sum(1) q0 = log_q0.exp().cpu() # plot ax.pcolormesh(zz[:,0].view(n_pts,n_pts).data, zz[:,1].view(n_pts,n_pts).data, q0.view(n_pts,n_pts).data, cmap=plt.cm.viridis) ax.set_facecolor(plt.cm.viridis(0.)) ax.set_title('Base q_0 Density', fontdict={'fontsize': 20})
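# --- Toy sketch (my own example) of the change-of-variables rule the plotting
# helpers above rely on: for an invertible map f with z_k = f(z), the density
# drawn by plot_inv_flow_density is exp(log q_0(z) - log|det df/dz|) evaluated
# at the transformed grid points, while the forward-density helpers use
# exp(log p(z_k) + logdet) instead.
import math
import torch

class _ToyAffineFlow:
    """z_k = a * z + b elementwise, so log|det J| = dim * log|a| per sample."""
    def __init__(self, a=2.0, b=0.5):
        self.a, self.b = a, b

    def __call__(self, z):
        zk = self.a * z + self.b
        logdet = torch.full((z.shape[0],), z.shape[1] * math.log(abs(self.a)))
        return zk, logdet

_base = torch.distributions.Normal(0.0, 1.0)
_flow = _ToyAffineFlow()
_z = torch.randn(4, 2)
_zk, _logdet = _flow(_z)
_log_qk = _base.log_prob(_z).sum(1) - _logdet  # density of z_k under the flow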
/* * Copyright 2018 - 2021 Swiss Federal Institute of Technology Lausanne (EPFL) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This open source software code was developed in part or in whole in the * Human Brain Project, funded from the European Union's Horizon 2020 * Framework Programme for Research and Innovation under * Specific Grant Agreements No. 720270, No. 785907, and No. 945539 * (Human Brain Project SGA1, SGA2 and SGA3). * */ import React from "react"; import ReactDOM from "react-dom"; import { Provider } from "react-redux"; import { ConnectedRouter } from "connected-react-router"; import { store, history } from "./store"; import App from "./containers/App/App"; import "./services/IconsImport"; import "normalize.css/normalize.css"; import "bootstrap/dist/css/bootstrap.min.css"; import "bootstrap/dist/js/bootstrap.min.js"; import "./index.css"; import "./ie.css"; import ReactPiwik from "react-piwik"; import * as Sentry from "@sentry/browser"; new ReactPiwik({ url: process.env.REACT_APP_MATOMO_URL, siteId: process.env.REACT_APP_MATOMO_SITE_ID, trackErrors: true }); Sentry.init({ dsn: process.env.REACT_APP_SENTRY_URL, environment: window.location.host }); ReactPiwik.push(["trackPageView"]); ReactDOM.render( <Provider store={store}> <ConnectedRouter history={history}> <App /> </ConnectedRouter> </Provider>, document.getElementById("root") );
from .openapi import OpenAPI

# these imports appear unused, but in fact they load the subclasses of
# ObjectBase so that they may be referenced throughout the schema without issue
from . import info, servers, paths, general, schemas, components, security, tag

__all__ = ['OpenAPI']
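# A hedged sketch (names are illustrative, not the library's) of the pattern the
# comment above describes: merely importing a module that defines subclasses of
# a base class is enough to register them, e.g. via __init_subclass__, so later
# code can look them up without referencing the defining modules directly.
class _ObjectBaseSketch:
    registry = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # runs at class-definition time, i.e. when the defining module is imported
        _ObjectBaseSketch.registry[cls.__name__] = cls

class _Info(_ObjectBaseSketch):
    pass

assert _ObjectBaseSketch.registry['_Info'] is _Info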
import * as React from "react" import Layout from "../components/layout" import Seo from "../components/Seo/Seo" const NotFoundPage = () => ( <Layout> <Seo title="404: Not found" /> <h1>404: Not Found</h1> <p>You just hit a route that doesn&#39;t exist... the sadness.</p> </Layout> ) export default NotFoundPage
export const SCOPES = { MANAGEMENT: 'https://management.azure.com/user_impersonation', STORAGE: 'https://storage.azure.com/user_impersonation' }
({
    mainConfigFile: 'main.js',
    name: 'main',
    out: 'main-built.js'
})
/** * Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ module.exports = function(RED) { "use strict"; var fs = require("fs-extra"); var os = require("os"); var path = require("path"); function FileNode(n) { RED.nodes.createNode(this,n); this.filename = n.filename; this.appendNewline = n.appendNewline; this.overwriteFile = n.overwriteFile.toString(); this.createDir = n.createDir || false; var node = this; node.wstream = null; node.msgQueue = []; node.closing = false; node.closeCallback = null; function processMsg(msg, done) { var filename = node.filename || msg.filename || ""; if ((!node.filename) && (!node.tout)) { node.tout = setTimeout(function() { node.status({fill:"grey",shape:"dot",text:filename}); clearTimeout(node.tout); node.tout = null; },333); } if (filename === "") { node.warn(RED._("file.errors.nofilename")); done(); } else if (node.overwriteFile === "delete") { fs.unlink(filename, function (err) { if (err) { node.error(RED._("file.errors.deletefail",{error:err.toString()}),msg); } else { if (RED.settings.verbose) { node.log(RED._("file.status.deletedfile",{file:filename})); } node.send(msg); } done(); }); } else if (msg.hasOwnProperty("payload") && (typeof msg.payload !== "undefined")) { var dir = path.dirname(filename); if (node.createDir) { try { fs.ensureDirSync(dir); } catch(err) { node.error(RED._("file.errors.createfail",{error:err.toString()}),msg); done(); return; } } var data = msg.payload; if ((typeof data === "object") && (!Buffer.isBuffer(data))) { data = JSON.stringify(data); } if (typeof data === "boolean") { data = data.toString(); } if (typeof data === "number") { data = data.toString(); } if ((node.appendNewline) && (!Buffer.isBuffer(data))) { data += os.EOL; } if (node.overwriteFile === "true") { var wstream = fs.createWriteStream(filename, { encoding:'binary', flags:'w', autoClose:true }); node.wstream = wstream; wstream.on("error", function(err) { node.error(RED._("file.errors.writefail",{error:err.toString()}),msg); done(); }); wstream.on("open", function() { wstream.end(data, function() { node.send(msg); done(); }); }) return; } else { // Append mode var recreateStream = !node.wstream || !node.filename; if (node.wstream && node.wstreamIno) { // There is already a stream open and we have the inode // of the file. Check the file hasn't been deleted // or deleted and recreated. try { var stat = fs.statSync(filename); // File exists - check the inode matches if (stat.ino !== node.wstreamIno) { // The file has been recreated. 
Close the current // stream and recreate it recreateStream = true; node.wstream.end(); delete node.wstream; delete node.wstreamIno; } } catch(err) { // File does not exist recreateStream = true; node.wstream.end(); delete node.wstream; delete node.wstreamIno; } } if (recreateStream) { node.wstream = fs.createWriteStream(filename, { encoding:'binary', flags:'a', autoClose:true }); node.wstream.on("open", function(fd) { try { var stat = fs.statSync(filename); node.wstreamIno = stat.ino; } catch(err) { } }); node.wstream.on("error", function(err) { node.error(RED._("file.errors.appendfail",{error:err.toString()}),msg); done(); }); } if (node.filename) { // Static filename - write and reuse the stream next time node.wstream.write(data, function() { node.send(msg); done(); }); } else { // Dynamic filename - write and close the stream node.wstream.end(data, function() { node.send(msg); delete node.wstream; delete node.wstreamIno; done(); }); } } } else { done(); } } function processQ(queue) { var msg = queue[0]; processMsg(msg, function() { queue.shift(); if (queue.length > 0) { processQ(queue); } else if (node.closing) { closeNode(); } }); } this.on("input", function(msg) { var msgQueue = node.msgQueue; if (msgQueue.push(msg) > 1) { // pending write exists return; } try { processQ(msgQueue); } catch (e) { node.msgQueue = []; if (node.closing) { closeNode(); } throw e; } }); function closeNode() { if (node.wstream) { node.wstream.end(); } if (node.tout) { clearTimeout(node.tout); } node.status({}); var cb = node.closeCallback; node.closeCallback = null; node.closing = false; if (cb) { cb(); } } this.on('close', function(done) { if (node.closing) { // already closing return; } node.closing = true; if (done) { node.closeCallback = done; } if (node.msgQueue.length > 0) { // close after queue processed return; } else { closeNode(); } }); } RED.nodes.registerType("file",FileNode); function FileInNode(n) { RED.nodes.createNode(this,n); this.filename = n.filename; this.format = n.format; this.chunk = false; if (n.sendError === undefined) { this.sendError = true; } else { this.sendError = n.sendError; } if (this.format === "lines") { this.chunk = true; } if (this.format === "stream") { this.chunk = true; } var node = this; this.on("input",function(msg) { var filename = node.filename || msg.filename || ""; if (!node.filename) { node.status({fill:"grey",shape:"dot",text:filename}); } if (filename === "") { node.warn(RED._("file.errors.nofilename")); } else { msg.filename = filename; var lines = Buffer.from([]); var spare = ""; var count = 0; var type = "buffer"; var ch = ""; if (node.format === "lines") { ch = "\n"; type = "string"; } var hwm; var getout = false; var rs = fs.createReadStream(filename) .on('readable', function () { var chunk; var hwm = rs._readableState.highWaterMark; while (null !== (chunk = rs.read())) { if (node.chunk === true) { getout = true; if (node.format === "lines") { spare += chunk.toString(); var bits = spare.split("\n"); for (var i=0; i < bits.length - 1; i++) { var m = { payload:bits[i], topic:msg.topic, filename:msg.filename, parts:{index:count, ch:ch, type:type, id:msg._msgid} } count += 1; node.send(m); } spare = bits[i]; } if (node.format === "stream") { var m = { payload:chunk, topic:msg.topic, filename:msg.filename, parts:{index:count, ch:ch, type:type, id:msg._msgid} } count += 1; if (chunk.length < hwm) { // last chunk is smaller that high water mark = eof getout = false; m.parts.count = count; } node.send(m); } } else { lines = Buffer.concat([lines,chunk]); } } }) 
.on('error', function(err) { node.error(err, msg); if (node.sendError) { var sendMessage = RED.util.cloneMessage(msg); delete sendMessage.payload; sendMessage.error = err; node.send(sendMessage); } }) .on('end', function() { if (node.chunk === false) { if (node.format === "utf8") { msg.payload = lines.toString(); } else { msg.payload = lines; } node.send(msg); } else if (node.format === "lines") { var m = { payload: spare, parts: { index: count, count: count+1, ch: ch, type: type, id: msg._msgid } }; node.send(m); } else if (getout) { // last chunk same size as high water mark - have to send empty extra packet. var m = { parts:{index:count, count:count, ch:ch, type:type, id:msg._msgid} }; node.send(m); } }); } }); this.on('close', function() { node.status({}); }); } RED.nodes.registerType("file in",FileInNode); }
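# --- Cross-language sketch (Python, illustrative only) of the inode check the
# append-mode branch above performs: if the file was deleted, or deleted and
# recreated, its inode changes (or stat fails), which signals that the cached
# write stream must be closed and reopened.
import os

def stream_needs_reopen(filename, cached_ino):
    try:
        return os.stat(filename).st_ino != cached_ino  # file was recreated
    except FileNotFoundError:
        return True  # file was deleted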
'use strict'; var REQUEST = require('../lib/request'); var util = require('./util'); // Bucket 相关 /** * 获取用户的 bucket 列表 * @param {Object} params 回调函数,必须,下面为参数列表 * 无特殊参数 * @param {Function} callback 回调函数,必须 */ function getService(params, callback) { if (typeof params === 'function') { callback = params; params = {}; } var protocol = 'https:'; var domain = this.options.ServiceDomain; var appId = params.AppId || this.options.appId; var region = params.Region; if (domain) { domain = domain.replace(/\{\{AppId\}\}/ig, appId || '') .replace(/\{\{Region\}\}/ig, region || '').replace(/\{\{.*?\}\}/ig, ''); if (!/^[a-zA-Z]+:\/\//.test(domain)) { domain = protocol + '//' + domain; } if (domain.slice(-1) === '/') { domain = domain.slice(0, -1); } } else if(region){ domain = protocol + '//cos.'+ region + '.myqcloud.com'; } else { domain = protocol + '//service.cos.myqcloud.com'; } submitRequest.call(this, { Action: 'name/cos:GetService', url: domain + '/', method: 'GET', }, function (err, data) { if (err) { return callback(err); } var buckets = (data && data.ListAllMyBucketsResult && data.ListAllMyBucketsResult.Buckets && data.ListAllMyBucketsResult.Buckets.Bucket) || []; buckets = util.isArray(buckets) ? buckets : [buckets]; var owner = (data && data.ListAllMyBucketsResult && data.ListAllMyBucketsResult.Owner) || {}; callback(null, { Buckets: buckets, Owner: owner, statusCode: data.statusCode, headers: data.headers, }); }); } /** * 查看是否存在该Bucket,是否有权限访问 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {Boolean} data.BucketExist Bucket是否存在 * @return {Boolean} data.BucketAuth 是否有 Bucket 的访问权限 */ function headBucket(params, callback) { submitRequest.call(this, { Action: 'name/cos:HeadBucket', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, method: 'HEAD', }, function (err, data) { callback(err, data); }); } /** * 获取 Bucket 下的 object 列表 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {String} params.Prefix 前缀匹配,用来规定返回的文件前缀地址,非必须 * @param {String} params.Delimiter 定界符为一个符号,如果有Prefix,则将Prefix到delimiter之间的相同路径归为一类,非必须 * @param {String} params.Marker 默认以UTF-8二进制顺序列出条目,所有列出条目从marker开始,非必须 * @param {String} params.MaxKeys 单次返回最大的条目数量,默认1000,非必须 * @param {String} params.EncodingType 规定返回值的编码方式,非必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {Object} data.ListBucketResult 返回的 object 列表信息 */ function getBucket(params, callback) { var reqParams = {}; reqParams['prefix'] = params['Prefix'] || ''; reqParams['delimiter'] = params['Delimiter']; reqParams['marker'] = params['Marker']; reqParams['max-keys'] = params['MaxKeys']; reqParams['encoding-type'] = params['EncodingType']; submitRequest.call(this, { Action: 'name/cos:GetBucket', ResourceKey: reqParams['prefix'], method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, qs: reqParams, }, function (err, data) { if (err) { return callback(err); } var ListBucketResult = data.ListBucketResult || {}; var Contents = ListBucketResult.Contents || []; var CommonPrefixes = ListBucketResult.CommonPrefixes || []; Contents = util.isArray(Contents) ? 
Contents : [Contents]; CommonPrefixes = util.isArray(CommonPrefixes) ? CommonPrefixes : [CommonPrefixes]; var result = util.clone(ListBucketResult); util.extend(result, { Contents: Contents, CommonPrefixes: CommonPrefixes, statusCode: data.statusCode, headers: data.headers, }); callback(null, result); }); } /** * 创建 Bucket,并初始化访问权限 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {String} params.ACL 用户自定义文件权限,可以设置:private,public-read;默认值:private,非必须 * @param {String} params.GrantRead 赋予被授权者读的权限,格式x-cos-grant-read: uin=" ",uin=" ",非必须 * @param {String} params.GrantWrite 赋予被授权者写的权限,格式x-cos-grant-write: uin=" ",uin=" ",非必须 * @param {String} params.GrantFullControl 赋予被授权者读写权限,格式x-cos-grant-full-control: uin=" ",uin=" ",非必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {String} data.Location 操作地址 */ function putBucket(params, callback) { var self = this; var headers = {}; headers['x-cos-acl'] = params['ACL']; headers['x-cos-grant-read'] = params['GrantRead']; headers['x-cos-grant-write'] = params['GrantWrite']; headers['x-cos-grant-read-acp'] = params['GrantReadAcp']; headers['x-cos-grant-write-acp'] = params['GrantWriteAcp']; headers['x-cos-grant-full-control'] = params['GrantFullControl']; submitRequest.call(this, { Action: 'name/cos:PutBucket', method: 'PUT', Bucket: params.Bucket, Region: params.Region, headers: headers, }, function (err, data) { if (err) { return callback(err); } var url = getUrl({ domain: self.options.Domain, bucket: params.Bucket, region: params.Region, isLocation: true, }); callback(null, { Location: url, statusCode: data.statusCode, headers: data.headers, }); }); } /** * 删除 Bucket * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {String} data.Location 操作地址 */ function deleteBucket(params, callback) { submitRequest.call(this, { Action: 'name/cos:DeleteBucket', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, method: 'DELETE', }, function (err, data) { if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); } else if (err) { return callback(err); } callback(null, { statusCode: data.statusCode, headers: data.headers, }); }); } /** * 获取 Bucket 的 权限列表 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {Object} data.AccessControlPolicy 访问权限信息 */ function getBucketAcl(params, callback) { submitRequest.call(this, { Action: 'name/cos:GetBucketACL', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'acl', }, function (err, data) { if (err) { return callback(err); } var AccessControlPolicy = data.AccessControlPolicy || {}; var Owner = AccessControlPolicy.Owner || {}; var Grant = AccessControlPolicy.AccessControlList.Grant || []; Grant = util.isArray(Grant) ? 
Grant : [Grant]; var result = decodeAcl(AccessControlPolicy); if (data.headers && data.headers['x-cos-acl']) { result.ACL = data.headers['x-cos-acl']; } result = util.extend(result, { Owner: Owner, Grants: Grant, statusCode: data.statusCode, headers: data.headers, }); callback(null, result); }); } /** * 设置 Bucket 的 权限列表 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {String} params.ACL 用户自定义文件权限,可以设置:private,public-read;默认值:private,非必须 * @param {String} params.GrantRead 赋予被授权者读的权限,格式x-cos-grant-read: uin=" ",uin=" ",非必须 * @param {String} params.GrantWrite 赋予被授权者写的权限,格式x-cos-grant-write: uin=" ",uin=" ",非必须 * @param {String} params.GrantFullControl 赋予被授权者读写权限,格式x-cos-grant-full-control: uin=" ",uin=" ",非必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 */ function putBucketAcl(params, callback) { var headers = params.Headers; var xml = ''; if (params['AccessControlPolicy']) { var AccessControlPolicy = util.clone(params['AccessControlPolicy'] || {}); var Grants = AccessControlPolicy.Grants || AccessControlPolicy.Grant; Grants = util.isArray(Grants) ? Grants : [Grants]; delete AccessControlPolicy.Grant; delete AccessControlPolicy.Grants; AccessControlPolicy.AccessControlList = {Grant: Grants}; xml = util.json2xml({AccessControlPolicy: AccessControlPolicy}); headers['Content-Type'] = 'application/xml'; headers['Content-MD5'] = util.binaryBase64(util.md5(xml)); } // Grant Header 去重 util.each(headers, function (val, key) { if (key.indexOf('x-cos-grant-') === 0) { headers[key] = uniqGrant(headers[key]); } }); submitRequest.call(this, { Action: 'name/cos:PutBucketACL', method: 'PUT', Bucket: params.Bucket, Region: params.Region, headers: headers, action: 'acl', body: xml, }, function (err, data) { if (err) { return callback(err); } callback(null, { statusCode: data.statusCode, headers: data.headers, }); }); } /** * 获取 Bucket 的 跨域设置 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 * @return {Object} data.CORSRules Bucket的跨域设置 */ function getBucketCors(params, callback) { submitRequest.call(this, { Action: 'name/cos:GetBucketCORS', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'cors', }, function (err, data) { if (err) { if (err.statusCode === 404 && err.error && err.error.Code === 'NoSuchCORSConfiguration') { var result = { CORSRules: [], statusCode: err.statusCode, }; err.headers && (result.headers = err.headers); callback(null, result); } else { callback(err); } return; } var CORSConfiguration = data.CORSConfiguration || {}; var CORSRules = CORSConfiguration.CORSRules || CORSConfiguration.CORSRule || []; CORSRules = util.clone(util.isArray(CORSRules) ? CORSRules : [CORSRules]); util.each(CORSRules, function (rule) { util.each(['AllowedOrigin', 'AllowedHeader', 'AllowedMethod', 'ExposeHeader'], function (key, j) { var sKey = key + 's'; var val = rule[sKey] || rule[key] || []; delete rule[key]; rule[sKey] = util.isArray(val) ? 
val : [val]; }); }); callback(null, { CORSRules: CORSRules, statusCode: data.statusCode, headers: data.headers, }); }); } /** * 设置 Bucket 的 跨域设置 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Object} params.CORSConfiguration 相关的跨域设置,必须 * @param {Array} params.CORSConfiguration.CORSRules 对应的跨域规则 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 */ function putBucketCors(params, callback) { var CORSConfiguration = params['CORSConfiguration'] || {}; var CORSRules = CORSConfiguration['CORSRules'] || params['CORSRules'] || []; CORSRules = util.clone(util.isArray(CORSRules) ? CORSRules : [CORSRules]); util.each(CORSRules, function (rule) { util.each(['AllowedOrigin', 'AllowedHeader', 'AllowedMethod', 'ExposeHeader'], function (key, k) { var sKey = key + 's'; var val = rule[sKey] || rule[key] || []; delete rule[sKey]; rule[key] = util.isArray(val) ? val : [val]; }); }); var xml = util.json2xml({CORSConfiguration: {CORSRule: CORSRules}}); var headers = params.Headers; headers['Content-Type'] = 'application/xml'; headers['Content-MD5'] = util.binaryBase64(util.md5(xml)); submitRequest.call(this, { Action: 'name/cos:PutBucketCORS', method: 'PUT', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'cors', headers: headers, }, function (err, data) { if (err) { return callback(err); } callback(null, { statusCode: data.statusCode, headers: data.headers, }); }); } /** * 删除 Bucket 的 跨域设置 * @param {Object} params 参数对象,必须 * @param {String} params.Bucket Bucket名称,必须 * @param {String} params.Region 地域名称,必须 * @param {Function} callback 回调函数,必须 * @return {Object} err 请求失败的错误,如果请求成功,则为空。https://cloud.tencent.com/document/product/436/7730 * @return {Object} data 返回的数据 */ function deleteBucketCors(params, callback) { submitRequest.call(this, { Action: 'name/cos:DeleteBucketCORS', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'cors', }, function (err, data) { if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); } else if (err) { return callback(err); } callback(null, { statusCode: data.statusCode || err.statusCode, headers: data.headers, }); }); } function putBucketPolicy(params, callback) { var Policy = params['Policy']; var PolicyStr = Policy; try { if (typeof Policy === 'string') { Policy = JSON.parse(PolicyStr); } else { PolicyStr = JSON.stringify(Policy); } } catch (e) { callback({error: 'Policy format error'}); } var headers = params.Headers; headers['Content-Type'] = 'application/json'; headers['Content-MD5'] = util.binaryBase64(util.md5(PolicyStr)); submitRequest.call(this, { Action: 'name/cos:PutBucketPolicy', method: 'PUT', Bucket: params.Bucket, Region: params.Region, action: 'policy', body: util.isBrowser ? 
function putBucketPolicy(params, callback) {
    var Policy = params['Policy'];
    var PolicyStr = Policy;
    try {
        if (typeof Policy === 'string') {
            Policy = JSON.parse(PolicyStr);
        } else {
            PolicyStr = JSON.stringify(Policy);
        }
    } catch (e) {
        return callback({error: 'Policy format error'});
    }
    var headers = params.Headers;
    headers['Content-Type'] = 'application/json';
    headers['Content-MD5'] = util.binaryBase64(util.md5(PolicyStr));
    submitRequest.call(this, { Action: 'name/cos:PutBucketPolicy', method: 'PUT', Bucket: params.Bucket, Region: params.Region, action: 'policy', body: util.isBrowser ? PolicyStr : Policy, headers: headers, json: true, }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Delete the policy of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function deleteBucketPolicy(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:DeleteBucketPolicy', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'policy', }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Get the region of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data, including the region as LocationConstraint
 */
function getBucketLocation(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketLocation', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'location', }, function (err, data) {
        if (err) { return callback(err); }
        callback(null, data);
    });
}

/**
 * Get the access policy of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function getBucketPolicy(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketPolicy', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'policy', rawBody: true, }, function (err, data) {
        if (err) {
            if (err.statusCode && err.statusCode === 403) { return callback({ErrorStatus: 'Access Denied'}); }
            if (err.statusCode && err.statusCode === 405) { return callback({ErrorStatus: 'Method Not Allowed'}); }
            if (err.statusCode && err.statusCode === 404) { return callback({ErrorStatus: 'Policy Not Found'}); }
            return callback(err);
        }
        var Policy = {};
        try { Policy = JSON.parse(data.body); } catch (e) { }
        callback(null, { Policy: Policy, statusCode: data.statusCode, headers: data.headers, });
    });
}
/**
 * Get the tags of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function getBucketTagging(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketTagging', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'tagging', }, function (err, data) {
        if (err) {
            if (err.statusCode === 404 && err.error && (err.error === "Not Found" || err.error.Code === 'NoSuchTagSet')) {
                var result = { Tags: [], statusCode: err.statusCode, };
                err.headers && (result.headers = err.headers);
                callback(null, result);
            } else {
                callback(err);
            }
            return;
        }
        var Tags = [];
        try { Tags = data.Tagging.TagSet.Tag || []; } catch (e) { }
        Tags = util.clone(util.isArray(Tags) ? Tags : [Tags]);
        callback(null, { Tags: Tags, statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Set the tags of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Array} params.TagSet - the tag set, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function putBucketTagging(params, callback) {
    var Tagging = params['Tagging'] || {};
    var Tags = Tagging.TagSet || Tagging.Tags || params['Tags'] || [];
    Tags = util.clone(util.isArray(Tags) ? Tags : [Tags]);
    var xml = util.json2xml({Tagging: {TagSet: {Tag: Tags}}});
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:PutBucketTagging', method: 'PUT', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'tagging', headers: headers, }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Delete the tags of a Bucket
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function deleteBucketTagging(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:DeleteBucketTagging', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'tagging', }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

function putBucketLifecycle(params, callback) {
    var LifecycleConfiguration = params['LifecycleConfiguration'] || {};
    var Rules = LifecycleConfiguration.Rules || params.Rules || [];
    Rules = util.clone(Rules);
    var xml = util.json2xml({LifecycleConfiguration: {Rule: Rules}});
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:PutBucketLifecycle', method: 'PUT', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'lifecycle', headers: headers, }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}
function getBucketLifecycle(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketLifecycle', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'lifecycle', }, function (err, data) {
        if (err) {
            if (err.statusCode === 404 && err.error && err.error.Code === 'NoSuchLifecycleConfiguration') {
                var result = { Rules: [], statusCode: err.statusCode, };
                err.headers && (result.headers = err.headers);
                callback(null, result);
            } else {
                callback(err);
            }
            return;
        }
        var Rules = [];
        try { Rules = data.LifecycleConfiguration.Rule || []; } catch (e) { }
        Rules = util.clone(util.isArray(Rules) ? Rules : [Rules]);
        callback(null, { Rules: Rules, statusCode: data.statusCode, headers: data.headers, });
    });
}

function deleteBucketLifecycle(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:DeleteBucketLifecycle', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'lifecycle', }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

function putBucketVersioning(params, callback) {
    if (!params['VersioningConfiguration']) {
        callback({error: 'missing param VersioningConfiguration'});
        return;
    }
    var VersioningConfiguration = params['VersioningConfiguration'] || {};
    var xml = util.json2xml({VersioningConfiguration: VersioningConfiguration});
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:PutBucketVersioning', method: 'PUT', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'versioning', headers: headers, }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

function getBucketVersioning(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketVersioning', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'versioning', }, function (err, data) {
        if (!err) {
            !data.VersioningConfiguration && (data.VersioningConfiguration = {});
        }
        callback(err, data);
    });
}

function putBucketReplication(params, callback) {
    var ReplicationConfiguration = util.clone(params.ReplicationConfiguration);
    var xml = util.json2xml({ReplicationConfiguration: ReplicationConfiguration});
    xml = xml.replace(/<(\/?)Rules>/ig, '<$1Rule>');
    xml = xml.replace(/<(\/?)Tags>/ig, '<$1Tag>');
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:PutBucketReplication', method: 'PUT', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'replication', headers: headers, }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}
function getBucketReplication(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetBucketReplication', method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'replication', }, function (err, data) {
        if (err) {
            if (err.statusCode === 404 && err.error && (err.error === 'Not Found' || err.error.Code === 'ReplicationConfigurationnotFoundError')) {
                var result = { ReplicationConfiguration: {Rules: []}, statusCode: err.statusCode, };
                err.headers && (result.headers = err.headers);
                callback(null, result);
            } else {
                callback(err);
            }
            return;
        }
        if (!err) {
            !data.ReplicationConfiguration && (data.ReplicationConfiguration = {});
        }
        if (data.ReplicationConfiguration.Rule) {
            data.ReplicationConfiguration.Rules = data.ReplicationConfiguration.Rule;
            delete data.ReplicationConfiguration.Rule;
        }
        callback(err, data);
    });
}

function deleteBucketReplication(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:DeleteBucketReplication', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, action: 'replication', }, function (err, data) {
        if (err && err.statusCode === 204) { return callback(null, {statusCode: err.statusCode}); }
        else if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

// Object APIs

/**
 * Retrieve the metadata of an Object; HEAD requires the same permission as GET
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.IfModifiedSince - if the object was modified after the given time, return its metadata, otherwise return 304, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - metadata of the object; if IfModifiedSince is set and the file is unmodified, an object with NotModified set to true is returned
 * @return {Boolean} data.NotModified - true if the object has not been modified since IfModifiedSince
 */
function headObject(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:HeadObject', method: 'HEAD', Bucket: params.Bucket, Region: params.Region, Key: params.Key, VersionId: params.VersionId, headers: params.Headers, }, function (err, data) {
        if (err) {
            var statusCode = err.statusCode;
            if (params.Headers['If-Modified-Since'] && statusCode && statusCode === 304) {
                return callback(null, { NotModified: true, statusCode: statusCode, });
            }
            return callback(err);
        }
        if (data.headers) {
            var headers = data.headers;
            data.ETag = headers.etag || headers.Etag || headers.ETag || '';
        }
        callback(null, data);
    });
}
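// Usage sketch (illustrative only, not part of this module): probing for an
// object with headObject and branching on the status code. Bucket, region and
// key below are hypothetical placeholders.
//
//     cos.headObject({ Bucket: 'examplebucket-1250000000', Region: 'ap-guangzhou', Key: 'a.txt' }, function (err, data) {
//         if (err && err.statusCode === 404) { console.log('object not found'); }
//         else if (err) { console.log(err); }
//         else { console.log('exists, ETag:', data.ETag); }
//     });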
function listObjectVersions(params, callback) {
    var reqParams = {};
    reqParams['prefix'] = params['Prefix'] || '';
    reqParams['delimiter'] = params['Delimiter'];
    reqParams['key-marker'] = params['KeyMarker'];
    reqParams['version-id-marker'] = params['VersionIdMarker'];
    reqParams['max-keys'] = params['MaxKeys'];
    reqParams['encoding-type'] = params['EncodingType'];
    submitRequest.call(this, { Action: 'name/cos:GetBucketObjectVersions', ResourceKey: reqParams['prefix'], method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, qs: reqParams, action: 'versions', }, function (err, data) {
        if (err) { return callback(err); }
        var ListVersionsResult = data.ListVersionsResult || {};
        var DeleteMarkers = ListVersionsResult.DeleteMarker || [];
        DeleteMarkers = util.isArray(DeleteMarkers) ? DeleteMarkers : [DeleteMarkers];
        var Versions = ListVersionsResult.Version || [];
        Versions = util.isArray(Versions) ? Versions : [Versions];
        var result = util.clone(ListVersionsResult);
        delete result.DeleteMarker;
        delete result.Version;
        util.extend(result, { DeleteMarkers: DeleteMarkers, Versions: Versions, statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}

/**
 * Download an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {WriteStream} params.Output - write stream for the file, optional
 * @param {String} params.IfModifiedSince - if the object was modified after the given time, return its metadata, otherwise return 304, optional
 * @param {String} params.IfUnmodifiedSince - return the file content only if it was modified at or before the given time; otherwise return 412 (precondition failed), optional
 * @param {String} params.IfMatch - return the file only if its ETag matches the given value; otherwise return 412 (precondition failed), optional
 * @param {String} params.IfNoneMatch - return the file only if its ETag does not match the given value; otherwise return 304 (not modified), optional
 * @param {String} params.ResponseContentType - sets the Content-Type header of the response, optional
 * @param {String} params.ResponseContentLanguage - sets the Content-Language header of the response, optional
 * @param {String} params.ResponseExpires - sets the Content-Expires header of the response, optional
 * @param {String} params.ResponseCacheControl - sets the Cache-Control header of the response, optional
 * @param {String} params.ResponseContentDisposition - sets the Content-Disposition header of the response, optional
 * @param {String} params.ResponseContentEncoding - sets the Content-Encoding header of the response, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - the object data, including body and headers
 */
function getObject(params, callback) {
    var reqParams = {};
    reqParams['response-content-type'] = params['ResponseContentType'];
    reqParams['response-content-language'] = params['ResponseContentLanguage'];
    reqParams['response-expires'] = params['ResponseExpires'];
    reqParams['response-cache-control'] = params['ResponseCacheControl'];
    reqParams['response-content-disposition'] = params['ResponseContentDisposition'];
    reqParams['response-content-encoding'] = params['ResponseContentEncoding'];
    // If the caller passed in their own output stream
    submitRequest.call(this, { Action: 'name/cos:GetObject', method: 'GET', Bucket: params.Bucket, Region: params.Region, Key: params.Key, VersionId: params.VersionId, headers: params.Headers, qs: reqParams, rawBody: true, }, function (err, data) {
        if (err) {
            var statusCode = err.statusCode;
            if (params.Headers['If-Modified-Since'] && statusCode && statusCode === 304) {
                return callback(null, { NotModified: true });
            }
            return callback(err);
        }
        var result = {};
        result.Body = data.body;
        if (data && data.headers) {
            var headers = data.headers;
            result.ETag = headers.etag || headers.Etag || headers.ETag || '';
        }
        util.extend(result, { statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}
/**
 * Upload an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.Body - content to upload; only strings are supported
 * @param {String} params.CacheControl - cache policy as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentDisposition - file name as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentEncoding - encoding as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentLength - HTTP request content length in bytes as defined in RFC 2616, required
 * @param {String} params.ContentType - MIME content type as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.Expect - with Expect: 100-continue, the request body is sent only after the server acknowledges, optional
 * @param {String} params.Expires - expiry time as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentSha1 - 160-bit SHA-1 checksum as defined in RFC 3174, optional
 * @param {String} params.ACL - custom ACL for the file, valid values: private | public-read, optional
 * @param {String} params.GrantRead - grants read permission, format: x-cos-grant-read: uin=" ",uin=" ", optional
 * @param {String} params.GrantWrite - grants write permission, format: x-cos-grant-write: uin=" ",uin=" ", optional
 * @param {String} params.GrantFullControl - grants read and write permission, format: x-cos-grant-full-control: uin=" ",uin=" ", optional
 * @param {String} params.ServerSideEncryption - server-side encryption with the given algorithm, format: x-cos-server-side-encryption: "AES256", optional
 * @param {Function} params.onProgress - upload progress callback
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - the returned object data
 * @return {String} data.ETag - ETag of the uploaded file
 */
function putObject(params, callback) {
    var self = this;
    var FileSize = params.ContentLength;
    var onProgress = util.throttleOnProgress.call(self, FileSize, params.onProgress);
    util.getBodyMd5(self.options.UploadCheckContentMd5, params.Body, function (md5) {
        md5 && (params.Headers['Content-MD5'] = util.binaryBase64(md5));
        if (params.ContentLength !== undefined) {
            params.Headers['Content-Length'] = params.ContentLength;
        }
        submitRequest.call(self, { Action: 'name/cos:PutObject', TaskId: params.TaskId, method: 'PUT', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: params.Headers, body: params.Body, onProgress: onProgress, }, function (err, data) {
            if (err) {
                onProgress(null, true);
                return callback(err);
            }
            onProgress({loaded: FileSize, total: FileSize}, true);
            if (data && data.headers) {
                var headers = data.headers;
                var ETag = headers.etag || headers.Etag || headers.ETag || '';
                var url = getUrl({ ForcePathStyle: self.options.ForcePathStyle, protocol: self.options.Protocol, domain: self.options.Domain, bucket: params.Bucket, region: params.Region, object: params.Key, });
                url = url.substr(url.indexOf('://') + 3);
                return callback(null, { Location: url, ETag: ETag, statusCode: data.statusCode, headers: headers, });
            }
            callback(null, data);
        });
    });
}

/**
 * Upload an object (form-based POST upload)
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {FilePath} params.FilePath - path of the file to upload
 * @param {Function} params.onProgress - upload progress callback
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - the returned object data
 * @return {String} data.ETag - ETag of the uploaded file
 */
function postObject(params, callback) {
    var self = this;
    var headers = {};
    headers['Cache-Control'] = params['CacheControl'];
    headers['Content-Disposition'] = params['ContentDisposition'];
    headers['Content-Encoding'] = params['ContentEncoding'];
    headers['Content-MD5'] = params['ContentMD5'];
    headers['Content-Length'] = params['ContentLength'];
    headers['Content-Type'] = params['ContentType'];
    headers['Expect'] = params['Expect'];
    headers['Expires'] = params['Expires'];
    headers['x-cos-acl'] = params['ACL'];
    headers['x-cos-grant-read'] = params['GrantRead'];
    headers['x-cos-grant-write'] = params['GrantWrite'];
    headers['x-cos-grant-full-control'] = params['GrantFullControl'];
    headers['x-cos-storage-class'] = params['StorageClass'];
    var filePath = params.FilePath;
    for (var key in params) {
        if (key.indexOf('x-cos-meta-') > -1) {
            headers[key] = params[key];
        }
    }
    var onProgress = util.throttleOnProgress.call(self, headers['Content-Length'], params.onProgress);
    submitRequest.call(this, { Action: 'name/cos:PostObject',
        method: 'POST', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: headers, filePath: filePath, onProgress: onProgress, }, function (err, data) {
        onProgress(null, true);
        if (err) { return callback(err); }
        if (data && data.headers) {
            var headers = data.headers;
            var ETag = headers.etag || headers.Etag || headers.ETag || '';
            var url = getUrl({ ForcePathStyle: self.options.ForcePathStyle, protocol: self.options.Protocol, domain: self.options.Domain, bucket: params.Bucket, region: params.Region, object: params.Key, isLocation: true, });
            return callback(null, { Location: url, statusCode: data.statusCode, headers: headers, ETag: ETag, });
        }
        callback(null, data);
    });
}

/**
 * Delete an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - data returned after a successful delete
 */
function deleteObject(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:DeleteObject', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: params.Headers, VersionId: params.VersionId, }, function (err, data) {
        if (err) {
            var statusCode = err.statusCode;
            if (statusCode && statusCode === 204) {
                return callback(null, {statusCode: statusCode});
            } else if (statusCode && statusCode === 404) {
                return callback(null, {BucketNotFound: true, statusCode: statusCode,});
            } else {
                return callback(err);
            }
        }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Get the ACL of an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 * @return {Object} data.AccessControlPolicy - the access control list
 */
function getObjectAcl(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:GetObjectACL', method: 'GET', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: params.Headers, action: 'acl', }, function (err, data) {
        if (err) { return callback(err); }
        var AccessControlPolicy = data.AccessControlPolicy || {};
        var Owner = AccessControlPolicy.Owner || {};
        var Grant = AccessControlPolicy.AccessControlList && AccessControlPolicy.AccessControlList.Grant || [];
        Grant = util.isArray(Grant) ? Grant : [Grant];
        var result = decodeAcl(AccessControlPolicy);
        if (data.headers && data.headers['x-cos-acl']) {
            result.ACL = data.headers['x-cos-acl'];
        }
        result = util.extend(result, { Owner: Owner, Grants: Grant, statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}
/**
 * Set the ACL of an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function putObjectAcl(params, callback) {
    var headers = params.Headers;
    var xml = '';
    if (params['AccessControlPolicy']) {
        var AccessControlPolicy = util.clone(params['AccessControlPolicy'] || {});
        var Grants = AccessControlPolicy.Grants || AccessControlPolicy.Grant;
        Grants = util.isArray(Grants) ? Grants : [Grants];
        delete AccessControlPolicy.Grant;
        delete AccessControlPolicy.Grants;
        AccessControlPolicy.AccessControlList = {Grant: Grants};
        xml = util.json2xml({AccessControlPolicy: AccessControlPolicy});
        headers['Content-Type'] = 'application/xml';
        headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    }
    // De-duplicate grant headers
    util.each(headers, function (val, key) {
        if (key.indexOf('x-cos-grant-') === 0) {
            headers[key] = uniqGrant(headers[key]);
        }
    });
    submitRequest.call(this, { Action: 'name/cos:PutObjectACL', method: 'PUT', Bucket: params.Bucket, Region: params.Region, Key: params.Key, action: 'acl', headers: headers, body: xml, }, function (err, data) {
        if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Send an OPTIONS preflight request for cross-origin access, i.e. ask the server whether the cross-origin operation is allowed.
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function optionsObject(params, callback) {
    var headers = params.Headers;
    headers['Origin'] = params['Origin'];
    headers['Access-Control-Request-Method'] = params['AccessControlRequestMethod'];
    headers['Access-Control-Request-Headers'] = params['AccessControlRequestHeaders'];
    submitRequest.call(this, { Action: 'name/cos:OptionsObject', method: 'OPTIONS', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: headers, }, function (err, data) {
        if (err) {
            if (err.statusCode && err.statusCode === 403) {
                return callback(null, { OptionsForbidden: true, statusCode: err.statusCode });
            }
            return callback(err);
        }
        var headers = data.headers || {};
        callback(null, { AccessControlAllowOrigin: headers['access-control-allow-origin'], AccessControlAllowMethods: headers['access-control-allow-methods'], AccessControlAllowHeaders: headers['access-control-allow-headers'], AccessControlExposeHeaders: headers['access-control-expose-headers'], AccessControlMaxAge: headers['access-control-max-age'], statusCode: data.statusCode, headers: data.headers, });
    });
}
/**
 * @param {Object} params - parameter list
 * @param {String} Bucket - bucket name
 * @param {String} Region - region name
 * @param {String} Key - object key
 * @param {String} CopySource - absolute URL of the source object; a historical version can be specified via the versionid sub-resource
 * @param {String} ACL - custom ACL for the file, valid values: private, public-read; default: private
 * @param {String} GrantRead - grants read permission, format: x-cos-grant-read: uin=" ",uin=" "; to grant to a sub-account use uin="RootAcountID/SubAccountID", to grant to a root account use uin="RootAcountID"
 * @param {String} GrantWrite - grants write permission, format: x-cos-grant-write: uin=" ",uin=" "; to grant to a sub-account use uin="RootAcountID/SubAccountID", to grant to a root account use uin="RootAcountID"
 * @param {String} GrantFullControl - grants read and write permission, format: x-cos-grant-full-control: uin=" ",uin=" "; to grant to a sub-account use uin="RootAcountID/SubAccountID", to grant to a root account use uin="RootAcountID"
 * @param {String} MetadataDirective - whether to copy metadata, enum: Copy, Replaced; default: Copy. With Copy, user metadata in the headers is ignored and copied from the source; with Replaced, metadata is set from the headers. When the destination path equals the source path (i.e. the user is modifying metadata) it must be Replaced
 * @param {String} CopySourceIfModifiedSince - perform the operation only if the object was modified after the given time, otherwise return 412. Can be combined with x-cos-copy-source-If-None-Match; combining with other conditions returns a conflict
 * @param {String} CopySourceIfUnmodifiedSince - perform the operation only if the object was not modified after the given time, otherwise return 412. Can be combined with x-cos-copy-source-If-Match; combining with other conditions returns a conflict
 * @param {String} CopySourceIfMatch - perform the operation only if the object's ETag matches the given value, otherwise return 412. Can be combined with x-cos-copy-source-If-Unmodified-Since; combining with other conditions returns a conflict
 * @param {String} CopySourceIfNoneMatch - perform the operation only if the object's ETag does not match the given value, otherwise return 412. Can be combined with x-cos-copy-source-If-Modified-Since; combining with other conditions returns a conflict
 * @param {String} StorageClass - storage class, enum: Standard, Standard_IA, Archive; default: Standard
 * @param {String} CacheControl - directives that all caching mechanisms along the request/response chain must obey
 * @param {String} ContentDisposition - MIME extension telling the user agent how to display the attached file
 * @param {String} ContentEncoding - HTTP header pair used to negotiate the encoding of the transferred body
 * @param {String} ContentLength - size of the response entity body, in bytes
 * @param {String} ContentType - HTTP request content type (MIME) as defined in RFC 2616, e.g. text/plain
 * @param {String} Expect - specific server behaviors expected by the request
 * @param {String} Expires - date and time at which the response expires
 * @param {String} params.ServerSideEncryption - server-side encryption with the given algorithm, format: x-cos-server-side-encryption: "AES256", optional
 * @param {String} ContentLanguage - content language
 * @param {String} x-cos-meta-* - user-defined headers, returned as object metadata; limited to 2 KB
 */
function putObjectCopy(params, callback) {
    var CopySource = params.CopySource || '';
    var m = CopySource.match(/^([^.]+-\d+)\.cos(v6)?\.([^.]+)\.[^/]+\/(.+)$/);
    if (!m) {
        callback({error: 'CopySource format error'});
        return;
    }
    var SourceBucket = m[1];
    var SourceRegion = m[3];
    var SourceKey = decodeURIComponent(m[4]);
    submitRequest.call(this, { Scope: [{ action: 'name/cos:GetObject', bucket: SourceBucket, region: SourceRegion, prefix: SourceKey, }, { action: 'name/cos:PutObject', bucket: params.Bucket, region: params.Region, prefix: params.Key, }], method: 'PUT', Bucket: params.Bucket, Region: params.Region, Key: params.Key, VersionId: params.VersionId, headers: params.Headers, }, function (err, data) {
        if (err) { return callback(err); }
        var result = util.clone(data.CopyObjectResult || {});
        util.extend(result, { statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}

function uploadPartCopy(params, callback) {
    var CopySource = params.CopySource || '';
    var m = CopySource.match(/^([^.]+-\d+)\.cos(v6)?\.([^.]+)\.[^/]+\/(.+)$/);
    if (!m) {
        callback({error: 'CopySource format error'});
        return;
    }
    var SourceBucket = m[1];
    var SourceRegion = m[3];
    var SourceKey = decodeURIComponent(m[4]);
    submitRequest.call(this, { Scope: [{ action: 'name/cos:GetObject', bucket: SourceBucket, region: SourceRegion, prefix: SourceKey, }, { action: 'name/cos:PutObject', bucket: params.Bucket, region: params.Region, prefix: params.Key, }], method: 'PUT', Bucket: params.Bucket, Region: params.Region, Key: params.Key, VersionId: params.VersionId, qs: { partNumber: params['PartNumber'], uploadId: params['UploadId'], }, headers: params.Headers, }, function (err, data) {
        if (err) { return callback(err); }
        var result = util.clone(data.CopyPartResult || {});
        util.extend(result, { statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}
function deleteMultipleObject(params, callback) {
    var Objects = params.Objects || [];
    var Quiet = params.Quiet;
    Objects = util.isArray(Objects) ? Objects : [Objects];
    var xml = util.json2xml({Delete: {Object: Objects, Quiet: Quiet || false}});
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    var Scope = util.map(Objects, function (v) {
        return { action: 'name/cos:DeleteObject', bucket: params.Bucket, region: params.Region, prefix: v.Key, };
    });
    submitRequest.call(this, { Scope: Scope, method: 'POST', Bucket: params.Bucket, Region: params.Region, body: xml, action: 'delete', headers: headers, }, function (err, data) {
        if (err) { return callback(err); }
        var DeleteResult = data.DeleteResult || {};
        var Deleted = DeleteResult.Deleted || [];
        var Errors = DeleteResult.Error || [];
        Deleted = util.isArray(Deleted) ? Deleted : [Deleted];
        Errors = util.isArray(Errors) ? Errors : [Errors];
        var result = util.clone(DeleteResult);
        util.extend(result, { Error: Errors, Deleted: Deleted, statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}

function restoreObject(params, callback) {
    var headers = params.Headers;
    if (!params['RestoreRequest']) {
        callback({error: 'missing param RestoreRequest'});
        return;
    }
    var RestoreRequest = params.RestoreRequest || {};
    var xml = util.json2xml({RestoreRequest: RestoreRequest});
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:RestoreObject', method: 'POST', Bucket: params.Bucket, Region: params.Region, Key: params.Key, VersionId: params.VersionId, body: xml, action: 'restore', headers: headers, }, function (err, data) {
        callback(err, data);
    });
}

// Multipart upload

/**
 * Initialize a multipart upload
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.CacheControl - cache policy as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentDisposition - file name as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentEncoding - encoding as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ContentType - MIME content type as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.Expires - expiry time as defined in RFC 2616, stored as object metadata, optional
 * @param {String} params.ACL - custom ACL for the file, optional
 * @param {String} params.GrantRead - grants read permission, optional
 * @param {String} params.GrantWrite - grants write permission, optional
 * @param {String} params.GrantFullControl - grants read and write permission, optional
 * @param {String} params.StorageClass - storage class of the object, enum: Standard, Standard_IA, Archive, optional
 * @param {String} params.ServerSideEncryption - server-side encryption with the given algorithm, format: x-cos-server-side-encryption: "AES256", optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function multipartInit(params, callback) {
    submitRequest.call(this, { Action: 'name/cos:InitiateMultipartUpload', method: 'POST', Bucket: params.Bucket, Region: params.Region, Key: params.Key, action: 'uploads', headers: params.Headers, }, function (err, data) {
        if (err) { return callback(err); }
        data = util.clone(data || {});
        if (data && data.InitiateMultipartUploadResult) {
            return callback(null, util.extend(data.InitiateMultipartUploadResult, { statusCode: data.statusCode, headers: data.headers, }));
        }
        callback(null, data);
    });
}
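// Usage sketch (illustrative only, not part of this module): the low-level
// multipart flow wires the three calls below together -- multipartInit for an
// UploadId, multipartUpload once per part, then multipartComplete with the
// collected PartNumber/ETag pairs. All names and values are hypothetical.
//
//     cos.multipartInit({ Bucket: bucket, Region: region, Key: key }, function (err, data) {
//         var uploadId = data.UploadId;
//         cos.multipartUpload({ Bucket: bucket, Region: region, Key: key, UploadId: uploadId, PartNumber: 1, Body: chunk }, function (err, data) {
//             cos.multipartComplete({ Bucket: bucket, Region: region, Key: key, UploadId: uploadId, Parts: [{ PartNumber: 1, ETag: data.ETag }] }, function (err, data) {
//                 console.log(err || data.Location);
//             });
//         });
//     });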
/**
 * Upload a part
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.Body - file object or string to upload
 * @param {String} params.ContentLength - HTTP request content length in bytes as defined in RFC 2616, optional
 * @param {String} params.Expect - with Expect: 100-continue, the request body is sent only after the server acknowledges, optional
 * @param {String} params.ServerSideEncryption - server-side encryption with the given algorithm, format: x-cos-server-side-encryption: "AES256", optional
 * @param {String} params.ContentSha1 - 160-bit SHA-1 checksum as defined in RFC 3174, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 * @return {Object} data.ETag - checksum (ETag) of the uploaded part
 */
function multipartUpload(params, callback) {
    var self = this;
    util.getFileSize('multipartUpload', params, function () {
        util.getBodyMd5(self.options.UploadCheckContentMd5, params.Body, function (md5) {
            md5 && (params.Headers['Content-MD5'] = util.binaryBase64(md5));
            submitRequest.call(self, { Action: 'name/cos:UploadPart', TaskId: params.TaskId, method: 'PUT', Bucket: params.Bucket, Region: params.Region, Key: params.Key, qs: { partNumber: params['PartNumber'], uploadId: params['UploadId'], }, headers: params.Headers, onProgress: params.onProgress, body: params.Body || null }, function (err, data) {
                if (err) { return callback(err); }
                if (data && data.headers) {
                    var headers = data.headers;
                    data.ETag = headers.etag || headers.Etag || headers.ETag || '';
                }
                callback(null, data);
            });
        });
    });
}

/**
 * Complete a multipart upload
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {Array} params.Parts - list of part descriptors, required
 * @param {String} params.Parts[i].PartNumber - part number, required
 * @param {String} params.Parts[i].ETag - checksum (ETag) of the part
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 * @return {Object} data.CompleteMultipartUpload - information of the completed file, including Location, Bucket, Key and ETag
 */
function multipartComplete(params, callback) {
    var self = this;
    var UploadId = params.UploadId;
    var Parts = params['Parts'];
    for (var i = 0, len = Parts.length; i < len; i++) {
        if (Parts[i]['ETag'].indexOf('"') === 0) {
            continue;
        }
        Parts[i]['ETag'] = '"' + Parts[i]['ETag'] + '"';
    }
    var xml = util.json2xml({CompleteMultipartUpload: {Part: Parts}});
    var headers = params.Headers;
    headers['Content-Type'] = 'application/xml';
    headers['Content-MD5'] = util.binaryBase64(util.md5(xml));
    submitRequest.call(this, { Action: 'name/cos:CompleteMultipartUpload', method: 'POST', Bucket: params.Bucket, Region: params.Region, Key: params.Key, qs: { uploadId: UploadId }, body: xml, headers: headers, }, function (err, data) {
        if (err) { return callback(err); }
        var url = getUrl({ ForcePathStyle: self.options.ForcePathStyle, protocol: self.options.Protocol, domain: self.options.Domain, bucket: params.Bucket, region: params.Region, object: params.Key, isLocation: true, });
        var CompleteMultipartUploadResult = data.CompleteMultipartUploadResult || {};
        var result = util.extend(CompleteMultipartUploadResult, { Location: url, statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}
/**
 * List in-progress multipart uploads
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Delimiter - a single-character delimiter; when Prefix is set, identical paths between the Prefix and the delimiter are grouped into a Common Prefix and all Common Prefixes are listed; without Prefix, grouping starts from the beginning of the path, optional
 * @param {String} params.EncodingType - encoding of the returned values, optional
 * @param {String} params.Prefix - prefix filter, restricting the listed keys to those starting with the prefix, optional
 * @param {String} params.MaxUploads - maximum number of entries returned per request, default 1000, optional
 * @param {String} params.KeyMarker - used together with upload-id-marker. When upload-id-marker is not specified, entries whose ObjectName is lexicographically greater than key-marker are listed. When upload-id-marker is specified, entries whose ObjectName is lexicographically greater than key-marker are listed, plus entries whose ObjectName equals key-marker and whose UploadId is greater than upload-id-marker, optional
 * @param {String} params.UploadIdMarker - used together with key-marker. When key-marker is not specified, upload-id-marker is ignored. When key-marker is specified, entries whose ObjectName is lexicographically greater than key-marker are listed, plus entries whose ObjectName equals key-marker and whose UploadId is greater than upload-id-marker, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 * @return {Object} data.ListMultipartUploadsResult - information about the multipart upload tasks
 */
function multipartList(params, callback) {
    var reqParams = {};
    reqParams['delimiter'] = params['Delimiter'];
    reqParams['encoding-type'] = params['EncodingType'];
    reqParams['prefix'] = params['Prefix'] || '';
    reqParams['max-uploads'] = params['MaxUploads'];
    reqParams['key-marker'] = params['KeyMarker'];
    reqParams['upload-id-marker'] = params['UploadIdMarker'];
    reqParams = util.clearKey(reqParams);
    submitRequest.call(this, { Action: 'name/cos:ListMultipartUploads', ResourceKey: reqParams['prefix'], method: 'GET', Bucket: params.Bucket, Region: params.Region, headers: params.Headers, qs: reqParams, action: 'uploads', }, function (err, data) {
        if (err) { return callback(err); }
        if (data && data.ListMultipartUploadsResult) {
            var Upload = data.ListMultipartUploadsResult.Upload || [];
            var CommonPrefixes = data.ListMultipartUploadsResult.CommonPrefixes || [];
            CommonPrefixes = util.isArray(CommonPrefixes) ? CommonPrefixes : [CommonPrefixes];
            Upload = util.isArray(Upload) ? Upload : [Upload];
            data.ListMultipartUploadsResult.Upload = Upload;
            data.ListMultipartUploadsResult.CommonPrefixes = CommonPrefixes;
        }
        var result = util.clone(data.ListMultipartUploadsResult || {});
        util.extend(result, { statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}
/**
 * List the uploaded parts of a multipart upload
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.UploadId - ID identifying this multipart upload, required
 * @param {String} params.EncodingType - encoding of the returned values, optional
 * @param {String} params.MaxParts - maximum number of entries returned per request, default 1000, optional
 * @param {String} params.PartNumberMarker - entries are listed in UTF-8 binary order by default, starting after the marker, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data, the list of uploaded parts
 */
function multipartListPart(params, callback) {
    var reqParams = {};
    reqParams['uploadId'] = params['UploadId'];
    reqParams['encoding-type'] = params['EncodingType'];
    reqParams['max-parts'] = params['MaxParts'];
    reqParams['part-number-marker'] = params['PartNumberMarker'];
    submitRequest.call(this, { Action: 'name/cos:ListParts', method: 'GET', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: params.Headers, qs: reqParams, }, function (err, data) {
        if (err) { return callback(err); }
        var ListPartsResult = data.ListPartsResult || {};
        var Part = ListPartsResult.Part || [];
        Part = util.isArray(Part) ? Part : [Part];
        ListPartsResult.Part = Part;
        var result = util.clone(ListPartsResult);
        util.extend(result, { statusCode: data.statusCode, headers: data.headers, });
        callback(null, result);
    });
}

/**
 * Abort a multipart upload
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.UploadId - ID identifying this multipart upload, required
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function multipartAbort(params, callback) {
    var reqParams = {};
    reqParams['uploadId'] = params['UploadId'];
    submitRequest.call(this, { Action: 'name/cos:AbortMultipartUpload', method: 'DELETE', Bucket: params.Bucket, Region: params.Region, Key: params.Key, headers: params.Headers, qs: reqParams, }, function (err, data) {
        if (err) { return callback(err); }
        callback(null, { statusCode: data.statusCode, headers: data.headers, });
    });
}

/**
 * Get a signature
 * @param {Object} params - parameter object, required
 * @param {String} params.Method - request method, required
 * @param {String} params.Key - object key, required
 * @param {String} params.Expires - signature expiry, in seconds, optional
 * @return {String} data - the signature string
 */
function getAuth(params) {
    var self = this;
    return util.getAuth({ SecretId: params.SecretId || this.options.SecretId || '', SecretKey: params.SecretKey || this.options.SecretKey || '', Method: params.Method, Key: params.Key, Query: params.Query, Headers: params.Headers, Expires: params.Expires, SystemClockOffset: self.options.SystemClockOffset, });
}
/**
 * Get a download URL for an object
 * @param {Object} params - parameter object, required
 * @param {String} params.Bucket - bucket name, required
 * @param {String} params.Region - region name, required
 * @param {String} params.Key - object key, required
 * @param {String} params.Method - request method, optional
 * @param {String} params.Expires - signature expiry, in seconds, optional
 * @param {Function} callback - callback function, required
 * @return {Object} err - error of a failed request, null on success. https://cloud.tencent.com/document/product/436/7730
 * @return {Object} data - returned data
 */
function getObjectUrl(params, callback) {
    var self = this;
    var url = getUrl({ ForcePathStyle: self.options.ForcePathStyle, protocol: params.Protocol || self.options.Protocol, domain: self.options.Domain, bucket: params.Bucket, region: params.Region, object: params.Key, });
    if (params.Sign !== undefined && !params.Sign) {
        callback(null, {Url: url});
        return url;
    }
    var AuthData = getAuthorizationAsync.call(this, { Action: ((params.Method || '').toUpperCase() === 'PUT' ? 'name/cos:PutObject' : 'name/cos:GetObject'), Bucket: params.Bucket || '', Region: params.Region || '', Method: params.Method || 'get', Key: params.Key, Expires: params.Expires, }, function (err, AuthData) {
        if (!callback) return;
        if (err) {
            callback(err);
            return;
        }
        var signUrl = url;
        signUrl += '?' + (AuthData.Authorization.indexOf('q-signature') > -1 ? AuthData.Authorization : 'sign=' + encodeURIComponent(AuthData.Authorization));
        AuthData.XCosSecurityToken && (signUrl += '&x-cos-security-token=' + AuthData.XCosSecurityToken);
        AuthData.ClientIP && (signUrl += '&clientIP=' + AuthData.ClientIP);
        AuthData.ClientUA && (signUrl += '&clientUA=' + AuthData.ClientUA);
        AuthData.Token && (signUrl += '&token=' + AuthData.Token);
        setTimeout(function () {
            callback(null, {Url: signUrl});
        });
    });
    if (AuthData) {
        return url + '?' + AuthData.Authorization + (AuthData.XCosSecurityToken ? '&x-cos-security-token=' + AuthData.XCosSecurityToken : '');
    } else {
        return url;
    }
}

/**
 * Private helpers
 */
function decodeAcl(AccessControlPolicy) {
    var result = { GrantFullControl: [], GrantWrite: [], GrantRead: [], GrantReadAcp: [], GrantWriteAcp: [], ACL: '', };
    var GrantMap = { 'FULL_CONTROL': 'GrantFullControl', 'WRITE': 'GrantWrite', 'READ': 'GrantRead', 'READ_ACP': 'GrantReadAcp', 'WRITE_ACP': 'GrantWriteAcp', };
    var Grant = AccessControlPolicy.AccessControlList.Grant;
    if (Grant) {
        Grant = util.isArray(Grant) ? Grant : [Grant];
    }
    var PublicAcl = {READ: 0, WRITE: 0, FULL_CONTROL: 0};
    Grant.length && util.each(Grant, function (item) {
        if (item.Grantee.ID === 'qcs::cam::anyone:anyone' || item.Grantee.URI === 'http://cam.qcloud.com/groups/global/AllUsers') {
            PublicAcl[item.Permission] = 1;
        } else if (item.Grantee.ID !== AccessControlPolicy.Owner.ID) {
            result[GrantMap[item.Permission]].push('id="' + item.Grantee.ID + '"');
        }
    });
    if (PublicAcl.FULL_CONTROL || (PublicAcl.WRITE && PublicAcl.READ)) {
        result.ACL = 'public-read-write';
    } else if (PublicAcl.READ) {
        result.ACL = 'public-read';
    } else {
        result.ACL = 'private';
    }
    util.each(GrantMap, function (item) {
        result[item] = uniqGrant(result[item].join(','));
    });
    return result;
}

// De-duplicate grants
function uniqGrant(str) {
    var arr = str.split(',');
    var exist = {};
    var i, item;
    for (i = 0; i < arr.length; ) {
        item = arr[i].trim();
        if (exist[item]) {
            arr.splice(i, 1);
        } else {
            exist[item] = true;
            arr[i] = item;
            i++;
        }
    }
    return arr.join(',');
}
// Build the request url
function getUrl(params) {
    var longBucket = params.bucket;
    var shortBucket = longBucket.substr(0, longBucket.lastIndexOf('-'));
    var appId = longBucket.substr(longBucket.lastIndexOf('-') + 1);
    var domain = params.domain;
    var region = params.region;
    var object = params.object;
    var protocol = 'https:';
    if (!domain) {
        if (['cn-south', 'cn-south-2', 'cn-north', 'cn-east', 'cn-southwest', 'sg'].indexOf(region) > -1) {
            domain = '{Region}.myqcloud.com';
        } else {
            domain = 'cos.{Region}.myqcloud.com';
        }
        if (!params.ForcePathStyle) {
            domain = '{Bucket}.' + domain;
        }
    }
    domain = domain.replace(/\{\{AppId\}\}/ig, appId)
        .replace(/\{\{Bucket\}\}/ig, shortBucket)
        .replace(/\{\{Region\}\}/ig, region)
        .replace(/\{\{.*?\}\}/ig, '');
    domain = domain.replace(/\{AppId\}/ig, appId)
        .replace(/\{BucketName\}/ig, shortBucket)
        .replace(/\{Bucket\}/ig, longBucket)
        .replace(/\{Region\}/ig, region)
        .replace(/\{.*?\}/ig, '');
    if (!/^[a-zA-Z]+:\/\//.test(domain)) {
        domain = protocol + '//' + domain;
    }
    // Strip the trailing slash from the domain
    if (domain.slice(-1) === '/') {
        domain = domain.slice(0, -1);
    }
    var url = domain;
    if (params.ForcePathStyle) {
        url += '/' + longBucket;
    }
    url += '/';
    if (object) {
        url += util.camSafeUrlEncode(object).replace(/%2F/g, '/');
    }
    if (params.isLocation) {
        url = url.replace(/^https?:\/\//, '');
    }
    return url;
}
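// For reference (illustrative values): with bucket 'examplebucket-1250000000',
// region 'ap-guangzhou' and object 'dir/a.txt', getUrl yields
//     https://examplebucket-1250000000.cos.ap-guangzhou.myqcloud.com/dir/a.txt
// in the default virtual-hosted style, and with ForcePathStyle enabled
//     https://cos.ap-guangzhou.myqcloud.com/examplebucket-1250000000/dir/a.txt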
// Obtain authorization asynchronously
function getAuthorizationAsync(params, callback) {
    var headers = util.clone(params.Headers);
    delete headers['Content-Type'];
    delete headers['Cache-Control'];
    util.each(headers, function (v, k) {
        v === '' && delete headers[k];
    });
    var cb = function (AuthData) {
        // Validate the signature format
        var formatAllow = false;
        var auth = AuthData.Authorization;
        if (auth) {
            if (auth.indexOf(' ') > -1) {
                formatAllow = false;
            } else if (auth.indexOf('q-sign-algorithm=') > -1 && auth.indexOf('q-ak=') > -1 && auth.indexOf('q-sign-time=') > -1 && auth.indexOf('q-key-time=') > -1 && auth.indexOf('q-url-param-list=') > -1) {
                formatAllow = true;
            } else {
                try {
                    auth = atob(auth);
                    if (auth.indexOf('a=') > -1 && auth.indexOf('k=') > -1 && auth.indexOf('t=') > -1 && auth.indexOf('r=') > -1 && auth.indexOf('b=') > -1) {
                        formatAllow = true;
                    }
                } catch (e) {}
            }
        }
        if (formatAllow) {
            callback && callback(null, AuthData);
        } else {
            callback && callback('authorization error');
        }
    };
    var self = this;
    var Bucket = params.Bucket || '';
    var Region = params.Region || '';
    // PathName
    var KeyName = params.Action === 'name/cos:PostObject' || !params.Key ? '' : params.Key;
    if (self.options.ForcePathStyle && Bucket) {
        KeyName = Bucket + '/' + KeyName;
    }
    var Pathname = '/' + KeyName;
    // Action and ResourceKey
    var StsData = {};
    var Scope = params.Scope;
    if (!Scope) {
        var Action = params.Action || '';
        var ResourceKey = params.ResourceKey || params.Key || '';
        Scope = params.Scope || [{ action: Action, bucket: Bucket, region: Region, prefix: ResourceKey, }];
    }
    var ScopeKey = util.md5(JSON.stringify(Scope));
    // STS
    self._StsCache = self._StsCache || [];
    (function () {
        var i, AuthData;
        for (i = self._StsCache.length - 1; i >= 0; i--) {
            AuthData = self._StsCache[i];
            var compareTime = Math.round(util.getSkewTime(self.options.SystemClockOffset) / 1000) + 30;
            if (AuthData.StartTime && compareTime < AuthData.StartTime || compareTime >= AuthData.ExpiredTime) {
                self._StsCache.splice(i, 1);
                continue;
            }
            if (!AuthData.ScopeLimit || AuthData.ScopeLimit && AuthData.ScopeKey === ScopeKey) {
                StsData = AuthData;
                break;
            }
        }
    })();
    var calcAuthByTmpKey = function () {
        var Authorization = util.getAuth({ SecretId: StsData.TmpSecretId, SecretKey: StsData.TmpSecretKey, Method: params.Method, Pathname: Pathname, Query: params.Query, Headers: headers, Expires: params.Expires, SystemClockOffset: self.options.SystemClockOffset, });
        var AuthData = { Authorization: Authorization, XCosSecurityToken: StsData.XCosSecurityToken || '', Token: StsData.Token || '', ClientIP: StsData.ClientIP || '', ClientUA: StsData.ClientUA || '', };
        cb(AuthData);
    };
    // First check whether a temporary key is available
    if (StsData.ExpiredTime && StsData.ExpiredTime - (util.getSkewTime(self.options.SystemClockOffset) / 1000) > 60) {
        // Use the cached temporary key directly if it stays valid for more than 60 seconds
        calcAuthByTmpKey();
    } else if (self.options.getAuthorization) {
        // Compute the signature or fetch a temporary key externally
        self.options.getAuthorization.call(self, { Bucket: Bucket, Region: Region, Method: params.Method, Key: KeyName, Pathname: Pathname, Query: params.Query, Headers: headers, Scope: Scope, }, function (AuthData) {
            if (typeof AuthData === 'string') {
                AuthData = {Authorization: AuthData};
            }
            if (AuthData.TmpSecretId && AuthData.TmpSecretKey && AuthData.XCosSecurityToken && AuthData.ExpiredTime) {
                StsData = AuthData || {};
                StsData.Scope = Scope;
                StsData.ScopeKey = ScopeKey;
                self._StsCache.push(StsData);
                calcAuthByTmpKey();
            } else {
                cb(AuthData);
            }
        });
    } else if (self.options.getSTS) {
        // Fetch a temporary key externally
        self.options.getSTS.call(self, { Bucket: Bucket, Region: Region, }, function (data) {
            StsData = data || {};
            StsData.Scope = Scope;
            StsData.ScopeKey = ScopeKey;
            StsData.TmpSecretId = StsData.SecretId;
            StsData.TmpSecretKey = StsData.SecretKey;
            self._StsCache.push(StsData);
            calcAuthByTmpKey();
        });
    } else {
        // Compute the signature locally
        return (function () {
            var Authorization = util.getAuth({ SecretId: params.SecretId || self.options.SecretId, SecretKey: params.SecretKey || self.options.SecretKey, Method: params.Method, Pathname: Pathname, Query: params.Query, Headers: headers, Expires: params.Expires, SystemClockOffset: self.options.SystemClockOffset, });
            var AuthData = { Authorization: Authorization, XCosSecurityToken: self.options.XCosSecurityToken, };
            cb(AuthData);
            return AuthData;
        })();
    }
    return '';
}
// Adjust for clock skew
function allowRetry(err) {
    var allowRetry = false;
    var isTimeError = false;
    var serverDate = (err.headers && (err.headers.date || err.headers.Date)) || '';
    try {
        var errorCode = err.error.Code;
        var errorMessage = err.error.Message;
        if (errorCode === 'RequestTimeTooSkewed' || (errorCode === 'AccessDenied' && errorMessage === 'Request has expired')) {
            isTimeError = true;
        }
    } catch (e) { }
    if (err) {
        if (isTimeError && serverDate) {
            var serverTime = Date.parse(serverDate);
            if (this.options.CorrectClockSkew && Math.abs(util.getSkewTime(this.options.SystemClockOffset) - serverTime) >= 30000) {
                console.error('error: Local time is too skewed.');
                this.options.SystemClockOffset = serverTime - Date.now();
                allowRetry = true;
            }
        } else if (Math.round(err.statusCode / 100) === 5) {
            allowRetry = true;
        }
    }
    return allowRetry;
}

// Sign and submit the request
function submitRequest(params, callback) {
    var self = this;
    // Normalize headers
    !params.headers && (params.headers = {});
    // Normalize query params
    !params.qs && (params.qs = {});
    params.VersionId && (params.qs.versionId = params.VersionId);
    params.qs = util.clearKey(params.qs);
    // Strip undefined and null fields
    params.headers && (params.headers = util.clearKey(params.headers));
    params.qs && (params.qs = util.clearKey(params.qs));
    var Query = util.clone(params.qs);
    params.action && (Query[params.action] = '');
    var next = function (tryIndex) {
        var oldClockOffset = self.options.SystemClockOffset;
        getAuthorizationAsync.call(self, { Bucket: params.Bucket || '', Region: params.Region || '', Method: params.method, Key: params.Key, Query: Query, Headers: params.headers, Action: params.Action, ResourceKey: params.ResourceKey, Scope: params.Scope, }, function (err, AuthData) {
            params.AuthData = AuthData;
            _submitRequest.call(self, params, function (err, data) {
                if (err && tryIndex < 2 && (oldClockOffset !== self.options.SystemClockOffset || allowRetry.call(self, err))) {
                    if (params.headers) {
                        delete params.headers.Authorization;
                        delete params.headers['token'];
                        delete params.headers['clientIP'];
                        delete params.headers['clientUA'];
                        delete params.headers['x-cos-security-token'];
                    }
                    next(tryIndex + 1);
                } else {
                    callback(err, data);
                }
            });
        });
    };
    next(0);
}
// Send the request
function _submitRequest(params, callback) {
    var self = this;
    var TaskId = params.TaskId;
    if (TaskId && !self._isRunningTask(TaskId)) return;
    var bucket = params.Bucket;
    var region = params.Region;
    var object = params.Key;
    var method = params.method || 'GET';
    var url = params.url;
    var body = params.body;
    var json = params.json;
    var rawBody = params.rawBody;
    // url
    url = url || getUrl({ ForcePathStyle: self.options.ForcePathStyle, protocol: self.options.Protocol, domain: self.options.Domain, bucket: bucket, region: region, object: object, });
    if (params.action) {
        url = url + '?' + params.action;
    }
    var opt = { method: method, url: url, headers: params.headers, qs: params.qs, filePath: params.filePath, body: body, json: json, };
    // Attach the signature
    opt.headers.Authorization = params.AuthData.Authorization;
    params.AuthData.Token && (opt.headers['token'] = params.AuthData.Token);
    params.AuthData.ClientIP && (opt.headers['clientIP'] = params.AuthData.ClientIP);
    params.AuthData.ClientUA && (opt.headers['clientUA'] = params.AuthData.ClientUA);
    params.AuthData.XCosSecurityToken && (opt.headers['x-cos-security-token'] = params.AuthData.XCosSecurityToken);
    // Strip undefined and null fields
    opt.headers && (opt.headers = util.clearKey(opt.headers));
    opt = util.clearKey(opt);
    // progress
    if (params.onProgress && typeof params.onProgress === 'function') {
        opt.onProgress = function (e) {
            if (TaskId && !self._isRunningTask(TaskId)) return;
            var loaded = e ? e.loaded : 0;
            params.onProgress({loaded: loaded, total: e.total});
        };
    }
    self.options.ForcePathStyle && (opt.pathStyle = self.options.ForcePathStyle);
    var sender = REQUEST(opt, function (err, response, body) {
        // Attach the status code and headers to the returned content
        var hasReturned;
        var cb = function (err, data) {
            TaskId && self.off('inner-kill-task', killTask);
            if (hasReturned) return;
            hasReturned = true;
            var attrs = {};
            response && response.statusCode && (attrs.statusCode = response.statusCode);
            response && response.headers && (attrs.headers = response.headers);
            if (err) {
                err = util.extend(err || {}, attrs);
                callback(err, null);
            } else {
                data = util.extend(data || {}, attrs);
                callback(null, data);
            }
        };
        // Request failed with a network error
        if (err) {
            cb({error: err});
            return;
        }
        var jsonRes;
        try {
            jsonRes = util.xml2json(body) || {};
        } catch (e) {
            jsonRes = body || {};
        }
        // Response status code is not 2xx
        var statusCode = response.statusCode;
        var statusSuccess = Math.floor(statusCode / 100) === 2; // 200 202 204 206
        if (!statusSuccess) {
            cb({error: jsonRes.Error || jsonRes});
            return;
        }
        // Do not convert the body; attach it to the result as-is
        if (rawBody) {
            jsonRes = {};
            jsonRes.body = body;
        }
        if (jsonRes.Error) {
            cb({error: jsonRes.Error});
            return;
        }
        cb(null, jsonRes);
    });
    // kill task
    var killTask = function (data) {
        if (data.TaskId === TaskId) {
            sender && sender.abort && sender.abort();
            self.off('inner-kill-task', killTask);
        }
    };
    TaskId && self.on('inner-kill-task', killTask);
}

var API_MAP = {
    // Bucket methods
    getService: getService,
    putBucket: putBucket,
    getBucket: getBucket,
    headBucket: headBucket,
    deleteBucket: deleteBucket,
    getBucketAcl: getBucketAcl,
    putBucketAcl: putBucketAcl,
    getBucketCors: getBucketCors,
    putBucketCors: putBucketCors,
    deleteBucketCors: deleteBucketCors,
    getBucketLocation: getBucketLocation,
    putBucketTagging: putBucketTagging,
    getBucketTagging: getBucketTagging,
    deleteBucketTagging: deleteBucketTagging,
    getBucketPolicy: getBucketPolicy,
    putBucketPolicy: putBucketPolicy,
    deleteBucketPolicy: deleteBucketPolicy,
    getBucketLifecycle: getBucketLifecycle,
    putBucketLifecycle: putBucketLifecycle,
    deleteBucketLifecycle: deleteBucketLifecycle,
    putBucketVersioning: putBucketVersioning,
    getBucketVersioning: getBucketVersioning,
    putBucketReplication: putBucketReplication,
    getBucketReplication: getBucketReplication,
    deleteBucketReplication: deleteBucketReplication,

    // Object methods
    getObject: getObject,
    headObject: headObject,
    listObjectVersions: listObjectVersions,
    putObject: putObject,
    postObject: postObject,
    deleteObject: deleteObject,
    getObjectAcl: getObjectAcl,
    putObjectAcl: putObjectAcl,
    optionsObject: optionsObject,
    putObjectCopy: putObjectCopy,
    deleteMultipleObject: deleteMultipleObject,
    restoreObject: restoreObject,

    // Multipart upload methods
    uploadPartCopy: uploadPartCopy,
    multipartInit: multipartInit,
    multipartUpload: multipartUpload,
    multipartComplete: multipartComplete,
    multipartList: multipartList,
    multipartListPart: multipartListPart,
    multipartAbort: multipartAbort,

    // Utility methods
    getObjectUrl: getObjectUrl,
    getAuth: getAuth,
};

module.exports.init = function (COS, task) {
    task.transferToTaskMethod(API_MAP, 'postObject');
    util.each(API_MAP, function (fn, apiName) {
        COS.prototype[apiName] = util.apiWrapper(apiName, fn);
    });
};
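// ---------------------------------------------------------------------------
// Usage sketch for the API module above (illustrative only). It assumes the
// SDK's public entry point -- the cos-js-sdk-v5 COS constructor -- which wires
// these methods in through module.exports.init. Credentials, bucket and region
// below are hypothetical placeholders.
var COS = require('cos-js-sdk-v5');
var cos = new COS({ SecretId: 'AKIDxxxxxxxx', SecretKey: 'xxxxxxxx' });
cos.putObject({
    Bucket: 'examplebucket-1250000000', // hypothetical bucket
    Region: 'ap-guangzhou',             // hypothetical region
    Key: 'hello.txt',
    Body: 'hello cos',
}, function (err, data) {
    console.log(err || data.Location);
});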
var _curry2 = require('./internal/_curry2'); /** * Returns a partial copy of an object containing only the keys specified. If the key does not exist, the * property is ignored. * * @func * @memberOf R * @category Object * @sig [k] -> {k: v} -> {k: v} * @param {Array} names an array of String property names to copy onto a new object * @param {Object} obj The object to copy from * @return {Object} A new object with only properties from `names` on it. * @see R.omit * @example * * R.pick(['a', 'd'], {a: 1, b: 2, c: 3, d: 4}); //=> {a: 1, d: 4} * R.pick(['a', 'e', 'f'], {a: 1, b: 2, c: 3, d: 4}); //=> {a: 1} */ module.exports = _curry2(function pick(names, obj) { var result = {}; var idx = 0; while (idx < names.length) { if (names[idx] in obj) { result[names[idx]] = obj[names[idx]]; } idx += 1; } return result; });
const mongoose = require("mongoose"); const connectDB = async () => { try { const databaseURI = process.env.MONGODB_URI || "mongodb://localhost:27017/Bizup"; const connect = await mongoose.connect(databaseURI, { useNewUrlParser: true, useUnifiedTopology: true, serverSelectionTimeoutMS: 10000, }); console.log("Connected to database"); } catch (err) { console.log(err.message); console.log("Database connection error"); process.exit(1); } }; module.exports = connectDB;
import Icon from '../icon'; import PropTypes from 'prop-types'; import { animated, useSpring } from 'react-spring'; import React, { useEffect, useRef, useState } from 'react'; import Style, { enterOverlay, progress, timerBar } from './toast.style'; import { isNil, merge } from 'lodash'; const Animated = animated(Style); // custom hook for handling the countdown to exit const useTimer = (duration = 8000) => { const [countdown, setCountdown] = useState(duration); const [isRunning, setIsRunning] = useState(true); useEffect(() => { let interval; if (!isNil(countdown) && isRunning) { interval = setInterval(() => { setCountdown(current => current - 1000); }, 1000); } else { clearInterval(interval); } return () => { clearInterval(interval); }; }, [countdown, isRunning]); return [countdown, isRunning, setIsRunning]; }; const Toast = ({ onEnter, onExit, ...props }) => { const [exit, setExit] = useState(false); const multiButtonRef = useRef(props.actions?.length > 1 || props.actionRequired); const ref = useRef(); // call the onEnter callback once on first render. const idRef = useRef(props.id); const onEnterRef = useRef(onEnter); useEffect(() => { onEnterRef.current(idRef.current); }, []); const onExitRef = useRef(); useEffect(() => { onExitRef.current = onExit; }, [onExit]); const enterProps = { from: { opacity: 0, transform: 'translate(0, -80px) scale(1, 1)', marginBottom: 0, height: 'auto' }, to: { height: 'auto', opacity: 1, transform: 'translate(0, 0px) scale(1, 1)', marginBottom: 10, paddingTop: props.messageBar ? 8 : 14, paddingBottom: props.messageBar ? 8 : 14, }, }; if (props.bottom) { merge(enterProps, { from: { transform: 'translate(0, 80px) scale(1, 1)' }, to: { transform: 'translate(0, 0px) scale(1, 1)', marginTop: 10 }, }); } if (props.enterInPlace) { merge(enterProps, { from: { transform: 'translate(0, 0px) scale(0, 0)' }, to: { transform: 'translate(0, 0px) scale(1, 1)', marginBottom: 0, marginTop: 0 }, }); } const [springProps, setSpring] = useSpring(() => ({ ...enterProps, config: { tension: 180, friction: 20, clamp: !!props.enterInPlace }, onRest: () => { springProps.height.setValue(ref.current.offsetHeight); }, })); // set a timer for the toast's original duration. A timeout of null has unlimited duration. const [countdown, isRunning, setIsRunning] = useTimer( props.actionRequired || props.actions?.length > 1 ? null : props.timeout ); useEffect(() => { if (!isNil(countdown) && countdown <= 0) { if (!props.timer) { setExit(true); } else { setIsRunning(false); setExit(true); } } }, [countdown, props.timer, setIsRunning, setSpring]); // the countdown pauses while the mouse hovers over the toast. const handleHover = e => { if (props.pauseOnHover) { setIsRunning(false); } }; const timeoutRef = useRef(props.timeout); // the countdown resumes when the mouse leaves the toast. 
const handleHoverEnd = () => {
    if (props.pauseOnHover) {
      timeoutRef.current = countdown;
      setIsRunning(true);
    }
  };

  // set the exit animation, with callback to remove the toast on completion
  useEffect(() => {
    if (exit) {
      const exitProps = {
        to: {
          opacity: 0,
          transform: 'translate(0, -60px) scale(1, 1)',
          height: 0,
          marginBottom: 0,
          paddingTop: 0,
          paddingBottom: 0,
        },
      };
      if (props.enterInPlace) {
        merge(exitProps, { to: { transform: 'translate(0, -60px) scale(1, 1)' } });
      }
      if (props.bottom) {
        merge(exitProps, { to: { transform: 'translate(0, 60px) scale(1, 1)', top: 60, marginTop: 0 } });
      }
      if (props.exitInPlace) {
        merge(exitProps, { to: { transform: 'translate(0, 0px) scale(1, 0)' } });
      }
      setSpring({
        ...exitProps,
        config: {
          friction: 25,
        },
        delay: 150,
        onRest: () => {
          onExitRef.current(idRef.current);
        },
      });
    }
  }, [exit, props.bottom, props.enterInPlace, props.exitInPlace, setSpring]);

  // User clicks the close icon to dismiss the toast (skipped when a click on
  // the toast body already dismisses it, to avoid triggering twice)
  const handleClickDismiss = () => {
    if (!props.dismissOnClick || props.actions?.length) {
      setSpring({
        to: {
          transform: 'translate(0, 0) scale(0, 0)',
          height: 0,
          marginBottom: 0,
          paddingTop: 0,
          paddingBottom: 0,
        },
        config: { clamp: true, tension: 280 },
        onRest: () => onExitRef.current(props.id),
      });
    }
  };

  // programmatically dismiss the toast early by setting its timeout to 0.
  useEffect(() => {
    if (props.timeout === 0) {
      setExit(true);
    }
  }, [props.timeout]);

  const renderAction = () => {
    if (!props.actions) return null;
    return props.actions.map?.(action => {
      if (React.isValidElement(action)) return action;
      return (
        <button
          key={action.label}
          className='action-default'
          onClick={() => {
            if (action.label !== 'Change Type') setExit(true);
            const actionKey = Object.keys(action).find(k => k !== 'label');
            if (typeof action[actionKey] === 'function') action[actionKey](props.id);
          }}
        >
          {action.label}
        </button>
      );
    });
  };

  const renderTimer = () => {
    const timer = props.timer || props.timerBar || props.timerTime;
    const bar = props.timer || props.timerBar;
    const time = props.timer || props.timerTime;
    if (!timer || countdown === null) return null;
    const minutes = Math.floor(countdown / 60000);
    const seconds = (countdown / 1000) % 60;
    return (
      <>
        {time && (
          <span>
            {minutes.toString().padStart(2, '0')}:{seconds.toString().padStart(2, '0')}
          </span>
        )}
        {bar && (
          <div
            css={[
              timerBar,
              isRunning && {
                animation: `${timeoutRef.current / 1000}s linear ${progress(
                  (timeoutRef.current / props.timeout) * 100
                )}`,
              },
              !isRunning &&
                countdown > 0 && {
                  width: `${(countdown / props.timeout) * 100}%`,
                },
            ]}
          ></div>
        )}
      </>
    );
  };

  const classes = [
    props.type,
    multiButtonRef.current && 'multi-button',
    props.actions && props.actionRequired && 'action-required',
  ]
    .filter(Boolean)
    .join(' ');

  return (
    <>
      {props.actions?.length && props.actionRequired && (
        <div
          className='toast-overlay'
          css={[
            {
              position: 'fixed',
              top: 0,
              left: 0,
              opacity: 1,
              width: '100vw',
              height: '100vh',
              background: 'rgba(0, 0, 0, 0.8)',
              animation: `200ms ease-in-out ${enterOverlay}`,
            },
            exit && {
              opacity: 0,
              transition: 'opacity 300ms 200ms ease-in-out',
            },
          ]}
        ></div>
      )}
      <Animated
        ref={ref}
        className={classes}
        onClick={props.dismissOnClick && !props.actions?.length ? () => setExit(true) : () => {}}
        style={springProps}
        css={[
          props.timer &&
            !props.actions && {
              '.action': {
                display: 'none',
              },
            },
          props.dismissOnClick &&
            !props.actions?.length && {
              cursor: 'pointer',
            },
          !props.enterInPlace &&
            !exit && {
              transformOrigin: 'top right',
            },
          props.exitInPlace &&
            exit && {
              transformOrigin: 'top center',
            },
          props.className,
        ]}
        onMouseEnter={handleHover}
        onMouseLeave={handleHoverEnd}
        onTouchStart={handleHover}
        onTouchEnd={handleHoverEnd}
      >
        <div className='message'>{props.message}</div>
        <div className='timer'>{renderTimer()}</div>
        <div className='action'>{renderAction()}</div>
        <div className='dismiss' onClick={handleClickDismiss}>
          <Icon name='times-lgt' />
        </div>
      </Animated>
    </>
  );
};

// prettier-ignore
Toast.componentDescription = 'Flash message notification & user confirmation widget.';
Toast.componentKey = 'toast';
Toast.componentName = 'Toast';

// prettier-ignore
Toast.propTypes = {
  /** A list of objects with button labels and callback functions to be called when the user clicks the action label. */
  actions: PropTypes.arrayOf(PropTypes.shape({
    action: PropTypes.func,
    label: PropTypes.node,
  })),
  /** Whether an action is required to dismiss the toast. Hides the close icon and adds a window overlay behind the toast. Has no effect if there are no actions. */
  actionRequired: PropTypes.bool,
  /** The message to be displayed. */
  message: PropTypes.node,
  /** Whether the notification will be displayed as a message bar instead of a toast */
  messageBar: PropTypes.bool,
  /** Whether to pause the timeout countdown on hover. */
  pauseOnHover: PropTypes.bool,
  /** The duration in ms the toast should be displayed. Null is indefinite duration. If the toast is multi-button or actionRequired is true, there is no timeout; the toast will only be dismissed on a button click. */
  timeout: PropTypes.oneOfType([PropTypes.number, PropTypes.string]),
  /** A shortcut to setting both timerBar and timerTime to true. Has no effect for multi-button or actionRequired toasts. */
  timer: PropTypes.bool,
  /** Whether to show the timerBar. Automatically true if timer is true. Has no effect for multi-button or actionRequired toasts. */
  timerBar: PropTypes.bool,
  /** Whether to show the timer countdown. Automatically true if timer is true. Has no effect for multi-button or actionRequired toasts. */
  timerTime: PropTypes.bool,
  /** The type determines the toast's color. Leaving type undefined uses the primary color. */
  type: PropTypes.oneOf(['success', 'info', 'warning', 'error', 'primary']),
  /** Whether the toast can be dismissed by clicking anywhere in it. */
  dismissOnClick: PropTypes.bool,
  /** Whether the toast container is anchored at the bottom of the window. */
  bottom: PropTypes.bool,
  /** Changes the enter animation to not have vertical motion. For use when configured as header-bar notification rather than toast. */
  enterInPlace: PropTypes.bool,
  /** Changes the exit animation to not have vertical motion. For use when configured as header-bar notification rather than toast. */
  exitInPlace: PropTypes.bool,
  /** Callback function invoked when the toast first renders. */
  onEnter: PropTypes.func,
  /** Callback function invoked when the timeout expires. */
  onExit: PropTypes.func,
};

Toast.defaultProps = {
  message: '',
  pauseOnHover: true,
  timeout: 8000,
  onEnter: () => {},
  onExit: () => {},
};

export default Toast;
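/*
 * A minimal usage sketch, assuming Toast is imported from './toast' and that
 * the parent tracks visible toasts and removes them by id in onExit (the
 * import path and handler names are illustrative):
 */
import React from 'react';
import Toast from './toast';

const SaveNotice = ({ onDismissed }) => (
  <Toast
    id="save-success"
    type="success"
    message="Your changes were saved."
    timer
    timeout={5000}
    onExit={id => onDismissed(id)}
  />
);

export default SaveNotice;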
/*
 * This is based on the SelectBox.js that comes with Django admin media
 *
 */
var SelectBox = {
    cache: new Object(),
    init: function(id) {
        var box = document.getElementById(id);
        var node;
        SelectBox.cache[id] = new Array();
        var cache = SelectBox.cache[id];
        for (var i = 0; (node = box.options[i]); i++) {
            cache.push({value: node.value, text: node.text, displayed: 1,
                        tree_id: node.getAttribute('data-tree-id'),
                        left_val: node.getAttribute('data-left-value')});
        }
    },
    redisplay: function(id) {
        // Repopulate HTML select box from cache
        var box = document.getElementById(id);
        // for some reason both these steps are necessary to get browsers to work properly...
        for (var i = 0; i < box.options.length; i++) {
            box.options[0] = null;
        }
        box.options.length = 0;
        SelectBox.sort(id);
        for (var i = 0, j = SelectBox.cache[id].length; i < j; i++) {
            var node = SelectBox.cache[id][i];
            if (node.displayed) {
                var newOpt = new Option(node.text, node.value, false, false);
                newOpt.setAttribute('data-tree-id', node.tree_id);
                newOpt.setAttribute('data-left-value', node.left_val);
                box.options[box.options.length] = newOpt;
            }
        }
    },
    filter: function(id, text) {
        // Redisplay the HTML select box, displaying only the choices containing ALL
        // the words in text. (It's an AND search.)
        var tokens = text.toLowerCase().split(/\s+/);
        var node, token;
        for (var i = 0; (node = SelectBox.cache[id][i]); i++) {
            node.displayed = 1;
            for (var j = 0; (token = tokens[j]); j++) {
                if (node.text.toLowerCase().indexOf(token) == -1) {
                    node.displayed = 0;
                }
            }
        }
        SelectBox.redisplay(id);
    },
    delete_from_cache: function(id, value) {
        var node, delete_index = null;
        for (var i = 0; (node = SelectBox.cache[id][i]); i++) {
            if (node.value == value) {
                delete_index = i;
                break;
            }
        }
        var j = SelectBox.cache[id].length - 1;
        for (var i = delete_index; i < j; i++) {
            SelectBox.cache[id][i] = SelectBox.cache[id][i+1];
        }
        SelectBox.cache[id].length--;
    },
    add_to_cache: function(id, option) {
        // in this case option is an anonymous object, not an html element
        SelectBox.cache[id].push({value: option.value, text: option.text, displayed: 1,
                                  tree_id: option.tree_id, left_val: option.left_val});
    },
    cache_contains: function(id, value) {
        // Check if an item is contained in the cache
        var node;
        for (var i = 0; (node = SelectBox.cache[id][i]); i++) {
            if (node.value == value) {
                return true;
            }
        }
        return false;
    },
    move: function(from, to) {
        var from_box = document.getElementById(from);
        var to_box = document.getElementById(to);
        var option;
        for (var i = 0; (option = from_box.options[i]); i++) {
            if (option.selected && SelectBox.cache_contains(from, option.value)) {
                SelectBox.add_to_cache(to, {value: option.value, text: option.text, displayed: 1,
                                            tree_id: option.getAttribute('data-tree-id'),
                                            left_val: option.getAttribute('data-left-value')});
                SelectBox.delete_from_cache(from, option.value);
            }
        }
        SelectBox.redisplay(from);
        SelectBox.redisplay(to);
    },
    move_all: function(from, to) {
        var from_box = document.getElementById(from);
        var to_box = document.getElementById(to);
        var option;
        for (var i = 0; (option = from_box.options[i]); i++) {
            if (SelectBox.cache_contains(from, option.value)) {
                SelectBox.add_to_cache(to, {value: option.value, text: option.text, displayed: 1,
                                            tree_id: option.getAttribute('data-tree-id'),
                                            left_val: option.getAttribute('data-left-value')});
                SelectBox.delete_from_cache(from, option.value);
            }
        }
        SelectBox.redisplay(from);
        SelectBox.redisplay(to);
    },
    sort: function(id) {
        SelectBox.cache[id].sort(function(a, b) {
            var a_tree_id = parseInt(a.tree_id, 10);
            var b_tree_id = parseInt(b.tree_id, 10);
            var a_left_val = parseInt(a.left_val, 10);
            var b_left_val = parseInt(b.left_val, 10);
            try {
                if (a_tree_id > b_tree_id) return 1;
                if (a_tree_id < b_tree_id) return -1;
                if (a_tree_id == b_tree_id) {
                    if (a_left_val > b_left_val) return 1;
                    if (a_left_val < b_left_val) return -1;
                }
            }
            catch (e) {
                // silently fail on IE 'unknown' exception
            }
            return 0;
        });
    },
    select_all: function(id) {
        var box = document.getElementById(id);
        for (var i = 0; i < box.options.length; i++) {
            box.options[i].selected = 'selected';
        }
    }
};
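/*
 * A minimal usage sketch, assuming a page with two <select> elements
 * ('id_from' and 'id_to'), a filter input ('id_filter'), and an add button
 * ('id_add') — all ids are illustrative. Each <option> is expected to carry
 * data-tree-id and data-left-value attributes for the tree-aware sort.
 */
window.addEventListener('load', function() {
    SelectBox.init('id_from');
    SelectBox.init('id_to');

    // Narrow the source box as the user types (AND search over words).
    document.getElementById('id_filter').addEventListener('keyup', function() {
        SelectBox.filter('id_from', this.value);
    });

    // Move the highlighted options across, keeping both caches in sync.
    document.getElementById('id_add').addEventListener('click', function() {
        SelectBox.move('id_from', 'id_to');
    });
});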
# Interview Question 17.6
# Time complexity: O(n^2)
# Auxiliary space complexity: O(n)


def min_unsorted_seq(integers):
    if not integers:
        return 0, 0

    N = len(integers)
    # farthest_less_than[i] holds the farthest index j > i with integers[j] < integers[i]
    farthest_less_than = [0] * N
    for i in range(N):
        for j in range(i, N):
            if integers[j] < integers[i]:
                farthest_less_than[i] = j

    # m: first index that has a smaller element somewhere to its right
    m = next((i for i, x in enumerate(farthest_less_than) if x > 0), 0)
    # n: rightmost index that is smaller than some element before it
    n = max(farthest_less_than)
    if n == 0:
        # The sequence is already sorted; nothing needs re-sorting.
        return 0, 0
    return m, n


if __name__ == '__main__':
    integers = []
    print(min_unsorted_seq(integers))

    integers = [1, 2, 4, 7, 10, 11, 7, 12, 6, 7, 16, 18, 19]
    print(min_unsorted_seq(integers))

    integers = list(range(13))
    print(min_unsorted_seq(integers))
/*global describe, beforeEach, it*/ 'use strict'; var path = require('path'); var helpers = require('yeoman-generator').test; describe('yii generator', function () { beforeEach(function (done) { helpers.testDirectory(path.join(__dirname, 'temp'), function (err) { if (err) { return done(err); } this.app = helpers.createGenerator('yii:app', [ '../../app' ]); done(); }.bind(this)); }); it('creates expected files', function (done) { var expected = [ // add files you expect to exist here. '.jshintrc', '.editorconfig' ]; helpers.mockPrompt(this.app, { 'someOption': 'Y' }); this.app.options['skip-install'] = true; this.app.run({}, function () { helpers.assertFiles(expected); done(); }); }); });
export const STORE_UPDATE = "STORE_UPDATE"; export const STORE_ADD = "STORE_ADD"; export const STORE_REMOVE = "STORE_REMOVE";
import moment from 'moment'; import { put, takeLatest } from 'redux-saga/effects'; import settingsActions, { INIT } from '../actions/settings'; import taxiServicesActions from '../actions/taxiServices'; import expensesSettingsActions from '../actions/expensesSettings'; import expensesActions from '../actions/expenses'; import ridesActions from '../actions/rides'; import { predefinedServices, predefinedExpenses, predefinedFuel } from '../constants'; import db from '../db'; import { getCurrency, getDistanceName } from '../components/LangSwitch'; import i18n from '../i18n'; const filterByCurrentMonth = row => moment(row.timestamp).isSame(moment(), 'month'); function* save(settingsData) { const settings = yield db.settings.create(settingsData.settings); const services = yield db.services.create(settingsData.services); const expensesSettings = yield db.expensesSettings.create(settingsData.expensesSettings); return { settings: (settings && settings[0]) || {}, services, expensesSettings, }; } function* read() { const settings = yield db.settings.read(); const services = yield db.services.read(); const expensesSettings = yield db.expensesSettings.read(); const expenses = yield db.expenses.read({ query: filterByCurrentMonth }); const rides = yield db.rides.read({ query: filterByCurrentMonth }); return { settings: (settings && settings[0]) || {}, services, expensesSettings, expenses, rides, }; } function* putToStore(settingsData) { yield put(settingsActions.initSuccess(settingsData.settings)); yield put(taxiServicesActions.initSuccess(settingsData.services)); yield put(expensesSettingsActions.initSuccess(settingsData.expensesSettings)); yield put(expensesActions.initSuccess(settingsData.expenses)); yield put(ridesActions.initSuccess(settingsData.rides)); } function* init() { try { const [maybeSettings] = yield db.settings.read(); if (!maybeSettings) { const settings = { initialized: true, done: false, distanceName: getDistanceName(), currency: getCurrency(), taxiDriver: true, fuelPrice: predefinedFuel.price, fuelConsumption: predefinedFuel.consumption, activeStep: 0, }; const data = yield save({ settings, services: predefinedServices, expensesSettings: predefinedExpenses.map(el => ({ ...el, name: i18n.t(el.name) })), }); yield putToStore(data); } else { const data = yield read(); yield putToStore(data); } } catch (error) { yield put(settingsActions.initFailure(error)); } } export default function* initSaga() { yield takeLatest(INIT, init); }
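/*
 * A minimal wiring sketch for the saga above, assuming a conventional
 * redux + redux-saga setup and that settingsActions exposes an init()
 * creator for the INIT action type (the paths and rootReducer import
 * are illustrative):
 */
import { applyMiddleware, createStore } from 'redux';
import createSagaMiddleware from 'redux-saga';
import rootReducer from '../reducers';
import settingsActions from '../actions/settings';
import initSaga from './init';

const sagaMiddleware = createSagaMiddleware();
const store = createStore(rootReducer, applyMiddleware(sagaMiddleware));

sagaMiddleware.run(initSaga);
// The first INIT after install seeds the predefined defaults; later INITs
// hydrate the store from the local database instead.
store.dispatch(settingsActions.init());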
/* Copyright (c) 2003-2019, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license */ CKEDITOR.plugins.setLang( 'fakeobjects', 'nb', { anchor: 'Anker', flash: 'Flash-animasjon', hiddenfield: 'Skjult felt', iframe: 'IFrame', unknown: 'Ukjent objekt' } );
define(["dojo/dom-attr","dojo/dom-construct","dojo/_base/declare", "dojo/string", "dojo/dom-class", "dojo/_base/kernel", "dojo/query", "dojo/_base/array", "dojo/_base/lang", "dijit/_Widget", "dijit/_Templated", "esri/toolbars/draw", "dojo/on","esri/symbols/SimpleFillSymbol", "esri/layers/GraphicsLayer","esri/graphic", "esri/symbols/SimpleMarkerSymbol","esri/symbols/SimpleLineSymbol","dojo/text!./templates/DrawToolBar.html", "dojo/NodeList-dom", "dojo/domReady!"], function (domAttr,domConstruct,declare, string,domClass, kernel, query, array, lang, _Widget, _Templated, Draw, on,SimpleFillSymbol, GraphicsLayer,Graphic,SimpleMarkerSymbol,SimpleLineSymbol, template) { return declare([_Widget, _Templated], { _listHtml: "", templateString: template, _NodeList:[], _ClickList:[], _listPara: [], _enabled: !0, _drawType: null, _baseClass:null, _drawTool: null, _map: null, _handler: null, _node: null, _domId: null, _drawEndMethod:null, constructor: function (options, srcRefNode) { this._listPara = options.drawObjs; this._map = options.map; this._DomID = srcRefNode; this._callback = options.callback; this._markLayer = options.markLayer|| new GraphicsLayer(); // this._baseClass = options.baseClass; this.ulClz = options.class; this._drawTool = new Draw(this._map); }, postCreate: function () { //this.deactivate(); this.inherited(arguments); this._renderHtml(); }, startup: function () { this.inherited(arguments); this._map.addLayer(this._markLayer); //this._drawList.innerHTML = '<ul class="boxinfo">'+this._NodeList.join("") + '</ul>'; //domClass.add(this.domNode, this._baseClass); //this._uiConnect(); }, _markGraphic:function(evt){ /* * 构建地图的图形及渲染 * */ var symbol; switch (evt.geometry.type) { case "point": symbol = new SimpleMarkerSymbol({ "color": [255,255,255,64], "size": 12, "type": "esriSMS", "style": "esriSMSCircle", "outline": { "color": [0,0,0,255], "width": 1, "type": "esriSLS", "style": "esriSLSSolid" } }); break; case "multipoint": symbol = new SimpleMarkerSymbol({ "color": [255,255,255,64], "size": 12, "type": "esriSMS", "style": "esriSMSCircle", "outline": { "color": [0,0,0,255], "width": 1, "type": "esriSLS", "style": "esriSLSSolid" } }); break; case "polyline": symbol = new SimpleLineSymbol(); break; default: symbol = new SimpleFillSymbol({ "type": "esriSFS", "style": "esriSFSSolid", "color": [255,255,255,64], "outline": { "type": "esriSLS", "style": "esriSLSSolid", "color": [0,0,0,255], "width": 2 } }); break; } var graphic = new Graphic(evt.geometry, symbol); this._markLayer.add(graphic); }, setEnabled: function (a) { this._enabled = a; }, _drawEnd:function(c){ this.inherited(arguments); this._drawEndMethod && this._drawEndMethod.apply(this, [c]); }, _renderHtml:function(){ var ul = domConstruct.create("ul",{"class":this.ulClz},this._drawList); dojo.forEach(this._listPara,lang.hitch(this, function (para) { this.createItem(para,ul); //this._NodeList.push(lang.replace("\x3cli class\x3d'{type}' type='{type}' \x3e\x3ci class\x3d'ace-icon fa {iconName} bigger-100' style\x3d'margin-right: 6px;'\x3e\x3c/i\x3e\x3cspan\x3e{alias}\x3c/span\x3e \x3c/li\x3e", para)); })); }, createItem:function(item,srcNodeRef){ var li = domConstruct.create("li",{"id":"li_draw_"+item.id,"class":item.class},srcNodeRef); if(item.subData){ item.html = lang.replace(item.html,item.subData); } var aOpt = { "innerHTML":item.html } if(item.type =="clear"){ aOpt["onclick"]=lang.hitch(this,this.clear); }else{ aOpt["onclick"]=lang.hitch(this,this._startDraw,item.type,null) } var a = domConstruct.create("a",aOpt,li); return li; }, 
_startDraw: function (type,callback,e) { this._drawTool.deactivate(); console.log(type); this._drawTool.activate(type); this._map.setMapCursor("crosshair"); this._drawTool.on("draw-end",lang.hitch(this,function(evt){ console.log(evt); this._map.setMapCursor("default"); this._drawTool.deactivate(); this._markGraphic(evt); callback && callback.apply(this,[evt]); })); }, clear:function(){ this._drawTool.deactivate(); this._markLayer.clear(); }, deactivate:function(){ this._drawTool.deactivate(); }, _uiConnect: function () { dojo.forEach(this._ClickList, dojo.disconnect); dojo.connect(this._domClear,"onclick",lang.hitch(this,function(evt){ this.clear(); })); //dojo.query("#"+this._DomID + " ul li").forEach(function (a) { dojo.query("ul li",this._drawList).forEach(function (a) { console.log(a); //this._ClickList.push(dojo.connect(a, "onclick", lang.hitch(this, "_startDraw", para.geoType, para.opr))); this._ClickList.push(dojo.connect(a, "onclick", lang.hitch(this, "_startDraw", a, this._callback))) }, this); }, _updateUI: function () { this.setEnabled(1); this._enabled && this._node && (dojo.hasClass(this._node, "drawSelect")) && (dojo.removeClass(this._node, "drawSelect")); //(!this._enabled) && this._node && (!dojo.hasClass(this._node, "drawSelect") && (dojo.removeClass(this._node, "drawSelect"))); } }); });
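/*
 * A minimal instantiation sketch. The module id "widgets/DrawToolBar", the
 * DOM node ids, and the map setup are illustrative; drawObjs entries mirror
 * the fields read by createItem above (id, type, class, html):
 */
require(["esri/map", "widgets/DrawToolBar", "dojo/domReady!"], function (Map, DrawToolBar) {
    var map = new Map("mapDiv", { basemap: "streets", center: [-122.45, 37.75], zoom: 12 });

    var toolbar = new DrawToolBar({
        map: map,
        "class": "boxinfo",
        drawObjs: [
            { id: "pt", type: "point", "class": "draw-point", html: "Point" },
            { id: "poly", type: "polygon", "class": "draw-polygon", html: "Polygon" },
            { id: "clr", type: "clear", "class": "draw-clear", html: "Clear" }
        ],
        callback: function (evt) {
            console.log("draw-end geometry:", evt.geometry);
        }
    }, "drawToolBarNode");

    toolbar.startup();
});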
describe("UserProtocol", () => { describe("#register", () => { }); });
import { fireEvent, render, screen, wait } from "@testing-library/react"; import React from "react"; import BookmarkService from "../Service/BookmarkService"; import sampleData from "../Service/__mocks__/data.json"; import { AppStore, StoreContext } from "../Store"; import GroupForm from "./GroupForm"; jest.mock("react-i18next"); jest.mock("../Service/BookmarkService"); const mockClose = jest.fn(); let appStore; let groupModalStore; beforeEach(() => { BookmarkService.mockClear(); mockClose.mockClear(); appStore = new AppStore(); appStore.setData(sampleData.bookmarks, sampleData.groups); groupModalStore = appStore.groupModalStore; }); test("new group with correct input", async () => { groupModalStore.open(); render( <StoreContext.Provider value={appStore}> <GroupForm onClose={mockClose} /> </StoreContext.Provider> ); const groupCount = appStore.groups.length; const group = { name: "group name", column: 2, order: 3, }; const nameTextbox = screen.getByRole("textbox", { name: "form.name" }); expect(nameTextbox).toBeInTheDocument(); const leftRadio = screen.getByRole("radio", { name: "form.left" }); expect(leftRadio).toBeInTheDocument(); const rightRadio = screen.getByRole("radio", { name: "form.right" }); expect(rightRadio).toBeInTheDocument(); const orderCombobox = screen.getByRole("combobox", { name: "form.order" }); expect(orderCombobox).toBeInTheDocument(); const saveButton = screen.getByRole("button", { name: "button.save" }); expect(saveButton).toBeInTheDocument(); await wait(() => { fireEvent.change(nameTextbox, { target: { value: group.name } }); }); await wait(() => { fireEvent.click(rightRadio); }); await wait(() => { fireEvent.change(orderCombobox, { target: { value: group.order } }); }); await wait(() => { fireEvent.click(saveButton); }); expect(appStore.groups).toHaveLength(groupCount + 1); const newGroup = appStore.groups[groupCount]; expect(newGroup.name).toBe(group.name); expect(newGroup.column).toBe(group.column); expect(newGroup.order).toBe(group.order); expect(mockClose).toHaveBeenCalledTimes(1); }); test("new group with incorrect input", async () => { groupModalStore.open(); render( <StoreContext.Provider value={appStore}> <GroupForm onClose={mockClose} /> </StoreContext.Provider> ); const nameTextbox = screen.getByRole("textbox", { name: "form.name" }); expect(nameTextbox).toBeInTheDocument(); const nameError = screen.getByRole("alert", { name: "form.name" }); expect(nameError).toBeInTheDocument(); const saveButton = screen.getByRole("button", { name: "button.save" }); expect(saveButton).toBeInTheDocument(); expect(nameError).toBeEmpty(); await wait(() => { fireEvent.click(saveButton); }); expect(nameError).not.toBeEmpty(); expect(mockClose).not.toHaveBeenCalled(); await wait(() => { fireEvent.change(nameTextbox, { target: { value: "a".repeat(51) } }); }); expect(nameError).not.toBeEmpty(); await wait(() => { fireEvent.change(nameTextbox, { target: { value: "a".repeat(50) } }); }); expect(nameError).toBeEmpty(); await wait(() => { fireEvent.click(saveButton); }); expect(mockClose).toHaveBeenCalledTimes(1); }); test("edit group", async () => { const group = appStore.groups[0]; groupModalStore.open(group.id); render( <StoreContext.Provider value={appStore}> <GroupForm onClose={mockClose} /> </StoreContext.Provider> ); const newGroup = { name: group.name + " new", column: group.column === 1 ? 
2 : 1, order: group.order + 1, }; const nameTextbox = screen.getByRole("textbox", { name: "form.name" }); expect(nameTextbox).toBeInTheDocument(); const nameError = screen.getByRole("alert", { name: "form.name" }); expect(nameError).toBeInTheDocument(); const saveButton = screen.getByRole("button", { name: "button.save" }); expect(saveButton).toBeInTheDocument(); const leftRadio = screen.getByRole("radio", { name: "form.left" }); expect(leftRadio).toBeInTheDocument(); const rightRadio = screen.getByRole("radio", { name: "form.right" }); expect(rightRadio).toBeInTheDocument(); const orderCombobox = screen.getByRole("combobox", { name: "form.order" }); expect(orderCombobox).toBeInTheDocument(); expect(nameTextbox.value).toBe(group.name); let columnRadio = group.column === 1 ? leftRadio : rightRadio; expect(columnRadio.checked).toBeTruthy(); expect( screen.getByRole("option", { name: group.order.toString() }).selected ).toBeTruthy(); await wait(() => { fireEvent.change(nameTextbox, { target: { value: newGroup.name } }); }); columnRadio = newGroup.column === 1 ? leftRadio : rightRadio; await wait(() => { fireEvent.click(columnRadio); }); await wait(() => { fireEvent.change(orderCombobox, { target: { value: newGroup.order } }); }); await wait(() => { fireEvent.click(saveButton); }); expect(group.name).toBe(newGroup.name); expect(group.column).toBe(newGroup.column); expect(group.order).toBe(newGroup.order); expect(mockClose).toHaveBeenCalledTimes(1); });
/** * @flow */ import loadConfig from '../'; import { cleanup, writeFiles, getTempDirectory, } from '../../../../../../e2e/helpers'; const DIR = getTempDirectory('resolve_config_path_test'); // Removes string from all key/values within an object const removeString = (config, str) => JSON.parse( JSON.stringify(config).replace(new RegExp(str, 'g'), '<<REPLACED>>'), ); beforeEach(() => { cleanup(DIR); jest.resetModules(); }); afterEach(() => cleanup(DIR)); test('should have a valid structure by default', () => { writeFiles(DIR, { 'package.json': `{ "react-native": { "reactNativePath": "." } }`, }); const config = loadConfig(DIR); expect(removeString(config, DIR)).toMatchSnapshot(); }); test('should return dependencies from package.json', () => { writeFiles(DIR, { 'node_modules/react-native-test/package.json': '{}', 'node_modules/react-native-test/ios/HelloWorld.xcodeproj/project.pbxproj': '', 'package.json': `{ "dependencies": { "react-native-test": "0.0.1" }, "react-native": { "reactNativePath": "." } }`, }); const {dependencies} = loadConfig(DIR); expect(removeString(dependencies, DIR)).toMatchSnapshot(); }); test('should read a config of a dependency and use it to load other settings', () => { writeFiles(DIR, { 'node_modules/react-native-test/package.json': `{ "react-native": { "dependency": { "platforms": { "ios": { "project": "./customLocation/customProject.xcodeproj" } } } } }`, 'package.json': `{ "dependencies": { "react-native-test": "0.0.1" }, "react-native": { "reactNativePath": "." } }`, }); const {dependencies} = loadConfig(DIR); expect( removeString(dependencies['react-native-test'], DIR), ).toMatchSnapshot(); }); test('should deep merge project configuration with default values', () => { writeFiles(DIR, { 'node_modules/react-native-test/package.json': '{}', 'node_modules/react-native-test/ios/HelloWorld.xcodeproj/project.pbxproj': '', 'package.json': `{ "dependencies": { "react-native-test": "0.0.1" }, "react-native": { "reactNativePath": ".", "dependencies": { "react-native-test": { "platforms": { "ios": { "sourceDir": "./abc" } } } } } }`, }); const config = loadConfig(DIR); expect(removeString(config, DIR)).toMatchSnapshot(); }); test('should read `rnpm` config from a dependency and transform it to a new format', () => { writeFiles(DIR, { 'node_modules/react-native-foo/package.json': `{ "name": "react-native-foo", "rnpm": { "ios": { "project": "./customLocation/customProject.xcodeproj" } } }`, 'package.json': `{ "dependencies": { "react-native-foo": "0.0.1" }, "react-native": { "reactNativePath": "." } }`, }); const {dependencies} = loadConfig(DIR); expect(removeString(dependencies['react-native-foo'], DIR)).toMatchSnapshot(); }); test('should load commands from "react-native-foo" and "react-native-bar" packages', () => { writeFiles(DIR, { 'node_modules/react-native-foo/package.json': `{ "react-native": { "commands": [ "./command-foo.js" ] } }`, 'node_modules/react-native-bar/package.json': `{ "react-native": { "commands": [ "./command-bar.js" ] } }`, 'package.json': `{ "dependencies": { "react-native-foo": "0.0.1", "react-native-bar": "0.0.1" }, "react-native": { "reactNativePath": "." 
} }`, }); const {commands} = loadConfig(DIR); expect(removeString(commands, DIR)).toMatchSnapshot(); }); test('should load an out-of-tree "windows" platform that ships with a dependency', () => { writeFiles(DIR, { 'node_modules/react-native-windows/platform.js': ` module.exports = {"windows": {}}; `, 'node_modules/react-native-windows/package.json': `{ "name": "react-native-windows", "rnpm": { "haste": { "platforms": [ "windows" ], "providesModuleNodeModules": [ "react-native-windows" ] }, "plugin": "./plugin.js", "platform": "./platform.js" } }`, 'package.json': `{ "dependencies": { "react-native-windows": "0.0.1" }, "react-native": { "reactNativePath": "." } }`, }); const {haste, platforms} = loadConfig(DIR); expect(removeString({haste, platforms}, DIR)).toMatchSnapshot(); });
require('dotenv').config();
const { ApiPromise, WsProvider } = require('@polkadot/api');
const typedefs = require('@phala/typedefs').khalaDev;
const tokenomic = require('./utils/tokenomic');

const khala = {
    // The allowed relay starting hashes for pRuntime to start syncing
    // - 0: The Kusama Genesis
    // - 8325311: The Kusama block right before the first parachain parent block.
    relayGenesisHashes: [
        [0, '0xb0a8d493285c2df73290dfb7e61f870f17b41801197a149ca93654499ea3dafe'],
        [8325311, '0xff93a4a903207ad45af110a3e15f8b66c903a0045f886c528c23fe7064532b08'],
    ],
    // The pRuntime hashes, signed by Phala's Intel SGX IAS key.
    pruntimeHashes: [
        '0x2099244f418ee770f71173f99956c23873f25f65c874082040010dff0a027a8300000000815f42f11cf64430c30bab7816ba596a1da0130c3b028b673133a66cf9a3e0e6',
    ],
    // The worker pubkey of the team-created GK in US
    genesisGatekeeper: '0x60067697c486c809737e50d30a67480c5f0cede44be181b96f7d59bc2116a850',
    // The worker pubkey of the team-created GK in EU
    secondGatekeeper: '0xaa37d91141bb1c0d77467ac66066e3927ece5708eded765e9090a7e1dcef5b2f',
    // The Khala tokenomic parameters. The notable change is to extend the heartbeatWindow from 10 to 20.
    tokenomic: {
        "phaRate": "0.83",
        "rho": "1.000000666600231",
        "budgetPerBlock": "8.3333333333333333333",
        "vMax": "30000",
        "costK": "0.000000019054528616676159186",
        "costB": "0.00004061623205149357237",
        "slashRate": "0",
        "treasuryRatio": "0.2",
        "heartbeatWindow": "20",
        "rigK": "0.36144578313253012048",
        "rigB": "0",
        "re": "1.5",
        "k": "50",
        "kappa": "1"
    },
    enableStakePoolAt: 414189,
};

// Create a council motion proposal with a threshold of 3 (3 of 5).
function propose(api, call) {
    const lengthBound = call.toU8a().length + 10;
    return api.tx.council.propose(3, call, lengthBound);
}

async function main() {
    const wsProvider = new WsProvider(process.env.ENDPOINT);
    const api = await ApiPromise.create({ provider: wsProvider, types: typedefs });

    // We use the Khala parameter now.
    const params = khala;

    // Motion 2:
    const motion2 = api.tx.utility.batchAll([
        // 1. Register the whitelisted pRuntime hashes
        ...params.pruntimeHashes.map(h => api.tx.phalaRegistry.addPruntime(h)),
        // 2. Register the whitelisted relay chain genesis hashes
        ...params.relayGenesisHashes.map(([_n, blockHash]) =>
            api.tx.phalaRegistry.addRelaychainGenesisBlockHash(blockHash)
        ),
    ]);

    // Motion 3:
    const typedP = tokenomic.humanToTyped(api, params.tokenomic);
    const updateTokenomicCall = tokenomic.createUpdateCall(api, typedP);
    const motion3 = api.tx.utility.batchAll([
        // 1. Register the Genesis Gatekeeper (with its registered worker public key)
        api.tx.phalaRegistry.registerGatekeeper(params.genesisGatekeeper),
        // 2. Update the tokenomic
        updateTokenomicCall,
        // 3. Schedule the StakePool pallet to enable mining at the specific block
        api.tx.scheduler.schedule(
            params.enableStakePoolAt, null, 0,
            api.tx.phalaStakePool.setMiningEnable(true)
        ),
    ]);

    // Motion 4:
    // - Register the second Gatekeeper
    const motion4 = api.tx.phalaRegistry.registerGatekeeper(params.secondGatekeeper);

    const proposal2 = propose(api, motion2);
    const proposal3 = propose(api, motion3);
    const proposal4 = propose(api, motion4);

    console.log({
        motion2: proposal2.toHex(),
        motion3: proposal3.toHex(),
        motion4: proposal4.toHex(),
    });

    // Print the decoded proposals
    console.dir({
        call2: proposal2.toHuman(),
        call3: proposal3.toHuman(),
        call4: proposal4.toHuman(),
    }, {depth: 9});
}

main().catch(console.error).finally(() => process.exit());
/*
 * (C) Copyright 2014 Kurento (http://kurento.org/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

var nodeunit = require('nodeunit');

var EventTarget = require('eventtarget');

var RpcBuilder = require("..");
var JsonRpcClient = RpcBuilder.clients.JsonRpcClient;
var packer = RpcBuilder.packers.JsonRPC;

var ws_uri = "ws://localhost:8888/kurento";

const METHOD = 'test';

function noop(error, result) {}

// Declared up front so the callbacks below don't create an implicit global.
var connected = false;

function connectCallback() {
  connected = true;
}

function disconnectCallback() {
  connected = false;
}

exports['encode JsonRPC 2.0'] = {
  setUp: function(callback) {
    this.rpcBuilder = new RpcBuilder(packer);

    callback();
  },

  tearDown: function(callback) {
    this.rpcBuilder.close();

    callback();
  },

  'notification': function(test) {
    test.expect(5);

    var notification = this.rpcBuilder.encode(METHOD);

    test.deepEqual(JSON.parse(notification), {
      jsonrpc: '2.0',
      method: METHOD
    });

    // Test notification
    notification = this.rpcBuilder.decode(notification);

    test.ok(notification instanceof RpcBuilder.RpcNotification);
    test.equal(notification.duplicate, undefined);

    test.equal(notification.method, METHOD);
    test.deepEqual(notification.params, {});

    test.done();
  },

  'request': function(test) {
    test.expect(5);

    var request = this.rpcBuilder.encode(METHOD, noop);

    test.deepEqual(JSON.parse(request), {
      jsonrpc: '2.0',
      method: METHOD,
      id: 0
    });

    // Test request
    request = this.rpcBuilder.decode(request);

    test.ok(request instanceof RpcBuilder.RpcNotification);
    test.equal(request.duplicated, false);

    test.equal(request.method, METHOD);
    test.deepEqual(request.params, {});

    test.done();
  },

  'request timeout': function(test) {
    test.expect(2);

    var request = this.rpcBuilder.encode(METHOD, function(error, result) {
      test.notEqual(error, undefined);
      test.deepEqual(error.request, request);

      test.done();
    });
  },

  'request timeout and retry': function(test) {
    var self = this;

    test.expect(4);

    var gotError = false;

    var request = this.rpcBuilder.encode(METHOD, function(error, result) {
      if (!gotError) {
        gotError = true;

        test.notEqual(error, undefined);
        test.deepEqual(error.request, request);

        var request2 = error.retry();

        test.deepEqual(request2, request);

        // Process request on 'server'
        request2 = self.rpcBuilder.decode(request2);
        var response = request2.reply();

        // Process response by 'client'
        self.rpcBuilder.decode(response);
      } else {
        test.equal(error, undefined);

        test.done();
      }
    });
  },

  'cancel request': function(test) {
    test.expect(0);

    var request = this.rpcBuilder.encode(METHOD, function(error, result) {
      test.ifError(error);
    });

    this.rpcBuilder.cancel(request);

    setTimeout(function() {
      test.done();
    }, 6*1000);
  },

  'duplicated request': function(test) {
    test.expect(3);

    var request = this.rpcBuilder.encode(METHOD, noop);

    // Test request
    var request1 = this.rpcBuilder.decode(request);
    test.equal(request1.duplicated, false);

    var reply1 = request1.reply(null, null);

    var request2 = this.rpcBuilder.decode(request);
    test.equal(request2.duplicated, true);

    var reply2 = request2.reply();

    test.deepEqual(reply1, reply2);

    test.done();
  },

  'duplicated request with transport':
function(test) { test.expect(2); var request = this.rpcBuilder.encode(METHOD, noop); // Test request var request1 = this.rpcBuilder.decode(request); test.equal(request1.duplicated, false); var reply1 = request1.reply(null, null); var request2 = this.rpcBuilder.decode(request, function(reply2) { test.deepEqual(reply1, reply2); test.done(); }); test.equal(request2, undefined); }, 'override duplicated request': function(test) { test.expect(4); var request = this.rpcBuilder.encode(METHOD, noop); // Test request var request1 = this.rpcBuilder.decode(request); test.equal(request1.duplicated, false); var reply1 = request1.reply(null, null); var request2 = this.rpcBuilder.decode(request); test.equal(request2.duplicated, true); var reply2 = request2.reply(null, 'ok'); test.equal(JSON.parse(reply1).result, null); test.equal(JSON.parse(reply2).result, 'ok'); test.done(); }, 'response': function(test) { test.expect(2); var request = this.rpcBuilder.encode(METHOD, function(error, result) { test.equal(result, null); }); // Compose response manually from the request var response = JSON.parse(request); delete response.method; response.result = null; response = JSON.stringify(response); // Test response response = this.rpcBuilder.decode(response); test.equal(response, undefined); test.done(); }, 'duplicate response': function(test) { test.expect(3); var request = this.rpcBuilder.encode(METHOD, function(error, result) { test.equal(result, null); }); // Compose response manually from the request var response = JSON.parse(request); delete response.method; response.result = null; response = JSON.stringify(response); // Test response var result = this.rpcBuilder.decode(response); test.equal(result, undefined); // Ignored response var result = this.rpcBuilder.decode(response); test.equal(result, undefined); test.done(); }, 'request reply response': function(test) { test.expect(3); var value = {'asdf': 'qwert'}; var request = this.rpcBuilder.encode(METHOD, function(error, result) { test.deepEqual(result, value); }); // Response request request = this.rpcBuilder.decode(request); var response = request.reply(null, value); // Test response message test.deepEqual(JSON.parse(response), { jsonrpc: '2.0', result: value, id: 0 }); response = this.rpcBuilder.decode(response); // Test response as processed test.equal(response, undefined); test.done(); }, 'reply with transport': function(test) { test.expect(4); var self = this; var value = {'asdf': 'qwert'}; var request = this.rpcBuilder.encode(METHOD, function(error, result) { test.deepEqual(result, value); }); // Response request request = this.rpcBuilder.decode(request); var response = request.reply(null, value, function(message) { // Test response message test.deepEqual(JSON.parse(message), { jsonrpc: '2.0', result: value, id: 0 }); message = self.rpcBuilder.decode(message); // Test response as processed test.equal(message, undefined); }); // Test response as send by reply transport test.equal(response, undefined); test.done(); }, 'decode with transport': function(test) { test.expect(4); var self = this; var value = {'asdf': 'qwert'}; var request = this.rpcBuilder.encode(METHOD, function(error, result) { test.deepEqual(result, value); }); // Response request request = this.rpcBuilder.decode(request, function(message) { // Test response message test.deepEqual(JSON.parse(message), { jsonrpc: '2.0', result: value, id: 0 }); message = self.rpcBuilder.decode(message); // Test response as processed test.equal(message, undefined); }); var response = request.reply(null, value); 
// Test response as send by reply transport test.equal(response, undefined); test.done(); }, 'transport with message event': function(test) { test.expect(2); var self = this; var value = {'asdf': 'qwert'}; var transport = new EventTarget; transport.onmessage = null; transport.send = function(message) { message = JSON.parse(message); var event = { type: 'message', data: JSON.stringify( { jsonrpc: '2.0', result: message.params, id: 0 }) }; this.dispatchEvent(event); }; this.rpcBuilder.transport = transport; var request = this.rpcBuilder.encode(METHOD, value, function(error, result) { test.ifError(error); test.deepEqual(result, value); test.done(); }); // Test response as send by reply transport test.equal(request, undefined); }, 'request event': function(test) { test.expect(1); var transport = new EventTarget; transport.onmessage = null; this.rpcBuilder.transport = transport; this.rpcBuilder.on('request', function(request) { test.deepEqual(request.method, METHOD); test.done(); }); var event = { type: 'message', data: JSON.stringify( { jsonrpc: '2.0', method: METHOD }) }; transport.dispatchEvent(event); }, 'create JsonRpcClientWs with WS': function(test) { test.expect(1); var configuration = { sendCloseMessage : false, ws : { uri : ws_uri, useSockJS: false, onconnected : connectCallback, ondisconnect : disconnectCallback, onreconnecting : disconnectCallback, onreconnected : connectCallback }, rpc : { requestTimeout : 15000 } }; var jsonRpcClientWs = new JsonRpcClient(configuration); test.ok(jsonRpcClientWs instanceof JsonRpcClient); setTimeout(function() { jsonRpcClientWs.close(); test.done(); }, 4*1000) } };
/** * grunt/pipeline.js * * The order in which your css, javascript, and template files should be * compiled and linked from your views and static HTML files. * * (Note that you can take advantage of Grunt-style wildcard/glob/splat expressions * for matching multiple files.) */ // CSS files to inject in order // // (if you're using LESS with the built-in default config, you'll want // to change `assets/styles/importer.less` instead.) var cssFilesToInject = [ 'vendor/**/*.css', 'styles/**/*.css' ]; // Client-side javascript files to inject in order // (uses Grunt-style wildcard/glob/splat expressions) var jsFilesToInject = [ // Load sails.io before everything else 'js/dependencies/sails.io.js', // bower's vendor 'vendor/angular/angular.js', 'vendor/angular-route/angular-route.js', 'vendor/jquery/jquery.js', 'vendor/**/*.js', // Dependencies like jQuery, or Angular are brought in here 'js/dependencies/**/*.js', // All of the rest of your client-side js files // will be injected here in no particular order. 'js/**/*.js', // For angular 'app.js', 'ng-components/**/*.js', 'ng-views/**/*.js' ]; // Client-side HTML templates are injected using the sources below // The ordering of these templates shouldn't matter. // (uses Grunt-style wildcard/glob/splat expressions) // // By default, Sails uses JST templates and precompiles them into // functions for you. If you want to use jade, handlebars, dust, etc., // with the linker, no problem-- you'll just want to make sure the precompiled // templates get spit out to the same file. Be sure and check out `tasks/README.md` // for information on customizing and installing new tasks. var templateFilesToInject = [ 'templates/**/*.html' ]; // Prefix relative paths to source files so they point to the proper locations // (i.e. where the other Grunt tasks spit them out, or in some cases, where // they reside in the first place) module.exports.cssFilesToInject = cssFilesToInject.map(function(path) { return '.tmp/public/' + path; }); module.exports.jsFilesToInject = jsFilesToInject.map(function(path) { return '.tmp/public/' + path; }); module.exports.templateFilesToInject = templateFilesToInject.map(function(path) { return 'assets/' + path; });
/* * luminateExtend.js * Version: 1.6 (28-JAN-2014) * Requires: jQuery v1.5.1+ or Zepto v1.1+ * Includes: SimpleDateFormatJS v1.3 (https://github.com/noahcooper/SimpleDateFormatJS) */ (function($) { /* private helper functions */ var validateLocale = function(locale) { /* if a locale is provided that is not supported, default to "en_US" */ if(locale && $.inArray(locale, ['es_US', 'en_CA', 'fr_CA', 'en_GB', 'en_AU']) < 0) { locale = 'en_US'; } return locale; }, setLocale = function(locale) { if(locale) { locale = validateLocale(locale); luminateExtend.sessionVars.set('locale', locale); } return locale; }, buildServerUrl = function(useHTTPS, data) { return (useHTTPS ? (luminateExtend.global.path.secure + 'S') : luminateExtend.global.path.nonsecure) + 'PageServer' + (luminateExtend.global.sessionCookie ? (';' + luminateExtend.global.sessionCookie) : '') + '?pagename=luminateExtend_server&pgwrap=n' + (data ? ('&' + data) : ''); }, apiCallbackHandler = function(requestSettings, responseData) { if(requestSettings.responseFilter && requestSettings.responseFilter.array && requestSettings.responseFilter.filter) { if(luminateExtend.utils.stringToObj(requestSettings.responseFilter.array, responseData)) { var filterKey = requestSettings.responseFilter.filter.split('==')[0].split('!=')[0].replace(/^\s+|\s+$/g, ''), filterOperator, filterValue; if(requestSettings.responseFilter.filter.indexOf('!=') != -1) { filterOperator = 'nequal'; filterValue = requestSettings.responseFilter.filter.split('!=')[1]; } else if(requestSettings.responseFilter.filter.indexOf('==') != -1) { filterOperator = 'equal'; filterValue = requestSettings.responseFilter.filter.split('==')[1]; } if(filterOperator && filterValue) { filterValue = filterValue.replace(/^\s+|\s+$/g, ''); var filteredArray = [], arrayIsFiltered = false; $.each(luminateExtend.utils.ensureArray(luminateExtend.utils.stringToObj(requestSettings.responseFilter.array, responseData)), function() { if((filterOperator == 'nequal' && this[filterKey] == filterValue) || (filterOperator == 'equal' && this[filterKey] != filterValue)) { arrayIsFiltered = true; } else { filteredArray.push(this); } }); if(arrayIsFiltered) { var filterArrayParts = requestSettings.responseFilter.array.split('.'); $.each(responseData, function(i, val0) { if(i == filterArrayParts[0]) { $.each(val0, function(j, val1) { if(j == filterArrayParts[1]) { if(filterArrayParts.length == 2) { responseData[i][j] = filteredArray; } else { $.each(val1, function(k, val2) { if(k == filterArrayParts[2]) { if(filterArrayParts.length == 3) { responseData[i][j][k] = filteredArray; } else { $.each(val2, function(l, val3) { if(l == filterArrayParts[3] && filterArrayParts.length == 4) { responseData[i][j][k][l] = filteredArray; } }); } } }); } } }); } }); } } } } var callbackFn = $.noop; if(requestSettings.callback) { if(typeof requestSettings.callback === 'function') { callbackFn = requestSettings.callback; } else if(requestSettings.callback.error && responseData.errorResponse) { callbackFn = requestSettings.callback.error; } else if(requestSettings.callback.success && !responseData.errorResponse) { callbackFn = requestSettings.callback.success; } } if(!((requestSettings.data.indexOf('&method=login') != -1 && requestSettings.data.indexOf('&method=loginTest') == -1) || requestSettings.data.indexOf('&method=logout') != -1)) { callbackFn(responseData); } /* get a new auth token after login or logout */ else { var newAuthCallback = function() { callbackFn(responseData); }; luminateExtend.api.getAuth({ callback: 
newAuthCallback, useCache: false, useHTTPS: requestSettings.useHTTPS }); } }; /* library core */ window.luminateExtend = function(initOptions) { /* make luminateExtend an alias for the init method if called directly */ luminateExtend.init(initOptions || {}); }; /* library info */ luminateExtend.library = { version: '1.6' }; /* global settings */ luminateExtend.global = { update: function(settingName, settingValue) { if(settingName) { if(settingName.length) { if(settingValue) { if(settingName == 'locale') { settingValue = setLocale(settingValue); } luminateExtend.global[settingName] = settingValue; } } else { if(settingName.locale) { settingName.locale = setLocale(settingName.locale); } luminateExtend.global = $.extend(luminateExtend.global, settingName); } } } }; /* init library */ luminateExtend.init = function(options) { var settings = $.extend({ apiCommon: {}, auth: { type: 'auth' }, path: {} }, options || {}); if(settings.locale) { settings.locale = validateLocale(settings.locale); } /* check if the browser supports CORS and the withCredentials property */ settings.supportsCORS = false; if(window.XMLHttpRequest) { var testXHR = new XMLHttpRequest(); if('withCredentials' in testXHR) { settings.supportsCORS = true; } } luminateExtend.global = $.extend(luminateExtend.global, settings); return luminateExtend; }; /* api */ luminateExtend.api = function(requestOptions) { /* make luminateExtend.api an alias for the request method if called directly */ luminateExtend.api.request(requestOptions || {}); }; luminateExtend.api.bind = function(selector) { selector = selector || 'form.luminateApi'; if($(selector).length > 0) { $(selector).each(function() { if(this.nodeName.toLowerCase() == 'form') { $(this).bind('submit', function(e) { e.cancelBubble = true; e.returnValue = false; if(e.stopPropagation) { e.stopPropagation(); e.preventDefault(); } if(!$(this).attr('id')) { $(this).attr('id', 'luminateApi-' + new Date().getTime()); } var formAction = $(this).attr('action'), formActionQuery = formAction.split('?'), formApiData = $(this).data('luminateapi'), requestApi = (formActionQuery[0].indexOf('/site/') != -1) ? formActionQuery[0].split('/site/')[1] : formActionQuery[0], requestCallback, requestContentType = $(this).attr('enctype'), requestData = (formActionQuery.length > 1) ? 
formActionQuery[1] : '', requestForm = '#' + $(this).attr('id'), requestRequiresAuth = false, requestType = $(this).attr('method'), requestUseHTTPS = false; if(formApiData) { if(formApiData.callback) { requestCallback = luminateExtend.utils.stringToObj(formApiData.callback); } if(formApiData.requiresAuth && formApiData.requiresAuth == 'true') { requestRequiresAuth = true; } if(formAction.indexOf('https:') == 0 || (window.location.protocol == 'https:' && formAction.indexOf('http') == -1)) { requestUseHTTPS = true; } } luminateExtend.api.request({ api: requestApi, callback: requestCallback, contentType: requestContentType, data: requestData, form: requestForm, requestType: requestType, requiresAuth: requestRequiresAuth, useHTTPS: requestUseHTTPS }); }); } }); } return luminateExtend; }; luminateExtend.api.getAuth = function(options) { var settings = $.extend({ useCache: true, useHTTPS: false }, options || {}); /* don't try to get an auth token if there's already a request outstanding */ if(luminateExtend.api.getAuthLoad) { luminateExtend.api.getAuthLoad = false; if(settings.useCache && luminateExtend.global.auth.type && luminateExtend.global.auth.token) { luminateExtend.api.getAuthLoad = true; if(settings.callback) { settings.callback(); } } else { var getAuthCallback = function(globalData) { luminateExtend.global.update(globalData); luminateExtend.api.getAuthLoad = true; if(settings.callback) { settings.callback(); } }; if(luminateExtend.global.supportsCORS) { $.ajax({ data: 'luminateExtend=' + luminateExtend.library.version + '&api_key=' + luminateExtend.global.apiKey + '&method=getLoginUrl&response_format=json&v=1.0', dataType: 'json', success: function(data) { getAuthCallback({ auth: { type: 'auth', token: data.getLoginUrlResponse.token }, sessionCookie: data.getLoginUrlResponse.url.split(';')[1] }); }, url: (settings.useHTTPS ? luminateExtend.global.path.secure : luminateExtend.global.path.nonsecure) + 'CRConsAPI', xhrFields: { withCredentials: false } }); } else { $.ajax({ dataType: 'jsonp', success: getAuthCallback, url: buildServerUrl(settings.useHTTPS, 'action=getAuth&callback=?') }); } } } else { var retryGetAuth = function() { luminateExtend.api.getAuth(settings); }, t = setTimeout(retryGetAuth, 1000); } }; luminateExtend.api.getAuthLoad = true; var sendRequest = function(options) { var settings = $.extend({ contentType: 'application/x-www-form-urlencoded', data: '', requestType: 'GET', requiresAuth: false, useHashTransport: false, useHTTPS: null }, options || {}); var servletShorthand = ['addressbook', 'advocacy', 'connect', 'cons', 'content', 'datasync', 'donation', 'email', 'group', 'orgevent', 'recurring', 'survey', 'teamraiser']; if($.inArray(settings.api.toLowerCase(), servletShorthand) >= 0) { /* add "CR", capitalize the first letter, and add "API" */ settings.api = 'CR' + settings.api.charAt(0).toUpperCase() + settings.api.slice(1).toLowerCase() + 'API'; /* special cases where a letter in the middle of the servlet name needs to be capitalized */ settings.api = settings.api.replace('Addressbook', 'AddressBook') .replace('Datasync', 'DataSync') .replace('Orgevent', 'OrgEvent'); } /* don't make the request unless we have all the required data */ if(luminateExtend.global.path.nonsecure && luminateExtend.global.path.secure && luminateExtend.global.apiKey && settings.api) { if(settings.contentType != 'multipart/form-data') { settings.contentType = 'application/x-www-form-urlencoded'; } settings.data = 'luminateExtend=' + luminateExtend.library.version + ((settings.data == '') ? 
'' : ('&' + settings.data)); if(settings.form && $(settings.form).length > 0) { settings.data += '&' + $(settings.form).eq(0).serialize(); } if(settings.data.indexOf('&api_key=') == -1) { settings.data += '&api_key=' + luminateExtend.global.apiKey; } if(luminateExtend.global.apiCommon.centerId && settings.data.indexOf('&center_id=') == -1) { settings.data += '&center_id=' + luminateExtend.global.apiCommon.centerId; } if(luminateExtend.global.apiCommon.categoryId && settings.data.indexOf('&list_category_id=') == -1) { settings.data += '&list_category_id=' + luminateExtend.global.apiCommon.categoryId; } if(settings.data.indexOf('&response_format=xml') != -1) { settings.data = settings.data.replace(/&response_format=xml/g, '&response_format=json'); } else if(settings.data.indexOf('&response_format=') == -1) { settings.data += '&response_format=json'; } if(luminateExtend.global.apiCommon.source && settings.data.indexOf('&source=') == -1) { settings.data += '&source=' + luminateExtend.global.apiCommon.source; } if(luminateExtend.global.apiCommon.subSource && settings.data.indexOf('&sub_source=') == -1) { settings.data += '&sub_source=' + luminateExtend.global.apiCommon.subSource; } if(settings.data.indexOf('&suppress_response_codes=') == -1) { settings.data += '&suppress_response_codes=true'; } if(luminateExtend.global.locale && settings.data.indexOf('&s_locale=') == -1) { settings.data += '&s_locale=' + luminateExtend.global.locale; } if(settings.data.indexOf('&v=') == -1) { settings.data += '&v=1.0'; } settings.requestType = settings.requestType.toLowerCase() === 'post' ? 'POST' : 'GET'; var requestUrl = 'http://', requestPath = luminateExtend.global.path.nonsecure.split('http://')[1]; if(settings.api == 'CRDonationAPI' || settings.api == 'CRTeamraiserAPI' || (settings.api != 'CRConnectAPI' && ((window.location.protocol == 'https:' && settings.useHTTPS == null) || settings.useHTTPS == true))) { settings.useHTTPS = true; } else { settings.useHTTPS = false; } if(settings.useHTTPS) { requestUrl = 'https://', requestPath = luminateExtend.global.path.secure.split('https://')[1]; } requestUrl += requestPath + settings.api; var isLuminateOnlineAndSameProtocol = false, useAjax = false, usePostMessage = false; if(window.location.protocol == requestUrl.split('//')[0] && document.domain == requestPath.split('/')[0] && !settings.useHashTransport) { isLuminateOnlineAndSameProtocol = true, useAjax = true; } else { if(luminateExtend.global.supportsCORS && !settings.useHashTransport) { useAjax = true; } else if('postMessage' in window && !settings.useHashTransport) { usePostMessage = true; } } var doRequest; if(useAjax) { doRequest = function() { if(settings.requiresAuth && settings.data.indexOf('&' + luminateExtend.global.auth.type + '=') == -1) { settings.data += '&' + luminateExtend.global.auth.type + '=' + luminateExtend.global.auth.token; } if(luminateExtend.global.sessionCookie) { requestUrl += ';' + luminateExtend.global.sessionCookie; } settings.data += '&ts=' + new Date().getTime(); $.ajax({ contentType: settings.contentType, data: settings.data, /* set dataType explicitly as API sends Content-Type: text/plain rather than application/json (E-62659) */ dataType: 'json', success: function(data) { apiCallbackHandler(settings, data); }, type: settings.requestType, url: requestUrl, xhrFields: { withCredentials: false } }); }; } else if(usePostMessage) { doRequest = function() { var postMessageTimestamp = new Date().getTime(), postMessageFrameId = 'luminateApiPostMessage' + postMessageTimestamp, 
postMessageUrl = buildServerUrl(settings.useHTTPS, 'action=postMessage'); if(settings.requiresAuth && settings.data.indexOf('&' + luminateExtend.global.auth.type + '=') == -1) { settings.data += '&' + luminateExtend.global.auth.type + '=' + luminateExtend.global.auth.token; } settings.data += '&ts=' + postMessageTimestamp; if(!luminateExtend.api.request.postMessageEventHandler) { luminateExtend.api.request.postMessageEventHandler = {}; luminateExtend.api.request.postMessageEventHandler.handler = function(e) { if(luminateExtend.global.path.nonsecure.indexOf(e.origin) != -1 || luminateExtend.global.path.secure.indexOf(e.origin) != -1) { var parsedData = $.parseJSON(e.data), messageFrameId = parsedData.postMessageFrameId, responseData = $.parseJSON(decodeURIComponent(parsedData.response)); if(luminateExtend.api.request.postMessageEventHandler[messageFrameId]) { luminateExtend.api.request.postMessageEventHandler[messageFrameId](messageFrameId, responseData); } } }; if(typeof window.addEventListener != 'undefined') { window.addEventListener('message', luminateExtend.api.request.postMessageEventHandler.handler, false); } else if(typeof window.attachEvent != 'undefined') { window.attachEvent('onmessage', luminateExtend.api.request.postMessageEventHandler.handler); } } luminateExtend.api.request.postMessageEventHandler[postMessageFrameId] = function(frameId, data) { apiCallbackHandler(settings, data); $('#' + frameId).remove(); delete luminateExtend.api.request.postMessageEventHandler[frameId]; }; $('body').append('<iframe style="position: absolute; top: 0; left: -999em;" ' + 'name="' + postMessageFrameId + '" id="' + postMessageFrameId + '">' + '</iframe>'); $('#' + postMessageFrameId).bind('load', function() { var postMessageString = '{' + '"postMessageFrameId": "' + $(this).attr('id') + '", ' + '"requestUrl": "' + requestUrl + '", ' + '"requestContentType": "' + settings.contentType + '", ' + '"requestData": "' + settings.data + '", ' + '"requestType": "' + settings.requestType + '"' + '}', postMessageOrigin = requestUrl.split('/site/')[0].split('/admin/')[0]; document.getElementById($(this).attr('id')).contentWindow .postMessage(postMessageString, postMessageOrigin); }); $('#' + postMessageFrameId).attr('src', postMessageUrl); }; } else { doRequest = function() { var hashTransportTimestamp = new Date().getTime(), hashTransportFrameId = 'luminateApiHashTransport' + hashTransportTimestamp, hashTransportUrl = buildServerUrl(settings.useHTTPS, 'action=hashTransport'), hashTransportClientUrl = window.location.protocol + '//' + document.domain + '/luminateExtend_client.html'; if(settings.requiresAuth && settings.data.indexOf('&' + luminateExtend.global.auth.type + '=') == -1) { settings.data += '&' + luminateExtend.global.auth.type + '=' + luminateExtend.global.auth.token; } settings.data += '&ts=' + hashTransportTimestamp; hashTransportUrl += '#&hashTransportClientUrl=' + encodeURIComponent(hashTransportClientUrl) + '&hashTransportFrameId=' + hashTransportFrameId + '&requestUrl=' + encodeURIComponent(requestUrl) + '&requestContentType=' + encodeURIComponent(settings.contentType) + '&requestData=' + encodeURIComponent(settings.data) + '&requestType=' + settings.requestType; if(!luminateExtend.api.request.hashTransportEventHandler) { luminateExtend.api.request.hashTransportEventHandler = {}; luminateExtend.api.request.hashTransportEventHandler.handler = function(frameId, data) { if(luminateExtend.api.request.hashTransportEventHandler[frameId]) { 
luminateExtend.api.request.hashTransportEventHandler[frameId](frameId, data); } }; } luminateExtend.api.request.hashTransportEventHandler[hashTransportFrameId] = function(frameId, data) { apiCallbackHandler(settings, data); $('#' + frameId).remove(); delete luminateExtend.api.request.hashTransportEventHandler[frameId]; }; $('body').append('<iframe style="position: absolute; top: 0; left: -999em;" ' + 'name="' + hashTransportFrameId + '" id="' + hashTransportFrameId + '" ' + 'src="' + hashTransportUrl + '"></iframe>'); }; } if(settings.requiresAuth || (!useAjax && !isLuminateOnlineAndSameProtocol && !luminateExtend.global.sessionCookie)) { luminateExtend.api.getAuth({ callback: doRequest, useHTTPS: settings.useHTTPS }); } else { doRequest(); } } }; luminateExtend.api.request = function(requests) { /* check for single requests */ if(!$.isArray(requests)) { sendRequest(requests); } else { requests.reverse(); var asyncRequests = []; /* check for synchronous requests */ $.each(requests, function(requestInverseIndex) { var requestSettings = $.extend({ async: true }, this); if(!requestSettings.async && requestInverseIndex != requests.length - 1) { var prevRequest = requests[requestInverseIndex + 1]; if(prevRequest.callback && typeof prevRequest.callback != 'function') { var oCallbackSuccess = prevRequest.callback.success || $.noop; prevRequest.callback.success = function(response) { oCallbackSuccess(response); sendRequest(requestSettings); }; } else { var prevRequest = requests[requestInverseIndex + 1], oCallbackFn = prevRequest.callback || $.noop; prevRequest.callback = { success: function(response) { oCallbackFn(response); sendRequest(requestSettings); }, error: function(response) { oCallbackFn(response); } }; } } else { asyncRequests.push(requestSettings); } }); /* make each asynchronous request */ asyncRequests.reverse(); $.each(asyncRequests, function() { sendRequest(this); }); } }; /* session variables */ luminateExtend.sessionVars = { set: function(varName, varValue, callback) { var pingOptions = {}; if(callback) { pingOptions.callback = callback; } if(varName) { pingOptions.data = 's_' + varName + '=' + (varValue || ''); luminateExtend.utils.ping(pingOptions); } } }; /* luminate tags */ luminateExtend.tags = function(tagTypes, selector) { /* make luminateExtend.tags an alias for the parse method if called directly */ luminateExtend.tags.parse(tagTypes, selector); }; luminateExtend.tags.parse = function(tagTypes, selector) { /* use the widgets plugin if available */ if(luminateExtend.widgets) { luminateExtend.widgets(tagTypes, selector); } else { if(!tagTypes || tagTypes == 'all') { tagTypes = ['cons']; } else { tagTypes = luminateExtend.utils.ensureArray(tagTypes); } selector = selector || 'body'; $.each(tagTypes, function(i, tagType) { if(tagType == 'cons') { var $consTags = $(selector).find(document.getElementsByTagName('luminate:cons')); if($consTags.length > 0) { var parseConsTags = function(data) { $consTags.each(function() { if(data.getConsResponse) { $(this).replaceWith(luminateExtend.utils.stringToObj($(this).attr('field'), data.getConsResponse)); } else { $(this).remove(); } }); }; luminateExtend.api.request({ api: 'cons', callback: parseConsTags, data: 'method=getUser', requestType: 'POST', requiresAuth: true }); } } }); } }; /* public helper functions */ luminateExtend.utils = { /* ensure an object is an array so it may be iterated over, i.e. 
using $.each(), as the API uses an array if there are 2 or more instances of an object, but does not use an array if there is exactly 1 (E-47741) */ ensureArray: function(pArray) { if($.isArray(pArray)) { return pArray; } else if(pArray) { return [pArray]; } else { return []; } }, stringToObj: function(str, obj) { var objReturn = obj || window; if(str) { var strParts = str.split('.'); for(var i = 0; i < strParts.length; i++) { if(i < (strParts.length - 1) && !objReturn[strParts[i]]) { return {}; } objReturn = objReturn[strParts[i]]; } } return objReturn; }, ping: function(options) { var settings = $.extend({ data: null }, options || {}); var pingImgId = 'luminatePing' + new Date().getTime(); $('body').append('<img style="position: absolute; left: -999em; top: 0;" ' + 'id="' + pingImgId + '" />'); $('#' + pingImgId).bind('load', function() { $(this).remove(); if(settings.callback) { settings.callback(); } }); $('#' + pingImgId).attr('src', ((window.location.protocol == 'https:') ? luminateExtend.global.path.secure : luminateExtend.global.path.nonsecure) + 'EstablishSession?' + ((settings.data == null) ? '' : (settings.data + '&')) + 'NEXTURL=' + encodeURIComponent(((window.location.protocol == 'https:') ? luminateExtend.global.path.secure : luminateExtend.global.path.nonsecure) + 'PixelServer')); }, simpleDateFormat: function(unformattedDate, pattern, locale) { locale = locale || luminateExtend.global.locale; locale = validateLocale(locale); pattern = pattern || (($.inArray(locale, ['en_CA', 'fr_CA', 'en_GB', 'en_AU']) >= 0) ? 'd/M/yy' : 'M/d/yy'); unformattedDate = unformattedDate || new Date(); if(!(unformattedDate instanceof Date)) { var unformattedDateParts = unformattedDate.split('T')[0].split('-'), unformattedDateTimeParts = (unformattedDate.split('T').length > 1) ? unformattedDate.split('T')[1] .split('.')[0] .split('Z')[0] .split('-')[0] .split(':') : ['00', '00', '00']; unformattedDate = new Date(unformattedDateParts[0], (unformattedDateParts[1] - 1), unformattedDateParts[2], unformattedDateTimeParts[0], unformattedDateTimeParts[1], unformattedDateTimeParts[2]); } var oneDigitNumber = function(num) { num = '' + num; return (num.indexOf('0') == 0 && num != '0') ? num.substring(1) : num; }, twoDigitNumber = function(num) { num = Number(num); return (isNaN(num)) ? '00' : (((num < 10) ? 
'0' : '') + num); }, dateParts = { month: twoDigitNumber(unformattedDate.getMonth() + 1), date: twoDigitNumber(unformattedDate.getDate()), year: twoDigitNumber(unformattedDate.getFullYear()), day: unformattedDate.getDay(), hour24: unformattedDate.getHours(), hour12: unformattedDate.getHours(), minutes: twoDigitNumber(unformattedDate.getMinutes()), ampm: 'AM' }; if(dateParts.hour24 > 11) { dateParts.ampm = 'PM'; } dateParts.hour24 = twoDigitNumber(dateParts.hour24); if(dateParts.hour12 == 0) { dateParts.hour12 = 12; } if(dateParts.hour12 > 12) { dateParts.hour12 = dateParts.hour12 - 12; } dateParts.hour12 = twoDigitNumber(dateParts.hour12); var formattedDate, patternReplace = function(patternPart) { var patternPartFormatted = patternPart.replace(/yy+(?=y)/g, 'yy') .replace(/MMM+(?=M)/g, 'MMM') .replace(/d+(?=d)/g, 'd') .replace(/EEE+(?=E)/g, 'EEE') .replace(/a+(?=a)/g, '') .replace(/k+(?=k)/g, 'k') .replace(/h+(?=h)/g, 'h') .replace(/m+(?=m)/g, 'm'), formattedPart = patternPartFormatted.replace(/yyy/g, dateParts.year) .replace(/yy/g, dateParts.year.substring(2)) .replace(/y/g, dateParts.year) .replace(/dd/g, dateParts.date) .replace(/d/g, oneDigitNumber(dateParts.date)), adjustTimePattern = function(timeParts, timePatternPart, operator) { for(var i = 1; i < timeParts.length; i++) { if(!isNaN(timeParts[i].substring(0, 1))) { var timePartOperand = timeParts[i].substring(0, 2); timeParts[i] = timeParts[i].substring(2); if(isNaN(timePartOperand.substring(1))) { timeParts[i] = timePartOperand.substring(1) + timeParts[i]; timePartOperand = timePartOperand.substring(0, 1); } timePartOperand = Number(timePartOperand); if(timePartOperand > 23) { timePartOperand = 23; } var timePartResult = (operator == '+') ? timePartOperand : (0 - timePartOperand); if(timePatternPart == 'kk' || timePatternPart == 'k') { timePartResult = (Number(dateParts.hour24) + timePartResult); if(timePartResult > 24) { timePartResult = timePartResult - 24; } else if(timePartResult < 0) { timePartResult = timePartResult + 24; } } else { timePartResult = (Number(dateParts.hour12) + timePartResult); if(timePartResult > 24) { timePartResult = timePartResult - 24; } else if(timePartResult < 0) { timePartResult = timePartResult + 24; } if(timePartResult > 12) { timePartResult = timePartResult - 12; } } timePartResult = '' + timePartResult; if(timePatternPart == 'kk' || timePatternPart == 'hh') { timePartResult = twoDigitNumber(timePartResult); } if((timePatternPart == 'h' && timePartResult == 0) || (timePatternPart == 'hh' && timePartResult == '00')) { timePartResult = '12'; } timeParts[i] = timePartResult + timeParts[i]; } } return timeParts.join(''); }; if(formattedPart.indexOf('k+') != -1) { formattedPart = adjustTimePattern(formattedPart.split('kk+'), 'kk', '+'); formattedPart = adjustTimePattern(formattedPart.split('k+'), 'k', '+'); } if(formattedPart.indexOf('k-') != -1) { formattedPart = adjustTimePattern(formattedPart.split('kk-'), 'kk', '-'); formattedPart = adjustTimePattern(formattedPart.split('k-'), 'k', '-'); } formattedPart = formattedPart.replace(/kk/g, dateParts.hour24) .replace(/k/g, oneDigitNumber(dateParts.hour24)); if(formattedPart.indexOf('h+') != -1) { formattedPart = adjustTimePattern(formattedPart.split('hh+'), 'hh', '+'); formattedPart = adjustTimePattern(formattedPart.split('h+'), 'h', '+'); } if(formattedPart.indexOf('h-') != -1) { formattedPart = adjustTimePattern(formattedPart.split('hh-'), 'hh', '-'); formattedPart = adjustTimePattern(formattedPart.split('h-'), 'h', '-'); } formattedPart = 
formattedPart.replace(/hh/g, ((dateParts.hour12 < 12 && dateParts.hour12.indexOf && dateParts.hour12.indexOf('0') != 0) ? ('0' + dateParts.hour12) : dateParts.hour12)) .replace(/h/g, oneDigitNumber(dateParts.hour12)); formattedPart = formattedPart.replace(/mm/g, dateParts.minutes) .replace(/m/g, oneDigitNumber(dateParts.minutes)); formattedPart = formattedPart.replace(/a/g, 'A'); var formattedMonthNames = ['January', 'February', 'march', 'april', 'may', 'June', 'July', 'august', 'September', 'October', 'November', 'December']; if(locale == 'es_US') { formattedMonthNames = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre']; } if(locale == 'fr_CA') { formattedMonthNames = ['janvier', 'f&' + '#233;vrier', 'mars', 'avril', 'mai', 'juin', 'juillet', 'ao&' + '#251;t', 'septembre', 'octobre', 'novembre', 'd&' + '#233;cembre']; } formattedPart = formattedPart.replace(/MMMM/g, formattedMonthNames[Number(dateParts.month) - 1]) .replace(/MMM/g, formattedMonthNames[Number(dateParts.month) - 1] .substring(0, 3)) .replace(/MM/g, dateParts.month) .replace(/M/g, oneDigitNumber(dateParts.month)) .replace(/march/g, 'March') .replace(/may/g, 'May') .replace(/Mayo/g, 'mayo'); var formattedDayNames = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']; if(locale == 'es_US') { formattedDayNames = ['domingo', 'lunes', 'martes', 'mi&' + 'eacute;rcoles', 'jueves', 'viernes', 's&' + 'aacute;bado']; } if(locale == 'fr_CA') { formattedDayNames = ['dimanche', 'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi']; } formattedPart = formattedPart.replace(/EEEE/g, formattedDayNames[dateParts.day]) .replace(/EEE/g, formattedDayNames[dateParts.day].substring(0, 3)) .replace(/EE/g, formattedDayNames[dateParts.day].substring(0, 3)) .replace(/E/g, formattedDayNames[dateParts.day].substring(0, 3)); formattedPart = formattedPart.replace(/A/g, dateParts.ampm) .replace(/april/g, 'April') .replace(/august/g, 'August'); return formattedPart; }; if(pattern.indexOf('\'') == -1) { formattedDate = patternReplace(pattern); } else { var formatPatternParts = pattern.replace(/\'+(?=\')/g, '\'\'').split('\'\''); if(formatPatternParts.length == 1) { formatPatternParts = pattern.split('\''); for(var i = 0; i < formatPatternParts.length; i++) { if(i % 2 == 0) { formatPatternParts[i] = patternReplace(formatPatternParts[i]); } } return formatPatternParts.join(''); } else { for(var i = 0; i < formatPatternParts.length; i++) { var formatPatternParts2 = formatPatternParts[i].split('\''); for(var j = 0; j < formatPatternParts2.length; j++) { if(j % 2 == 0) { formatPatternParts2[j] = patternReplace(formatPatternParts2[j]); } } formatPatternParts[i] = formatPatternParts2.join(''); } return formatPatternParts.join('\''); } } return formattedDate; } }; })(typeof jQuery === 'undefined' && typeof Zepto === 'function' ? Zepto : jQuery);
from adafruit_blinka.microcontroller.sama5 import pin PD23 = pin.PD23 AD4 = pin.PD23 PD21 = pin.PD21 AD2 = pin.PD21 PD20 = pin.PD20 AD1 = pin.PD20 PD24 = pin.PD24 AD5 = pin.PD24 PD22 = pin.PD22 AD3 = pin.PD22 PD19 = pin.PD19 AD0 = pin.PD19 PA14 = pin.PA14 SCK = pin.PA14 SCLK = pin.PA14 PA15 = pin.PA15 MOSI = pin.PA15 PA16 = pin.PA16 MISO = pin.PA16 PD2 = pin.PD2 RX = pin.PD2 PD3 = pin.PD3 TX = pin.PD3 PD13 = pin.PD13 PD31 = pin.PD31 PB0 = pin.PB0 PWM1 = pin.PB0 PB7 = pin.PB7 PWM3 = pin.PB7 PB1 = pin.PB1 PWML1 = pin.PB1 PB5 = pin.PB5 PWM2 = pin.PB5 PB3 = pin.PB3 PC0 = pin.PC0 SCL = pin.PC0 PB31 = pin.PB31 SDA = pin.PB31
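# Usage sketch (an assumption, not part of this module): under Adafruit Blinka
# this file is what `import board` resolves to on a SAMA5-based host, so the
# functional aliases above (TX/RX, SCL/SDA, SCK/MOSI/MISO) let generic
# CircuitPython drivers run unchanged. The guard keeps the demo from running
# as an import-time side effect.
if __name__ == "__main__":
    import board
    import busio

    # TX/RX resolve to PD3/PD2, SCL/SDA to PC0/PB31, SCK/MOSI/MISO to
    # PA14/PA15/PA16 via the aliases defined above.
    uart = busio.UART(board.TX, board.RX, baudrate=115200)
    i2c = busio.I2C(board.SCL, board.SDA)
    spi = busio.SPI(board.SCK, board.MOSI, board.MISO)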
/** * Created by cRazy on 2016/12/27. */ Ext.define('Cxt.window.Ratio', { extend: 'Ext.window.Window', xtype: 'ratiowindow', requires: [ 'Cxt.util.TaxCalculator', 'Ext.button.Button', 'Ext.data.Store', 'Ext.form.field.ComboBox', 'Ext.form.field.Number', 'Ext.grid.Panel', 'Ext.grid.plugin.CellEditing', 'Ext.toolbar.Fill', 'Ext.util.Format', 'Ext.window.Window' ], modal: true, bodyPadding: 5, width: 550, messageDock: 'top', constrainPosition: 'center', viewModel: { formulas: { clazz: { get: function (get) { return get('ratio.@class'); }, set: function (value) { this.set('ratio.@class', value); } }, isNormal: function (get) { return get('ratio.@class') == 'com.hd123.m3.commons.biz.ratio.NormalRatio'; }, normalRateReadOnly: function (get) { return get('taxInclusive') != true || get('editable') != true; }, normalRateWithoutTax: function (get) { return get('ratio.@class') == 'com.hd123.m3.commons.biz.ratio.NormalRatio' && get('taxInclusive') == false; } } }, allowBlank: false, editable: true, /** * Once set, the ratioType is fixed and may not be changed by the operator */ ratioTypeOnly: false, /** * Sets the ratioType */ ratioType: false, /** * Whether amounts are tax-inclusive. When set to false, values are tax-exclusive and the tax-inclusive rate is derived from the tax-exclusive one */ taxInclusive: true, config: { /** * Number of decimal places */ scale: 4, /** * @param roundingMode * rounding behavior */ roundingMode: Ext.ROUND_HALF_UP, /** * Tax rate, used for automatic calculation. */ taxRate: { rate: 0 } }, initComponent: function () { var me = this; Ext.apply(me, { items: [{ xtype: 'combobox', itemId: 'ratioType', width: '80%', queryMode: 'local', fieldLabel: 'Commission method', labelAlign: 'right', editable: false, valueField: '@class', hidden: me.ratioTypeOnly, readOnly: !me.editable, bind: { value: '{clazz}' }, store: { type: 'store', data: [{ '@class': 'com.hd123.m3.commons.biz.ratio.NormalRatio', text: 'Fixed rate' }, { '@class': 'com.hd123.m3.commons.biz.ratio.PieceRatio', text: 'Tiered rate' }, { '@class': 'com.hd123.m3.commons.biz.ratio.AbovePieceRatio', text: 'Progressive tiered rate' }] }, listeners: { change: function (field, value) { if (value != me.getViewModel().get('ratio.@class')) { me.setValue({ '@class': value }) } } } }, { xtype: 'numberfield', width: '80%', fieldLabel: 'Commission rate (excl. tax)', suffix: Ext.util.Format.percentSign, decimalPrecision: me.scale, labelAlign: 'right', allowBlank: me.allowBlank, readOnly: !me.editable, maxValue: 1, minValue: 0, hidden: true, bind: { value: '{ratio.rateWithoutTax}', hidden: '{!normalRateWithoutTax}' }, listeners: { change: function (field, value) { if (value > 0) { if (me.getViewModel().get('normalRateWithoutTax')) { me.getViewModel().set('ratio.rate', Cxt.util.TaxCalculator.total(value, me.taxRate, me.scale, me.roundingMode)); } } } } }, { xtype: 'numberfield', width: '80%', fieldLabel: 'Commission rate', suffix: Ext.util.Format.percentSign, decimalPrecision: me.scale, allowBlank: me.allowBlank, readOnly: !me.editable, labelAlign: 'right', maxValue: 1, minValue: 0, bind: { value: '{ratio.rate}', hidden: '{!isNormal}', readOnly: '{normalRateReadOnly}' }, listeners: { change: function (field, value) { if (value > 0) { /* keep the tax-exclusive rate in sync */ me.getViewModel().set('ratio.rateWithoutTax', Cxt.util.TaxCalculator.amount(value, me.taxRate, me.scale, me.roundingMode)) } } } }, { xtype: 'grid', itemId: 'pieceratio', width: '100%', maxHeight: 400, autoAppend: false, columnLines: false, sortableColumns: false, store: { type: 'store' }, bind: { store: {data: '{ratio.lines}'}, hidden: '{isNormal}' }, plugins: { ptype: 'cellediting', clicksToEdit: 1, listeners: { beforeedit: Ext.bind(me.onBeforeEdit, me), edit: Ext.bind(me.onCellEdit, me) } }, listeners: { cellClick: Ext.bind(me.onCellClick, me) }, columns: [{ dataIndex: 'low', text: 'Lower limit', flex: 1, align: 'right', 
renderer: Ext.util.Format.numberRenderer(',#.00'), editor: { xtype: 'numberfield', allowBlank: false, fieldStyle: 'text-align:right' } }, { dataIndex: 'high', text: 'Upper limit (incl.)', flex: 1, align: 'right', allowBlank: false, renderer: function (v, metaData, record, rowIndex, colIndex, store) { if (rowIndex == store.getData().length - 1) { return '∞'; } return Ext.util.Format.number(v, ',#.00'); }, editor: { xtype: 'numberfield', allowBlank: false, maxValue: 99999999999.99, fieldStyle: 'text-align:right' } }, { dataIndex: 'rateWithoutTax', text: 'Commission rate (excl. tax)', flex: 1, align: 'right', allowBlank: me.allowBlank, renderer: Ext.util.Format.percentRenderer('#.######'), bind: { hidden: '{taxInclusive}' }, editor: { xtype: 'numberfield', suffix: Ext.util.Format.percentSign, decimalPrecision: me.scale, allowBlank: me.allowBlank, fieldStyle: 'text-align:right', maxValue: 1, minValue: 0 } }, { dataIndex: 'rate', text: 'Commission rate', flex: 1, align: 'right', allowBlank: me.allowBlank, renderer: Ext.util.Format.percentRenderer('#.######'), editor: { xtype: 'numberfield', suffix: Ext.util.Format.percentSign, decimalPrecision: me.scale, allowBlank: me.allowBlank, fieldStyle: 'text-align:right', maxValue: 1, minValue: 0 } }, { dataIndex: 'row', text: 'Actions', width: 100, tdCls: 'x-grid-operate-cell', hidden: !me.editable, bind: { hidden: '{term.formula.byYear}' }, renderer: function (v, metaData, record, rowIndex) { var details = me.getViewModel().get('ratio.lines'), maxValue = 99999999999 - details.length + rowIndex + 2, removable = !(me.authorized === false || rowIndex == 0 || rowIndex == details.length - 1) ? '' : 'disabled', appendable = !(me.authorized === false || rowIndex == details.length - 1 || (record.get('high') >= maxValue)) ? '' : 'disabled'; return '<button ' + removable + (rowIndex == 0 || rowIndex == details.length - 1 ? ' style="color: transparent;"' : '') + ' class="fa fa-minus-circle removeItem"/><button ' + appendable + ' style="margin-left: 15px;' + (rowIndex == details.length - 1 ? 
'color: transparent;' : '') + '" class="fa fa-plus-circle appendItem"/>' } }] }], dockedItems: [{ xtype: 'toolbar', dock: 'bottom', items: ['->', { xtype: 'button', text: 'OK', ui: 'primary', width: 80, handler: function () { me.doConfirm(); } }, { xtype: 'button', text: 'Cancel', width: 80, hidden: !me.editable, handler: function () { me.close(); } }, '->'] }] }); me.callParent(arguments); }, afterRender: function () { var me = this; me.callParent(arguments); me.getViewModel().set('editable', !!me.editable); me.setTaxInclusive(me.taxInclusive); if (me.ratioType) { me.setRatioType(me.ratioType); } if (me.value) { me.setValue(me.value); } }, setValue: function (value) { var me = this; value = Ext.clone(Ext.valueFrom(value, {}));// As a settings dialog the user may still cancel, so all subsequent modifications are applied only to this copy of value. Ext.applyIf(value, { '@class': 'com.hd123.m3.commons.biz.ratio.NormalRatio', rate: 0, rateWithoutTax: 0 }); if (value['@class'] != 'com.hd123.m3.commons.biz.ratio.NormalRatio') { value.lines = Ext.Array.from(value.lines); if (value.lines.length == 0) { value.lines.push({ low: 0, high: 1, rate: 0, rateWithoutTax: 0 }); value.lines.push({ low: 1, high: 99999999999.99, rate: 0, rateWithoutTax: 0 }); } } me.value = value; if (!me.rendered) return; me.getViewModel().set('ratio', value); }, getValue: function () { var me = this; return me.getViewModel().get('ratio'); }, setTaxInclusive: function (taxInclusive) { var me = this; me.getViewModel().set('taxInclusive', taxInclusive); }, setRatioType: function (ratioType) { var me = this; me.setValue({ '@class': ratioType }) }, doConfirm: function () { var me = this; if (me.isValid() == false) { me.messagePanel.setMessages(me.getErrors()); return; } me.fireEvent('change', me, me.getValue()); me.close(); }, onBeforeEdit: function (editor, context) { var me = this, field = context.field, rowIdx = context.rowIdx, cellEditor = context.column.getEditor(), record = context.record, details = me.getViewModel().get('ratio.lines'); if (!me.editable) return context.cancel = true; if (field == 'rate' && !me.getViewModel().get('taxInclusive')) { return context.cancel = true; } else if (field == 'low') { return context.cancel = true; } else if (field == 'high') { if (rowIdx == details.length - 1) { return context.cancel = true; } cellEditor.setMinValue(record.get('low'), false); } }, onCellEdit: function (editor, context) { var me = this, record = context.record, grid = context.grid, field = context.field, cellEditor = context.column.getEditor(), details = me.getViewModel().get('ratio.lines'); if (field == 'high') { if (cellEditor.getValue()) { me.calcNextLines(context.rowIdx + 1, details); grid.getStore().load(); } } else if (field == 'rateWithoutTax') { record.set('rate', Cxt.util.TaxCalculator.total(record.get('rateWithoutTax'), me.taxRate, me.scale, me.roundingMode)); if (record.get('rate') > 1) { record.set('rate', 1); } } else if (field == 'rate') { record.set('rateWithoutTax', Cxt.util.TaxCalculator.amount(record.get('rate'), me.taxRate, me.scale, me.roundingMode)); } }, onCellClick: function (grid, td, cellIndex, record, tr, rowIndex, e) { var me = this, details = me.getViewModel().get('ratio.lines'), appendButton = e.getTarget('.appendItem'), removeButton = e.getTarget('.removeItem'); if (!appendButton && !removeButton) {// ignore clicks that hit neither action button return; } if (details.length - 1 == rowIndex) { return; } if (Ext.isEmpty(appendButton) == false) { // upper limit of the previous row var rowIndexHigh = details[rowIndex].high; if (Ext.isEmpty(rowIndexHigh) || rowIndexHigh >= 99999999998.99) { return; } var appendObj = [{ rate: 0, 
rateWithoutTax: 0, low: rowIndexHigh, high: rowIndexHigh + 1 }]; Ext.Array.insert(details, rowIndex + 1, appendObj); me.calcNextLines(rowIndex + 2, details); } else if (Ext.isEmpty(removeButton) == false) { if (0 == rowIndex) { return; } if (details[rowIndex + 1] && details[rowIndex - 1]) details[rowIndex + 1].low = details[rowIndex - 1].high; Ext.Array.remove(details, details[rowIndex]); } grid.getStore().load(); }, calcNextLines: function (idx, details) { var index = idx; while (index < details.length) { details[index].low = details[index - 1].high; if (details[index].high >= details[index - 1].high + 1) { break; } details[index].high = details[index - 1].high + 1; index++; } index = details.length - 2; while (index > 0) { if (details[index].high > 99999999999.99) { if (!Ext.isEmpty(details[index + 1])) details[index + 1].low = details[index].low; Ext.Array.remove(details, details[index]); } else { break; } index--; } } });
<!-- MAIN CONTENT --> <div class="container"> <div class="row"> <div class="col-md-offset-3 col-md-6"> <h1><%= title %></h1> <form class="form" method="post"> <label for="comment">Why would you like to block auditor <%= auditor %>?</label> <textarea name="comment" id="comment" class="form-control" rows="5" required></textarea> <br> <button type="submit" class="btn btn-primary"><i class="fa fa-check-square-o"></i> Block auditor</button> </form> </div> </div> </div>
# -*- coding: utf-8 -*- """ Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Copyright (C) 2018 Caphm (original implementation module) Helper functions for setting infolabels of list items SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals import copy import re from future.utils import iteritems, itervalues import resources.lib.api.paths as paths import resources.lib.api.shakti as api import resources.lib.cache as cache import resources.lib.common as common import resources.lib.kodi.library as library from resources.lib.globals import g try: # Python 2 unicode except NameError: # Python 3 unicode = str # pylint: disable=redefined-builtin QUALITIES = [ {'codec': 'h264', 'width': '960', 'height': '540'}, {'codec': 'h264', 'width': '1920', 'height': '1080'}, {'codec': 'h265', 'width': '3840', 'height': '2160'} ] JSONRPC_MAPPINGS = { 'showtitle': 'tvshowtitle', 'userrating': 'rating' } def add_info(videoid, list_item, item, raw_data, set_info=False): """Add infolabels to the list_item. The passed in list_item is modified in place and the infolabels are returned.""" # pylint: disable=too-many-locals cache_identifier = unicode(videoid) + '_' + g.LOCAL_DB.get_profile_config('language', '') try: cache_entry = g.CACHE.get(cache.CACHE_INFOLABELS, cache_identifier) infos = cache_entry['infos'] quality_infos = cache_entry['quality_infos'] except cache.CacheMiss: infos, quality_infos = parse_info(videoid, item, raw_data) g.CACHE.add(cache.CACHE_INFOLABELS, cache_identifier, {'infos': infos, 'quality_infos': quality_infos}, ttl=g.CACHE_METADATA_TTL, to_disk=True) # Use a deep copy of the dict so future changes are not also reflected in the cached copy infos_copy = copy.deepcopy(infos) if videoid.mediatype == common.VideoId.EPISODE or \ videoid.mediatype == common.VideoId.MOVIE or \ videoid.mediatype == common.VideoId.SUPPLEMENTAL: list_item.setProperty('isFolder', 'false') list_item.setProperty('IsPlayable', 'true') else: list_item.setProperty('isFolder', 'true') for stream_type, quality_infos in iteritems(quality_infos): list_item.addStreamInfo(stream_type, quality_infos) if item.get('dpSupplementalMessage'): # Short notice about the future release of a tv show season, or similar infos_copy['plot'] += ' [COLOR green]{}[/COLOR]'.format(item['dpSupplementalMessage']) if set_info: list_item.setInfo('video', infos_copy) return infos_copy def add_art(videoid, list_item, item, raw_data=None): """Add art infolabels to list_item""" try: art = g.CACHE.get(cache.CACHE_ARTINFO, videoid) except cache.CacheMiss: art = parse_art(videoid, item, raw_data) g.CACHE.add(cache.CACHE_ARTINFO, videoid, art, ttl=g.CACHE_METADATA_TTL, to_disk=True) list_item.setArt(art) return art @common.time_execution(immediate=False) def add_info_for_playback(videoid, list_item): """Retrieve infolabels and art info and add them to the list_item""" try: return add_info_from_library(videoid, list_item) except library.ItemNotFound: common.debug('Can not get infolabels from the library, submit a request to netflix') return add_info_from_netflix(videoid, list_item) def parse_info(videoid, item, raw_data): """Parse info from a path request response into Kodi infolabels""" if (videoid.mediatype == common.VideoId.UNSPECIFIED and hasattr(item, 'contained_titles')): # Special handling for VideoLists return { 'plot': common.get_local_string(30087).format( ', '.join(item.contained_titles)) if item.contained_titles else common.get_local_string(30111) }, 
{} infos = {'mediatype': ('tvshow' if videoid.mediatype == common.VideoId.SHOW or videoid.mediatype == common.VideoId.SUPPLEMENTAL else videoid.mediatype)} if videoid.mediatype in common.VideoId.TV_TYPES: infos['tvshowtitle'] = raw_data['videos'][videoid.tvshowid]['title'] if item.get('watched', False): infos['playcount'] = 1 infos.update(parse_atomic_infos(item)) infos.update(parse_referenced_infos(item, raw_data)) infos.update(parse_tags(item)) return infos, get_quality_infos(item) def parse_atomic_infos(item): """Parse those infos into infolabels that are directly accessible from the item dict""" return {target: _get_and_transform(source, target, item) for target, source in iteritems(paths.INFO_MAPPINGS)} def _get_and_transform(source, target, item): """Get the value for source and transform it if necessary""" value = common.get_path_safe(source, item) if isinstance(value, dict) or value is None: return '' return (paths.INFO_TRANSFORMATIONS[target](value) if target in paths.INFO_TRANSFORMATIONS else value) def parse_referenced_infos(item, raw_data): """Parse those infos into infolabels that need their references resolved within the raw data""" return {target: [person['name'] for _, person in paths.resolve_refs(item.get(source, {}), raw_data)] for target, source in iteritems(paths.REFERENCE_MAPPINGS)} def parse_tags(item): """Parse the tags""" return {'tag': [tagdef['name'] for tagdef in itervalues(item.get('tags', {})) if isinstance(tagdef.get('name', {}), unicode)]} def get_quality_infos(item): """Return audio and video quality infolabels""" quality_infos = {} delivery = item.get('delivery') if delivery: quality_infos['video'] = QUALITIES[ min((delivery.get('hasUltraHD', False) << 1 | delivery.get('hasHD')), 2)] quality_infos['audio'] = { 'channels': 2 + 4 * delivery.get('has51Audio', False)} if g.ADDON.getSettingBool('enable_dolby_sound'): if delivery.get('hasDolbyAtmos', False): quality_infos['audio']['codec'] = 'truehd' else: quality_infos['audio']['codec'] = 'eac3' else: quality_infos['audio']['codec'] = 'aac' return quality_infos def parse_art(videoid, item, raw_data): # pylint: disable=unused-argument """Parse art info from a path request response to Kodi art infolabels""" boxarts = common.get_multiple_paths( paths.ART_PARTIAL_PATHS[0] + ['url'], item) interesting_moment = common.get_multiple_paths( paths.ART_PARTIAL_PATHS[1] + ['url'], item, {}).get(paths.ART_SIZE_FHD) clearlogo = common.get_path_safe( paths.ART_PARTIAL_PATHS[3] + ['url'], item) fanart = common.get_path_safe( paths.ART_PARTIAL_PATHS[4] + [0, 'url'], item) return assign_art(videoid, boxarts[paths.ART_SIZE_FHD], boxarts[paths.ART_SIZE_SD], boxarts[paths.ART_SIZE_POSTER], interesting_moment, clearlogo, fanart) def assign_art(videoid, boxart_large, boxart_small, poster, interesting_moment, clearlogo, fanart): """Assign the art available from Netflix to appropriate Kodi art""" # pylint: disable=too-many-arguments art = {'poster': _best_art([poster]), 'fanart': _best_art([fanart, interesting_moment, boxart_large, boxart_small]), 'thumb': ((interesting_moment if videoid.mediatype == common.VideoId.EPISODE or videoid.mediatype == common.VideoId.SUPPLEMENTAL else '') or boxart_large or boxart_small)} art['landscape'] = art['thumb'] if videoid.mediatype != common.VideoId.UNSPECIFIED: art['clearlogo'] = _best_art([clearlogo]) return art def _best_art(arts): """Return the best art (determined by list order of arts) or an empty string if none is available""" return next((art for art in arts if art), '') def 
add_info_from_netflix(videoid, list_item): """Apply infolabels with info from Netflix API""" try: infos = add_info(videoid, list_item, None, None, True) art = add_art(videoid, list_item, None) common.debug('Got infolabels and art from cache') except (AttributeError, TypeError): common.debug('Infolabels or art were not in cache, retrieving from API') api_data = api.single_info(videoid) infos = add_info(videoid, list_item, api_data['videos'][videoid.value], api_data, True) art = add_art(videoid, list_item, api_data['videos'][videoid.value]) return infos, art def add_info_from_library(videoid, list_item): """Apply infolabels with info from Kodi library""" details = library.get_item(videoid) common.debug('Got file info from library: {}'.format(details)) art = details.pop('art', {}) # Resuming for strm files in library is currently broken in all kodi versions # keeping this for reference / in hopes this will get fixed resume = details.pop('resume', {}) # if resume: # start_percent = resume['position'] / resume['total'] * 100.0 # list_item.setProperty('startPercent', str(start_percent)) infos = { 'DBID': details.pop('{}id'.format(videoid.mediatype)), 'mediatype': videoid.mediatype } # WARNING!! Remove unsupported ListItem.setInfo keys from 'details' by using _sanitize_infos # reference to Kodi ListItem.cpp _sanitize_infos(details) infos.update(details) list_item.setInfo('video', infos) list_item.setArt(art) # Workaround for resuming strm files from library infos['resume'] = resume return infos, art def _sanitize_infos(details): for source, target in iteritems(JSONRPC_MAPPINGS): if source in details: details[target] = details.pop(source) for prop in ['file', 'label', 'runtime']: details.pop(prop, None) def add_highlighted_title(list_item, videoid, infos): """Highlight menu item title when the videoid is contained in my-list""" highlight_index = g.ADDON.getSettingInt('highlight_mylist_titles') if not highlight_index: return highlight_color = ['black', 'blue', 'red', 'green', 'white', 'yellow'][highlight_index] remove_color = videoid not in api.mylist_items() if list_item.getProperty('isFolder') == 'true': updated_title = _colorize_title(g.py2_decode(list_item.getVideoInfoTag().getTitle()), highlight_color, remove_color) list_item.setLabel(updated_title) infos['title'] = updated_title else: # When menu item is not a folder 'label' is replaced by 'title' property of infoLabel infos['title'] = _colorize_title(infos['title'], highlight_color, remove_color) def _colorize_title(text, color, remove_color=False): matches = re.match(r'(\[COLOR\s.+\])(.*)(\[/COLOR\])', text) if remove_color: if matches: return matches.groups()[1] else: if not matches: return '[COLOR {}]{}[/COLOR]'.format(color, text) return text
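# Minimal sketch of the _colorize_title round trip above (illustrative only;
# the Kodi add-on imports at the top of this module must resolve, so this
# cannot run outside the add-on). Adding wraps the title in a colour tag,
# removing strips one, and a title that is already coloured is left untouched
# by the add path:
#
#   _colorize_title('Title', 'blue')
#   # -> '[COLOR blue]Title[/COLOR]'
#   _colorize_title('[COLOR blue]Title[/COLOR]', 'blue', remove_color=True)
#   # -> 'Title'
#   _colorize_title('[COLOR red]Title[/COLOR]', 'blue')
#   # -> '[COLOR red]Title[/COLOR]'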
import random import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env class HalfCheetahFANEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self): self.reward_direction = 1 # True mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5) utils.EzPickle.__init__(self) def step(self, action): xposbefore = self.sim.data.qpos[0] self.do_simulation(action, self.frame_skip) xposafter = self.sim.data.qpos[0] ob = self._get_obs() reward_ctrl = - 0.1 * np.square(action).sum() # NOTE: FAN change; modify by reward direction reward_run = self.reward_direction*(xposafter - xposbefore)/self.dt reward = reward_ctrl + reward_run done = False return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl) def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_model(self): # NOTE: Set reward direction randomly self.reward_direction = random.choice([-1, 1]) qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5
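# Registration sketch (assumptions: this module is importable as
# `half_cheetah_fan`, MuJoCo is set up, and the env id below is hypothetical).
# Gym can only build environments that were registered first; the old 4-tuple
# step API matches what the class above returns.
if __name__ == "__main__":
    import gym
    from gym.envs.registration import register

    register(
        id="HalfCheetahFAN-v0",  # hypothetical id, not defined elsewhere
        entry_point="half_cheetah_fan:HalfCheetahFANEnv",
        max_episode_steps=1000,
    )
    env = gym.make("HalfCheetahFAN-v0")
    obs = env.reset()  # also resamples reward_direction to -1 or +1
    obs, reward, done, info = env.step(env.action_space.sample())
    print(info["reward_run"], info["reward_ctrl"])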
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest import retworkx import numpy class TestDispatchPyGraph(unittest.TestCase): class_type = "PyGraph" def setUp(self): super().setUp() if self.class_type == "PyGraph": self.graph = retworkx.undirected_gnp_random_graph(10, 0.5, seed=42) else: self.graph = retworkx.directed_gnp_random_graph(10, 0.5, seed=42) def test_distance_matrix(self): res = retworkx.distance_matrix(self.graph) self.assertIsInstance(res, numpy.ndarray) def test_distance_matrix_as_undirected(self): if self.class_type == "PyGraph": with self.assertRaises(TypeError): retworkx.distance_matrix(self.graph, as_undirected=True) else: res = retworkx.distance_matrix(self.graph, as_undirected=True) self.assertIsInstance(res, numpy.ndarray) def test_adjacency_matrix(self): res = retworkx.adjacency_matrix(self.graph) self.assertIsInstance(res, numpy.ndarray) def test_all_simple_paths(self): res = retworkx.all_simple_paths(self.graph, 0, 1) self.assertIsInstance(res, list) def test_floyd_warshall(self): res = retworkx.floyd_warshall(self.graph) self.assertIsInstance(res, retworkx.AllPairsPathLengthMapping) def test_floyd_warshall_numpy(self): res = retworkx.floyd_warshall_numpy(self.graph) self.assertIsInstance(res, numpy.ndarray) if self.class_type == "PyGraph": expected_res = retworkx.graph_floyd_warshall_numpy(self.graph) else: expected_res = retworkx.digraph_floyd_warshall_numpy(self.graph) self.assertTrue(numpy.array_equal(expected_res, res)) def test_astar_shortest_path(self): res = retworkx.astar_shortest_path(self.graph, 0, lambda _: True, lambda _: 1, lambda _: 1) self.assertIsInstance(list(res), list) def test_dijkstra_shortest_paths(self): res = retworkx.dijkstra_shortest_paths(self.graph, 0) self.assertIsInstance(res, retworkx.PathMapping) def test_dijkstra_shortest_path_lengths(self): res = retworkx.dijkstra_shortest_path_lengths(self.graph, 0, lambda _: 1) self.assertIsInstance(res, retworkx.PathLengthMapping) def test_k_shortest_path_lengths(self): res = retworkx.k_shortest_path_lengths(self.graph, 0, 2, lambda _: 1) self.assertIsInstance(res, retworkx.PathLengthMapping) def test_dfs_edges(self): res = retworkx.dfs_edges(self.graph, 0) self.assertIsInstance(list(res), list) def test_all_pairs_dijkstra_shortest_paths(self): res = retworkx.all_pairs_dijkstra_shortest_paths(self.graph, lambda _: 1) self.assertIsInstance(res, retworkx.AllPairsPathMapping) def test_all_pairs_dijkstra_path_lengths(self): res = retworkx.all_pairs_dijkstra_path_lengths(self.graph, lambda _: 1) self.assertIsInstance(res, retworkx.AllPairsPathLengthMapping) def test_is_isomorphic_nodes_incompatible_raises(self): with self.assertRaises(TypeError): if self.class_type == "PyGraph": retworkx.is_isomorphic(self.graph, retworkx.PyDiGraph()) else: retworkx.is_isomorphic(self.graph, retworkx.PyGraph()) def test_betweenness_centrality(self): res = retworkx.betweenness_centrality(self.graph) self.assertIsInstance(res, retworkx.CentralityMapping) class TestDispatchPyDiGraph(TestDispatchPyGraph): class_type = 
"PyDiGraph"
load("bf4b12814bc95f34eeb130127d8438ab.js"); load("93fae755edd261212639eed30afa2ca4.js"); // Copyright (c) 2012 Ecma International. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /*--- es5id: 15.9.5.44-0-1 description: Date.prototype.toJSON must exist as a function ---*/ var f = Date.prototype.toJSON; assert.sameValue(typeof(f), "function", 'typeof(f)');
/*jshint node:true*/ /* global require, module */ var EmberApp = require('ember-cli/lib/broccoli/ember-app'); module.exports = function(defaults) { var app = new EmberApp(defaults, { 'hinting': true // Add options here }); // Use `app.import` to add additional libraries to the generated // output files. // // If you need to use different assets in different // environments, specify an object as the first parameter. That // object's keys should be the environment name and the values // should be the asset to use in that environment. // // If the library that you are including contains AMD or ES6 // modules that you would like to import into your application // please specify an object with the list of modules as keys // along with the exports of each module as its value. app.import('bower_components/qunit/qunit/qunit.js'); app.import('bower_components/ember-qunit/ember-qunit.amd.js'); app.import('bower_components/noty/js/noty/packaged/jquery.noty.packaged.js'); app.import('bower_components/bootstrap/dist/css/bootstrap.css'); return app.toTree(); };
/** * @fileoverview Enforces or disallows inline comments. * @author Greg Cochard */ "use strict"; var astUtils = require("../ast-utils"); //------------------------------------------------------------------------------ // Rule Definition //------------------------------------------------------------------------------ module.exports = { meta: { docs: { description: "disallow inline comments after code", category: "Stylistic Issues", recommended: false }, schema: [] }, create: function(context) { var sourceCode = context.getSourceCode(); /** * Check that a comment does not share its line with code, i.e. the line neither starts nor ends with code * @param {ASTNode} node The comment node to check * @private * @returns {void} */ function testCodeAroundComment(node) { // Get the whole line and cut it off at the start of the comment var startLine = String(sourceCode.lines[node.loc.start.line - 1]); var endLine = String(sourceCode.lines[node.loc.end.line - 1]); var preamble = startLine.slice(0, node.loc.start.column).trim(); // Also check after the comment var postamble = endLine.slice(node.loc.end.column).trim(); // Check that this comment isn't an ESLint directive var isDirective = astUtils.isDirectiveComment(node); // Should be empty if there was only whitespace around the comment if (!isDirective && (preamble || postamble)) { context.report(node, "Unexpected comment inline with code."); } } //-------------------------------------------------------------------------- // Public //-------------------------------------------------------------------------- return { LineComment: testCodeAroundComment, BlockComment: testCodeAroundComment }; } };
'use strict';(function(e){function q(b){var c=b.charCodeAt(0),a=1114112,d=0,l=b.length|0,f="";switch(c>>>4){case 12:case 13:a=(c&31)<<6|b.charCodeAt(1)&63;d=128>a?0:2;break;case 14:a=(c&15)<<12|(b.charCodeAt(1)&63)<<6|b.charCodeAt(2)&63;d=2048>a?0:3;break;case 15:30===c>>>3&&(a=(c&7)<<18|(b.charCodeAt(1)&63)<<12|(b.charCodeAt(2)&63)<<6|b.charCodeAt(3),d=65536>a?0:4)}d&&(l<d?d=0:65536>a?f=h(a):1114112>a?(a=a-65664|0,f=h((a>>>10)+55296|0,(a&1023)+56320|0)):d=0);for(;d<l;d=d+1|0)f+="\ufffd";return f} function m(){}var h=String.fromCharCode,k={}.toString,n=e.SharedArrayBuffer,r=n?k.call(n):"",g=e.Uint8Array,p=g||Array,t=k.call((g?ArrayBuffer:p).prototype);m.prototype.decode=function(b){var c=b&&b.buffer||b,a=k.call(c);if(a!==t&&a!==r&&void 0!==b)throw TypeError("Failed to execute 'decode' on 'TextDecoder': The provided value is not of type '(ArrayBuffer or ArrayBufferView)'");b=g?new p(c):c;c="";a=0;for(var d=b.length|0;a<d;a=a+32768|0)c+=h.apply(0,b[g?"subarray":"slice"](a,a+32768|0));return c.replace(/[\xc0-\xff][\x80-\xbf]+|[\x80-\xff]/g, q)};e.TextDecoder||(e.TextDecoder=m)})("undefined"==typeof global?"undefined"==typeof self?this:self:global);//AnonyCo
import Env from './env'; let config = { SITE_TITLE: 'DEVOPS System', env: Env, apiBaseUrl: 'http://devops.we.com/api/v1', LS_KEY_PROFILE: 'ls-profile', LS_KEY_APPID: 'ls-appid' }; export default config;
import React from "react" import { getCurrentLanguageString } from "../../utility/helper" import { connect } from "react-redux" import PropTypes from "prop-types" import { AboutComponentWrapper, AboutPageContent } from "./about.styles" import AboutFunding from "./about-funding" import AboutTeamBlock from "./about-team-block" import AboutAdvisoryBoard from "./about-advisory-board" import AboutOrganisation from "./about-organisation" import { PageTitle } from "../../templates/page.styles"; const AboutComponents = props => { const content = props.content let renderComponent const language = getCurrentLanguageString(props.languages) switch (content.slug) { case "/about": renderComponent = ( <> <PageTitle> {language === "EN" ? "About" : "über"}</PageTitle> <AboutPageContent dangerouslySetInnerHTML={{ __html: content.acf[`${language}_row`].description, }} /> </> ) break case "/de/uber": renderComponent = ( <> <PageTitle> {language === "EN" ? "About" : "über"}</PageTitle> <AboutPageContent dangerouslySetInnerHTML={{ __html: content.acf[`${language}_row`].description, }} /> </> ) break case "/about/advisory-board": renderComponent = ( <AboutAdvisoryBoard team_block={content.acf.team_block} /> ) break case "/de/uber/beirat": renderComponent = ( <AboutAdvisoryBoard team_block={content.acf.team_block} /> ) break case "/about/support": renderComponent = <AboutFunding funding={content.acf.funding} /> break case "/de/uber/unterstutzung": renderComponent = <AboutFunding funding={content.acf.funding} /> break case "/about/team": renderComponent = <AboutTeamBlock team={content.acf.team_block} /> break case "/de/uber/team": renderComponent = <AboutTeamBlock team={content.acf.team_block} /> break case "/about/organization": renderComponent = ( <AboutOrganisation team_block={content.acf.team_block} /> ) break case "/de/uber/verein": renderComponent = ( <AboutOrganisation team_block={content.acf.team_block} /> ) break default: renderComponent = <div></div> } return <AboutComponentWrapper>{renderComponent}</AboutComponentWrapper> } const mapStateToProps = state => { return { languages: state.languages, } } AboutComponents.propTypes = { content: PropTypes.object, } export default connect( mapStateToProps, null )(AboutComponents)
# -*- coding: utf-8 -*- """ Created on Sat Jun 8 13:12:04 2019 @author: DiPu """
/** * @fileoverview Prevent using silent option in functions that cause events * @author Ilya Volodin * @copyright 2014 Ilya Volodin. All rights reserved. */ "use strict"; //------------------------------------------------------------------------------ // Rule Definition //------------------------------------------------------------------------------ var helper = require("../backbone-helper.js"); module.exports = { meta: { docs: { description: "Prevent using silent option in functions that cause events", category: "Best Practices", recommended: false }, schema: [] }, create: function(context) { var settings = context.settings || /* istanbul ignore next */ {}; var backbone = []; //-------------------------------------------------------------------------- // Public //-------------------------------------------------------------------------- return { "CallExpression": function(node) { backbone.push(backbone[backbone.length - 1] || helper.isBackboneAny(node, settings.backbone)); }, "CallExpression:exit": function(node) { if (helper.isBackboneAny(node, settings.backbone)) { backbone.pop(); } }, "Identifier": function(node) { var functions = ["set", "unset", "reset", "clear", "remove", "add", "push", "unshift", "shift", "sort", "create"]; var ancestors = context.getAncestors(node); var parent = ancestors.pop(); if (backbone[backbone.length - 1] && (node.name === "silent") && parent.type === "Property" && parent.value.type === "Literal" && parent.value.value === true) { parent = ancestors[ancestors.length - 2]; if (parent && parent.type === "CallExpression" && parent.callee.type === "MemberExpression" && parent.callee.property.type === "Identifier" && functions.indexOf(parent.callee.property.name) > -1) { context.report(node, "Do not silence events."); } } } }; } };
/* jQWidgets v6.0.6 (2018-August) Copyright (c) 2011-2018 jQWidgets. License: https://jqwidgets.com/license/ */ /* eslint-disable */ (function(a){a.jqx.jqxWidget("jqxFormattedInput","",{});a.extend(a.jqx._jqxFormattedInput.prototype,{defineInstance:function(){var b={width:null,height:null,radix:10,decimalNotation:"default",value:"0",min:"-9223372036854775808",max:"9223372036854775807",upperCase:false,spinButtons:true,spinButtonsStep:1,dropDown:false,dropDownWidth:null,popupZIndex:20000,placeHolder:"",roundedCorners:true,disabled:false,rtl:false,changeType:null,template:"",hint:true,_opened:false,$popup:a("<ul></ul>"),item:'<li><a href="#"></a></li>',events:["open","close","change","radixChange"]};if(this===a.jqx._jqxFormattedInput.prototype){return b}a.extend(true,this,b);return b},createInstance:function(){var b=this;b._Long();b._regex={2:new RegExp(/([0-1])/),8:new RegExp(/([0-7])/),10:new RegExp(/([0-9\-])/),16:new RegExp(/([0-9]|[a-f])/i)};b.render()},render:function(){var e=this;e._radixNumber=e._getRadix(e.radix);if(e.value!==""){e._number=new e.longObj.math.Long.fromString((e.value).toString(),e._radixNumber)}if(this.element instanceof HTMLInputElement){var g=a("<div></div>");g.addClass(e.toThemeProperty("jqx-input-group"));this.host.after(g);var b=this.element;var f=this.host.data();g.append(b);g[0].id=this.element.id;this.element.removeAttribute("id");this.element.setAttribute("hint",true);g[0].style=this.element.style;e.input=e.element;if(!(this.input instanceof HTMLInputElement)){this.input=this.host.find("input");if(this.input.length>0){this.input=this.input[0]}a(this.input).addClass(this.toThemeProperty("jqx-input-widget"))}this.element.style=""}if(e.baseHost){e.host=e.baseHost;e.element=e.host[0]}if(this.element.nodeName.toLowerCase()==="div"){this.baseHost=this.element;var b=this.host.find("input");var d=false;a.each(b,function(){var h=this.type;if(h===null||h==="text"||h==="textarea"){b=a(this);d=true;return false}});if(!d){throw new Error("jqxFormattedInput: Missing Text Input in the Input Group")}if(b.length>0){this.baseHost=a(this.element);var f=this.host.data();this.host=b;this.element=b[0];this.host.data(f);this.baseHost.addClass(this.toThemeProperty("jqx-widget"));this.baseHost.addClass(this.toThemeProperty("jqx-rc-all"));this.baseHost.addClass(this.toThemeProperty("jqx-input-group"));this.baseHost.addClass(this.toThemeProperty("jqx-formattedinput"));var c=this.baseHost.children();a.each(c,function(h){a(this).addClass(e.toThemeProperty("jqx-input-group-addon"));a(this).removeClass(e.toThemeProperty("jqx-rc-all"));if(h===0){a(this).addClass(e.toThemeProperty("jqx-rc-l"))}if(h===c.length-1){a(this).addClass(e.toThemeProperty("jqx-rc-r"))}if(this!==e.element){a(this).addClass(e.toThemeProperty("jqx-fill-state-normal"))}if(this.nodeName.toLowerCase()==="div"){e.appendSpinButtons=function(k){e._spinButtonsContainer=a(k);e._spinButtonsContainer.addClass(e.toThemeProperty("jqx-formatted-input-spin-buttons-container"));var j='<div class="'+e.toThemeProperty("jqx-fill-state-normal jqx-formatted-input-spin-button")+'"><div 
class="'+e.toThemeProperty("jqx-input-icon")+'"></div></div>';e._upbutton=a(j);e._spinButtonsContainer.append(e._upbutton);e._downbutton=a(j);e._spinButtonsContainer.append(e._downbutton);e._upArrow=e._upbutton.find("div");e._upArrow.addClass(e.toThemeProperty("jqx-icon-arrow-up"));e._downArrow=e._downbutton.find("div");e._downArrow.addClass(e.toThemeProperty("jqx-icon-arrow-down"));if(e.template){e._upbutton.addClass(e.toThemeProperty("jqx-"+e.template));e._downbutton.addClass(e.toThemeProperty("jqx-"+e.template))}e._spinButtonsStepLong=new e.longObj.math.Long.fromNumber(e.spinButtonsStep)};var i=function(j){e._addon=a(j);e._addon.addClass(e.toThemeProperty("jqx-formatted-input-addon"));if(!e._arrow){e._arrow=a('<div class="'+e.toThemeProperty("jqx-icon")+" "+e.toThemeProperty("jqx-icon-arrow-down")+'"></div>');e._arrow.appendTo(e._addon)}if(e.template){e._addon.addClass(e.toThemeProperty("jqx-"+e.template))}};if(e.rtl===false){if(!e._spinButtonsContainer&&e.spinButtons===true){e.appendSpinButtons(this)}else{if(!e._addon&&e.dropDown===true&&((h===2)||(h===1&&e.spinButtons===false))){i(this)}}}else{if(!e._addon&&e.dropDown===true){i(this);if(e.spinButtons===true){e._addon.addClass(e.toThemeProperty("jqx-formatted-input-addon-rtl"))}}else{if(!e._spinButtonsContainer&&e.spinButtons===true&&((h===1)||(h===0&&e.dropDown===false))){e.appendSpinButtons(this);e._spinButtonsContainer.addClass(e.toThemeProperty("jqx-formatted-input-spin-buttons-container-rtl"));if(e.dropDown===true){e._addon.addClass(e.toThemeProperty("jqx-formatted-input-addon-rtl"))}}}}}})}}e._inputAndAddon=e.host;if(e.baseHost){if(e._spinButtonsContainer){e._inputAndAddon=e._inputAndAddon.add(e._spinButtonsContainer)}if(e._addon){e._inputAndAddon=e._inputAndAddon.add(e._addon)}}e.removeHandlers();this.addHandlers();if(this.rtl){this.host.addClass(this.toThemeProperty("jqx-rtl"))}this.host.attr("role","textbox");a.jqx.aria(this,"aria-autocomplete","both");a.jqx.aria(this,"aria-disabled",this.disabled);a.jqx.aria(this,"aria-readonly",false);a.jqx.aria(this,"aria-multiline",false);a.jqx.aria(this,"aria-haspopup",true);if(e.value!==""&&e.value!==null){if(e.upperCase===true){e.host.addClass(e.toThemeProperty("jqx-formatted-input-upper-case"))}else{e.host.addClass(e.toThemeProperty("jqx-formatted-input-lower-case"))}if(e._radixNumber===10&&e.decimalNotation==="exponential"){e.element.value=e._getDecimalNotation("exponential")}else{e.element.value=e.value}}else{if(e._spinButtonsContainer){e._spinButtonsContainer.addClass(e.toThemeProperty("jqx-fill-state-disabled"))}}if(e._radixNumber!==10&&e.min.toString()==="-9223372036854775808"){e._minLong=new e.longObj.math.Long.fromNumber(e.min)}else{e._setMinMax("min")}if(e._radixNumber!==10&&e.max.toString()==="9223372036854775807"){e._maxLong=new e.longObj.math.Long.fromNumber(e.max)}else{e._setMinMax("max")}this._addBarAndLabel((this.baseHost&&a(this.baseHost.children()[this.baseHost.children.length-1]))||this.host);if(e.isMaterialized()){setTimeout(function(){if(e.hint){e.label[0].innerHTML=e.placeHolder}if(!e.baseHost){if(e.element.value.length===0){e.element.removeAttribute("hint")}else{e.element.setAttribute("hint",true)}e.bar.css("top","");return}if(e.element.value.length===0){e.baseHost[0].removeAttribute("hint")}else{e.baseHost[0].setAttribute("hint",true)}})}},_refreshClasses:function(c){var 
b=c?"addClass":"removeClass";this.host[b](this.toThemeProperty("jqx-widget-content"));this.host[b](this.toThemeProperty("jqx-input"));this.host[b](this.toThemeProperty("jqx-formatted-input"));this.host[b](this.toThemeProperty("jqx-widget"));this.$popup[b](this.toThemeProperty("jqx-popup"));if(a.jqx.browser.msie){this.$popup[b](this.toThemeProperty("jqx-noshadow"))}this.$popup[b](this.toThemeProperty("jqx-input-popup"));this.$popup[b](this.toThemeProperty("jqx-menu"));this.$popup[b](this.toThemeProperty("jqx-menu-vertical"));this.$popup[b](this.toThemeProperty("jqx-menu-dropdown"));this.$popup[b](this.toThemeProperty("jqx-widget"));this.$popup[b](this.toThemeProperty("jqx-widget-content"));if(this.roundedCorners){this.host[b](this.toThemeProperty("jqx-rc-all"));this.$popup[b](this.toThemeProperty("jqx-rc-all"));if(this.baseHost){this.baseHost[b](this.toThemeProperty("jqx-rc-all"));if(this.rtl===false){this.host[b](this.toThemeProperty("jqx-rc-l"));if(this._addon){this._addon[b](this.toThemeProperty("jqx-rc-r"))}}else{this.host[b](this.toThemeProperty("jqx-rc-r"));if(this._addon){this._addon[b](this.toThemeProperty("jqx-rc-l"))}}}}else{this.host.removeClass(this.toThemeProperty("jqx-rc-all"));this.$popup.removeClass(this.toThemeProperty("jqx-rc-all"));if(this.baseHost){this.baseHost.removeClass(this.toThemeProperty("jqx-rc-all"));if(this.rtl===false){this.host.removeClass(this.toThemeProperty("jqx-rc-l"));if(this.dropDown){this._addon.removeClass(this.toThemeProperty("jqx-rc-r"))}else{if(this.spinButtons){this._spinButtonsContainer.removeClass(this.toThemeProperty("jqx-rc-r"))}}}else{this.host.removeClass(this.toThemeProperty("jqx-rc-r"));if(this.dropDown){this._addon.removeClass(this.toThemeProperty("jqx-rc-l"))}else{if(this.spinButtons){this._spinButtonsContainer.removeClass(this.toThemeProperty("jqx-rc-l"))}}}}}if(this.disabled){this.host[b](this.toThemeProperty("jqx-fill-state-disabled"));if(this.baseHost){if(this._spinButtonsContainer){this._spinButtonsContainer[b](this.toThemeProperty("jqx-fill-state-disabled"))}if(this._addon){this._addon[b](this.toThemeProperty("jqx-fill-state-disabled"))}}}else{this.host.removeClass(this.toThemeProperty("jqx-fill-state-disabled"));if(this.baseHost&&this.value!==""&&this.value!==null){if(this._spinButtonsContainer){this._spinButtonsContainer.removeClass(this.toThemeProperty("jqx-fill-state-disabled"))}if(this._addon){this._addon.removeClass(this.toThemeProperty("jqx-fill-state-disabled"))}}}},selectAll:function(){var b=this.host;setTimeout(function(){if("selectionStart" in b[0]){b[0].focus();b[0].setSelectionRange(0,b[0].value.length)}else{var c=b[0].createTextRange();c.collapse(true);c.moveEnd("character",b[0].value.length);c.moveStart("character",0);c.select()}},10)},selectLast:function(){var b=this.host;this.selectStart(b[0].value.length)},selectFirst:function(){this.selectStart(0)},selectStart:function(c){var b=this.host;setTimeout(function(){if("selectionStart" in b[0]){b[0].focus();b[0].setSelectionRange(c,c)}else{var d=b[0].createTextRange();d.collapse(true);d.moveEnd("character",c);d.moveStart("character",c);d.select()}},10)},focus:function(){try{this.host.focus();var c=this;setTimeout(function(){c.host.focus()},25)}catch(b){}},refresh:function(){var f=this;this._refreshClasses(false);this._refreshClasses(true);if(!this.baseHost){if(this.width){this.host.width(this.width)}if(this.height){this.host.height(this.height)}}else{if(this.width){this.baseHost.width(this.width)}if(this.height){this.baseHost.height(this.height);var e=0;var 
j=this.baseHost.height()-2;if(a.jqx.browser.msie&&a.jqx.browser.version<8){this.baseHost.css("display","inline-block")}a.each(this.baseHost.children(),function(){if(this.className.indexOf("jqx-input-bar")>=0){return true}if(this.className.indexOf("jqx-input-label")>=0){return true}a(this).css("height","100%");if(a.jqx.browser.msie&&a.jqx.browser.version<8){a(this).css("height",j+"px")}if(this!==f.element){e+=a(this).outerWidth()}});var c=(typeof f.width==="string"&&f.width.charAt(f.width.length-1)==="%")?1:0;this.host.css("width",this.baseHost.width()-e-c+"px");if(a.jqx.browser.msie&&a.jqx.browser.version<9){if(f._spinButtonsContainer){if(f.rtl===false||f.rtl===true&&f._addon){f._spinButtonsContainer.css("border-left-width","0")}}if(f._addon){if(f.rtl===false){f._addon.css("border-left-width","0")}else{if(!f._spinButtonsContainer){f._addon.css("border-right-width","0")}}}var h=0;if(a.jqx.browser.version<8){var g=0;var d=parseInt(f.host.css("border-left-width"),10)+parseInt(f.host.css("border-right-width"),10);var i=parseInt(f.host.css("padding-left"),10)+parseInt(f.host.css("padding-right"),10);if(f._spinButtonsContainer){d+=parseInt(f._spinButtonsContainer.css("border-left-width"),10)+parseInt(f._spinButtonsContainer.css("border-right-width"),10);i+=parseInt(f._spinButtonsContainer.css("padding-left"),10)+parseInt(f._spinButtonsContainer.css("padding-right"),10);if(!f._addon){g=2}}if(f._addon){d+=parseInt(f._addon.css("border-left-width"),10)+parseInt(f._addon.css("border-right-width"),10);i+=parseInt(f._addon.css("padding-left"),10)+parseInt(f._addon.css("padding-right"),10);if(!f._spinButtonsContainer){g=2}}f.host.width(f.host.width()-(i+d)-g);h=6}f.host.height(f.baseHost.height()-(parseInt(f.host.css("border-top-width"),10)+parseInt(f.host.css("border-bottom-width"),10)+parseInt(f.host.css("padding-top"),10)+parseInt(f.host.css("padding-bottom"),10)+h));var b=f.host.height()+"px";f.host.css("min-height",b);f.host.css("line-height",b)}}if(f.baseHost&&f.bar){f.bar.css("top",1+f.host.outerHeight())}}this.host.attr("disabled",this.disabled);if(!this.host.attr("placeholder")){this._refreshPlaceHolder()}},_refreshPlaceHolder:function(){var b=this;if(this.isMaterialized()&&this.hint){this.label[0].innerHTML=this.placeHolder;return}if("placeholder" in this.element){this.host.attr("placeHolder",this.placeHolder)}else{var 
b=this;if(this.element.value===""){this.element.value=this.placeHolder;this.host.focus(function(){if(b.element.value===b.placeHolder){b.element.value=""}});this.host.blur(function(){if(b.element.value===""||b.element.value===b.placeHolder){b.element.value=b.placeHolder}})}}},destroy:function(){this.removeHandlers();if(this.baseHost){a.jqx.utilities.resize(this.baseHost,null,true);this.baseHost.remove()}else{a.jqx.utilities.resize(this.host,null,true);this.host.remove()}if(this.$popup){this.$popup.remove()}},propertyChangedHandler:function(b,d,g,f){if(d==="placeHolder"){b._refreshPlaceHolder();return}if(d=="template"){if(b.template){b._upbutton.removeClass(b.toThemeProperty("jqx-"+g));b._downbutton.removeClass(b.toThemeProperty("jqx-"+g));b._addon.removeClass(b.toThemeProperty("jqx-"+g));b._upbutton.addClass(b.toThemeProperty("jqx-"+b.template));b._downbutton.addClass(b.toThemeProperty("jqx-"+b.template));b._addon.addClass(b.toThemeProperty("jqx-"+b.template))}}if(d==="theme"){a.jqx.utilities.setTheme(g,f,b.host);return}if(d==="disabled"){a.jqx.aria(b,"aria-disabled",b.disabled)}if(d==="value"&&g.toString().toUpperCase()!==f.toString().toUpperCase()){b.val(f);return}if(g!==f&&d==="radix"){b._changeRadix(f);return}if(g!==f&&d==="decimalNotation"&&b._radixNumber===10){if(f==="exponential"){b.element.value=b._getDecimalNotation("exponential")}else{b.element.value=b._number.toString(10)}}if(g!==f&&(d==="min"||d==="max")){b._setMinMax(d);b._validateValue(b.value,true);b.value=b.element.value;return}if(g!==f&&(d==="upperCase")&&b.element.value!==""){if(f===true){b.host.removeClass(b.toThemeProperty("jqx-formatted-input-lower-case"));b.host.addClass(b.toThemeProperty("jqx-formatted-input-upper-case"))}else{b.host.removeClass(b.toThemeProperty("jqx-formatted-input-upper-case"));b.host.addClass(b.toThemeProperty("jqx-formatted-input-lower-case"))}return}function c(i,j){var k=b.host.width();var h=i.outerWidth();if(j===false){b.host.width(k+h);i.hide();if(b.rtl===true){if(b.spinButtons===true){b._spinButtonsContainer.addClass(b.toThemeProperty("jqx-formatted-input-spin-buttons-container-rtl-border"))}if(b.dropDown===true){b._addon.removeClass(b.toThemeProperty("jqx-formatted-input-addon-rtl"))}}}else{b.host.width(k-h);i.show();if(b.rtl===true&&b.spinButtons===true&&b.dropDown===true){b._spinButtonsContainer.removeClass(b.toThemeProperty("jqx-formatted-input-spin-buttons-container-rtl-border"));b._addon.addClass(b.toThemeProperty("jqx-formatted-input-addon-rtl"))}}}function e(j,l){if(l===true){var k=a("<div></div>");if(b.baseHost){var h=b.baseHost.children("div");if((b.rtl===false&&j==="spinButtons")||(b.rtl===true&&j==="dropDown")){h.before(k)}else{h.after(k)}b.render();b.host.width(b.host.width()-k.outerWidth())}else{var n=b.element.id;b.host.removeAttr("id");b.host.wrap('<div id="'+n+'" style="display: inline-block;"></div>');var m=a("#"+n);if(b.rtl===false){m.append(k)}else{m.prepend(k)}var i=b.host.data();i.jqxFormattedInput.host=m;i.jqxFormattedInput.element=m[0];b.baseHost=m;b.baseHost.data(i);b.render();b.refresh()}}}if(d==="spinButtons"){if(g!==f){if(b._spinButtonsContainer){c(b._spinButtonsContainer,f)}else{e("spinButtons",f)}return}else{return}}if(g!==f&&d==="spinButtonsStep"){b._spinButtonsStepLong=new b.longObj.math.Long.fromNumber(f)}if(d==="dropDown"){if(g!==f){if(b._addon){c(b._addon,f)}else{e("dropDown",f)}return}else{return}}b.refresh()},select:function(d,e,b){var 
c=this;if(!b){b=c.$popup.find(".jqx-fill-state-pressed").attr("data-value")}c._changeRadix(parseInt(b,10));c._setMaxLength(true);c.close()},val:function(g){var f=this;if((g||g==="")&&!(typeof g==="object"&&a.isEmptyObject(g)===true)&&g!=="binary"&&g!=="octal"&&g!=="decimal"&&g!=="exponential"&&g!=="scientific"&&g!=="engineering"&&g!=="hexadecimal"){g=g.toString();if(g.toUpperCase()!==f.element.value.toString().toUpperCase()){var b=f.element.value;if(f.upperCase===true){g=g.toUpperCase()}var e=g.split("");for(var c=0;c<e.length;c++){if(!f._regex[""+f._radixNumber+""].test(e[c])){return}}var h=f._validateValue(g,true);f._raiseEvent("2",{value:h,oldValue:b,radix:f._radixNumber});f.value=h;return h}else{return g}}else{if(g&&!(typeof g==="object"&&a.isEmptyObject(g)===true)){if(g==="exponential"||g==="scientific"||g==="engineering"){return f._getDecimalNotation(g)}else{var d=f._getRadix(g);return f._number.toString(d)}}else{return f.element.value}}},_changeRadix:function(d){var f=this;var e=f._getRadix(d);var g=f.value!==""?f._number.toString(e):"";var b=f.radix;var c=f.value;f.radix=d;f._radixNumber=e;f.element.value=g;f.value=g;this._raiseEvent("3",{radix:d,oldRadix:b,value:g,oldValue:c})},_raiseEvent:function(f,c){if(c===undefined){c={owner:null}}var d=this.events[f];c.owner=this;var e=new a.Event(d);e.owner=this;if(f==2){c.type=this.changeType;this.changeType=null}e.args=c;if(e.preventDefault){e.preventDefault()}var b;if(this.baseHost){b=this.baseHost.trigger(e)}else{b=this.host.trigger(e)}return b},open:function(){var f=this;f._setPopupOptions();f._render(f._popupOptions);if(a.jqx.isHidden(this.host)){return}var c;if(f.baseHost){c=a.extend({},f.baseHost.coord(true),{height:f.baseHost[0].offsetHeight})}else{c=a.extend({},f.host.coord(true),{height:f.host[0].offsetHeight})}if(this.$popup.parent().length===0){var e=this.element.id+"_popup";this.$popup[0].id=e;a.jqx.aria(this,"aria-owns",e)}this.$popup.appendTo(a(document.body)).css({position:"absolute",zIndex:this.popupZIndex,top:c.top+c.height,left:c.left}).show();var b=0;var d=this.$popup.children();a.each(d,function(){b+=a(this).outerHeight(true)-1});this.$popup.height(b);this._opened=true;if(f.baseHost){f._addon.addClass(f.toThemeProperty("jqx-fill-state-pressed jqx-combobox-arrow-selected"));f._arrow.addClass(f.toThemeProperty("jqx-icon-arrow-down-selected"))}this._raiseEvent("0",{popup:this.$popup});a.jqx.aria(this,"aria-expanded",true);return this},close:function(){var b=this;this.$popup.hide();this._opened=false;if(b.baseHost){b._addon.removeClass(b.toThemeProperty("jqx-fill-state-pressed jqx-combobox-arrow-selected"));b._arrow.removeClass(b.toThemeProperty("jqx-icon-arrow-down-selected"))}this._raiseEvent("1",{popup:this.$popup});a.jqx.aria(this,"aria-expanded",false);return this},_render:function(c){var e=this;c=a(c).map(function(h,j){var k=j;var f;switch(h){case 0:f=2;break;case 1:f=8;break;case 2:f=10;break;case 3:f=16;break}h=a(e.item).attr("data-value",f);h.find("a").html(k).attr("data-value",f);var g="";if(e.rtl){g=" "+e.toThemeProperty("jqx-rtl")+" "+e.toThemeProperty("jqx-formatted-input-item-rtl")}h[0].className=e.toThemeProperty("jqx-item")+" "+e.toThemeProperty("jqx-menu-item")+" "+e.toThemeProperty("jqx-formatted-input-item")+" "+e.toThemeProperty("jqx-rc-all")+g;return h[0]});var b;switch(e._radixNumber){case 2:b=0;break;case 8:b=1;break;case 10:b=2;break;case 16:b=3;break}c.eq(b).addClass(this.toThemeProperty("jqx-fill-state-pressed"));this.$popup.html(c);if(!this.dropDownWidth){if(e.baseHost){var d=(typeof 
e.width==="string"&&e.width.charAt(e.width.length-1)==="%")?1:0;this.$popup.width(this.baseHost.outerWidth()-6-d)}else{this.$popup.width(this.host.outerWidth()-6)}}else{this.$popup.width(this.dropDownWidth)}return this},next:function(){var c=this.$popup.find(".jqx-fill-state-pressed").removeClass(this.toThemeProperty("jqx-fill-state-pressed")),b=c.next();if(!b.length){b=a(this.$popup.find("li")[0])}b.addClass(this.toThemeProperty("jqx-fill-state-pressed"))},prev:function(){var c=this.$popup.find(".jqx-fill-state-pressed").removeClass(this.toThemeProperty("jqx-fill-state-pressed")),b=c.prev();if(!b.length){b=this.$popup.find("li").last()}b.addClass(this.toThemeProperty("jqx-fill-state-pressed"))},addHandlers:function(){var c=this;this.addHandler(this.host,"focus",a.proxy(this.onFocus,this));this.addHandler(this.host,"blur",a.proxy(this.onBlur,this));this.addHandler(this.host,"keypress",a.proxy(this.keypress,this));this.addHandler(this.host,"keyup",a.proxy(this.keyup,this));this.addHandler(this.host,"keydown",a.proxy(this.keydown,this));this.addHandler(this.$popup,"mousedown",a.proxy(this.click,this));if(this.host.on){this.$popup.on("mouseenter","li",a.proxy(this.mouseenter,this))}else{this.$popup.bind("mouseenter","li",a.proxy(this.mouseenter,this))}this.addHandler(this.host,"change",function(f){f.stopPropagation();f.preventDefault()});if(c.baseHost){var d=c.baseHost.attr("id");if(c._spinButtonsContainer){var b=c._upbutton.add(c._downbutton);c.addHandler(c._upbutton,"mousedown.jqxFormattedInputSpinButtonUp"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c._upbutton.addClass(c.toThemeProperty("jqx-fill-state-pressed"));c.changeType="mouse";c._incrementOrDecrement("add")}});c.addHandler(c._upbutton,"mouseup.jqxFormattedInputSpinButtonUp"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c._upbutton.removeClass(c.toThemeProperty("jqx-fill-state-pressed"))}});c.addHandler(c._downbutton,"mousedown.jqxFormattedInputSpinButtonDown"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c.changeType="mouse";c._downbutton.addClass(c.toThemeProperty("jqx-fill-state-pressed"));c._incrementOrDecrement("subtract")}});c.addHandler(c._downbutton,"mouseup.jqxFormattedInputSpinButtonDown"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c._downbutton.removeClass(c.toThemeProperty("jqx-fill-state-pressed"))}});c.addHandler(b,"mouseenter.jqxFormattedInputSpinButtons"+d,function(g){if(!c.disabled&&c.value!==""&&c.value!==null){var f=a(g.target);if(f.hasClass("jqx-icon-arrow-up")||f.children().hasClass("jqx-icon-arrow-up")){c._upbutton.addClass(c.toThemeProperty("jqx-fill-state-hover"));c._upArrow.addClass(c.toThemeProperty("jqx-icon-arrow-up-hover"))}else{c._downbutton.addClass(c.toThemeProperty("jqx-fill-state-hover"));c._downArrow.addClass(c.toThemeProperty("jqx-icon-arrow-down-hover"))}}});c.addHandler(b,"mouseleave.jqxFormattedInputSpinButtons"+d,function(g){if(!c.disabled&&c.value!==""&&c.value!==null){var 
f=a(g.target);if(f.hasClass("jqx-icon-arrow-up")||f.children().hasClass("jqx-icon-arrow-up")){c._upbutton.removeClass(c.toThemeProperty("jqx-fill-state-hover"));c._upArrow.removeClass(c.toThemeProperty("jqx-icon-arrow-up-hover"))}else{c._downbutton.removeClass(c.toThemeProperty("jqx-fill-state-hover"));c._downArrow.removeClass(c.toThemeProperty("jqx-icon-arrow-down-hover"))}}});c.addHandler(a("body"),"mouseup.jqxFormattedInputSpinButtons"+d,function(){c._upbutton.add(c._downbutton).removeClass(c.toThemeProperty("jqx-fill-state-pressed"))})}if(c._addon){c.addHandler(c._addon,"click.jqxFormattedInputAddon"+d,function(){if(!c.disabled){if(c._opened){c.close()}else{c.open()}}});c.addHandler(c._addon,"mouseenter.jqxFormattedInputAddon"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c._addon.addClass(c.toThemeProperty("jqx-fill-state-hover jqx-combobox-arrow-hover"));c._arrow.addClass(c.toThemeProperty("jqx-icon-arrow-down-hover"))}});c.addHandler(c._addon,"mouseleave.jqxFormattedInputAddon"+d,function(){if(!c.disabled&&c.value!==""&&c.value!==null){c._addon.removeClass(c.toThemeProperty("jqx-fill-state-hover jqx-combobox-arrow-hover"));c._arrow.removeClass(c.toThemeProperty("jqx-icon-arrow-down-hover"))}});c.addHandler(c._addon.add(c._arrow),"blur.jqxFormattedInputAddon"+d,function(){if(c._opened&&!c.disabled){c.close()}})}a.jqx.utilities.resize(c.baseHost,function(){if(c._opened===true){c.close()}var e=0;if(c._spinButtonsContainer){e+=c._spinButtonsContainer.outerWidth()}if(c._addon){e+=c._addon.outerWidth()}c.host.css("width",c.baseHost.width()-e-1)})}},removeHandlers:function(){var c=this;this.removeHandler(this.host,"focus",a.proxy(this.onFocus,this));this.removeHandler(this.host,"blur",a.proxy(this.onBlur,this));this.removeHandler(this.host,"keypress",a.proxy(this.keypress,this));this.removeHandler(this.host,"keyup",a.proxy(this.keyup,this));this.removeHandler(this.host,"keydown",a.proxy(this.keydown,this));this.removeHandler(this.$popup,"mousedown",a.proxy(this.click,this));if(this.host.off){this.$popup.off("mouseenter","li",a.proxy(this.mouseenter,this))}else{this.$popup.unbind("mouseenter","li",a.proxy(this.mouseenter,this))}if(c.baseHost){var d=c.baseHost.attr("id");if(c._spinButtonsContainer){var b=c._upbutton.add(c._downbutton);c.removeHandler(c._upbutton,"mousedown.jqxFormattedInputSpinButtonUp"+d);c.removeHandler(c._upbutton,"mouseup.jqxFormattedInputSpinButtonUp"+d);c.removeHandler(c._downbutton,"mousedown.jqxFormattedInputSpinButtonDown"+d);c.removeHandler(c._downbutton,"mouseup.jqxFormattedInputSpinButtonDown"+d);c.removeHandler(b,"mouseenter.jqxFormattedInputSpinButtons"+d);c.removeHandler(b,"mouseleave.jqxFormattedInputSpinButtons"+d);c.removeHandler(a("body"),"mouseup.jqxFormattedInputSpinButtons"+d)}if(c._addon){c.removeHandler(c._addon,"click.jqxFormattedInputAddon"+d);c.removeHandler(c._addon,"mouseenter.jqxFormattedInputAddon"+d);c.removeHandler(c._addon,"mouseleave.jqxFormattedInputAddon"+d);c.removeHandler(c._addon.add(c._arrow),"blur.jqxFormattedInputAddon"+d)}}},move:function(b){if(!this._opened){return}switch(b.keyCode){case 9:case 13:case 27:b.preventDefault();break;case 38:b.preventDefault();this.prev();break;case 40:b.preventDefault();this.next();break}b.stopPropagation()},keydown:function(k){var j=this;j.changeType="keyboard";this.suppressKeyPressRepeat=~a.inArray(k.keyCode,[40,38,9,13,27]);this.move(k);var 
o=!k.charCode?k.which:k.charCode,m=String.fromCharCode(o);if(o>=96&&o<=105){m=o-96;o=o-48}if(k.altKey===true){if(o===40){if(j._addon){this.open()}return}else{if(o===38){if(j._addon){this.close()}return}}}if(k.ctrlKey===true){if(o===67){return}else{if(o===65){j.selectAll();return}}}var d=[8,9,13,37,38,39,40,46,88];var i=j._regex[""+j._radixNumber+""];if(d.indexOf(o)===-1&&(!i.test(m)&&!i.test(k.key)&&!i.test(k["char"]))){k.preventDefault();return false}else{var l=j.host[0].selectionStart;var g=j.host[0].selectionEnd-l;var f=this._getCaretPosition(this.host[0]);var b=this.element.value;var c=b.split("");if(o===8){if(g>0){c.splice(l,g)}else{c.splice(f-1,1)}}else{if(o===46){if(g>0){c.splice(l,g)}else{c.splice(f,1)}}else{if(o===88){if(k.ctrlKey===true){if(g>0){c.splice(l,g)}}else{k.preventDefault()}}else{if(o===189){if(c[0]==="-"){c.splice(0,1);j._minus=false}else{c.splice(0,0,"-");j._minus=true}k.preventDefault()}else{var h=d.indexOf(o)===-1?m:"";if(g>0){c.splice(l,g);c.splice(l,0,h)}else{c.splice(f,0,h)}}}}}c=c.join("");if(c!==b){var n=j._validateValue(c,false);if(n===false){j._inputAndAddon.addClass(j.toThemeProperty("jqx-input-invalid"))}else{j._inputAndAddon.removeClass(j.toThemeProperty("jqx-input-invalid"))}}}},keypress:function(c){var b=this;if(b.suppressKeyPressRepeat){return}b.move(c)},keyup:function(c){var b=this;switch(c.keyCode){case 40:case 38:case 16:case 17:case 18:break;case 9:case 13:if(this._opened){this.select(c,this)}else{b._change()}break;case 27:if(!this._opened){return}this.close();break;case 189:if(b._radixNumber===10){if(b._minus===true){b.element.value="-"+b.element.value}else{b.element.value=b.element.value.slice(1)}}break}c.stopPropagation();c.preventDefault();if(b.element.value!==""){if(b.upperCase){b.host.addClass(b.toThemeProperty("jqx-formatted-input-upper-case"))}else{b.host.addClass(b.toThemeProperty("jqx-formatted-input-lower-case"))}if(b._spinButtonsContainer){b._spinButtonsContainer.removeClass(b.toThemeProperty("jqx-fill-state-disabled"))}}else{b.host.removeClass(b.toThemeProperty("jqx-formatted-input-upper-case jqx-formatted-input-lower-case"));if(b._spinButtonsContainer){b._spinButtonsContainer.addClass(b.toThemeProperty("jqx-fill-state-disabled"))}}if(b.isMaterialized()&&b.hint){setTimeout(function(){b.label[0].innerHTML=b.placeHolder;if(b.baseHost){if(b.element.value.length===0){b.baseHost[0].removeAttribute("hint")}else{b.baseHost[0].setAttribute("hint",true)}}})}},_getCaretPosition:function(b){var d=0;if(document.selection){b.focus();var c=document.selection.createRange();c.moveStart("character",-b.value.length);d=c.text.length}else{if(b.selectionStart||b.selectionStart==="0"){d=b.selectionStart}}return(d)},onBlur:function(){var b=this;if(b._opened){b.close()}b._setMaxLength();b._inputAndAddon.removeClass(b.toThemeProperty("jqx-fill-state-focus"));b._change();if(b._radixNumber===10&&b.decimalNotation==="exponential"){b.element.value=b._getDecimalNotation("exponential")}b._refreshPlaceHolder()},onFocus:function(){var b=this;b._setMaxLength(true);b._inputAndAddon.addClass(b.toThemeProperty("jqx-fill-state-focus"));if(b._radixNumber===10&&b.decimalNotation==="exponential"){b.element.value=b._number.toString(10)}},click:function(c){c.stopPropagation();c.preventDefault();var b=a(c.target).attr("data-value");this.select(c,this,b)},mouseenter:function(b){this.$popup.find(".jqx-fill-state-pressed").removeClass(this.toThemeProperty("jqx-fill-state-pressed"));a(b.currentTarget).addClass(this.toThemeProperty("jqx-fill-state-pressed"))},_change:function(){var 
c=this;var b=c.value;var d=c._validateValue(c.element.value,true);c._inputAndAddon.removeClass(c.toThemeProperty("jqx-input-invalid"));if(d.toUpperCase()!==b.toString().toUpperCase()){c._raiseEvent("2",{value:d,oldValue:b,radix:c._radixNumber});c.value=d}},_getRadix:function(b){switch(b){case 10:case"decimal":return 10;case 2:case"binary":return 2;case 8:case"octal":return 8;case 16:case"hexadecimal":return 16}},_setPopupOptions:function(){var b=this;b._popupOptions=new Array();if(b.value!==""){b._popupOptions.push(b._number.toString(2)+" <em>(BIN)</em>");b._popupOptions.push(b._number.toString(8)+" <em>(OCT)</em>");b._popupOptions.push(b._number.toString(10)+" <em>(DEC)</em>");b._popupOptions.push(b._number.toString(16)+" <em>(HEX)</em>")}else{b._popupOptions.push("BIN");b._popupOptions.push("OCT");b._popupOptions.push("DEC");b._popupOptions.push("HEX")}},_validateValue:function(e,g){var d=this;if(e!==""){var f=new d.longObj.math.Long.fromString((e).toString(),d._radixNumber);if(f.lessThan(d._minLong)){if(g){d._number=d._minLong;var c=d._minLong.toString(d._radixNumber);if(d._radixNumber===16&&d.upperCase===true){c=c.toUpperCase()}d.element.value=c;return c}else{return false}}else{if(f.greaterThan(d._maxLong)){if(g){d._number=d._maxLong;var b=d._maxLong.toString(d._radixNumber);if(d._radixNumber===16&&d.upperCase===true){b=b.toUpperCase()}d.element.value=b;return b}else{return false}}else{if(g){d._number=f;d.element.value=e;return e}else{return true}}}}else{if(g){d.element.value="";return e}else{return true}}},_getNegativeDecimal:function(l,h){var o=l;if(h===8){var n=new Array();for(var f=0;f<11;f++){var b=parseInt(l.charAt(f),8).toString(2);while(b.length!==3){b="0"+b}n.push(b)}o=n.join("");if(o.charAt(0)==="0"){o=o.slice(1)}}else{if(h===16){var p=new Array();for(var e=0;e<8;e++){var m=parseInt(l.charAt(e),16).toString(2);while(m.length!==4){m="0"+m}p.push(m)}o=p.join("")}}var d="";for(var c=0;c<o.length;c++){var g=o.charAt(c)==="1"?"0":"1";d+=g}d=(parseInt(d,2)+1)*-1;return d},_setMaxLength:function(c){var d=this;var b;if(c===true){switch(d._radixNumber){case 2:b=64;break;case 8:b=22;break;case 10:b=20;break;case 16:b=16;break}}else{b=524288}d.host.attr("maxlength",b)},_setMinMax:function(b){var c=this;c["_"+b+"Long"]=new c.longObj.math.Long.fromString((c[b]).toString(),c._radixNumber)},_getDecimalNotation:function(c){var e=this;var f=e._number.toString(10);function h(k){if(k==="0"){return parseInt(k,10).toExponential()}var j;if(k.charAt(0)==="-"){j="-";k=k.slice(1,k.length)}else{j=""}var l=k.length-1;while(k.charAt(k.length-1)==="0"){k=k.slice(0,k.length-1)}var i=k.slice(1,k.length);if(i!==""){i="."+i}return j+""+k.charAt(0)+i+"e+"+l}function d(l){var k=l.indexOf("e");var j=l.slice(k+1);var i=l.slice(0,k+1);i=i.replace("e","×10");i+=e._toSuperScript(j);i=i.replace("+","");return i}function b(o){var n=o.indexOf("e");var m=o.slice(n+1);var k=o.slice(0,n);var l=parseInt(m,10)%3;k=k*Math.pow(10,l);var j=o.slice(0,n).length-l-2;if(j>=0){k=k.toFixed(j)}var i=k+"×10"+e._toSuperScript((parseInt(m,10)-l).toString());return i}var g=h(f);if(c==="scientific"){return d(g)}else{if(c==="engineering"){return b(g)}else{return g}}},_toSuperScript:function(h,g){var f="-0123456789";var d="⁻⁰¹²³⁴⁵⁶⁷⁸⁹";var c="";for(var e=0;e<h.length;e++){if(g===true){var b=d.indexOf(h.charAt(e));c+=(b!==-1?f[b]:h[e])}else{var j=f.indexOf(h.charAt(e));c+=(j!==-1?d[j]:h[e])}}return c},_incrementOrDecrement:function(c){var b=this;if(b._number.toString(b._radixNumber)!==b.element.value){b._number=new 
b.longObj.math.Long.fromString(b.element.value,b._radixNumber)}b._number=b._number[c](b._spinButtonsStepLong);b.element.value=b._number.toString(b._radixNumber);b._change()},_negativeBinary:function(u,r){var s="";u=u.slice(1,u.length);while(u.length<64){u="0"+u}for(var o=0;o<u.length;o++){var t=u.charAt(o)==="1"?"0":"1";s+=t}var d=true;var g="";for(var n=s.length-1;n>=0;n--){var q=s.charAt(n);var b;if(q==="0"){if(d===true){b="1";d=false}else{b="0"}}else{if(d===true){b="0"}else{b="1"}}g=b+""+g}switch(r){case 2:return g;case 8:g="00"+g;var f="";for(var m=22;m>=1;m--){var p=g[m*3-3]+""+g[m*3-2]+""+g[m*3-1];f=parseInt(p,2).toString(8)+""+f}return f;case 16:var e="";for(var h=16;h>=1;h--){var c=g[h*4-4]+""+g[h*4-3]+""+g[h*4-2]+""+g[h*4-1];e=parseInt(c,2).toString(16)+""+e}return e}},_Long:function(){var c=this;c.longObj=new Object();var b=c.longObj;b.math=new Object();b.math.Long=new Object();b.math.Long=function(d,e){this.lowBits=d|0;this.highBits=e|0};b.math.Long.IntCache={};b.math.Long.fromInt=function(d){if(-128<=d&&d<128){var f=b.math.Long.IntCache[d];if(f){return f}}var e=new b.math.Long(d|0,d<0?-1:0);if(-128<=d&&d<128){b.math.Long.IntCache[d]=e}return e};b.math.Long.fromNumber=function(d){if(isNaN(d)||!isFinite(d)){return b.math.Long.ZERO}else{if(d<=-b.math.Long.TWO_PWR_63_DBL_){return b.math.Long.MIN_VALUE}else{if(d+1>=b.math.Long.TWO_PWR_63_DBL_){return b.math.Long.MAX_VALUE}else{if(d<0){return b.math.Long.fromNumber(-d).negate()}else{return new b.math.Long((d%b.math.Long.TWO_PWR_32_DBL_)|0,(d/b.math.Long.TWO_PWR_32_DBL_)|0)}}}}};b.math.Long.fromBits=function(d,e){return new b.math.Long(d,e)};b.math.Long.fromString=function(f,j){if(f.length===0){throw new Error("number format error: empty string")}var g=j||10;if(g<2||36<g){throw new Error("radix out of range: "+g)}if(f.charAt(0)==="-"){return b.math.Long.fromString(f.substring(1),g).negate()}else{if(f.indexOf("-")>=0){throw new Error('number format error: interior "-" character: '+f)}}var k=b.math.Long.fromNumber(Math.pow(g,8));var m=b.math.Long.ZERO;for(var e=0;e<f.length;e+=8){var l=Math.min(8,f.length-e);var h=parseInt(f.substring(e,e+l),g);if(l<8){var d=b.math.Long.fromNumber(Math.pow(g,l));m=m.multiply(d).add(b.math.Long.fromNumber(h))}else{m=m.multiply(k);m=m.add(b.math.Long.fromNumber(h))}}return m};b.math.Long.TWO_PWR_16_DBL_=1<<16;b.math.Long.TWO_PWR_24_DBL_=1<<24;b.math.Long.TWO_PWR_32_DBL_=b.math.Long.TWO_PWR_16_DBL_*b.math.Long.TWO_PWR_16_DBL_;b.math.Long.TWO_PWR_31_DBL_=b.math.Long.TWO_PWR_32_DBL_/2;b.math.Long.TWO_PWR_48_DBL_=b.math.Long.TWO_PWR_32_DBL_*b.math.Long.TWO_PWR_16_DBL_;b.math.Long.TWO_PWR_64_DBL_=b.math.Long.TWO_PWR_32_DBL_*b.math.Long.TWO_PWR_32_DBL_;b.math.Long.TWO_PWR_63_DBL_=b.math.Long.TWO_PWR_64_DBL_/2;b.math.Long.ZERO=b.math.Long.fromInt(0);b.math.Long.ONE=b.math.Long.fromInt(1);b.math.Long.NEG_ONE=b.math.Long.fromInt(-1);b.math.Long.MAX_VALUE=b.math.Long.fromBits(4294967295|0,2147483647|0);b.math.Long.MIN_VALUE=b.math.Long.fromBits(0,2147483648|0);b.math.Long.TWO_PWR_24_=b.math.Long.fromInt(1<<24);b.math.Long.prototype.toInt=function(){return this.lowBits};b.math.Long.prototype.toNumber=function(){return this.highBits*b.math.Long.TWO_PWR_32_DBL_+this.getLowBitsUnsigned()};b.math.Long.prototype.toString=function(j){var h=j||10;if(h<2||36<h){throw new Error("radix out of range: "+h)}if(this.isZero()){return"0"}var k,m;if(this.isNegative()){if(this.equals(b.math.Long.MIN_VALUE)){var f=b.math.Long.fromNumber(h);var d=this.div(f);k=d.multiply(f).subtract(this);return 
d.toString(h)+k.toInt().toString(h)}else{switch(h){case 2:case 8:case 16:m="-"+this.negate().toString(2);return c._negativeBinary(m,h);default:m="-"+this.negate().toString(h);return m}}}var l=b.math.Long.fromNumber(Math.pow(h,6));k=this;m="";while(true){var i=k.div(l);var g=k.subtract(i.multiply(l)).toInt();var e=g.toString(h);k=i;if(k.isZero()){return e+m}else{while(e.length<6){e="0"+e}m=""+e+m}}};b.math.Long.prototype.getHighBits=function(){return this.highBits};b.math.Long.prototype.getLowBits=function(){return this.lowBits};b.math.Long.prototype.getLowBitsUnsigned=function(){return(this.lowBits>=0)?this.lowBits:b.math.Long.TWO_PWR_32_DBL_+this.lowBits};b.math.Long.prototype.getNumBitsAbs=function(){if(this.isNegative()){if(this.equals(b.math.Long.MIN_VALUE)){return 64}else{return this.negate().getNumBitsAbs()}}else{var e=this.highBits!==0?this.highBits:this.lowBits;for(var d=31;d>0;d--){if((e&(1<<d))!==0){break}}return this.highBits!==0?d+33:d+1}};b.math.Long.prototype.isZero=function(){return this.highBits===0&&this.lowBits===0};b.math.Long.prototype.isNegative=function(){return this.highBits<0};b.math.Long.prototype.isOdd=function(){return(this.lowBits&1)===1};b.math.Long.prototype.equals=function(d){return(this.highBits===d.highBits)&&(this.lowBits===d.lowBits)};b.math.Long.prototype.notEquals=function(d){return(this.highBits!==d.highBits)||(this.lowBits!==d.lowBits)};b.math.Long.prototype.lessThan=function(d){return this.compare(d)<0};b.math.Long.prototype.lessThanOrEqual=function(d){return this.compare(d)<=0};b.math.Long.prototype.greaterThan=function(d){return this.compare(d)>0};b.math.Long.prototype.greaterThanOrEqual=function(d){return this.compare(d)>=0};b.math.Long.prototype.compare=function(e){if(this.equals(e)){return 0}var d=this.isNegative();var f=e.isNegative();if(d&&!f){return -1}if(!d&&f){return 1}if(this.subtract(e).isNegative()){return -1}else{return 1}};b.math.Long.prototype.negate=function(){if(this.equals(b.math.Long.MIN_VALUE)){return b.math.Long.MIN_VALUE}else{return this.not().add(b.math.Long.ONE)}};b.math.Long.prototype.add=function(k){var i=this.highBits>>>16;var e=this.highBits&65535;var l=this.lowBits>>>16;var f=this.lowBits&65535;var n=k.highBits>>>16;var g=k.highBits&65535;var o=k.lowBits>>>16;var h=k.lowBits&65535;var p=0,j=0,d=0,m=0;m+=f+h;d+=m>>>16;m&=65535;d+=l+o;j+=d>>>16;d&=65535;j+=e+g;p+=j>>>16;j&=65535;p+=i+n;p&=65535;return b.math.Long.fromBits((d<<16)|m,(p<<16)|j)};b.math.Long.prototype.subtract=function(d){return this.add(d.negate())};b.math.Long.prototype.multiply=function(k){if(this.isZero()){return b.math.Long.ZERO}else{if(k.isZero()){return b.math.Long.ZERO}}if(this.equals(b.math.Long.MIN_VALUE)){return k.isOdd()?b.math.Long.MIN_VALUE:b.math.Long.ZERO}else{if(k.equals(b.math.Long.MIN_VALUE)){return this.isOdd()?b.math.Long.MIN_VALUE:b.math.Long.ZERO}}if(this.isNegative()){if(k.isNegative()){return this.negate().multiply(k.negate())}else{return this.negate().multiply(k).negate()}}else{if(k.isNegative()){return this.multiply(k.negate()).negate()}}if(this.lessThan(b.math.Long.TWO_PWR_24_)&&k.lessThan(b.math.Long.TWO_PWR_24_)){return b.math.Long.fromNumber(this.toNumber()*k.toNumber())}var i=this.highBits>>>16;var e=this.highBits&65535;var l=this.lowBits>>>16;var f=this.lowBits&65535;var n=k.highBits>>>16;var g=k.highBits&65535;var o=k.lowBits>>>16;var h=k.lowBits&65535;var 
p=0,j=0,d=0,m=0;m+=f*h;d+=m>>>16;m&=65535;d+=l*h;j+=d>>>16;d&=65535;d+=f*o;j+=d>>>16;d&=65535;j+=e*h;p+=j>>>16;j&=65535;j+=l*o;p+=j>>>16;j&=65535;j+=f*g;p+=j>>>16;j&=65535;p+=i*h+e*o+l*g+f*n;p&=65535;return b.math.Long.fromBits((d<<16)|m,(p<<16)|j)};b.math.Long.prototype.div=function(f){if(f.isZero()){throw new Error("division by zero")}else{if(this.isZero()){return b.math.Long.ZERO}}var i,k;if(this.equals(b.math.Long.MIN_VALUE)){if(f.equals(b.math.Long.ONE)||f.equals(b.math.Long.NEG_ONE)){return b.math.Long.MIN_VALUE}else{if(f.equals(b.math.Long.MIN_VALUE)){return b.math.Long.ONE}else{var d=this.shiftRight(1);i=d.div(f).shiftLeft(1);if(i.equals(b.math.Long.ZERO)){return f.isNegative()?b.math.Long.ONE:b.math.Long.NEG_ONE}else{k=this.subtract(f.multiply(i));var m=i.add(k.div(f));return m}}}}else{if(f.equals(b.math.Long.MIN_VALUE)){return b.math.Long.ZERO}}if(this.isNegative()){if(f.isNegative()){return this.negate().div(f.negate())}else{return this.negate().div(f).negate()}}else{if(f.isNegative()){return this.div(f.negate()).negate()}}var g=b.math.Long.ZERO;k=this;while(k.greaterThanOrEqual(f)){i=Math.max(1,Math.floor(k.toNumber()/f.toNumber()));var l=Math.ceil(Math.log(i)/Math.LN2);var j=(l<=48)?1:Math.pow(2,l-48);var e=b.math.Long.fromNumber(i);var h=e.multiply(f);while(h.isNegative()||h.greaterThan(k)){i-=j;e=b.math.Long.fromNumber(i);h=e.multiply(f)}if(e.isZero()){e=b.math.Long.ONE}g=g.add(e);k=k.subtract(h)}return g};b.math.Long.prototype.modulo=function(d){return this.subtract(this.div(d).multiply(d))};b.math.Long.prototype.not=function(){return b.math.Long.fromBits(~this.lowBits,~this.highBits)};b.math.Long.prototype.and=function(d){return b.math.Long.fromBits(this.lowBits&d.lowBits,this.highBits&d.highBits)};b.math.Long.prototype.or=function(d){return b.math.Long.fromBits(this.lowBits|d.lowBits,this.highBits|d.highBits)};b.math.Long.prototype.xor=function(d){return b.math.Long.fromBits(this.lowBits^d.lowBits,this.highBits^d.highBits)};b.math.Long.prototype.shiftLeft=function(f){f&=63;if(f===0){return this}else{var d=this.lowBits;if(f<32){var e=this.highBits;return b.math.Long.fromBits(d<<f,(e<<f)|(d>>>(32-f)))}else{return b.math.Long.fromBits(0,d<<(f-32))}}};b.math.Long.prototype.shiftRight=function(f){f&=63;if(f===0){return this}else{var e=this.highBits;if(f<32){var d=this.lowBits;return b.math.Long.fromBits((d>>>f)|(e<<(32-f)),e>>f)}else{return b.math.Long.fromBits(e>>(f-32),e>=0?0:-1)}}};b.math.Long.prototype.shiftRightUnsigned=function(f){f&=63;if(f===0){return this}else{var e=this.highBits;if(f<32){var d=this.lowBits;return b.math.Long.fromBits((d>>>f)|(e<<(32-f)),e>>>f)}else{if(f===32){return b.math.Long.fromBits(e,0)}else{return b.math.Long.fromBits(e>>>(f-32),0)}}}}}})})(jqxBaseFramework);
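// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; the element id "#input" and the option
// values below are assumptions, not part of this file). jqxFormattedInput
// edits a signed 64-bit integer and can display it in binary, octal, decimal
// or hexadecimal; the drop-down built by _setPopupOptions()/_render() above
// lets the user switch radix at runtime. Invocation follows the usual
// jqWidgets plugin pattern:
//
//     $('#input').jqxFormattedInput({
//         radix: 'hexadecimal',   // 2 | 8 | 10 | 16, or the radix name
//         value: '1f4',           // parsed via the internal 64-bit Long type
//         spinButtons: true,      // the up/down buttons wired in addHandlers()
//         dropDown: true,         // the radix drop-down addon
//         width: 250,
//         height: 25
//     });
//
//     // val() with a radix name converts the current number without
//     // changing the widget's own radix (see the val() method above):
//     var asBinary = $('#input').jqxFormattedInput('val', 'binary');
//
//     // _raiseEvent("2", ...) fires with value, oldValue and radix in
//     // event.args (assuming the widget's events table maps index 2 to
//     // its change event, as _change() suggests):
//     $('#input').on('change', function (event) {
//         console.log(event.args.value, event.args.radix);
//     });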
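// ---------------------------------------------------------------------------
// Note on the internal Long type built in _Long() above: it is a 64-bit
// two's-complement integer stored as two 32-bit halves (lowBits/highBits),
// closely following the goog.math.Long design, so the widget runs on
// browsers without BigInt. For negative values at radix 2, 8 or 16,
// _negativeBinary() pads to 64 bits, inverts every bit and adds one --
// i.e. it renders the two's-complement bit pattern. A minimal sketch of the
// same idea, written with BigInt purely for illustration (an assumption;
// the widget itself never uses BigInt):
//
//     function parseRadix(str, radix) {
//         // Parse a signed integer string in an arbitrary radix.
//         var negative = str.charAt(0) === '-';
//         var digits = negative ? str.slice(1) : str;
//         var value = 0n;
//         for (var i = 0; i < digits.length; i++) {
//             value = value * BigInt(radix) + BigInt(parseInt(digits.charAt(i), radix));
//         }
//         return negative ? -value : value;
//     }
//
//     function toTwosComplement(value, radix) {
//         // Non-negative values print directly; negative values wrap
//         // into [0, 2^64), which is exactly the 64-bit bit pattern.
//         if (value >= 0n) { return value.toString(radix); }
//         return ((1n << 64n) + value).toString(radix);
//     }
//
//     toTwosComplement(parseRadix('-5', 10), 16); // "fffffffffffffffb"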
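// ---------------------------------------------------------------------------
// Note on _getDecimalNotation()/_toSuperScript() above: decimal values can
// be rendered in exponential ("1.2345e+4"), scientific ("1.2345×10⁴") or
// engineering notation, where the exponent is reduced to the nearest lower
// multiple of three and the mantissa is scaled to match ("12.345×10³" for
// 12345). _toSuperScript() maps the characters "-0123456789" onto the
// Unicode superscripts "⁻⁰¹²³⁴⁵⁶⁷⁸⁹", and performs the reverse mapping when
// its second argument is true.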