Dataset schema (column name, dtype, observed range or number of classes):

  hexsha                                     string      length 40 to 40
  size                                       int64       5 to 2.06M
  ext                                        string      11 classes
  lang                                       string      1 class
  max_stars_repo_path                        string      length 3 to 251
  max_stars_repo_name                        string      length 4 to 130
  max_stars_repo_head_hexsha                 string      length 40 to 78
  max_stars_repo_licenses                    sequence    length 1 to 10
  max_stars_count                            int64       1 to 191k
  max_stars_repo_stars_event_min_datetime    string      length 24 to 24
  max_stars_repo_stars_event_max_datetime    string      length 24 to 24
  max_issues_repo_path                       string      length 3 to 251
  max_issues_repo_name                       string      length 4 to 130
  max_issues_repo_head_hexsha                string      length 40 to 78
  max_issues_repo_licenses                   sequence    length 1 to 10
  max_issues_count                           int64       1 to 116k
  max_issues_repo_issues_event_min_datetime  string      length 24 to 24
  max_issues_repo_issues_event_max_datetime  string      length 24 to 24
  max_forks_repo_path                        string      length 3 to 251
  max_forks_repo_name                        string      length 4 to 130
  max_forks_repo_head_hexsha                 string      length 40 to 78
  max_forks_repo_licenses                    sequence    length 1 to 10
  max_forks_count                            int64       1 to 105k
  max_forks_repo_forks_event_min_datetime    string      length 24 to 24
  max_forks_repo_forks_event_max_datetime    string      length 24 to 24
  content                                    string      length 1 to 1.05M
  avg_line_length                            float64     1 to 1.02M
  max_line_length                            int64       3 to 1.04M
  alphanum_fraction                          float64     0 to 1
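The rows that follow use exactly these columns. As a minimal sketch of how such a table is typically consumed, assuming it is hosted as a Hugging Face `datasets`-style code corpus (the dataset name below is a placeholder, not given in this dump):

from datasets import load_dataset

# Placeholder dataset name; substitute the corpus this schema actually belongs to.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

for row in ds.take(3):
    # Each streamed row exposes the columns listed in the schema above.
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
    print(row["size"], row["ext"], row["alphanum_fraction"])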
6ab9d713b15cf7e2722180a91c20d945c012ee0e
514
py
Python
test/crossrunner/compat.py
BluechipSystems/thrift
c595aa18cba0032e074f9585aa2d6ca548f07197
[ "Apache-2.0" ]
null
null
null
test/crossrunner/compat.py
BluechipSystems/thrift
c595aa18cba0032e074f9585aa2d6ca548f07197
[ "Apache-2.0" ]
null
null
null
test/crossrunner/compat.py
BluechipSystems/thrift
c595aa18cba0032e074f9585aa2d6ca548f07197
[ "Apache-2.0" ]
null
null
null
import os
import sys

if sys.version_info[0] == 2:
    _ENCODE = sys.getfilesystemencoding()
    logfile_open = open
else:
    path_join = os.path.join
    str_join = str.join
20.56
53
0.678988
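Each record closes with three derived statistics (avg_line_length, max_line_length, alphanum_fraction). A minimal sketch of how such values could be recomputed from the content field, assuming the usual definitions (mean and maximum line length in characters, and the fraction of alphanumeric characters in the file):

def content_stats(content: str):
    # Assumed definitions; not taken from this dump, but they produce values
    # of the same shape as the statistics attached to each record.
    lines = content.splitlines() or [""]
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction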
6aba555a9c95d6e5cd6afe857fa51108b432e61a
1,518
py
Python
test/test_vom.py
usamaahmadkhan/vpp
cece3e682f6dba68ba86b66b295f99a33496d9ee
[ "Apache-2.0" ]
null
null
null
test/test_vom.py
usamaahmadkhan/vpp
cece3e682f6dba68ba86b66b295f99a33496d9ee
[ "Apache-2.0" ]
null
null
null
test/test_vom.py
usamaahmadkhan/vpp
cece3e682f6dba68ba86b66b295f99a33496d9ee
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """ VAPI test """ import unittest import os import signal from framework import VppTestCase, running_extended_tests, \ VppTestRunner, Worker if __name__ == '__main__': unittest.main(testRunner=VppTestRunner)
33
75
0.590909
6abaa4631fe046cd2892f35a91bca62bc7f0f887
3,096
py
Python
locations/spiders/tesco.py
bealbrown/allhours
f750ee7644246a97bd16879f14115d7845f76b89
[ "MIT" ]
null
null
null
locations/spiders/tesco.py
bealbrown/allhours
f750ee7644246a97bd16879f14115d7845f76b89
[ "MIT" ]
null
null
null
locations/spiders/tesco.py
bealbrown/allhours
f750ee7644246a97bd16879f14115d7845f76b89
[ "MIT" ]
null
null
null
import json
import re

import scrapy

from locations.hourstudy import inputoutput

DAYS = {
    'mo': 'Mo',
    'tu': 'Tu',
    'we': 'We',
    'fr': 'Fr',
    'th': 'Th',
    'sa': 'Sa',
    'su': 'Su',
}
38.222222
257
0.549096
6abb4495b3d52a4655573442ecead7d8db0e2301
12,883
py
Python
astropy/table/serialize.py
tacaswell/astropy
75046e61916da36dffe87ddf59a7c6bfb00de81c
[ "BSD-3-Clause" ]
1
2019-10-05T18:20:27.000Z
2019-10-05T18:20:27.000Z
astropy/table/serialize.py
tacaswell/astropy
75046e61916da36dffe87ddf59a7c6bfb00de81c
[ "BSD-3-Clause" ]
null
null
null
astropy/table/serialize.py
tacaswell/astropy
75046e61916da36dffe87ddf59a7c6bfb00de81c
[ "BSD-3-Clause" ]
null
null
null
from importlib import import_module import re from copy import deepcopy from collections import OrderedDict from astropy.utils.data_info import MixinInfo from .column import Column from .table import Table, QTable, has_info_class from astropy.units.quantity import QuantityInfo __construct_mixin_classes = ('astropy.time.core.Time', 'astropy.time.core.TimeDelta', 'astropy.units.quantity.Quantity', 'astropy.coordinates.angles.Latitude', 'astropy.coordinates.angles.Longitude', 'astropy.coordinates.angles.Angle', 'astropy.coordinates.distances.Distance', 'astropy.coordinates.earth.EarthLocation', 'astropy.coordinates.sky_coordinate.SkyCoord', 'astropy.table.table.NdarrayMixin', 'astropy.table.column.MaskedColumn') def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()): """Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized. """ obj_attrs = col.info._represent_as_dict() ordered_keys = col.info._represent_as_dict_attrs # If serialization is not required (see function docstring above) # or explicitly specified as excluded, then treat as a normal column. if not obj_attrs or col.__class__ in exclude_classes: new_cols.append(col) return # Subtlety here is handling mixin info attributes. The basic list of such # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. # - name: handled directly [DON'T store] # - unit: DON'T store if this is a parent attribute # - dtype: captured in plain Column if relevant [DON'T store] # - format: possibly irrelevant but settable post-object creation [DO store] # - description: DO store # - meta: DO store info = {} for attr, nontrivial, xform in (('unit', lambda x: x is not None and x != '', str), ('format', lambda x: x is not None, None), ('description', lambda x: x is not None, None), ('meta', lambda x: x, None)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = xform(col_attr) if xform else col_attr data_attrs = [key for key in ordered_keys if key in obj_attrs and getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]] for data_attr in data_attrs: data = obj_attrs[data_attr] # New column name combines the old name and attribute # (e.g. skycoord.ra, skycoord.dec).unless it is the primary data # attribute for the column (e.g. value for Quantity or data # for MaskedColumn) if data_attr == col.info._represent_as_dict_primary_data: new_name = name else: new_name = name + '.' 
+ data_attr if not has_info_class(data, MixinInfo): new_cols.append(Column(data, name=new_name, **info)) obj_attrs[data_attr] = SerializedColumn({'name': new_name}) else: # recurse. This will define obj_attrs[new_name]. _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name)) # Strip out from info any attributes defined by the parent for attr in col.info.attrs_from_parent: if attr in info: del info[attr] if info: obj_attrs['__info__'] = info # Store the fully qualified class name obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__ mixin_cols[name] = obj_attrs def represent_mixins_as_columns(tbl, exclude_classes=()): """Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of classes Exclude any mixin columns which are instannces of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0 """ # Dict of metadata for serializing each column, keyed by column name. # Gets filled in place by _represent_mixin_as_column(). mixin_cols = {} # List of columns for the output table. For plain Column objects # this will just be the original column object. new_cols = [] # Go through table columns and represent each column as one or more # plain Column objects (in new_cols) + metadata (in mixin_cols). for col in tbl.itercols(): _represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes) # If no metadata was created then just return the original table. if not mixin_cols: return tbl meta = deepcopy(tbl.meta) meta['__serialized_columns__'] = mixin_cols out = Table(new_cols, meta=meta, copy=False) return out
41.028662
87
0.645579
6abe0f56148406c214bae2a7180acf428b205f37
1,914
py
Python
UVa 573 - The Snail/sample/main.py
tadvi/uva
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
[ "MIT" ]
1
2020-11-24T03:17:21.000Z
2020-11-24T03:17:21.000Z
UVa 573 - The Snail/sample/main.py
tadvi/uva
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
[ "MIT" ]
null
null
null
UVa 573 - The Snail/sample/main.py
tadvi/uva
0ac0cbdf593879b4fb02a3efc09adbb031cb47d5
[ "MIT" ]
1
2021-04-11T16:22:31.000Z
2021-04-11T16:22:31.000Z
'''
Created on Jun 18, 2013

@author: Yubin Bai

All rights reserved.
'''
import time
from multiprocessing.pool import Pool

parallelSolve = False
infinity = 1 << 30

if __name__ == '__main__':
    solver = Solver()
    if parallelSolve:
        solver.parallel()
    else:
        solver.sequential()
24.538462
78
0.527168
6abe9ac6695fe5a1d34b503ad56c8e41374a9ea6
5,074
py
Python
scibert/models/text_classifier.py
tomhoper/scibert
3cc65f433808f7879c973dc4fc41bd25e465dc15
[ "Apache-2.0" ]
1,143
2019-03-27T01:49:11.000Z
2022-03-24T10:43:47.000Z
scibert/models/text_classifier.py
tomhoper/scibert
3cc65f433808f7879c973dc4fc41bd25e465dc15
[ "Apache-2.0" ]
91
2019-03-27T17:20:27.000Z
2022-03-29T09:29:58.000Z
scibert/models/text_classifier.py
tomhoper/scibert
3cc65f433808f7879c973dc4fc41bd25e465dc15
[ "Apache-2.0" ]
206
2019-03-28T02:22:30.000Z
2022-03-30T07:07:05.000Z
from typing import Dict, Optional, List, Any

import torch
import torch.nn.functional as F

from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
40.919355
120
0.653331
6abec40d890d6b8f05f738693cce2c79127a8924
4,716
py
Python
plugins/template/tasks.py
crotwell/cmd2
5ce3a64e41258b6a694ad45bb1c604be53a1e974
[ "MIT" ]
469
2016-02-16T16:18:48.000Z
2022-03-31T15:24:40.000Z
plugins/template/tasks.py
crotwell/cmd2
5ce3a64e41258b6a694ad45bb1c604be53a1e974
[ "MIT" ]
1,076
2016-02-19T02:50:47.000Z
2022-03-22T03:08:06.000Z
plugins/template/tasks.py
crotwell/cmd2
5ce3a64e41258b6a694ad45bb1c604be53a1e974
[ "MIT" ]
138
2016-02-19T02:46:23.000Z
2022-03-30T13:13:01.000Z
#
# -*- coding: utf-8 -*-
"""Development related tasks to be run with 'invoke'"""

import os
import pathlib
import shutil

import invoke

TASK_ROOT = pathlib.Path(__file__).resolve().parent
TASK_ROOT_STR = str(TASK_ROOT)


# shared function
def rmrf(items, verbose=True):
    """Silently remove a list of directories or files"""
    if isinstance(items, str):
        items = [items]

    for item in items:
        if verbose:
            print("Removing {}".format(item))
        shutil.rmtree(item, ignore_errors=True)
        # rmtree doesn't remove bare files
        try:
            os.remove(item)
        except FileNotFoundError:
            pass


# create namespaces
namespace = invoke.Collection()
namespace_clean = invoke.Collection('clean')
namespace.add_collection(namespace_clean, 'clean')

#####
#
# pytest, pylint, and codecov
#
#####
namespace.add_task(pytest)
namespace_clean.add_task(pytest_clean, 'pytest')
namespace.add_task(pylint)
namespace.add_task(pylint_tests)

#####
#
# build and distribute
#
#####
BUILDDIR = 'build'
DISTDIR = 'dist'

namespace_clean.add_task(build_clean, 'build')
namespace_clean.add_task(dist_clean, 'dist')
namespace_clean.add_task(eggs_clean, 'eggs')
namespace_clean.add_task(bytecode_clean, 'bytecode')

#
# make a dummy clean task which runs all the tasks in the clean namespace
clean_tasks = list(namespace_clean.tasks.values())
namespace_clean.add_task(clean_all, 'all')

namespace.add_task(sdist)
namespace.add_task(wheel)

#
# these two tasks are commented out so you don't
# accidentally run them and upload this template to pypi
#
# @invoke.task(pre=[sdist, wheel])
# def pypi(context):
#     """Build and upload a distribution to pypi"""
#     context.run('twine upload dist/*')
# namespace.add_task(pypi)

# @invoke.task(pre=[sdist, wheel])
# def pypi_test(context):
#     """Build and upload a distribution to https://test.pypi.org"""
#     context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
# namespace.add_task(pypi_test)
23.231527
87
0.671968
6abef106632056c480a54511ff7725bfc1193a55
4,116
py
Python
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
timgates42/trex-core
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
[ "Apache-2.0" ]
956
2015-06-24T15:04:55.000Z
2022-03-30T06:25:04.000Z
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
angelyouyou/trex-core
fddf78584cae285d9298ef23f9f5c8725e16911e
[ "Apache-2.0" ]
782
2015-09-20T15:19:00.000Z
2022-03-31T23:52:05.000Z
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
angelyouyou/trex-core
fddf78584cae285d9298ef23f9f5c8725e16911e
[ "Apache-2.0" ]
429
2015-06-27T19:34:21.000Z
2022-03-23T11:02:51.000Z
import stl_path


# dynamic load of python module
def register():
    return MyNDRPlugin()
35.482759
196
0.557337
6abf04d8aaa93e623f487cf9322ec9b114c31f92
2,590
py
Python
homeassistant/components/epsonworkforce/sensor.py
maexono/home-assistant
c174b83f5408124fc7834e8282969a1e8f9cca16
[ "Apache-2.0" ]
2
2019-12-30T14:12:33.000Z
2021-07-05T10:33:08.000Z
homeassistant/components/epsonworkforce/sensor.py
maexono/home-assistant
c174b83f5408124fc7834e8282969a1e8f9cca16
[ "Apache-2.0" ]
2
2022-01-13T04:00:03.000Z
2022-03-12T01:02:40.000Z
homeassistant/components/epsonworkforce/sensor.py
maexono/home-assistant
c174b83f5408124fc7834e8282969a1e8f9cca16
[ "Apache-2.0" ]
3
2019-04-28T16:35:45.000Z
2020-05-28T15:21:59.000Z
"""Support for Epson Workforce Printer.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_HOST, CONF_MONITORED_CONDITIONS from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity REQUIREMENTS = ['epsonprinter==0.0.8'] _LOGGER = logging.getLogger(__name__) MONITORED_CONDITIONS = { 'black': ['Inklevel Black', '%', 'mdi:water'], 'magenta': ['Inklevel Magenta', '%', 'mdi:water'], 'cyan': ['Inklevel Cyan', '%', 'mdi:water'], 'yellow': ['Inklevel Yellow', '%', 'mdi:water'], 'clean': ['Inklevel Cleaning', '%', 'mdi:water'], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]), }) SCAN_INTERVAL = timedelta(minutes=60) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the cartridge sensor.""" host = config.get(CONF_HOST) from epsonprinter_pkg.epsonprinterapi import EpsonPrinterAPI api = EpsonPrinterAPI(host) if not api.available: raise PlatformNotReady() sensors = [EpsonPrinterCartridge(api, condition) for condition in config[CONF_MONITORED_CONDITIONS]] add_devices(sensors, True)
30.116279
71
0.679151
6abf99810278b3e6bb4dfbe19a2991c6db839dec
19,661
py
Python
bot/exts/help_channels/_cog.py
bast0006/bot
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
[ "MIT", "BSD-3-Clause" ]
null
null
null
bot/exts/help_channels/_cog.py
bast0006/bot
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
[ "MIT", "BSD-3-Clause" ]
null
null
null
bot/exts/help_channels/_cog.py
bast0006/bot
dec9a9dba77aa4322f9dc37b6493a8410e7482ec
[ "MIT", "BSD-3-Clause" ]
null
null
null
import asyncio
import logging
import random
import typing as t
from datetime import datetime, timezone
from operator import attrgetter

import discord
import discord.abc
from discord.ext import commands

from bot import constants
from bot.bot import Bot
from bot.exts.help_channels import _caches, _channel, _cooldown, _message, _name, _stats
from bot.utils import channel as channel_utils, lock, scheduling

log = logging.getLogger(__name__)

NAMESPACE = "help"

HELP_CHANNEL_TOPIC = """
This is a Python help channel. You can claim your own help channel in the Python Help: Available category.
"""
42.010684
120
0.670363
6ac05bd39a70de6163a586a9ee9a2b3649ee2eef
16,516
py
Python
code/menu/screens/shopmenu.py
LordZagreus/LodeRunner
68aab36be47cabe31e52f3ee43520bdafcdf3c95
[ "MIT" ]
1
2017-10-31T22:26:22.000Z
2017-10-31T22:26:22.000Z
code/menu/screens/shopmenu.py
team-sparrow/LodeRunner
68aab36be47cabe31e52f3ee43520bdafcdf3c95
[ "MIT" ]
2
2019-07-05T03:17:18.000Z
2019-07-08T16:15:29.000Z
code/menu/screens/shopmenu.py
team-sparrow/LodeRunner
68aab36be47cabe31e52f3ee43520bdafcdf3c95
[ "MIT" ]
1
2020-10-15T09:03:20.000Z
2020-10-15T09:03:20.000Z
import os
import math
import random
import time

from code.menu.menu import Menu

from code.tools.eventqueue import EventQueue
from code.tools.xml import XMLParser

from code.utils.common import coalesce, intersect, offset_rect, log, log2, xml_encode, xml_decode, translate_rgb_to_string

from code.constants.common import SCREEN_WIDTH, SCREEN_HEIGHT, PAUSE_MENU_X, PAUSE_MENU_Y, PAUSE_MENU_WIDTH, PAUSE_MENU_HEIGHT, MODE_GAME, TILE_WIDTH, TILE_HEIGHT, DIR_UP, DIR_RIGHT, DIR_DOWN, DIR_LEFT, SPLASH_MODE_GREYSCALE_ANIMATED
from code.constants.states import STATUS_ACTIVE, STATUS_INACTIVE, GAME_STATE_ACTIVE, GAME_STATE_NOT_READY
from code.constants.newsfeeder import *
31.339658
233
0.60069
6ac069f3cef035db6da504010b64c5c2110dea99
3,665
py
Python
lib/bridgedb/runner.py
liudonghua123/bridgedb
94dd10673f9e6650e8a00e162f348e64f7a1ecab
[ "BSD-3-Clause-Clear" ]
null
null
null
lib/bridgedb/runner.py
liudonghua123/bridgedb
94dd10673f9e6650e8a00e162f348e64f7a1ecab
[ "BSD-3-Clause-Clear" ]
null
null
null
lib/bridgedb/runner.py
liudonghua123/bridgedb
94dd10673f9e6650e8a00e162f348e64f7a1ecab
[ "BSD-3-Clause-Clear" ]
null
null
null
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_runner -*- # # This file is part of BridgeDB, a Tor bridge distribution system. # # :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]> # please also see AUTHORS file # :copyright: (c) 2007-2015, The Tor Project, Inc. # (c) 2007-2015, all entities within the AUTHORS file # (c) 2012-2015, Isis Lovecruft # :license: 3-clause BSD, see included LICENSE for information """Classes for running components and servers, as well as daemonisation. ** Module Overview: ** """ from __future__ import print_function import logging import sys import os from twisted.python import procutils def find(filename): """Find the executable ``filename``. :param string filename: The executable to search for. Must be in the effective user ID's $PATH. :rtype: string :returns: The location of the executable, if found. Otherwise, returns None. """ executable = None logging.debug("Searching for installed '%s'..." % filename) which = procutils.which(filename, os.X_OK) if len(which) > 0: for that in which: if os.stat(that).st_uid == os.geteuid(): executable = that break if not executable: return None logging.debug("Found installed script at '%s'" % executable) return executable def generateDescriptors(count=None, rundir=None): """Run a script which creates fake bridge descriptors for testing purposes. This will run Leekspin_ to create bridge server descriptors, bridge extra-info descriptors, and networkstatus document. .. warning: This function can take a very long time to run, especially in headless environments where entropy sources are minimal, because it creates the keys for each mocked OR, which are embedded in the server descriptors, used to calculate the OR fingerprints, and sign the descriptors, among other things. .. _Leekspin: https://gitweb.torproject.org/user/isis/leekspin.git :param integer count: Number of mocked bridges to generate descriptor for. (default: 3) :type rundir: string or None :param rundir: If given, use this directory as the current working directory for the bridge descriptor generator script to run in. The directory MUST already exist, and the descriptor files will be created in it. If None, use the whatever directory we are currently in. """ import subprocess import os.path proc = None statuscode = 0 script = 'leekspin' rundir = rundir if os.path.isdir(rundir) else None count = count if count else 3 try: proc = subprocess.Popen([script, '-n', str(count)], close_fds=True, cwd=rundir) finally: if proc is not None: proc.wait() if proc.returncode: print("There was an error generating bridge descriptors.", "(Returncode: %d)" % proc.returncode) statuscode = proc.returncode else: print("Sucessfully generated %s descriptors." % str(count)) del subprocess return statuscode def doDumpBridges(config): """Dump bridges by assignment to a file. This function handles the commandline '--dump-bridges' option. :type config: :class:`bridgedb.Main.Conf` :param config: The current configuration. """ import bridgedb.Bucket as bucket bucketManager = bucket.BucketManager(config) bucketManager.assignBridgesToBuckets() bucketManager.dumpBridges()
33.318182
79
0.661937
6ac1a5f132a19c0dca01d22ddfd3613255dba8b5
4,258
py
Python
wce_triage/ops/create_image_runner.py
pfrouleau/wce-triage-v2
25610cda55f5cb2170e13e121ae1cbaa92ef7626
[ "MIT" ]
3
2019-07-25T03:24:23.000Z
2021-06-23T14:01:34.000Z
wce_triage/ops/create_image_runner.py
pfrouleau/wce-triage-v2
25610cda55f5cb2170e13e121ae1cbaa92ef7626
[ "MIT" ]
1
2019-12-20T16:04:19.000Z
2019-12-20T16:04:19.000Z
wce_triage/ops/create_image_runner.py
pfrouleau/wce-triage-v2
25610cda55f5cb2170e13e121ae1cbaa92ef7626
[ "MIT" ]
2
2019-07-25T03:24:26.000Z
2021-02-14T05:27:11.000Z
#!/usr/bin/env python3
#
# Create disk image
#
import re, sys, traceback

from .tasks import task_fetch_partitions, task_refresh_partitions, task_mount, task_remove_persistent_rules, task_remove_logs, task_fsck, task_shrink_partition, task_expand_partition, task_unmount
from .partclone_tasks import task_create_disk_image
from .ops_ui import console_ui
from ..components.disk import create_storage_instance
from .runner import Runner
from ..lib.disk_images import make_disk_image_name
from .json_ui import json_ui
from ..lib.util import init_triage_logger, is_block_device

# "Waiting", "Prepare", "Preflight", "Running", "Success", "Failed"]
my_messages = { "Waiting": "Saving disk is waiting.",
                "Prepare": "Savign disk is preparing.",
                "Preflight": "Saving disk is preparing.",
                "Running": "{step} of {steps}: Running {task}",
                "Success": "Saving disk completed successfully.",
                "Failed": "Saving disk failed." }

#
if __name__ == "__main__":
  tlog = init_triage_logger()
  if len(sys.argv) == 1:
    print( 'Unloader: devicename part destdir')
    sys.exit(0)
    # NOTREACHED
    pass

  devname = sys.argv[1]
  if not is_block_device(devname):
    print( '%s is not a block device.' % devname)
    sys.exit(1)
    # NOTREACHED
    pass

  part = sys.argv[2]     # This is a partition id
  destdir = sys.argv[3]  # Destination directory

  disk = create_storage_instance(devname)

  # Preflight is for me to see the tasks. http server runs this with json_ui.
  do_it = True
  if destdir == "preflight":
    ui = console_ui()
    do_it = False
    pass
  elif destdir == "testflight":
    ui = console_ui()
    do_it = True
    pass
  else:
    ui = json_ui(wock_event="saveimage", message_catalog=my_messages)
    pass

  if re.match(part, '\d+'):
    part = int(part)
    pass

  runner_id = disk.device_name
  runner = ImageDiskRunner(ui, runner_id, disk, destdir, partition_id=part)
  try:
    runner.prepare()
    runner.preflight()
    runner.explain()
    runner.run()
    sys.exit(0)
    # NOTREACHED
  except Exception as exc:
    sys.stderr.write(traceback.format_exc(exc) + "\n")
    sys.exit(1)
    # NOTREACHED
  pass
  pass
35.190083
196
0.711837
6ac297a5895de04303f5fe688063a599cff885d4
4,053
py
Python
batch_processing_dataflow/play_store_flow.py
KeeplerIO/meetup-hands-on-gcp-2019
3674922d89d2be8984eb5719f0faaae127823ab4
[ "MIT" ]
1
2019-04-03T17:47:04.000Z
2019-04-03T17:47:04.000Z
batch_processing_dataflow/play_store_flow.py
KeeplerIO/meetup-hands-on-gcp-2019
3674922d89d2be8984eb5719f0faaae127823ab4
[ "MIT" ]
2
2020-08-10T10:52:57.000Z
2022-01-22T04:18:42.000Z
batch_processing_dataflow/play_store_flow.py
KeeplerIO/meetup-hands-on-gcp-2019
3674922d89d2be8984eb5719f0faaae127823ab4
[ "MIT" ]
null
null
null
import argparse
import logging

import apache_beam as beam
from apache_beam.io import WriteToBigQuery
from apache_beam.io import ReadFromText, WriteToText
from apache_beam.options.pipeline_options import PipelineOptions


def run(argv=None):
    """Main entry point. It defines and runs the pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        dest='input',
                        default='gs://meetup-batch-processing/input/googleplaystore.csv',
                        help='Input file to process.')
    parser.add_argument('--output',
                        dest='output',
                        default='gs://meetup-batch-processing/output/googleplaystore.csv',
                        help='Output file to process.')
    parser.add_argument('--table-output',
                        dest='table_output',
                        default='meetup-hands-on-gcp-2019:googleplaystore_batch_dataflow.play_store',
                        help='Bigquery table name for output.')
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_options = PipelineOptions(pipeline_args)

    with beam.Pipeline(options=pipeline_options) as pipeline:
        raw_lines = pipeline | 'ReadFromCsv' >> ReadFromText(known_args.input, skip_header_lines=1)

        lines = raw_lines | 'processCsv' >> beam.ParDo(ProcessCSV())

        output = lines | 'parseRecord' >> beam.ParDo(ParseRecord())

        output | 'writeBigQuery' >> WriteToBigQuery(known_args.table_output,
                                                    write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
                                                    create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER)

    logging.info('Finished.')


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
42.663158
116
0.593881
6ac30849631c3b7df115a92dba1c94f0bb05ed26
4,259
py
Python
backend/main/server/resources/Message.py
Manotomo-Alliance-Support-Squad/WWS
3df21a3f715eeb3b57314bf08c38f2239b2ba399
[ "MIT" ]
null
null
null
backend/main/server/resources/Message.py
Manotomo-Alliance-Support-Squad/WWS
3df21a3f715eeb3b57314bf08c38f2239b2ba399
[ "MIT" ]
20
2021-03-15T20:30:35.000Z
2021-06-02T19:16:55.000Z
backend/main/server/resources/Message.py
Manotomo-Alliance-Support-Squad/WWS
3df21a3f715eeb3b57314bf08c38f2239b2ba399
[ "MIT" ]
null
null
null
from flask import request
from flask_jwt import jwt_required
from flask_restful import Resource

from main.server import app, cache, db
from main.server.models import Message, MessageSchema

messages_schema = MessageSchema(many=True)
message_schema = MessageSchema()
35.789916
182
0.621273
6ac3173f834c06ec5469554b76a1d8e391432cee
5,171
py
Python
demos/chicken_pasta/chicken_pasta.py
icaros-usc/wecook
27bbb6b78a48e04765a87d33cc8a5d3748d2d4cc
[ "BSD-3-Clause" ]
15
2019-09-15T05:24:19.000Z
2021-02-26T20:31:19.000Z
demos/chicken_pasta/chicken_pasta.py
icaros-usc/wecook
27bbb6b78a48e04765a87d33cc8a5d3748d2d4cc
[ "BSD-3-Clause" ]
16
2019-10-10T23:27:00.000Z
2020-05-14T02:30:56.000Z
demos/chicken_pasta/chicken_pasta.py
icaros-usc/wecook
27bbb6b78a48e04765a87d33cc8a5d3748d2d4cc
[ "BSD-3-Clause" ]
2
2020-02-01T16:31:29.000Z
2020-04-07T21:00:04.000Z
#!/usr/bin/env python3
import rospy

from wecook.msg import ActionMsg, TaskMsg, SceneMsg, ObjectMsg, ContainingMsg, AgentMsg


if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
55.010638
98
0.375169
6ac35d88701fa7c3171d4b1e9eb134859f289cd2
5,380
py
Python
volttron/platform/vip/agent/subsystems/heartbeat.py
rmay-intwine/volttron
a449f70e32f73ff0136a838d0feddb928ede6298
[ "Apache-2.0" ]
null
null
null
volttron/platform/vip/agent/subsystems/heartbeat.py
rmay-intwine/volttron
a449f70e32f73ff0136a838d0feddb928ede6298
[ "Apache-2.0" ]
null
null
null
volttron/platform/vip/agent/subsystems/heartbeat.py
rmay-intwine/volttron
a449f70e32f73ff0136a838d0feddb928ede6298
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # # Copyright 2017, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This material was prepared as an account of work sponsored by an agency of # the United States Government. Neither the United States Government nor the # United States Department of Energy, nor Battelle, nor any of their # employees, nor any jurisdiction or organization that has cooperated in the # development of these materials, makes any warranty, express or # implied, or assumes any legal liability or responsibility for the accuracy, # completeness, or usefulness or any information, apparatus, product, # software, or process disclosed, or represents that its use would not infringe # privately owned rights. Reference herein to any specific commercial product, # process, or service by trade name, trademark, manufacturer, or otherwise # does not necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors expressed # herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY operated by # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} import os import weakref from datetime import datetime from .base import SubsystemBase from volttron.platform.messaging.headers import TIMESTAMP from volttron.platform.agent.utils import (get_aware_utc_now, format_timestamp) from volttron.platform.scheduling import periodic from ..errors import Unreachable, VIPError """The heartbeat subsystem adds an optional periodic publish to all agents. Heartbeats can be started with agents and toggled on and off at runtime. """ __docformat__ = 'reStructuredText' __version__ = '1.0'
34.487179
86
0.666729
6ac367d8d5ec9f368f230df751f19e5799e20bdd
18,984
py
Python
datasets/experimental/ni_superalloys/Ni_superalloy.py
kyawlin/smlb
79c757d7fc040fb30ad44410be158b3ce3bdf30d
[ "Apache-2.0" ]
null
null
null
datasets/experimental/ni_superalloys/Ni_superalloy.py
kyawlin/smlb
79c757d7fc040fb30ad44410be158b3ce3bdf30d
[ "Apache-2.0" ]
null
null
null
datasets/experimental/ni_superalloys/Ni_superalloy.py
kyawlin/smlb
79c757d7fc040fb30ad44410be158b3ce3bdf30d
[ "Apache-2.0" ]
null
null
null
"""Ni-Superalloy dataset. Scientific Machine Learning Benchmark A benchmark of regression models in chem- and materials informatics. 2019, Brendan Folie, Citrine Informatics. See class NiSuperalloyDataset for details. """ import os import json import zipfile from typing import List, Optional, Tuple, Union import numpy as np from smlb.exceptions import InvalidParameterError from smlb.parameters import params from smlb.tabular_data import TabularData def _parse_json_labels(self, entry: dict, labels_to_load: Optional[List[str]] = None): """ Helper function to parse labels in a single row from the raw json. Parameters: entry (dict): A json entry corresponding to a row in the dataset. labels_to_load (List[str]): Optional list of labels to load. Returns: array Array of labels in this row that we are interested in. """ if labels_to_load is None: labels_to_load = [ "Yield Strength", "Ultimate Tensile Strength", "Stress Rupture Time", "Stress Rupture Stress", "Elongation", ] properties = entry.get("properties") if properties is None or not isinstance(properties, list): raise InvalidParameterError( expected="A list of dictionaries, one for each property", got=properties ) labels_array = [] for label in labels_to_load: labels_array.append(self._get_scalar_property(properties, label, default_value=None)) return labels_array def _get_scalar_property( self, properties: List[dict], property_name: str, units: Optional[str] = None, default_value: Optional[float] = None, ) -> float: """ A helper function to get a single scalar property. This calls _get_single_property and then checks that the result can be turned into a float. Parameters: properties: A list of dicts, each of which is a single property. property_name: The name of the property to get the value of. units: Optional expected units string. default_value: Value to return if `property_name` is not present. Raises: InvalidParameterError: if the value cannot be expressed as a float Returns: float The value of the desired property. """ try: val = self._get_single_property(properties, property_name, units, default_value) if val is None: return None return float(val) except ValueError: raise InvalidParameterError( expected=f"Property {property_name} should have a value " f"that can be expressed as a float", got=properties, ) def _get_categorical_property( self, properties: List[dict], property_name: str, categories_dict: dict ) -> int: """ Helper function to get a single categorical property as an int. Parameters: properties: A list of dicts, each of which is a single property. property_name: The name of the property to get the value of. categories_dict: Dict from the categorical property (string) to a unique integer value. Raises: InvalidParameterError: if the value is not in the expected list of possible categories as given by the keys in `categories_dict` Returns: int An integer that corresponds to the value of the desired property. """ category = self._get_single_property(properties, property_name) try: return categories_dict[category] except KeyError: raise InvalidParameterError( f"A value in the array: {categories_dict.keys()}", category )
40.650964
99
0.629794
6ac3c0aa131a8fbf4b061367a8fbb2e23790a4c8
3,777
py
Python
metricbeat/module/postgresql/test_postgresql.py
SHolzhauer/beats
39679a536a22e8a0d7534a2475504488909d19fd
[ "ECL-2.0", "Apache-2.0" ]
4
2020-11-17T06:29:30.000Z
2021-08-08T11:56:01.000Z
metricbeat/module/postgresql/test_postgresql.py
SHolzhauer/beats
39679a536a22e8a0d7534a2475504488909d19fd
[ "ECL-2.0", "Apache-2.0" ]
36
2021-02-02T14:18:40.000Z
2022-03-20T15:07:30.000Z
metricbeat/module/postgresql/test_postgresql.py
SHolzhauer/beats
39679a536a22e8a0d7534a2475504488909d19fd
[ "ECL-2.0", "Apache-2.0" ]
6
2021-03-10T05:38:32.000Z
2021-08-16T13:11:19.000Z
import metricbeat
import os
import pytest
import sys
import unittest
32.843478
76
0.581943
6ac43cedb06c0b3488172628809f67d3f8c8275d
2,520
py
Python
pytorch_lightning/accelerators/cpu_backend.py
ozen/pytorch-lightning
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/accelerators/cpu_backend.py
ozen/pytorch-lightning
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/accelerators/cpu_backend.py
ozen/pytorch-lightning
3b0b402d30fa19e0fef7d150c30ff4bb14a64230
[ "Apache-2.0" ]
null
null
null
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from pytorch_lightning.accelerators.base_backend import Accelerator
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
34.520548
99
0.681746
6ac4859e41ada58d0b49c47a334602416d451187
219
py
Python
books/admin.py
aurphillus/Django-Library-Completed
f46e45f85c888e7694323e22f6e966c291a4a0be
[ "MIT" ]
null
null
null
books/admin.py
aurphillus/Django-Library-Completed
f46e45f85c888e7694323e22f6e966c291a4a0be
[ "MIT" ]
null
null
null
books/admin.py
aurphillus/Django-Library-Completed
f46e45f85c888e7694323e22f6e966c291a4a0be
[ "MIT" ]
null
null
null
from django.contrib import admin
from books.models import Genre, Author, Book, TBR

# Register your models here.

admin.site.register(Genre)
admin.site.register(Author)
admin.site.register(Book)
admin.site.register(TBR)
24.333333
49
0.799087
6ac4ca9b00a8492410dc6166ad36ac8d64fdcffc
2,337
py
Python
rabbitmq/tests/common.py
jfmyers9/integrations-core
8793c784f1d5b2c9541b2dd4214dd91584793ced
[ "BSD-3-Clause" ]
1
2021-03-24T13:00:14.000Z
2021-03-24T13:00:14.000Z
rabbitmq/tests/common.py
jfmyers9/integrations-core
8793c784f1d5b2c9541b2dd4214dd91584793ced
[ "BSD-3-Clause" ]
null
null
null
rabbitmq/tests/common.py
jfmyers9/integrations-core
8793c784f1d5b2c9541b2dd4214dd91584793ced
[ "BSD-3-Clause" ]
null
null
null
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os

from packaging import version

from datadog_checks.base.utils.common import get_docker_hostname

HERE = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(os.path.dirname(HERE))

RABBITMQ_VERSION_RAW = os.environ['RABBITMQ_VERSION']
RABBITMQ_VERSION = version.parse(RABBITMQ_VERSION_RAW)

CHECK_NAME = 'rabbitmq'

HOST = get_docker_hostname()
PORT = 15672

URL = 'http://{}:{}/api/'.format(HOST, PORT)

CONFIG = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'queues': ['test1'],
    'tags': ["tag1:1", "tag2"],
    'exchanges': ['test1'],
}

CONFIG_NO_NODES = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'queues': ['test1'],
    'tags': ["tag1:1", "tag2"],
    'exchanges': ['test1'],
    'collect_node_metrics': False,
}

CONFIG_REGEX = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'queues_regexes': [r'test\d+'],
    'exchanges_regexes': [r'test\d+'],
}

CONFIG_VHOSTS = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'vhosts': ['/', 'myvhost'],
}

CONFIG_WITH_FAMILY = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'tag_families': True,
    'queues_regexes': [r'(test)\d+'],
    'exchanges_regexes': [r'(test)\d+'],
}

CONFIG_DEFAULT_VHOSTS = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'vhosts': ['/', 'test'],
}

CONFIG_TEST_VHOSTS = {
    'rabbitmq_api_url': URL,
    'rabbitmq_user': 'guest',
    'rabbitmq_pass': 'guest',
    'vhosts': ['test', 'test2'],
}

EXCHANGE_MESSAGE_STATS = {
    'ack': 1.0,
    'ack_details': {'rate': 1.0},
    'confirm': 1.0,
    'confirm_details': {'rate': 1.0},
    'deliver_get': 1.0,
    'deliver_get_details': {'rate': 1.0},
    'publish': 1.0,
    'publish_details': {'rate': 1.0},
    'publish_in': 1.0,
    'publish_in_details': {'rate': 1.0},
    'publish_out': 1.0,
    'publish_out_details': {'rate': 1.0},
    'return_unroutable': 1.0,
    'return_unroutable_details': {'rate': 1.0},
    'redeliver': 1.0,
    'redeliver_details': {'rate': 1.0},
}
23.606061
64
0.618314
6ac4e4fc48c67f3dafab5b728a225aa95eec15e2
7,668
py
Python
st2common/st2common/util/pack.py
timgates42/st2
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
[ "Apache-2.0" ]
null
null
null
st2common/st2common/util/pack.py
timgates42/st2
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
[ "Apache-2.0" ]
15
2021-02-11T22:58:54.000Z
2021-08-06T18:03:47.000Z
st2common/st2common/util/pack.py
timgates42/st2
0e8ae756f30ffe2e017c64bff67830abdee7f7c9
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import re import collections import six from st2common.util import schema as util_schema from st2common.constants.pack import MANIFEST_FILE_NAME from st2common.constants.pack import PACK_REF_WHITELIST_REGEX from st2common.content.loader import MetaLoader from st2common.persistence.pack import Pack from st2common.exceptions.apivalidation import ValueValidationException from st2common.util import jinja as jinja_utils __all__ = [ 'get_pack_ref_from_metadata', 'get_pack_metadata', 'get_pack_warnings', 'get_pack_common_libs_path_for_pack_ref', 'get_pack_common_libs_path_for_pack_db', 'validate_config_against_schema', 'normalize_pack_version' ] # Common format for python 2.7 warning if six.PY2: PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \ "Python 2 support will be dropped in future releases. " \ "Please consider updating your packs to work with Python 3.x" else: PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \ "Python 2 support has been removed since st2 v3.4.0. " \ "Please update your packs to work with Python 3.x" def get_pack_ref_from_metadata(metadata, pack_directory_name=None): """ Utility function which retrieves pack "ref" attribute from the pack metadata file. If this attribute is not provided, an attempt is made to infer "ref" from the "name" attribute. :rtype: ``str`` """ pack_ref = None # The rules for the pack ref are as follows: # 1. If ref attribute is available, we used that # 2. If pack_directory_name is available we use that (this only applies to packs # which are in sub-directories) # 2. If attribute is not available, but pack name is and pack name meets the valid name # criteria, we use that if metadata.get('ref', None): pack_ref = metadata['ref'] elif pack_directory_name and re.match(PACK_REF_WHITELIST_REGEX, pack_directory_name): pack_ref = pack_directory_name else: if re.match(PACK_REF_WHITELIST_REGEX, metadata['name']): pack_ref = metadata['name'] else: msg = ('Pack name "%s" contains invalid characters and "ref" attribute is not ' 'available. You either need to add "ref" attribute which contains only word ' 'characters to the pack metadata file or update name attribute to contain only' 'word characters.') raise ValueError(msg % (metadata['name'])) return pack_ref def get_pack_metadata(pack_dir): """ Return parsed metadata for a particular pack directory. 
:rtype: ``dict`` """ manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME) if not os.path.isfile(manifest_path): raise ValueError('Pack "%s" is missing %s file' % (pack_dir, MANIFEST_FILE_NAME)) meta_loader = MetaLoader() content = meta_loader.load(manifest_path) if not content: raise ValueError('Pack "%s" metadata file is empty' % (pack_dir)) return content def get_pack_warnings(pack_metadata): """ Return warning string if pack metadata indicates only python 2 is supported :rtype: ``str`` """ warning = None versions = pack_metadata.get('python_versions', None) pack_name = pack_metadata.get('name', None) if versions and set(versions) == set(['2']): warning = PACK_PYTHON2_WARNING % pack_name return warning def validate_config_against_schema(config_schema, config_object, config_path, pack_name=None): """ Validate provided config dictionary against the provided config schema dictionary. """ # NOTE: Lazy improt to avoid performance overhead of importing this module when it's not used import jsonschema pack_name = pack_name or 'unknown' schema = util_schema.get_schema_for_resource_parameters(parameters_schema=config_schema, allow_additional_properties=True) instance = config_object try: cleaned = util_schema.validate(instance=instance, schema=schema, cls=util_schema.CustomValidator, use_default=True, allow_default_none=True) for key in cleaned: if (jinja_utils.is_jinja_expression(value=cleaned.get(key)) and "decrypt_kv" in cleaned.get(key) and config_schema.get(key).get('secret')): raise ValueValidationException('Values specified as "secret: True" in config ' 'schema are automatically decrypted by default. Use ' 'of "decrypt_kv" jinja filter is not allowed for ' 'such values. Please check the specified values in ' 'the config or the default values in the schema.') except jsonschema.ValidationError as e: attribute = getattr(e, 'path', []) if isinstance(attribute, (tuple, list, collections.Iterable)): attribute = [str(item) for item in attribute] attribute = '.'.join(attribute) else: attribute = str(attribute) msg = ('Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (attribute, pack_name, config_path, six.text_type(e))) raise jsonschema.ValidationError(msg) return cleaned def get_pack_common_libs_path_for_pack_db(pack_db): """ Return the pack's common lib path. This is the path where common code for sensors and actions are placed. For example, if the pack is at /opt/stackstorm/packs/my_pack, you can place common library code for actions and sensors in /opt/stackstorm/packs/my_pack/lib/. This common library code is only available for python sensors and actions. The lib structure also needs to follow a python convention with a __init__.py file. :param pack_db: Pack DB model :type pack_db: :class:`PackDB` :rtype: ``str`` """ pack_dir = getattr(pack_db, 'path', None) if not pack_dir: return None libs_path = os.path.join(pack_dir, 'lib') return libs_path def normalize_pack_version(version): """ Normalize old, pre StackStorm v2.1 non valid semver version string (e.g. 0.2) to a valid semver version string (0.2.0). :rtype: ``str`` """ version = str(version) version_seperator_count = version.count('.') if version_seperator_count == 1: version = version + '.0' return version
36.514286
100
0.666275
6ac55faf90a367de65f30a569842061f13204e0c
2,952
py
Python
module1-introduction-to-sql/query.py
jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases
8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14
[ "MIT" ]
null
null
null
module1-introduction-to-sql/query.py
jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases
8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14
[ "MIT" ]
null
null
null
module1-introduction-to-sql/query.py
jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases
8a6b3fd14b6a6833ee3a14b2d8a7db3bee494a14
[ "MIT" ]
null
null
null
# Look at the charactercreator_character table
# GET_CHARACTERS = """
# SELECT *
# FROM charactercreator_character;
# """

# How many total Characters are there? (302)
TOTAL_CHARACTERS = """
SELECT COUNT(*) as number_of_characters
FROM charactercreator_character;
"""

# How many of each specific subclass?
# TOTAL_SUBCLASS = """
# SELECT
# (SELECT COUNT(*) FROM charactercreator_necromancer) AS necros,
# (SELECT COUNT(*) FROM charactercreator_mage) AS mages,
# (SELECT COUNT(*) FROM charactercreator_thief) AS thiefs,
# (SELECT COUNT(*) FROM charactercreator_cleric) AS clerics,
# (SELECT COUNT(*) FROM charactercreator_fighter) AS fighters;
# """
CLASS = "SELECT COUNT(*) FROM charactercreator_"

# How many total Items? (174)
TOTAL_ITEMS = """
SELECT COUNT(item_id) as items
FROM armory_item;
"""

# How many of the Items are weapons? (37)
WEAPONS = """
SELECT COUNT(item_ptr_id)
FROM armory_weapon;
"""

# How many of the items are not weapons? (137)
NON_WEAPONS = """
SELECT COUNT(items.name)
FROM armory_item as items
WHERE items.item_id NOT IN(
    SELECT armory_weapon.item_ptr_id
    FROM armory_weapon);
"""

# How many Items does each character have? (Return first 20 rows)
CHARACTER_ITEMS = """
SELECT character.name as "character_name", COUNT(inventory.id) as "#_of_items"
FROM charactercreator_character AS character, charactercreator_character_inventory AS inventory
WHERE character.character_id = inventory.character_id
GROUP BY character.name
ORDER BY character.name
LIMIT 20;
"""

# How many Weapons does each character have? (Return first 20 rows)
CHARACTER_WEAPONS = """
SELECT character.name as "character_name", COUNT(weapon.item_ptr_id) as "#_of_weapons"
FROM charactercreator_character AS character,
     charactercreator_character_inventory AS inventory,
     armory_weapon as weapon
WHERE character.character_id = inventory.character_id
  AND inventory.item_id = weapon.item_ptr_id
GROUP BY character.name
ORDER BY character.name
LIMIT 20;
"""

# On average, how many Items does each Character have? (3.02)
AVG_CHARACTER_ITEMS = """
SELECT AVG("#_of_items") as "avg_#_of_items"
FROM (
    SELECT COUNT(inventory.id) AS "#_of_items"
    FROM charactercreator_character AS character, charactercreator_character_inventory AS inventory
    WHERE character.character_id = inventory.character_id
    GROUP BY character.name
);
"""

# On average, how many Weapons does each character have? (0.67)
AVG_CHARACTER_WEAPONS = """
SELECT AVG(weapon_count) as avg_weapons_per_char
FROM (
    SELECT character.character_id, COUNT(DISTINCT weapon.item_ptr_id) as weapon_count
    FROM charactercreator_character AS character
    LEFT JOIN charactercreator_character_inventory inventory
        -- characters may have zero items
        ON character.character_id = inventory.character_id
    LEFT JOIN armory_weapon weapon
        -- many items are not weapons, so only retain weapons
        ON inventory.item_id = weapon.item_ptr_id
    GROUP BY character.character_id
) subq;
"""
28.941176
120
0.774051
6ac65f8d4a911234497385069b667c9dd2f68934
21,364
py
Python
pixelproject/grid.py
MickaelRigault/pixelproject
d98db99a8e69eafa7a979c02a099e4c07f5fd568
[ "Apache-2.0" ]
null
null
null
pixelproject/grid.py
MickaelRigault/pixelproject
d98db99a8e69eafa7a979c02a099e4c07f5fd568
[ "Apache-2.0" ]
null
null
null
pixelproject/grid.py
MickaelRigault/pixelproject
d98db99a8e69eafa7a979c02a099e4c07f5fd568
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python # import warnings import numpy as np UNIT_SQUARE = np.asarray([[0,0],[0,1],[1,1],[1,0]])-0.5 from propobject import BaseObject from shapely import geometry import pandas import geopandas # ======================= # # # # Functions # # # # ======================= # def get_simple_grid(xbounds, ybounds, shift_origin=None): """ """ xbounds = np.atleast_1d(xbounds) if len(xbounds)==1: xmin,xmax = 0,xbounds[0] else: xmin,xmax = xbounds ybounds = np.atleast_1d(ybounds) if len(ybounds)==1: ymin,ymax = 0,ybounds[0] else: ymin,ymax = ybounds pixels = np.mgrid[xmin:xmax,ymin:ymax] pixels2_flat = np.concatenate(pixels.T, axis=0) if shift_origin is not None: # not += because conflict between int and float array pixels2_flat = pixels2_flat+ shift_origin return Grid(pixels2_flat, UNIT_SQUARE) # ======================= # # # # Classes # # # # ======================= # def set_pixels(self, pixels, shape=None, update=True): """ provide the pixels. Pixels define the position up on which the geometries are defined. NB: vertices = pixels+shape """ # Setting the pixels if np.shape(pixels)[-1] != 2: raise ValueError("pixels must be [N,2] arrays") self._properties["pixels"] = np.asarray(pixels) if shape is not None: self.set_pixelshapes(shape, update=False) if update: self._update_geodataframe_() def set_pixelshapes(self, shape, update=True): """ """ # Setting the pixel shape.s if len(np.shape(shape))==2: self._properties["shape"] = np.asarray(shape) elif len(np.shape(shape))==3: if self.pixels is not None and np.shape(shape)[0] != self.npixels: raise AssertionError("`shape` must be unique or have the same lenth as pixels") self._properties["shape"] = np.asarray(shape) else: raise ValueError("Cannot parse the given shape, must be [M,2] or [N,M,2] when N is the number of pixel and M the number of vertices") if update: self._update_geodataframe_() def set_vertices(self, vertices, overwrite=False, **kwargs): """ """ if not overwrite and (self.pixels is not None and self.shape is not None): raise ValueError("Pixels and shape already defined. set the overwrite option to true, to update vertices") try: pixels = np.mean(vertices, axis=1) except: # Means vertices have different size. self._derived_properties["vertices"] = vertices pixels = np.asarray([np.mean(v_, axis=0) for v_ in vertices]) self.set_pixels(pixels, None, **kwargs) return self._derived_properties["vertices"] = np.asarray(vertices) shape = self.vertices - pixels[:,None] shape_unique = np.unique(shape, axis=0) if len(shape_unique)==1: shape = shape_unique[0] self.set_pixels(pixels, shape, **kwargs) def set_geodataframe(self, geodataframe, overwrite=False): """ """ if not overwrite and (self.pixels is not None and self.shape is not None): raise ValueError("Pixels and shape already defined. set the overwrite option to true, to update geodataframe") if "geometry" not in geodataframe.columns: raise TypeError("The given geodataframe does not have 'geometry' column. 
It is required") self._derived_properties["geodataframe"] = geodataframe if "id" not in geodataframe.columns: self.geodataframe["id"] = self.indexes if self.pixels is not None else np.arange( len(geodataframe) ) # - get the vertices: vertices = geodataframe["geometry"].apply(get_verts).values self.set_vertices(vertices, update=False) # don't update the geodataframe # --------- # # UPDATE # # --------- # def _update_geodataframe_(self): """ """ dataseries = self.get_geoseries() x,y = self.pixels.T self._derived_properties["geodataframe"] = \ geopandas.GeoDataFrame({'geometry': dataseries, 'id':self.indexes, 'x':x,'y':y}) def add_data(self, data, name, indexes=None, inplace=True): """ """ if indexes is None: indexes = self.indexes s_ = pandas.Series(data, name=name, index=indexes) if not inplace: return self.geodataframe.join(s_) self._derived_properties["geodataframe"] = self.geodataframe.join(s_) # --------- # # GETTER # # --------- # def get_geoseries(self): """ build a new geodataframe and returns it. """ import geopandas return geopandas.GeoSeries([geometry.Polygon(v) for v in self.vertices]) def get_triangulation_grid(self): """ Returns a grid of triangulation. """ return Grid.set_from( np.concatenate(self.triangulation, axis=0) ) def get_pixels_in(self, polygon, invert=False): """ checks if the centroid of the pixel is in or out the given shapely polygon. Parameters ---------- polygon: [shapely.geometry] reference polygon invert: [bool] -optional- Get the pixel inside the polygon [invert=False] or outsite [invert=True] Returns ------- list of pixels and boolean mask """ from shapely import vectorized flagin = vectorized.contains(polygon, *self.pixels.T) if invert: flagin = ~flagin return self.pixels[flagin], flagin # --------- # # Project # # --------- # def project_to(self, othergrid, column="*", asgrid=True, use="sum"): """ project data in the given grid Parameters ---------- othergrid: [Grid] New grid where data should be projected to column: [str/None/list of] -optional- Which data should be projected ? If None or '*' all the non-structural columns will be (structural columns are 'geometry', 'id', 'x', 'y') asgrid: [bool] -optional- Should this return a new Grid (actually same object as othergrid) or a dict [asgrid=False]? 
Returns ------- Grid or dict (see asgrid) """ gproj = GridProjector(self, othergrid) if column is None or column in ["*","all"]: column = [k for k in self.geodataframe if k not in ['geometry', 'id', 'x', 'y']] datas = {k:gproj.project_data(k, use=use) for k in column} if not asgrid: return datas # building and setting the new grid gout = othergrid.__class__.set_from(othergrid.geodataframe) for k in column: gout.add_data(datas[k],k) return gout def project_to_wcs(self, wcs_, asgrid=True, **kwargs): """ provide an astropy.wcs.WCS and this will project the current grid into it (assuming grid's vertices coordinates are in pixels) Parameters ---------- wcs_: [astropy.wcs.WCS] The world coordinate solution asgrid: [bool] -optional- Should this return a load Grid object or an array of vertices (in degree) **kwargs goes to wcs_.all_pix2world Returns ------- Grid or array (see asgrid) """ verts = self.vertices verts_shape = np.shape(verts) flatten_verts = np.concatenate(verts, axis=0) # flatten_verts_wcs = np.asarray(wcs_.all_pix2world(flatten_verts[:,0], flatten_verts[:,1], 0, **kwargs)).T # verts_wcs = flatten_verts_wcs.reshape(verts_shape) if not asgrid: return verts_wcs g_wcs = Grid.set_from(verts_wcs) g_wcs.geodataframe["x_pix"],g_wcs.geodataframe["y_pix"] = self.pixels.T return g_wcs def derive_triangulation(self, fast_unique=True): """ """ def triangulate(geom): """ Return triangulate format that quadpy likes """ from shapely import ops triangles = ops.triangulate(geom) return np.stack([np.asarray(t.exterior.coords.xy).T[:-1] for t in triangles]) if not self.is_shape_unique or not fast_unique: self._derived_properties["triangulation"] = self.geodataframe["geometry"].apply(triangulate) else: self._derived_properties["triangulation"] = self.pixels[:,None,None] + triangulate(geometry.Polygon(self.shape)) # --------- # # PLOTTER # # --------- # def show(self, column=None, ax=None, edgecolor="0.7", facecolor="None", **kwargs): """ """ if column is not None: facecolor=None return self.geodataframe.plot(column, ax=ax,facecolor=facecolor, edgecolor=edgecolor, **kwargs) # =================== # # Properties # # =================== # # -- Side # -- Derived
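A minimal usage sketch of the grid helpers above, kept deliberately small. It only calls names that appear in this file (get_simple_grid, add_data, show, and the pixels attribute); the 10x10 bounds and the "flux" column name are illustrative values rather than part of the original module, and the plotting call assumes geopandas/matplotlib behave as the imports imply.

import numpy as np

grid = get_simple_grid([0, 10], [0, 10])    # 100 unit-square pixels
flux = np.random.rand(len(grid.pixels))     # made-up per-pixel values
grid.add_data(flux, "flux")                 # stored as a column of the geodataframe
grid.show(column="flux")                    # colour each pixel polygon by "flux"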
35.08046
145
0.550178
6ac66c22ad3d5b81a13742eecef45d93fd664ee6
31,445
py
Python
numpy/lib/format.py
AnirudhDagar/numpy
77bc3225e6f4badf83190ec300a0e10e56949644
[ "BSD-3-Clause" ]
5
2021-08-23T06:23:15.000Z
2022-02-05T07:27:30.000Z
numpy/lib/format.py
AnirudhDagar/numpy
77bc3225e6f4badf83190ec300a0e10e56949644
[ "BSD-3-Clause" ]
75
2021-07-12T01:28:50.000Z
2022-03-28T20:09:00.000Z
numpy/lib/format.py
AnirudhDagar/numpy
77bc3225e6f4badf83190ec300a0e10e56949644
[ "BSD-3-Clause" ]
1
2019-11-05T15:23:08.000Z
2019-11-05T15:23:08.000Z
""" Binary serialization NPY format ========== A simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for persisting a *single* arbitrary NumPy array on disk. The format stores all of the shape and dtype information necessary to reconstruct the array correctly even on another machine with a different architecture. The format is designed to be as simple as possible while achieving its limited goals. The ``.npz`` format is the standard format for persisting *multiple* NumPy arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` files, one for each array. Capabilities ------------ - Can represent all NumPy arrays including nested record arrays and object arrays. - Represents the data in its native binary form. - Supports Fortran-contiguous arrays directly. - Stores all of the necessary information to reconstruct the array including shape and dtype on a machine of a different architecture. Both little-endian and big-endian arrays are supported, and a file with little-endian numbers will yield a little-endian array on any machine reading the file. The types are described in terms of their actual sizes. For example, if a machine with a 64-bit C "long int" writes out an array with "long ints", a reading machine with 32-bit C "long ints" will yield an array with 64-bit integers. - Is straightforward to reverse engineer. Datasets often live longer than the programs that created them. A competent developer should be able to create a solution in their preferred programming language to read most ``.npy`` files that they have been given without much documentation. - Allows memory-mapping of the data. See `open_memmap`. - Can be read from a filelike stream object instead of an actual file. - Stores object arrays, i.e. arrays containing elements that are arbitrary Python objects. Files with object arrays are not to be mmapable, but can be read and written to disk. Limitations ----------- - Arbitrary subclasses of numpy.ndarray are not completely preserved. Subclasses will be accepted for writing, but only the array data will be written out. A regular numpy.ndarray object will be created upon reading the file. .. warning:: Due to limitations in the interpretation of structured dtypes, dtypes with fields with empty names will have the names replaced by 'f0', 'f1', etc. Such arrays will not round-trip through the format entirely accurately. The data is intact; only the field names will differ. We are working on a fix for this. This fix will not require a change in the file format. The arrays with such structures can still be saved and restored, and the correct dtype may be restored by using the ``loadedarray.view(correct_dtype)`` method. File extensions --------------- We recommend using the ``.npy`` and ``.npz`` extensions for files saved in this format. This is by no means a requirement; applications may wish to use these file formats but use an extension specific to the application. In the absence of an obvious alternative, however, we suggest using ``.npy`` and ``.npz``. Version numbering ----------------- The version numbering of these formats is independent of NumPy version numbering. If the format is upgraded, the code in `numpy.io` will still be able to read and write Version 1.0 files. Format Version 1.0 ------------------ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. ``\\x01``. 
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. ``\\x00``. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to make the total of ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible by 64 for alignment purposes. The dictionary contains three keys: "descr" : dtype.descr An object that can be passed as an argument to the `numpy.dtype` constructor to create the array's dtype. "fortran_order" : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. "shape" : tuple of int The shape of the array. For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. ``dtype.hasobject is True``), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on ``fortran_order``) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that ``shape=()`` means there is 1 element) by ``dtype.itemsize``. Format Version 2.0 ------------------ The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. `numpy.save` will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: "The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN." Format Version 3.0 ------------------ This version replaces the ASCII string (which in practice was latin1) with a utf8-encoded string, so supports structured types with any unicode field names. Notes ----- The ``.npy`` format, including motivation for creating it and a comparison of alternatives, is described in the :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have evolved with time and this document is more current. """ import numpy import io import warnings from numpy.lib.utils import safe_eval from numpy.compat import ( isfileobj, os_fspath, pickle ) __all__ = [] EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length # instead of 2 bytes (H) allowing storage of large structured arrays _header_size_info = { (1, 0): ('<H', 'latin1'), (2, 0): ('<I', 'latin1'), (3, 0): ('<I', 'utf8'), } def magic(major, minor): """ Return the magic string for the given file format version. 
Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted. """ if major < 0 or major > 255: raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int """ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) major, minor = magic_str[-2:] return major, minor def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if _has_metadata(dtype): warnings.warn("metadata on a dtype may be saved or ignored, but will " "raise if saved when read. Use another form of storage.", UserWarning, stacklevel=2) if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str def descr_to_dtype(descr): """ Returns a dtype based off the given description. This is essentially the reverse of `dtype_to_descr()`. It will remove the valueless padding fields created by, i.e. simple fields like dtype('float32'), and then convert the description to its corresponding dtype. Parameters ---------- descr : object The object retreived by dtype.descr. Can be passed to `numpy.dtype()` in order to replicate the input dtype. Returns ------- dtype : dtype The dtype constructed by the description. """ if isinstance(descr, str): # No padding removal needed return numpy.dtype(descr) elif isinstance(descr, tuple): # subtype, will always have a shape descr[1] dt = descr_to_dtype(descr[0]) return numpy.dtype((dt, descr[1])) titles = [] names = [] formats = [] offsets = [] offset = 0 for field in descr: if len(field) == 2: name, descr_str = field dt = descr_to_dtype(descr_str) else: name, descr_str, shape = field dt = numpy.dtype((descr_to_dtype(descr_str), shape)) # Ignore padding bytes, which will be void bytes with '' as name # Once support for blank names is removed, only "if name == ''" needed) is_pad = (name == '' and dt.type is numpy.void and dt.names is None) if not is_pad: title, name = name if isinstance(name, tuple) else (None, name) titles.append(title) names.append(name) formats.append(dt) offsets.append(offset) offset += dt.itemsize return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets, 'itemsize': offset}) def header_data_from_array_1_0(array): """ Get the dictionary of header metadata from a numpy.ndarray. 
Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file. """ d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: d['fortran_order'] = True else: # Totally non-contiguous data. We will have to make it C-contiguous # before writing. Note that we need to test for C_CONTIGUOUS first # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. d['fortran_order'] = False d['descr'] = dtype_to_descr(array.dtype) return d def _wrap_header(header, version): """ Takes a stringified header, and attaches the prefix and padding to it """ import struct assert version is not None fmt, encoding = _header_size_info[version] if not isinstance(header, bytes): # always true on python 3 header = header.encode(encoding) hlen = len(header) + 1 padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) try: header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) except struct.error: msg = "Header length {} too big for version={}".format(hlen, version) raise ValueError(msg) from None # Pad the header with spaces and a final newline such that the magic # string, the header-length short and the header are aligned on a # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes # aligned up to ARRAY_ALIGN on systems like Linux where mmap() # offset must be page-aligned (i.e. the beginning of the file). return header_prefix + header + b' '*padlen + b'\n' def _wrap_header_guess_version(header): """ Like `_wrap_header`, but chooses an appropriate version given the contents """ try: return _wrap_header(header, (1, 0)) except ValueError: pass try: ret = _wrap_header(header, (2, 0)) except UnicodeEncodeError: pass else: warnings.warn("Stored array in format 2.0. It can only be" "read by NumPy >= 1.9", UserWarning, stacklevel=2) return ret header = _wrap_header(header, (3, 0)) warnings.warn("Stored array in format 3.0. It can only be " "read by NumPy >= 1.17", UserWarning, stacklevel=2) return header def _write_array_header(fp, d, version=None): """ Write the header for an array and returns the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. version: tuple or None None means use oldest that works explicit version will raise a ValueError if the format does not allow saving this data. Default: None """ header = ["{"] for key, value in sorted(d.items()): # Need to use repr here, since we eval these when reading header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) if version is None: header = _wrap_header_guess_version(header) else: header = _wrap_header(header, version) fp.write(header) def write_array_header_1_0(fp, d): """ Write the header for an array using the 1.0 format. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (1, 0)) def write_array_header_2_0(fp, d): """ Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. 
""" _write_array_header(fp, d, (2, 0)) def read_array_header_1_0(fp): """ Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(1, 0)) def read_array_header_2_0(fp): """ Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(2, 0)) def _filter_header(s): """Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : string Npy file header. Returns ------- header : str Cleaned up header. """ import tokenize from io import StringIO tokens = [] last_token_was_number = False for token in tokenize.generate_tokens(StringIO(s).readline): token_type = token[0] token_string = token[1] if (last_token_was_number and token_type == tokenize.NAME and token_string == "L"): continue else: tokens.append(token) last_token_was_number = (token_type == tokenize.NUMBER) return tokenize.untokenize(tokens) def _read_array_header(fp, version): """ see read_array_header_1_0 """ # Read an unsigned, little-endian short int which has the length of the # header. import struct hinfo = _header_size_info.get(version) if hinfo is None: raise ValueError("Invalid version {!r}".format(version)) hlength_type, encoding = hinfo hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") header_length = struct.unpack(hlength_type, hlength_str)[0] header = _read_bytes(fp, header_length, "array header") header = header.decode(encoding) # The header is a pretty-printed string representation of a literal # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool # "descr" : dtype.descr # Versions (2, 0) and (1, 0) could have been created by a Python 2 # implementation before header filtering was implemented. if version <= (2, 0): header = _filter_header(header) try: d = safe_eval(header) except SyntaxError as e: msg = "Cannot parse header: {!r}" raise ValueError(msg.format(header)) from e if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) if EXPECTED_KEYS != d.keys(): keys = sorted(d.keys()) msg = "Header does not contain the correct keys: {!r}" raise ValueError(msg.format(keys)) # Sanity-check the values. 
if (not isinstance(d['shape'], tuple) or not all(isinstance(x, int) for x in d['shape'])): msg = "shape is not valid: {!r}" raise ValueError(msg.format(d['shape'])) if not isinstance(d['fortran_order'], bool): msg = "fortran_order is not a valid bool: {!r}" raise ValueError(msg.format(d['fortran_order'])) try: dtype = descr_to_dtype(d['descr']) except TypeError as e: msg = "descr is not a valid dtype descriptor: {!r}" raise ValueError(msg.format(d['descr'])) from e return d['shape'], d['fortran_order'], dtype def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): """ Write an array to an NPY file, including a header. If the array is neither C-contiguous nor Fortran-contiguous AND the file_like object is not a real file object, this function will have to copy data in memory. Parameters ---------- fp : file_like object An open, writable file object, or similar object with a ``.write()`` method. array : ndarray The array to write to disk. version : (int, int) or None, optional The version number of the format. None means use the oldest supported version that is able to store the data. Default: None allow_pickle : bool, optional Whether to allow writing pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. These are only useful when pickling objects in object arrays on Python 3 to Python 2 compatible format. Raises ------ ValueError If the array cannot be persisted. This includes the case of allow_pickle=False and array being an object array. Various other errors If the array contains Python objects as part of its dtype, the process of pickling them may raise various errors if the objects are not picklable. """ _check_version(version) _write_array_header(fp, header_data_from_array_1_0(array), version) if array.itemsize == 0: buffersize = 0 else: # Set buffer size to 16 MiB to hide the Python loop overhead. buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) if array.dtype.hasobject: # We contain Python objects so we cannot write out the data # directly. Instead, we will pickle it out if not allow_pickle: raise ValueError("Object arrays cannot be saved when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} pickle.dump(array, fp, protocol=3, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='F'): fp.write(chunk.tobytes('C')) else: if isfileobj(fp): array.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='C'): fp.write(chunk.tobytes('C')) def read_array(fp, allow_pickle=False, pickle_kwargs=None): """ Read an array from an NPY file. Parameters ---------- fp : file_like object If this is not a real file object, then this may take extra memory and time. allow_pickle : bool, optional Whether to allow writing pickled data. Default: False .. versionchanged:: 1.16.3 Made default False in response to CVE-2019-6446. pickle_kwargs : dict Additional keyword arguments to pass to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Returns ------- array : ndarray The array from the data on disk. Raises ------ ValueError If the data is invalid, or allow_pickle=False and the file contains an object array. 
""" version = read_magic(fp) _check_version(version) shape, fortran_order, dtype = _read_array_header(fp, version) if len(shape) == 0: count = 1 else: count = numpy.multiply.reduce(shape, dtype=numpy.int64) # Now read the actual data. if dtype.hasobject: # The array contained Python objects. We need to unpickle the data. if not allow_pickle: raise ValueError("Object arrays cannot be loaded when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} try: array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: # Friendlier error message raise UnicodeError("Unpickling a python object failed: %r\n" "You may need to pass the encoding= option " "to numpy.load" % (err,)) from err else: if isfileobj(fp): # We can use the fast fromfile() function. array = numpy.fromfile(fp, dtype=dtype, count=count) else: # This is not a real file. We have to read it the # memory-intensive way. # crc32 module fails on reads greater than 2 ** 32 bytes, # breaking large reads from gzip streams. Chunk reads to # BUFFER_SIZE bytes to avoid issue and reduce memory overhead # of the read. In non-chunked case count < max_read_count, so # only one read is performed. # Use np.ndarray instead of np.empty since the latter does # not correctly instantiate zero-width string dtypes; see # https://github.com/numpy/numpy/pull/6430 array = numpy.ndarray(count, dtype=dtype) if dtype.itemsize > 0: # If dtype.itemsize == 0 then there's nothing more to read max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) for i in range(0, count, max_read_count): read_count = min(max_read_count, count - i) read_size = int(read_count * dtype.itemsize) data = _read_bytes(fp, read_size, "array data") array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, count=read_count) if fortran_order: array.shape = shape[::-1] array = array.transpose() else: array.shape = shape return array def open_memmap(filename, mode='r+', dtype=None, shape=None, fortran_order=False, version=None): """ Open a .npy file as a memory-mapped array. This may be used to read an existing file or create a new one. Parameters ---------- filename : str or path-like The name of the file on disk. This may *not* be a file-like object. mode : str, optional The mode in which to open the file; the default is 'r+'. In addition to the standard file modes, 'c' is also accepted to mean "copy on write." See `memmap` for the available mode strings. dtype : data-type, optional The data type of the array if we are creating a new file in "write" mode, if not, `dtype` is ignored. The default value is None, which results in a data-type of `float64`. shape : tuple of int The shape of the array if we are creating a new file in "write" mode, in which case this parameter is required. Otherwise, this parameter is ignored and is thus optional. fortran_order : bool, optional Whether the array should be Fortran-contiguous (True) or C-contiguous (False, the default) if we are creating a new file in "write" mode. version : tuple of int (major, minor) or None If the mode is a "write" mode, then this is the version of the file format used to create the file. None means use the oldest supported version that is able to store the data. Default: None Returns ------- marray : memmap The memory-mapped array. Raises ------ ValueError If the data or the mode is invalid. IOError If the file is not found or cannot be opened correctly. See Also -------- numpy.memmap """ if isfileobj(filename): raise ValueError("Filename must be a string or a path-like object." 
" Memmap cannot use existing file handles.") if 'w' in mode: # We are creating the file, not reading it. # Check if we ought to create the file. _check_version(version) # Ensure that the given dtype is an authentic dtype object rather # than just something that can be interpreted as a dtype object. dtype = numpy.dtype(dtype) if dtype.hasobject: msg = "Array can't be memory-mapped: Python objects in dtype." raise ValueError(msg) d = dict( descr=dtype_to_descr(dtype), fortran_order=fortran_order, shape=shape, ) # If we got here, then it should be safe to create the file. with open(os_fspath(filename), mode+'b') as fp: _write_array_header(fp, d, version) offset = fp.tell() else: # Read the header of the file first. with open(os_fspath(filename), 'rb') as fp: version = read_magic(fp) _check_version(version) shape, fortran_order, dtype = _read_array_header(fp, version) if dtype.hasobject: msg = "Array can't be memory-mapped: Python objects in dtype." raise ValueError(msg) offset = fp.tell() if fortran_order: order = 'F' else: order = 'C' # We need to change a write-only mode to a read-write mode since we've # already written data to the file. if mode == 'w+': mode = 'r+' marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, mode=mode, offset=offset) return marray def _read_bytes(fp, size, error_template="ran out of data"): """ Read from file-like object until size bytes are read. Raises ValueError if not EOF is encountered before size bytes are read. Non-blocking objects only supported if they derive from io objects. Required as e.g. ZipExtFile in python 2.6 can return less data than requested. """ data = bytes() while True: # io files (default in python3) return None or raise on # would-block, python2 file will truncate, probably nothing can be # done about that. note that regular files can't be non-blocking try: r = fp.read(size - len(data)) data += r if len(r) == 0 or len(data) == size: break except io.BlockingIOError: pass if len(data) != size: msg = "EOF: reading %s, expected %d bytes got %d" raise ValueError(msg % (error_template, size, len(data))) else: return data
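The version 1.0 layout described in the module docstring above can be checked by hand with only the standard library. The sketch below is illustrative rather than part of numpy: it assumes nothing beyond numpy.save and the documented byte layout (6-byte magic string, two version bytes, a little-endian unsigned short header length, then the padded header), and the file name example.npy is arbitrary.

import struct
import numpy as np

np.save("example.npy", np.arange(6, dtype=np.float32).reshape(2, 3))

with open("example.npy", "rb") as fp:
    magic = fp.read(6)                                # b'\x93NUMPY'
    major, minor = fp.read(2)                         # version bytes, e.g. (1, 0)
    (header_len,) = struct.unpack("<H", fp.read(2))   # little-endian unsigned short
    header = fp.read(header_len).decode("latin1")     # ASCII dict padded to a 64-byte boundary

print(magic, major, minor)
print(header)   # "{'descr': '<f4', 'fortran_order': False, 'shape': (2, 3), }" plus padding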
34.21654
87
0.641437
6ac6979dc72c67c44ef423ebf8b3a34cc0b6d4cc
539
py
Python
gva/data/validator/is_valid_enum.py
gva-jjoyce/gva_data
cda990d0abb4b175025aaf16e75192bd9cc213af
[ "Apache-2.0" ]
null
null
null
gva/data/validator/is_valid_enum.py
gva-jjoyce/gva_data
cda990d0abb4b175025aaf16e75192bd9cc213af
[ "Apache-2.0" ]
24
2020-12-24T12:21:42.000Z
2021-01-28T14:22:38.000Z
gva/data/validator/is_valid_enum.py
gva-jjoyce/gva_data
cda990d0abb4b175025aaf16e75192bd9cc213af
[ "Apache-2.0" ]
null
null
null
""" Enumerator Test """ from typing import Any
22.458333
56
0.569573
6ac7d878414c23d75e260d1c447ced1efb264340
2,420
py
Python
events_page/app.py
los-verdes/lv-event-pagenerator
88416b626ff2dca6e2d71fa60bff4823954b3131
[ "MIT" ]
null
null
null
events_page/app.py
los-verdes/lv-event-pagenerator
88416b626ff2dca6e2d71fa60bff4823954b3131
[ "MIT" ]
7
2022-01-16T15:36:40.000Z
2022-01-25T22:02:12.000Z
events_page/app.py
los-verdes/lv-event-pagenerator
88416b626ff2dca6e2d71fa60bff4823954b3131
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from zoneinfo import ZoneInfo

import flask
from dateutil.parser import parse
from flask_assets import Bundle, Environment
from logzero import logger, setup_logger
from webassets.filter import get_filter

from config import cfg
from apis import calendar as gcal

setup_logger(name=__name__)

app = flask.Flask(__name__)

libsass = get_filter(
    "libsass",
    as_output=True,
    style="compressed",
)
assets = Environment(app)  # create an Environment instance
bundles = {  # define nested Bundle
    "style": Bundle(
        "scss/*.scss",
        filters=(libsass),
        output="style.css",
    )
}
assets.register(bundles)


def get_base_url():
    if prefix := cfg.gcs_bucket_prefix:
        return f"https://{cfg.hostname}/{prefix}"
    return f"https://{cfg.hostname}"


def create_app():
    cfg.load()
    # TODO: do this default settings thing better?
    default_app_config = dict(
        display_timezone=cfg.display_timezone,
        FREEZER_BASE_URL=get_base_url(),
        FREEZER_STATIC_IGNORE=["*.scss", ".webassets-cache/*", ".DS_Store"],
        FREEZER_RELATIVE_URLS=False,
        FREEZER_REMOVE_EXTRA_FILES=True,
    )
    logger.info(f"create_app() => {default_app_config=}")
    app.config.update(default_app_config)
    return app


if __name__ == "__main__":
    app = create_app()
    app.run(
        host="0.0.0.0",
        debug=True,
    )
24.444444
87
0.648347
6ac8631a21e5b850ca9b81ac1543a63108f70e71
6,090
py
Python
bin/focus_scan.py
desihub/desicmx
6f7c9a3cff25c970af57de20e3a12001382deb23
[ "BSD-3-Clause" ]
3
2019-11-15T23:17:23.000Z
2019-11-27T17:19:33.000Z
bin/focus_scan.py
desihub/desicmx
6f7c9a3cff25c970af57de20e3a12001382deb23
[ "BSD-3-Clause" ]
4
2019-12-12T03:37:32.000Z
2020-01-28T21:29:51.000Z
bin/focus_scan.py
desihub/desicmx
6f7c9a3cff25c970af57de20e3a12001382deb23
[ "BSD-3-Clause" ]
2
2019-12-20T08:21:52.000Z
2020-06-30T15:21:53.000Z
#!/usr/bin/env python import astropy.io.fits as fits import numpy as np import os import matplotlib.pyplot as plt import argparse if __name__ == "__main__": descr = 'GFA focus sequence plots/analysis' parser = argparse.ArgumentParser(description=descr) parser.add_argument('first_expid', type=int, nargs=1) parser.add_argument('night', type=str, nargs=1) parser.add_argument('--basedir', default='/n/home/datasystems/users/ameisner/reduced/focus', type=str, help='base directory for GFA reductions') parser.add_argument('--outdir', default='/n/home/desiobserver/focus_scan_pngs', type=str, help='output directory for plot PNGs') parser.add_argument('--no_popups', default=False, action='store_true', help='write PNGs without popping up plot windows') args = parser.parse_args() expids = args.first_expid + np.arange(16, dtype=int) print(expids) print(args.night[0]) print(args.basedir) outdir = args.outdir if os.path.exists(args.outdir) else '.' focus_plots(args.night[0], expids, basedir=args.basedir, outdir=outdir, no_popups=args.no_popups)
35.614035
129
0.612315
6ac89300a5b9e4ad6f97864631998446abb69eb0
313
py
Python
proto_3/ddq/topics/logics/topic.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
null
null
null
proto_3/ddq/topics/logics/topic.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
6
2021-03-19T12:06:56.000Z
2022-03-12T00:23:09.000Z
proto_3/ddq/topics/logics/topic.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
null
null
null
from typing import List

from ddq.taxonomy.reference import Reference
from ddq.topics.topic import Topic
26.083333
59
0.610224
6ac951af97aa3d1a0ef9e931276c0e45ff2d14cc
4,344
py
Python
pythia/utils/logger.py
abhiskk/pythia
c33fb45d74353c25b6269b44551bcafefecb5c7e
[ "BSD-3-Clause" ]
2
2019-05-23T02:07:03.000Z
2019-06-08T18:56:05.000Z
pythia/utils/logger.py
abhiskk/pythia
c33fb45d74353c25b6269b44551bcafefecb5c7e
[ "BSD-3-Clause" ]
null
null
null
pythia/utils/logger.py
abhiskk/pythia
c33fb45d74353c25b6269b44551bcafefecb5c7e
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates.
import base64
import logging
import os
import sys

from tensorboardX import SummaryWriter

from pythia.utils.distributed_utils import is_main_process
from pythia.utils.general import (ckpt_name_from_core_args,
                                  foldername_from_config_override)
from pythia.utils.timer import Timer
34.204724
81
0.63628
6ac9be98a456dcdce40e3c4f391cc313ab62f054
13,522
py
Python
sdk/python/pulumi_google_native/healthcare/v1beta1/user_data_mapping.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
44
2021-04-18T23:00:48.000Z
2022-02-14T17:43:15.000Z
sdk/python/pulumi_google_native/healthcare/v1beta1/user_data_mapping.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
354
2021-04-16T16:48:39.000Z
2022-03-31T17:16:39.000Z
sdk/python/pulumi_google_native/healthcare/v1beta1/user_data_mapping.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
8
2021-04-24T17:46:51.000Z
2022-01-05T10:40:21.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['UserDataMappingArgs', 'UserDataMapping'] def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, consent_store_id: Optional[pulumi.Input[str]] = None, data_id: Optional[pulumi.Input[str]] = None, dataset_id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, resource_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AttributeArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = UserDataMappingArgs.__new__(UserDataMappingArgs) if consent_store_id is None and not opts.urn: raise TypeError("Missing required property 'consent_store_id'") __props__.__dict__["consent_store_id"] = consent_store_id if data_id is None and not opts.urn: raise TypeError("Missing required property 'data_id'") __props__.__dict__["data_id"] = data_id if dataset_id is None and not opts.urn: raise TypeError("Missing required property 'dataset_id'") __props__.__dict__["dataset_id"] = dataset_id __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["resource_attributes"] = resource_attributes if user_id is None and not opts.urn: raise TypeError("Missing required property 'user_id'") __props__.__dict__["user_id"] = user_id __props__.__dict__["archive_time"] = None __props__.__dict__["archived"] = None super(UserDataMapping, __self__).__init__( 'google-native:healthcare/v1beta1:UserDataMapping', resource_name, __props__, opts)
45.837288
400
0.654859
6aca7a5f520c3a19c81c989f925529d891ca4d67
661
py
Python
_doc/sphinxdoc/source/conf.py
Jerome-maker/ensae_teaching_cs
43ea044361ee60c00c85aea354a7b25c21c0fd07
[ "MIT" ]
null
null
null
_doc/sphinxdoc/source/conf.py
Jerome-maker/ensae_teaching_cs
43ea044361ee60c00c85aea354a7b25c21c0fd07
[ "MIT" ]
null
null
null
_doc/sphinxdoc/source/conf.py
Jerome-maker/ensae_teaching_cs
43ea044361ee60c00c85aea354a7b25c21c0fd07
[ "MIT" ]
null
null
null
import sys
import os
import sphinx_rtd_theme

source_path = os.path.normpath(
    os.path.join(
        os.path.abspath(
            os.path.split(__file__)[0])))

try:
    from conf_base import *
except ImportError:
    sys.path.append(source_path)
    from conf_base import *

html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
templates_path = [os.path.join(source_path, 'phdoc_static')]
html_static_path = [os.path.join(source_path, 'phdoc_static')]

if not os.path.exists(templates_path[0]):
    raise FileNotFoundError(templates_path[0])

blog_root = "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx3/"
25.423077
74
0.741301
6acaa1d753027b9aa151aee9ced96ca2d15567b0
2,247
py
Python
dolfyn/adv/api.py
lkilcher/dolfyn-light
416bf6aa8a3455cebf973f416c9e4ba89a801a71
[ "Apache-2.0" ]
null
null
null
dolfyn/adv/api.py
lkilcher/dolfyn-light
416bf6aa8a3455cebf973f416c9e4ba89a801a71
[ "Apache-2.0" ]
null
null
null
dolfyn/adv/api.py
lkilcher/dolfyn-light
416bf6aa8a3455cebf973f416c9e4ba89a801a71
[ "Apache-2.0" ]
null
null
null
""" This module contains routines for reading and working with adv data. It contains: +-----------------------------------+-----------------------------------------+ | Name | Description | +===================================+=========================================+ | :func:`~dolfyn.adv.base.load` | A function for loading ADV data in | | | DOLfYN format. | +-----------------------------------+-----------------------------------------+ | :func:`~dolfyn.adv.base.mmload` | A function for loading ADV data in | | | DOLfYN format (as memory mapped arrays).| +-----------------------------------+-----------------------------------------+ | :func:`~dolfyn.io.nortek.\ | A function for reading Nortek Vector | | read_nortek` | files. | +-----------------------------------+-----------------------------------------+ | :mod:`rotate <dolfyn.adv.rotate>` | A module containing classes and | | | functions for rotating adv data between | | | different coordinate systems | +-----------------------------------+-----------------------------------------+ | :mod:`motion <dolfyn.adv.rotate>` | A module containing classes and | | | functions for performing motion | | | correction. | +-----------------------------------+-----------------------------------------+ | :class:`~dolfyn.\ | A class for breaking ADV data into | | adv.turbulence.TurbBinner` | 'bins', averaging it and estimating | | | various turbulence statistics. | +-----------------------------------+-----------------------------------------+ Examples -------- .. literalinclude:: ../examples/adv_example01.py """ from .base import load, mmload from .turbulence import TurbBinner from . import clean from ..io.nortek import read_nortek from . import rotate from . import motion
51.068182
79
0.348465
6acb7ed968b97603aa5b744b910e0997b0f3f62d
561
py
Python
server/api/migrations/0002_auto_20201011_1053.py
ShahriarDhruvo/WebTech_Assignment2
845d198a91b1dcc8ed149362499754167fca419d
[ "MIT" ]
null
null
null
server/api/migrations/0002_auto_20201011_1053.py
ShahriarDhruvo/WebTech_Assignment2
845d198a91b1dcc8ed149362499754167fca419d
[ "MIT" ]
null
null
null
server/api/migrations/0002_auto_20201011_1053.py
ShahriarDhruvo/WebTech_Assignment2
845d198a91b1dcc8ed149362499754167fca419d
[ "MIT" ]
null
null
null
# Generated by Django 3.1.2 on 2020-10-11 10:53

from django.db import migrations, models
23.375
72
0.57041
6acc395ad3bfafbc612c2d532d32bbb5ce80e13f
4,123
py
Python
flink-ai-flow/lib/notification_service/notification_service/mongo_event_storage.py
lisy09/flink-ai-extended
011a5a332f7641f66086653e715d0596eab2e107
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
1
2021-08-06T04:24:36.000Z
2021-08-06T04:24:36.000Z
flink-ai-flow/lib/notification_service/notification_service/mongo_event_storage.py
sentimentist/flink-ai-extended
689d000f2db8919fd80e0725a1609918ca4a26f4
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
flink-ai-flow/lib/notification_service/notification_service/mongo_event_storage.py
sentimentist/flink-ai-extended
689d000f2db8919fd80e0725a1609918ca4a26f4
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
1
2021-05-20T02:17:11.000Z
2021-05-20T02:17:11.000Z
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import socket
from collections import Iterable
from typing import Union, Tuple
from mongoengine import connect
from notification_service.event_storage import BaseEventStorage
from notification_service.base_notification import BaseEvent
from notification_service.mongo_notification import MongoEvent
38.175926
97
0.64031
6acc7db3216417c3207f16b6723988768ff50b66
711
py
Python
src/unicon/plugins/confd/csp/__init__.py
tahigash/unicon.plugins
1b43a5a61244ea9312387fd855442ace37c65db9
[ "Apache-2.0" ]
1
2021-02-25T19:36:56.000Z
2021-02-25T19:36:56.000Z
src/unicon/plugins/confd/csp/__init__.py
tahigash/unicon.plugins
1b43a5a61244ea9312387fd855442ace37c65db9
[ "Apache-2.0" ]
null
null
null
src/unicon/plugins/confd/csp/__init__.py
tahigash/unicon.plugins
1b43a5a61244ea9312387fd855442ace37c65db9
[ "Apache-2.0" ]
null
null
null
__author__ = "Dave Wapstra <[email protected]>" from unicon.plugins.confd import ConfdServiceList, ConfdConnection, ConfdConnectionProvider from .statemachine import CspStateMachine from .settings import CspSettings from . import service_implementation as csp_svc
28.44
91
0.759494
6accba984dd52f022ed6544e1f7ad42db7180437
665
py
Python
setup.py
rrwen/search_google
e647868ba5da2803e787a3c06b32e09452068736
[ "MIT" ]
15
2017-08-24T18:44:55.000Z
2021-02-01T22:07:53.000Z
setup.py
rrwen/search_google
e647868ba5da2803e787a3c06b32e09452068736
[ "MIT" ]
5
2017-09-05T12:25:09.000Z
2021-10-18T06:45:24.000Z
setup.py
rrwen/search_google
e647868ba5da2803e787a3c06b32e09452068736
[ "MIT" ]
1
2018-02-20T13:44:44.000Z
2018-02-20T13:44:44.000Z
# -*- coding: utf-8 -*-

from setuptools import setup
import search_google as package

setup(
    name=package.__name__,
    version=package.__version__,
    description=package.__description__,
    long_description=readme(),
    author=package.__author__,
    author_email=package.__email__,
    license=package.__license__,
    url=package.__url__,
    download_url=package.__download_url__,
    keywords=package.__keywords__,
    entry_points=package.__entry_points__,
    packages=package.__packages__,
    package_data=package.__package_data__,
    install_requires=package.__install_requires__
)
24.62963
47
0.771429
6acd5e71b7f337a2cb3ca947d7cf6d05f0a0b474
851
py
Python
setup.py
chearon/macpack
1cf6ce453dd33a811343e4bb6ee5575bc9fe919d
[ "MIT" ]
24
2016-11-14T14:09:57.000Z
2022-01-26T02:22:45.000Z
setup.py
najiji/macpack
20b518e9bc0f4e58d47c5416a686a4b246a3764d
[ "MIT" ]
5
2016-11-14T14:09:53.000Z
2019-04-18T15:49:14.000Z
setup.py
najiji/macpack
20b518e9bc0f4e58d47c5416a686a4b246a3764d
[ "MIT" ]
3
2018-01-27T15:38:46.000Z
2019-04-09T16:21:23.000Z
import setuptools
import os

try:
  import pypandoc
  description = pypandoc.convert('README.md', 'rst') if os.path.exists('README.md') else ''
except ImportError:
  description = ''

setuptools.setup(
  name = 'macpack',
  packages = setuptools.find_packages(),
  version = '1.0.3',
  description = 'Makes a macOS binary redistributable by searching the dependency tree and copying/patching non-system libraries.',
  long_description = description,
  author = 'Caleb Hearon',
  author_email = '[email protected]',
  url = 'https://github.com/chearon/macpack',
  download_url = 'https://github.com/chearon/macpack/tarball/v1.0.3',
  keywords = ['macos', 'bundle', 'package', 'redistribute', 'redistributable', 'install_name_tool', 'otool', 'mach'],
  classifiers = [],
  entry_points = {
    'console_scripts': ['macpack=macpack.patcher:main'],
  }
)
32.730769
131
0.706228
6acdf3a0dc36f1ce88eb6431d38ef46ea81f633b
1,371
py
Python
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
tizon9804/SS2017
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
[ "MIT" ]
null
null
null
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
tizon9804/SS2017
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
[ "MIT" ]
null
null
null
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
tizon9804/SS2017
7cb374ad21cdfeeef223ac4a65cbbf40dab22e06
[ "MIT" ]
null
null
null
import vtk

# Read the file (to test that it was written correctly)
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName("../data/wind_image.vti")
reader.Update()

print(reader.GetOutput())

# Convert the image to a polydata
imageDataGeometryFilter = vtk.vtkImageDataGeometryFilter()
imageDataGeometryFilter.SetInputConnection(reader.GetOutputPort())
imageDataGeometryFilter.Update()

scalarRange = reader.GetOutput().GetPointData().GetScalars().GetRange(-1)

contoursFilter = vtk.vtkContourFilter()
contoursFilter.SetInputConnection(imageDataGeometryFilter.GetOutputPort())
contoursFilter.GenerateValues(60, scalarRange)

contoursMapper = vtk.vtkPolyDataMapper()
contoursMapper.SetInputConnection(contoursFilter.GetOutputPort())
contoursMapper.SetColorModeToMapScalars()
contoursMapper.ScalarVisibilityOn()
contoursMapper.SelectColorArray("JPEGImage")
contoursMapper.SetScalarRange(scalarRange)

contoursActor = vtk.vtkActor()
contoursActor.SetMapper(contoursMapper)

actor = vtk.vtkActor()
actor.SetMapper(contoursMapper)

# Setup rendering
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(1,1,1)
renderer.ResetCamera()

renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)

renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Start()
30.466667
74
0.835157
6ace6e18f6860e091c836de50634b3a607e70811
11,303
py
Python
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
hbutsuak95/iv_rl
0f72a8f077a238237027ea96b7d1160c35ac9959
[ "MIT" ]
9
2022-01-16T11:27:00.000Z
2022-03-13T14:04:48.000Z
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
hbutsuak95/iv_rl
0f72a8f077a238237027ea96b7d1160c35ac9959
[ "MIT" ]
null
null
null
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
hbutsuak95/iv_rl
0f72a8f077a238237027ea96b7d1160c35ac9959
[ "MIT" ]
null
null
null
""" # ----------------------------------------------------------------------------- # @brief: # Tingwu: reset the reward function so that it's more similar to the one # defined in GYM # ----------------------------------------------------------------------------- """ import numpy as np from mbbl.config import init_path from mbbl.env import base_env_wrapper as bew from mbbl.env import env_register from mbbl.env import env_util from mbbl.util.common import logger if __name__ == '__main__': # test_env_name = ['gym_doublePendulum'] test_env_name = ['gym_invertedPendulum'] for env_name in test_env_name: test_env = env(env_name, 1234, {}) api_env = env(env_name, 1234, {}) api_env.reset() ob, reward, _, _ = test_env.reset() for _ in range(100): action = np.random.uniform(-1, 1, test_env._env.action_space.shape) new_ob, reward, _, _ = test_env.step(action) # test the reward api reward_from_api = \ api_env.reward({'start_state': ob, 'action': action}) reward_error = np.sum(np.abs(reward_from_api - reward)) # test the dynamics api newob_from_api = \ api_env.fdynamics({'start_state': ob, 'action': action}) ob_error = np.sum(np.abs(newob_from_api - new_ob)) ob = new_ob print('reward error: {}, dynamics error: {}'.format( reward_error, ob_error) )
35.656151
79
0.529594
6ad075ac74b446b2102501aae9ff9190489a4090
270
py
Python
ACME/visdom/__init__.py
mauriziokovacic/ACME
2615b66dd4addfd5c03d9d91a24c7da414294308
[ "MIT" ]
3
2019-10-23T23:10:55.000Z
2021-09-01T07:30:14.000Z
ACME/visdom/__init__.py
mauriziokovacic/ACME-Python
2615b66dd4addfd5c03d9d91a24c7da414294308
[ "MIT" ]
null
null
null
ACME/visdom/__init__.py
mauriziokovacic/ACME-Python
2615b66dd4addfd5c03d9d91a24c7da414294308
[ "MIT" ]
1
2020-07-11T11:35:43.000Z
2020-07-11T11:35:43.000Z
from .bar import *
from .create_session import *
from .image import *
from .line import *
from .mesh import *
from .pie import *
from .text import *
from .VisdomFigure import *
from .VisdomScene import *
27
29
0.555556
6ad0bc72be93fcbf7c2b0d3f4185b26d3bfb3b1c
1,426
py
Python
web/pingpongpiweb.py
andrewdyersmith/pingpongpi
63e969468da24b2d00e86033dfcb22de75f264bc
[ "MIT" ]
null
null
null
web/pingpongpiweb.py
andrewdyersmith/pingpongpi
63e969468da24b2d00e86033dfcb22de75f264bc
[ "MIT" ]
null
null
null
web/pingpongpiweb.py
andrewdyersmith/pingpongpi
63e969468da24b2d00e86033dfcb22de75f264bc
[ "MIT" ]
null
null
null
# Ping Pong Pi web UI running on flask.
# Uses zmq to speak to daemon controlling screen.

from flask import Flask, render_template, appcontext_tearing_down, request
from multiprocessing import Process, Queue
from multiprocessing.connection import Client
import atexit
import time
import zmq

app = Flask(__name__)

MODE="mode"

message_queue = Queue()
message_process = None

atexit.register(stop_message_loop)

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
22.634921
74
0.704067
6ad190f41233de2c7f9d3aa69edc83f906187598
5,171
py
Python
watcher/tests/decision_engine/strategy/strategies/test_base.py
ajaytikoo/watcher
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
[ "Apache-2.0" ]
64
2015-10-18T02:57:24.000Z
2022-01-13T11:27:51.000Z
watcher/tests/decision_engine/strategy/strategies/test_base.py
ajaytikoo/watcher
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
[ "Apache-2.0" ]
null
null
null
watcher/tests/decision_engine/strategy/strategies/test_base.py
ajaytikoo/watcher
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
[ "Apache-2.0" ]
35
2015-12-25T13:53:21.000Z
2021-07-19T15:50:16.000Z
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from watcher.common import exception
from watcher.decision_engine.datasources import manager
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
36.673759
76
0.705473
6ad2141e919181f75e53ccffa43344d1aae6eea7
346
py
Python
main.py
BenG49/sudoku
e4b14655e23d04c161feb16ceb1338537f519bdb
[ "MIT" ]
null
null
null
main.py
BenG49/sudoku
e4b14655e23d04c161feb16ceb1338537f519bdb
[ "MIT" ]
null
null
null
main.py
BenG49/sudoku
e4b14655e23d04c161feb16ceb1338537f519bdb
[ "MIT" ]
null
null
null
from sudoku import Sudoku


if __name__ == '__main__':
    main()
12.814815
26
0.297688
6ad243bce2bf880a6b70228da5819c87e92c557b
776
py
Python
test/test_sampler.py
pfnet-research/autogbt-alt
57f7ae1bce2923d11f73c3631e34be49c7dd25da
[ "MIT" ]
83
2019-04-01T05:45:37.000Z
2021-04-13T02:33:04.000Z
test/test_sampler.py
pfnet-research/autogbt-alt
57f7ae1bce2923d11f73c3631e34be49c7dd25da
[ "MIT" ]
null
null
null
test/test_sampler.py
pfnet-research/autogbt-alt
57f7ae1bce2923d11f73c3631e34be49c7dd25da
[ "MIT" ]
10
2019-04-15T03:15:42.000Z
2020-03-30T11:52:12.000Z
import numpy as np
import pandas as pd

from autogbt.sampler import MajorityUnderSampler
23.515152
73
0.640464
6ad3007b95e5d17415b05151d343ee3326e45e1d
2,157
py
Python
experiment/diabetes/accuracy_info.py
leandro-santiago/bloomwisard
4c02610c4ef2d2cf8424797c8a815da182ca2383
[ "MIT" ]
2
2020-10-25T17:01:10.000Z
2020-12-04T14:26:26.000Z
experiment/diabetes/accuracy_info.py
leandro-santiago/bloomwisard
4c02610c4ef2d2cf8424797c8a815da182ca2383
[ "MIT" ]
null
null
null
experiment/diabetes/accuracy_info.py
leandro-santiago/bloomwisard
4c02610c4ef2d2cf8424797c8a815da182ca2383
[ "MIT" ]
null
null
null
import numpy as np
import sys
from timeit import default_timer as timer

sys.path.append("../../")
from core import wnn
from encoding import thermometer
from encoding import util

#Load Diabetes data
base_path = "../../dataset/diabetes/"
#2/3 Test
bits_encoding = 20

train_data, train_label, test_data, test_label, data_min, data_max = util.load_3data(base_path)

ths = []
for i in range(len(data_max)):
    ths.append(thermometer.Thermometer(data_min[i], data_max[i], bits_encoding))

train_bin = []
test_bin = []
i = 0

for data in train_data:
    train_bin.append(np.array([], dtype=bool))
    t = 0
    for v in data:
        binarr = ths[t].binarize(v)
        train_bin[i] = np.append(train_bin[i], binarr)
        t += 1
    i += 1

i = 0
for data in test_data:
    test_bin.append(np.array([], dtype=bool))
    t = 0
    for v in data:
        binarr = ths[t].binarize(v)
        test_bin[i] = np.append(test_bin[i], binarr)
        t += 1
    i += 1

#print test_label

#Wisard
num_classes = 2
tuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30]
acc_list = []
test_length = len(test_label)
entry_size = len(train_bin[0])
#print entry_size

for t in tuple_list:
    wisard = wnn.Wisard(entry_size, t, num_classes)
    wisard.train(train_bin, train_label)
    rank_result = wisard.rank(test_bin)

    num_hits = 0
    for i in range(test_length):
        if rank_result[i] == test_label[i]:
            num_hits += 1

    acc_list.append(float(num_hits)/float(test_length))

#Bloom Wisard
btuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 40, 56]
bacc_list = []
#capacity = len(train_bin)
capacity = 10
print(capacity)

for t in btuple_list:
    bwisard = wnn.BloomWisard(entry_size, t, num_classes, capacity)
    bwisard.train(train_bin, train_label)
    rank_result = bwisard.rank(test_bin)

    num_hits = 0
    for i in range(test_length):
        if rank_result[i] == test_label[i]:
            num_hits += 1

    bacc_list.append(float(num_hits)/float(test_length))

print("Tuples=", tuple_list)
print("Wisard Accuracy=", acc_list)
print("Tuples=", btuple_list)
print("BloomWisard Accuracy=", bacc_list)
23.445652
95
0.658785
6ad38265801ddbc75fcce3bfaba00694854f353b
690
py
Python
PyGame/pygame1/tutorial1/startercode.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
PyGame/pygame1/tutorial1/startercode.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
PyGame/pygame1/tutorial1/startercode.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
from pygamehelper import *
from pygame import *
from pygame.locals import *
from vec2d import *
from random import uniform
import numpy as np

s = Starter()
s.mainLoop(40)
22.258065
98
0.588406
6ad3e60ef95d7e5c040fd394c92201b95875defd
1,155
py
Python
main.py
thewhiteninja/twitch-recorder
815b571e22917daa906d054a8ab2fe794e99bb8a
[ "MIT" ]
null
null
null
main.py
thewhiteninja/twitch-recorder
815b571e22917daa906d054a8ab2fe794e99bb8a
[ "MIT" ]
null
null
null
main.py
thewhiteninja/twitch-recorder
815b571e22917daa906d054a8ab2fe794e99bb8a
[ "MIT" ]
null
null
null
import glob
import os
import sys

import utils
from recorder import StreamRec

OUTDIR = ""


if __name__ == '__main__':
    main()
20.625
91
0.565368
6ad40da9c9320f7c8df4a83d064f6172f24c03ec
2,268
py
Python
karbor-1.3.0/karbor/policies/protectables.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
karbor-1.3.0/karbor/policies/protectables.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
karbor-1.3.0/karbor/policies/protectables.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from karbor.policies import base


GET_POLICY = 'protectable:get'
GET_ALL_POLICY = 'protectable:get_all'
INSTANCES_GET_POLICY = 'protectable:instance_get'
INSTANCES_GET_ALL_POLICY = 'protectable:instance_get_all'

protectables_policies = [
    policy.DocumentedRuleDefault(
        name=GET_POLICY,
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='Show a protectable type.',
        operations=[
            {
                'method': 'GET',
                'path': '/protectables/{protectable_type}'
            }
        ]),
    policy.DocumentedRuleDefault(
        name=GET_ALL_POLICY,
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='List protectable types.',
        operations=[
            {
                'method': 'GET',
                'path': '/protectables'
            }
        ]),
    policy.DocumentedRuleDefault(
        name=INSTANCES_GET_POLICY,
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='Show a protectable instance.',
        operations=[
            {
                'method': 'GET',
                'path': '/protectables/{protectable_type}/'
                        'instances/{resource_id}'
            }
        ]),
    policy.DocumentedRuleDefault(
        name=INSTANCES_GET_ALL_POLICY,
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='List protectable instances.',
        operations=[
            {
                'method': 'GET',
                'path': '/protectables/{protectable_type}/instances'
            }
        ]),
]
31.068493
78
0.619489
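The rules above are plain oslo.policy DocumentedRuleDefault objects. A minimal sketch of how such a list is typically registered and checked with an oslo.policy Enforcer follows; the configuration setup and the credential/target dictionaries are simplified placeholders, not taken from karbor's service wiring.

# Minimal oslo.policy usage sketch (placeholders, not karbor's actual setup).
from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults(protectables_policies)  # the list defined above

creds = {'user_id': 'demo', 'project_id': 'demo', 'roles': ['member']}
target = {'project_id': 'demo'}

# Returns True/False; with do_raise=True it raises PolicyNotAuthorized instead.
allowed = enforcer.authorize(GET_POLICY, target, creds, do_raise=False)
print(allowed)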
6ad42755c1900a89dea96e604b75bea9a5c3b32c
9,507
py
Python
router.example.py
unyo/uhpackage
07d0263c586e5daa0012c3ff82754be381850911
[ "BSD-3-Clause" ]
null
null
null
router.example.py
unyo/uhpackage
07d0263c586e5daa0012c3ff82754be381850911
[ "BSD-3-Clause" ]
null
null
null
router.example.py
unyo/uhpackage
07d0263c586e5daa0012c3ff82754be381850911
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # routers are dictionaries of URL routing parameters. # # For each request, the effective router is: # the built-in default base router (shown below), # updated by the BASE router in routes.py routers, # updated by the app-specific router in routes.py routers (if any), # updated by the app-specific router from applcations/app/routes.py routers (if any) # # # Router members: # # default_application: default application name # applications: list of all recognized applications, or 'ALL' to use all currently installed applications # Names in applications are always treated as an application names when they appear first in an incoming URL. # Set applications to None to disable the removal of application names from outgoing URLs. # domains: optional dict mapping domain names to application names # The domain name can include a port number: domain.com:8080 # The application name can include a controller: appx/ctlrx # path_prefix: a path fragment that is prefixed to all outgoing URLs and stripped from all incoming URLs # # Note: default_application, applications, domains & path_prefix are permitted only in the BASE router, # and domain makes sense only in an application-specific router. # The remaining members can appear in the BASE router (as defaults for all applications) # or in application-specific routers. # # default_controller: name of default controller # default_function: name of default function (all controllers) # controllers: list of valid controllers in selected app # or "DEFAULT" to use all controllers in the selected app plus 'static' # or None to disable controller-name removal. # Names in controllers are always treated as controller names when they appear in an incoming URL after # the (optional) application and language names. # languages: list of all supported languages # Names in controllers are always treated as language names when they appear in an incoming URL after # the (optional) application name. # default_language # The language code (for example: en, it-it) optionally appears in the URL following # the application (which may be omitted). For incoming URLs, the code is copied to # request.language; for outgoing URLs it is taken from request.language. # If languages=None, language support is disabled. # The default_language, if any, is omitted from the URL. # root_static: list of static files accessed from root # (mapped to the current application's static/ directory) # Each application has its own root-static files. # domain: the domain that maps to this application (alternative to using domains in the BASE router) # map_hyphen: If True (default), hyphens in incoming /a/c/f fields are converted to underscores, # and back to hyphens in outgoing URLs. Language, args and the query string are not affected. # map_static: By default, the default application is not stripped from static URLs. Set map_static=True # to override this policy. # acfe_match: regex for valid application, controller, function, extension /a/c/f.e # file_match: regex for valid file (used for static file names) # args_match: regex for valid args # This validation provides a measure of security. # If it is changed, the application perform its own validation. 
# # # The built-in default router supplies default values (undefined members are None): # # default_router = dict( # default_application = 'init', # applications = 'ALL', # default_controller = 'default', # controllers = 'DEFAULT', # default_function = 'index', # default_language = None, # languages = None, # root_static = ['favicon.ico', 'robots.txt'], # domains = None, # map_hyphen = True, # acfe_match = r'\w+$', # legal app/ctlr/fcn/ext # file_match = r'(\w+[-=./]?)+$', # legal file (path) name # args_match = r'([\w@ -]+[=.]?)+$', # legal arg in args # ) # # See rewrite.map_url_in() and rewrite.map_url_out() for implementation details. # This simple router set overrides only the default application name, # but provides full rewrite functionality. routers = dict( # base router BASE = dict( default_application = 'welcome', ), # 'admin' application router admin = dict( controllers = [], # don't remove controller names from admin URLs map_hyphen = False, # don't map hyphens to underscores ), ) # Error-handling redirects all HTTP errors (status codes >= 400) to a specified # path. If you wish to use error-handling redirects, uncomment the tuple # below. You can customize responses by adding a tuple entry with the first # value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are # routed. ) and the value as a path to redirect the user to. You may also use # '*' as a wildcard. # # The error handling page is also passed the error code and ticket as # variables. Traceback information will be stored in the ticket. # # routes_onerror = [ # (r'init/400', r'/init/default/login') # ,(r'init/*', r'/init/static/fail.html') # ,(r'*/404', r'/init/static/cantfind.html') # ,(r'*/*', r'/init/error/index') # ] # specify action in charge of error handling # # error_handler = dict(application='error', # controller='default', # function='index') # In the event that the error-handling page itself returns an error, web2py will # fall back to its old static responses. You can customize them here. # ErrorMessageTicket takes a string format dictionary containing (only) the # "ticket" key. # error_message = '<html><body><h1>Invalid request</h1></body></html>' # error_message_ticket = '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body></html>' def __routes_doctest(): ''' Dummy function for doctesting routes.py. Use filter_url() to test incoming or outgoing routes; filter_err() for error redirection. 
filter_url() accepts overrides for method and remote host: filter_url(url, method='get', remote='0.0.0.0', out=False) filter_err() accepts overrides for application and ticket: filter_err(status, application='app', ticket='tkt') >>> import os >>> import gluon.main >>> from gluon.rewrite import load, filter_url, filter_err, get_effective_router >>> load(routes=os.path.basename(__file__)) >>> filter_url('http://domain.com/abc', app=True) 'welcome' >>> filter_url('http://domain.com/welcome', app=True) 'welcome' >>> os.path.relpath(filter_url('http://domain.com/favicon.ico')) 'applications/welcome/static/favicon.ico' >>> filter_url('http://domain.com/abc') '/welcome/default/abc' >>> filter_url('http://domain.com/index/abc') "/welcome/default/index ['abc']" >>> filter_url('http://domain.com/default/abc.css') '/welcome/default/abc.css' >>> filter_url('http://domain.com/default/index/abc') "/welcome/default/index ['abc']" >>> filter_url('http://domain.com/default/index/a bc') "/welcome/default/index ['a bc']" >>> filter_url('http://domain.com/admin/bad!ctl') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid controller] >>> filter_url('http://domain.com/admin/ctl/bad!fcn') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid function] >>> filter_url('http://domain.com/admin/ctl/fcn.bad!ext') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid extension] >>> filter_url('http://domain.com/admin/ctl/fcn/bad!arg') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid arg <bad!arg>] >>> filter_url('https://domain.com/app/ctr/fcn', out=True) '/app/ctr/fcn' >>> filter_url('https://domain.com/welcome/ctr/fcn', out=True) '/ctr/fcn' >>> filter_url('https://domain.com/welcome/default/fcn', out=True) '/fcn' >>> filter_url('https://domain.com/welcome/default/index', out=True) '/' >>> filter_url('https://domain.com/welcome/appadmin/index', out=True) '/appadmin' >>> filter_url('http://domain.com/welcome/default/fcn?query', out=True) '/fcn?query' >>> filter_url('http://domain.com/welcome/default/fcn#anchor', out=True) '/fcn#anchor' >>> filter_url('http://domain.com/welcome/default/fcn?query#anchor', out=True) '/fcn?query#anchor' >>> filter_url('http://domain.com/appadmin/fcn-1') '/welcome/appadmin/fcn_1' >>> filter_url('http://domain.com/welcome/appadmin/fcn_1', out=True) '/appadmin/fcn-1' >>> filter_url('http://domain.com/examples/appadmin/fcn-1') '/examples/appadmin/fcn_1' >>> filter_url('http://domain.com/examples/appadmin/fcn_1', out=True) '/examples/appadmin/fcn-1' >>> filter_url('http://domain.com/app/static/filename-with_underscore', out=True) '/app/static/filename-with_underscore' >>> os.path.relpath(filter_url('http://domain.com/admin/static/filename-with_underscore')) 'applications/admin/static/filename-with_underscore' >>> filter_err(200) 200 >>> filter_err(399) 399 >>> filter_err(400) 400 ''' pass if __name__ == '__main__': import doctest doctest.testmod()
42.632287
164
0.679289
6ad467fa9905c0ca84ad3c1dc298047956f35818
252
py
Python
notebooks/2018.11.09 Meeting.py
costrouc/uarray
c3c42147181a88265942ad5f9cf439467f746782
[ "BSD-3-Clause" ]
null
null
null
notebooks/2018.11.09 Meeting.py
costrouc/uarray
c3c42147181a88265942ad5f9cf439467f746782
[ "BSD-3-Clause" ]
null
null
null
notebooks/2018.11.09 Meeting.py
costrouc/uarray
c3c42147181a88265942ad5f9cf439467f746782
[ "BSD-3-Clause" ]
null
null
null
#%%
from uarray.core import *

#%%
s = Scalar(Int(10))

#%%

#%%
register(Call(Always(w("a")), w("idx")), lambda a, idx: a)

#%%
a_ten = Always(s)

#%%
s = Sequence(Int(10), a_ten)
10.956522
58
0.559524
6ad4fd638f3c8440ee1f4046774d447aac8466fb
2,540
py
Python
var/spack/repos/builtin/packages/py-black/package.py
dwstreetNNL/spack
8f929707147c49606d00386a10161529dad4ec56
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/py-black/package.py
dwstreetNNL/spack
8f929707147c49606d00386a10161529dad4ec56
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/py-black/package.py
dwstreetNNL/spack
8f929707147c49606d00386a10161529dad4ec56
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
47.924528
96
0.663386
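Only the license header and the spack import of this recipe survive in the record above. For orientation, a Spack Python-package recipe has roughly the following shape; every name, version, and checksum below is a placeholder for illustration and does not reproduce the actual py-black package.py.

# Placeholder skeleton of a Spack PythonPackage recipe (illustrative only).
from spack import *


class PyExample(PythonPackage):
    """Example Python package built through pip/setuptools."""

    homepage = "https://example.org/project"
    pypi = "example/example-1.0.0.tar.gz"

    # Placeholder version and checksum.
    version('1.0.0', sha256='0' * 64)

    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')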
6ad54c23aea43b34f6b32a39f371e9919e5e2f64
3,772
py
Python
store/adminshop/templatetags/admin_extras.py
vallemrv/my_store_test
2da624fd02c5f1784464f15b751b488f3dd2bae6
[ "Apache-2.0" ]
null
null
null
store/adminshop/templatetags/admin_extras.py
vallemrv/my_store_test
2da624fd02c5f1784464f15b751b488f3dd2bae6
[ "Apache-2.0" ]
null
null
null
store/adminshop/templatetags/admin_extras.py
vallemrv/my_store_test
2da624fd02c5f1784464f15b751b488f3dd2bae6
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 27-Aug-2017
# @Email: [email protected]
# @Filename: admin_extras.py
# @Last modified by: valle
# @Last modified time: 02-Feb-2018
# @License: Apache license vesion 2.0

from django import template
from django.db.models import Q

try:
    from django.core.urlresolvers import reverse
except ImportError:
    from django.urls import reverse

from adminshop.models import Testeo, Compras, Presupuesto

import json
import sys

register = template.Library()
24.980132
72
0.656946
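The module above only gets as far as creating register = template.Library(); the template tags themselves are not included in this record. A minimal sketch of how a filter would be registered on such a library follows; the filter shown is illustrative and is not one of the repository's actual tags.

# Illustrative Django template filter; assumes a Library() like the one above.
from django import template

register = template.Library()


@register.filter(name='euros')
def euros(value):
    # Format a numeric value as a price string, usable as {{ producto.precio|euros }}.
    try:
        return "%.2f EUR" % float(value)
    except (TypeError, ValueError):
        return value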
6ad59b00bcc766f57088e62e448110d102b95431
17,165
py
Python
doc/tutorial/using_gpu_solution_1.py
abdalazizrashid/Theano-PyMC
90fa750461e91fb6281d494ae86404e2153fd7eb
[ "BSD-3-Clause" ]
null
null
null
doc/tutorial/using_gpu_solution_1.py
abdalazizrashid/Theano-PyMC
90fa750461e91fb6281d494ae86404e2153fd7eb
[ "BSD-3-Clause" ]
null
null
null
doc/tutorial/using_gpu_solution_1.py
abdalazizrashid/Theano-PyMC
90fa750461e91fb6281d494ae86404e2153fd7eb
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # Aesara tutorial # Solution to Exercise in section 'Using the GPU' # 1. Raw results import numpy as np import aesara import aesara.tensor as tt aesara.config.floatX = "float32" rng = np.random N = 400 feats = 784 D = ( rng.randn(N, feats).astype(aesara.config.floatX), rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX), ) training_steps = 10000 # Declare Aesara symbolic variables x = aesara.shared(D[0], name="x") y = aesara.shared(D[1], name="y") w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w") b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b") x.tag.test_value = D[0] y.tag.test_value = D[1] # print "Initial model:" # print w.get_value(), b.get_value() # Construct Aesara expression graph p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability of having a one prediction = p_1 > 0.5 # The prediction that is done: 0 or 1 xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy cost = tt.cast(xent.mean(), "float32") + 0.01 * (w ** 2).sum() # The cost to optimize gw, gb = tt.grad(cost, [w, b]) # Compile expressions to functions train = aesara.function( inputs=[], outputs=[prediction, xent], updates=[(w, w - 0.01 * gw), (b, b - 0.01 * gb)], name="train", ) predict = aesara.function(inputs=[], outputs=prediction, name="predict") if any( [ n.op.__class__.__name__ in ["Gemv", "CGemv", "Gemm", "CGemm"] for n in train.maker.fgraph.toposort() ] ): print("Used the cpu") elif any( [ n.op.__class__.__name__ in ["GpuGemm", "GpuGemv"] for n in train.maker.fgraph.toposort() ] ): print("Used the gpu") else: print("ERROR, not able to tell if aesara used the cpu or the gpu") print(train.maker.fgraph.toposort()) for i in range(training_steps): pred, err = train() # print "Final model:" # print w.get_value(), b.get_value() print("target values for D") print(D[1]) print("prediction on D") print(predict()) """ # 2. Profiling # 2.1 Profiling for CPU computations # In your terminal, type: $ THEANO_FLAGS=profile=True,device=cpu python using_gpu_solution_1.py # You'll see first the output of the script: Used the cpu target values for D prediction on D # Followed by the output of profiling.. You'll see profiling results for each function # in the script, followed by a summary for all functions. # We'll show here only the summary: Results were produced using an Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz Function profiling ================== Message: Sum of all(2) printed profiles at exit excluding Scan op profile. Time in 10001 calls to Function.__call__: 1.300452e+00s Time in Function.fn.__call__: 1.215823e+00s (93.492%) Time in thunks: 1.157602e+00s (89.015%) Total compile time: 8.922548e-01s Number of Apply nodes: 17 Aesara Optimizer time: 6.270301e-01s Aesara validate time: 5.993605e-03s Aesara Linker time (includes C, CUDA code generation/compiling): 2.949309e-02s Import time 3.543139e-03s Time in all call to aesara.grad() 1.848292e-02s Time since aesara import 2.864s Class --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name> 64.5% 64.5% 0.747s 3.73e-05s C 20001 3 aesara.tensor.blas_c.CGemv 33.1% 97.7% 0.384s 4.79e-06s C 80001 9 aesara.tensor.elemwise.Elemwise 1.0% 98.6% 0.011s 1.14e-06s C 10000 1 aesara.tensor.elemwise.Sum 0.7% 99.4% 0.009s 2.85e-07s C 30001 4 aesara.tensor.elemwise.DimShuffle 0.3% 99.7% 0.004s 3.64e-07s C 10001 2 aesara.tensor.basic.AllocEmpty 0.3% 100.0% 0.004s 1.78e-07s C 20001 3 aesara.compile.ops.Shape_i ... 
(remaining 0 Classes account for 0.00%(0.00s) of the runtime) Ops --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name> 64.5% 64.5% 0.747s 3.73e-05s C 20001 3 CGemv{inplace} 18.7% 83.2% 0.217s 2.17e-05s C 10000 1 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)] 8.9% 92.1% 0.103s 1.03e-05s C 10000 1 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)] 4.3% 96.4% 0.050s 4.98e-06s C 10000 1 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}} 1.0% 97.4% 0.011s 1.14e-06s C 10000 1 Sum{acc_dtype=float64} 0.5% 97.9% 0.006s 2.83e-07s C 20001 3 InplaceDimShuffle{x} 0.4% 98.3% 0.004s 4.22e-07s C 10000 1 Elemwise{sub,no_inplace} 0.3% 98.6% 0.004s 3.70e-07s C 10000 1 Elemwise{neg,no_inplace} 0.3% 98.9% 0.004s 3.64e-07s C 10001 2 AllocEmpty{dtype='float32'} 0.3% 99.2% 0.004s 1.78e-07s C 20001 3 Shape_i{0} 0.2% 99.5% 0.003s 2.88e-07s C 10000 1 InplaceDimShuffle{1,0} 0.2% 99.7% 0.003s 2.65e-07s C 10000 1 Elemwise{Composite{((-i0) - i1)}}[(0, 0)] 0.2% 99.9% 0.002s 1.98e-07s C 10000 1 Elemwise{Cast{float32}} 0.1% 100.0% 0.002s 1.54e-07s C 10000 1 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)] 0.0% 100.0% 0.000s 4.77e-06s C 1 1 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}} ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime) Apply ------ <% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name> 34.0% 34.0% 0.394s 3.94e-05s 10000 7 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 30.5% 64.5% 0.353s 3.53e-05s 10000 15 CGemv{inplace}(w, TensorConstant{-0.00999999977648}, x.T, Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0, TensorConstant{0.999800026417}) 18.7% 83.2% 0.217s 2.17e-05s 10000 12 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)](y, Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, Elemwise{sub,no_inplace}.0, Elemwise{neg,no_inplace}.0) 8.9% 92.1% 0.103s 1.03e-05s 10000 13 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)](Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, y, Elemwise{Cast{float32}}.0, Elemwise{sub,no_inplace}.0) 4.3% 96.4% 0.050s 4.98e-06s 10000 11 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}(Elemwise{neg,no_inplace}.0, TensorConstant{(1,) of 0.5}) 1.0% 97.4% 0.011s 1.14e-06s 10000 14 Sum{acc_dtype=float64}(Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0) 0.4% 97.8% 0.004s 4.22e-07s 10000 4 Elemwise{sub,no_inplace}(TensorConstant{(1,) of 1.0}, y) 0.3% 98.1% 0.004s 3.76e-07s 10000 0 InplaceDimShuffle{x}(b) 0.3% 98.4% 0.004s 3.70e-07s 10000 10 Elemwise{neg,no_inplace}(Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0) 0.3% 98.7% 0.004s 3.64e-07s 10000 5 AllocEmpty{dtype='float32'}(Shape_i{0}.0) 0.2% 99.0% 0.003s 2.88e-07s 10000 2 InplaceDimShuffle{1,0}(x) 0.2% 99.2% 0.003s 2.65e-07s 10000 9 Elemwise{Composite{((-i0) - i1)}}[(0, 0)](CGemv{inplace}.0, InplaceDimShuffle{x}.0) 0.2% 99.4% 0.002s 2.21e-07s 10000 1 Shape_i{0}(x) 0.2% 99.6% 0.002s 1.98e-07s 10000 8 Elemwise{Cast{float32}}(InplaceDimShuffle{x}.0) 0.2% 99.7% 0.002s 1.90e-07s 10000 6 InplaceDimShuffle{x}(Shape_i{0}.0) 0.1% 99.9% 0.002s 1.54e-07s 10000 16 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)](b, TensorConstant{0.00999999977648}, 
Sum{acc_dtype=float64}.0) 0.1% 100.0% 0.001s 1.34e-07s 10000 3 Shape_i{0}(y) 0.0% 100.0% 0.000s 3.89e-05s 1 3 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 0.0% 100.0% 0.000s 4.77e-06s 1 4 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}(CGemv{inplace}.0, InplaceDimShuffle{x}.0, TensorConstant{(1,) of 0.5}) 0.0% 100.0% 0.000s 1.19e-06s 1 0 InplaceDimShuffle{x}(b) ... (remaining 2 Apply instances account for 0.00%(0.00s) of the runtime) # 2.2 Profiling for GPU computations # In your terminal, type: $ CUDA_LAUNCH_BLOCKING=1 THEANO_FLAGS=profile=True,device=cuda python using_gpu_solution_1.py # You'll see first the output of the script: Used the gpu target values for D prediction on D Results were produced using a GeForce GTX TITAN X # Profiling summary for all functions: Function profiling ================== Message: Sum of all(2) printed profiles at exit excluding Scan op profile. Time in 10001 calls to Function.__call__: 4.181247e+00s Time in Function.fn.__call__: 4.081113e+00s (97.605%) Time in thunks: 3.915566e+00s (93.646%) Total compile time: 9.256095e+00s Number of Apply nodes: 21 Aesara Optimizer time: 9.996419e-01s Aesara validate time: 6.523132e-03s Aesara Linker time (includes C, CUDA code generation/compiling): 8.239602e+00s Import time 4.228115e-03s Time in all call to aesara.grad() 3.286195e-02s Time since aesara import 15.415s Class --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name> 59.5% 59.5% 2.329s 1.16e-04s C 20001 3 aesara.sandbox.gpuarray.blas.GpuGemv 29.8% 89.3% 1.166s 1.30e-05s C 90001 10 aesara.sandbox.gpuarray.elemwise.GpuElemwise 4.1% 93.4% 0.162s 8.10e-06s C 20001 3 aesara.sandbox.gpuarray.basic_ops.HostFromGpu 3.3% 96.7% 0.131s 1.31e-05s C 10000 1 aesara.sandbox.gpuarray.elemwise.GpuCAReduceCuda 1.6% 98.3% 0.061s 6.10e-06s C 10000 1 aesara.sandbox.gpuarray.basic_ops.GpuFromHost 0.8% 99.1% 0.033s 1.09e-06s C 30001 4 aesara.sandbox.gpuarray.elemwise.GpuDimShuffle 0.7% 99.8% 0.026s 2.59e-06s C 10001 2 aesara.sandbox.gpuarray.basic_ops.GpuAllocEmpty 0.2% 100.0% 0.008s 3.95e-07s C 20001 3 aesara.compile.ops.Shape_i ... 
(remaining 0 Classes account for 0.00%(0.00s) of the runtime) Ops --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name> 59.5% 59.5% 2.329s 1.16e-04s C 20001 3 GpuGemv{inplace=True} 4.1% 63.6% 0.162s 8.10e-06s C 20001 3 HostFromGpu(gpuarray) 4.0% 67.6% 0.157s 1.57e-05s C 10000 1 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray> 3.8% 71.4% 0.149s 1.49e-05s C 10000 1 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray> 3.7% 75.1% 0.144s 1.44e-05s C 10000 1 GpuElemwise{sub,no_inplace} 3.6% 78.7% 0.141s 1.41e-05s C 10000 1 GpuElemwise{gt,no_inplace} 3.4% 82.1% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Cast{float32}}[]<gpuarray> 3.4% 85.5% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray> 3.3% 88.8% 0.131s 1.31e-05s C 10000 1 GpuCAReduceCuda{add} 2.9% 91.7% 0.112s 1.12e-05s C 10000 1 GpuElemwise{neg,no_inplace} 2.6% 94.3% 0.102s 1.02e-05s C 10000 1 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray> 2.5% 96.7% 0.096s 9.63e-06s C 10000 1 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray> 1.6% 98.3% 0.061s 6.10e-06s C 10000 1 GpuFromHost<None> 0.7% 99.0% 0.026s 2.59e-06s C 10001 2 GpuAllocEmpty{dtype='float32', context_name=None} 0.5% 99.5% 0.021s 1.06e-06s C 20001 3 InplaceGpuDimShuffle{x} 0.3% 99.8% 0.011s 1.14e-06s C 10000 1 InplaceGpuDimShuffle{1,0} 0.2% 100.0% 0.008s 3.95e-07s C 20001 3 Shape_i{0} 0.0% 100.0% 0.000s 2.00e-05s C 1 1 GpuElemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}[]<gpuarray> ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime) Apply ------ <% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name> 55.0% 55.0% 2.154s 2.15e-04s 10000 7 GpuGemv{inplace=True}(GpuAllocEmpty{dtype='float32', context_name=None}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 4.5% 59.5% 0.176s 1.76e-05s 10000 18 GpuGemv{inplace=True}(w, TensorConstant{-0.00999999977648}, InplaceGpuDimShuffle{1,0}.0, GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0, TensorConstant{0.999800026417}) 4.0% 63.5% 0.157s 1.57e-05s 10000 12 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>(y, GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, GpuElemwise{sub,no_inplace}.0, GpuElemwise{neg,no_inplace}.0) 3.8% 67.3% 0.149s 1.49e-05s 10000 15 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, y, GpuElemwise{Cast{float32}}[]<gpuarray>.0, GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuElemwise{sub,no_inplace}.0) 3.7% 71.0% 0.144s 1.44e-05s 10000 4 GpuElemwise{sub,no_inplace}(GpuArrayConstant{[ 1.]}, y) 3.6% 74.6% 0.141s 1.41e-05s 10000 16 GpuElemwise{gt,no_inplace}(GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[ 0.5]}) 3.4% 78.0% 0.133s 1.33e-05s 10000 10 GpuElemwise{Cast{float32}}[]<gpuarray>(InplaceGpuDimShuffle{x}.0) 3.4% 81.4% 0.133s 1.33e-05s 10000 9 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>(GpuGemv{inplace=True}.0, InplaceGpuDimShuffle{x}.0) 3.3% 84.7% 0.131s 1.31e-05s 10000 17 GpuCAReduceCuda{add}(GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0) 2.9% 87.5% 0.112s 1.12e-05s 10000 11 GpuElemwise{neg,no_inplace}(GpuElemwise{Composite{((-i0) - i1)}}[(0, 
0)]<gpuarray>.0) 2.6% 90.1% 0.102s 1.02e-05s 10000 20 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>(b, GpuArrayConstant{0.00999999977648}, GpuCAReduceCuda{add}.0) 2.5% 92.6% 0.096s 9.63e-06s 10000 13 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>(GpuElemwise{neg,no_inplace}.0) 2.3% 94.9% 0.090s 9.04e-06s 10000 19 HostFromGpu(gpuarray)(GpuElemwise{gt,no_inplace}.0) 1.8% 96.7% 0.072s 7.16e-06s 10000 14 HostFromGpu(gpuarray)(GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>.0) 1.6% 98.3% 0.061s 6.10e-06s 10000 6 GpuFromHost<None>(Shape_i{0}.0) 0.7% 99.0% 0.026s 2.59e-06s 10000 5 GpuAllocEmpty{dtype='float32', context_name=None}(Shape_i{0}.0) 0.3% 99.3% 0.013s 1.33e-06s 10000 0 InplaceGpuDimShuffle{x}(b) 0.3% 99.6% 0.011s 1.14e-06s 10000 2 InplaceGpuDimShuffle{1,0}(x) 0.2% 99.8% 0.008s 7.94e-07s 10000 8 InplaceGpuDimShuffle{x}(GpuFromHost<None>.0) 0.1% 99.9% 0.005s 5.27e-07s 10000 1 Shape_i{0}(x) ... (remaining 7 Apply instances account for 0.07%(0.00s) of the runtime) # 3. Conclusions Examine and compare 'Ops' summaries for CPU and GPU. Usually GPU ops 'GpuFromHost' and 'HostFromGpu' by themselves consume a large amount of extra time, but by making as few as possible data transfers between GPU and CPU, you can minimize their overhead. Notice that each of the GPU ops consumes more time than its CPU counterpart. This is because the ops operate on small inputs; if you increase the input data size (e.g. set N = 4000), you will see a gain from using the GPU. """
62.192029
374
0.579435
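The profiling runs described in the docstring above are driven by THEANO_FLAGS on the command line; the same profile can also be requested programmatically, which is sometimes more convenient in notebooks. The sketch below assumes the Theano/Aesara config flags named in the docstring behave the same way when set from Python.

# Enable profiling from inside the script instead of THEANO_FLAGS (assumed equivalent).
import aesara

aesara.config.profile = True          # collect per-Op timings
aesara.config.profile_memory = False  # set True to also profile memory usage

# ...then build and repeatedly call aesara.function(...) as in the script above;
# the profile summary is printed when the process exits.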
6ad5dcf7e9f96dc2d1c33142dc858481b208540e
1,242
py
Python
chainercv/transforms/bbox/translate_bbox.py
souravsingh/chainercv
8f76510472bc95018c183e72f37bc6c34a89969c
[ "MIT" ]
1
2018-08-24T02:28:31.000Z
2018-08-24T02:28:31.000Z
chainercv/transforms/bbox/translate_bbox.py
souravsingh/chainercv
8f76510472bc95018c183e72f37bc6c34a89969c
[ "MIT" ]
null
null
null
chainercv/transforms/bbox/translate_bbox.py
souravsingh/chainercv
8f76510472bc95018c183e72f37bc6c34a89969c
[ "MIT" ]
2
2019-12-16T02:20:26.000Z
2022-01-17T02:00:49.000Z
def translate_bbox(bbox, y_offset=0, x_offset=0):
    """Translate bounding boxes.

    This method is mainly used together with image transforms, such as
    padding and cropping, which translates the left top point of the image
    from coordinate :math:`(0, 0)` to coordinate
    :math:`(y, x) = (y_{offset}, x_{offset})`.

    The bounding boxes are expected to be packed into a two dimensional
    tensor of shape :math:`(R, 4)`, where :math:`R` is the number of
    bounding boxes in the image. The second axis represents attributes of
    the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,
    where the four attributes are coordinates of the top left and the
    bottom right vertices.

    Args:
        bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is
            :math:`(R, 4)`. :math:`R` is the number of bounding boxes.
        y_offset (int or float): The offset along y axis.
        x_offset (int or float): The offset along x axis.

    Returns:
        ~numpy.ndarray:
        Bounding boxes translated according to the given offsets.

    """
    out_bbox = bbox.copy()
    out_bbox[:, :2] += (y_offset, x_offset)
    out_bbox[:, 2:] += (y_offset, x_offset)

    return out_bbox
37.636364
78
0.65942
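Since the function is fully visible above, a short usage example follows directly from its docstring; the array values are made up for illustration.

# Example usage of translate_bbox with one (y_min, x_min, y_max, x_max) box.
import numpy as np

bbox = np.array([[10.0, 20.0, 30.0, 40.0]])
shifted = translate_bbox(bbox, y_offset=5, x_offset=-2)
print(shifted)  # [[15. 18. 35. 38.]]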
6ad9fa52c59620d080c895b1dcbcc37ef6f3e407
504
py
Python
behave/features/environment.py
ministryofjustice/cla-end-to-end-tests
3d7e525c17f38403a91087c2b1af460ca1109a9b
[ "MIT" ]
1
2022-02-09T13:12:57.000Z
2022-02-09T13:12:57.000Z
behave/features/environment.py
ministryofjustice/cla-end-to-end-tests
3d7e525c17f38403a91087c2b1af460ca1109a9b
[ "MIT" ]
3
2021-09-16T12:24:44.000Z
2022-03-08T10:21:26.000Z
behave/features/environment.py
ministryofjustice/cla-end-to-end-tests
3d7e525c17f38403a91087c2b1af460ca1109a9b
[ "MIT" ]
null
null
null
import os
from configparser import ConfigParser

from helper.helper_web import get_browser
28
67
0.71627
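Only the imports of this behave environment module are preserved in the record above. A minimal sketch of how those imports are typically used in behave's before_all/after_all hooks follows; the config file name and the get_browser() signature are assumptions, not taken from the repository.

# Hypothetical behave environment hooks built on the imports above.
import os
from configparser import ConfigParser

from helper.helper_web import get_browser


def before_all(context):
    # Load test settings once per run and start the shared browser session.
    config = ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))  # assumed file name
    context.config_parser = config
    context.browser = get_browser()


def after_all(context):
    # Close the browser at the end of the run.
    context.browser.quit()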
6ada8f8c31036f868e794a58c29dd691ac89f964
2,422
py
Python
recipe_organizer/gui/recipe_list/recipe_source.py
j-sommer/recipe-organizer
91d39e12c453ecf3d3254645b565bbceacaecde9
[ "MIT" ]
null
null
null
recipe_organizer/gui/recipe_list/recipe_source.py
j-sommer/recipe-organizer
91d39e12c453ecf3d3254645b565bbceacaecde9
[ "MIT" ]
null
null
null
recipe_organizer/gui/recipe_list/recipe_source.py
j-sommer/recipe-organizer
91d39e12c453ecf3d3254645b565bbceacaecde9
[ "MIT" ]
null
null
null
from pathlib import Path
from tkinter import Frame, Label

from recipe_organizer.events.event import Event, EventType
from recipe_organizer.events.event_observer import EventObserver
from recipe_organizer.events.event_publisher import EventPublisher
from recipe_organizer.gui.interfaces.widget_container import WidgetContainer
from recipe_organizer.gui.recipe_summary.recipe_summary import RecipeSummary
from recipe_organizer.recipe.recipe import Recipe
34.112676
111
0.671346
6ada8fe0ced127e4eb158cbef0bc674aa2bd2da2
917
py
Python
var/spack/repos/builtin/packages/spot/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
2020-05-24T15:23:12.000Z
2020-05-24T15:23:12.000Z
var/spack/repos/builtin/packages/spot/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
6
2022-02-26T11:44:34.000Z
2022-03-12T12:14:50.000Z
var/spack/repos/builtin/packages/spot/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
2021-01-06T18:58:26.000Z
2021-01-06T18:58:26.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
38.208333
96
0.707743
6adbdd74efd4b512d8ebd0cf5926e792b5bd7772
42
py
Python
0.py
itspuneet/itspuneet
d44f78afcff275aa56f03bba738ac3e4f2c30843
[ "bzip2-1.0.6" ]
null
null
null
0.py
itspuneet/itspuneet
d44f78afcff275aa56f03bba738ac3e4f2c30843
[ "bzip2-1.0.6" ]
null
null
null
0.py
itspuneet/itspuneet
d44f78afcff275aa56f03bba738ac3e4f2c30843
[ "bzip2-1.0.6" ]
null
null
null
k=0
while k!=1:
    print(k)
    k+=1
8.4
13
0.404762
6adc3f2423ac6cf2c778f44e1751ae2e595e05f5
74,159
py
Python
jss_figures_replication_script.py
Cole-vJ/AdvEMDpy
160cd44b371a2c8aa66961f23062c1d7305dd728
[ "Unlicense" ]
null
null
null
jss_figures_replication_script.py
Cole-vJ/AdvEMDpy
160cd44b371a2c8aa66961f23062c1d7305dd728
[ "Unlicense" ]
null
null
null
jss_figures_replication_script.py
Cole-vJ/AdvEMDpy
160cd44b371a2c8aa66961f23062c1d7305dd728
[ "Unlicense" ]
null
null
null
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) 
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Seres with Added Noise', 50)) x_hs, y, z = hs_ouputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', Linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', Linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', Linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6c time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if SchoenbergWhitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, LineWidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, 
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, LineWidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, 
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = {0, 50, 100, 150} y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) 
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF 2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_ouputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_ouputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max() figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x, y, np.abs(z), 
cmap='gist_rainbow', vmin=z_min, vmax=z_max) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht.png') plt.show() # Carbon Dioxide Concentration Example CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51) plt.plot(CO2_data['month'], CO2_data['decimal date']) plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35)) plt.ylabel('Parts per million') plt.xlabel('Time (years)') plt.savefig('jss_figures/CO2_concentration.png') plt.show() signal = CO2_data['decimal date'] signal = np.asarray(signal) time = CO2_data['month'] time = np.asarray(time) # compare other packages Carbon Dioxide - top pyemd = pyemd0215() py_emd = pyemd(signal) IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert') print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_pyemd.png') plt.show() emd_sift = emd040.sift.sift(signal) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert') print(f'emd annual frequency error: {np.round(sum(np.abs(IF - np.ones_like(IF)))[0], 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_emd.png') plt.show() # compare other packages Carbon Dioxide - bottom knots = np.linspace(time[0], time[-1], 200) emd_example = AdvEMDpy.EMD(time=time, time_series=signal) imfs, hts, ifs, _, _, _, _ = \ 
emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False) print(f'AdvEMDpy annual frequency error: {np.round(sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}') fig, axs = plt.subplots(2, 2) plt.subplots_adjust(hspace=0.5) axs[0, 0].plot(time, signal) axs[0, 1].plot(time, signal) axs[0, 1].plot(time, imfs[0, :], label='Smoothed') axs[0, 1].legend(loc='lower right') axs[1, 0].plot(time, imfs[1, :]) axs[1, 1].plot(time, imfs[2, :]) axis = 0 for ax in axs.flat: if axis == 0: ax.set(ylabel=R'C0$_2$ concentration') if axis == 1: pass if axis == 2: ax.set(ylabel=R'C0$_2$ concentration') ax.set(xlabel='Time (years)') if axis == 3: ax.set(xlabel='Time (years)') axis += 1 plt.gcf().subplots_adjust(bottom=0.15) axs[0, 0].set_title(r'Original CO$_2$ Concentration') axs[0, 1].set_title('Smoothed CO$_2$ Concentration') axs[1, 0].set_title('IMF 1') axs[1, 1].set_title('Residual') plt.gcf().subplots_adjust(bottom=0.15) plt.savefig('jss_figures/CO2_EMD.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs, hts, ifs, max_frequency=10, which_imfs=[1], plot=False) x_hs, y, z = hs_ouputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max() fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.7 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.set_title(textwrap.fill(r'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy', 40)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.plot(x_hs[0, :], np.ones_like(x_hs[0, :]), 'k--', label=textwrap.fill('Annual cycle', 10)) ax.axis([x_hs.min(), x_hs.max(), y.min(), y.max()]) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert.png') plt.show()
48.949835
135
0.664141
6adce2d4edcce50b7803e777bec26f0e2dbe1ef5
8,165
py
Python
GPT-distributed.py
wenhuchen/LogicNLG
e986516e5b6d310219215510b3fe1603d03215cd
[ "MIT" ]
141
2020-04-23T03:30:16.000Z
2022-03-19T08:36:31.000Z
GPT-distributed.py
wenhuchen/LogicNLG
e986516e5b6d310219215510b3fe1603d03215cd
[ "MIT" ]
15
2020-04-26T07:12:30.000Z
2021-06-10T16:40:35.000Z
GPT-distributed.py
wenhuchen/LogicNLG
e986516e5b6d310219215510b3fe1603d03215cd
[ "MIT" ]
20
2020-04-27T03:07:10.000Z
2022-01-22T22:13:15.000Z
import argparse import logging import torch import torch.nn.functional as F import numpy as np from torch import nn from torch.autograd import Variable from transformers import GPT2Config from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertTokenizer from DataLoader import * from Model import BERTGen from utils import sample_sequence import torch.optim as optim import math import sys import pandas import os import numpy import nltk from torch.utils.tensorboard import SummaryWriter import warnings from tqdm import tqdm, trange from torch.utils.data import RandomSampler, SequentialSampler from torch.utils.data import DataLoader as DL import torch from torch.utils.data.distributed import DistributedSampler warnings.filterwarnings("ignore", category=UserWarning) device = torch.device('cuda') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--model", default='gpt2', type=str) parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--top_p", type=float, default=0.9) parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--do_train', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_rl', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_val', default=False, action="store_true", help="whether to train or test the model") parser.add_argument('--do_test', default=False, action="store_true", help="whether to compute the BLEU scores on test split") parser.add_argument('--do_test_challenge', default=False, action="store_true", help="whether to compute the BLEU scores on challenge split") parser.add_argument('--do_ppl', default=False, action="store_true", help="whether to compute perplexity of the model") parser.add_argument('--do_verify', default=False, action="store_true", help="whether compute the adv-acc score on test split") parser.add_argument('--do_verify_challenge', default=False, action="store_true", help="whether compute the adv-acc score on challenge split") parser.add_argument('--epoch', default=10, type=int, help="whether to train or test the model") parser.add_argument('--batch_size', default=6, type=int, help="whether to train or test the model") parser.add_argument('--local_rank', default=-1, type=int, help="whether to train or test the model") parser.add_argument('--learning_rate', default=2e-6, type=float, help="whether to train or test the model") parser.add_argument('--dataset', default='table', type=str, help="whether to train or test the model") parser.add_argument('--every', default=50, type=int, help="whether to train or test the model") parser.add_argument('--load_from', default='', type=str, help="whether to train or test the model") parser.add_argument('--id', default='models', type=str, help="specify the id of the experiment") parser.add_argument('--max_len', default=800, type=int, help="whether to train or test the model") parser.add_argument('--dim', default=768, type=int, help="whether to train or test the model") parser.add_argument('--layers', default=3, type=int, help="whether to train or test the model") parser.add_argument('--head', default=4, type=int, help="whether to train or test the model") parser.add_argument("--modelpath", type=str, default="bert-base-uncased", help="For distributed training: local_rank") parser.add_argument('--gradient_accumulation_steps', type=int, default=5, help="accumulation steps for gradient") 
parser.add_argument('--decode_first_K', type=int, default=10000, help="For debugging purpose") args = parser.parse_args() if args.local_rank == -1: device = torch.device("cuda") args.n_gpu = 1 else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device if args.local_rank not in [-1, 0]: torch.distributed.barrier() tokenizer = GPT2Tokenizer.from_pretrained(args.model) model = GPT2LMHeadModel.from_pretrained(args.model) #model = nn.DataParallel(model) model.to(args.device) if args.local_rank == 0: torch.distributed.barrier() criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1) if args.do_train: if args.local_rank in [-1, 0]: if not os.path.exists(args.id): os.mkdir(args.id) tb_writer = SummaryWriter(log_dir='tensorboard/GPT2-{}'.format(args.model)) dataset = GPTTableDataset2('data/train_lm_preprocessed.json', tokenizer, args.max_len) if args.local_rank == -1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) train_dataloader = DL(dataset, sampler=sampler, batch_size=args.batch_size, num_workers=0) model.train() optimizer = optim.Adam(model.parameters(), args.learning_rate) avg_loss = 0 global_step = 0 if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) else: model = torch.nn.DataParallel(model) for epoch_idx in trange(0, args.epoch, desc='Epoch', disable=args.local_rank not in [-1, 0]): #for idx in range(0, dataset.train_len()): for idx, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): batch = tuple(Variable(t).to(device) for t in batch) trg_inp, trg_out, mask, caption = batch inputs = torch.cat([caption, trg_inp], 1) model.zero_grad() optimizer.zero_grad() logits = model(inputs)[0] logits = logits[:, -trg_out.shape[1]:, :].contiguous() loss = criterion(logits.view(-1, logits.shape[-1]), trg_out.view(-1)) loss = loss * mask.view(-1) loss = loss.sum() / mask.sum() avg_loss += loss.item() loss.backward() optimizer.step() global_step += 1 if args.local_rank in [-1, 0] and idx % args.every == 0 and idx > 0: tb_writer.add_scalar("perplexity", math.exp(avg_loss / args.every), global_step) fake_inputs = caption gt_inputs = trg_out.cpu().data.numpy() #samples = model.sample(fake_inputs, tabfeat, caption, highlight_idx, bert) samples = sample_sequence(model, 30, fake_inputs, []) samples = samples[:, caption.shape[1]:] samples = samples.cpu().data.numpy() for s, gt in zip(samples, gt_inputs): text = tokenizer.decode(s, clean_up_tokenization_spaces=True) text = text[: text.find(tokenizer.eos_token)] print("PREDICTION |||||| ", text) text = tokenizer.decode(gt, clean_up_tokenization_spaces=True) text = text[: text.find(tokenizer.eos_token)] print("GROUNDTRUH |||||| ",text) break avg_loss = 0 if args.local_rank in [-1, 0]: if args.model == 'gpt2': torch.save(model.state_dict(), '{}/GPT_ep{}.pt'.format(args.id, epoch_idx)) else: torch.save(model.state_dict(), '{}/GPT_medium_ep{}.pt'.format(args.id, epoch_idx)) if args.local_rank in [-1, 0]: tb_writer.close()
47.196532
145
0.643846
6addc56efc2458ffaaa37a8a1a9d3060123eac26
9,901
py
Python
bentoml/saved_bundle/loader.py
niits/BentoML
3954f36762e10f5df15af7e0ae6dd71f5f214261
[ "Apache-2.0" ]
3,451
2019-04-02T01:47:42.000Z
2022-03-31T16:20:49.000Z
bentoml/saved_bundle/loader.py
niits/BentoML
3954f36762e10f5df15af7e0ae6dd71f5f214261
[ "Apache-2.0" ]
1,925
2019-04-03T00:19:05.000Z
2022-03-31T22:41:54.000Z
bentoml/saved_bundle/loader.py
niits/BentoML
3954f36762e10f5df15af7e0ae6dd71f5f214261
[ "Apache-2.0" ]
451
2019-04-02T01:53:41.000Z
2022-03-29T08:49:06.000Z
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import os
import sys
import tarfile
import logging
import tempfile
import shutil

from functools import wraps
from contextlib import contextmanager
from urllib.parse import urlparse
from typing import TYPE_CHECKING
from pathlib import PureWindowsPath, PurePosixPath

from bentoml.utils.s3 import is_s3_url
from bentoml.utils.gcs import is_gcs_url
from bentoml.exceptions import BentoMLException
from bentoml.saved_bundle.config import SavedBundleConfig
from bentoml.saved_bundle.pip_pkg import ZIPIMPORT_DIR

if TYPE_CHECKING:
    from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata

logger = logging.getLogger(__name__)


def resolve_remote_bundle(func):
    """Decorate a function to handle remote bundles."""
    # NOTE: the inner ``wrapper`` function is not defined in this excerpt;
    # the original decorator body is elided here.
    return wrapper
35.487455
86
0.692051
6adeb529cfb4e14bdceab8619cd0e9f75dad5fb6
615
py
Python
migrations/versions/0158_remove_rate_limit_default.py
cds-snc/notifier-api
90b385ec49efbaee7e607516fc7d9f08991af813
[ "MIT" ]
41
2019-11-28T16:58:41.000Z
2022-01-28T21:11:16.000Z
migrations/versions/0158_remove_rate_limit_default.py
cds-snc/notification-api
b1c1064f291eb860b494c3fa65ac256ad70bf47c
[ "MIT" ]
1,083
2019-07-08T12:57:24.000Z
2022-03-08T18:53:40.000Z
migrations/versions/0158_remove_rate_limit_default.py
cds-snc/notifier-api
90b385ec49efbaee7e607516fc7d9f08991af813
[ "MIT" ]
9
2020-01-24T19:56:43.000Z
2022-01-27T21:36:53.000Z
""" Revision ID: 0158_remove_rate_limit_default Revises: 0157_add_rate_limit_to_service Create Date: 2018-01-09 14:33:08.313893 """ import sqlalchemy as sa from alembic import op revision = "0158_remove_rate_limit_default" down_revision = "0157_add_rate_limit_to_service"
26.73913
82
0.785366
6adecc40e2158fa98b341e37dfb8d034335bed2b
1,267
py
Python
gen-post.py
younghk/younghk.netlify.com
605ab089252127c0b768d31afb027e8896ae33b4
[ "MIT" ]
null
null
null
gen-post.py
younghk/younghk.netlify.com
605ab089252127c0b768d31afb027e8896ae33b4
[ "MIT" ]
null
null
null
gen-post.py
younghk/younghk.netlify.com
605ab089252127c0b768d31afb027e8896ae33b4
[ "MIT" ]
null
null
null
import os
import errno
from datetime import datetime

print("Generating A New Post\n")

post_name = input('Input Post Name: ')
date_time = datetime.now()
date_time_dir = date_time.strftime("%Y-%m-%d")
date_time_post = date_time.strftime("%Y-%m-%d %H:%M:%S")

p_name = post_name.replace(" ","-")
p_name = p_name.replace("[","")
p_name = p_name.replace("]","")
p_name = p_name.lower()

f_name = date_time_dir+"---"+p_name
dir = "./src/pages/articles/"+f_name+"/"
f_dir = dir+f_name+".md"

try:
    if not(os.path.isdir(dir)):
        os.makedirs(os.path.join(dir))
except OSError as e:
    if e.errno != errno.EEXIST:
        print("Failed to create directory!!!!!")
        raise

print("Generating post : ",f_dir)

with open(f_dir, 'w') as f:
    f.write('---')
    f.write('\n')
    f.write('draft: true')
    f.write('\n')
    f.write('title: \"'+post_name+'\"')
    f.write('\n')
    f.write('date: \"'+date_time_post+'\"')
    f.write('\n')
    f.write('layout: post')
    f.write('\n')
    f.write('path: \"/posts/'+p_name+'/\"')
    f.write('\n')
    f.write('category: \"\"')
    f.write('\n')
    f.write('tags: ')
    f.write('\n')
    f.write('description: ""')
    f.write('\n')
    f.write('---')
    f.write('\n')

print("Done :)")
23.462963
56
0.561168
6ae0041ec06abb5f41acc8d9e0ad54c9727be449
39,758
py
Python
rmgpy/reactionTest.py
Lyle-zhang/RMG-Py
273eb51fa3c175562056c85d7d61814d5fa2986d
[ "MIT" ]
null
null
null
rmgpy/reactionTest.py
Lyle-zhang/RMG-Py
273eb51fa3c175562056c85d7d61814d5fa2986d
[ "MIT" ]
null
null
null
rmgpy/reactionTest.py
Lyle-zhang/RMG-Py
273eb51fa3c175562056c85d7d61814d5fa2986d
[ "MIT" ]
1
2021-08-14T13:47:18.000Z
2021-08-14T13:47:18.000Z
#!/usr/bin/env python
# encoding: utf-8 -*-

"""
This module contains unit tests of the rmgpy.reaction module.
"""

import numpy
import unittest
from external.wip import work_in_progress

from rmgpy.species import Species, TransitionState
from rmgpy.reaction import Reaction
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import Wilhoit
import rmgpy.constants as constants

################################################################################

################################################################################

if __name__ == '__main__':
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
42.567452
208
0.566452
6ae016a3900fe6ed337451d458c99fc65e3be76f
888
py
Python
backend/core/api_urls.py
albeiks/omaralbeik.com
8d096130393919612863aac6280dffaf6e00961d
[ "MIT" ]
10
2020-05-05T16:20:04.000Z
2021-07-22T15:15:13.000Z
backend/core/api_urls.py
albeiks/omaralbeik.com
8d096130393919612863aac6280dffaf6e00961d
[ "MIT" ]
null
null
null
backend/core/api_urls.py
albeiks/omaralbeik.com
8d096130393919612863aac6280dffaf6e00961d
[ "MIT" ]
1
2020-05-06T22:31:48.000Z
2020-05-06T22:31:48.000Z
from django.conf.urls import url, include

from core.routers import OptionalTrailingSlashRouter
from blog import views as blogViews
from snippets import views as snippetsViews
from projects import views as projectsViews
from tags import views as tagsViews
from contents import views as contentsViews
from contact import views as contactViews

router = OptionalTrailingSlashRouter()
router.register(r"blog", blogViews.PostViewSet)
router.register(r"snippets", snippetsViews.SnippetViewSet)
router.register(r"languages", snippetsViews.ProgrammingLanguageViewSet)
router.register(r"projects", projectsViews.ProjectViewSet)
router.register(r"tags", tagsViews.TagViewSet)
router.register(r"contents", contentsViews.ContentViewSet)
router.register(r"contact", contactViews.MessageViewSet)

# List or url patterns for the api subdomain
urlpatterns = [
    url(r"^v2/", include(router.urls)),
]
35.52
71
0.824324
6ae09675e3f04c208d0aada0fe5dc7452f3a90fa
9,402
py
Python
python/video_ADG.py
alexberndt/mobile-AGV-optimization
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
[ "MIT" ]
2
2021-12-22T03:07:08.000Z
2022-03-19T09:41:29.000Z
python/video_ADG.py
alexberndt/mobile-AGV-optimization
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
[ "MIT" ]
null
null
null
python/video_ADG.py
alexberndt/mobile-AGV-optimization
76b97fd5aa3898fd6cb6f74f8d87140555c92af5
[ "MIT" ]
1
2021-11-22T10:58:38.000Z
2021-11-22T10:58:38.000Z
""" closed-loop MILP solved to determine optimal ordering defined by ADG """ import sys import yaml import time import matplotlib.colors as mcolors import matplotlib import matplotlib.pyplot as plt import random import logging import time import networkx as nx import csv import statistics as stat import os import sys from mip import Model, ProgressLog, xsum, maximize, minimize, BINARY, CONTINUOUS, Constr, ConstrList sys.path.insert(1, "functions/") from planners import * from visualizers import * from milp_formulation import * from robot import * from adg import * from adg_node import * from process_results import * logger = logging.getLogger(__name__) logging.basicConfig(format='%(name)s - %(levelname)s :: %(message)s', level=logging.INFO) def main(): """ --------------------------- INPUTS --------------------------------- """ show_visual = False show_ADG = True #not show_visual run_MILP = True #False #True save_file = False sim_timeout = 500 # define prediction and control horizons: H_prediction >= H_control H_prediction = np.NaN # integer value for forward node lookup H_control = 5 random_seed = 0 mu = 0.5 robust_param = 0.0 delay_amount = 5 delayed_robot_cnt = 2 w = 1.4 # sub-optimality bound: w = 1.0 -> CBS, else ECBS! fldr = "nuernberg_small" # auto_gen_01_nuernberg | auto_gen_00_large | auto_gen_02_simple | manual_03_maxplus random.seed(random_seed) np.random.seed(random_seed) """ -------------------------------------------------------------------- """ # start initial pwd = os.path.dirname(os.path.abspath(__file__)) logger.info(pwd) map_file = pwd + "/data/" + fldr + "/csv_map_yaml.yaml" robot_file = pwd + "/data/" + fldr + "/csv_robots_yaml.yaml" robot_file_tmp = pwd + "/data/tmp/robots.yaml" start_time = time.time() plans = run_CBS(map_file, robot_file, w=w) # if w > 1.0, run_CBS uses ECBS! 
logger.info(" with sub-optimality w={}".format(w)) logger.info(" plan statistics: {} \n".format(plans["statistics"])) logger.debug(plans["schedule"]) # show factory map # show_factory_map(map_file, robot_file, True) # plt.show() map_gen_robot_count = 10 map_gen_seedval = "NaN" try: map_gen_robot_count = int(sys.argv[1]) map_gen_seedval = int(sys.argv[2]) H_control = int(sys.argv[3]) robust_param = int(sys.argv[4]) random.seed(map_gen_seedval) # map_gen_seedval np.random.seed(map_gen_seedval) # map_gen_seedval except: print(" no valid inputs given, ignoring ...") # determine ADG, reverse ADG and dependency groups ADG, robot_plan, goal_positions = determine_ADG(plans, show_graph=False) nodes_all, edges_type_1, dependency_groups = analyze_ADG(ADG, plans, show_graph=False) ADG_reverse = ADG.reverse(copy=False) # initialize simulation robots = [] solve_time = [] robots_done = [] time_to_goal = {} colors = plt.cm.rainbow( np.arange(len(robot_plan))/len(robot_plan) ) for robot_id in robot_plan: plan = robot_plan[robot_id] logger.debug("Robot {} - plan: {} \t \t positions: {}".format(robot_id, plan["nodes"], plan["positions"])) new_robot = Robot(robot_id, plan, colors[robot_id], goal_positions[robot_id]) robots.append(new_robot) robots_done.append(False) time_to_goal[robot_id] = 0 if show_visual: visualizer = Visualizer(map_file, robots) # initialize optimization MIP object m_opt m_opt = Model('MILP_sequence', solver='CBC') # print(m_opt.max_nodes) pl_opt = ProgressLog() # pl_opt.settings = "objective_value" # print("pl_opt.settings: {}".format(pl_opt.settings)) # print("pl_opt.log: {}".format(pl_opt.log)) # pl_opt.instance = m_opt.name # print("pl_opt.instance: {}".format(pl_opt.instance)) ADG_fig = plt.figure(figsize=(12,8)) plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!') writer = FFMpegWriter(fps=2, metadata=metadata) with writer.saving(ADG_fig, "ADG_video.mp4", 500): # run a simulation in time k = 0 robot_IDs_to_delay = [] while (not all(robots_done)) and (k < sim_timeout): print("pl_opt.log: {}".format(pl_opt.log)) m_opt.clear() # show current robot status logger.info("-------------------- @ time step k = {} --------------------".format(k)) for robot in robots: node_info = ADG.node[robot.current_node]["data"] logger.debug(" - Robot {} # {} @ {} => status: {}".format(robot.robot_ID, node_info.ID, node_info.s_loc, robot.status)) # solve MILP for the advanced ADG to potentially adjust ordering res, solve_t = solve_MILP(robots, dependency_groups, ADG, ADG_reverse, H_control, H_prediction, m_opt, pl_opt, run=run_MILP, uncertainty_bound=robust_param) solve_time.append(solve_t) if not (res is None or res == "OptimizationStatus.OPTIMAL"): ValueError("Optimization NOT optimal") # ADG after MILP if show_ADG: # draw_ADG(ADG, robots, "ADG after MILP ADG | k = {}".format(k), writer=writer) # plt.show() # check for cycles try: nx.find_cycle(ADG, orientation="original") logger.warning("Cycle detected!!") raise Exception("ADG has a cycle => deadlock! something is wrong with optimization") except nx.NetworkXNoCycle: logger.debug("no cycle detected in ADG => no deadlock. 
good!") pass if (k % delay_amount) == 0: robot_IDs = np.arange(map_gen_robot_count) robot_IDs_to_delay = np.random.choice(map_gen_robot_count, size=delayed_robot_cnt, replace=False) logger.info("delaying robots (ID): {}".format(robot_IDs_to_delay)) # Advance robots if possible (dependencies have been met) for robot in robots: # check if all dependencies have been met, to advance to next node node_info = ADG.node[robot.current_node]["data"] node_dependencies_list = list(ADG_reverse.neighbors(robot.current_node)) all_dependencies_completed = True for dependency in node_dependencies_list: if (ADG.node[dependency]["data"].status != Status.FINISHED): all_dependencies_completed = False # if all dependencies are completed, the robot can advance! # delay_amount = np.random.poisson(mu) # same sample every time if all_dependencies_completed and k > 0: # (robot.robot_ID == 2 or k > 5) if (not (robot.robot_ID in robot_IDs_to_delay)): # or (k < 10 or k > 20)): # or (robot.robot_ID == 3 or k > 8): ADG.node[robot.current_node]["data"].status = Status.FINISHED robot.advance() if not robot.is_done(): time_to_goal[robot.robot_ID] += 1 else: robots_done[robot.robot_ID] = True if show_visual: visualizer.redraw(robots, pause_length=0.1) # return 0 k += 1 # end of while loop total_time = 0 for idx, t in time_to_goal.items(): total_time += t logger.info("Total time to complete missions: {}".format(total_time)) logger.info("horizon = {}".format(H_control)) logger.info("") logger.info("Computation time:") logger.info(" - max: {}".format(max(solve_time))) logger.info(" - avg: {}".format(stat.mean(solve_time))) # create data to save to YAML file simulation_results = {} simulation_results["parameters"] = {} simulation_results["parameters"]["H_control"] = H_control simulation_results["parameters"]["random seed"] = random_seed simulation_results["parameters"]["ECBS w"] = w simulation_results["parameters"]["mu"] = mu simulation_results["parameters"]["robust param"] = robust_param simulation_results["parameters"]["delay amount"] = delay_amount simulation_results["map details"] = {} simulation_results["map details"]["robot_count"] = map_gen_robot_count simulation_results["map details"]["seed val"] = map_gen_seedval simulation_results["results"] = {} simulation_results["results"]["comp time"] = {} simulation_results["results"]["comp time"]["solve_time"] = [solve_time] simulation_results["results"]["comp time"]["max"] = max(solve_time) simulation_results["results"]["comp time"]["avg"] = stat.mean(solve_time) simulation_results["results"]["total time"] = total_time logger.info(simulation_results) file_name = pwd + "/results/robust_" +str(delayed_robot_cnt) + "x" + str(delay_amount) + "/res_robots_" + str(map_gen_robot_count) + "_horizon_" + str(H_control) + "_mapseed_" + str(map_gen_seedval) + "_robustparam_" + str(robust_param) + ".yaml" if save_file: save_to_yaml(simulation_results, file_name) if __name__ == "__main__": main()
40.008511
251
0.623591
6ae0b2e5802c9a882bad8e33b9afb8e9564c00ff
4,558
py
Python
tests/model/test_guest.py
bcurnow/rfid-security-svc
d3806cb74d3d0cc2623ea425230dc8781ba4d8b4
[ "Apache-2.0" ]
null
null
null
tests/model/test_guest.py
bcurnow/rfid-security-svc
d3806cb74d3d0cc2623ea425230dc8781ba4d8b4
[ "Apache-2.0" ]
null
null
null
tests/model/test_guest.py
bcurnow/rfid-security-svc
d3806cb74d3d0cc2623ea425230dc8781ba4d8b4
[ "Apache-2.0" ]
null
null
null
import pytest
from unittest.mock import patch

import rfidsecuritysvc.model.guest as model
from rfidsecuritysvc.model.color import Color
from rfidsecuritysvc.model.guest import Guest
from rfidsecuritysvc.model.sound import Sound
from rfidsecuritysvc.exception import SoundNotFoundError


def test__model_no_color(creatable_guest):
    row = creatable_guest.test_to_row()
    row['color'] = None
    g = model.__model(row)
    assert g.color is None


def test__model_no_sound(creatable_guest):
    row = creatable_guest.test_to_row()
    row['sound'] = None
    g = model.__model(row)
    assert g.sound is None


def _default(index=1):
    return _model(index, f'first {index}', f'last {index}',
                  Sound(index, f'sound_name {index}', '2021-09-25 23:13:25'),
                  Color(0xABCDEF))


def _model(id, first_name, last_name, sound, color):
    return Guest(id, first_name, last_name, sound, color)
33.762963
136
0.740895
6ae26b063b0fbd07c2ce06161f218674d84af1d4
1,119
py
Python
ice/consoles.py
reavessm/Ice
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
[ "MIT" ]
578
2015-01-02T12:43:52.000Z
2022-03-27T23:45:32.000Z
ice/consoles.py
reavessm/Ice
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
[ "MIT" ]
271
2015-01-05T01:56:38.000Z
2021-08-14T02:51:24.000Z
ice/consoles.py
reavessm/Ice
e78d046abfd6006b1a81d1cbdb516b7c3e141ac9
[ "MIT" ]
156
2015-01-07T15:43:20.000Z
2021-12-11T19:10:44.000Z
# encoding: utf-8

import os

import roms


def console_roms_directory(configuration, console):
  """
  If the user has specified a custom ROMs directory in consoles.txt then
  return that. Otherwise, append the shortname of the console to the default
  ROMs directory given by config.txt.
  """
  if console.custom_roms_directory:
    return console.custom_roms_directory
  return os.path.join(roms.roms_directory(configuration), console.shortname)


def path_is_rom(console, path):
  """
  This function determines if a given path is actually a valid ROM file.
  If a list of extensions is supplied for this console, we check if the path
  has a valid extension

  If no extensions are defined for this console, we just accept any file
  """
  if console.extensions == "":
    return True

  # Normalize the extension based on the things we validly ignore.
  # Aka capitalization, whitespace, and leading dots
  normalize = lambda ext: ext.lower().strip().lstrip('.')

  (name, ext) = os.path.splitext(path)
  valid_extensions = console.extensions.split(',')
  return normalize(ext) in map(normalize, valid_extensions)
31.971429
98
0.747096
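A small illustration, not part of the original file, of how the extension normalization inside path_is_rom behaves; the console configuration here is hypothetical:

# Same normalization used by path_is_rom: lowercase, strip whitespace, drop leading dot.
normalize = lambda ext: ext.lower().strip().lstrip('.')

assert normalize(' .SMC ') == 'smc'
assert normalize('nes') == 'nes'
# With console.extensions == "SMC, sfc", both "game.smc" and "game.SFC" are
# accepted, while "notes.txt" is rejected.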
6ae285af81cb46f32301f55fbf5e2dcaee2e26e6
5,527
py
Python
clue/c3.py
dumpmemory/roformer-v2
95b71ae03b8bb910998285e194d7752b1e4104c0
[ "Apache-2.0" ]
44
2022-03-17T02:58:27.000Z
2022-03-31T13:08:29.000Z
clue/c3.py
dumpmemory/roformer-v2
95b71ae03b8bb910998285e194d7752b1e4104c0
[ "Apache-2.0" ]
null
null
null
clue/c3.py
dumpmemory/roformer-v2
95b71ae03b8bb910998285e194d7752b1e4104c0
[ "Apache-2.0" ]
2
2022-03-17T05:47:06.000Z
2022-03-22T10:33:54.000Z
#! -*- coding:utf-8 -*- # CLUE # c3 # import json import numpy as np from snippets import * from bert4keras.backend import keras from bert4keras.snippets import sequence_padding, DataGenerator from bert4keras.snippets import open from bert4keras.snippets import truncate_sequences from tqdm import tqdm # num_classes = 4 maxlen = 512 batch_size = 4 epochs = 10 def load_data(filename): """ [(, , , id)] """ D = [] with open(filename) as f: data = json.load(f) for d in data: p = u'||'.join(d[0]) for qa in d[1]: q = qa['question'] while len(qa['choice']) < num_classes: qa['choice'].append(u'') c = qa['choice'][:num_classes] if 'answer' in qa: a = qa['choice'].index(qa['answer']) else: a = 0 D.append((p, q, c, a)) return D # train_data = load_data(data_path + 'c3/m-train.json') train_data += load_data(data_path + 'c3/d-train.json') valid_data = load_data(data_path + 'c3/m-dev.json') valid_data += load_data(data_path + 'c3/d-dev.json') # train_generator = data_generator(train_data, batch_size) valid_generator = data_generator(valid_data, batch_size) def multichoice_crossentropy(y_true, y_pred): """ """ y_true = K.cast(y_true, 'int32')[::num_classes] y_pred = K.reshape(y_pred, (-1, num_classes)) return K.mean( K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True) ) def multichoice_accuracy(y_true, y_pred): """ """ y_true = K.cast(y_true, 'int32')[::num_classes, 0] y_pred = K.reshape(y_pred, (-1, num_classes)) y_pred = K.cast(K.argmax(y_pred, axis=1), 'int32') return K.mean(K.cast(K.equal(y_true, y_pred), K.floatx())) # output = base.model.output output = keras.layers.Lambda(lambda x: x[:, 0])(output) output = keras.layers.Dense(units=1, kernel_initializer=base.initializer)(output) model = keras.models.Model(base.model.input, output) model.summary() model.compile( loss=multichoice_crossentropy, optimizer=optimizer4, metrics=[multichoice_accuracy] ) def test_predict(in_file, out_file): """ https://www.cluebenchmarks.com """ test_data = load_data(in_file) test_generator = data_generator(test_data, batch_size) results = [] for x_true, _ in tqdm(test_generator, ncols=0): y_pred = model.predict(x_true).reshape((-1, num_classes)) y_pred = y_pred.argmax(axis=1) results.extend(y_pred) fw = open(out_file, 'w') with open(in_file) as fr: data = json.load(fr) i = 0 for d in data: for qa in d[1]: l = json.dumps({'id': str(qa['id']), 'label': str(results[i])}) fw.write(l + '\n') i += 1 fw.close() if __name__ == '__main__': evaluator = Evaluator() model.fit_generator( train_generator.forfit(), steps_per_epoch=len(train_generator), epochs=epochs, callbacks=[evaluator] ) model.load_weights('weights/c3.weights') test_predict( in_file=data_path + 'c3/test1.0.json', out_file='results/c310_predict.json' ) test_predict( in_file=data_path + 'c3/test1.1.json', out_file='results/c311_predict.json' ) else: model.load_weights('weights/c3.weights')
29.089474
79
0.599602
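The load_data docstring above lost its original non-ASCII text; from the code itself, each element appended to D is a (passage, question, choices, answer index) tuple. A hypothetical sample row, shown as a comment:

# ("passage sentence 1||passage sentence 2",         # p: paragraphs joined with '||'
#  "What does the speaker mean?",                     # q: the question text
#  ["choice A", "choice B", "choice C", "choice D"],  # c: padded/truncated to num_classes entries
#  2)                                                 # a: index of the answer within c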
6ae3ed28439c3795f0a3092e3b0da325e69356b7
1,590
py
Python
tools/perf/contrib/oop_raster/oop_raster.py
zipated/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
tools/perf/contrib/oop_raster/oop_raster.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
tools/perf/contrib/oop_raster/oop_raster.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from benchmarks import smoothness,thread_times
import page_sets
from telemetry import benchmark


# pylint: disable=protected-access
def CustomizeBrowserOptionsForOopRasterization(options):
  """Enables flags needed for out of process rasterization."""
  options.AppendExtraBrowserArgs('--force-gpu-rasterization')
  options.AppendExtraBrowserArgs('--enable-oop-rasterization')
34.565217
78
0.796855
6ae4d12e5b6c5a2ce81f0095493d76c6afcfb99b
3,481
py
Python
logpy/util.py
mrocklin/logpy
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
[ "BSD-3-Clause" ]
1
2016-09-20T16:05:12.000Z
2016-09-20T16:05:12.000Z
logpy/util.py
mrocklin/logpy
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
[ "BSD-3-Clause" ]
null
null
null
logpy/util.py
mrocklin/logpy
7e32f4da10a0ab5b86fb23947cfce9a4d49c6b3f
[ "BSD-3-Clause" ]
null
null
null
import itertools as it from toolz.compatibility import range, map, iteritems def transitive_get(key, d): """ Transitive dict.get >>> from logpy.util import transitive_get >>> d = {1: 2, 2: 3, 3: 4} >>> d.get(1) 2 >>> transitive_get(1, d) 4 """ while hashable(key) and key in d: key = d[key] return key def deep_transitive_get(key, d): """ Transitive get that propagates within tuples >>> from logpy.util import transitive_get, deep_transitive_get >>> d = {1: (2, 3), 2: 12, 3: 13} >>> transitive_get(1, d) (2, 3) >>> deep_transitive_get(1, d) (12, 13) """ key = transitive_get(key, d) if isinstance(key, tuple): return tuple(map(lambda k: deep_transitive_get(k, d), key)) else: return key def take(n, seq): if n is None: return seq if n == 0: return tuple(seq) return tuple(it.islice(seq, 0, n)) def evalt(t): """ Evaluate tuple if unevaluated >>> from logpy.util import evalt >>> add = lambda x, y: x + y >>> evalt((add, 2, 3)) 5 >>> evalt(add(2, 3)) 5 """ if isinstance(t, tuple) and len(t) >= 1 and callable(t[0]): return t[0](*t[1:]) else: return t def intersection(*seqs): return (item for item in seqs[0] if all(item in seq for seq in seqs[1:])) def pprint(g): """ Pretty print a tree of goals """ if callable(g) and hasattr(g, '__name__'): return g.__name__ if isinstance(g, type): return g.__name__ if isinstance(g, tuple): return "(" + ', '.join(map(pprint, g)) + ")" return str(g) def index(tup, ind): """ Fancy indexing with tuples """ return tuple(tup[i] for i in ind)
24.687943
67
0.545246
6ae561e06496768e94110f91362d5a5eeb524bdb
545
py
Python
index.py
rinocloud/rinobot-plugin-shift
4f7f16a5e610b91b64377733d24b6ab4b63daa67
[ "MIT" ]
null
null
null
index.py
rinocloud/rinobot-plugin-shift
4f7f16a5e610b91b64377733d24b6ab4b63daa67
[ "MIT" ]
null
null
null
index.py
rinocloud/rinobot-plugin-shift
4f7f16a5e610b91b64377733d24b6ab4b63daa67
[ "MIT" ]
null
null
null
import rinobot_plugin as bot
import numpy as np


if __name__ == "__main__":
    main()
25.952381
59
0.669725
6ae5c492e4c7a58f8381ae12e47edd808dc70752
1,912
py
Python
gluon/contrib/memcache/__init__.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
null
null
null
gluon/contrib/memcache/__init__.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
null
null
null
gluon/contrib/memcache/__init__.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
1
2019-03-13T08:20:25.000Z
2019-03-13T08:20:25.000Z
from gluon.contrib.memcache.memcache import Client
import time

"""
example of usage:

cache.memcache=MemcacheClient(request,[127.0.0.1:11211],debug=true)
"""

import cPickle as pickle
import thread
locker = thread.allocate_lock()
28.537313
72
0.620816
6ae692e1d71ed7665097647321e9c90f2d578c91
206
py
Python
test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
aas-core-works/aas-core-csharp-codegen
731f706e2d12bf80722ac55d920fcf5402fb26ef
[ "MIT" ]
null
null
null
test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
aas-core-works/aas-core-csharp-codegen
731f706e2d12bf80722ac55d920fcf5402fb26ef
[ "MIT" ]
2
2021-11-17T22:11:59.000Z
2021-11-18T15:17:40.000Z
test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
aas-core-works/aas-core-csharp-codegen
731f706e2d12bf80722ac55d920fcf5402fb26ef
[ "MIT" ]
null
null
null
__book_url__ = "dummy" __book_version__ = "dummy" associate_ref_with(Reference)
12.875
33
0.737864
6ae7079318bfafa24286324b4e5be07089c8ccfb
11,135
py
Python
deepcut/deepcut.py
wannaphong/deepcut
e4f7779caa087c5ffbad3bc4e88f919e300d020c
[ "MIT" ]
17
2020-10-06T12:35:19.000Z
2021-11-19T07:33:15.000Z
deepcut/deepcut.py
wannaphong/deepcut
e4f7779caa087c5ffbad3bc4e88f919e300d020c
[ "MIT" ]
3
2020-10-07T06:29:33.000Z
2020-10-23T15:21:09.000Z
deepcut/deepcut.py
wannaphong/deepcut
e4f7779caa087c5ffbad3bc4e88f919e300d020c
[ "MIT" ]
1
2020-10-06T13:16:54.000Z
2020-10-06T13:16:54.000Z
#!/usr/bin/env python # encoding: utf-8 import numbers import os import re import sys from itertools import chain import numpy as np import scipy.sparse as sp import six import pickle from .model import get_convo_nn2 from .stop_words import THAI_STOP_WORDS from .utils import CHAR_TYPES_MAP, CHARS_MAP, create_feature_array MODULE_PATH = os.path.dirname(__file__) WEIGHT_PATH = os.path.join(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5') TOKENIZER = None def tokenize(text, custom_dict=None): """ Tokenize given Thai text string Input ===== text: str, Thai text string custom_dict: str (or list), path to customized dictionary file It allows the function not to tokenize given dictionary wrongly. The file should contain custom words separated by line. Alternatively, you can provide list of custom words too. Output ====== tokens: list, list of tokenized words Example ======= >> deepcut.tokenize('') >> ['','','',''] """ global TOKENIZER if not TOKENIZER: TOKENIZER = DeepcutTokenizer() return TOKENIZER.tokenize(text, custom_dict=custom_dict) def _document_frequency(X): """ Count the number of non-zero values for each feature in sparse X. """ if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) return np.diff(sp.csc_matrix(X, copy=False).indptr) def _check_stop_list(stop): """ Check stop words list ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95 """ if stop == "thai": return THAI_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None # assume it's a collection return frozenset(stop) def load_model(file_path): """ Load saved pickle file of DeepcutTokenizer Parameters ========== file_path: str, path to saved model from ``save_model`` method in DeepcutTokenizer """ tokenizer = pickle.load(open(file_path, 'rb')) tokenizer.model = get_convo_nn2() tokenizer.model = tokenizer.model.load_weights(WEIGHT_PATH) return tokenizer
34.367284
116
0.587068
6ae71d1265e8389449c1186df9eae1ba04f43f40
4,824
py
Python
spconv/utils/__init__.py
djiajunustc/spconv
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
[ "Apache-2.0" ]
null
null
null
spconv/utils/__init__.py
djiajunustc/spconv
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
[ "Apache-2.0" ]
null
null
null
spconv/utils/__init__.py
djiajunustc/spconv
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from cumm import tensorview as tv from contextlib import AbstractContextManager from spconv.cppconstants import CPU_ONLY_BUILD from spconv.core_cc.csrc.utils.boxops import BoxOps from spconv.core_cc.csrc.sparse.all.ops_cpu1d import Point2VoxelCPU as Point2VoxelCPU1d from spconv.core_cc.csrc.sparse.all.ops_cpu2d import Point2VoxelCPU as Point2VoxelCPU2d from spconv.core_cc.csrc.sparse.all.ops_cpu3d import Point2VoxelCPU as Point2VoxelCPU3d from spconv.core_cc.csrc.sparse.all.ops_cpu4d import Point2VoxelCPU as Point2VoxelCPU4d if not CPU_ONLY_BUILD: from spconv.core_cc.csrc.sparse.all.ops1d import Point2Voxel as Point2VoxelGPU1d from spconv.core_cc.csrc.sparse.all.ops2d import Point2Voxel as Point2VoxelGPU2d from spconv.core_cc.csrc.sparse.all.ops3d import Point2Voxel as Point2VoxelGPU3d from spconv.core_cc.csrc.sparse.all.ops4d import Point2Voxel as Point2VoxelGPU4d
41.230769
101
0.676824
6ae7a32e4ed90bb4e297477064266c060efd4768
5,346
py
Python
build/android/gyp/lint.py
justremotephone/android_external_chromium_org
246856e61da7acf5494076c74198f2aea894a721
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2019-01-16T03:57:28.000Z
2021-01-23T15:29:45.000Z
build/android/gyp/lint.py
justremotephone/android_external_chromium_org
246856e61da7acf5494076c74198f2aea894a721
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2018-02-10T21:00:08.000Z
2018-03-20T05:09:50.000Z
build/android/gyp/lint.py
justremotephone/android_external_chromium_org
246856e61da7acf5494076c74198f2aea894a721
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2020-11-04T07:24:13.000Z
2020-11-04T07:24:13.000Z
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs Android's lint tool."""

import optparse
import os
import sys
from xml.dom import minidom

from util import build_utils


_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         '..', '..', '..'))


if __name__ == '__main__':
  sys.exit(main())
33.4125
78
0.62963
6ae97714d8f4b22a1d08d058e87732477cbb19c0
9,424
py
Python
clif/pybind11/generator.py
snu5mumr1k/clif
3a907dd7b0986f2b3306c88503d414f4d4f963ae
[ "Apache-2.0" ]
null
null
null
clif/pybind11/generator.py
snu5mumr1k/clif
3a907dd7b0986f2b3306c88503d414f4d4f963ae
[ "Apache-2.0" ]
null
null
null
clif/pybind11/generator.py
snu5mumr1k/clif
3a907dd7b0986f2b3306c88503d414f4d4f963ae
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates pybind11 bindings code.""" from typing import Dict, Generator, List, Text, Set from clif.protos import ast_pb2 from clif.pybind11 import classes from clif.pybind11 import enums from clif.pybind11 import function from clif.pybind11 import function_lib from clif.pybind11 import type_casters from clif.pybind11 import utils I = utils.I def write_to(channel, lines): """Writes the generated code to files.""" for s in lines: channel.write(s) channel.write('\n')
38.622951
80
0.671053
6aea2be020c7e8aa245e0f3059dcd2d6daefd1b7
2,865
py
Python
advent/model/discriminator.py
ChristopheGraveline064/ADVENT
fc0ecd099862ed68979b2197423f1bb34df09c74
[ "Apache-2.0" ]
1
2021-01-17T06:02:10.000Z
2021-01-17T06:02:10.000Z
advent/model/discriminator.py
ChristopheGraveline064/ADVENT
fc0ecd099862ed68979b2197423f1bb34df09c74
[ "Apache-2.0" ]
2
2021-01-17T06:21:29.000Z
2021-01-17T20:19:50.000Z
advent/model/discriminator.py
ChristopheGraveline064/ADVENT
fc0ecd099862ed68979b2197423f1bb34df09c74
[ "Apache-2.0" ]
null
null
null
from torch import nn # def get_fe_discriminator(num_classes, ndf=64): # 256-128-64-32-16 # return nn.Sequential( # nn.Conv2d(num_classes, ndf * 4, kernel_size=4, stride=2, padding=1), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf * 4, ndf * 2, kernel_size=4, stride=2, padding=1), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf * 2, ndf, kernel_size=2, stride=2, padding=0), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1), # # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf, 1, kernel_size=2, stride=2, padding=0), # ) # def get_fe_discriminator(num_classes, ndf=64): # return nn.Sequential( # nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1), # nn.LeakyReLU(negative_slope=0.2, inplace=True), # # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1), # # nn.LeakyReLU(negative_slope=0.2, inplace=True), # nn.Conv2d(ndf * 4, 1, kernel_size=1, stride=1, padding=0), # )
49.396552
90
0.624433
6aea61815f42420b447d1ce164aa7c65f5c5bc94
3,652
py
Python
spyder/dependencies.py
aglotero/spyder
075d32fa359b728416de36cb0e744715fa5e3943
[ "MIT" ]
2
2019-04-25T08:25:37.000Z
2019-04-25T08:25:43.000Z
spyder/dependencies.py
aglotero/spyder
075d32fa359b728416de36cb0e744715fa5e3943
[ "MIT" ]
1
2020-10-29T19:53:11.000Z
2020-10-29T19:53:11.000Z
spyder/dependencies.py
aglotero/spyder
075d32fa359b728416de36cb0e744715fa5e3943
[ "MIT" ]
1
2019-02-18T01:28:51.000Z
2019-02-18T01:28:51.000Z
# -*- coding: utf-8 -*- # # Copyright Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """Module checking Spyder runtime dependencies""" import os # Local imports from spyder.utils import programs DEPENDENCIES = [] def add(modname, features, required_version, installed_version=None, optional=False): """Add Spyder dependency""" global DEPENDENCIES for dependency in DEPENDENCIES: if dependency.modname == modname: raise ValueError("Dependency has already been registered: %s"\ % modname) DEPENDENCIES += [Dependency(modname, features, required_version, installed_version, optional)] def check(modname): """Check if required dependency is installed""" for dependency in DEPENDENCIES: if dependency.modname == modname: return dependency.check() else: raise RuntimeError("Unkwown dependency %s" % modname) def status(deps=DEPENDENCIES, linesep=os.linesep): """Return a status of dependencies""" maxwidth = 0 col1 = [] col2 = [] for dependency in deps: title1 = dependency.modname title1 += ' ' + dependency.required_version col1.append(title1) maxwidth = max([maxwidth, len(title1)]) col2.append(dependency.get_installed_version()) text = "" for index in range(len(deps)): text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep return text[:-1] def missing_dependencies(): """Return the status of missing dependencies (if any)""" missing_deps = [] for dependency in DEPENDENCIES: if not dependency.check() and not dependency.optional: missing_deps.append(dependency) if missing_deps: return status(deps=missing_deps, linesep='<br>') else: return ""
32.035088
78
0.585706
6aea82e968ce364fdac8932cf3b83554a12ac797
2,947
py
Python
setup.py
jasperhyp/Chemprop4SE
c02b604b63b6766464db829fea0b306c67302e82
[ "MIT" ]
1
2021-12-15T05:18:07.000Z
2021-12-15T05:18:07.000Z
setup.py
jasperhyp/chemprop4SE
c02b604b63b6766464db829fea0b306c67302e82
[ "MIT" ]
null
null
null
setup.py
jasperhyp/chemprop4SE
c02b604b63b6766464db829fea0b306c67302e82
[ "MIT" ]
null
null
null
import os from setuptools import find_packages, setup # Load version number __version__ = None src_dir = os.path.abspath(os.path.dirname(__file__)) version_file = os.path.join(src_dir, 'chemprop', '_version.py') with open(version_file, encoding='utf-8') as fd: exec(fd.read()) # Load README with open('README.md', encoding='utf-8') as f: long_description = f.read() setup( name='chemprop', version=__version__, author='Kyle Swanson, Kevin Yang, Wengong Jin, Lior Hirschfeld, Allison Tam', author_email='[email protected]', description='Molecular Property Prediction with Message Passing Neural Networks', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/chemprop/chemprop', download_url=f'https://github.com/chemprop/chemprop/v_{__version__}.tar.gz', project_urls={ 'Documentation': 'https://chemprop.readthedocs.io/en/latest/', 'Source': 'https://github.com/chemprop/chemprop', 'PyPi': 'https://pypi.org/project/chemprop/', 'Demo': 'http://chemprop.csail.mit.edu/', }, license='MIT', packages=find_packages(), package_data={'chemprop': ['py.typed']}, entry_points={ 'console_scripts': [ 'chemprop_train=chemprop.train:chemprop_train', 'chemprop_predict=chemprop.train:chemprop_predict', 'chemprop_fingerprint=chemprop.train:chemprop_fingerprint', 'chemprop_hyperopt=chemprop.hyperparameter_optimization:chemprop_hyperopt', 'chemprop_interpret=chemprop.interpret:chemprop_interpret', 'chemprop_web=chemprop.web.run:chemprop_web', 'sklearn_train=chemprop.sklearn_train:sklearn_train', 'sklearn_predict=chemprop.sklearn_predict:sklearn_predict', ] }, install_requires=[ 'flask>=1.1.2', 'hyperopt>=0.2.3', 'matplotlib>=3.1.3', 'numpy>=1.18.1', 'pandas>=1.0.3', 'pandas-flavor>=0.2.0', 'scikit-learn>=0.22.2.post1', 'scipy>=1.4.1', 'sphinx>=3.1.2', 'tensorboardX>=2.0', 'torch>=1.5.1', 'tqdm>=4.45.0', 'typed-argument-parser>=1.6.1' ], extras_require={ 'test': [ 'pytest>=6.2.2', 'parameterized>=0.8.1' ] }, python_requires='>=3.6', classifiers=[ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent' ], keywords=[ 'chemistry', 'machine learning', 'property prediction', 'message passing neural network', 'graph neural network' ] )
33.873563
88
0.599932
6aec0377fc121dfeab883792414df3e21c04a712
2,335
py
Python
mars/tensor/indexing/slice.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
2
2019-03-29T04:11:10.000Z
2020-07-08T10:19:54.000Z
mars/tensor/indexing/slice.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
null
null
null
mars/tensor/indexing/slice.py
HarshCasper/mars
4c12c968414d666c7a10f497bc22de90376b1932
[ "Apache-2.0" ]
null
null
null
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ... import opcodes as OperandDef
from ...serialize import KeyField, ListField
from ..operands import TensorHasInput, TensorOperandMixin
from ..array_utils import get_array_module
from ..core import TensorOrder
33.84058
76
0.636403
6aec42c6af54cc3a34d294f61a827b50bebc2cb6
50,221
py
Python
ftplugin/python/python/pyflakes/pyflakes/checker.py
leewckk/vim.configuration
db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57
[ "MIT" ]
null
null
null
ftplugin/python/python/pyflakes/pyflakes/checker.py
leewckk/vim.configuration
db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57
[ "MIT" ]
null
null
null
ftplugin/python/python/pyflakes/pyflakes/checker.py
leewckk/vim.configuration
db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57
[ "MIT" ]
null
null
null
""" Main module. Implement the central Checker class. Also, it models the Bindings and Scopes. """ import __future__ import doctest import os import sys PY2 = sys.version_info < (3, 0) PY32 = sys.version_info < (3, 3) # Python 2.5 to 3.2 PY33 = sys.version_info < (3, 4) # Python 2.5 to 3.3 PY34 = sys.version_info < (3, 5) # Python 2.5 to 3.4 try: sys.pypy_version_info PYPY = True except AttributeError: PYPY = False builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins')) try: import ast except ImportError: # Python 2.5 import _ast as ast if 'decorator_list' not in ast.ClassDef._fields: # Patch the missing attribute 'decorator_list' ast.ClassDef.decorator_list = () ast.FunctionDef.decorator_list = property(lambda s: s.decorators) from pyflakes import messages if PY2: else: # Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally) if PY32: else: if PY34: LOOP_TYPES = (ast.While, ast.For) else: LOOP_TYPES = (ast.While, ast.For, ast.AsyncFor) def counter(items): """ Simplest required implementation of collections.Counter. Required as 2.6 does not have Counter in collections. """ results = {} for item in items: results[item] = results.get(item, 0) + 1 return results # Globally defined names which are not attributes of the builtins module, or # are only present on some platforms. _MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError'] def getNodeName(node): # Returns node.id, or node.name, or None if hasattr(node, 'id'): # One of the many nodes with an id return node.id if hasattr(node, 'name'): # an ExceptHandler node return node.name def checkDeadScopes(self): """ Look at scopes which have been fully examined and report names in them which were imported but unused. """ for scope in self.deadScopes: # imports in classes are public members if isinstance(scope, ClassScope): continue all_binding = scope.get('__all__') if all_binding and not isinstance(all_binding, ExportBinding): all_binding = None if all_binding: all_names = set(all_binding.names) undefined = all_names.difference(scope) else: all_names = undefined = [] if undefined: if not scope.importStarred and \ os.path.basename(self.filename) != '__init__.py': # Look for possible mistakes in the export list for name in undefined: self.report(messages.UndefinedExport, scope['__all__'].source, name) # mark all import '*' as used by the undefined in __all__ if scope.importStarred: for binding in scope.values(): if isinstance(binding, StarImportation): binding.used = all_binding # Look for imported names that aren't used. for value in scope.values(): if isinstance(value, Importation): used = value.used or value.name in all_names if not used: messg = messages.UnusedImport self.report(messg, value.source, str(value)) for node in value.redefined: if isinstance(self.getParent(node), ast.For): messg = messages.ImportShadowedByLoopVar elif used: continue else: messg = messages.RedefinedWhileUnused self.report(messg, node, value.name, value.source) def differentForks(self, lnode, rnode): """True, if lnode and rnode are located on different forks of IF/TRY""" ancestor = self.getCommonAncestor(lnode, rnode, self.root) parts = getAlternatives(ancestor) if parts: for items in parts: if self.descendantOf(lnode, items, ancestor) ^ \ self.descendantOf(rnode, items, ancestor): return True return False def addBinding(self, node, value): """ Called when a binding is altered. 
- `node` is the statement responsible for the change - `value` is the new value, a Binding instance """ # assert value.source in (node, node.parent): for scope in self.scopeStack[::-1]: if value.name in scope: break existing = scope.get(value.name) if existing and not self.differentForks(node, existing.source): parent_stmt = self.getParent(value.source) if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For): self.report(messages.ImportShadowedByLoopVar, node, value.name, existing.source) elif scope is self.scope: if (isinstance(parent_stmt, ast.comprehension) and not isinstance(self.getParent(existing.source), (ast.For, ast.comprehension))): self.report(messages.RedefinedInListComp, node, value.name, existing.source) elif not existing.used and value.redefines(existing): self.report(messages.RedefinedWhileUnused, node, value.name, existing.source) elif isinstance(existing, Importation) and value.redefines(existing): existing.redefined.append(node) if value.name in self.scope: # then assume the rebound name is used as a global or within a loop value.used = self.scope[value.name].used self.scope[value.name] = value def isDocstring(self, node): """ Determine if the given node is a docstring, as long as it is at the correct place in the node tree. """ return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)) _getDoctestExamples = doctest.DocTestParser().get_examples # "stmt" type nodes DELETE = PRINT = FOR = ASYNCFOR = WHILE = IF = WITH = WITHITEM = \ ASYNCWITH = ASYNCWITHITEM = RAISE = TRYFINALLY = EXEC = \ EXPR = ASSIGN = handleChildren PASS = ignore # "expr" type nodes BOOLOP = BINOP = UNARYOP = IFEXP = SET = \ COMPARE = CALL = REPR = ATTRIBUTE = SUBSCRIPT = \ STARRED = NAMECONSTANT = handleChildren NUM = STR = BYTES = ELLIPSIS = ignore # "slice" type nodes SLICE = EXTSLICE = INDEX = handleChildren # expression contexts are node instances too, though being constants LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore # same for operators AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \ BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \ EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \ MATMULT = ignore # additional node types COMPREHENSION = KEYWORD = FORMATTEDVALUE = JOINEDSTR = handleChildren def GLOBAL(self, node): """ Keep track of globals declarations. """ global_scope_index = 1 if self._in_doctest() else 0 global_scope = self.scopeStack[global_scope_index] # Ignore 'global' statement in global scope. if self.scope is not global_scope: # One 'global' statement can bind multiple (comma-delimited) names. for node_name in node.names: node_value = Assignment(node_name, node) # Remove UndefinedName messages already reported for this name. # TODO: if the global is not used in this scope, it does not # become a globally defined name. See test_unused_global. self.messages = [ m for m in self.messages if not isinstance(m, messages.UndefinedName) or m.message_args[0] != node_name] # Bind name to global scope if it doesn't exist already. global_scope.setdefault(node_name, node_value) # Bind name to non-global scopes, but as already "used". node_value.used = (global_scope, node) for scope in self.scopeStack[global_scope_index + 1:]: scope[node_name] = node_value NONLOCAL = GLOBAL LISTCOMP = handleChildren if PY2 else GENERATOREXP DICTCOMP = SETCOMP = GENERATOREXP def NAME(self, node): """ Handle occurrence of Name (which can be a load/store/delete access.) 
""" # Locate the name in locals / function / globals scopes. if isinstance(node.ctx, (ast.Load, ast.AugLoad)): self.handleNodeLoad(node) if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and isinstance(node.parent, ast.Call)): # we are doing locals() call in current scope self.scope.usesLocals = True elif isinstance(node.ctx, (ast.Store, ast.AugStore)): self.handleNodeStore(node) elif isinstance(node.ctx, ast.Del): self.handleNodeDelete(node) else: # must be a Param context -- this only happens for names in function # arguments, but these aren't dispatched through here raise RuntimeError("Got impossible expression context: %r" % (node.ctx,)) BREAK = CONTINUE AWAIT = YIELDFROM = YIELD ASYNCFUNCTIONDEF = FUNCTIONDEF def CLASSDEF(self, node): """ Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope. """ for deco in node.decorator_list: self.handleNode(deco, node) for baseNode in node.bases: self.handleNode(baseNode, node) if not PY2: for keywordNode in node.keywords: self.handleNode(keywordNode, node) self.pushScope(ClassScope) # doctest does not process doctest within a doctest # classes within classes are processed. if (self.withDoctest and not self._in_doctest() and not isinstance(self.scope, FunctionScope)): self.deferFunction(lambda: self.handleDoctests(node)) for stmt in node.body: self.handleNode(stmt, node) self.popScope() self.addBinding(node, ClassDefinition(node.name, node)) LIST = TUPLE TRYEXCEPT = TRY
36.900073
87
0.567054
6aed26d63f42531533566c9bcedcbe6f5289c5e4
3,349
py
Python
AutoScreenShot.py
infinyte7/Auto-Screenshot
5d8e39af61f3361f372ffb48add53171b7cea672
[ "MIT" ]
3
2020-10-29T13:57:15.000Z
2021-02-19T21:59:15.000Z
AutoScreenShot.py
infinyte7/Auto-Screenshot
5d8e39af61f3361f372ffb48add53171b7cea672
[ "MIT" ]
null
null
null
AutoScreenShot.py
infinyte7/Auto-Screenshot
5d8e39af61f3361f372ffb48add53171b7cea672
[ "MIT" ]
1
2021-02-19T21:59:48.000Z
2021-02-19T21:59:48.000Z
# Project Name: Auto Screenshot
# Description: Take screenshot of screen when any change take place.
# Author: Mani (Infinyte7)
# Date: 26-10-2020
# License: MIT

from pyscreenshot import grab
from PIL import ImageChops

import os
import time
import subprocess, sys
from datetime import datetime

import tkinter as tk
from tkinter import *
from tkinter import font


if __name__ == "__main__":
    root = Tk()
    gui = AutoScreenshot(root)
    root.mainloop()
32.833333
101
0.616602
6aed847e420c882fffa9edfe88238102ee06ac09
2,749
py
Python
rqalpha/utils/logger.py
HaidongHe/rqalpha
bb824178425909e051c456f6062a6c5bdc816421
[ "Apache-2.0" ]
1
2020-11-10T05:44:39.000Z
2020-11-10T05:44:39.000Z
rqalpha/utils/logger.py
HaidongHe/rqalpha
bb824178425909e051c456f6062a6c5bdc816421
[ "Apache-2.0" ]
null
null
null
rqalpha/utils/logger.py
HaidongHe/rqalpha
bb824178425909e051c456f6062a6c5bdc816421
[ "Apache-2.0" ]
1
2020-03-05T05:06:45.000Z
2020-03-05T05:06:45.000Z
# -*- coding: utf-8 -*- # 2019 # # # # * # Apache License 2.0Apache 2.0 Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0 # # # * # # Apache 2.0 Apache 2.0 # [email protected] from datetime import datetime import logbook from logbook import Logger, StderrHandler from rqalpha.utils.py2 import to_utf8 logbook.set_datetime_format("local") # patch warn logbook.base._level_names[logbook.base.WARNING] = 'WARN' __all__ = [ "user_log", "system_log", "user_system_log", ] DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f" user_std_handler = StderrHandler(bubble=True) user_std_handler.formatter = user_std_handler_log_formatter # loggers # logger user_log = Logger("user_log") # user_system_log = Logger("user_system_log") # user_detail_log = Logger("user_detail_log") # user_detail_log.handlers.append(StderrHandler(bubble=True)) # system_log = Logger("system_log") basic_system_log = Logger("basic_system_log") # std_log = Logger("std_log") init_logger()
25.220183
144
0.694434
6aee73a3b8946a07512f9eca678734d10d671560
5,517
py
Python
salt/modules/oracle.py
wikimedia/operations-debs-salt
be6342abc7401ff92f67ed59f7834f1359f35314
[ "Apache-2.0" ]
null
null
null
salt/modules/oracle.py
wikimedia/operations-debs-salt
be6342abc7401ff92f67ed59f7834f1359f35314
[ "Apache-2.0" ]
null
null
null
salt/modules/oracle.py
wikimedia/operations-debs-salt
be6342abc7401ff92f67ed59f7834f1359f35314
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ''' Oracle DataBase connection module :mainteiner: Vladimir Bormotov <[email protected]> :maturity: new :depends: cx_Oracle :platform: all :configuration: module provide connections for multiple Oracle DB instances. **OS Environment** .. code-block:: text ORACLE_HOME: path to oracle product PATH: path to Oracle Client libs need to be in PATH **pillar** .. code-block:: text oracle.dbs: list of known based oracle.dbs.<db>.uri: connection credentials in format: user/password@host[:port]/sid[ as {sysdba|sysoper}] ''' import os import logging from salt.utils.decorators import depends log = logging.getLogger(__name__) try: import cx_Oracle MODE = { 'sysdba': cx_Oracle.SYSDBA, 'sysoper': cx_Oracle.SYSOPER } HAS_CX_ORACLE = True except ImportError: MODE = {'sysdba': 2, 'sysoper': 4} HAS_CX_ORACLE = False __virtualname__ = 'oracle' def __virtual__(): ''' Load module only if cx_Oracle installed ''' return __virtualname__ if HAS_CX_ORACLE else False def _cx_oracle_req(): ''' Fallback function stub ''' return 'Need "cx_Oracle" and Oracle Client installed for this functin exist' def _unicode_output(cursor, name, default_type, size, precision, scale): ''' Return strings values as python unicode string http://www.oracle.com/technetwork/articles/dsl/tuininga-cx-oracle-084866.html ''' if default_type in (cx_Oracle.STRING, cx_Oracle.LONG_STRING, cx_Oracle.FIXED_CHAR, cx_Oracle.CLOB): return cursor.var(unicode, size, cursor.arraysize) def _connect(uri): ''' uri = user/password@host[:port]/sid[ as {sysdba|sysoper}] Return cx_Oracle.Connection instance ''' # cx_Oracle.Connection() not support 'as sysdba' syntax uri_l = uri.rsplit(' as ', 1) if len(uri_l) == 2: credentials, mode = uri_l mode = MODE[mode] else: credentials = uri_l[0] mode = 0 userpass, hostportsid = credentials.split('@') user, password = userpass.split('/') hostport, sid = hostportsid.split('/') hostport_l = hostport.split(':') if len(hostport_l) == 2: host, port = hostport_l else: host = hostport_l[0] port = 1521 log.debug('connect: {0}'.format((user, password, host, port, sid, mode))) # force UTF-8 client encoding os.environ['NLS_LANG'] = '.AL32UTF8' conn = cx_Oracle.connect(user, password, cx_Oracle.makedsn(host, port, sid), mode) conn.outputtypehandler = _unicode_output return conn def show_dbs(*dbs): ''' Show databases configuration from pillar. Filter by args .. code-block:: bash salt '*' oracle.show_dbs salt '*' oracle.show_dbs my_db ''' if dbs: log.debug('get dbs from pillar: {0}'.format(dbs)) result = {} for db in dbs: result[db] = __salt__['pillar.get']('oracle:dbs:' + db) return result else: pillar_dbs = __salt__['pillar.get']('oracle:dbs') log.debug('get all ({0}) dbs from pillar'.format(len(pillar_dbs))) return pillar_dbs def show_pillar(item=None): ''' Show Pillar segment oracle.* and subitem with notation "item:subitem" CLI Example: .. code-block:: bash salt '*' oracle.show_pillar salt '*' oracle.show_pillar dbs:my_db ''' if item: return __salt__['pillar.get']('oracle:' + item) else: return __salt__['pillar.get']('oracle') def show_env(): ''' Show Environment used by Oracle Client CLI Example: .. code-block:: bash salt '*' oracle.show_env .. note:: at first _connect() ``NLS_LANG`` will forced to '.AL32UTF8' ''' envs = ['PATH', 'ORACLE_HOME', 'TNS_ADMIN', 'NLS_LANG'] result = {} for env in envs: if env in os.environ: result[env] = os.environ[env] return result
24.52
82
0.610839
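For illustration, the URI format that _connect parses; the credentials below are invented, not from any configuration:

# 'scott/tiger@dbhost:1521/orcl as sysdba'
#   -> user='scott', password='tiger', host='dbhost', port='1521',
#      sid='orcl', mode=cx_Oracle.SYSDBA
# 'scott/tiger@dbhost/orcl'
#   -> same credentials; port falls back to 1521 and mode to 0 (a normal session)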
6aef1e728fe8745d27da0badcde01e88381bd9b3
32,785
py
Python
tests/test_std.py
ashwini-balnaves/python-consul
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
[ "MIT" ]
469
2015-01-02T18:36:39.000Z
2022-03-10T09:18:13.000Z
tests/test_std.py
ashwini-balnaves/python-consul
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
[ "MIT" ]
249
2015-01-21T19:06:34.000Z
2022-01-12T09:12:58.000Z
tests/test_std.py
ashwini-balnaves/python-consul
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
[ "MIT" ]
279
2015-01-17T04:25:04.000Z
2022-03-11T22:06:46.000Z
import base64
import operator
import struct
import time

import pytest
import six

import consul
import consul.std

Check = consul.Check
34.766702
78
0.574257
6af148a08a578a5383b105b30ec3598b62b9c1f1
385
py
Python
Q58/sol.py
shivamT95/projecteuler
3e87b64235edd8444bc27198717a38e0ae0e0c0b
[ "MIT" ]
null
null
null
Q58/sol.py
shivamT95/projecteuler
3e87b64235edd8444bc27198717a38e0ae0e0c0b
[ "MIT" ]
null
null
null
Q58/sol.py
shivamT95/projecteuler
3e87b64235edd8444bc27198717a38e0ae0e0c0b
[ "MIT" ]
null
null
null
import math

tot = 1
dia = 0

for side_length in range(3,100001,2):
    hi = side_length**2
    for i in range(4):
        if is_prime(hi-i*side_length+i):
            dia = dia+1
    tot = tot+4
    if dia/tot < 0.1:
        print(side_length)
        break
18.333333
60
0.592208
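The loop above depends on an is_prime helper whose definition is missing from the excerpt. A simple stand-in (plain trial division; correct, though a Miller-Rabin test would be faster for the magnitudes this problem reaches) might be:

def is_prime(n):
    # Deterministic trial division up to sqrt(n).
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            return False
        f += 2
    return True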
6af16d0caa3aded0dbc0cbf9957a4f9e9107ae10
1,530
py
Python
lesson_07/02.py
alexartwww/geekbrains
f58720dc1d29bc94201b8b9c9239813c0d14ed64
[ "MIT" ]
null
null
null
lesson_07/02.py
alexartwww/geekbrains
f58720dc1d29bc94201b8b9c9239813c0d14ed64
[ "MIT" ]
null
null
null
lesson_07/02.py
alexartwww/geekbrains
f58720dc1d29bc94201b8b9c9239813c0d14ed64
[ "MIT" ]
null
null
null
task = ''' . () , . . : ( ) ( ). : V H, . : (V/6.5 + 0.5), (2 * H + 0.3). . . : , @property. ''' if __name__ == '__main__': print(task) objects = [ 231, 22, Coat(32), 'test', True, Costume(87), Coat(32) ] need_material = 0 for obj in objects: if isinstance(obj, Clothes): need_material += obj.need_material print(need_material)
25.081967
83
0.67451
0a7357e7ea86e6139bf2479d07ddffa8fab66e70
1,170
py
Python
core/fanarttvapi.py
SchadLucas/pyscrape
814a5e767ed899b5929533729c15262f1ad6a52b
[ "MIT" ]
null
null
null
core/fanarttvapi.py
SchadLucas/pyscrape
814a5e767ed899b5929533729c15262f1ad6a52b
[ "MIT" ]
1
2015-05-07T11:38:32.000Z
2015-05-07T11:38:32.000Z
core/fanarttvapi.py
SchadLucas/pyscrape
814a5e767ed899b5929533729c15262f1ad6a52b
[ "MIT" ]
null
null
null
import urllib2
import json
import time

from core.helpers.decorator import Cached
from core.helpers.config import config
from core.helpers.logger import log, LogLevel
27.857143
89
0.655556
0a73919f13735ea63c30a1b71cb346f2f001cba6
2,096
py
Python
metrics.py
AndreasLH/Image-Colourization
b41182354446feeb80000a84e5db9100b30e9d81
[ "MIT" ]
1
2021-11-01T09:53:34.000Z
2021-11-01T09:53:34.000Z
metrics.py
AndreasLH/Image-Colourization
b41182354446feeb80000a84e5db9100b30e9d81
[ "MIT" ]
null
null
null
metrics.py
AndreasLH/Image-Colourization
b41182354446feeb80000a84e5db9100b30e9d81
[ "MIT" ]
null
null
null
from math import log10, sqrt
import cv2
import numpy as np


def PSNR(original, compressed):
    '''
    Calculates the Peak signal to noise ratio between a ground truth image
    and predicted image.
    see https://www.geeksforgeeks.org/python-peak-signal-to-noise-ratio-psnr/
    for reference

    Parameters
    ----------
    true image (cv2 image)
    predicted image (cv2 image)

    Returns
    -------
    PSNR score
    '''
    mse = np.mean((original - compressed) ** 2)
    if(mse == 0):
        # MSE is zero means no noise is present in the signal.
        # Therefore PSNR have no importance.
        return 100
    max_pixel = 255.0
    psnr = 20 * log10(max_pixel / sqrt(mse))
    return psnr


def colourfulnessMetric(img):
    """
    Created on Mon Nov 15 10:55:16 2021

    @author: Yucheng

    Parameters
    ----------
    img : cv2 RGB image

    Returns
    -------
    M : colourness metric

    -----------------------------
    |not colourful         |  0 |
    |slightly colorful     | 15 |
    |moderately colourful  | 33 |
    |averagely colourful   | 45 |
    |quite colourful       | 59 |
    |highly colourful      | 82 |
    |extremely colourful   | 109|
    -----------------------------
    """
    # Get RGB components
    R, G, B = cv2.split(img.astype("float"))

    # colourfulness metric from Hasler et al., section 7
    rg = R - G
    yb = (1/2) * (R+G) - B
    sigma_rgyb = np.sqrt(np.var(rg) + np.var(yb))
    mu_rgyb = np.sqrt(np.mean(rg)**2 + np.mean(yb)**2)
    M = sigma_rgyb + 0.3 * mu_rgyb

    return M


if __name__ == "__main__":
    main()
24.372093
95
0.564885
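A short usage sketch for the two metrics above; the file names are placeholders rather than anything from the original project:

import cv2

truth = cv2.imread("ground_truth.png")        # loaded by OpenCV in BGR order, uint8
pred = cv2.imread("colourised_output.png")

# Cast to float so the subtraction inside PSNR cannot wrap around uint8 values.
print("PSNR: %.2f dB" % PSNR(truth.astype("float"), pred.astype("float")))

# colourfulnessMetric expects an RGB image, so convert from OpenCV's BGR order first.
pred_rgb = cv2.cvtColor(pred, cv2.COLOR_BGR2RGB)
print("Colourfulness: %.1f" % colourfulnessMetric(pred_rgb))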
0a750b96f7d83d3d539bea6b3d201533cd437b4f
832
py
Python
redirink/insights/tests/test_models.py
Egor4ik325/redirink
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
[ "MIT" ]
null
null
null
redirink/insights/tests/test_models.py
Egor4ik325/redirink
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
[ "MIT" ]
null
null
null
redirink/insights/tests/test_models.py
Egor4ik325/redirink
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
[ "MIT" ]
1
2021-12-31T00:46:31.000Z
2021-12-31T00:46:31.000Z
"""Test insight model is working the way it should.""" import pytest from django.core.exceptions import ValidationError from django.db import DataError from .factories import InsightFactory pytestmark = pytest.mark.django_db
23.111111
65
0.796875
0a7533a2a833e21052e44904ba80f9df53fd03e4
4,560
py
Python
scripts/list-all-test-suites-for-ci.py
uc-cdis/gen3-qa
6634678b17cb5dd86533667c22037b1e2ddeb0b8
[ "Apache-2.0" ]
4
2019-08-30T22:25:24.000Z
2021-09-15T19:19:44.000Z
scripts/list-all-test-suites-for-ci.py
uc-cdis/gen3-qa
6634678b17cb5dd86533667c22037b1e2ddeb0b8
[ "Apache-2.0" ]
148
2018-04-16T17:26:54.000Z
2022-03-04T16:16:02.000Z
scripts/list-all-test-suites-for-ci.py
uc-cdis/gen3-qa
6634678b17cb5dd86533667c22037b1e2ddeb0b8
[ "Apache-2.0" ]
3
2019-08-01T03:15:38.000Z
2022-03-07T01:23:12.000Z
import os
import subprocess

test_suites_that_cant_run_in_parallel = [
    "test-apis-dbgapTest",                               # not thread-safe
    "test-google-googleDataAccessTest",                  # not thread-safe
    "test-google-googleServiceAccountRemovalTest",       # not thread-safe
    "test-guppy-guppyTest",                              # not thread-safe
    "test-smokeTests-brainTests",                        # manual (executable test)
    "test-batch-GoogleBucketManifestGenerationTest",     # @donot
    "test-batch-S3BucketManifestGenerationTest",         # @donot
    "test-portal-dataguidOrgTest",                       # @donot
    "test-mariner-marinerIntegrationTest",               # @donot
    "test-suites-fail",                                  # special suite to force failures for invalid test labels
    "test-portal-roleBasedUITest",                       # manual (executable test)
    "test-portal-limitedFilePFBExportTestPlan",          # manual (executable test)
    "test-access-accessGUITest",                         # manual (executable test)
    "test-portal-tieredAccessTest",                      # manual (executable test)
    "test-portal-discoveryPageTestPlan",                 # manual (executable test)
    "test-portal-dashboardReportsTest",                  # manual (executable test)
    "test-guppy-nestedAggTest",                          # manual (executable test)
    "test-portal-404pageTest",                           # manual (executable test)
    "test-apis-dcfDataReplicationTest",                  # manual (executable test)
    "test-portal-exportPfbToWorkspaceTest",              # manual (executable test)
    "test-portal-homepageChartNodesExecutableTestPlan",  # manual (executable test)
    "test-portal-profilePageTest",                       # manual (executable test)
    "test-portal-terraExportWarningTestPlan",            # manual (executable test)
    "test-pelican-exportPfbTest",                        # not ready
    "test-regressions-exportPerformanceTest",            # legacy (disabled test)
    "test-regressions-generateTestData",                 # legacy (disabled test)
    "test-regressions-queryPerformanceTest",             # legacy (disabled test)
    "test-regressions-submissionPerformanceTest",        # legacy (disabled test)
    "test-dream-challenge-DCgen3clientTest",             # legacy (disabled test)
    "test-dream-challenge-synapaseLoginTest",            # legacy (disabled test)
    "test-prod-checkAllProjectsBucketAccessTest",        # prod test
    "test-portal-pfbExportTest",                         # nightly build test
    "test-apis-etlTest",                                 # long-running test
    "test-apis-centralizedAuth",                         # long-running test
    "test-google-googleServiceAccountTest",              # long-running test
    "test-google-googleServiceAccountKeyTest",           # long-running test
    "test-portal-dataUploadTest",                        # SUPER long-running test
    "test-portal-indexingPageTest",                      # long-running test
    "test-apis-metadataIngestionTest",                   # long-running test
    "test-apis-auditServiceTest"                         # long-running test
]

if __name__ == "__main__":
    main()
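# --- Illustrative sketch (not part of the original file) --------------------
# The record above calls main() but its definition is not included. The sketch
# below only illustrates the idea suggested by the file name and the exclusion
# list: emit every test suite label except the ones that cannot run in
# parallel. The "suites" directory layout and the label format are assumptions;
# the real discovery logic may differ, and the definition would have to sit
# above the __main__ guard.
def main():
    suites = []
    for root, _dirs, files in os.walk("suites"):
        for name in files:
            if name.endswith(".js"):
                group = os.path.basename(root)
                label = "test-{}-{}".format(group, name.replace(".js", ""))
                if label not in test_suites_that_cant_run_in_parallel:
                    suites.append(label)
    print(" ".join(sorted(suites)))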
49.032258
112
0.608114
0a7597366e4cc059f4fa32bc7a905bc75e50266d
1,045
py
Python
querybook/server/lib/query_executor/all_executors.py
set5think/querybook
25738fe113faa8ee414826d1aa910354ae8a4146
[ "Apache-2.0" ]
1
2021-04-01T15:30:11.000Z
2021-04-01T15:30:11.000Z
querybook/server/lib/query_executor/all_executors.py
set5think/querybook
25738fe113faa8ee414826d1aa910354ae8a4146
[ "Apache-2.0" ]
null
null
null
querybook/server/lib/query_executor/all_executors.py
set5think/querybook
25738fe113faa8ee414826d1aa910354ae8a4146
[ "Apache-2.0" ]
1
2021-04-02T17:43:41.000Z
2021-04-02T17:43:41.000Z
from lib.utils.plugin import import_plugin

from .base_executor import parse_exception
from .executors.hive import HiveQueryExecutor
from .executors.presto import PrestoQueryExecutor
from .executors.sqlalchemy import (
    MysqlQueryExecutor,
    DruidQueryExecutor,
    SqliteQueryExecutor,
    SnowflakeQueryExecutor,
)
from .executors.bigquery import BigQueryQueryExecutor

ALL_PLUGIN_EXECUTORS = import_plugin("executor_plugin", "ALL_PLUGIN_EXECUTORS", [])

ALL_EXECUTORS = [
    HiveQueryExecutor,
    PrestoQueryExecutor,
    MysqlQueryExecutor,
    DruidQueryExecutor,
    SqliteQueryExecutor,
    BigQueryQueryExecutor,
    SnowflakeQueryExecutor,
] + ALL_PLUGIN_EXECUTORS

# Re-export parse_exception
parse_exception
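# --- Illustrative sketch (not part of the original file) --------------------
# ALL_EXECUTORS is a plain registry list; a common way to use such a registry
# is to resolve an executor class by some name attribute. The EXECUTOR_NAME
# attribute used below is hypothetical -- the real lookup key used elsewhere
# in the project is not shown in this record.
def _example_find_executor(name):
    for executor_cls in ALL_EXECUTORS:
        if getattr(executor_cls, "EXECUTOR_NAME", None) == name:
            return executor_cls
    return None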
24.880952
83
0.749282
0a7705eb0f14b8b24300a2a99d4b3ece8aed7a37
3,189
py
Python
bot/exts/info/pypi.py
MrGrote/bot
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
[ "MIT" ]
1
2022-03-08T07:10:30.000Z
2022-03-08T07:10:30.000Z
bot/exts/info/pypi.py
MrGrote/bot
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
[ "MIT" ]
null
null
null
bot/exts/info/pypi.py
MrGrote/bot
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
[ "MIT" ]
null
null
null
import itertools
import random
import re
from contextlib import suppress

from disnake import Embed, NotFound
from disnake.ext.commands import Cog, Context, command
from disnake.utils import escape_markdown

from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
from bot.log import get_logger
from bot.utils.messages import wait_for_deletion

URL = "https://pypi.org/pypi/{package}/json"
PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"

PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))

ILLEGAL_CHARACTERS = re.compile(r"[^-_.a-zA-Z0-9]+")
INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay

log = get_logger(__name__)


def setup(bot: Bot) -> None:
    """Load the PyPi cog."""
    bot.add_cog(PyPi(bot))
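# --- Illustrative sketch (not part of the original file) --------------------
# The PyPi cog referenced by setup() is not included in this record. Below is
# a minimal, hypothetical version built only from the constants and imports
# above; the real cog's behaviour, error handling, and the bot.http_session
# attribute are assumptions, and the class would normally be defined before
# setup().
class PyPi(Cog):
    """Fetch basic information about a package from PyPI."""

    def __init__(self, bot: Bot):
        self.bot = bot

    @command(name="pypi", aliases=("package",))
    async def pypi(self, ctx: Context, package: str) -> None:
        """Send a short embed describing `package`."""
        if ILLEGAL_CHARACTERS.search(package):
            await ctx.send(
                random.choice(NEGATIVE_REPLIES),
                delete_after=INVALID_INPUT_DELETE_DELAY,
            )
            return

        async with self.bot.http_session.get(URL.format(package=package)) as response:
            if response.status != 200:
                await ctx.send(random.choice(NEGATIVE_REPLIES))
                return
            data = await response.json()

        info = data["info"]
        embed = Embed(
            title=escape_markdown(info["name"]),
            description=info.get("summary") or "No summary provided.",
            url=info.get("package_url") or f"https://pypi.org/project/{package}/",
            colour=next(PYPI_COLOURS),
        )
        embed.set_thumbnail(url=PYPI_ICON)
        await ctx.send(embed=embed)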
36.238636
117
0.625902
0a77c9190291f537620b1ae307203f1368f48062
853
py
Python
app/decorators.py
GinnyGaga/lanbo
d0bd200b93643d3ede69b5fcce72cefd5c167e37
[ "MIT" ]
null
null
null
app/decorators.py
GinnyGaga/lanbo
d0bd200b93643d3ede69b5fcce72cefd5c167e37
[ "MIT" ]
null
null
null
app/decorators.py
GinnyGaga/lanbo
d0bd200b93643d3ede69b5fcce72cefd5c167e37
[ "MIT" ]
null
null
null
from functools import wraps

from flask import abort
from flask_login import current_user

from .models import Permission
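# --- Illustrative sketch (not part of the original file) --------------------
# The decorator definitions themselves are not included in this record. The
# imports strongly suggest the classic Flask permission decorators, sketched
# below as an assumption; current_user.can() and Permission.ADMIN are
# hypothetical names, not taken from this project's models.
def permission_required(permission):
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if not current_user.can(permission):
                abort(403)
            return f(*args, **kwargs)
        return decorated_function
    return decorator


def admin_required(f):
    return permission_required(Permission.ADMIN)(f)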
25.088235
56
0.695193
0a77fdb1c15169709a632c8652ce9cffd62abd68
491
py
Python
jnpy/experiments/Qt/pyqtgraph_tutorial/codeloop_org_materials/c4_drawing_curves.py
jojoquant/jnpy
c874060af4b129ae09cee9f8542517b7b2f6573b
[ "MIT" ]
5
2020-05-19T07:32:39.000Z
2022-03-14T09:09:48.000Z
jnpy/experiments/Qt/pyqtgraph_tutorial/codeloop_org_materials/c4_drawing_curves.py
jojoquant/jnpy
c874060af4b129ae09cee9f8542517b7b2f6573b
[ "MIT" ]
null
null
null
jnpy/experiments/Qt/pyqtgraph_tutorial/codeloop_org_materials/c4_drawing_curves.py
jojoquant/jnpy
c874060af4b129ae09cee9f8542517b7b2f6573b
[ "MIT" ]
3
2020-04-02T08:30:17.000Z
2020-05-03T12:12:05.000Z
# !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2019/11/14 2:26
# @Author   : Fangyang
# @Software : PyCharm

import sys

from PyQt5.QtWidgets import QApplication
import pyqtgraph as pg
import numpy as np

app = QApplication(sys.argv)

x = np.arange(1000)
y = np.random.normal(size=(3, 1000))
plotWidget = pg.plot(title='Three plot curves')
for i in range(3):
    plotWidget.plot(x, y[i], pen=(i, 3))

status = app.exec_()
sys.exit(status)

if __name__ == '__main__':
    pass
19.64
47
0.678208
0a78179deb3bba9140ba6fad7537f792839802d1
826
py
Python
compyle/api.py
nauaneed/compyle
218c76de8aa684e1fb198072e40cb97a5e6845b3
[ "BSD-3-Clause" ]
null
null
null
compyle/api.py
nauaneed/compyle
218c76de8aa684e1fb198072e40cb97a5e6845b3
[ "BSD-3-Clause" ]
null
null
null
compyle/api.py
nauaneed/compyle
218c76de8aa684e1fb198072e40cb97a5e6845b3
[ "BSD-3-Clause" ]
null
null
null
from .array import Array, wrap
from .ast_utils import (
    get_symbols, get_assigned, get_unknown_names_and_calls, has_return, has_node
)
from .config import get_config, set_config, use_config, Config
from .cython_generator import CythonGenerator, get_func_definition
from .ext_module import ExtModule
from .extern import Extern
from .low_level import Kernel, LocalMem, Cython, cast
from .parallel import Elementwise, Reduction, Scan, elementwise
from .profile import (
    get_profile_info, named_profile, profile, profile_ctx, print_profile,
    profile_kernel, ProfileContext, profile2csv
)
from .translator import (
    CConverter, CStructHelper, OpenCLConverter, detect_type, ocl_detect_type, py2c
)
from .types import KnownType, annotate, declare
from .utils import ArgumentParser
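# --- Illustrative sketch (not part of the original file) --------------------
# This module only re-exports compyle's public API. The helper below shows one
# plausible way the exported names fit together, loosely following compyle's
# documented elementwise example; treat the exact annotate/wrap signatures and
# the 'cython' backend choice as assumptions rather than a verified reference.
def _example_axpb():
    """Hypothetical usage example; defined but never called at import time."""
    import numpy as np

    @annotate(i='int', x='doublep', y='doublep', a='double', b='double')
    def axpb(i, x, y, a, b):
        y[i] = a * x[i] + b

    x = np.linspace(0, 1, 1000)
    y = np.zeros_like(x)
    x, y = wrap(x, y, backend='cython')
    Elementwise(axpb, backend='cython')(x, y, 2.0, 1.5)
    return y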
34.416667
77
0.77724