max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
venv/lib/python3.8/site-packages/kivy/tools/changelog_parser.py | felipesch92/projeto_kivy | 13,889 | 12600268 | <filename>venv/lib/python3.8/site-packages/kivy/tools/changelog_parser.py<gh_stars>1000+
"""
Changelog parser
================
This generates a changelog from a json file of the PRs of a given milestone,
dumped to json, using the [GitHub CLI](https://github.com/cli/cli).
First, in the command line, create the following alias::
gh alias set --shell viewMilestone "gh api graphql -F owner='kivy' \
-F name='kivy' -F number=\\$1 -f query='
query GetMilestones(\\$name: String!, \\$owner: String!, \\$number: \
Int!) {
repository(owner: \\$owner, name: \\$name) {
milestone(number: \\$number) {
pullRequests(states: MERGED, first: 1000) {
nodes {
number
title
labels (first: 25) {
nodes {
name
}
}
}
}
}
}
}
'"
Then, log in using ``gh`` and run::
gh viewMilestone 26 > prs.json
This will generate ``prs.json``. Then, to generate the changelog, run::
python -m kivy.tools.changelog_parser prs.json changelog.md
to generate a markdown changelog at ``changelog.md``. Then, edit as desired
and paste into the
[changelog here](https://github.com/kivy/kivy/blob/master/doc/sources\
/changelog.rst).
"""
from os.path import exists
import sys
from collections import defaultdict
import json
__all__ = ('process_changelog', )
def write_special_section(fh, items, header):
items = sorted(items, key=lambda x: x[0])
if items:
fh.write(f'{header}\n{"-" * len(header)}\n\n')
for n, title in items:
fh.write(f'- [:repo:`{n}`]: {title}\n')
fh.write('\n')
def process_changelog(filename_in, filename_out):
if exists(filename_out):
raise ValueError(
'{} already exists and would be overwritten'.format(filename_out))
with open(filename_in, 'r') as fh:
data = json.load(fh)
prs = data["data"]["repository"]["milestone"]["pullRequests"]["nodes"]
bad_pr = False
grouped = defaultdict(list)
highlighted = []
api_breaks = []
deprecates = []
for item in prs:
n = item['number']
title = item['title']
labels = [label['name'] for label in item['labels']['nodes']]
api_break = 'Notes: API-break' in labels
highlight = 'Notes: Release-highlight' in labels
deprecated = 'Notes: API-deprecation' in labels
component_str = 'Component: '
components = [
label[len(component_str):]
for label in labels if label.startswith(component_str)
]
if not components:
print(f'Found no component label for #{n}')
bad_pr = True
continue
if len(components) > 1:
print(f'Found more than one component label for #{n}')
bad_pr = True
continue
grouped[components[0]].append((n, title))
if highlight:
highlighted.append((n, title))
if api_break:
api_breaks.append((n, title))
if deprecated:
deprecates.append((n, title))
if bad_pr:
raise ValueError(
'One or more PRs have no component label, or more than one')
with open(filename_out, 'w') as fh:
write_special_section(fh, highlighted, 'Highlights')
write_special_section(fh, deprecates, 'Deprecated')
write_special_section(fh, api_breaks, 'Breaking changes')
for group, items in sorted(grouped.items(), key=lambda x: x[0]):
write_special_section(fh, items, group.capitalize())
if __name__ == '__main__':
process_changelog(*sys.argv[1:])
|
crabageprediction/venv/Lib/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py | 13rianlucero/CrabAgePrediction | 20,453 | 12600318 | <filename>crabageprediction/venv/Lib/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py
import numpy as np
np.AxisError("test")
np.AxisError(1, ndim=2)
np.AxisError(1, ndim=2, msg_prefix="error")
np.AxisError(1, ndim=2, msg_prefix=None)
|
src/GridCal/Engine/Devices/groupings.py | mzy2240/GridCal | 284 | 12600325 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine.basic_structures import BusMode
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
class GenericAreaGroup(EditableDevice):
def __init__(self, name='', code='', idtag=None, device_type=DeviceType.GenericArea, latitude=0.0, longitude=0.0):
"""
:param name:
:param idtag:
:param device_type:
:param latitude:
:param longitude:
"""
EditableDevice.__init__(self,
name=name,
code=code,
idtag=idtag,
active=True,
device_type=device_type,
editable_headers={'name': GCProp('', str, 'Name of the bus'),
'idtag': GCProp('', str, 'Unique ID'),
'longitude': GCProp('deg', float, 'longitude of the bus.'),
'latitude': GCProp('deg', float, 'latitude of the bus.')},
non_editable_attributes=['idtag'],
properties_with_profile={})
self.latitude = latitude
self.longitude = longitude
def get_properties_dict(self, version=3):
data = {'id': self.idtag,
'name': self.name,
'code': self.code
}
return data
def get_profiles_dict(self, version=3):
data = {'id': self.idtag}
return data
def get_units_dict(self, version=3):
data = {}
return data
class Substation(GenericAreaGroup):
def __init__(self, name='Substation', idtag=None, code='', latitude=0.0, longitude=0.0):
"""
:param name:
:param idtag:
:param latitude:
:param longitude:
"""
GenericAreaGroup.__init__(self,
name=name,
idtag=idtag,
code=code,
device_type=DeviceType.SubstationDevice,
latitude=latitude,
longitude=longitude)
class Area(GenericAreaGroup):
def __init__(self, name='Area', idtag=None, code='', latitude=0.0, longitude=0.0):
"""
:param name:
:param idtag:
:param latitude:
:param longitude:
"""
GenericAreaGroup.__init__(self,
name=name,
idtag=idtag,
code=code,
device_type=DeviceType.AreaDevice,
latitude=latitude,
longitude=longitude)
class Zone(GenericAreaGroup):
def __init__(self, name='Zone', idtag=None, code='',latitude=0.0, longitude=0.0):
"""
:param name:
:param idtag:
:param latitude:
:param longitude:
"""
GenericAreaGroup.__init__(self,
name=name,
idtag=idtag,
code=code,
device_type=DeviceType.ZoneDevice,
latitude=latitude,
longitude=longitude)
class Country(GenericAreaGroup):
def __init__(self, name='Country', idtag=None, code='',latitude=0.0, longitude=0.0):
"""
:param name:
:param idtag:
:param latitude:
:param longitude:
"""
GenericAreaGroup.__init__(self,
name=name,
idtag=idtag,
code=code,
device_type=DeviceType.CountryDevice,
latitude=latitude,
longitude=longitude)
|
tests/test_fuzzing.py | odidev/cmaes | 134 | 12600333 | <reponame>odidev/cmaes
import hypothesis.extra.numpy as npst
import unittest
from hypothesis import given, strategies as st
from cmaes import CMA, SepCMA
class TestFuzzing(unittest.TestCase):
@given(
data=st.data(),
)
def test_cma_tell(self, data):
dim = data.draw(st.integers(min_value=2, max_value=100))
mean = data.draw(npst.arrays(dtype=float, shape=dim))
sigma = data.draw(st.floats(min_value=1e-16))
n_iterations = data.draw(st.integers(min_value=1))
try:
optimizer = CMA(mean, sigma)
except AssertionError:
return
popsize = optimizer.population_size
for _ in range(n_iterations):
tell_solutions = data.draw(
st.lists(
st.tuples(npst.arrays(dtype=float, shape=dim), st.floats()),
min_size=popsize,
max_size=popsize,
)
)
optimizer.ask()
try:
optimizer.tell(tell_solutions)
except AssertionError:
return
optimizer.ask()
@given(
data=st.data(),
)
def test_sepcma_tell(self, data):
dim = data.draw(st.integers(min_value=2, max_value=100))
mean = data.draw(npst.arrays(dtype=float, shape=dim))
sigma = data.draw(st.floats(min_value=1e-16))
n_iterations = data.draw(st.integers(min_value=1))
try:
optimizer = SepCMA(mean, sigma)
except AssertionError:
return
popsize = optimizer.population_size
for _ in range(n_iterations):
tell_solutions = data.draw(
st.lists(
st.tuples(npst.arrays(dtype=float, shape=dim), st.floats()),
min_size=popsize,
max_size=popsize,
)
)
optimizer.ask()
try:
optimizer.tell(tell_solutions)
except AssertionError:
return
optimizer.ask()
|
src/aihwkit/experiments/experiments/base.py | todd-deshane/aihwkit | 133 | 12600357 | <reponame>todd-deshane/aihwkit
# -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base class for an Experiment."""
from enum import Enum
from typing import Any, Callable, Dict, Optional
class Signals(Enum):
"""Signals emitted by an Experiment."""
EXPERIMENT_START = 1
EXPERIMENT_END = 2
EPOCH_START = 10
EPOCH_END = 11
TRAIN_EPOCH_START = 20
TRAIN_EPOCH_END = 21
TRAIN_EPOCH_BATCH_START = 22
TRAIN_EPOCH_BATCH_END = 23
VALIDATION_EPOCH_START = 30
VALIDATION_EPOCH_END = 31
VALIDATION_EPOCH_BATCH_START = 32
VALIDATION_EPOCH_BATCH_END = 33
class Experiment:
"""Base class for an Experiment.
This class is used as the base class for more specific experiments. The
experiments use ``hooks`` for reporting the different status changes to the
``Metrics`` during the execution of the experiment.
"""
def __init__(self) -> None:
self.hooks: Dict = {
Signals.EXPERIMENT_START: [],
Signals.EXPERIMENT_END: [],
Signals.EPOCH_START: [],
Signals.EPOCH_END: [],
Signals.TRAIN_EPOCH_START: [],
Signals.TRAIN_EPOCH_END: [],
Signals.TRAIN_EPOCH_BATCH_START: [],
Signals.TRAIN_EPOCH_BATCH_END: [],
Signals.VALIDATION_EPOCH_START: [],
Signals.VALIDATION_EPOCH_END: [],
Signals.VALIDATION_EPOCH_BATCH_START: [],
Signals.VALIDATION_EPOCH_BATCH_END: []
}
self.results: Optional[Any] = None
def add_hook(self, key: Signals, hook: Callable) -> None:
"""Register a hook for the experiment.
Register a new hook for a particular signal. During the execution of
the experiment, the ``hook`` function will be called.
Args:
key: signal which the hook will be registered to.
hook: a function that will be called when the signal is emitted.
"""
self.hooks[key].append(hook)
def clear_hooks(self) -> None:
"""Remove all the hooks from the experiment."""
for key in self.hooks:
self.hooks[key] = []
def _call_hook(self, key: Signals, *args: Any, **kwargs: Any) -> Dict:
"""Invoke the hooks for a specific key."""
ret = {}
for hook in self.hooks[key]:
hook_ret = hook(*args, **kwargs)
if isinstance(hook_ret, dict):
ret.update(hook_ret)
return ret
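# A minimal usage sketch (the hook below is hypothetical, not part of aihwkit):
#
#   experiment = Experiment()
#   experiment.add_hook(Signals.EPOCH_END, lambda epoch: {"last_epoch": epoch})
#   experiment._call_hook(Signals.EPOCH_END, epoch=3)  # -> {"last_epoch": 3}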
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/NotifyRuleEventDTO.py | yuanyi-thu/AIOT- | 128 | 12600379 | <filename>test/hlt/pytest/python/com/huawei/iotplatform/client/dto/NotifyRuleEventDTO.py<gh_stars>100-1000
from com.huawei.iotplatform.client.dto.ActionResult import ActionResult
from com.huawei.iotplatform.client.dto.ConditionReason import ConditionReason
class NotifyRuleEventDTO(object):
reasons = ConditionReason()
actionsResults = ActionResult()
def __init__(self):
self.notifyType = None
self.author = None
self.ruleId = None
self.ruleName = None
self.logic = None
self.triggerTime = None
def getNotifyType(self):
return self.notifyType
def setNotifyType(self, notifyType):
self.notifyType = notifyType
def getAuthor(self):
return self.author
def setAuthor(self, author):
self.author = author
def getRuleId(self):
return self.ruleId
def setRuleId(self, ruleId):
self.ruleId = ruleId
def getRuleName(self):
return self.ruleName
def setRuleName(self, ruleName):
self.ruleName = ruleName
def getLogic(self):
return self.logic
def setLogic(self, logic):
self.logic = logic
def getTriggerTime(self):
return self.triggerTime
def setTriggerTime(self, triggerTime):
self.triggerTime = triggerTime
def getReasons(self):
return self.reasons
def setReasons(self, reasons):
self.reasons = reasons
def getActionsResults(self):
return self.actionsResults
def setActionsResults(self, actionsResults):
self.actionsResults = actionsResults
|
examples/howto/ipywidgets/ipyvolume_camera.py | goncaloperes/bokeh | 15,193 | 12600384 | import ipyvolume as ipv
import ipywidgets as ipw
import numpy as np
from ipywidgets_bokeh import IPyWidget
from bokeh.layouts import column, row
from bokeh.models import Slider
from bokeh.plotting import curdoc
x, y, z = np.random.random((3, 1000))
ipv.quickscatter(x, y, z, size=1, marker="sphere")
plot = ipv.current.figure
x_slider = Slider(start=0, end=359, value=0, step=1, title="X-axis")
y_slider = Slider(start=0, end=359, value=0, step=1, title="Y-axis")
z_slider = Slider(start=0, end=359, value=0, step=1, title="Z-axis")
def randomize(button):
x, y, z = np.random.random((3, 1000))
scatter = plot.scatters[0]
with plot.hold_sync():
scatter.x = x
scatter.y = y
scatter.z = z
randomize_button = ipw.Button(description="Randomize")
randomize_button.on_click(randomize)
def change_anglex(change):
v = round(np.degrees(change["new"])) % 360
x_slider.value = v
def change_angley(change):
v = round(np.degrees(change["new"])) % 360
y_slider.value = v
def change_anglez(change):
v = round(np.degrees(change["new"])) % 360
z_slider.value = v
plot.observe(change_anglex, names="anglex")
plot.observe(change_angley, names="angley")
plot.observe(change_anglez, names="anglez")
def change_x(_attr, _old, new):
plot.anglex = np.radians(new)
def change_y(_attr, _old, new):
plot.angley = np.radians(new)
def change_z(_attr, _old, new):
plot.anglez = np.radians(new)
x_slider.on_change("value", change_x)
y_slider.on_change("value", change_y)
z_slider.on_change("value", change_z)
button_wrapper = IPyWidget(widget=randomize_button)
plot_wrapper = IPyWidget(widget=plot)
vbox = column([x_slider, y_slider, z_slider, button_wrapper])
hbox = row([vbox, plot_wrapper])
doc = curdoc()
doc.add_root(hbox)
|
src/exabgp/configuration/l2vpn/__init__.py | pierky/exabgp | 1,560 | 12600388 | # encoding: utf-8
"""
l2vpn/__init__.py
Created by <NAME> on 2015-06-04.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.configuration.l2vpn.vpls import ParseVPLS
from exabgp.bgp.message.update.nlri import VPLS
from exabgp.bgp.message.update.attribute import Attributes
from exabgp.rib.change import Change
from exabgp.configuration.announce import ParseAnnounce
class ParseL2VPN(ParseVPLS):
syntax = 'vpls %s;\n' % ' '.join(ParseVPLS.definition)
action = dict(ParseVPLS.action)
name = 'L2VPN'
def __init__(self, tokeniser, scope, error):
ParseVPLS.__init__(self, tokeniser, scope, error)
def clear(self):
return True
def pre(self):
return True
def post(self):
routes = self.scope.pop_routes()
if routes:
self.scope.extend('routes', routes)
return True
@ParseL2VPN.register('vpls', 'append-route')
def vpls(tokeniser):
change = Change(VPLS(None, None, None, None, None), Attributes())
while True:
command = tokeniser()
if not command:
break
action = ParseVPLS.action[command]
if 'nlri-set' in action:
change.nlri.assign(ParseVPLS.assign[command], ParseL2VPN.known[command](tokeniser))
elif 'attribute-add' in action:
change.attributes.add(ParseL2VPN.known[command](tokeniser))
elif action == 'nexthop-and-attribute':
nexthop, attribute = ParseVPLS.known[command](tokeniser)
change.nlri.nexthop = nexthop
change.attributes.add(attribute)
else:
raise ValueError('vpls: unknown command "%s"' % command)
return [
change,
]
|
inltk/__init__.py | Shubhamjain27/inltk | 814 | 12600416 | <reponame>Shubhamjain27/inltk
name = "inltk" |
homeassistant/components/apple_tv/browse_media.py | MrDelik/core | 22,481 | 12600422 | """Support for media browsing."""
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_APP,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_APP,
MEDIA_TYPE_APPS,
)
def build_app_list(app_list):
"""Create response payload for app list."""
app_list = [
{"app_id": app_id, "title": app_name, "type": MEDIA_TYPE_APP}
for app_name, app_id in app_list.items()
]
return BrowseMedia(
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=None,
media_content_type=MEDIA_TYPE_APPS,
title="Apps",
can_play=True,
can_expand=False,
children=[item_payload(item) for item in app_list],
children_media_class=MEDIA_CLASS_APP,
)
def item_payload(item):
"""
Create response payload for a single media item.
Used by async_browse_media.
"""
return BrowseMedia(
title=item["title"],
media_class=MEDIA_CLASS_APP,
media_content_type=MEDIA_TYPE_APP,
media_content_id=item["app_id"],
can_play=False,
can_expand=False,
)
|
btalib/meta/groups.py | demattia/bta-lib | 352 | 12600469 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
__all__ = []
def _generate(cls, bases, dct, **kwargs):
# Try to find a group definition in the class dictionary (dct) and, if not
# present, fall back to the attribute inherited from the base class
# Add the final attribute in tuple form, to support multiple groups
grps = dct.get('group', ()) or getattr(cls, 'group', ())
if isinstance(grps, str):
grps = (grps,) # if only str, simulate iterable
cls.group = grps # set it in the instance, let others process
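# For example, a subclass declaring ``group = 'overlap'`` ends up with
# ``cls.group == ('overlap',)``, while ``group = ('overlap', 'momentum')`` is
# kept as-is (the group names here are only illustrative).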
|
park/spaces/tuple_space.py | utkarsh5k/park | 180 | 12600470 | from park import core
class Tuple(core.Space):
"""
A tuple (i.e., product) of simpler spaces
Example usage:
self.observation_space = spaces.Tuple((spaces.Discrete(2), spaces.Discrete(3)))
"""
def __init__(self, spaces):
self.spaces = spaces
core.Space.__init__(self, None, None)
def __getitem__(self, idx):
return self.spaces[idx]
def sample(self):
return tuple([space.sample() for space in self.spaces])
def contains(self, x):
if isinstance(x, list):
x = tuple(x) # Promote list to tuple for contains check
return isinstance(x, tuple) and len(x) == len(self.spaces) and all(
space.contains(part) for (space,part) in zip(self.spaces,x))
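# A minimal usage sketch, assuming a ``Discrete`` space is importable from
# ``park.spaces`` as in the docstring above:
#
#   space = Tuple((Discrete(2), Discrete(3)))
#   sample = space.sample()        # e.g. (1, 2)
#   assert space.contains(sample)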
|
shellpython/__init__.py | wujuguang/shellpy | 706 | 12600476 | <reponame>wujuguang/shellpy
import sys
from shellpython.importer import PreprocessorImporter
_importer = PreprocessorImporter()
def init():
"""Initialize shellpython by installing the import hook
"""
if _importer not in sys.meta_path:
sys.meta_path.insert(0, _importer)
def uninit():
"""Uninitialize shellpython by removing the import hook
"""
sys.meta_path.remove(_importer)
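# A minimal usage sketch (``my_shell_script`` is a hypothetical module; which
# modules PreprocessorImporter actually preprocesses is not shown here):
#
#   import shellpython
#   shellpython.init()        # install the import hook
#   import my_shell_script    # resolved through PreprocessorImporter
#   shellpython.uninit()      # remove the hook when done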
|
burun/0007/codes/coupon_code_generator.py | saurabh896/python-1 | 3,976 | 12600554 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Date: 23-02-15
# Author: Liang
import random
import string
coupon_number = 200
coupon_size = 12
for i in range(coupon_number):
coupon = ''.join(
random.sample(string.digits + string.ascii_uppercase, coupon_size))
print(coupon)
|
srcs/python/kungfu/tensorflow/policy/__init__.py | Pandinosaurus/KungFu | 291 | 12600560 | <filename>srcs/python/kungfu/tensorflow/policy/__init__.py
from .base_policy import BasePolicy
from .policy_hook import PolicyHook
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/systems/metric_vm_crawler.py | CCI-MOC/ABMI | 108 | 12600574 | <filename>m2-modified/ims/common/agentless-system-crawler/crawler/plugins/systems/metric_vm_crawler.py
import logging
import time
import psutil
from icrawl_plugin import IVMCrawler
from utils.features import MetricFeature
try:
import psvmi
except ImportError:
psvmi = None
logger = logging.getLogger('crawlutils')
class MetricVmCrawler(IVMCrawler):
"""
To calculate rates like packets sent per second, we need to
store the last measurement. We store it in this dictionary.
"""
def __init__(self):
self._cached_values = {}
def _cache_put_value(self, key, value):
self._cached_values[key] = (value, time.time())
def _cache_get_value(self, key):
if key in self._cached_values:
return self._cached_values[key]
else:
return None, None
def _crawl_metrics_cpu_percent(self, process):
p = process
cpu_percent = 0
feature_key = '{0}-{1}'.format('process', p.ident())
cache_key = '{0}-{1}'.format('OUTVM', feature_key)
curr_proc_cpu_time, curr_sys_cpu_time = p.get_cpu_times()
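# The percentage is derived from deltas: process CPU time consumed versus
# system CPU time elapsed between this sample and the previously cached one.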
(cputimeList, timestamp) = self._cache_get_value(cache_key)
self._cache_put_value(
cache_key, [curr_proc_cpu_time, curr_sys_cpu_time])
if cputimeList is not None:
prev_proc_cpu_time = cputimeList[0]
prev_sys_cpu_time = cputimeList[1]
if prev_proc_cpu_time and prev_sys_cpu_time:
if curr_proc_cpu_time == -1 or prev_proc_cpu_time == -1:
cpu_percent = -1 # unsupported for this VM
else:
if curr_sys_cpu_time == prev_sys_cpu_time:
cpu_percent = 0
else:
cpu_percent = (float(curr_proc_cpu_time -
prev_proc_cpu_time) * 100 /
float(curr_sys_cpu_time -
prev_sys_cpu_time))
return cpu_percent
def crawl(self, vm_desc, **kwargs):
created_since = -1
logger.debug('Crawling Metrics')
if psvmi is None:
raise NotImplementedError()
else:
(domain_name, kernel_version, distro, arch) = vm_desc
# XXX: this has to be read from some cache instead of
# once per plugin/feature
vm_context = psvmi.context_init(
domain_name, domain_name, kernel_version, distro, arch)
list = psvmi.process_iter(vm_context)
for p in list:
create_time = (
p.create_time() if hasattr(
p.create_time,
'__call__') else p.create_time)
if create_time <= created_since:
continue
name = (p.name() if hasattr(p.name, '__call__'
) else p.name)
pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid)
status = (p.status() if hasattr(p.status, '__call__'
) else p.status)
if status == psutil.STATUS_ZOMBIE:
continue
username = (
p.username() if hasattr(
p.username,
'__call__') else p.username)
meminfo = (
p.get_memory_info() if hasattr(
p.get_memory_info,
'__call__') else p.memory_info)
ioinfo = (
p.get_io_counters() if hasattr(
p.get_io_counters,
'__call__') else p.io_counters)
cpu_percent = self._crawl_metrics_cpu_percent(p)
memory_percent = (
p.get_memory_percent() if hasattr(
p.get_memory_percent,
'__call__') else p.memory_percent)
feature_key = '{0}/{1}'.format(name, pid)
yield (feature_key, MetricFeature(
round(cpu_percent, 2),
round(memory_percent, 2),
name,
pid,
ioinfo.read_bytes,
meminfo.rss,
str(status),
username,
meminfo.vms,
ioinfo.write_bytes,
), 'metric')
def get_feature(self):
return 'metric'
|
migrations/versions/394f85935d21_.py | eleweek/WatchPeopleCode | 200 | 12600584 | """empty message
Revision ID: 394f85935d21
Revises: 186a4a79b60e
Create Date: 2015-03-20 20:21:01.792691
"""
# revision identifiers, used by Alembic.
revision = '394f85935d21'
down_revision = '186a4a79b60e'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('wpc_stream',
sa.Column('path', sa.String(length=30), nullable=False),
sa.Column('is_live', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('path')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('wpc_stream')
### end Alembic commands ###
|
DQM/TrackingMonitorSource/python/TrackingSourceConfig_Tier0_HeavyIons_cff.py | malbouis/cmssw | 852 | 12600586 | import FWCore.ParameterSet.Config as cms
# TrackingMonitor ####
from DQM.TrackingMonitor.TrackerHeavyIonTrackingMonitor_cfi import *
TrackMon_hi = TrackerHeavyIonTrackMon.clone(
FolderName = 'Tracking/TrackParameters',
BSFolderName = 'Tracking/TrackParameters/BeamSpotParameters',
TrackProducer = "hiGeneralTracks"
)
TrackMonDQMTier0_hi = cms.Sequence(TrackMon_hi)
|
test/test_pushsafer.py | linkmauve/apprise | 4,764 | 12600620 | <reponame>linkmauve/apprise
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import pytest
import mock
import requests
from json import dumps
from apprise import AppriseAttachment
from apprise import NotifyType
from apprise import plugins
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Attachment Directory
TEST_VAR_DIR = os.path.join(os.path.dirname(__file__), 'var')
@mock.patch('requests.post')
def test_notify_pushsafer_plugin(mock_post):
"""
API: NotifyPushSafer() Tests
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# Private Key
privatekey = 'abc123'
# Prepare Mock
mock_post.return_value = requests.Request()
mock_post.return_value.status_code = requests.codes.ok
mock_post.return_value.content = dumps({
'status': 1,
'success': "okay",
})
# Exception should be thrown about the fact no private key was specified
with pytest.raises(TypeError):
plugins.NotifyPushSafer(privatekey=None)
# Multiple Attachment Support
path = os.path.join(TEST_VAR_DIR, 'apprise-test.gif')
attach = AppriseAttachment()
for _ in range(0, 4):
attach.add(path)
obj = plugins.NotifyPushSafer(privatekey=privatekey)
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=attach) is True
# Test error reading attachment from disk
with mock.patch('io.open', side_effect=OSError):
obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=attach)
# Test unsupported mime type
attach = AppriseAttachment(path)
attach[0]._mimetype = 'application/octet-stream'
# We gracefully just don't send the attachment in these cases;
# The notify itself will still be successful
mock_post.reset_mock()
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=attach) is True
# the 'p', 'p2', and 'p3' are the data variables used when including an
# image.
assert 'data' in mock_post.call_args[1]
assert 'p' not in mock_post.call_args[1]['data']
assert 'p2' not in mock_post.call_args[1]['data']
assert 'p3' not in mock_post.call_args[1]['data']
# Invalid file path
path = os.path.join(TEST_VAR_DIR, '/invalid/path/to/an/invalid/file.jpg')
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=path) is False
|
machine_learning/python/eval_metrics/performance_metrics.py | CarbonDDR/al-go-rithms | 1,253 | 12600632 | # You must estimate the quality of a set of predictions when training a machine learning model. Performance metrics like classification accuracy and root mean squared error can give you a clear objective idea of how good a set of predictions is, and in turn how good the model is that generated them.
# This is important as it allows you to tell the difference and select among:
# 1 - Different transforms of the data used to train the same machine learning model.
# 2 - Different machine learning models trained on the same data.
# 3 - Different configurations for a machine learning model trained on the same data.
# As such, performance metrics are a required building block in implementing machine learning algorithms from scratch.
# Classification Accuracy: Calculate accuracy percentage between two lists
def accuracy_metric(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
# Test accuracy
actual = [0,0,0,0,0,1,1,1,1,1]
predicted = [0,1,0,0,0,1,0,1,1,1]
accuracy = accuracy_metric(actual, predicted)
print(accuracy)
# Confusion Matrix: calculate a confusion matrix
def confusion_matrix(actual, predicted):
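# Rows of the matrix index the actual class and columns the predicted class
# (the (A)/(P) axes shown by print_confusion_matrix below).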
unique = set(actual)
matrix = [list() for x in range(len(unique))]
for i in range(len(unique)):
matrix[i] = [0 for x in range(len(unique))]
lookup = dict()
for i, value in enumerate(unique):
lookup[value] = i
for i in range(len(actual)):
x = lookup[actual[i]]
y = lookup[predicted[i]]
matrix[x][y] += 1
return unique, matrix
# pretty print a confusion matrix
def print_confusion_matrix(unique, matrix):
print('(P)' + ' '.join(str(x) for x in unique))
print('(A)---')
for i, x in enumerate(unique):
print("%s| %s" % (x, ' '.join(str(x) for x in matrix[i])))
# Test confusion matrix with integers
actual = [0,0,0,0,0,1,1,1,1,1]
predicted = [0,1,1,0,0,1,0,1,1,1]
unique, matrix = confusion_matrix(actual, predicted)
print_confusion_matrix(unique, matrix)
# Mean Absolute Error: Calculate mean absolute error
def mae_metric(actual, predicted):
sum_error = 0.0
for i in range(len(actual)):
sum_error += abs(predicted[i] - actual[i])
return sum_error / float(len(actual))
# Test MAE
actual = [0.1, 0.2, 0.3, 0.4, 0.5]
predicted = [0.11, 0.19, 0.29, 0.41, 0.5]
mae = mae_metric(actual, predicted)
print(mae)
# Root Mean Squared Error: Calculate root mean squared error
from math import sqrt
def rmse_metric(actual, predicted):
sum_error = 0.0
for i in range(len(actual)):
prediction_error = predicted[i] - actual[i]
sum_error += (prediction_error ** 2)
mean_error = sum_error / float(len(actual))
return sqrt(mean_error)
# Test RMSE
actual = [0.1, 0.2, 0.3, 0.4, 0.5]
predicted = [0.11, 0.19, 0.29, 0.41, 0.5]
rmse = rmse_metric(actual, predicted)
print(rmse)
|
quant/collect.py | vincent87lee/alphahunter | 149 | 12600655 | <reponame>vincent87lee/alphahunter
# -*- coding:utf-8 -*-
"""
行情采集模块
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import sys
from collections import defaultdict
from quant import const
from quant.state import State
from quant.utils import tools, logger
from quant.utils.mongo import MongoDB
from quant.config import config
from quant.market import Market, Kline, Orderbook, Trade, Ticker
from quant.order import Order, Fill
from quant.position import Position
from quant.asset import Asset
from quant.tasks import LoopRunTask, SingleTask
from quant.gateway import ExchangeGateway
from quant.trader import Trader
from quant.strategy import Strategy
from quant.event import EventOrderbook, EventKline, EventTrade, EventTicker
class Collect(Strategy):
def __init__(self):
""" 初始化
"""
super(Collect, self).__init__()
self.strategy = config.strategy
self.platform = config.platforms[0]["platform"]
self.symbols = config.platforms[0]["symbols"]
# Gateway interface parameters
params = {
"strategy": self.strategy,
"platform": self.platform,
"symbols": self.symbols,
"enable_kline_update": True,
"enable_orderbook_update": True,
"enable_trade_update": True,
"enable_ticker_update": True,
"enable_order_update": False,
"enable_fill_update": False,
"enable_position_update": False,
"enable_asset_update": False,
"direct_kline_update": True,
"direct_orderbook_update": True,
"direct_trade_update": True,
"direct_ticker_update": True
}
self.gw = self.create_gateway(**params)
# Prepare for saving market data to the database
self.t_orderbook_map = defaultdict(lambda:None)
self.t_trade_map = defaultdict(lambda:None)
self.t_kline_map = defaultdict(lambda:None)
self.d_orderbook_map = defaultdict(lambda:[])
self.d_trade_map = defaultdict(lambda:[])
if config.mongodb:
for sym in self.symbols:
postfix = sym.replace('-','').replace('_','').replace('/','').lower() # normalize every possible symbol format to our own database table naming convention
# Order book
name = "t_orderbook_{}_{}".format(self.platform, postfix).lower()
self.t_orderbook_map[sym] = MongoDB("db_market", name)
# Tick-by-tick trades
name = "t_trade_{}_{}".format(self.platform, postfix).lower()
self.t_trade_map[sym] = MongoDB("db_market", name)
# Klines (candlesticks)
name = "t_kline_{}_{}".format(self.platform, postfix).lower()
self.t_kline_map[sym] = MongoDB("db_market", name)
async def on_state_update_callback(self, state: State, **kwargs):
""" 状态变化(底层交易所接口,框架等)通知回调函数
"""
logger.info("on_state_update_callback:", state, caller=self)
async def on_kline_update_callback(self, kline: Kline):
""" 市场K线更新
"""
logger.info("kline:", kline, caller=self)
# Save the market data into the database
kwargs = {
"open": kline.open,
"high": kline.high,
"low": kline.low,
"close": kline.close,
"volume": kline.volume,
"begin_dt": kline.timestamp,
"end_dt": kline.timestamp+60*1000-1
}
async def save(kwargs):
t_kline = self.t_kline_map[kline.symbol]
if t_kline:
s, e = await t_kline.insert(kwargs)
if e:
logger.error("insert kline:", e, caller=self)
SingleTask.run(save, kwargs)
# Publish the market data to the message queue
kwargs = {
"platform": kline.platform,
"symbol": kline.symbol,
"open": kline.open,
"high": kline.high,
"low": kline.low,
"close": kline.close,
"volume": kline.volume,
"timestamp": kline.timestamp,
"kline_type": kline.kline_type
}
EventKline(**kwargs).publish()
async def on_orderbook_update_callback(self, orderbook: Orderbook):
""" 订单薄更新
"""
logger.info("orderbook:", orderbook, caller=self)
# Save the market data into the database
kwargs = {}
i = 1
for ask in orderbook.asks:
kwargs[f'askprice{i}'] = ask[0]
kwargs[f'asksize{i}'] = ask[1]
i = i + 1
if i > 20: break
i = 1
for bid in orderbook.bids:
kwargs[f'bidprice{i}'] = bid[0]
kwargs[f'bidsize{i}'] = bid[1]
i = i + 1
if i > 20: break
kwargs["pubdt"] = orderbook.timestamp #交易所发布行情的时间
kwargs["dt"] = tools.get_cur_timestamp_ms() #本地采集行情的时间
async def save(kwargs):
#一秒内会有多次通知,将一秒内的通知都收集在一起,一次性写入数据库,约一秒写一次,提高数据库性能
dlist = self.d_orderbook_map[orderbook.symbol]
dlist.append(kwargs)
if dlist[len(dlist)-1]["dt"]-dlist[0]["dt"] > 1000: #每秒写一次数据库
#因为是异步并发,所以先清空列表,重新收集新的一秒内的所有通知,而不是等待数据库IO完成再清空(python的变量只是对象的引用)
#xxx = copy.deepcopy(dlist)
#dlist.clear()
#insert xxx
self.d_orderbook_map[orderbook.symbol] = []
#写数据库
t_orderbook = self.t_orderbook_map[orderbook.symbol]
if t_orderbook:
s, e = await t_orderbook.insert(dlist)
if e:
logger.error("insert orderbook:", e, caller=self)
SingleTask.run(save, kwargs)
# Publish the market data to the message queue
kwargs = {
"platform": orderbook.platform,
"symbol": orderbook.symbol,
"asks": orderbook.asks,
"bids": orderbook.bids,
"timestamp": orderbook.timestamp
}
EventOrderbook(**kwargs).publish()
async def on_trade_update_callback(self, trade: Trade):
""" 市场最新成交更新
"""
logger.info("trade:", trade, caller=self)
# Save the market data into the database
kwargs = {
"direction": trade.action,
"tradeprice": trade.price,
"volume": trade.quantity,
"tradedt": trade.timestamp,
"dt": tools.get_cur_timestamp_ms()
}
async def save(kwargs):
# Multiple notifications arrive within one second; collect them and write them to the database in a single batch, roughly once per second, to improve database performance
dlist = self.d_trade_map[trade.symbol]
dlist.append(kwargs)
if dlist[len(dlist)-1]["dt"]-dlist[0]["dt"] > 1000: # write to the database about once per second
# Because of asynchronous concurrency, reset the list first and start collecting the next second's notifications instead of waiting for the database IO to finish (Python variables are just references to objects)
#xxx = copy.deepcopy(dlist)
#dlist.clear()
#insert xxx
self.d_trade_map[trade.symbol] = []
# Write to the database
t_trade = self.t_trade_map[trade.symbol]
if t_trade:
s, e = await t_trade.insert(dlist)
if e:
logger.error("insert trade:", e, caller=self)
SingleTask.run(save, kwargs)
# Publish the market data to the message queue
kwargs = {
"platform": trade.platform,
"symbol": trade.symbol,
"action": trade.action,
"price": trade.price,
"quantity": trade.quantity,
"timestamp": trade.timestamp
}
EventTrade(**kwargs).publish()
async def on_ticker_update_callback(self, ticker: Ticker):
""" 市场行情tick更新
"""
logger.info("ticker:", ticker, caller=self)
kwargs = {
"platform": ticker.platform,
"symbol": ticker.symbol,
"ask": ticker.ask,
"bid": ticker.bid,
"last": ticker.last,
"timestamp": ticker.timestamp
}
EventTicker(**kwargs).publish()
async def on_order_update_callback(self, order: Order): ...
async def on_fill_update_callback(self, fill: Fill): ...
async def on_position_update_callback(self, position: Position): ...
async def on_asset_update_callback(self, asset: Asset): ...
|
sfepy/terms/terms_th.py | clazaro/sfepy | 510 | 12600682 | <reponame>clazaro/sfepy<gh_stars>100-1000
import numpy as nm
from sfepy.base.base import Struct
from sfepy.terms.terms import Term
class THTerm(Term):
"""
Base class for terms depending on time history (fading memory
terms).
"""
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
if diff_var is None:
if mode == 'eval':
out = 0.0
else:
out = nm.zeros(shape, dtype=nm.float64)
iter_kernel = fargs
for ii, fargs in iter_kernel():
out1, status = Term.eval_real(self, shape, fargs, mode=mode,
term_mode=term_mode,
diff_var=diff_var, **kwargs)
out += out1
else:
out, status = Term.eval_real(self, shape, fargs, mode=mode,
term_mode=term_mode,
diff_var=diff_var, **kwargs)
return out, status
class ETHTerm(Term):
"""
Base class for terms depending on time history with exponential
convolution kernel (fading memory terms).
"""
def get_eth_data(self, key, state, decay, values):
step_cache = state.evaluate_cache.setdefault('eth', {})
cache = step_cache.setdefault(None, {})
data_key = key + (self.arg_derivatives[state.name],)
if data_key in cache:
out = cache[data_key]
out.values = values
else:
out = Struct(history=nm.zeros_like(values),
values=values,
decay=decay,
__advance__=self.advance_eth_data)
cache[data_key] = out
return out
def advance_eth_data(self, ts, data):
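# Exponential fading-memory recurrence: history <- decay * (history + values)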
data.history[:] = data.decay * (data.history + data.values)
|
moya/context/dataindex.py | moyaproject/moya | 129 | 12600744 | <filename>moya/context/dataindex.py
from __future__ import unicode_literals
from __future__ import print_function
from ..compat import implements_to_string, text_type, string_types, implements_bool
from operator import truth
@implements_bool
@implements_to_string
class ParseResult(object):
"""An immutable list like object that stores the results of a parsed index"""
__slots__ = ["tokens", "from_root", "index"]
def __init__(self, tokens, from_root):
self.tokens = tokens
self.from_root = from_root
self.index = None
def __str__(self):
if self.index is None:
if self.from_root:
self.index = "." + build(self.tokens)
else:
self.index = build(self.tokens)
return self.index
def __repr__(self):
return "ParseResult(%r)" % (text_type(self))
@property
def top_tail(self):
return self.tokens[0], self.tokens[1:]
def __moyarepr__(self, context):
return text_type(self)
def get(self, index, default=None):
return self.tokens.get(index, default)
def as_list(self):
return self.tokens[:]
def __iter__(self):
return iter(self.tokens)
def __len__(self):
return len(self.tokens)
def __getitem__(self, i):
return self.tokens[i]
def __eq__(self, other):
return self.tokens == other
def __ne__(self, other):
return self.tokens != other
def __bool__(self):
return truth(self.tokens)
def parse(s, parse_cache={}):
"""Parse a string containing a dotted notation data index in to a list of indices"""
if isinstance(s, ParseResult):
return s
cached_result = parse_cache.get(s, None)
if cached_result is not None:
return cached_result
from_root = s.startswith(".")
iter_chars = iter(s)
tokens = [] # Token accumulator
token = [] # Current token
append_token = tokens.append
append_char = token.append
def pop():
c = next(iter_chars, None)
if c is None:
return None, None
if c == "\\":
c = next(iter_chars, None)
if c is None:
return None, None
return True, c
if c == ".":
return True, None
else:
return False, c
def pop2():
c = next(iter_chars, None)
if c is None:
return None, None
return False, c
def asint(s):
return int(s) if s.isdigit() else s
join = "".join
while 1:
literal, c = pop()
if literal is None:
break
if c is None:
continue
if not literal and c == '"':
while 1:
literal, c = pop2()
if c is None:
break
elif not literal and c == '"':
append_token(join(token))
del token[:]
break
else:
append_char(c)
else:
append_char(c)
while 1:
literal, c = pop()
if c is None:
append_token(asint(join(token)))
del token[:]
break
else:
append_char(c)
if token:
append_token(asint(join(token)))
tokens = ParseResult(tokens, from_root)
parse_cache[s] = tokens
return tokens
def build(indices, absolute=False):
"""Combines a sequence of indices in to a data index string"""
if isinstance(indices, string_types):
return indices
def escape(s):
if isinstance(s, string_types):
if " " in s or "." in s:
s = '"%s"' % s.replace('"', '\\"')
return s
else:
return text_type(s)
if absolute:
return "." + ".".join(escape(s) for s in indices)
else:
return ".".join(escape(s) for s in indices)
def is_from_root(indices):
"""Test a string index is from the root"""
# Mainly here for self documentation purposes
if hasattr(indices, "from_root"):
return indices.from_root
return indices.startswith(".")
def normalise(s):
"""Normalizes a data index"""
return build(parse(s))
normalize = normalise # For Americans
def iter_index(index):
index_accumulator = []
push = index_accumulator.append
join = ".".join
for name in parse(index):
push(name)
yield name, join(text_type(s) for s in index_accumulator)
def join(*indices):
"""Joins 2 or more inidices in to one"""
absolute = False
joined = []
append = joined.append
for index in indices:
if isinstance(index, string_types):
if index.startswith("."):
absolute = True
del joined[:]
append(parse(index[1:]))
else:
append(parse(index))
else:
if getattr(index, "from_root", False):
absolute = True
del joined[:]
append(index)
new_indices = []
for index in joined:
new_indices.extend(index)
return build(new_indices, absolute)
indexjoin = join
def makeindex(*subindices):
"""Make an index from sub indexes"""
return ".".join(text_type(i) for i in subindices)
def join_parsed(*indices):
absolute = False
joined = []
append = joined.append
for index in indices:
if isinstance(index, string_types):
if index.startswith("."):
absolute = True
del joined[:]
append(parse(index[1:]))
else:
append(parse(index))
else:
if getattr(index, "from_root", False):
absolute = True
del joined[:]
append(index)
new_indices = []
for index in joined:
new_indices.extend(index)
return ParseResult(new_indices, absolute)
def make_absolute(index):
"""Make an index absolute (preceded by a '.')"""
if not isinstance(index, string_types):
index = build(index)
return "." + text_type(index).lstrip(".")
if __name__ == "__main__":
test = 'foo.1.2."3"."sdsd.sdsd".1:2.1:.2:.file\.txt.3'
print(test)
print(parse(test))
print(normalize(test))
print(parse(normalize(test)))
print(join("call", "param1", ("a", "b", "c")))
print(join(["callstack", 1], "foo"))
|
1000-1100q/1037.py | rampup01/Leetcode | 990 | 12600759 | <reponame>rampup01/Leetcode
'''
A boomerang is a set of 3 points that are all distinct and not in a straight line.
Given a list of three points in the plane, return whether these points are a boomerang.
Example 1:
Input: [[1,1],[2,3],[3,2]]
Output: true
Example 2:
Input: [[1,1],[2,2],[3,3]]
Output: false
Note:
points.length == 3
points[i].length == 2
0 <= points[i][j] <= 100
'''
class Solution(object):
def isBoomerang(self, points):
"""
:type points: List[List[int]]
:rtype: bool
"""
x1, x2, x3, y1, y2, y3 = points[0][0], points[1][0], points[2][0], points[0][1], points[1][1] ,points[2][1]
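# Cross product of the vectors (p2 - p1) and (p3 - p2); a zero value means
# the three points are collinear (or coincident), hence not a boomerang.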
if ((y3 - y2)*(x2 - x1) == (y2 - y1)*(x3 - x2)):
return False
return True
|
frameworks/Python/pyramid/setup.py | efectn/FrameworkBenchmarks | 5,300 | 12600801 | from setuptools import setup, find_packages
requires = [
"pyramid",
"pyramid_chameleon",
"sqlalchemy[postgresql]",
"gunicorn",
"orjson",
]
tests_require = ["webtest"]
setup(
name="frameworkbenchmarks",
version="0.0",
description="FrameworkBenchmarks",
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author="",
author_email="",
url="",
keywords="web pyramid pylons",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=tests_require,
test_suite="frameworkbenchmarks",
entry_points="""\
[paste.app_factory]
main = frameworkbenchmarks:main
""",
)
|
benchmarks/distributed/rpc/parameter_server/data/__init__.py | Hacky-DH/pytorch | 60,067 | 12600806 | from .DummyData import DummyData
data_map = {
"DummyData": DummyData
}
|
src/c3nav/mapdata/migrations/0033_auto_20170807_1423.py | johnjohndoe/c3nav | 132 | 12600891 | <reponame>johnjohndoe/c3nav<filename>src/c3nav/mapdata/migrations/0033_auto_20170807_1423.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-07 12:23
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0032_remove_graphnode_space_transfer'),
]
operations = [
migrations.AlterField(
model_name='altitudearea',
name='geometry',
field=c3nav.mapdata.fields.GeometryField(default=None, geomtype='multipolygon'),
),
]
|
tests/test_long_tablename.py | Steff94190/sqlalchemy-redshift | 177 | 12600897 | from rs_sqla_test_utils import models
def test_long_tablename(redshift_session):
session = redshift_session
examples = [models.LongTablename(metric=i) for i in range(5)]
session.add_all(examples)
rows = session.query(models.LongTablename.metric)
assert set(row.metric for row in rows) == set([0, 1, 2, 3, 4])
|
pygithub3/services/repos/hooks.py | teamorchard/python-github3 | 107 | 12600921 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from . import Service
class Hooks(Service):
""" Consume `Hooks API
<http://developer.github.com/v3/repos/hooks>`_
.. warning::
You must be authenticated and have repository's admin-permission
"""
def list(self, user=None, repo=None):
""" Get repository's hooks
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('repos.hooks.list', user=user, repo=repo)
return self._get_result(request)
def get(self, hook_id, user=None, repo=None):
""" Get a single hook
:param int hook_id: Hook id
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('repos.hooks.get',
id=hook_id, user=user, repo=repo)
return self._get(request)
def create(self, data, user=None, repo=None):
""" Create a hook
:param dict data: Input. See `github hooks doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
::
data = {
"name": "acunote",
"active": True,
"config": {
'token': 'AAA...',
},
"events": ['push', 'issues'],
}
hooks_service.create(data, user='octocat', repo='oct_repo')
"""
request = self.make_request('repos.hooks.create',
user=user, repo=repo, body=data)
return self._post(request)
def update(self, hook_id, data, user=None, repo=None):
""" Update a single hook
:param int hook_id: Hook id
:param dict data: Input. See `github hooks doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
::
hooks_service.update(42, dict(active=False), user='octocat',
repo='oct_repo')
"""
request = self.make_request('repos.hooks.update',
id=hook_id, user=user, repo=repo, body=data)
return self._patch(request)
def test(self, hook_id, user=None, repo=None):
""" Test a hook
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
This will trigger the hook with the latest push to the current
repository.
"""
request = self.make_request('repos.hooks.test',
id=hook_id, user=user, repo=repo)
self._request('post', request)
def delete(self, hook_id, user=None, repo=None):
""" Delete a single hook
:param int hook_id: Hook id
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
request = self.make_request('repos.hooks.delete',
id=hook_id, user=user, repo=repo)
self._delete(request)
|
codigo/Live76/fake_exemplo.py | cassiasamp/live-de-python | 572 | 12600922 | <filename>codigo/Live76/fake_exemplo.py
"""Exemplo do dublê Fake."""
class Pedido:
def __init__(self, valor, frete, usuario):
self.valor = valor
self.frete = frete
self.usuario = usuario
@property
def resumo(self):
"""Informações gerais sobre o pedido."""
return f'''
Ordered by: {self.usuario.nome_completo}
Amount: {self.valor}
Shipping: {self.frete}
'''
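# FakePessoa is the test double: it stands in for the real user object and
# returns hard-coded data, so Pedido.resumo can be exercised without building
# a real user (the real class is not shown in this example).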
class FakePessoa:
@property
def nome_completo(self):
return '<NAME>'
Pedido(100.00, 13.00, FakePessoa()).resumo
|
boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py | cowboygneox/boto3_type_annotations | 119 | 12600949 | <reponame>cowboygneox/boto3_type_annotations<filename>boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py
from typing import Optional
from botocore.client import BaseClient
from botocore.waiter import Waiter
from typing import Union
from typing import Dict
from botocore.paginate import Paginator
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_cluster(self, name: str, roleArn: str, resourcesVpcConfig: Dict, version: str = None, logging: Dict = None, clientRequestToken: str = None) -> Dict:
"""
Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, like ``etcd`` and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique, and runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support ``kubectl exec`` , ``logs`` , and ``proxy`` data flows).
Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.
You can use the ``endpointPublicAccess`` and ``endpointPrivateAccess`` parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled and private access is disabled. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
You can use the ``logging`` parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see `Managing Cluster Authentication <https://docs.aws.amazon.com/eks/latest/userguide/managing-auth.html>`__ and `Launching Amazon EKS Worker Nodes <https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html>`__ in the *Amazon EKS User Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/CreateCluster>`_
**Request Syntax**
::
response = client.create_cluster(
name='string',
version='string',
roleArn='string',
resourcesVpcConfig={
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
logging={
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
clientRequestToken='string'
)
**Response Syntax**
::
{
'cluster': {
'name': 'string',
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'version': 'string',
'endpoint': 'string',
'roleArn': 'string',
'resourcesVpcConfig': {
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'vpcId': 'string',
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
'logging': {
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',
'certificateAuthority': {
'data': 'string'
},
'clientRequestToken': 'string',
'platformVersion': 'string'
}
}
**Response Structure**
- *(dict) --*
- **cluster** *(dict) --*
The full description of your new cluster.
- **name** *(string) --*
The name of the cluster.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the cluster was created.
- **version** *(string) --*
The Kubernetes server version for the cluster.
- **endpoint** *(string) --*
The endpoint for your Kubernetes API server.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
- **resourcesVpcConfig** *(dict) --*
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* .
- **subnetIds** *(list) --*
The subnets associated with your cluster.
- *(string) --*
- **securityGroupIds** *(list) --*
The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **vpcId** *(string) --*
The VPC associated with your cluster.
- **endpointPublicAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC.
- **endpointPrivateAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet.
- **logging** *(dict) --*
The logging configuration for your cluster.
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
- **status** *(string) --*
The current status of the cluster.
- **certificateAuthority** *(dict) --*
The ``certificate-authority-data`` for your cluster.
- **data** *(string) --*
The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster.
- **clientRequestToken** *(string) --*
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
- **platformVersion** *(string) --*
The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * .
:type name: string
:param name: **[REQUIRED]**
The unique name to give to your cluster.
:type version: string
:param version:
The desired Kubernetes version for your cluster. If you do not specify a value here, the latest version available in Amazon EKS is used.
:type roleArn: string
:param roleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see `Amazon EKS Service IAM Role <https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html>`__ in the * *Amazon EKS User Guide* * .
:type resourcesVpcConfig: dict
:param resourcesVpcConfig: **[REQUIRED]**
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . You must specify at least two subnets. You may specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.
- **subnetIds** *(list) --*
Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **securityGroupIds** *(list) --*
Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you do not specify a security group, the default security group for your VPC is used.
- *(string) --*
- **endpointPublicAccess** *(boolean) --*
Set this value to ``false`` to disable public access for your cluster\'s Kubernetes API server endpoint. If you disable public access, your cluster\'s Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is ``true`` , which enables public access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
- **endpointPrivateAccess** *(boolean) --*
Set this value to ``true`` to enable private access for your cluster\'s Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster\'s VPC will use the private VPC endpoint. The default value for this parameter is ``false`` , which disables private access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
:type logging: dict
:param logging:
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
:type clientRequestToken: string
:param clientRequestToken:
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
This field is autopopulated if not provided.
:rtype: dict
:returns:
"""
pass
def delete_cluster(self, name: str) -> Dict:
"""
Deletes the Amazon EKS cluster control plane.
.. note::
If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see `Deleting a Cluster <https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html>`__ in the *Amazon EKS User Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DeleteCluster>`_
**Request Syntax**
::
response = client.delete_cluster(
name='string'
)
**Response Syntax**
::
{
'cluster': {
'name': 'string',
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'version': 'string',
'endpoint': 'string',
'roleArn': 'string',
'resourcesVpcConfig': {
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'vpcId': 'string',
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
'logging': {
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',
'certificateAuthority': {
'data': 'string'
},
'clientRequestToken': 'string',
'platformVersion': 'string'
}
}
**Response Structure**
- *(dict) --*
- **cluster** *(dict) --*
The full description of the cluster to delete.
- **name** *(string) --*
The name of the cluster.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the cluster was created.
- **version** *(string) --*
The Kubernetes server version for the cluster.
- **endpoint** *(string) --*
The endpoint for your Kubernetes API server.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
- **resourcesVpcConfig** *(dict) --*
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* .
- **subnetIds** *(list) --*
The subnets associated with your cluster.
- *(string) --*
- **securityGroupIds** *(list) --*
The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **vpcId** *(string) --*
The VPC associated with your cluster.
- **endpointPublicAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC.
- **endpointPrivateAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet.
- **logging** *(dict) --*
The logging configuration for your cluster.
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
- **status** *(string) --*
The current status of the cluster.
- **certificateAuthority** *(dict) --*
The ``certificate-authority-data`` for your cluster.
- **data** *(string) --*
The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster.
- **clientRequestToken** *(string) --*
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
- **platformVersion** *(string) --*
The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * .
:type name: string
:param name: **[REQUIRED]**
The name of the cluster to delete.
:rtype: dict
:returns:
"""
pass
def describe_cluster(self, name: str) -> Dict:
"""
Returns descriptive information about an Amazon EKS cluster.
The API server endpoint and certificate authority data returned by this operation are required for ``kubelet`` and ``kubectl`` to communicate with your Kubernetes API server. For more information, see `Create a kubeconfig for Amazon EKS <https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html>`__ .
.. note::
The API server endpoint and certificate authority data are not available until the cluster reaches the ``ACTIVE`` state.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DescribeCluster>`_
**Request Syntax**
::
response = client.describe_cluster(
name='string'
)
**Response Syntax**
::
{
'cluster': {
'name': 'string',
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'version': 'string',
'endpoint': 'string',
'roleArn': 'string',
'resourcesVpcConfig': {
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'vpcId': 'string',
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
'logging': {
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',
'certificateAuthority': {
'data': 'string'
},
'clientRequestToken': 'string',
'platformVersion': 'string'
}
}
**Response Structure**
- *(dict) --*
- **cluster** *(dict) --*
The full description of your specified cluster.
- **name** *(string) --*
The name of the cluster.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the cluster was created.
- **version** *(string) --*
The Kubernetes server version for the cluster.
- **endpoint** *(string) --*
The endpoint for your Kubernetes API server.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
- **resourcesVpcConfig** *(dict) --*
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* .
- **subnetIds** *(list) --*
The subnets associated with your cluster.
- *(string) --*
- **securityGroupIds** *(list) --*
The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **vpcId** *(string) --*
The VPC associated with your cluster.
- **endpointPublicAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC.
- **endpointPrivateAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet.
- **logging** *(dict) --*
The logging configuration for your cluster.
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
- **status** *(string) --*
The current status of the cluster.
- **certificateAuthority** *(dict) --*
The ``certificate-authority-data`` for your cluster.
- **data** *(string) --*
The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster.
- **clientRequestToken** *(string) --*
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
- **platformVersion** *(string) --*
The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * .
:type name: string
:param name: **[REQUIRED]**
The name of the cluster to describe.
:rtype: dict
:returns:
"""
pass
def describe_update(self, name: str, updateId: str) -> Dict:
"""
Returns descriptive information about an update against your Amazon EKS cluster.
When the status of the update is ``Succeeded`` , the update is complete. If an update fails, the status is ``Failed`` , and an error detail explains the reason for the failure.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DescribeUpdate>`_
**Request Syntax**
::
response = client.describe_update(
name='string',
updateId='string'
)
**Response Syntax**
::
{
'update': {
'id': 'string',
'status': 'InProgress'|'Failed'|'Cancelled'|'Successful',
'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate',
'params': [
{
'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging',
'value': 'string'
},
],
'createdAt': datetime(2015, 1, 1),
'errors': [
{
'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown',
'errorMessage': 'string',
'resourceIds': [
'string',
]
},
]
}
}
**Response Structure**
- *(dict) --*
- **update** *(dict) --*
The full description of the specified update.
- **id** *(string) --*
A UUID that is used to track the update.
- **status** *(string) --*
The current status of the update.
- **type** *(string) --*
The type of the update.
- **params** *(list) --*
A key-value map that contains the parameters associated with the update.
- *(dict) --*
An object representing the details of an update request.
- **type** *(string) --*
The keys associated with an update request.
- **value** *(string) --*
The value of the keys submitted as part of an update request.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the update was created.
- **errors** *(list) --*
Any errors associated with a ``Failed`` update.
- *(dict) --*
An object representing an error when an asynchronous operation fails.
- **errorCode** *(string) --*
A brief description of the error.
* **SubnetNotFound** : One of the subnets associated with the cluster could not be found.
* **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found.
* **EniLimitReached** : You have reached the elastic network interface limit for your account.
* **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses.
* **AccessDenied** : You do not have permissions to perform the specified operation.
* **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS.
* **VpcIdNotFound** : The VPC associated with the cluster could not be found.
- **errorMessage** *(string) --*
A more complete description of the error.
- **resourceIds** *(list) --*
An optional field that contains the resource IDs associated with the error.
- *(string) --*
:type name: string
:param name: **[REQUIRED]**
The name of the Amazon EKS cluster to update.
:type updateId: string
:param updateId: **[REQUIRED]**
The ID of the update to describe.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_clusters(self, maxResults: int = None, nextToken: str = None) -> Dict:
"""
Lists the Amazon EKS clusters in your AWS account in the specified Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListClusters>`_
**Request Syntax**
::
response = client.list_clusters(
maxResults=123,
nextToken='string'
)
**Response Syntax**
::
{
'clusters': [
'string',
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **clusters** *(list) --*
A list of all of the clusters for your account in the specified Region.
- *(string) --*
- **nextToken** *(string) --*
The ``nextToken`` value to include in a future ``ListClusters`` request. When the results of a ``ListClusters`` request exceed ``maxResults`` , this value can be used to retrieve the next page of results. This value is ``null`` when there are no more results to return.
:type maxResults: integer
:param maxResults:
The maximum number of cluster results returned by ``ListClusters`` in paginated output. When this parameter is used, ``ListClusters`` only returns ``maxResults`` results in a single page along with a ``nextToken`` response element. The remaining results of the initial request can be seen by sending another ``ListClusters`` request with the returned ``nextToken`` value. This value can be between 1 and 100. If this parameter is not used, then ``ListClusters`` returns up to 100 results and a ``nextToken`` value if applicable.
:type nextToken: string
:param nextToken:
The ``nextToken`` value returned from a previous paginated ``ListClusters`` request where ``maxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``nextToken`` value.
.. note::
This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.
:rtype: dict
:returns:
"""
pass
def list_updates(self, name: str, nextToken: str = None, maxResults: int = None) -> Dict:
"""
Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListUpdates>`_
**Request Syntax**
::
response = client.list_updates(
name='string',
nextToken='string',
maxResults=123
)
**Response Syntax**
::
{
'updateIds': [
'string',
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **updateIds** *(list) --*
A list of all the updates for the specified cluster and Region.
- *(string) --*
- **nextToken** *(string) --*
The ``nextToken`` value to include in a future ``ListUpdates`` request. When the results of a ``ListUpdates`` request exceed ``maxResults`` , this value can be used to retrieve the next page of results. This value is ``null`` when there are no more results to return.
:type name: string
:param name: **[REQUIRED]**
The name of the Amazon EKS cluster for which to list updates.
:type nextToken: string
:param nextToken:
The ``nextToken`` value returned from a previous paginated ``ListUpdates`` request where ``maxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``nextToken`` value.
:type maxResults: integer
:param maxResults:
The maximum number of update results returned by ``ListUpdates`` in paginated output. When this parameter is used, ``ListUpdates`` only returns ``maxResults`` results in a single page along with a ``nextToken`` response element. The remaining results of the initial request can be seen by sending another ``ListUpdates`` request with the returned ``nextToken`` value. This value can be between 1 and 100. If this parameter is not used, then ``ListUpdates`` returns up to 100 results and a ``nextToken`` value if applicable.
:rtype: dict
:returns:
"""
pass
def update_cluster_config(self, name: str, resourcesVpcConfig: Dict = None, logging: Dict = None, clientRequestToken: str = None) -> Dict:
"""
Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
You can use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled and private access is disabled. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
You can also use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to ``UPDATING`` (this status transition is eventually consistent). When the update is complete (either ``Failed`` or ``Successful`` ), the cluster status moves to ``Active`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateClusterConfig>`_
**Request Syntax**
::
response = client.update_cluster_config(
name='string',
resourcesVpcConfig={
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
logging={
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
clientRequestToken='string'
)
**Response Syntax**
::
{
'update': {
'id': 'string',
'status': 'InProgress'|'Failed'|'Cancelled'|'Successful',
'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate',
'params': [
{
'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging',
'value': 'string'
},
],
'createdAt': datetime(2015, 1, 1),
'errors': [
{
'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown',
'errorMessage': 'string',
'resourceIds': [
'string',
]
},
]
}
}
**Response Structure**
- *(dict) --*
- **update** *(dict) --*
An object representing an asynchronous update.
- **id** *(string) --*
A UUID that is used to track the update.
- **status** *(string) --*
The current status of the update.
- **type** *(string) --*
The type of the update.
- **params** *(list) --*
A key-value map that contains the parameters associated with the update.
- *(dict) --*
An object representing the details of an update request.
- **type** *(string) --*
The keys associated with an update request.
- **value** *(string) --*
The value of the keys submitted as part of an update request.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the update was created.
- **errors** *(list) --*
Any errors associated with a ``Failed`` update.
- *(dict) --*
An object representing an error when an asynchronous operation fails.
- **errorCode** *(string) --*
A brief description of the error.
* **SubnetNotFound** : One of the subnets associated with the cluster could not be found.
* **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found.
* **EniLimitReached** : You have reached the elastic network interface limit for your account.
* **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses.
* **AccessDenied** : You do not have permissions to perform the specified operation.
* **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS.
* **VpcIdNotFound** : The VPC associated with the cluster could not be found.
- **errorMessage** *(string) --*
A more complete description of the error.
- **resourceIds** *(list) --*
An optional field that contains the resource IDs associated with the error.
- *(string) --*
:type name: string
:param name: **[REQUIRED]**
The name of the Amazon EKS cluster to update.
:type resourcesVpcConfig: dict
:param resourcesVpcConfig:
An object representing the VPC configuration to use for an Amazon EKS cluster.
- **subnetIds** *(list) --*
Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **securityGroupIds** *(list) --*
Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you do not specify a security group, the default security group for your VPC is used.
- *(string) --*
- **endpointPublicAccess** *(boolean) --*
Set this value to ``false`` to disable public access for your cluster\'s Kubernetes API server endpoint. If you disable public access, your cluster\'s Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is ``true`` , which enables public access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
- **endpointPrivateAccess** *(boolean) --*
Set this value to ``true`` to enable private access for your cluster\'s Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster\'s VPC will use the private VPC endpoint. The default value for this parameter is ``false`` , which disables private access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
:type logging: dict
:param logging:
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
:type clientRequestToken: string
:param clientRequestToken:
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
This field is autopopulated if not provided.
:rtype: dict
:returns:
"""
pass
def update_cluster_version(self, name: str, version: str, clientRequestToken: str = None) -> Dict:
"""
Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to ``UPDATING`` (this status transition is eventually consistent). When the update is complete (either ``Failed`` or ``Successful`` ), the cluster status moves to ``Active`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateClusterVersion>`_
**Request Syntax**
::
response = client.update_cluster_version(
name='string',
version='string',
clientRequestToken='string'
)
**Response Syntax**
::
{
'update': {
'id': 'string',
'status': 'InProgress'|'Failed'|'Cancelled'|'Successful',
'type': 'VersionUpdate'|'EndpointAccessUpdate'|'LoggingUpdate',
'params': [
{
'type': 'Version'|'PlatformVersion'|'EndpointPrivateAccess'|'EndpointPublicAccess'|'ClusterLogging',
'value': 'string'
},
],
'createdAt': datetime(2015, 1, 1),
'errors': [
{
'errorCode': 'SubnetNotFound'|'SecurityGroupNotFound'|'EniLimitReached'|'IpNotAvailable'|'AccessDenied'|'OperationNotPermitted'|'VpcIdNotFound'|'Unknown',
'errorMessage': 'string',
'resourceIds': [
'string',
]
},
]
}
}
**Response Structure**
- *(dict) --*
- **update** *(dict) --*
The full description of the specified update
- **id** *(string) --*
A UUID that is used to track the update.
- **status** *(string) --*
The current status of the update.
- **type** *(string) --*
The type of the update.
- **params** *(list) --*
A key-value map that contains the parameters associated with the update.
- *(dict) --*
An object representing the details of an update request.
- **type** *(string) --*
The keys associated with an update request.
- **value** *(string) --*
The value of the keys submitted as part of an update request.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the update was created.
- **errors** *(list) --*
Any errors associated with a ``Failed`` update.
- *(dict) --*
An object representing an error when an asynchronous operation fails.
- **errorCode** *(string) --*
A brief description of the error.
* **SubnetNotFound** : One of the subnets associated with the cluster could not be found.
* **SecurityGroupNotFound** : One of the security groups associated with the cluster could not be found.
* **EniLimitReached** : You have reached the elastic network interface limit for your account.
* **IpNotAvailable** : A subnet associated with the cluster does not have any free IP addresses.
* **AccessDenied** : You do not have permissions to perform the specified operation.
* **OperationNotPermitted** : The service role associated with the cluster does not have the required access permissions for Amazon EKS.
* **VpcIdNotFound** : The VPC associated with the cluster could not be found.
- **errorMessage** *(string) --*
A more complete description of the error.
- **resourceIds** *(list) --*
An optional field that contains the resource IDs associated with the error.
- *(string) --*
:type name: string
:param name: **[REQUIRED]**
The name of the Amazon EKS cluster to update.
:type version: string
:param version: **[REQUIRED]**
The desired Kubernetes version following a successful update.
:type clientRequestToken: string
:param clientRequestToken:
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
This field is autopopulated if not provided.
:rtype: dict
:returns:
"""
pass
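
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated stub above): the methods in
# this class only annotate the boto3 EKS client surface, so real calls go
# through boto3 itself. The cluster name "my-cluster" and the region below are
# illustrative assumptions only.
if __name__ == "__main__":
    import boto3  # requires boto3 and valid AWS credentials

    eks = boto3.client("eks", region_name="us-east-1")  # assumed region
    cluster = eks.describe_cluster(name="my-cluster")["cluster"]
    # The endpoint and certificate-authority data are what kubectl/kubelet
    # need to talk to the cluster, as described in describe_cluster above.
    print(cluster["endpoint"])
    print(cluster["certificateAuthority"]["data"])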
|
lingua_franca/lang/common_data_de.py | NeonDaniel/lingua-franca | 191 | 12601009 | _DE_NUMBERS = {
'null': 0,
'ein': 1,
'eins': 1,
'eine': 1,
'einer': 1,
'einem': 1,
'einen': 1,
'eines': 1,
'zwei': 2,
'drei': 3,
'vier': 4,
'fünf': 5,
'sechs': 6,
'sieben': 7,
'acht': 8,
'neun': 9,
'zehn': 10,
'elf': 11,
'zwölf': 12,
'dreizehn': 13,
'vierzehn': 14,
'fünfzehn': 15,
'sechzehn': 16,
'siebzehn': 17,
'achtzehn': 18,
'neunzehn': 19,
'zwanzig': 20,
'einundzwanzig': 21,
'zweiundzwanzig': 22,
'dreiundzwanzig': 23,
'vierundzwanzig': 24,
'fünfundzwanzig': 25,
'sechsundzwanzig': 26,
'siebenundzwanzig': 27,
'achtundzwanzig': 28,
'neunundzwanzig': 29,
'dreißig': 30,
'einunddreißig': 31,
'vierzig': 40,
'fünfzig': 50,
'sechzig': 60,
'siebzig': 70,
'achtzig': 80,
'neunzig': 90,
'hundert': 100,
'zweihundert': 200,
'dreihundert': 300,
'vierhundert': 400,
'fünfhundert': 500,
'sechshundert': 600,
'siebenhundert': 700,
'achthundert': 800,
'neunhundert': 900,
'tausend': 1000,
'million': 1000000
}
_MONTHS_DE = ['januar', 'februar', 'märz', 'april', 'mai', 'juni',
'juli', 'august', 'september', 'oktober', 'november',
'dezember']
_NUM_STRING_DE = {
0: 'null',
    1: 'ein',  # 'ein Viertel' etc., not 'eins Viertel'
2: 'zwei',
3: 'drei',
4: 'vier',
5: 'fünf',
6: 'sechs',
7: 'sieben',
8: 'acht',
9: 'neun',
10: 'zehn',
11: 'elf',
12: 'zwölf',
13: 'dreizehn',
14: 'vierzehn',
15: 'fünfzehn',
16: 'sechzehn',
17: 'siebzehn',
18: 'achtzehn',
19: 'neunzehn',
20: 'zwanzig',
30: 'dreißig',
40: 'vierzig',
50: 'fünfzig',
60: 'sechzig',
70: 'siebzig',
80: 'achtzig',
90: 'neunzig',
100: 'hundert'
}
# German uses "long scale" https://en.wikipedia.org/wiki/Long_and_short_scales
# Currently, numbers are limited to 1000000000000000000000000,
# but _NUM_POWERS_OF_TEN can be extended to include additional number words
_NUM_POWERS_OF_TEN_DE = [
'', 'tausend', 'Million', 'Milliarde', 'Billion', 'Billiarde', 'Trillion',
'Trilliarde'
]
_FRACTION_STRING_DE = {
2: 'halb',
3: 'drittel',
4: 'viertel',
5: 'fünftel',
6: 'sechstel',
7: 'siebtel',
8: 'achtel',
9: 'neuntel',
10: 'zehntel',
11: 'elftel',
12: 'zwölftel',
13: 'dreizehntel',
14: 'vierzehntel',
15: 'fünfzehntel',
16: 'sechzehntel',
17: 'siebzehntel',
18: 'achtzehntel',
19: 'neunzehntel',
20: 'zwanzigstel'
}
# Numbers below 1 million are written in one word in German, yielding very
# long words.
# In some circumstances it may be better to separate the individual words.
# Set _EXTRA_SPACE_DE=" " to separate numbers below 1 million
# (orthographically incorrect).
# Set _EXTRA_SPACE_DE="" for correct spelling; this is the standard.
# _EXTRA_SPACE_DE = " "
_EXTRA_SPACE_DE = ""
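
# Hedged illustration (not part of the original module): the tables above are
# plain lookup data, so a caller can resolve a single German number word or a
# fraction denominator directly. The helper name below is an assumption added
# only for this sketch, not a lingua_franca API.
def _lookup_de_number_example(word):
    """Return the numeric value of one German number word, or None if unknown."""
    return _DE_NUMBERS.get(word.lower())

# _lookup_de_number_example("dreiundzwanzig") -> 23
# _FRACTION_STRING_DE[4] -> 'viertel' (as in "ein viertel")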
|
03-machine-learning-tabular-crossection/09 - Balanceamento/solutions/solution_09.py | abefukasawa/datascience_course | 331 | 12601010 | <reponame>abefukasawa/datascience_course<gh_stars>100-1000
import matplotlib.pyplot as plt  # imports added so this solution cell runs on its own
import seaborn as sns
plt.figure(figsize=(12, 10))  # set the figure size to 12 by 10 inches
p = sns.heatmap(diabetes_data_copy.corr(), annot=True, cmap='RdYlGn') |
dataset/psenet/check_dataloader.py | doem97/PSENet | 1,213 | 12601022 | from psenet_ctw import PSENET_CTW
import torch
import numpy as np
import cv2
import random
import os
torch.manual_seed(123456)
torch.cuda.manual_seed(123456)
np.random.seed(123456)
random.seed(123456)
def to_rgb(img):
img = img.reshape(img.shape[0], img.shape[1], 1)
img = np.concatenate((img, img, img), axis=2) * 255
return img
def save(img_path, imgs):
if not os.path.exists('vis/'):
os.makedirs('vis/')
for i in range(len(imgs)):
imgs[i] = cv2.copyMakeBorder(imgs[i], 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=[255, 0, 0])
res = np.concatenate(imgs, axis=1)
if type(img_path) != str:
img_name = img_path[0].split('/')[-1]
else:
img_name = img_path.split('/')[-1]
print('saved %s.' % img_name)
cv2.imwrite('vis/' + img_name, res)
# data_loader = SynthLoader(split='train', is_transform=True, img_size=640, kernel_scale=0.5, short_size=640,
# for_rec=True)
# data_loader = IC15Loader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# data_loader = CombineLoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# data_loader = TTLoader(split='train', is_transform=True, img_size=640, kernel_scale=0.8, short_size=640,
# for_rec=True, read_type='pil')
# data_loader = CombineAllLoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
data_loader = PSENET_CTW(split='test', is_transform=True, img_size=736)
# data_loader = MSRALoader(split='train', is_transform=True, img_size=736, kernel_scale=0.5, short_size=736,
# for_rec=True)
# data_loader = CTWv2Loader(split='train', is_transform=True, img_size=640, kernel_scale=0.7, short_size=640,
# for_rec=True)
# data_loader = IC15(split='train', is_transform=True, img_size=640,)
train_loader = torch.utils.data.DataLoader(
data_loader,
batch_size=1,
shuffle=False,
num_workers=0,
drop_last=True)
for batch_idx, imgs in enumerate(train_loader):
if batch_idx > 100:
break
# image_name = data_loader.img_paths[batch_idx].split('/')[-1].split('.')[0]
# print('%d/%d %s'%(batch_idx, len(train_loader), data_loader.img_paths[batch_idx]))
print('%d/%d' % (batch_idx, len(train_loader)))
img = imgs[0].numpy()
img = ((img * np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1) +
np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)) * 255).astype(np.uint8)
img = np.transpose(img, (1, 2, 0))[:, :, ::-1].copy()
# gt_text = to_rgb(gt_texts[0].numpy())
# gt_kernel_0 = to_rgb(gt_kernels[0, 0].numpy())
# gt_kernel_1 = to_rgb(gt_kernels[0, 1].numpy())
# gt_kernel_2 = to_rgb(gt_kernels[0, 2].numpy())
# gt_kernel_3 = to_rgb(gt_kernels[0, 3].numpy())
# gt_kernel_4 = to_rgb(gt_kernels[0, 4].numpy())
# gt_kernel_5 = to_rgb(gt_kernels[0, 5].numpy())
# gt_text_mask = to_rgb(training_masks[0].numpy().astype(np.uint8))
# save('%d.png' % batch_idx, [img, gt_text, gt_kernel_0, gt_kernel_1, gt_kernel_2, gt_kernel_3, gt_kernel_4, gt_kernel_5, gt_text_mask])
save('%d_test.png' % batch_idx, [img]) |
__scraping__/listado.mercadolibre.com.pe/main.py | whitmans-max/python-examples | 140 | 12601037 | <filename>__scraping__/listado.mercadolibre.com.pe/main.py<gh_stars>100-1000
#!/usr/bin/env python3
# date: 2020.04.23
# https://stackoverflow.com/questions/61376200/i-dont-get-all-the-product-description-data-with-scrapy/61377436#61377436
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
#from mercadolibre.items import MercadolibreItem
class MercadolibreperuSpider(CrawlSpider):
name = 'mercadolibreperu'
allowed_domains = ['mercadolibre.com.pe']
start_urls = ['https://listado.mercadolibre.com.pe/lima/mascarilla-n95_ITEM*CONDITION_2230284']
rules = (
#Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
Rule(
LinkExtractor(
restrict_xpaths=(
'//section[@id="results-section"]',
),
),
callback='parse_item',
follow=True
),
)
def parse_item_old(self, response):
#item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
#item['name'] = response.xpath('//div[@id="name"]').get()
#item['description'] = response.xpath('//div[@id="description"]').get()
for element in response.xpath('//h2[@class="item__title list-view-item-title"]/a/span/text()').getall():
#item = {}
item = MercadolibreItem()
item['descripcion'] = element
yield item
def parse_item(self, response):
#item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
#item['name'] = response.xpath('//div[@id="name"]').get()
#item['description'] = response.xpath('//div[@id="description"]').get()
for element in response.xpath('//li[@class="results-item highlighted article stack item-without-installmets"]'):
item = {}
#item = MercadolibreItem()
item['title'] = element.xpath('.//span[@class="main-title"]//text()').get()
item['price_symbol'] = element.xpath('.//span[@class="price__symbol"]//text()').get()
item['price_fraction'] = element.xpath('.//span[@class="price__fraction"]//text()').get()
yield item
from scrapy.crawler import CrawlerProcess
c = CrawlerProcess({
'USER_AGENT': 'Mozilla/5.0',
# save in file CSV, JSON or XML
'FEED_FORMAT': 'csv', # csv, json, xml
'FEED_URI': 'output.csv', #
})
c.crawl(MercadolibreperuSpider)
c.start()
|
mantraml/core/management/commands/upload.py | rohan1790/mantra | 330 | 12601040 | <gh_stars>100-1000
import os
import shutil
import uuid
import subprocess
import mantraml
from mantraml.core.hashing.MantraHashed import MantraHashed
from mantraml.core.management.commands.BaseCommand import BaseCommand
import tempfile
from pathlib import Path
import sys
import getpass
import itertools
import requests
from urllib.parse import urljoin
import json
from mantraml.core.management.commands.importcmd import find_artefacts
class UploadCmd(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("artefacts", type=str, nargs="*")
parser.add_argument("--remote", type=str, default="https://api.mantrahub.io")
return parser
def handle(self, args, unknown):
if not Path("mantra.yml").exists():
print("ERROR: Please run this command from your mantra project directory (i.e. the directory containing `mantra.yml`)")
sys.exit(1)
# collect the artefacts to upload
if len(args.artefacts) == 0:
# get all the datasets, models and results
print("Uploading all datasets, models, tasks and results...")
all_models = find_artefacts("", "models", "model.py")
all_datasets = find_artefacts("", "data", "data.py")
all_tasks = find_artefacts("", "tasks", "task.py")
if Path("results").exists():
all_results = [str(p) for p in Path("results").iterdir() if p.is_dir()]
else:
all_results = []
all_artefacts = list(itertools.chain(all_models, all_datasets, all_tasks, all_results))
else:
all_artefacts = args.artefacts
missing_artefacts = [a for a in all_artefacts if not Path(a).exists()]
if len(missing_artefacts) > 0:
print("ERROR: The following artefact(s) are missing: `%s`" % missing_artefacts)
sys.exit(1)
# TODO: Results will have dependencies, make sure those are taken into account
# 1) Get the hashes for all the files and dependencies
all_hashes = []
for artefact_dir in all_artefacts:
artefact_hash, file_hashes = MantraHashed.get_folder_hash(artefact_dir)
all_hashes.append({
"artefact_dir": artefact_dir,
"artefact_hash": artefact_hash,
"file_hashes": file_hashes,
})
# 2) Get the credentials
# prompt for username and password
mantrahub_user = input("Your mantrahub username: ")
if mantrahub_user.strip() == "":
print("ERROR: The username cannot be empty, quitting...")
sys.exit(1)
mantrahub_pass = getpass.getpass("Your mantrahub password: ")
# 3) Send the request to the server to see which files need to be uploaded
full_url = urljoin(args.remote, "/api/artefacts_diff")
json_payload = json.dumps({"all_hashes": all_hashes})
diff_response = requests.post(full_url, json=json_payload, auth=(mantrahub_user, mantrahub_pass))
        if diff_response.status_code == 200:
            diff = json.loads(diff_response.json())["diff_hashes"]
        elif diff_response.status_code == 403:
            print('Invalid username/password')
            return
        else:
            # any other status would leave `diff` undefined below, so bail out early
            print("ERROR: Unexpected response from the server (HTTP %s), quitting..."
                  % diff_response.status_code)
            return
if diff:
upload_url_base = urljoin(args.remote, "api/upload_file/")
for artefact in diff:
for k,v in artefact["file_hashes"].items():
print("Uploading `%s`..." % v["path"])
files = {'file': open(v["path"], 'rb')}
h = {"Content-Disposition": "attachment; filename=%s" % v["path"]}
r = requests.put(upload_url_base+v["path"], files=files, headers=h,
auth=(mantrahub_user, mantrahub_pass))
else:
print("No new files to upload...")
# Finally, commit all the results
commit_url = urljoin(args.remote, "api/artefacts_diff_commit")
json_payload = json.dumps({"all_hashes": all_hashes, "diff_hashes": diff})
commit_response = requests.post(commit_url, json=json_payload, auth=(mantrahub_user, mantrahub_pass))
if commit_response.status_code != requests.codes.ok:
print("ERROR: Commit not successful: %s" % commit_response.text)
|
ssod/utils/signature.py | huimlight/SoftTeacher | 604 | 12601045 | <gh_stars>100-1000
import inspect
def parse_method_info(method):
sig = inspect.signature(method)
params = sig.parameters
return params
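
# Hedged usage sketch (not part of the original module): parse_method_info just
# exposes inspect.signature(...).parameters, so parameter names and defaults can
# be read off the returned mapping. The demo function below is an assumption.
if __name__ == "__main__":
    def _demo(a, b=2, *args, **kwargs):
        pass

    params = parse_method_info(_demo)
    # keys: 'a', 'b', 'args', 'kwargs'; params['b'].default == 2
    print(list(params), params["b"].default)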
|
mmf/datasets/builders/ocrvqa/dataset.py | anas-awadalla/mmf | 3,252 | 12601105 | # Copyright (c) Facebook, Inc. and its affiliates.
from mmf.datasets.builders.textvqa.dataset import TextVQADataset
class OCRVQADataset(TextVQADataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
super().__init__(config, dataset_type, imdb_file_index, *args, **kwargs)
self.dataset_name = "ocrvqa"
def preprocess_sample_info(self, sample_info):
# Do nothing in this case
return sample_info
|
tests/validation/response/test_produces_validation.py | maroux/flex | 160 | 12601107 | import pytest
from flex.exceptions import ValidationError
from flex.validation.response import (
validate_response,
)
from flex.error_messages import MESSAGES
from tests.factories import (
ResponseFactory,
SchemaFactory,
)
from tests.utils import assert_message_in_errors
#
# produces mimetype validation.
#
def test_produces_validation_is_noop_when_produces_not_declared():
"""
Test that the `produces` validation is a noop when no content types are
declared.
"""
response = ResponseFactory(
content_type='application/json',
url='http://www.example.com/get',
)
schema = SchemaFactory(
paths={
'/get': {'get': {'responses': {'200': {'description': 'Success'}}}},
},
)
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_produces_validation_valid_mimetype_from_global_definition():
"""
Test that a response content_type that is in the global api produces
definitions is valid.
"""
response = ResponseFactory(
content_type='application/json',
url='http://www.example.com/get',
)
schema = SchemaFactory(
produces=['application/json'],
paths={
'/get': {'get': {'responses': {'200': {'description': 'Success'}}}},
},
)
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_produces_validation_invalid_mimetype_from_global_definition():
"""
    Test that a response content_type that is not in the global api produces
    definitions is invalid.
"""
response = ResponseFactory(
content_type='application/json',
url='http://www.example.com/get',
)
schema = SchemaFactory(
produces=['application/xml'],
paths={
'/get': {'get': {'responses': {'200': {'description': 'Success'}}}},
},
)
with pytest.raises(ValidationError):
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_produces_validation_for_valid_mimetype_from_operation_definition():
"""
Test that when `produces` is defined in an operation definition, that the
local value is used in place of any global `produces` definition.
"""
response = ResponseFactory(
content_type='application/json',
url='http://www.example.com/get',
)
schema = SchemaFactory(
produces=['application/xml'],
paths={
'/get': {'get': {
'responses': {'200': {'description': 'Success'}},
'produces': ['application/json'],
}},
},
)
validate_response(
response=response,
request_method='get',
schema=schema,
)
def test_produces_validation_for_invalid_mimetype_from_operation_definition():
"""
    Test that when the operation definition has overridden the globally allowed
    mimetypes, the local value is used for validation.
"""
response = ResponseFactory(
content_type='application/xml',
url='http://www.example.com/get',
)
schema = SchemaFactory(
produces=['application/xml'],
paths={
'/get': {'get': {
'responses': {'200': {'description': 'Success'}},
'produces': ['application/json'],
}},
},
)
with pytest.raises(ValidationError) as err:
validate_response(
response=response,
request_method='get',
schema=schema,
)
assert_message_in_errors(
MESSAGES['content_type']['invalid'],
err.value.detail,
'body.produces',
)
|
Lib/objc/_VoiceShortcutClient.py | snazari/Pyto | 701 | 12601115 | <reponame>snazari/Pyto
"""
Classes from the 'VoiceShortcutClient' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
VCWidgetWorkflow = _Class("VCWidgetWorkflow")
WFSingleConnectionXPCListener = _Class("WFSingleConnectionXPCListener")
VCInteractionDonation = _Class("VCInteractionDonation")
WFCoreDataChangeNotification = _Class("WFCoreDataChangeNotification")
WFCoreDataObjectChange = _Class("WFCoreDataObjectChange")
WFObservableResult = _Class("WFObservableResult")
WFObservableObjectResult = _Class("WFObservableObjectResult")
WFObservableArrayResult = _Class("WFObservableArrayResult")
WFColor = _Class("WFColor")
WFWorkflowRunDescriptor = _Class("WFWorkflowRunDescriptor")
WFINShortcutRunDescriptor = _Class("WFINShortcutRunDescriptor")
WFWorkflowDatabaseRunDescriptor = _Class("WFWorkflowDatabaseRunDescriptor")
VCActionDonationFetcher = _Class("VCActionDonationFetcher")
VCUserActivityDonationFetcher = _Class("VCUserActivityDonationFetcher")
VCInteractionDonationFetcher = _Class("VCInteractionDonationFetcher")
VCUserActivityDonation = _Class("VCUserActivityDonation")
WFGradient = _Class("WFGradient")
WFWorkflowRunnerClient = _Class("WFWorkflowRunnerClient")
WFActionExtensionWorkflowRunnerClient = _Class("WFActionExtensionWorkflowRunnerClient")
WFAccessibilityWorkflowRunnerClient = _Class("WFAccessibilityWorkflowRunnerClient")
WFWidgetWorkflowRunnerClient = _Class("WFWidgetWorkflowRunnerClient")
WFSleepWorkflowRunnerClient = _Class("WFSleepWorkflowRunnerClient")
WFSuggestionsWorkflowRunnerClient = _Class("WFSuggestionsWorkflowRunnerClient")
VCAccessSpecifier = _Class("VCAccessSpecifier")
VCSleepAction = _Class("VCSleepAction")
VCSleepDonationAction = _Class("VCSleepDonationAction")
VCSleepOpenAppAction = _Class("VCSleepOpenAppAction")
VCSleepHomeAccessoryAction = _Class("VCSleepHomeAccessoryAction")
WFShareSheetWorkflow = _Class("WFShareSheetWorkflow")
WFWorkflowRunRequest = _Class("WFWorkflowRunRequest")
VCVoiceShortcut = _Class("VCVoiceShortcut")
WFRemoteImageDrawingContext = _Class("WFRemoteImageDrawingContext")
WFDatabaseObjectDescriptor = _Class("WFDatabaseObjectDescriptor")
WFWorkflowDescriptor = _Class("WFWorkflowDescriptor")
WFAccessibilityWorkflow = _Class("WFAccessibilityWorkflow")
WFCoreDataResultState = _Class("WFCoreDataResultState")
WFWorkflowQuery = _Class("WFWorkflowQuery")
WFWorkflowRunningContext = _Class("WFWorkflowRunningContext")
VCConfiguredSleepWorkflow = _Class("VCConfiguredSleepWorkflow")
VCSleepWorkflow = _Class("VCSleepWorkflow")
VCVoiceShortcutClient = _Class("VCVoiceShortcutClient")
|
bandicoot/tests/test_spatial.py | Seabreg/bandicoot | 209 | 12601144 | # The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import unittest
import os
import bandicoot as bc
import numpy as np
class TestChurn(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestChurn._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestChurn._dir_changed = True
self.user = bc.io.read_csv("churn_user", "samples",
describe=False, warnings=False)
def test_churn(self):
distribution = bc.spatial.churn_rate(self.user, summary=None)
v1 = [1 / 3, 1 / 3, 1 / 3, 0]
v2 = v1
v3 = [1 / 4, 3 / 4, 0, 0]
v4 = [0, 0, 1 / 2, 1 / 2]
cos_1 = 1 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
cos_2 = 1 - np.dot(v2, v3) / (np.linalg.norm(v2) * np.linalg.norm(v3))
cos_3 = 1 - np.dot(v3, v4) / (np.linalg.norm(v3) * np.linalg.norm(v4))
np.testing.assert_almost_equal(distribution, [cos_1, cos_2, cos_3])
churn_rate = bc.spatial.churn_rate(self.user)
np.testing.assert_almost_equal(churn_rate['mean'],
np.mean([cos_1, cos_2, cos_3]))
np.testing.assert_almost_equal(churn_rate['std'],
np.std([cos_1, cos_2, cos_3]))
|
multitask.py | neulab/RIPPLe | 130 | 12601226 | import torch.nn as nn
from pytorch_transformers import (
BertForSequenceClassification, BertTokenizer
)
class BertForMultitaskClassification(BertForSequenceClassification):
def __init__(self, config):
assert hasattr(config, "num_labels_per_task")
assert sum(config.num_labels_per_task) == config.num_labels
super().__init__(config)
self.num_tasks = len(config.num_labels_per_task)
def loss_fct(self, logits, labels):
loss = 0
inner_loss_fct = nn.CrossEntropyLoss(reduction="none")
offset = 0
task_masks = labels[:, 1:].float() # this conversion is inefficient...
# TODO: if this turns out to be slow, optimize
for task_id, nl in enumerate(self.config.num_labels_per_task):
task_loss = inner_loss_fct(logits[:, offset:offset+nl],
labels[:, 0])
loss += (task_loss * task_masks[:, task_id]).mean()
offset += nl
return loss
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss = self.loss_fct(logits.view(-1, self.num_labels), labels.view(-1, 1+self.num_tasks))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
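# Label layout sketch (illustrative; the task sizes below are hypothetical):
# with config.num_labels_per_task = [2, 3], each row of `labels` is
# [gold_label, task_mask_0, task_mask_1]; the mask column of the example's
# task is 1 and the others are 0, so only that task's cross-entropy term
# survives the masking in loss_fct, e.g.
#   labels = torch.tensor([[1, 1, 0],   # task 0 example, gold label 1
#                          [0, 0, 1]])  # task 1 example, gold label 0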
|
datasketch/__init__.py | sergiiz/datasketch | 1,771 | 12601260 | <reponame>sergiiz/datasketch<filename>datasketch/__init__.py
from datasketch.hyperloglog import HyperLogLog, HyperLogLogPlusPlus
from datasketch.minhash import MinHash
from datasketch.b_bit_minhash import bBitMinHash
from datasketch.lsh import MinHashLSH
from datasketch.weighted_minhash import WeightedMinHash, WeightedMinHashGenerator
from datasketch.lshforest import MinHashLSHForest
from datasketch.lshensemble import MinHashLSHEnsemble
from datasketch.lean_minhash import LeanMinHash
from datasketch.hashfunc import sha1_hash32
# Alias
WeightedMinHashLSH = MinHashLSH
WeightedMinHashLSHForest = MinHashLSHForest
# Version
from datasketch.version import __version__
|
src/starkware/air/rescue/rescue_hash_test.py | ChihChengLiang/ethSTARK | 123 | 12601268 | <reponame>ChihChengLiang/ethSTARK<filename>src/starkware/air/rescue/rescue_hash_test.py
from .rescue_hash import rescue_hash
def test_rescue_hash():
# The list of constants being compared to the result of rescue_hash was generated by performing
# the hash on [1, 2, 3, 4, 5, 6, 7, 8] with the marvellous_hash function given in
# https://starkware.co/hash-challenge-implementation-reference-code/#marvellous.
assert rescue_hash([1, 2, 3, 4, 5, 6, 7, 8]) == \
[1701009513277077950, 394821372906024995,
428352609193758013, 1822402221604548281]
|
BitTornado/tests/test_bencode.py | crossbrowsertesting/BitTornado | 116 | 12601280 | import unittest
from ..Meta.bencode import bencode, bdecode, Bencached
class CodecTests(unittest.TestCase):
def test_bencode(self):
"""Test encoding of encodable and unencodable data structures"""
self.assertEqual(bencode(4), b'i4e')
self.assertEqual(bencode(0), b'i0e')
self.assertEqual(bencode(-10), b'i-10e')
self.assertEqual(bencode(12345678901234567890),
b'i12345678901234567890e')
self.assertEqual(bencode(''), b'0:')
self.assertEqual(bencode('abc'), b'3:abc')
self.assertEqual(bencode('1234567890'), b'10:1234567890')
self.assertEqual(bencode([]), b'le')
self.assertEqual(bencode([1, 2, 3]), b'li1ei2ei3ee')
self.assertEqual(bencode([['Alice', 'Bob'], [2, 3]]),
b'll5:Alice3:Bobeli2ei3eee')
self.assertEqual(bencode({}), b'de')
self.assertEqual(bencode({'age': 25, 'eyes': 'blue'}),
b'd3:agei25e4:eyes4:bluee')
self.assertEqual(bencode({'spam.mp3': {'author': 'Alice',
'length': 100000}}),
b'd8:spam.mp3d6:author5:Alice6:lengthi100000eee')
self.assertRaises(TypeError, bencode, {1: 'foo'})
self.assertRaises(TypeError, bencode, {'foo': 1.0})
cached = Bencached.cache({'age': 25})
self.assertEqual(bencode(cached), cached.bencoded)
self.assertEqual(bencode(''), bencode(b''))
def test_bdecode(self):
"""Test decoding of valid and erroneous sample strings"""
self.assertWarns(Warning, bdecode, b'0:0:')
self.assertRaises(ValueError, bdecode, b'ie')
self.assertRaises(ValueError, bdecode, b'i341foo382e')
self.assertEqual(bdecode(b'i4e'), 4)
self.assertEqual(bdecode(b'i0e'), 0)
self.assertEqual(bdecode(b'i123456789e'), 123456789)
self.assertEqual(bdecode(b'i-10e'), -10)
self.assertRaises(ValueError, bdecode, b'i-0e')
self.assertRaises(ValueError, bdecode, b'i123')
self.assertRaises(ValueError, bdecode, b'')
self.assertWarns(Warning, bdecode, b'i6easd')
self.assertRaises(ValueError, bdecode, b'35208734823ljdahflajhdf')
self.assertWarns(Warning, bdecode, b'2:abfdjslhfld')
self.assertEqual(bdecode(b'0:'), '')
self.assertEqual(bdecode(b'3:abc'), 'abc')
self.assertEqual(bdecode(b'10:1234567890'), '1234567890')
self.assertRaises(ValueError, bdecode, b'02:xy')
self.assertRaises(ValueError, bdecode, b'l')
self.assertEqual(bdecode(b'le'), [])
self.assertWarns(Warning, bdecode, b'leanfdldjfh')
self.assertEqual(bdecode(b'l0:0:0:e'), ['', '', ''])
self.assertRaises(ValueError, bdecode, b'relwjhrlewjh')
self.assertEqual(bdecode(b'li1ei2ei3ee'), [1, 2, 3])
self.assertEqual(bdecode(b'l3:asd2:xye'), ['asd', 'xy'])
self.assertEqual(bdecode(b'll5:Alice3:Bobeli2ei3eee'),
[['Alice', 'Bob'], [2, 3]])
self.assertRaises(ValueError, bdecode, b'd')
self.assertWarns(Warning, bdecode, b'defoobar')
self.assertEqual(bdecode(b'de'), {})
self.assertEqual(bdecode(b'd3:agei25e4:eyes4:bluee'),
{'age': 25, 'eyes': 'blue'})
self.assertEqual(
bdecode(b'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'),
{'spam.mp3': {'author': 'Alice', 'length': 100000}})
self.assertRaises(ValueError, bdecode, b'd3:fooe')
self.assertRaises(ValueError, bdecode, b'di1e0:e')
self.assertRaises(ValueError, bdecode, b'd1:b0:1:a0:e')
self.assertRaises(ValueError, bdecode, b'd1:a0:1:a0:e')
self.assertRaises(ValueError, bdecode, b'i03e')
self.assertRaises(ValueError, bdecode, b'l01:ae')
self.assertRaises(ValueError, bdecode, b'9999:x')
self.assertRaises(ValueError, bdecode, b'l0:')
self.assertRaises(ValueError, bdecode, b'd0:0:')
self.assertRaises(ValueError, bdecode, b'd0:')
if __name__ == '__main__':
unittest.main()
|
notebooks-text-format/linreg_hierarchical_non_centered_pymc3.py | arpitvaghela/probml-notebooks | 166 | 12601289 | <reponame>arpitvaghela/probml-notebooks
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/bayes_stats/linreg_hierarchical_non_centered_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="f_py_lrTPdK1"
#
#
# # Hierarchical non-centered Bayesian Linear Regression in PyMC3
#
# The text and code for this notebook are taken directly from [this blog post](https://twiecki.io/blog/2017/02/08/bayesian-hierchical-non-centered/)
# by <NAME>. [Original notebook](https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb)
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="XcsJEi91Qelr" outputId="5acfa41f-63a5-4d1b-a397-35ba6fb959d2"
# !pip install arviz
# + id="QPTA4cZCPdK1"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
import theano
import seaborn as sns
sns.set_style('whitegrid')
np.random.seed(123)
url = 'https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/radon.csv?raw=true'
data = pd.read_csv(url)
#data = pd.read_csv('../data/radon.csv')
data['log_radon'] = data['log_radon'].astype(theano.config.floatX)
county_names = data.county.unique()
county_idx = data.county_code.values
n_counties = len(data.county.unique())
# + [markdown] id="KdWGECP9PdK1"
# ## The intuitive specification
#
# Usually, hierarchical models are specified in a *centered* way. In a regression model, individual slopes would be centered around a group mean with a certain group variance, which controls the shrinkage:
# + id="9jkliKmhPdK1"
with pm.Model() as hierarchical_model_centered:
# Hyperpriors for group nodes
mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
sigma_a = pm.HalfCauchy('sigma_a', 5)
mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
sigma_b = pm.HalfCauchy('sigma_b', 5)
# Intercept for each county, distributed around group mean mu_a
# Above we just set mu and sd to a fixed value while here we
# plug in a common group distribution for all a and b (which are
# vectors of length n_counties).
a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_counties)
    # Slope for each county, distributed around group mean mu_b
b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_counties)
# Model error
eps = pm.HalfCauchy('eps', 5)
# Linear regression
radon_est = a[county_idx] + b[county_idx] * data.floor.values
# Data likelihood
radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
# + colab={"base_uri": "https://localhost:8080/"} id="tGJqbLoKPdK1" outputId="1c74d94e-2468-4529-cb89-d4bbdce3eadb"
# Inference button (TM)!
with hierarchical_model_centered:
hierarchical_centered_trace = pm.sample(draws=5000, tune=1000)[1000:]
# + colab={"base_uri": "https://localhost:8080/", "height": 495} id="y6Hr50CfPdK2" outputId="e75e74b5-83c7-4edf-fb9b-8a687503884d"
pm.traceplot(hierarchical_centered_trace);
# + [markdown] id="OAbZ_QXGPdK2"
# I have seen plenty of traces with terrible convergences but this one might look fine to the unassuming eye. Perhaps `sigma_b` has some problems, so let's look at the Rhat:
# + colab={"base_uri": "https://localhost:8080/"} id="EdTq66JUPdK2" outputId="10a80577-cd4f-4348-d71d-d989468b46d3"
print('Rhat(sigma_b) = {}'.format(pm.diagnostics.gelman_rubin(hierarchical_centered_trace)['sigma_b']))
# + [markdown] id="JHSPBEbQPdK2"
# Not too bad -- well below 1.01. I used to think this wasn't a big deal but <NAME> in his [StanCon 2017 talk](https://www.youtube.com/watch?v=DJ0c7Bm5Djk&feature=youtu.be&t=4h40m9s) makes a strong point that it is actually very problematic. To understand what's going on, let's take a closer look at the slopes `b` and their group variance (i.e. how far they are allowed to move from the mean) `sigma_b`. I'm just plotting a single chain now.
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="AzfoQz2RPdK2" outputId="7fed960d-9835-4488-cabf-70f328bb0fe8"
fig, axs = plt.subplots(nrows=2)
axs[0].plot(hierarchical_centered_trace.get_values('sigma_b', chains=1), alpha=.5);
axs[0].set(ylabel='sigma_b');
axs[1].plot(hierarchical_centered_trace.get_values('b', chains=1), alpha=.5);
axs[1].set(ylabel='b');
# + [markdown] id="0zBgOlmnPdK2"
# `sigma_b` seems to drift into this area of very small values and get stuck there for a while. This is a common pattern and the sampler is trying to tell you that there is a region in space that it can't quite explore efficiently. While stuck down there, the slopes `b_i` become all squished together. We've entered **The Funnel of Hell** (it's just called the funnel, I added the last part for dramatic effect).
# + [markdown] id="iTckxwW7PdK2"
# ## The Funnel of Hell (and how to escape it)
#
# Let's look at the joint posterior of a single slope `b` (I randomly chose the 75th one) and the slope group variance `sigma_b`.
# + colab={"base_uri": "https://localhost:8080/", "height": 493} id="e1gZ_JZSPdK2" outputId="59a35ecd-cdfe-458d-9528-77ed39f0c5de"
x = pd.Series(hierarchical_centered_trace['b'][:, 75], name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'], name='slope group variance sigma_b')
sns.jointplot(x, y, ylim=(0, .7));
# + [markdown] id="qDSDPDswPdK3"
# This makes sense, as the slope group variance goes to zero (or, said differently, we apply maximum shrinkage), individual slopes are not allowed to deviate from the slope group mean, so they all collapse to the group mean.
#
# While this property of the posterior in itself is not problematic, it makes the job extremely difficult for our sampler. Imagine a [Metropolis-Hastings](https://twiecki.github.io/blog/2015/11/10/mcmc-sampling/) exploring this space with a medium step-size (we're using NUTS here but the intuition works the same): in the wider top region we can comfortably make larger jumps to explore the space efficiently. However, once we move to the narrow bottom region we can change `b_75` and `sigma_b` only by tiny amounts. This causes the sampler to become trapped in that region of space. Most of the proposals will be rejected because our step-size is too large for this narrow part of the space and exploration will be very inefficient.
#
# You might wonder if we could somehow choose the step-size based on the denseness (or curvature) of the space. Indeed that's possible and it's called [Riemannian HMC](https://arxiv.org/abs/0907.1100). It works very well but is quite costly to run. Here, we will explore a different, simpler method.
#
# Finally, note that this problem does not exist for the intercept parameters `a`. Because we can determine individual intercepts `a_i` with enough confidence, `sigma_a` is not small enough to be problematic. Thus, the funnel of hell can be a problem in hierarchical models, but it does not have to be. (Thanks to <NAME> for pointing this out).
#
#
# ## Reparameterization
#
# If we can't easily make the sampler step-size adjust to the region of space, maybe we can adjust the region of space to make it simpler for the sampler? This is indeed possible and quite simple with a small reparameterization trick, we will call this the *non-centered* version.
# + id="Hx9btgsoPdK3"
with pm.Model() as hierarchical_model_non_centered:
# Hyperpriors for group nodes
mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
sigma_a = pm.HalfCauchy('sigma_a', 5)
mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
sigma_b = pm.HalfCauchy('sigma_b', 5)
# Before:
# a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_counties)
# Transformed:
a_offset = pm.Normal('a_offset', mu=0, sd=1, shape=n_counties)
a = pm.Deterministic("a", mu_a + a_offset * sigma_a)
# Before:
# b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_counties)
# Now:
b_offset = pm.Normal('b_offset', mu=0, sd=1, shape=n_counties)
b = pm.Deterministic("b", mu_b + b_offset * sigma_b)
# Model error
eps = pm.HalfCauchy('eps', 5)
radon_est = a[county_idx] + b[county_idx] * data.floor.values
# Data likelihood
radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
# + [markdown] id="3Be9WYvFPdK3"
# Pay attention to the definitions of `a_offset`, `a`, `b_offset`, and `b` and compare them to before (commented out). What's going on here? It's pretty neat actually. Instead of saying that our individual slopes `b` are normally distributed around a group mean (i.e. modeling their absolute values directly), we can say that they are offset from a group mean by a certain value (`b_offset`; i.e. modeling their values relative to that mean). Now we still have to consider how far from that mean we actually allow things to deviate (i.e. how much shrinkage we apply). This is where `sigma_b` makes a comeback. We can simply multiply the offset by this scaling factor to get the same effect as before, just under a different parameterization. For a more formal introduction, see e.g. [Betancourt & Girolami (2013)](https://arxiv.org/pdf/1312.0906.pdf).
#
# Critically, `b_offset` and `sigma_b` are now mostly independent. This will become clearer soon. Let's first look at whether this transform helped our sampling:
# + colab={"base_uri": "https://localhost:8080/"} id="GsBLxJF-PdK3" outputId="17c7ffb0-9986-49e9-9736-2cd0044e6fa1"
# Inference button (TM)!
with hierarchical_model_non_centered:
hierarchical_non_centered_trace = pm.sample(draws=5000, tune=1000)[1000:]
# + id="5-IwvuaDPdK3" outputId="0193a4dc-6dc5-4280-b520-00fa1a320c72"
pm.traceplot(hierarchical_non_centered_trace, varnames=['sigma_b']);
# + [markdown] id="b1lMZjlxPdK3"
# That looks much better as also confirmed by the joint plot:
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="V_MkqMhHPdK3" outputId="333694d2-c61c-4320-ef66-146def97329b"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(hierarchical_centered_trace['b'][:, 75], name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'], name='slope group variance sigma_b')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', ylabel='sigma_b', xlabel='b_75')
x = pd.Series(hierarchical_non_centered_trace['b'][:, 75], name='slope b_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'], name='slope group variance sigma_b')
axs[1].plot(x, y, '.');
axs[1].set(title='Non-centered', xlabel='b_75');
# + [markdown] id="Q_W701t6PdK3"
# To really drive this home, let's also compare the `sigma_b` marginal posteriors of the two models:
# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="XJxFSFbnPdK3" outputId="462dbcfa-989a-45eb-806d-24a57a906e79"
pm.kdeplot(np.stack([hierarchical_centered_trace['sigma_b'], hierarchical_non_centered_trace['sigma_b'], ]).T)
plt.axvline(hierarchical_centered_trace['sigma_b'].mean(), color='b', linestyle='--')
plt.axvline(hierarchical_non_centered_trace['sigma_b'].mean(), color='g', linestyle='--')
plt.legend(['Centered', 'Non-centered', 'Centered posterior mean', 'Non-centered posterior mean']);
plt.xlabel('sigma_b'); plt.ylabel('Probability Density');
# + [markdown] id="QXe9_4vIPdK3"
# That's crazy -- there's a large region of very small `sigma_b` values that the sampler could not even explore before. In other words, our previous inferences ("Centered") were severely biased towards higher values of `sigma_b`. Indeed, if you look at the [previous blog post](https://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/) the sampler never even got stuck in that low region causing me to believe everything was fine. These issues are hard to detect and very subtle, but they are meaningful as demonstrated by the sizable difference in posterior mean.
#
# But what does this concretely mean for our analysis? Over-estimating `sigma_b` means that we have a biased (=false) belief that we can tell individual slopes apart better than we actually can. There is less information in the individual slopes than what we estimated.
# + [markdown] id="3G2KQzuvPdK3"
# ### Why does the reparameterized model work better?
#
# To more clearly understand why this model works better, let's look at the joint distribution of `b_offset`:
# + colab={"base_uri": "https://localhost:8080/", "height": 510} id="X98SiQX-PdK3" outputId="4b5658f6-e2f9-4fe0-f5e9-6105e49a429e"
x = pd.Series(hierarchical_non_centered_trace['b_offset'][:, 75], name='slope b_offset_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'], name='slope group variance sigma_b')
sns.jointplot(x, y, ylim=(0, .7))
# + [markdown] id="iUUIWErkPdK3"
# This is the space the sampler sees; you can see how the funnel is flattened out. We can freely change the (relative) slope offset parameters even if the slope group variance is tiny as it just acts as a scaling parameter.
#
# Note that the funnel is still there -- it's a perfectly valid property of the model -- but the sampler has a much easier time exploring it in this different parameterization.
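# + [markdown]
# As a quick sanity check of that scaling argument, here is a minimal NumPy sketch (the group mean and scale below are made-up values): rescaling a standard-normal offset reproduces draws from the centered prior.
# +
offset = np.random.randn(100000)
mu_b_val, sigma_b_val = 1.5, 0.3  # hypothetical group mean and scale
b_non_centered = mu_b_val + offset * sigma_b_val
b_centered = np.random.normal(mu_b_val, sigma_b_val, size=100000)
print(b_non_centered.mean(), b_non_centered.std())
print(b_centered.mean(), b_centered.std())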
# + [markdown] id="5Klof7DEPdK3"
# ## Why hierarchical models are Bayesian
#
# Finally, I want to take the opportunity to make another point that is not directly related to hierarchical models but can be demonstrated quite well here.
#
# Usually when talking about the benefits of Bayesian statistics we talk about priors, uncertainty, and flexibility when coding models using Probabilistic Programming. However, an even more important property is rarely mentioned because it is much harder to communicate. <NAME> touched on this point in his tweet:
# + [markdown] id="i4dat7gDPdK3"
# <blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">It's interesting that many summarize Bayes as being about priors; but real power is its focus on integrals/expectations over maxima/modes</p>— <NAME> (@rosstaylor90) <a href="https://twitter.com/rosstaylor90/status/827263854002401281">February 2, 2017</a></blockquote>
# <script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
# + [markdown] id="4tJwmkxRPdK3"
# <NAME> makes a similar point when he says ["Expectations are the only thing that make sense."](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s)
#
# But what's wrong with maxima/modes? Aren't those really close to the posterior mean (i.e. the expectation)? Unfortunately, that's only the case for the simple models we teach to build up intuitions. In complex models, like the hierarchical one, the MAP can be far away and not be interesting or meaningful at all.
#
# Let's compare the posterior mode (i.e. the MAP) to the posterior mean of our hierarchical linear regression model:
# + colab={"base_uri": "https://localhost:8080/"} id="KxekXiFaPdK3" outputId="b5071e65-c849-4b9b-bf3c-5d96c6a9cdef"
with hierarchical_model_centered:
mode = pm.find_MAP()
# + colab={"base_uri": "https://localhost:8080/"} id="df4orfyOPdK3" outputId="208dd434-cd4d-45da-ffc2-bf9f806b36a6"
mode['b']
# + colab={"base_uri": "https://localhost:8080/", "height": 158} id="rsadfvlSPdK3" outputId="2fa0f3b6-48b5-4cef-d60b-dca2460f895e"
np.exp(mode['sigma_b_log_'])
# + [markdown] id="muQpdSipPdK3"
# As you can see, the slopes are all identical and the group slope variance is effectively zero. The reason is again related to the funnel. The MAP only cares about the probability **density** which is highest at the bottom of the funnel.
#
# But if you could only choose one point in parameter space to summarize the posterior above, would this be the one you'd pick? Probably not.
#
# Let's instead look at the **Expected Value** (i.e. posterior mean) which is computed by integrating probability **density** and **volume** to provide probability **mass** -- the thing we really care about. Under the hood, that's the integration performed by the MCMC sampler.
# + id="dXcdSr_UPdK3" outputId="337435da-c4c5-4347-fd31-3d86eee64300"
hierarchical_non_centered_trace['b'].mean(axis=0)
# + id="9h-FzVGJPdK3" outputId="c5aa9395-4bd5-494b-e13e-f7cd7ea45b4c"
hierarchical_non_centered_trace['sigma_b'].mean(axis=0)
# + [markdown] id="-AL504GdPdK3"
# Quite a difference. This also explains why it can be a bad idea to use the MAP to initialize your sampler: in certain models the MAP is not at all close to the region you want to explore (i.e. the "typical set").
#
# This strong divergence of the MAP and the Posterior Mean does not only happen in hierarchical models but also in high dimensional ones, where our intuitions from low-dimensional spaces get twisted in serious ways. [This talk by <NAME>](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s) makes the point quite nicely.
#
# So why do people -- especially in Machine Learning -- still use the MAP/MLE? As we all learned in high school first hand, integration is much harder than differentiation. This is really the only reason.
#
# Final disclaimer: This might provide the impression that this is a property of being in a Bayesian framework, which is not true. Technically, we can talk about Expectations vs Modes irrespective of that. Bayesian statistics just happens to provide a very intuitive and flexible framework for expressing and estimating these models.
#
# See [here](https://rawgithub.com/twiecki/WhileMyMCMCGentlySamples/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb) for the underlying notebook of this blog post.
# + [markdown] id="SzMHO6fNPdK3"
# ## Acknowledgements
#
# Thanks to [<NAME>](https://twitter.com/jonsedar) for helpful comments on an earlier draft.
|
sqllineage/io.py | treff7es/sqllineage | 238 | 12601329 | <reponame>treff7es/sqllineage<filename>sqllineage/io.py
from typing import Any, Dict, List
from networkx import DiGraph
def to_cytoscape(graph: DiGraph, compound=False) -> List[Dict[str, Dict[str, Any]]]:
"""
    Compound nodes are used to group nodes together under their parent.
See https://js.cytoscape.org/#notation/compound-nodes for reference.
"""
if compound:
parents_dict = {
node.parent: {
"name": str(node.parent) if node.parent is not None else "<unknown>",
"type": type(node.parent).__name__
if node.parent is not None
else "Table or SubQuery",
}
for node in graph.nodes
}
nodes = [
{
"data": {
"id": str(node),
"parent": parents_dict[node.parent]["name"],
"parent_candidates": [
{"name": str(p), "type": type(p).__name__}
for p in node.parent_candidates
],
"type": type(node).__name__,
}
}
for node in graph.nodes
]
nodes += [
{"data": {"id": attr["name"], "type": attr["type"]}}
for _, attr in parents_dict.items()
]
else:
nodes = [{"data": {"id": str(node)}} for node in graph.nodes]
edges: List[Dict[str, Dict[str, Any]]] = [
{"data": {"id": f"e{i}", "source": str(edge[0]), "target": str(edge[1])}}
for i, edge in enumerate(graph.edges)
]
return nodes + edges
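# Minimal usage sketch (illustrative): with compound=False the helper only
# needs hashable graph nodes, so plain strings are enough to see the element
# layout; real sqllineage graphs hold Table/SubQuery objects instead.
if __name__ == "__main__":
    g = DiGraph()
    g.add_edge("source_table", "target_table")
    print(to_cytoscape(g))  # two node dicts followed by one edge dict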
|
python/tests/testdata/region_AD.py | rodgar-nvkz/python-phonenumbers | 2,424 | 12601332 | """Auto-generated file, do not edit by hand. AD metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AD = PhoneMetadata(id='AD', country_code=376, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='\\d{6}', possible_length=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='\\d{6}', example_number='123456', possible_length=(6,)))
|
bindings/python/test_m68k.py | columbia/egalito-capstone | 5,220 | 12601344 | #!/usr/bin/env python
# Capstone Python bindings, by <NAME> <<EMAIL>>
from __future__ import print_function
from capstone import *
from capstone.m68k import *
from xprint import to_hex, to_x
M68K_CODE = b"\x4c\x00\x54\x04\x48\xe7\xe0\x30\x4c\xdf\x0c\x07\xd4\x40\x87\x5a\x4e\x71\x02\xb4\xc0\xde\xc0\xde\x5c\x00\x1d\x80\x71\x12\x01\x23\xf2\x3c\x44\x22\x40\x49\x0e\x56\x54\xc5\xf2\x3c\x44\x00\x44\x7a\x00\x00\xf2\x00\x0a\x28\x4e\xb9\x00\x00\x00\x12\x4e\x75"
all_tests = (
(CS_ARCH_M68K, CS_MODE_BIG_ENDIAN | CS_MODE_M68K_040, M68K_CODE, "M68K"),
)
s_addressing_modes = {
0: "<invalid mode>",
1: "Register Direct - Data",
2: "Register Direct - Address",
3: "Register Indirect - Address",
4: "Register Indirect - Address with Postincrement",
5: "Register Indirect - Address with Predecrement",
6: "Register Indirect - Address with Displacement",
7: "Address Register Indirect With Index - 8-bit displacement",
8: "Address Register Indirect With Index - Base displacement",
9: "Memory indirect - Postindex",
10: "Memory indirect - Preindex",
11: "Program Counter Indirect - with Displacement",
12: "Program Counter Indirect with Index - with 8-Bit Displacement",
13: "Program Counter Indirect with Index - with Base Displacement",
14: "Program Counter Memory Indirect - Postindexed",
15: "Program Counter Memory Indirect - Preindexed",
16: "Absolute Data Addressing - Short",
17: "Absolute Data Addressing - Long",
18: "Immediate value",
19: "Branch Displacement",
}
def print_read_write_regs(insn):
for m in insn.regs_read:
print("\treading from reg: %s" % insn.reg_name(m))
for m in insn.regs_write:
print("\twriting to reg: %s" % insn.reg_name(m))
def print_insn_detail(insn):
if len(insn.operands) > 0:
print("\top_count: %u" % (len(insn.operands)))
print("\tgroups_count: %u" % len(insn.groups))
print_read_write_regs(insn)
for i, op in enumerate(insn.operands):
if op.type == M68K_OP_REG:
print("\t\toperands[%u].type: REG = %s" % (i, insn.reg_name(op.reg)))
elif op.type == M68K_OP_IMM:
print("\t\toperands[%u].type: IMM = 0x%x" % (i, op.imm & 0xffffffff))
elif op.type == M68K_OP_MEM:
print("\t\toperands[%u].type: MEM" % (i))
if op.mem.base_reg != M68K_REG_INVALID:
print("\t\t\toperands[%u].mem.base: REG = %s" % (i, insn.reg_name(op.mem.base_reg)))
if op.mem.index_reg != M68K_REG_INVALID:
print("\t\t\toperands[%u].mem.index: REG = %s" % (i, insn.reg_name(op.mem.index_reg)))
mem_index_str = "w"
if op.mem.index_size > 0:
mem_index_str = "l"
print("\t\t\toperands[%u].mem.index: size = %s" % (i, mem_index_str))
if op.mem.disp != 0:
print("\t\t\toperands[%u].mem.disp: 0x%x" % (i, op.mem.disp))
if op.mem.scale != 0:
print("\t\t\toperands[%u].mem.scale: %d" % (i, op.mem.scale))
print("\t\taddress mode: %s" % (s_addressing_modes[op.address_mode]))
elif op.type == M68K_OP_FP_SINGLE:
print("\t\toperands[%u].type: FP_SINGLE" % i)
print("\t\toperands[%u].simm: %f", i, op.simm)
elif op.type == M68K_OP_FP_DOUBLE:
print("\t\toperands[%u].type: FP_DOUBLE" % i)
print("\t\toperands[%u].dimm: %lf", i, op.dimm)
elif op.type == M68K_OP_BR_DISP:
print("\t\toperands[%u].br_disp.disp: 0x%x" % (i, op.br_disp.disp))
print("\t\toperands[%u].br_disp.disp_size: %d" % (i, op.br_disp.disp_size))
print()
# ## Test class Cs
def test_class():
address = 0x01000
for (arch, mode, code, comment) in all_tests:
print("*" * 16)
print("Platform: %s" % comment)
print("Code: %s " % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
md.detail = True
last_address = 0
for insn in md.disasm(code, address):
last_address = insn.address + insn.size
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print_insn_detail(insn)
print("0x%x:\n" % (last_address))
except CsError as e:
print("ERROR: %s" % e.__str__())
if __name__ == '__main__':
test_class()
|
pylayers/em/openems/test/Horn_Antenna.py | usmanwardag/pylayers | 143 | 12601345 | """
Tutorials / horn antenna
Description at:
http://openems.de/index.php/Tutorial:_Horn_Antenna
(C) 2011,2012,2013 <NAME> <<EMAIL>>
Python Adaptation : ESIR Project 2015
"""
from pylayers.em.openems.openems import *
import scipy.constants as cst
import numpy as np
# setup the simulation
unit = 1e-3 # all length in mm
class HornAntenna(object):
def __init__(self,**kwargs):
defaults = {'unit' : 1e-3,
'width' : 20,
'height' : 30 ,
'length' : 50 ,
'feed_length' : 50 ,
'thickness' : 2,
'angle' : np.array([20,20])*np.pi/180.
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
self.unit = kwargs['unit']
self.width = kwargs['width']
self.height = kwargs['height']
self.length = kwargs['length']
self.feed_length = kwargs['feed_length']
self.thickness = kwargs['thickness']
self.angle = kwargs['angle']
HA = HornAntenna()
# size of the simulation box
SimBox = np.r_[200,200,200]
# frequency range of interest
f_start = 10e9
f_stop = 20e9
# frequency of interest
f0 = 15e9
#waveguide TE-mode definition
TE_mode = 'TE10'
a = HA.width
b = HA.height
# setup FDTD parameter & excitation function
F = FDTD(EndCriteria="1e-4")
F.add(Exc(typ='Gaussian',f0=0.5*(f_start+f_stop),fc=0.5*(f_stop-f_start)))
F.add(BoundaryCond(['PML 8','PML 8','PML 8','PML 8','PML 8','PML 8']))
# setup CSXCAD geometry & mesh
# currently, openEMS cannot automatically generate a mesh
max_res = ((cst.c/f_stop)/unit)/15. # cell size: lambda/20
C = CSX()
#
# Warning : it is not the same thing to add a new property (add) and to add
# a new primitive to an existing property (primitive)
#
C.add(Matter('horn',
p=Box(
P1=[-a/2.-HA.thickness,-b/2.,0],
P2=[-a/2.,-b/2.,0],Pr=10)
))
#
# Define Mesh
#
linex = [-SimBox[0]/2.,-a/2., a/2., SimBox[0]/2.]
meshx = SmoothMeshLine( linex, max_res, 1.4)
liney = [-SimBox[1]/2., -b/2., b/2., SimBox[1]/2.]
meshy = SmoothMeshLine( liney, max_res, 1.4 )
linez = [-HA.feed_length, 0 ,SimBox[2]-HA.feed_length ]
meshz = SmoothMeshLine( linez, max_res, 1.4 )
C.add(RectilinearGrid(meshx,meshy,meshz))
#
# Waveguide
#
C.primitive('horn',Box(
P1=[-a/2.-HA.thickness,-b/2.,meshz[0]],
P2=[-a/2.,b/2.,0],Pr=10)
)
C.primitive('horn',Box(
P1=[a/2.+HA.thickness,-b/2.,meshz[0]],
P2=[a/2.,b/2.,0],Pr=10)
)
C.primitive('horn', Box(
P1=[-a/2.-HA.thickness,b/2.+HA.thickness,meshz[0]],
P2=[a/2.+HA.thickness,b/2.,0],Pr=10)
)
C.primitive('horn', Box(
P1=[-a/2.-HA.thickness,-b/2.-HA.thickness,meshz[0]],
P2=[a/2.+HA.thickness,-b/2.,0],Pr=10)
)
#
# horn opening 4 metallic plates
#
horn_opening1 = np.array([[0, HA.length, HA.length, 0],
[a/2.,
a/2 + np.sin(HA.angle[0])*HA.length,
-a/2 - np.sin(HA.angle[0])*HA.length,
-a/2.]])
horn_opening2 = np.array([[b/2+HA.thickness,
b/2+HA.thickness + np.sin(HA.angle[1])*HA.length,
-b/2-HA.thickness - np.sin(HA.angle[1])*HA.length,
-b/2-HA.thickness],
[ 0, HA.length, HA.length, 0]])
L1 = LinPoly(lp=horn_opening1.T,Pr=10)
L2 = LinPoly(lp=horn_opening1.T,Pr=10)
L3 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
L4 = LinPoly(lp=horn_opening2.T,Pr=10,normdir=0)
T1 = Transformation()
T2 = Transformation()
T3 = Transformation()
T4 = Transformation()
# y translate
Tr1 = Translate([0,-b/2-HA.thickness/2,0])
Tr2 = Translate([0,b/2+HA.thickness/2,0])
# x translate
Tr3 = Translate([-a/2-HA.thickness/2,0,0])
Tr4 = Translate([a/2+HA.thickness/2,0,0])
Rx1 = Rotate_X(HA.angle[1])
Rx2 = Rotate_X(-HA.angle[1])
Rx3 = Rotate_Y(-HA.angle[1])
Rx4 = Rotate_Y(HA.angle[1])
T1.append(Rx1)
T1.append(Tr1)
T2.append(Rx2)
T2.append(Tr2)
T3.append(Rx3)
T3.append(Tr3)
T4.append(Rx4)
T4.append(Tr4)
L1.append(T1)
L2.append(T2)
L3.append(T3)
L4.append(T4)
C.primitive('horn',L1)
C.primitive('horn',L2)
C.primitive('horn',L3)
C.primitive('horn',L4)
## first ProbeBox
#C.add(ProbeBox(name='port_ut1', Type='wv', Weight='1'),
# a=Attributes([(0*cos(0.15708*(x--10))*sin(0*(y--15))),
# (-0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0]),
# p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
## second ProbeBox
#
#C.add(ProbeBox(name='port_it1', Type='wc', Weight='1'), a=Attributes([(0.05*sin(0.15708*(x--10))*cos(0*(y--15))),0*cos(0.15708*(x--10))*sin(0*(y--15))),0]), p=Box(P1=[-10,-15,-25],P2=[10,15,-25],Pr=0)
#
#
##
A = (a + 2*np.sin(HA.angle[0])*HA.length)*unit * (b + 2*np.sin(HA.angle[1])*HA.length)*unit;
##
## apply the excitation
start=[-a/2, -b/2 ,meshz[7] ];
stop =[ a/2, b/2 ,meshz[0]+HA.feed_length/2. ];
C.add(Excitation('port_excite_1',typ="Es",excite="1,1,0"))
# AddRectWaveGuidePort( CSX, 0, 1, start, stop, 2, a*unit, b*unit, TE_mode, 1);
##
##%% nf2ff calc
##start = [mesh.x(9) mesh.y(9) mesh.z(9)];
##stop = [mesh.x(end-8) mesh.y(end-8) mesh.z(end-8)];
##[CSX nf2ff] = CreateNF2FFBox(CSX, 'nf2ff', start, stop, 'Directions', [1 1 1 1 0 1]);
##
##%% prepare simulation folder
##Sim_Path = 'tmp_Horn_Antenna';
##Sim_CSX = 'horn_ant.xml';
##
##[status, message, messageid] = rmdir( Sim_Path, 's' ); % clear previous directory
##[status, message, messageid] = mkdir( Sim_Path ); % create empty simulation folder
##
##%% write openEMS compatible xml-file
##WriteOpenEMS([Sim_Path '/' Sim_CSX], FDTD, CSX);
##
##%% show the structure
##CSXGeomPlot([Sim_Path '/' Sim_CSX]);
##
##%% run openEMS
##RunOpenEMS(Sim_Path, Sim_CSX);
##
##%% postprocessing & do the plots
##freq = linspace(f_start,f_stop,201);
##
##port = calcPort(port, Sim_Path, freq);
##
##Zin = port.uf.tot ./ port.if.tot;
##s11 = port.uf.ref ./ port.uf.inc;
##
##plot( freq/1e9, 20*log10(abs(s11)), 'k-', 'Linewidth', 2 );
##ylim([-60 0]);
##grid on
##title( 'reflection coefficient S_{11}' );
##xlabel( 'frequency f / GHz' );
##ylabel( 'reflection coefficient |S_{11}|' );
##
##drawnow
##
##%% NFFF contour plots %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##
##% calculate the far field at phi=0 degrees and at phi=90 degrees
##thetaRange = (0:2:359) - 180;
##disp( 'calculating far field at phi=[0 90] deg...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, [0 90]*pi/180);
##
##Dlog=10*log10(nf2ff.Dmax);
##G_a = 4*pi*A/(c0/f0)^2;
##e_a = nf2ff.Dmax/G_a;
##
##% display some antenna parameter
##disp( ['radiated power: Prad = ' num2str(nf2ff.Prad) ' Watt']);
##disp( ['directivity: Dmax = ' num2str(Dlog) ' dBi'] );
##disp( ['aperture efficiency: e_a = ' num2str(e_a*100) '%'] );
##
##%%
##% normalized directivity
##figure
##plotFFdB(nf2ff,'xaxis','theta','param',[1 2]);
##drawnow
##% D_log = 20*log10(nf2ff.E_norm{1}/max(max(nf2ff.E_norm{1})));
##% D_log = D_log + 10*log10(nf2ff.Dmax);
##% plot( nf2ff.theta, D_log(:,1) ,'k-', nf2ff.theta, D_log(:,2) ,'r-' );
##
##% polar plot
##figure
##polarFF(nf2ff,'xaxis','theta','param',[1 2],'logscale',[-40 20], 'xtics', 12);
##drawnow
##% polar( nf2ff.theta, nf2ff.E_norm{1}(:,1) )
##
##%% calculate 3D pattern
##phiRange = sort( unique( [-180:5:-100 -100:2.5:-50 -50:1:50 50:2.5:100 100:5:180] ) );
##thetaRange = sort( unique([ 0:1:50 50:2.:100 100:5:180 ]));
##
##disp( 'calculating 3D far field...' );
##nf2ff = CalcNF2FF(nf2ff, Sim_Path, f0, thetaRange*pi/180, phiRange*pi/180, 'Verbose',2,'Outfile','nf2ff_3D.h5');
##
##figure
##plotFF3D(nf2ff);
##
##%%
##E_far_normalized = nf2ff.E_norm{1}/max(nf2ff.E_norm{1}(:));
##DumpFF2VTK([Sim_Path '/Horn_Pattern.vtk'],E_far_normalized,thetaRange,phiRange,'scale',1e-3);
S = OpenEMS(F,C)
#
S.save(filename='HornAntenna.xml')
|
apps/user/views.py | crazypenguin/devops | 300 | 12601358 | from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from .models import User, LoginLog, Group, Permission
from server.models import RemoteUserBindHost
from .forms import LoginForm
from util.tool import login_required, hash_code, event_log
import django.utils.timezone as timezone
from django.db.models import Q
from django.conf import settings
from ratelimit.decorators import ratelimit  # rate limiting
from ratelimit import ALL
from util.rate import rate, key
from util.permission import init_permission
from django.http import Http404
from collections import OrderedDict
import time
import json
import traceback
# Create your views here.
@ratelimit(key=key, rate=rate, method=ALL, block=True)
def login(request):
    if request.session.get('islogin', None):  # disallow repeated logins
return redirect(reverse('server:index'))
if request.method == "POST":
login_form = LoginForm(request.POST)
error_message = '请检查填写的内容!'
if login_form.is_valid():
username = login_form.cleaned_data.get('username')
            password = login_form.cleaned_data.get('password')
try:
user = User.objects.get(username=username)
if not user.enabled:
error_message = '用户已禁用!'
event_log(user, 3, '用户 [{}] 已禁用'.format(username), request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return render(request, 'user/login.html', locals())
except Exception:
error_message = '用户不存在!'
event_log(None, 3, '用户 [{}] 不存在'.format(username), request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return render(request, 'user/login.html', locals())
# if user.password == password:
            if user.password == hash_code(password):
data = {'last_login_time': timezone.now()}
User.objects.filter(username=username).update(**data)
request.session.set_expiry(0)
request.session['issuperuser'] = False
                if user.role == 1:  # super administrator
request.session['issuperuser'] = True
request.session['islogin'] = True
request.session['userid'] = user.id
request.session['username'] = user.username
request.session['nickname'] = user.nickname
                request.session['locked'] = False  # screen lock flag
now = int(time.time())
request.session['logintime'] = now
request.session['lasttime'] = now
                if user.username == 'admin' and user.role == 1:  # admin is the special system superuser and holds all permissions
permission_dict, menu_list = init_permission(user.username, is_super=True)
else:
                    permission_dict, menu_list = init_permission(user.username)  # initialize permissions and menus
request.session[settings.INIT_PERMISSION] = permission_dict
request.session[settings.INIT_MENU] = menu_list
event_log(user, 1, '用户 [{}] 登陆成功'.format(username), request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return redirect(reverse('server:index'))
else:
error_message = '密码错误!'
event_log(user, 3, '用户 [{}] 密码错误'.format(username), request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return render(request, 'user/login.html', locals())
else:
event_log(None, 3, '登陆表单验证错误', request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return render(request, 'user/login.html', locals())
return render(request, 'user/login.html')
@ratelimit(key=key, rate=rate, method=ALL, block=True)
def logout(request):
if not request.session.get('islogin', None):
return redirect(reverse('user:login'))
user = User.objects.get(id=int(request.session.get('userid')))
    # request.session.flush()  # this clears everything, including the django-admin login state
    # or use the approach below
try:
del request.session['issuperuser']
del request.session['islogin']
del request.session['userid']
del request.session['username']
del request.session['nickname']
del request.session['locked']
del request.session['logintime']
del request.session['lasttime']
del request.session['referer_url']
del request.session[settings.INIT_PERMISSION]
del request.session[settings.INIT_MENU]
except Exception:
pass
event_log(user, 2, '用户 [{}] 退出'.format(user.username), request.META.get('REMOTE_ADDR', None), request.META.get('HTTP_USER_AGENT', None))
return redirect(reverse('user:login'))
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def lockscreen(request):
if request.method == 'GET':
try:
            request.session['locked'] = True  # lock the screen
if 'referer_url' not in request.session:
referer_url = request.META.get('HTTP_REFERER', reverse('server:index'))
request.session['referer_url'] = referer_url
except Exception:
pass
return render(request, 'user/lockscreen.html')
elif request.method == 'POST':
try:
password = request.POST.get('password', None)
if password:
user = User.objects.get(pk=request.session['userid'])
                if user.password == hash_code(password):
request.session['locked'] = False
return_url = request.session.get('referer_url', reverse('server:index'))
try:
del request.session['referer_url']
except Exception:
pass
return redirect(return_url)
else:
return render(request, 'user/lockscreen.html', {'error_message': '请输入正确的密码'})
else:
return render(request, 'user/lockscreen.html', {'error_message': '请输入密码'})
except Exception:
pass
return redirect(reverse('user:lockscreen'))
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def users(request):
    users = User.objects.exclude(pk=request.session['userid']).exclude(username='admin')  # exclude the currently logged-in user
return render(request, 'user/users.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def groups(request):
groups = Group.objects.all()
return render(request, 'user/groups.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def logs(request):
logs = LoginLog.objects.all()
return render(request, 'user/logs.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def profile(request):
user = get_object_or_404(User, pk=request.session.get('userid'))
clissh = json.loads(user.setting)['clissh']
ssh_app = None
for i in clissh:
if i['enable']:
ssh_app = i
break
clisftp = json.loads(user.setting)['clisftp']
sftp_app = None
for i in clisftp:
if i['enable']:
sftp_app = i
break
return render(request, 'user/profile.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def profile_edit(request):
user = get_object_or_404(User, pk=request.session.get('userid'))
clissh = json.loads(user.setting)['clissh']
clisftp = json.loads(user.setting)['clisftp']
sex_choices = (
('male', "男"),
('female', "女"),
)
return render(request, 'user/profile_edit.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def user(request, user_id):
user = get_object_or_404(User, pk=user_id)
if user.id == request.session['userid'] or (user.username == 'admin' and user.role == 1):
raise Http404('Not found')
return render(request, 'user/user.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def user_edit(request, user_id):
user = get_object_or_404(User, pk=user_id)
if user.id == request.session['userid'] or (user.username == 'admin' and user.role == 1):
raise Http404('Not found')
    other_groups = Group.objects.filter(  # groups the current user does not belong to
~Q(user__id=user_id),
)
if request.session['issuperuser'] and request.session['username'] == 'admin':
other_hosts = RemoteUserBindHost.objects.filter(
~Q(user__id=user_id),
)
else:
other_hosts = RemoteUserBindHost.objects.filter(
~Q(user__id=user_id),
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
sex_choices = (
('male', "男"),
('female', "女"),
)
role_choices = (
(2, '普通用户'),
(1, '超级管理员'),
)
include_permission_ids = [ x.id for x in user.permission.all() ]
    # the special admin account can assign all permissions; other users can only assign permissions they hold themselves
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_permissions = Permission.objects.all()
else:
all_permissions = Permission.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
    # convert to the front-end ztree data format
permissions = OrderedDict()
for permission in all_permissions:
if not permission.menu:
permissions[permission.title] = {
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
}
else:
if permission.menu in permissions:
permissions[permission.menu]['children'].append({
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
})
else:
permissions[permission.menu] = {
'name': permission.menu,
'children': [
{
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
}
]
}
for x in permissions[permission.menu]['children']:
if x['checked']:
permissions[permission.menu]['open'] = True
break
else:
permissions[permission.menu]['open'] = False
ztree_permissions = [ permissions[x] for x in permissions ]
ztree_permissions = json.dumps(ztree_permissions, ensure_ascii=True)
return render(request, 'user/user_edit.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def user_add(request):
all_groups = Group.objects.all()
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_hosts = RemoteUserBindHost.objects.all()
else:
all_hosts = RemoteUserBindHost.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
sex_choices = (
('male', "男"),
('female', "女"),
)
role_choices = (
(2, '普通用户'),
(1, '超级管理员'),
)
    # the special admin account can assign all permissions; other users can only assign permissions they hold themselves
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_permissions = Permission.objects.all()
else:
all_permissions = Permission.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
    # convert to the front-end ztree data format
permissions = OrderedDict()
for permission in all_permissions:
if not permission.menu:
permissions[permission.title] = {
'name': permission.title,
'value': permission.id
}
else:
if permission.menu in permissions:
permissions[permission.menu]['children'].append({
'name': permission.title,
'value': permission.id
})
else:
permissions[permission.menu] = {
'name': permission.menu,
'open': False,
'children': [
{'name': permission.title, 'value': permission.id}
]
}
ztree_permissions = [ permissions[x] for x in permissions ]
ztree_permissions = json.dumps(ztree_permissions, ensure_ascii=True)
return render(request, 'user/user_add.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def group(request, group_id):
group = get_object_or_404(Group, pk=group_id)
return render(request, 'user/group.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def group_edit(request, group_id):
group = get_object_or_404(Group, pk=group_id)
    other_users = User.objects.filter(  # users not included in the current group
~Q(groups__id=group_id),
~Q(id=request.session['userid']),
~Q(username='admin'),
)
if request.session['issuperuser'] and request.session['username'] == 'admin':
other_hosts = RemoteUserBindHost.objects.filter(
~Q(group__id=group_id)
)
else:
other_hosts = RemoteUserBindHost.objects.filter(
~Q(group__id=group_id),
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username'])
).distinct()
    # the special admin account can assign all permissions; other users can only assign permissions they hold themselves
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_permissions = Permission.objects.all()
else:
all_permissions = Permission.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
include_permission_ids = [x.id for x in group.permission.all()]
    # convert to the front-end ztree data format
permissions = OrderedDict()
for permission in all_permissions:
if not permission.menu:
permissions[permission.title] = {
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
}
else:
if permission.menu in permissions:
permissions[permission.menu]['children'].append({
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
})
else:
permissions[permission.menu] = {
'name': permission.menu,
'children': [
{
'name': permission.title,
'value': permission.id,
'checked': True if permission.id in include_permission_ids else False
}
]
}
for x in permissions[permission.menu]['children']:
if x['checked']:
permissions[permission.menu]['open'] = True
break
else:
permissions[permission.menu]['open'] = False
ztree_permissions = [ permissions[x] for x in permissions ]
ztree_permissions = json.dumps(ztree_permissions, ensure_ascii=True)
return render(request, 'user/group_edit.html', locals())
@ratelimit(key=key, rate=rate, method=ALL, block=True)
@login_required
def group_add(request):
all_users = User.objects.exclude(pk=request.session['userid']).exclude(username='admin')
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_hosts = RemoteUserBindHost.objects.all()
else:
all_hosts = RemoteUserBindHost.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username'])
).distinct()
    # the special admin account can assign all permissions; other users can only assign permissions they hold themselves
if request.session['issuperuser'] and request.session['username'] == 'admin':
all_permissions = Permission.objects.all()
else:
all_permissions = Permission.objects.filter(
Q(user__username=request.session['username']) | Q(group__user__username=request.session['username']),
).distinct()
    # convert to the front-end ztree data format
permissions = OrderedDict()
for permission in all_permissions:
if not permission.menu:
permissions[permission.title] = {
'name': permission.title,
'value': permission.id
}
else:
if permission.menu in permissions:
permissions[permission.menu]['children'].append({
'name': permission.title,
'value': permission.id
})
else:
permissions[permission.menu] = {
'name': permission.menu,
'open': False,
'children': [
{'name': permission.title, 'value': permission.id}
]
}
ztree_permissions = [ permissions[x] for x in permissions ]
ztree_permissions = json.dumps(ztree_permissions, ensure_ascii=True)
return render(request, 'user/group_add.html', locals())
|
neo/VM/VMState.py | BSathvik/neo-python | 387 | 12601376 |
NONE = 0
HALT = 1 << 0
FAULT = 1 << 1
BREAK = 1 << 2
def VMStateStr(_VMState):
if _VMState == NONE:
return "NONE"
state = []
if _VMState & HALT:
state.append("HALT")
if _VMState & FAULT:
state.append("FAULT")
if _VMState & BREAK:
state.append("BREAK")
return ", ".join(state)
|
code/utils.py | bsun0802/Zero-DCE | 106 | 12601464 | <reponame>bsun0802/Zero-DCE
import os
import shutil
import sys
from datetime import datetime
from pathlib import Path
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
def alpha_total_variation(A):
'''
Links: https://remi.flamary.com/demos/proxtv.html
https://kornia.readthedocs.io/en/latest/_modules/kornia/losses/total_variation.html#total_variation
'''
delta_h = A[:, :, 1:, :] - A[:, :, :-1, :]
delta_w = A[:, :, :, 1:] - A[:, :, :, :-1]
# TV used here: L-1 norm, sum R,G,B independently
# Other variation of TV loss can be found by google search
tv = delta_h.abs().mean((2, 3)) + delta_w.abs().mean((2, 3))
loss = torch.mean(tv.sum(1) / (A.shape[1] / 3))
return loss
def exposure_control_loss(enhances, rsize=16, E=0.6):
avg_intensity = F.avg_pool2d(enhances, rsize).mean(1) # to gray: (R+G+B)/3
exp_loss = (avg_intensity - E).abs().mean()
return exp_loss
# Color constancy loss via gray-world assumption. In use.
def color_constency_loss(enhances):
plane_avg = enhances.mean((2, 3))
col_loss = torch.mean((plane_avg[:, 0] - plane_avg[:, 1]) ** 2
+ (plane_avg[:, 1] - plane_avg[:, 2]) ** 2
+ (plane_avg[:, 2] - plane_avg[:, 0]) ** 2)
return col_loss
# Averaged color component ratio preserving loss. Not in use.
def color_constency_loss2(enhances, originals):
enh_cols = enhances.mean((2, 3))
ori_cols = originals.mean((2, 3))
rg_ratio = (enh_cols[:, 0] / enh_cols[:, 1] - ori_cols[:, 0] / ori_cols[:, 1]).abs()
gb_ratio = (enh_cols[:, 1] / enh_cols[:, 2] - ori_cols[:, 1] / ori_cols[:, 2]).abs()
br_ratio = (enh_cols[:, 2] / enh_cols[:, 0] - ori_cols[:, 2] / ori_cols[:, 0]).abs()
col_loss = (rg_ratio + gb_ratio + br_ratio).mean()
return col_loss
# pixel-wise color component ratio preserving loss. Not in use.
def anti_color_shift_loss(enhances, originals):
def solver(c1, c2, d1, d2):
pos = (c1 > 0) & (c2 > 0) & (d1 > 0) & (d2 > 0)
return torch.mean((c1[pos] / c2[pos] - d1[pos] / d2[pos]) ** 2)
enh_avg = F.avg_pool2d(enhances, 4)
ori_avg = F.avg_pool2d(originals, 4)
rg_loss = solver(enh_avg[:, 0, ...], enh_avg[:, 1, ...],
ori_avg[:, 0, ...], ori_avg[:, 1, ...])
gb_loss = solver(enh_avg[:, 1, ...], enh_avg[:, 2, ...],
ori_avg[:, 1, ...], ori_avg[:, 2, ...])
br_loss = solver(enh_avg[:, 2, ...], enh_avg[:, 0, ...],
ori_avg[:, 2, ...], ori_avg[:, 0, ...])
anti_shift_loss = rg_loss + gb_loss + br_loss
if torch.any(torch.isnan(anti_shift_loss)).item():
sys.exit('Color Constancy loss is nan')
return anti_shift_loss
def get_kernels(device):
# weighted RGB to gray
K1 = torch.tensor([0.3, 0.59, 0.1], dtype=torch.float32).view(1, 3, 1, 1).to(device)
# K1 = torch.tensor([1 / 3, 1 / 3, 1 / 3], dtype=torch.float32).view(1, 3, 1, 1).to(device)
# kernel for neighbor diff
K2 = torch.tensor([[[0, -1, 0], [0, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 0], [0, -1, 0]],
[[0, 0, 0], [-1, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, -1], [0, 0, 0]]], dtype=torch.float32)
K2 = K2.unsqueeze(1).to(device)
return K1, K2
def spatial_consistency_loss(enhances, originals, to_gray, neigh_diff, rsize=4):
# convert to gray
enh_gray = F.conv2d(enhances, to_gray)
ori_gray = F.conv2d(originals, to_gray)
# average intensity of local regision
enh_avg = F.avg_pool2d(enh_gray, rsize)
ori_avg = F.avg_pool2d(ori_gray, rsize)
# calculate spatial consistency loss via convolution
enh_pad = F.pad(enh_avg, (1, 1, 1, 1), mode='replicate')
ori_pad = F.pad(ori_avg, (1, 1, 1, 1), mode='replicate')
enh_diff = F.conv2d(enh_pad, neigh_diff)
ori_diff = F.conv2d(ori_pad, neigh_diff)
spa_loss = torch.pow((enh_diff - ori_diff), 2).sum(1).mean()
return spa_loss
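# Minimal smoke-test sketch (illustrative): the losses above expect NCHW
# float tensors in [0, 1]; the batch/spatial sizes below are example values.
if __name__ == '__main__':
    _to_gray, _neigh_diff = get_kernels(torch.device('cpu'))
    _enh, _ori = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
    print(exposure_control_loss(_enh).item(),
          color_constency_loss(_enh).item(),
          spatial_consistency_loss(_enh, _ori, _to_gray, _neigh_diff).item())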
# training helper functions
class Logger:
TRAIN_INFO = '[TRAIN] - EPOCH {:d}/{:d}, Iters {:d}/{:d}, {:.1f} s/iter, \
LOSS / LOSS(AVG): {:.4f}/{:.4f}, Loss[spa,exp,col,tvA] / Loss(avg) : {} / {}'.strip()
VAL_INFO = '[Validation] - EPOCH {:d}/{:d} - Validation Avg. LOSS: {:.4f}, in {:.2f} secs '
VAL_INFO += '- ' + datetime.now().strftime('%X') + ' -'
def __init__(self, n):
self.val = np.zeros(n)
self.sum = np.zeros(n)
self.count = 0
self.avg = np.zeros(n)
self.val_losses = []
def update(self, losses):
self.val = np.array(losses) # log the loss of current batch
self.sum += self.val
self.count += 1
self.avg = self.sum / self.count # averaged loss of batches seen so far
def save_ckpt(state, is_best, experiment, epoch, ckpt_dir):
filename = os.path.join(ckpt_dir, f'{experiment}_ckpt.pth')
torch.save(state, filename)
if is_best:
print(f'[BEST MODEL] Saving best model, obtained on epoch = {epoch + 1}')
shutil.copy(filename, os.path.join(ckpt_dir, f'{experiment}_best_model.pth'))
# 4. tensor and numpy
def gamma_correction(img, gamma):
return np.power(img, gamma)
def gamma_like(img, enhanced):
x, y = img.mean(), enhanced.mean()
gamma = np.log(y) / np.log(x)
return gamma_correction(img, gamma)
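# Worked example (illustrative): if the input image averages 0.25 and the
# enhanced reference averages 0.5, gamma_like solves 0.25**gamma == 0.5,
# i.e. gamma = log(0.5) / log(0.25) = 0.5, and gamma_correction then applies
# img**0.5 to the whole input.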
def to_numpy(t, squeeze=False, to_HWC=True):
x = t.detach().cpu().numpy()
if squeeze:
x = x.squeeze()
if to_HWC:
x = x.transpose((1, 2, 0))
return x
# 5. Visulization
def plot_result(img, enhanced, Astack, n_LE, scaler=None):
Ar, Ag, Ab = Astack[..., 0::3].mean(2), Astack[..., 1::3].mean(2), Astack[..., 2::3].mean(2)
if scaler and Ar.min() < 0:
Ar = scaler(Ar)
if scaler and Ag.min() < 0:
Ag = scaler(Ag)
if scaler and Ab.min() < 0:
Ab = scaler(Ab)
fig, axes = plt.subplots(1, 5, figsize=(12.5, 2.5))
fig.subplots_adjust(wspace=0.1)
axes[0].imshow(img)
rmap = axes[1].imshow(Ar, cmap='jet')
gmap = axes[2].imshow(Ag, cmap='jet')
bmap = axes[3].imshow(Ab, cmap='jet')
fig.colorbar(rmap, ax=axes[1])
fig.colorbar(gmap, ax=axes[2])
fig.colorbar(bmap, ax=axes[3])
axes[4].imshow(enhanced)
titles = ['Original',
r'$\mathcal{A}^{R}' + f'_{n_LE}$', r'$\mathcal{A}^{G}' + f'_{n_LE}$',
r'$\mathcal{A}^{B}' + f'_{n_LE}$', 'Enhanced']
for i in range(5):
axes[i].set_title(titles[i])
axes[i].axis('off')
fig.tight_layout()
return fig
def plot_alpha_hist(Astack):
n = Astack.shape[2] // 3
figsize = (15, 1.5 * 3) if n > 4 else (10, 2 * 3)
fig, axes = plt.subplots(3, n, figsize=figsize)
for r in range(3):
channels = Astack[..., r::3]
for c in range(n):
axes[r][c].hist(channels[..., c].ravel())
for c in range(n):
axes[0][c].set_title(c + 1)
fig.tight_layout()
return fig
def putText(im, *args):
text, pos, font, size, color, scale = args
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
im = cv2.putText(im, text, pos, font, size, color, scale)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
return im
def row_arrange(inp, fixed, adaptive, algo):
if algo.shape != fixed.shape:
algo = cv2.resize(algo, (fixed.shape[1], fixed.shape[0]))
pos = (25, 50)
font = cv2.FONT_HERSHEY_SIMPLEX
color = (128 / 255., 117 / 255., 0.)
inp = putText(inp, 'Input', pos, font, 2, color, 3)
fixed = putText(fixed, 'Gamma(fixed=0.4)', pos, font, 2, color, 3)
adaptive = putText(adaptive, 'Gamma(adaptive)', pos, font, 2, color, 3)
algo = putText(algo, 'ZeroDCE', pos, font, 2, color, 3)
return cv2.hconcat([inp, fixed, adaptive, algo])
def make_grid(dataset, vsep=8):
n = len(dataset)
img = to_numpy(dataset[0]['img'])
h, w, _ = img.shape
grid = np.ones((n * h + vsep * (n - 1), 4 * w, 3), dtype=np.float32)
return grid, vsep
# system path
def create_dir(path):
    'create directory if it does not exist'
if isinstance(path, str):
path = Path(path).expanduser().resolve()
if path.exists():
if path.is_dir():
print('Output dir already exists.')
else:
sys.exit('[ERROR] You specified a file, not a folder. Please revise --outputDir')
else:
path.mkdir(parents=True)
return path
# DEPRECATED
# def unnormalize(x):
# 'revert [-1,1] to [0, 1]'
# return x / 2 + 0.5
# def standardize(x):
# 'standardize a tensor/array to [0, 1]'
# mi, mx = x.min(), x.max()
# return (x - mi) / (mx - mi + 1e-10)
|
flybirds/core/plugin/plugins/default/ui_driver/poco/poco_position.py | LinuxSuRen/flybirds | 183 | 12601470 | <filename>flybirds/core/plugin/plugins/default/ui_driver/poco/poco_position.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Element position api
"""
import time
from poco.exceptions import PocoNoSuchNodeException
import flybirds.core.plugin.plugins.default.ui_driver.poco.poco_ele as poco_ele
from flybirds.core.plugin.plugins.default.ui_driver.poco import poco_manage
import flybirds.utils.snippet as snippet
from flybirds.core.exceptions import FlybirdPositionChanging
def position_change(poco, select_str, optional, o_position):
"""
determine whether the position of the element has changed within the
specified time.
"""
result = False
timeout = optional["timeout"]
current_wait_second = 1
while (not result) and (timeout > 0):
try:
poco_target = poco_manage.create_poco_object_by_dsl(
poco, select_str, optional
)
if poco_target.exists():
t_position = poco_target.get_position()
if not snippet.list_comparator(o_position, t_position):
result = True
if result:
break
except Exception:
pass
time.sleep(current_wait_second)
timeout -= current_wait_second
current_wait_second += 1
return result
def position_not_change(poco, select_str, optional, dur_time, verify_count):
"""
determine the position of the element has not changed
"""
poco_ele.wait_exists(poco, select_str, optional)
prev_position = None
log_time = dur_time * verify_count
result = False
while verify_count > 0 and (not result):
verify_count -= 1
try:
if prev_position is None:
prev_position = poco_manage.create_poco_object_by_dsl(
poco, select_str, optional
).get_position()
except Exception:
time.sleep(dur_time)
continue
time.sleep(dur_time)
try:
poco_target = poco_manage.create_poco_object_by_dsl(
poco, select_str, optional
)
cur_position = poco_target.get_position()
if snippet.list_comparator(prev_position, cur_position):
result = True
else:
prev_position = cur_position
if result:
break
except PocoNoSuchNodeException:
result = True
except Exception:
prev_position = None
if not result:
message = "during {}s time, {} position is changing".format(
log_time, select_str
)
raise FlybirdPositionChanging(message)
|
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/test_common/complex_package_structure/complex_package/__init__.py | busunkim96/dbnd | 224 | 12601474 | from .complex_structure_pipeline import complex_structure_pipeline
|
.venv/lib/python3.8/site-packages/rules/contrib/models.py | taharh/label-studio | 1,356 | 12601548 | from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model
from django.db.models.base import ModelBase
from ..permissions import add_perm
class RulesModelBaseMixin:
"""
Mixin for the metaclass of Django's Model that allows declaring object-level
permissions in the model's Meta options.
If set, the Meta attribute "rules_permissions" has to be a dictionary with
permission types (like "add" or "change") as keys and predicates (like
rules.is_staff) as values. Permissions are then registered with the rules
framework automatically upon Model creation.
This mixin can be used for creating custom metaclasses.
"""
def __new__(cls, name, bases, attrs, **kwargs):
model_meta = attrs.get("Meta")
if hasattr(model_meta, "rules_permissions"):
perms = model_meta.rules_permissions
del model_meta.rules_permissions
if not isinstance(perms, dict):
raise ImproperlyConfigured(
"The rules_permissions Meta option of %s must be a dict, not %s."
% (name, type(perms))
)
perms = perms.copy()
else:
perms = {}
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
new_class._meta.rules_permissions = perms
new_class.preprocess_rules_permissions(perms)
for perm_type, predicate in perms.items():
add_perm(new_class.get_perm(perm_type), predicate)
return new_class
class RulesModelBase(RulesModelBaseMixin, ModelBase):
"""
A subclass of Django's ModelBase with the RulesModelBaseMixin mixed in.
"""
class RulesModelMixin:
"""
A mixin for Django's Model that adds hooks for stepping into the process of
permission registration, which are called by the metaclass implementation in
RulesModelBaseMixin.
Use this mixin in a custom subclass of Model in order to change its behavior.
"""
@classmethod
def get_perm(cls, perm_type):
"""Converts permission type ("add") to permission name ("app.add_modelname")
:param perm_type: "add", "change", etc., or custom value
:type perm_type: str
:returns str:
"""
return "%s.%s_%s" % (cls._meta.app_label, perm_type, cls._meta.model_name)
@classmethod
def preprocess_rules_permissions(cls, perms):
"""May alter a permissions dict before it's processed further.
Use this, for instance, to alter the supplied permissions or insert default
values into the given dict.
:param perms:
Shallow-copied value of the rules_permissions model Meta option
:type perms: dict
"""
class RulesModel(RulesModelMixin, Model, metaclass=RulesModelBase):
"""
An abstract model with RulesModelMixin mixed in, using RulesModelBase as metaclass.
Use this as base for your models directly if you don't need to customize the
behavior of RulesModelMixin and thus don't want to create a custom base class.
"""
class Meta:
abstract = True
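# Illustrative usage sketch (not part of the original file). Assuming the standard
# `rules` predicates, a concrete model could declare its object-level permissions as
# shown below; the names `Article` and its app label are hypothetical.
#
#   import rules
#
#   class Article(RulesModel):
#       class Meta:
#           rules_permissions = {
#               "add": rules.is_staff,
#               "change": rules.is_superuser,
#           }
#
# On class creation this registers "app_label.add_article" and
# "app_label.change_article" with the rules framework via get_perm() above.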
|
datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/conftest.py | mchelen-gov/integrations-core | 663 | 12601558 | <gh_stars>100-1000
{license_header}
import pytest
@pytest.fixture(scope='session')
def dd_environment():
yield {{}}, {{'use_jmx': True}}
|
pegasus/ops/text_encoder_utils_test.py | akhilbobby/pegasus | 1,270 | 12601566 | # Copyright 2020 The PEGASUS Authors..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pegasus.ops.text_encoder_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pegasus.ops.python import text_encoder_utils
import tensorflow as tf
_SUBWORD_VOCAB = "pegasus/ops/testdata/subwords"
_SPM_VOCAB = "pegasus/ops/testdata/sp_test.model"
class TextEncoderUtilsTest(parameterized.TestCase, tf.test.TestCase):
def test_sentencepiece(self):
e = text_encoder_utils.create_text_encoder("sentencepiece", _SPM_VOCAB)
in_text = "the quick brown fox jumps over the lazy dog"
self.assertEqual(in_text, e.decode(e.encode(in_text)))
def test_sentencepiece_offset(self):
e = text_encoder_utils.create_text_encoder("sentencepiece_newline",
_SPM_VOCAB)
in_text = "the quick brown fox jumps over the lazy dog"
ids = [25] + e.encode(in_text)
self.assertEqual(in_text, e.decode(ids))
def test_subword_decode(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
self.assertEqual(encoder.decode([9, 10, 11, 12, 1, 0]), "quick brown fox")
def test_subword_decode_numpy_int32(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
ids = np.array([9, 10, 11, 12, 1, 0], dtype=np.int32)
    # Without tolist(), the test will not pass for np array types
    # other than int64.
self.assertEqual(encoder.decode(ids.tolist()), "quick brown fox")
def test_subword_decode_numpy_int64(self):
encoder = text_encoder_utils.create_text_encoder("subword", _SUBWORD_VOCAB)
ids = np.array([9, 10, 11, 12, 1, 0], dtype=np.int64)
# Without tolist(), the test will not pass for python3
self.assertEqual(encoder.decode(ids.tolist()), "quick brown fox")
if __name__ == "__main__":
absltest.main()
|
py_feature/316_replacement.py | weiziyoung/instacart | 290 | 12601585 | <filename>py_feature/316_replacement.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 22:36:10 2017
@author: konodera
nohup python -u 316_replacement.py &
"""
import pandas as pd
import gc
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from itertools import product
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
usecols = ['user_id', 'order_number', 'product_id', 'product_name', 'order_id', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', usecols).sort_values(usecols[:3])
order_pids = log.groupby('order_id').product_id.apply(set).reset_index()
#item = pd.read_pickle('../input/mk/replacement2.p').head(999)
item = pd.read_pickle('../input/mk/replacement.p')
item = item[item.back>9]
# parse
item_di = defaultdict(int)
for pid1,pid2,ratio in item[['pid1', 'pid2', 'ratio']].values:
item_di['{} {}'.format(int(pid1),int(pid2))] = ratio
#==============================================================================
# def
#==============================================================================
def make(T):
"""
T = 0
folder = 'trainT-0'
"""
if T==-1:
folder = 'test'
else:
folder = 'trainT-'+str(T)
X_base = pd.read_pickle('../feature/X_base_t3.p')
label = pd.read_pickle('../feature/{}/label_reordered.p'.format(folder))
# 'inner' for removing t-n_order_id == NaN
if 'train' in folder:
df = pd.merge(X_base[X_base.is_train==1], label, on='order_id', how='inner')
elif folder == 'test':
df = pd.merge(X_base[X_base.is_train==0], label, on='order_id', how='inner')
df = pd.merge(df,
order_pids.add_prefix('t-1_'),
on='t-1_order_id', how='left')
df = pd.merge(df,
order_pids.add_prefix('t-2_'),
on='t-2_order_id', how='left')
ratio_min = []
ratio_mean = []
ratio_max = []
ratio_sum = []
ratio_len = []
for t_2,t_1,pid in tqdm(df[['t-2_product_id', 't-1_product_id', 'product_id']].values, miniters=99999):
rep = t_1 - t_2
if pid not in t_1 and pid in t_2 and len(rep)>0:
ratios = [item_di['{} {}'.format(i1,i2)] for i1,i2 in list(product([pid], rep))]
ratio_min.append(np.min(ratios))
ratio_mean.append(np.mean(ratios))
ratio_max.append(np.max(ratios))
ratio_sum.append(np.sum(ratios))
ratio_len.append(len(ratios))
else:
ratio_min.append(-1)
ratio_mean.append(-1)
ratio_max.append(-1)
ratio_sum.append(-1)
ratio_len.append(-1)
df['comeback_ratio_min'] = ratio_min
df['comeback_ratio_mean'] = ratio_mean
df['comeback_ratio_max'] = ratio_max
df['comeback_ratio_sum'] = ratio_sum
df['comeback_ratio_len'] = ratio_len
col = ['order_id', 'product_id', 'comeback_ratio_min', 'comeback_ratio_mean',
'comeback_ratio_max', 'comeback_ratio_sum', 'comeback_ratio_len']
df[col].to_pickle('../feature/{}/f316_order_product.p'.format(folder))
del df
gc.collect()
#==============================================================================
# main
#==============================================================================
make(0)
make(1)
make(2)
make(-1)
#==============================================================================
utils.end(__file__)
|
String_or_Array/Sorting/Bubble_Sort.py | Amanjakhetiya/Data_Structures_Algorithms_In_Python | 195 | 12601586 | # bubble sort function
def bubble_sort(arr):
n = len(arr)
# Repeat loop N times
# equivalent to: for(i = 0; i < n-1; i++)
for i in range(0, n-1):
# Repeat internal loop for (N-i)th largest element
for j in range(0, n-i-1):
# if jth value is greater than (j+1) value
if arr[j] > arr[j+1]:
# swap the values at j and j+1 index
# Pythonic way to swap 2 variable values -> x, y = y, x
arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [64, 34, 25, 12, 22, 11, 90]
print('Before sorting:', arr)
# call bubble sort function on the array
bubble_sort(arr)
print('After sorting:', arr)
"""
Output:
Before sorting: [64, 34, 25, 12, 22, 11, 90]
After sorting: [11, 12, 22, 25, 34, 64, 90]
""" |
demo/mpi-ref-v1/ex-3.03.py | gmdzy2010/mpi4py | 533 | 12601635 | <filename>demo/mpi-ref-v1/ex-3.03.py
execfile('ex-3.02.py')
assert dtype.size == MPI.DOUBLE.size + MPI.CHAR.size
assert dtype.extent >= dtype.size
dtype.Free()
|
test/test-girvan-newman.py | ruth-ann/snap-python | 242 | 12601749 | import sys
import unittest
import snap
class TestCommunityGirvanNeuman(unittest.TestCase):
def test_CommunityGirvanNewman(self):
Rnd = snap.TRnd(42)
Graph = snap.GenPrefAttach(100, 10, Rnd)
exp_val = 0.00963802805072646
Vec = snap.TCnComV()
act_val = snap.CommunityGirvanNewman(Graph, Vec)
self.assertAlmostEqual(exp_val, act_val)
Vec = snap.TCnComV()
act_val = snap.CommunityGirvanNewman(Graph, Vec)
self.assertAlmostEqual(exp_val, act_val)
Vec = snap.TCnComV()
act_val = snap.CommunityGirvanNewman(Graph, Vec)
self.assertAlmostEqual(exp_val, act_val)
if __name__ == '__main__':
unittest.main()
|
descarteslabs/workflows/result_types/unmarshal.py | carderne/descarteslabs-python | 167 | 12601764 | registry = {}
def unmarshal(typestr, x):
try:
unmarshaller = registry[typestr]
except KeyError:
raise TypeError("No unmarshaller registered for '{}'".format(typestr))
return unmarshaller(x)
def register(typestr, unmarshaller):
if typestr in registry:
raise NameError(
"An unmarshaller is already registered for '{}'".format(typestr)
)
registry[typestr] = unmarshaller
def identity(x):
return x
def astype(typ):
"Unmarshal by casting into ``typ``, if not already an instance of ``typ``"
def unmarshaller(x):
return typ(x) if not isinstance(x, typ) else x
return unmarshaller
def unpack_into(typ):
"Unmarshal by unpacking a dict into the constructor for ``typ``"
def unmarshaller(x):
return typ(**x)
return unmarshaller
__all__ = ["unmarshal", "register", "identity", "unpack_into"]
|
cme/protocols/smb/remotefile.py | hantwister/CrackMapExec | 6,044 | 12601778 | <reponame>hantwister/CrackMapExec<gh_stars>1000+
from impacket.smb3structs import FILE_READ_DATA, FILE_WRITE_DATA
class RemoteFile:
def __init__(self, smbConnection, fileName, share='ADMIN$', access = FILE_READ_DATA | FILE_WRITE_DATA ):
self.__smbConnection = smbConnection
self.__share = share
self.__access = access
self.__fileName = fileName
self.__tid = self.__smbConnection.connectTree(share)
self.__fid = None
self.__currentOffset = 0
def open(self):
self.__fid = self.__smbConnection.openFile(self.__tid, self.__fileName, desiredAccess= self.__access)
def seek(self, offset, whence):
# Implement whence, for now it's always from the beginning of the file
if whence == 0:
self.__currentOffset = offset
def read(self, bytesToRead):
if bytesToRead > 0:
data = self.__smbConnection.readFile(self.__tid, self.__fid, self.__currentOffset, bytesToRead)
self.__currentOffset += len(data)
return data
return ''
def close(self):
if self.__fid is not None:
self.__smbConnection.closeFile(self.__tid, self.__fid)
self.__fid = None
def delete(self):
self.__smbConnection.deleteFile(self.__share, self.__fileName)
def tell(self):
return self.__currentOffset
def __str__(self):
return "\\\\{}\\{}\\{}".format(self.__smbConnection.getRemoteHost(), self.__share, self.__fileName) |
base/base_model.py | caisarl76/TADE-AgnosticLT | 175 | 12601783 | import torch.nn as nn
import numpy as np
from abc import abstractmethod
class BaseModel(nn.Module):
"""
Base class for all models
"""
@abstractmethod
def forward(self, *inputs):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
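# Illustrative sketch (not part of the original file): a minimal concrete subclass
# showing how the abstract forward() is meant to be implemented. The names below
# (SimpleRegressor, in_features, out_features) are hypothetical.
class SimpleRegressor(BaseModel):
    def __init__(self, in_features=4, out_features=1):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)

    def forward(self, x):
        # Forward pass logic: a single linear layer
        return self.linear(x)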
|
tests/test_houston_configmap.py | syamasakigoodrx/astronomer | 314 | 12601797 | import yaml
from tests.helm_template_generator import render_chart
import pytest
import tempfile
from subprocess import check_call
def common_test_cases(docs):
"""Test some things that should apply to all cases."""
assert len(docs) == 1
doc = docs[0]
assert doc["kind"] == "ConfigMap"
assert doc["apiVersion"] == "v1"
assert doc["metadata"]["name"] == "RELEASE-NAME-houston-config"
local_prod = yaml.safe_load(doc["data"]["local-production.yaml"])
assert local_prod == {}
prod = yaml.safe_load(doc["data"]["production.yaml"])
assert prod["deployments"]["helm"]["airflow"]["useAstroSecurityManager"] is True
airflow_local_settings = prod["deployments"]["helm"]["airflow"][
"airflowLocalSettings"
]
with tempfile.NamedTemporaryFile() as f:
f.write(airflow_local_settings.encode())
f.flush()
# validate embedded python. returns if black succeeds, else raises CalledProcessError.
check_call(["black", "-q", f.name])
def test_houston_configmap():
"""Validate the houston configmap and its embedded data."""
docs = render_chart(
show_only=["charts/astronomer/templates/houston/houston-configmap.yaml"],
)
common_test_cases(docs)
doc = docs[0]
prod = yaml.safe_load(doc["data"]["production.yaml"])
# Ensure airflow elasticsearch param is at correct location
assert prod["deployments"]["helm"]["airflow"]["elasticsearch"]["enabled"] is True
with pytest.raises(KeyError):
# Ensure sccEnabled is not defined by default
assert prod["deployments"]["helm"]["sccEnabled"] is False
def test_houston_configmap_with_scc_enabled():
    """Validate the houston configmap and its embedded data with sccEnabled."""
docs = render_chart(
values={"global": {"sccEnabled": True}},
show_only=["charts/astronomer/templates/houston/houston-configmap.yaml"],
)
common_test_cases(docs)
doc = docs[0]
prod = yaml.safe_load(doc["data"]["production.yaml"])
assert prod["deployments"]["helm"]["sccEnabled"] is True
def test_houston_configmap_with_azure_enabled():
"""Validate the houston configmap and its embedded data with azure enabled."""
docs = render_chart(
values={"global": {"azure": {"enabled": True}}},
show_only=["charts/astronomer/templates/houston/houston-configmap.yaml"],
)
common_test_cases(docs)
doc = docs[0]
prod = yaml.safe_load(doc["data"]["production.yaml"])
with pytest.raises(KeyError):
assert prod["deployments"]["helm"]["sccEnabled"] is False
livenessProbe = prod["deployments"]["helm"]["airflow"]["webserver"]["livenessProbe"]
assert livenessProbe["failureThreshold"] == 25
assert livenessProbe["periodSeconds"] == 10
|
tests/ut/scripts/test_start.py | mindspore-ai/mindinsight | 216 | 12601806 | <gh_stars>100-1000
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test start script.
Usage:
    pytest tests/ut/scripts/test_start.py
"""
import pytest
from mindinsight.conf import settings
from mindinsight.scripts.start import Command
from mindinsight.utils.exceptions import SettingValueError
class TestStartScript:
"""Test start script."""
@pytest.mark.parametrize('value', [6143, 2147483648, 2.1, True, False, 'str'])
def test_offline_debugger_mem_limit_value(self, value):
"""Test offline debugger mem limit value."""
cmd = Command()
settings.OFFLINE_DEBUGGER_MEM_LIMIT = value
with pytest.raises(SettingValueError) as exc:
cmd.check_offline_debugger_setting()
expected_msg = f"[SettingValueError] code: 5054000D, msg: Offline debugger memory limit " \
f"should be integer ranging from 6144 to 2147483647 MB, but got %s. Please check the " \
f"environment variable MINDINSIGHT_OFFLINE_DEBUGGER_MEM_LIMIT" % value
assert expected_msg == str(exc.value)
settings.OFFLINE_DEBUGGER_MEM_LIMIT = 16 * 1024
@pytest.mark.parametrize('value', [0, 3, 1.1, True, False, 'str'])
def test_max_offline_debugger_session_num_value(self, value):
"""Test offline debugger mem limit type."""
cmd = Command()
settings.MAX_OFFLINE_DEBUGGER_SESSION_NUM = value
with pytest.raises(SettingValueError) as exc:
cmd.check_offline_debugger_setting()
expected_msg = f"[SettingValueError] code: 5054000D, msg: Max offline debugger session number " \
f"should be integer ranging from 1 to 2, but got %s. Please check the environment " \
f"variable MINDINSIGHT_MAX_OFFLINE_DEBUGGER_SESSION_NUM" % value
assert expected_msg == str(exc.value)
settings.MAX_OFFLINE_DEBUGGER_SESSION_NUM = 2
|
tests_obsolete/extension/dataflow_/reduceadd_valid/test_dataflow_reduceadd_valid.py | akmaru/veriloggen | 232 | 12601827 | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import dataflow_reduceadd_valid
expected_verilog = """
module test
(
);
reg CLK;
reg RST;
reg [32-1:0] xdata;
reg xvalid;
wire xready;
wire [32-1:0] zdata;
wire zvalid;
reg zready;
wire [1-1:0] vdata;
wire vvalid;
reg vready;
main
uut
(
.CLK(CLK),
.RST(RST),
.xdata(xdata),
.xvalid(xvalid),
.xready(xready),
.zdata(zdata),
.zvalid(zvalid),
.zready(zready),
.vdata(vdata),
.vvalid(vvalid),
.vready(vready)
);
reg reset_done;
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
reset_done = 0;
xdata = 0;
xvalid = 0;
zready = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
reset_done = 1;
@(posedge CLK);
#1;
#10000;
$finish;
end
reg [32-1:0] xfsm;
localparam xfsm_init = 0;
reg [32-1:0] _tmp_0;
localparam xfsm_1 = 1;
localparam xfsm_2 = 2;
localparam xfsm_3 = 3;
localparam xfsm_4 = 4;
localparam xfsm_5 = 5;
localparam xfsm_6 = 6;
localparam xfsm_7 = 7;
localparam xfsm_8 = 8;
localparam xfsm_9 = 9;
localparam xfsm_10 = 10;
localparam xfsm_11 = 11;
localparam xfsm_12 = 12;
localparam xfsm_13 = 13;
localparam xfsm_14 = 14;
localparam xfsm_15 = 15;
localparam xfsm_16 = 16;
localparam xfsm_17 = 17;
localparam xfsm_18 = 18;
localparam xfsm_19 = 19;
localparam xfsm_20 = 20;
localparam xfsm_21 = 21;
localparam xfsm_22 = 22;
localparam xfsm_23 = 23;
localparam xfsm_24 = 24;
always @(posedge CLK) begin
if(RST) begin
xfsm <= xfsm_init;
_tmp_0 <= 0;
end else begin
case(xfsm)
xfsm_init: begin
xvalid <= 0;
if(reset_done) begin
xfsm <= xfsm_1;
end
end
xfsm_1: begin
xfsm <= xfsm_2;
end
xfsm_2: begin
xfsm <= xfsm_3;
end
xfsm_3: begin
xfsm <= xfsm_4;
end
xfsm_4: begin
xfsm <= xfsm_5;
end
xfsm_5: begin
xfsm <= xfsm_6;
end
xfsm_6: begin
xfsm <= xfsm_7;
end
xfsm_7: begin
xfsm <= xfsm_8;
end
xfsm_8: begin
xfsm <= xfsm_9;
end
xfsm_9: begin
xfsm <= xfsm_10;
end
xfsm_10: begin
xfsm <= xfsm_11;
end
xfsm_11: begin
xvalid <= 1;
xfsm <= xfsm_12;
end
xfsm_12: begin
if(xready) begin
xdata <= xdata + 1;
end
if(xready) begin
_tmp_0 <= _tmp_0 + 1;
end
if((_tmp_0 == 5) && xready) begin
xvalid <= 0;
end
if((_tmp_0 == 5) && xready) begin
xfsm <= xfsm_13;
end
end
xfsm_13: begin
xfsm <= xfsm_14;
end
xfsm_14: begin
xfsm <= xfsm_15;
end
xfsm_15: begin
xfsm <= xfsm_16;
end
xfsm_16: begin
xfsm <= xfsm_17;
end
xfsm_17: begin
xfsm <= xfsm_18;
end
xfsm_18: begin
xfsm <= xfsm_19;
end
xfsm_19: begin
xfsm <= xfsm_20;
end
xfsm_20: begin
xfsm <= xfsm_21;
end
xfsm_21: begin
xfsm <= xfsm_22;
end
xfsm_22: begin
xfsm <= xfsm_23;
end
xfsm_23: begin
xvalid <= 1;
if(xready) begin
xdata <= xdata + 1;
end
if(xready) begin
_tmp_0 <= _tmp_0 + 1;
end
if((_tmp_0 == 100) && xready) begin
xvalid <= 0;
end
if((_tmp_0 == 100) && xready) begin
xfsm <= xfsm_24;
end
end
endcase
end
end
reg [32-1:0] zfsm;
localparam zfsm_init = 0;
localparam zfsm_1 = 1;
localparam zfsm_2 = 2;
localparam zfsm_3 = 3;
localparam zfsm_4 = 4;
localparam zfsm_5 = 5;
localparam zfsm_6 = 6;
localparam zfsm_7 = 7;
localparam zfsm_8 = 8;
always @(posedge CLK) begin
if(RST) begin
zfsm <= zfsm_init;
end else begin
case(zfsm)
zfsm_init: begin
zready <= 0;
if(reset_done) begin
zfsm <= zfsm_1;
end
end
zfsm_1: begin
zfsm <= zfsm_2;
end
zfsm_2: begin
if(zvalid && vvalid) begin
zready <= 1;
end
if(zvalid && vvalid) begin
zfsm <= zfsm_3;
end
end
zfsm_3: begin
zready <= 0;
zfsm <= zfsm_4;
end
zfsm_4: begin
zready <= 0;
zfsm <= zfsm_5;
end
zfsm_5: begin
zready <= 0;
zfsm <= zfsm_6;
end
zfsm_6: begin
zready <= 0;
zfsm <= zfsm_7;
end
zfsm_7: begin
zready <= 0;
zfsm <= zfsm_8;
end
zfsm_8: begin
zfsm <= zfsm_2;
end
endcase
end
end
reg [32-1:0] vfsm;
localparam vfsm_init = 0;
localparam vfsm_1 = 1;
localparam vfsm_2 = 2;
localparam vfsm_3 = 3;
localparam vfsm_4 = 4;
localparam vfsm_5 = 5;
localparam vfsm_6 = 6;
localparam vfsm_7 = 7;
localparam vfsm_8 = 8;
always @(posedge CLK) begin
if(RST) begin
vfsm <= vfsm_init;
end else begin
case(vfsm)
vfsm_init: begin
vready <= 0;
if(reset_done) begin
vfsm <= vfsm_1;
end
end
vfsm_1: begin
vfsm <= vfsm_2;
end
vfsm_2: begin
if(zvalid && vvalid) begin
vready <= 1;
end
if(zvalid && vvalid) begin
vfsm <= vfsm_3;
end
end
vfsm_3: begin
vready <= 0;
vfsm <= vfsm_4;
end
vfsm_4: begin
vready <= 0;
vfsm <= vfsm_5;
end
vfsm_5: begin
vready <= 0;
vfsm <= vfsm_6;
end
vfsm_6: begin
vready <= 0;
vfsm <= vfsm_7;
end
vfsm_7: begin
vready <= 0;
vfsm <= vfsm_8;
end
vfsm_8: begin
vfsm <= vfsm_2;
end
endcase
end
end
always @(posedge CLK) begin
if(reset_done) begin
if(xvalid && xready) begin
$display("xdata=%d", xdata);
end
if(zvalid && zready) begin
$display("zdata=%d", zdata);
end
if(vvalid && vready) begin
$display("vdata=%d", vdata);
end
end
end
endmodule
module main
(
input CLK,
input RST,
input [32-1:0] xdata,
input xvalid,
output xready,
output [32-1:0] zdata,
output zvalid,
input zready,
output [1-1:0] vdata,
output vvalid,
input vready
);
wire [32-1:0] _dataflow_times_data_1;
wire _dataflow_times_valid_1;
wire _dataflow_times_ready_1;
wire [64-1:0] _dataflow_times_mul_odata_1;
reg [64-1:0] _dataflow_times_mul_odata_reg_1;
assign _dataflow_times_data_1 = _dataflow_times_mul_odata_reg_1;
wire _dataflow_times_mul_ovalid_1;
reg _dataflow_times_mul_valid_reg_1;
assign _dataflow_times_valid_1 = _dataflow_times_mul_valid_reg_1;
wire _dataflow_times_mul_enable_1;
wire _dataflow_times_mul_update_1;
assign _dataflow_times_mul_enable_1 = (_dataflow_times_ready_1 || !_dataflow_times_valid_1) && (xready && xready) && (xvalid && xvalid);
assign _dataflow_times_mul_update_1 = _dataflow_times_ready_1 || !_dataflow_times_valid_1;
multiplier_0
_dataflow_times_mul_1
(
.CLK(CLK),
.RST(RST),
.update(_dataflow_times_mul_update_1),
.enable(_dataflow_times_mul_enable_1),
.valid(_dataflow_times_mul_ovalid_1),
.a(xdata),
.b(xdata),
.c(_dataflow_times_mul_odata_1)
);
assign xready = (_dataflow_times_ready_1 || !_dataflow_times_valid_1) && (xvalid && xvalid) && ((_dataflow_times_ready_1 || !_dataflow_times_valid_1) && (xvalid && xvalid));
reg [32-1:0] _dataflow_reduceadd_data_4;
reg _dataflow_reduceadd_valid_4;
wire _dataflow_reduceadd_ready_4;
reg [5-1:0] _dataflow_reduceadd_count_4;
reg [1-1:0] _dataflow_pulse_data_7;
reg _dataflow_pulse_valid_7;
wire _dataflow_pulse_ready_7;
reg [5-1:0] _dataflow_pulse_count_7;
assign _dataflow_times_ready_1 = (_dataflow_reduceadd_ready_4 || !_dataflow_reduceadd_valid_4) && _dataflow_times_valid_1 && ((_dataflow_pulse_ready_7 || !_dataflow_pulse_valid_7) && _dataflow_times_valid_1);
assign zdata = _dataflow_reduceadd_data_4;
assign zvalid = _dataflow_reduceadd_valid_4;
assign _dataflow_reduceadd_ready_4 = zready;
assign vdata = _dataflow_pulse_data_7;
assign vvalid = _dataflow_pulse_valid_7;
assign _dataflow_pulse_ready_7 = vready;
always @(posedge CLK) begin
if(RST) begin
_dataflow_times_mul_odata_reg_1 <= 0;
_dataflow_times_mul_valid_reg_1 <= 0;
_dataflow_reduceadd_data_4 <= 1'sd0;
_dataflow_reduceadd_count_4 <= 0;
_dataflow_reduceadd_valid_4 <= 0;
_dataflow_pulse_data_7 <= 1'sd0;
_dataflow_pulse_count_7 <= 0;
_dataflow_pulse_valid_7 <= 0;
end else begin
if(_dataflow_times_ready_1 || !_dataflow_times_valid_1) begin
_dataflow_times_mul_odata_reg_1 <= _dataflow_times_mul_odata_1;
end
if(_dataflow_times_ready_1 || !_dataflow_times_valid_1) begin
_dataflow_times_mul_valid_reg_1 <= _dataflow_times_mul_ovalid_1;
end
if((_dataflow_reduceadd_ready_4 || !_dataflow_reduceadd_valid_4) && _dataflow_times_ready_1 && _dataflow_times_valid_1) begin
_dataflow_reduceadd_data_4 <= _dataflow_reduceadd_data_4 + _dataflow_times_data_1;
end
if((_dataflow_reduceadd_ready_4 || !_dataflow_reduceadd_valid_4) && _dataflow_times_ready_1 && _dataflow_times_valid_1) begin
_dataflow_reduceadd_count_4 <= (_dataflow_reduceadd_count_4 == 4'sd4 - 1)? 0 : _dataflow_reduceadd_count_4 + 1;
end
if(_dataflow_reduceadd_valid_4 && _dataflow_reduceadd_ready_4) begin
_dataflow_reduceadd_valid_4 <= 0;
end
if((_dataflow_reduceadd_ready_4 || !_dataflow_reduceadd_valid_4) && _dataflow_times_ready_1) begin
_dataflow_reduceadd_valid_4 <= _dataflow_times_valid_1;
end
if((_dataflow_reduceadd_ready_4 || !_dataflow_reduceadd_valid_4) && _dataflow_times_ready_1 && _dataflow_times_valid_1 && (_dataflow_reduceadd_count_4 == 0)) begin
_dataflow_reduceadd_data_4 <= 1'sd0 + _dataflow_times_data_1;
end
if((_dataflow_pulse_ready_7 || !_dataflow_pulse_valid_7) && _dataflow_times_ready_1 && _dataflow_times_valid_1) begin
_dataflow_pulse_data_7 <= _dataflow_pulse_count_7 == 4'sd4 - 1;
end
if((_dataflow_pulse_ready_7 || !_dataflow_pulse_valid_7) && _dataflow_times_ready_1 && _dataflow_times_valid_1) begin
_dataflow_pulse_count_7 <= (_dataflow_pulse_count_7 == 4'sd4 - 1)? 0 : _dataflow_pulse_count_7 + 1;
end
if(_dataflow_pulse_valid_7 && _dataflow_pulse_ready_7) begin
_dataflow_pulse_valid_7 <= 0;
end
if((_dataflow_pulse_ready_7 || !_dataflow_pulse_valid_7) && _dataflow_times_ready_1) begin
_dataflow_pulse_valid_7 <= _dataflow_times_valid_1;
end
if((_dataflow_pulse_ready_7 || !_dataflow_pulse_valid_7) && _dataflow_times_ready_1 && _dataflow_times_valid_1 && (_dataflow_pulse_count_7 == 0)) begin
_dataflow_pulse_data_7 <= _dataflow_pulse_count_7 == 4'sd4 - 1;
end
end
end
endmodule
module multiplier_0
(
input CLK,
input RST,
input update,
input enable,
output valid,
input [32-1:0] a,
input [32-1:0] b,
output [64-1:0] c
);
reg valid_reg0;
reg valid_reg1;
reg valid_reg2;
reg valid_reg3;
reg valid_reg4;
reg valid_reg5;
assign valid = valid_reg5;
always @(posedge CLK) begin
if(RST) begin
valid_reg0 <= 0;
valid_reg1 <= 0;
valid_reg2 <= 0;
valid_reg3 <= 0;
valid_reg4 <= 0;
valid_reg5 <= 0;
end else begin
if(update) begin
valid_reg0 <= enable;
valid_reg1 <= valid_reg0;
valid_reg2 <= valid_reg1;
valid_reg3 <= valid_reg2;
valid_reg4 <= valid_reg3;
valid_reg5 <= valid_reg4;
end
end
end
multiplier_core_0
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_0
(
input CLK,
input update,
input [32-1:0] a,
input [32-1:0] b,
output [64-1:0] c
);
reg [32-1:0] _a;
reg [32-1:0] _b;
wire signed [64-1:0] _mul;
reg signed [64-1:0] _pipe_mul0;
reg signed [64-1:0] _pipe_mul1;
reg signed [64-1:0] _pipe_mul2;
reg signed [64-1:0] _pipe_mul3;
reg signed [64-1:0] _pipe_mul4;
assign _mul = $signed({ 1'd0, _a }) * $signed({ 1'd0, _b });
assign c = _pipe_mul4;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
_pipe_mul1 <= _pipe_mul0;
_pipe_mul2 <= _pipe_mul1;
_pipe_mul3 <= _pipe_mul2;
_pipe_mul4 <= _pipe_mul3;
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = dataflow_reduceadd_valid.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
Python/Tests/TestData/TestExecutor/test_stack_trace.py | techkey/PTVS | 404 | 12601852 | import unittest
class StackTraceTests(unittest.TestCase):
def test_bad_import(self):
obj = Utility()
obj.instance_method_a()
def test_not_equal(self):
self.assertEqual(1, 2)
def global_func():
def local_func():
import not_a_module # trigger exception
local_func()
class Utility(object):
@staticmethod
def class_static():
global_func()
def instance_method_b(self):
Utility.class_static()
def instance_method_a(self):
self.instance_method_b()
if __name__ == '__main__':
unittest.main()
|
Python/136.SingleNumber.py | nizD/LeetCode-Solutions | 263 | 12601955 | <reponame>nizD/LeetCode-Solutions<filename>Python/136.SingleNumber.py
# In this problem we use the Counter object: when we pass a list to it, it returns a
# dictionary that has the list's elements as keys and their occurrences as values.
from collections import Counter
class Solution(object):
def singleNumber(self, nums):
        c = Counter(nums)  # dictionary mapping each element of the list to its number of occurrences
        sorted_items = sorted(c.items(), key=lambda x: x[1])  # sort (element, count) pairs by count; the single number comes first
        return sorted_items[0][0]  # sorted() returns a list of (key, value) pairs, so the single number is the key of the first pair
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBinggoCorp.py | fake-name/ReadableWebProxy | 193 | 12601958 | def extractBinggoCorp(item):
"""
# Binggo & Corp Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if '<NAME>' in item['title'] and 'Chapter' in item['title']:
return buildReleaseMessageWithType(item, '<NAME>', vol, chp, frag=frag, postfix=postfix)
if '<NAME>' in item['title'] and 'Chapter' in item['title']:
return buildReleaseMessageWithType(item, '<NAME>', vol, chp, frag=frag, postfix=postfix)
return False
|
utils/bboxes.py | sloppyjuicy/ssd_detectors | 316 | 12601974 | <filename>utils/bboxes.py
import numpy as np
from numpy.linalg import norm
eps = 1e-10
def rot_matrix(theta):
s, c = np.sin(theta), np.cos(theta)
return np.array([[c, -s],[s, c]])
def polygon_to_rbox(xy):
# center point plus width, height and orientation angle
tl, tr, br, bl = xy
# length of top and bottom edge
dt, db = tr-tl, bl-br
    # center is mean of all 4 vertices
cx, cy = c = np.sum(xy, axis=0) / len(xy)
# width is mean of top and bottom edge length
w = (norm(dt) + norm(db)) / 2.
    # height is distance from center to top edge plus distance from center to bottom edge
h = norm(np.cross(dt, tl-c))/(norm(dt)+eps) + norm(np.cross(db, br-c))/(norm(db)+eps)
#h = point_line_distance(c, tl, tr) + point_line_distance(c, br, bl)
#h = (norm(tl-bl) + norm(tr-br)) / 2.
# angle is mean of top and bottom edge angle
theta = (np.arctan2(dt[0], dt[1]) + np.arctan2(db[0], db[1])) / 2.
return np.array([cx, cy, w, h, theta])
def rbox_to_polygon(rbox):
cx, cy, w, h, theta = rbox
box = np.array([[-w,h],[w,h],[w,-h],[-w,-h]]) / 2.
box = np.dot(box, rot_matrix(theta))
box += rbox[:2]
return box
def polygon_to_rbox2(xy):
# two points at the top left and top right corner plus height
tl, tr, br, bl = xy
# length of top and bottom edge
dt, db = tr-tl, bl-br
# height is mean between distance from top to bottom right and distance from top edge to bottom left
h = (norm(np.cross(dt, tl-br)) + norm(np.cross(dt, tr-bl))) / (2*(norm(dt)+eps))
return np.hstack((tl,tr,h))
def rbox2_to_polygon(rbox):
x1, y1, x2, y2, h = rbox
alpha = np.arctan2(x1-x2, y2-y1)
dx = -h*np.cos(alpha)
dy = -h*np.sin(alpha)
xy = np.reshape([x1,y1,x2,y2,x2+dx,y2+dy,x1+dx,y1+dy], (-1,2))
return xy
def polygon_to_rbox3(xy):
    # two points at the center of the left and right edge plus height
tl, tr, br, bl = xy
# length of top and bottom edge
dt, db = tr-tl, bl-br
# height is mean between distance from top to bottom right and distance from top edge to bottom left
h = (norm(np.cross(dt, tl-br)) + norm(np.cross(dt, tr-bl))) / (2*(norm(dt)+eps))
p1 = (tl + bl) / 2.
p2 = (tr + br) / 2.
return np.hstack((p1,p2,h))
def rbox3_to_polygon(rbox):
x1, y1, x2, y2, h = rbox
alpha = np.arctan2(x1-x2, y2-y1)
dx = -h*np.cos(alpha) / 2.
dy = -h*np.sin(alpha) / 2.
xy = np.reshape([x1-dx,y1-dy,x2-dx,y2-dy,x2+dx,y2+dy,x1+dx,y1+dy], (-1,2))
return xy
def polygon_to_box(xy, box_format='xywh'):
# minimum axis aligned bounding box containing some points
xy = np.reshape(xy, (-1,2))
xmin, ymin = np.min(xy, axis=0)
xmax, ymax = np.max(xy, axis=0)
if box_format == 'xywh':
box = [xmin, ymin, xmax-xmin, ymax-ymin]
elif box_format == 'xyxy':
box = [xmin, ymin, xmax, ymax]
if box_format == 'polygon':
box = [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]
return np.array(box)
def iou(box, boxes):
"""Computes the intersection over union for a given axis
aligned bounding box with several others.
# Arguments
box: Bounding box, numpy array of shape (4).
(x1, y1, x2, y2)
boxes: Reference bounding boxes, numpy array of
shape (num_boxes, 4).
# Return
iou: Intersection over union,
numpy array of shape (num_boxes).
"""
# compute intersection
inter_upleft = np.maximum(boxes[:, :2], box[:2])
inter_botright = np.minimum(boxes[:, 2:4], box[2:])
inter_wh = inter_botright - inter_upleft
inter_wh = np.maximum(inter_wh, 0)
inter = inter_wh[:, 0] * inter_wh[:, 1]
# compute union
area_pred = (box[2] - box[0]) * (box[3] - box[1])
area_gt = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
union = area_pred + area_gt - inter
# compute iou
iou = inter / union
return iou
def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):
"""Does None-Maximum Suppresion on detection results.
Intuitive but slow as hell!!!
    # Arguments
boxes: Array of bounding boxes (boxes, xmin + ymin + xmax + ymax).
        confs: Array of corresponding confidence values.
iou_threshold: Intersection over union threshold used for comparing
overlapping boxes.
top_k: Maximum number of returned indices.
# Return
List of remaining indices.
"""
idxs = np.argsort(-confs)
selected = []
for idx in idxs:
if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):
continue
selected.append(idx)
if len(selected) >= top_k:
break
return selected
def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):
"""Does None-Maximum Suppresion on detection results.
    # Arguments
boxes: Array of bounding boxes (boxes, xmin + ymin + xmax + ymax).
        confs: Array of corresponding confidence values.
        overlap_threshold: Overlap threshold used for suppressing boxes.
top_k: Maximum number of returned indices.
# Return
List of remaining indices.
# References
- <NAME> Felzenszwalb, <NAME>. and <NAME>.
[Discriminatively Trained Deformable Part Models, Release 5](http://people.cs.uchicago.edu/~rbg/latent-release5/)
"""
eps = 1e-15
boxes = np.asarray(boxes, dtype='float32')
pick = []
x1, y1, x2, y2 = boxes.T
idxs = np.argsort(confs)
area = (x2 - x1) * (y2 - y1)
while len(idxs) > 0:
i = idxs[-1]
pick.append(i)
if len(pick) >= top_k:
break
idxs = idxs[:-1]
xx1 = np.maximum(x1[i], x1[idxs])
yy1 = np.maximum(y1[i], y1[idxs])
xx2 = np.minimum(x2[i], x2[idxs])
yy2 = np.minimum(y2[i], y2[idxs])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
I = w * h
overlap = I / (area[idxs] + eps)
# as in Girshick et. al.
#U = area[idxs] + area[i] - I
#overlap = I / (U + eps)
idxs = idxs[overlap <= overlap_threshold]
return pick
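
if __name__ == "__main__":
    # Small illustrative demo (not part of the original module): two heavily
    # overlapping boxes and one separate box, in (xmin, ymin, xmax, ymax) format.
    demo_boxes = np.array([[0, 0, 10, 10],
                           [1, 1, 10, 10],
                           [20, 20, 30, 30]], dtype='float32')
    demo_confs = np.array([0.9, 0.8, 0.7])
    print(iou(demo_boxes[0], demo_boxes))  # overlaps with itself and box 1, not box 2
    print(non_maximum_suppression(demo_boxes, demo_confs, 0.5, top_k=10))  # expected to keep boxes 0 and 2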
|
arduino/__init__.py | mraje/tempcontrol | 146 | 12601983 | #!/usr/bin/env python
from arduino import *
|
src/aioflask/ctx.py | miguelgrinberg/aioflask | 189 | 12602084 | <filename>src/aioflask/ctx.py<gh_stars>100-1000
import sys
from greenletio import async_
from flask.ctx import *
from flask.ctx import AppContext as OriginalAppContext, \
RequestContext as OriginalRequestContext, _sentinel, _app_ctx_stack, \
_request_ctx_stack, appcontext_popped
class AppContext(OriginalAppContext):
async def apush(self):
"""Binds the app context to the current context."""
self.push()
async def apop(self, exc=_sentinel):
"""Pops the app context."""
try:
self._refcnt -= 1
if self._refcnt <= 0:
if exc is _sentinel: # pragma: no cover
exc = sys.exc_info()[1]
@async_
def do_teardown_async():
_app_ctx_stack.push(self)
self.app.do_teardown_appcontext(exc)
_app_ctx_stack.pop()
await do_teardown_async()
finally:
rv = _app_ctx_stack.pop()
assert rv is self, \
f"Popped wrong app context. ({rv!r} instead of {self!r})"
appcontext_popped.send(self.app)
async def __aenter__(self):
await self.apush()
return self
async def __aexit__(self, exc_type, exc_value, tb):
await self.apop(exc_value)
class RequestContext(OriginalRequestContext):
async def apush(self):
self.push()
async def apop(self, exc=_sentinel):
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
try:
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is _sentinel: # pragma: no cover
exc = sys.exc_info()[1]
@async_
def do_teardown():
_request_ctx_stack.push(self)
self.app.do_teardown_request(exc)
_request_ctx_stack.pop()
await do_teardown()
request_close = getattr(self.request, "close", None)
if request_close is not None: # pragma: no branch
request_close()
clear_request = True
finally:
rv = _request_ctx_stack.pop()
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ["werkzeug.request"] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
await app_ctx.apop(exc)
assert (
rv is self
), f"Popped wrong request context. ({rv!r} instead of {self!r})"
async def aauto_pop(self, exc):
if self.request.environ.get("flask._preserve_context") or (
exc is not None and self.app.preserve_context_on_exception
): # pragma: no cover
self.preserved = True
self._preserved_exc = exc
else:
await self.apop(exc)
async def __aenter__(self):
await self.apush()
return self
async def __aexit__(self, exc_type, exc_value, tb):
await self.aauto_pop(exc_value)
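# Illustrative usage sketch (not part of the original file), assuming an aioflask
# application object whose app_context()/test_request_context() return the async
# context classes defined above:
#
#   async with app.app_context():
#       ...  # code that needs the application context
#
#   async with app.test_request_context("/"):
#       ...  # code that needs a bound request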
|
examples/librosa_example.py | akhambhati/pytorch-NMF | 123 | 12602091 | <reponame>akhambhati/pytorch-NMF
import torch
import librosa
import numpy as np
import matplotlib.pyplot as plt
from librosa import display, feature
from torchnmf.nmf import NMFD
if __name__ == '__main__':
y, sr = librosa.load(librosa.util.example_audio_file())
y = torch.from_numpy(y)
windowsize = 2048
S = torch.stft(y, windowsize, window=torch.hann_window(windowsize)).pow(2).sum(2).sqrt()
S = torch.FloatTensor(S).unsqueeze(0)
R = 3
T = 400
F = S.shape[0] - 1
net = NMFD(S.shape, T=T, rank=R).cuda()
net.fit(S.cuda(), verbose=True)
V = net()
W, H = net.W.detach().cpu().numpy(), net.H.squeeze().detach().cpu().numpy()
V = V.squeeze().detach().cpu().numpy()
if len(W.shape) < 3:
W = W.reshape(*W.shape, 1)
plt.figure(figsize=(10, 8))
for i in range(R):
plt.subplot(3, R, i + 1)
display.specshow(librosa.amplitude_to_db(W[:, i], ref=np.max), y_axis='log')
plt.title('Template ' + str(i + 1))
plt.subplot(3, 1, 2)
display.specshow(librosa.amplitude_to_db(H, ref=np.max), x_axis='time')
plt.colorbar()
plt.title('Activations')
plt.subplot(3, 1, 3)
display.specshow(librosa.amplitude_to_db(V, ref=np.max), y_axis='log', x_axis='time')
plt.colorbar(format='%+2.0f dB')
plt.title('Reconstructed spectrogram')
plt.tight_layout()
plt.show()
|
CMSIS/DSP/SDFTools/examples/example4/main.py | DavidLesnjak/CMSIS_5 | 2,293 | 12602096 | import sched as s
import matplotlib.pyplot as plt
from custom import *
# Only ONE FileSink can be used since the data will be dumped
# into this global buffer for display with Matplotlib
# It will have to be cleaned and reworked in future to use better
# mechanism of communication with the main code
DISPBUF = np.zeros(16000)
print("Start")
nb,error = s.scheduler(DISPBUF)
print("Nb sched = %d" % nb)
plt.figure()
plt.plot(DISPBUF)
plt.show() |
l10n_br_point_of_sale/models/account_journal.py | kaoecoito/odoo-brasil | 181 | 12602105 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# © 2016 <NAME> <<EMAIL>>, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
metodos = [
('01', u'Dinheiro'),
('02', u'Cheque'),
('03', u'Cartão de Crédito'),
('04', u'Cartão de Débito'),
('05', u'Crédito Loja'),
('10', u'Vale Alimentacão'),
('11', u'Vale Presente'),
('13', u'Vale Combustível'),
('99', u'Outros'),
]
class AccountJournal(models.Model):
_inherit = 'account.journal'
metodo_pagamento = fields.Selection(metodos, string='Método de Pagamento')
|
doc/test_functions.py | tasugi/nnabla | 2,792 | 12602142 | #! /usr/bin/env python
from __future__ import print_function
import yaml
from collections import OrderedDict
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def ils(indent_level):
return ' ' * indent_level * 2
def print_yaml(y, indent_level=0):
if isinstance(y, list):
for i, v in enumerate(y):
print(ils(indent_level) + '- %d' % i)
print_yaml(v, indent_level + 1)
elif isinstance(y, OrderedDict):
for k, v in y.items():
print(ils(indent_level) + k + ':')
print_yaml(v, indent_level + 1)
elif isinstance(y, str):
print(ils(indent_level) + y.replace('\n', '\n' + ils(indent_level)))
else:
print(ils(indent_level) + str(y))
def main():
print_yaml(ordered_load(open('functions.yaml', 'r')))
if __name__ == '__main__':
main()
|
deep_recommenders/keras/models/nlp/__init__.py | LongmaoTeamTf/deep_recommenders | 143 | 12602186 | <filename>deep_recommenders/keras/models/nlp/__init__.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from deep_recommenders.keras.models.nlp.multi_head_attention import MultiHeadAttention
from deep_recommenders.keras.models.nlp.transformer import Transformer
|
chapter_9/pub_sub/pub_sub_sendclient.py | LifeOfGame/mongodb_redis | 183 | 12602217 | import redis
import json
import datetime
client = redis.Redis()
while True:
    message = input('Please enter the message to publish: ')
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
data = {'message': message, 'time': now_time}
client.publish('pubinfo', json.dumps(data))
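# Illustrative counterpart sketch (not part of the original file): a minimal
# subscriber for the 'pubinfo' channel, assuming the standard redis-py pub/sub API.
#
#   import redis, json
#   sub = redis.Redis().pubsub()
#   sub.subscribe('pubinfo')
#   for item in sub.listen():
#       if item['type'] == 'message':
#           print(json.loads(item['data']))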
|
stonesoup/deleter/base.py | Red-Portal/Stone-Soup-1 | 157 | 12602233 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from abc import abstractmethod
from typing import Set
from ..base import Base, Property
from ..types.track import Track
from ..types.update import Update
class Deleter(Base):
"""Deleter base class.
Proposes tracks for deletion.
"""
delete_last_pred: bool = Property(default=False, doc="Remove the state that caused a track to "
"be deleted if it is a prediction.")
@abstractmethod
def check_for_deletion(self, track: Track, **kwargs) -> bool:
"""Check if a given track should be deleted.
Parameters
----------
track : Track
A track object to be checked for deletion.
Returns
-------
bool
`True` if track should be deleted, `False` otherwise.
"""
pass
def delete_tracks(self, tracks: Set[Track], **kwargs) -> Set[Track]:
"""Generic/Base track deletion method.
Iterates through all tracks in a given list and calls
:meth:`~check_for_deletion` to determine which
tracks should be deleted and which should survive.
Parameters
----------
tracks : set of :class:`~.Track`
A set of :class:`~.Track` objects
Returns
-------
: set of :class:`~.Track`
Set of tracks proposed for deletion.
"""
tracks_to_delete = {track for track in tracks if self.check_for_deletion(track, **kwargs)}
if self.delete_last_pred:
for track in tracks_to_delete:
if not isinstance(track[-1], Update):
del track[-1]
del track.metadatas[-1]
return tracks_to_delete
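# Illustrative sketch (not part of the original file): a minimal concrete deleter
# that proposes tracks for deletion once they exceed a maximum number of states,
# following the Property pattern used above. The class and property names are
# hypothetical.
class MaximumLengthDeleter(Deleter):
    """Delete tracks that contain more than `max_length` states."""
    max_length: int = Property(default=10, doc="Maximum number of states a track may hold.")

    def check_for_deletion(self, track: Track, **kwargs) -> bool:
        # Propose the track for deletion once it grows past the configured length
        return len(track) > self.max_length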
|
Behavioral/Memento/python/memento.py | jerryshueh/design-patterns | 294 | 12602327 | <gh_stars>100-1000
class Originator:
_state = None
class Memento:
def __init__(self, state):
self._state = state
def setState(self, state):
self._state = state
def getState(self):
return self._state
def __init__(self, state = None):
self._state = state
def set(self, state):
if state != None:
self._state = state
def createMemento(self):
return self.Memento(self._state)
def restore(self, memento):
self._state = memento.getState()
return self._state
class Caretaker:
pointer = 0
savedStates = []
def saveMemento(self, element):
self.pointer += 1
self.savedStates.append(element)
def getMemento(self, index):
return self.savedStates[index-1]
def undo(self):
if self.pointer > 0:
self.pointer -= 1
return self.getMemento(self.pointer)
else:
return None
def redo(self):
if self.pointer < len(self.savedStates):
self.pointer += 1
return self.getMemento(self.pointer)
else:
return None
caretaker = Caretaker()
originator = Originator()
#Testing code
originator.set("Message")
caretaker.saveMemento(originator.createMemento())
print(originator.restore(caretaker.getMemento(caretaker.pointer)))
originator.set("Typo")
caretaker.saveMemento(originator.createMemento())
print(originator.restore(caretaker.getMemento(caretaker.pointer)))
originator.set(caretaker.undo())
print(originator.restore(caretaker.getMemento(caretaker.pointer)))
originator.set(caretaker.redo())
print(originator.restore(caretaker.getMemento(caretaker.pointer)))
|
6/master/src/openea/modules/base/optimizers.py | smurf-1119/knowledge-engeneering-experiment | 102 | 12602364 | <reponame>smurf-1119/knowledge-engeneering-experiment<filename>6/master/src/openea/modules/base/optimizers.py
import tensorflow as tf
def generate_optimizer(loss, learning_rate, var_list=None, opt='SGD'):
optimizer = get_optimizer(opt, learning_rate)
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
return optimizer.apply_gradients(grads_and_vars)
def get_optimizer(opt, learning_rate):
if opt == 'Adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate)
elif opt == 'Adadelta':
# To match the exact form in the original paper use 1.0.
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
elif opt == 'Adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else: # opt == 'SGD'
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
return optimizer
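# Illustrative usage sketch (not part of the original file), for a TF1-style graph
# where `loss` is an existing scalar tensor:
#
#   train_op = generate_optimizer(loss, learning_rate=0.01, opt='Adam')
#   # then run `train_op` inside the training loop, e.g. session.run(train_op, feed_dict=...)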
|
examples/Kane1985/Chapter2/Ex3.10.py | nouiz/pydy | 298 | 12602377 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 3.10 from Kane 1985."""
from __future__ import division
from sympy import cancel, collect, expand_trig, solve, symbols, trigsimp
from sympy import sin, cos
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot, dynamicsymbols, msprint
q1, q2, q3, q4, q5, q6, q7 = q = dynamicsymbols('q1:8')
u1, u2, u3, u4, u5, u6, u7 = u = dynamicsymbols('q1:8', level=1)
r, theta, b = symbols('r θ b', real=True, positive=True)
# define reference frames
R = ReferenceFrame('R') # fixed race rf, let R.z point upwards
A = R.orientnew('A', 'axis', [q7, R.z]) # rf that rotates with S* about R.z
# B.x, B.z are parallel with face of cone, B.y is perpendicular
B = A.orientnew('B', 'axis', [-theta, A.x])
S = ReferenceFrame('S')
S.set_ang_vel(A, u1*A.x + u2*A.y + u3*A.z)
C = ReferenceFrame('C')
C.set_ang_vel(A, u4*B.x + u5*B.y + u6*B.z)
# define points
pO = Point('O')
pS_star = pO.locatenew('S*', b*A.y)
pS_hat = pS_star.locatenew('S^', -r*B.y) # S^ touches the cone
pS1 = pS_star.locatenew('S1', -r*A.z) # S1 touches horizontal wall of the race
pS2 = pS_star.locatenew('S2', r*A.y) # S2 touches vertical wall of the race
pO.set_vel(R, 0)
pS_star.v2pt_theory(pO, R, A)
pS1.v2pt_theory(pS_star, R, S)
pS2.v2pt_theory(pS_star, R, S)
# Since S is rolling against R, v_S1_R = 0, v_S2_R = 0.
vc = [dot(p.vel(R), basis) for p in [pS1, pS2] for basis in R]
pO.set_vel(C, 0)
pS_star.v2pt_theory(pO, C, A)
pS_hat.v2pt_theory(pS_star, C, S)
# Since S is rolling against C, v_S^_C = 0.
# Cone has only angular velocity in R.z direction.
vc += [dot(pS_hat.vel(C), basis).subs(vc_map) for basis in A]
vc += [dot(C.ang_vel_in(R), basis) for basis in [R.x, R.y]]
vc_map = solve(vc, u)
# Pure rolling between S and C, dot(ω_C_S, B.y) = 0.
b_val = solve([dot(C.ang_vel_in(S), B.y).subs(vc_map).simplify()], b)[0][0]
print('b = {0}'.format(msprint(collect(cancel(expand_trig(b_val)), r))))
b_expected = r*(1 + sin(theta))/(cos(theta) - sin(theta))
assert trigsimp(b_val - b_expected) == 0
|
tutorials/tutorial_LabelImage.py | xemio/ANTsPy | 338 | 12602381 | """
# A tutorial about Label Images in ANTsPy
In ANTsPy, we have a special class for dealing with what I call
"Label Images" - a brain image where each pixel/voxel is associated with
a specific label. For instance, an atlas or parcellation is the prime example
of a label image. But `LabelImage` types don't *just* have labels... they
also can have real values associated with those labels. For instance, suppose
you have a set of Cortical Thickness values derived from an atlas, and you want
to assign those regional values *back* onto an actual brain image for plotting
or to perform analysis tasks which require some notion of spatial location.
`LabelImage` types let you do this.
Basically, to create a label image in *ANTsPy*, you need two things (one is
optional but highly recommended):
- a discrete atlas image (a normal `ANTsImage` type)
- (optionally) a pandas dataframe or python dictionary with a mapping
from discrete values in the atlas image to string atlas labels
This tutorial will show you all the beautiful things you can do with `LabelImage` types.
"""
"""
## A simple example
We will start with a simple example to demonstrate label images - a 2D square
with four regions
"""
import ants
import os
import numpy as np
import pandas as pd
# create discrete image
square = np.zeros((20,20))
square[:10,:10] = 1
square[:10,10:] = 2
square[10:,:10] = 3
square[10:,10:] = 4
# create regular ANTsImage from numpy array
img = ants.from_numpy(square).astype('uint8')
# plot image
#img.plot(cmap=None)
"""
Above, we created our discrete "atlas" image. Next, we will
create a dictionary containing the names for each value in
the atlas. We will make simple names.
"""
label_df = np.asarray([['TopRight', 'Right', 'Top'],
['BottomRight', 'Right', 'Bottom'],
['TopLeft', 'Left', 'Top'],
['BottomLeft', 'Left', 'Bottom']])
label_df = pd.DataFrame(label_df, index=[1,2,3,4],
columns=['Quadrant', 'Right/Left', 'Top/Bottom'])
atlas = ants.LabelImage(label_image=img, label_info=label_df)
"""
You can index a label image like a dictionary, and it will return
the unique image values corresponding to that label, or more than
one if appropriate.
"""
top_right_idx = atlas['TopRight']
print(top_right_idx) # should be 1
right_idxs = atlas['Right']
print(right_idxs) # should be [1, 2]
"""
## A real example
Now that we have the basics of the `ants.LabelImage` class down, we
can move on to a real example to show how this would work in practice.
In this example, we have a Freesurfer atlas (the Desikan-Killiany atlas,
aka "aparc+aseg.mgz") and a data frame of aggregated cortical thickness values
for a subset of those regions for a collection of subjects.
Our first task is to create a LabelImage for this atlas.
"""
"""
We start by loading in the label info as a pandas dataframe
"""
proc_dir = '/users/ncullen/desktop/projects/tadpole/data/processed/'
raw_dir = '/users/ncullen/desktop/projects/tadpole/data/raw/'
label_df = pd.read_csv(os.path.join(proc_dir, 'UCSF_FS_Map.csv'), index_col=0)
print(label_df.head())
"""
As you can see, the label dataframe has the the atlas values as the dataframe
index and a set of columns with different labels for each index.
Next, we load in the discrete atlas image.
"""
atlas_img = ants.image_read(os.path.join(raw_dir, 'freesurfer/aparc+aseg.mgz')).astype('uint32')
atlas_img.plot()
label_img = ants.LabelImage(label_image=atlas_img, label_info=label_df)
"""
Let's see this in action on a template
"""
t1_img = ants.image_read(os.path.join(raw_dir,'freesurfer/T1.mgz'))
t1_img.plot()
# set the label image
t1_img.set_label_image(atlas_img)
"""
Our second task is create an image for each subject that fills in the brain
region locations with the associated region's cortical thickness
"""
data = pd.read_csv(os.path.join())
|
sodapy/__init__.py | johnclary/sodapy | 349 | 12602388 | from sodapy.socrata import Socrata
from sodapy import version
__all__ = [
"Socrata",
]
__version__ = version.__version__
|
notebook/while_usage.py | vhn0912/python-snippets | 174 | 12602415 | i = 0
while i < 3:
print(i)
i += 1
# 0
# 1
# 2
i = 0
while i < 3:
print(i)
if i == 1:
print('!!BREAK!!')
break
i += 1
# 0
# 1
# !!BREAK!!
i = 0
while i < 3:
if i == 1:
print('!!CONTINUE!!')
i += 1
continue
print(i)
i += 1
# 0
# !!CONTINUE!!
# 2
i = 0
while i < 3:
print(i)
i += 1
else:
print('!!FINISH!!')
# 0
# 1
# 2
# !!FINISH!!
i = 0
while i < 3:
print(i)
if i == 1:
print('!!BREAK!!')
break
i += 1
else:
print('!!FINISH!!')
# 0
# 1
# !!BREAK!!
i = 0
while i < 3:
if i == 1:
print('!!SKIP!!')
i += 1
continue
print(i)
i += 1
else:
print('!!FINISH!!')
# 0
# !!SKIP!!
# 2
# !!FINISH!!
import time
start = time.time()
while True:
time.sleep(1)
print('processing...')
if time.time() - start > 5:
print('!!BREAK!!')
break
# processing...
# processing...
# processing...
# processing...
# processing...
# !!BREAK!!
start = time.time()
while 1:
time.sleep(1)
print('processing...')
if time.time() - start > 5:
print('!!BREAK!!')
break
# processing...
# processing...
# processing...
# processing...
# processing...
# !!BREAK!!
start = time.time()
while time.time() - start <= 5:
time.sleep(1)
print('processing...')
else:
print('!!FINISH!!')
# processing...
# processing...
# processing...
# processing...
# processing...
# !!FINISH!!
|
test/Parallel/failed-build/fixture/myfail.py | jcassagnol-public/scons | 1,403 | 12602449 | import os
import sys
import time
import http.client
sys.path.append(os.getcwd())
from teststate import Response
WAIT = 10
count = 0
conn = http.client.HTTPConnection("127.0.0.1", port=int(sys.argv[3]))
def check_test_state():
conn.request("GET", "/?get_mycopy_started=1")
response = conn.getresponse()
response.read()
status = response.status
return status == Response.OK.value
while not check_test_state() and count < WAIT:
time.sleep(0.1)
count += 0.1
if count >= WAIT:
sys.exit(99)
conn.request("GET", "/?set_myfail_done=1&pid=" + str(os.getpid()))
conn.close()
sys.exit(1)
|
test/test_plugins/test_wiki.py | fenwar/limbo | 369 | 12602486 | <gh_stars>100-1000
# -*- coding: UTF-8 -*-
import os
import sys
import vcr
DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(DIR, '../../limbo/plugins'))
from wiki import on_message
def test_basic():
with vcr.use_cassette('test/fixtures/wiki_basic.yaml'):
ret = on_message({"text": u"!wiki dog"}, None)
assert "member of the canidae family" in ret
assert "http://en.wikipedia.org/wiki/Dog" in ret
def test_unicode():
with vcr.use_cassette('test/fixtures/wiki_unicode.yaml'):
ret = on_message({"text": u"!wiki नेपाल"}, None)
# not blowing up == success
|
tests/SampleApps/python/python2-rest-framework-app/snippets/tests.py | samruddhikhandale/Oryx | 403 | 12602505 | from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from .models import Snippet
class SnippetAdminTests(TestCase):
def test_snippet_admin_can_create_snippets(self):
user = User.objects.create_superuser(
"superuser", '<EMAIL>', '<PASSWORD>'
)
self.client.force_login(user)
data = {
'title': 'Some Code',
'code': "print('Hello, World!')",
'owner': str(user.pk),
'language': 'python',
'style': 'friendly',
}
response = self.client.post(reverse('admin:snippets_snippet_add'), data)
self.assertRedirects(response, reverse('admin:snippets_snippet_changelist'))
        self.assertEqual(Snippet.objects.count(), 1)
|
pygrametl/drawntabletesting/formattable.py | ssrika17/pygrametl | 259 | 12602548 | <gh_stars>100-1000
"""Script that automatically format a drawn table testing table."""
# Copyright (c) 2021, Aalborg University (<EMAIL>)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import pygrametl.drawntabletesting as dtt
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + " file line")
sys.exit(1)
path = sys.argv[1]
point = int(sys.argv[2]) - 1 # Expected to be one-based
# Extracts the table from the document
with open(path, 'r') as f:
lines = f.readlines()
length = len(lines)
start = point
while start >= 0 and '|' in lines[start]:
start -= 1
start += 1 # Do not include the header
end = point
while end < length and '|' in lines[end]:
end += 1
end -= 1 # Do not include the delimiter
# The table's indention must be taken into account
table = ''.join(lines[start:end + 1])
first_char = table.find('|')
last_char = table.rfind('|')
prefix = table[:first_char]
suffix = table[last_char + 1:]
table = table[first_char:last_char + 1]
# The indention level must be added for each line
table = dtt.Table('', table, testconnection=object())
table = str(table).split('\n')
write = 0
indention = '\n' + ' ' * first_char
for output in range(start, end):
lines[output] = indention + table[write]
write += 1
lines[start] = prefix + table[0]
lines[end] = indention + table[-1] + suffix
# The file is updated to format the table
with open(path, 'w') as f:
f.writelines(lines)
|
src/admin/widgets/boolean.py | aimanow/sft | 280 | 12602557 | <gh_stars>100-1000
import wtforms
from godmode.widgets.base import BaseWidget
class BooleanWidget(BaseWidget):
field = wtforms.BooleanField()
def render_list(self, item):
value = getattr(item, self.name, None)
if value:
return "<i class='icon-ok' style='color: #0c0;'></i>"
return "<i class='icon-remove' style='color: #c00;'></i>"
class BooleanReverseWidget(BooleanWidget):
field = wtforms.BooleanField()
def render_list(self, item):
value = getattr(item, self.name, None)
if value:
return "<i class='icon-ok' style='color: #c00;'></i>"
return "<i class='icon-remove' style='color: #0c0;'></i>"
|
examples/warren_buffet.py | Mahesh-Salunke/financial_fundamentals | 122 | 12602577 | <filename>examples/warren_buffet.py
'''
Created on Sep 24, 2013
@author: akittredge
'''
from zipline.algorithm import TradingAlgorithm
from datetime import datetime
import pytz
from financial_fundamentals import sqlite_fundamentals_cache,\
mongo_fundamentals_cache, mongo_price_cache
from financial_fundamentals.accounting_metrics import QuarterlyEPS
from financial_fundamentals import sqlite_price_cache
from financial_fundamentals.indicies import DOW_TICKERS, S_P_500_TICKERS
from zipline.transforms.batch_transform import batch_transform
import numpy as np
import pandas as pd
import scipy.integrate
class BuysLowSellsHigh(TradingAlgorithm):
def initialize(self, earnings):
@batch_transform
def price_to_earnings(datapanel):
# Wes McKinney would probably do this differently.
p_e_ratio = datapanel.price / (earnings * 4) # assuming quarterly eps,
latest_date = p_e_ratio.first_valid_index()
latest_p_e_ratios = p_e_ratio.T[latest_date]
latest_p_e_ratios.name = 'p/e ratios on {}'.format(latest_date)
return latest_p_e_ratios.copy()
self.price_to_earnings_transform = price_to_earnings(refresh_period=1,
window_length=1)
self.init = True
def handle_data(self, data):
p_e_ratios = self.price_to_earnings_transform.handle_data(data)
unknowns = p_e_ratios[p_e_ratios.isnull()].fillna(0)
p_e_ratios = p_e_ratios.dropna()
p_e_ratios.sort(ascending=False)
desired_port = self.portfolio_weights(sorted_universe=p_e_ratios)
prices = pd.Series({item[0] : item[1]['price'] for item in data.iteritems()})
if self.init:
positions_value = self.portfolio.starting_cash
else:
positions_value = self.portfolio.positions_value + \
self.portfolio.cash
current_position = pd.Series({item[0] : item[1]['amount'] for item in
self.portfolio.positions.items()},
index=p_e_ratios.index).fillna(0)
self.rebalance_portfolio(desired_port=pd.concat([desired_port, unknowns]),
prices=prices,
positions_value=positions_value,
current_amount=current_position)
self.init = False
def portfolio_weights(self, sorted_universe):
'''the universe weighted by area of equal width intervals under a curve.'''
curve = lambda x : x # linear
interval_width = 1. / sorted_universe.size
interval_start = pd.Series(np.linspace(start=0,
stop=1,
num=sorted_universe.size,
endpoint=False),
index=sorted_universe.index)
weight_func = lambda x : scipy.integrate.quad(func=curve,
a=x,
b=(x + interval_width)
)[0]
portfolio_weight = interval_start.map(arg=weight_func)
portfolio_weights_summed_to_one = (portfolio_weight *
(1 / portfolio_weight.sum()))
return portfolio_weights_summed_to_one
def rebalance_portfolio(self, desired_port, prices,
positions_value, current_amount):
'''after zipline.examples.olmar'''
desired_amount = np.round(desired_port * positions_value / prices)
self.last_desired_port = desired_port
diff_amount = desired_amount - current_amount
for stock, order_amount in diff_amount[diff_amount != 0].dropna().iteritems():
self.order(sid=stock, amount=order_amount)
def buy_low_sell_high(start=datetime(2013, 6, 1, tzinfo=pytz.UTC),
end=datetime(2013, 9, 15, tzinfo=pytz.UTC),
metric=QuarterlyEPS,
fundamentals_cache=mongo_fundamentals_cache,
price_cache=mongo_price_cache,
stocks=S_P_500_TICKERS):
earnings = fundamentals_cache(metric).load_from_cache(stocks=stocks,
start=start,
end=end)
earnings[earnings < 0] = 0 # negative p/e's don't make sense.
algo = BuysLowSellsHigh(earnings=earnings)
prices = price_cache().load_from_cache(stocks=stocks, start=start, end=end)
results = algo.run(prices)
return results, algo
if __name__ == '__main__':
    buy_low_sell_high()
|
sdk/python/pulumi_kubernetes/helm/v3/_inputs.py | polivbr/pulumi-kubernetes | 277 | 12602611 | <filename>sdk/python/pulumi_kubernetes/helm/v3/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'RepositoryOptsArgs',
]
@pulumi.input_type
class RepositoryOptsArgs:
def __init__(__self__, *,
ca_file: Optional[pulumi.Input[str]] = None,
cert_file: Optional[pulumi.Input[str]] = None,
key_file: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
repo: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Specification defining the Helm chart repository to use.
:param pulumi.Input[str] ca_file: The Repository's CA File
:param pulumi.Input[str] cert_file: The repository's cert file
:param pulumi.Input[str] key_file: The repository's cert key file
        :param pulumi.Input[str] password: Password for HTTP basic authentication
:param pulumi.Input[str] repo: Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
:param pulumi.Input[str] username: Username for HTTP basic authentication
"""
if ca_file is not None:
pulumi.set(__self__, "ca_file", ca_file)
if cert_file is not None:
pulumi.set(__self__, "cert_file", cert_file)
if key_file is not None:
pulumi.set(__self__, "key_file", key_file)
if password is not None:
pulumi.set(__self__, "password", password)
if repo is not None:
pulumi.set(__self__, "repo", repo)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="caFile")
def ca_file(self) -> Optional[pulumi.Input[str]]:
"""
The Repository's CA File
"""
return pulumi.get(self, "ca_file")
@ca_file.setter
def ca_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ca_file", value)
@property
@pulumi.getter(name="certFile")
def cert_file(self) -> Optional[pulumi.Input[str]]:
"""
The repository's cert file
"""
return pulumi.get(self, "cert_file")
@cert_file.setter
def cert_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cert_file", value)
@property
@pulumi.getter(name="keyFile")
def key_file(self) -> Optional[pulumi.Input[str]]:
"""
The repository's cert key file
"""
return pulumi.get(self, "key_file")
@key_file.setter
def key_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_file", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password for HTTP basic authentication
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def repo(self) -> Optional[pulumi.Input[str]]:
"""
Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
"""
return pulumi.get(self, "repo")
@repo.setter
def repo(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repo", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username for HTTP basic authentication
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
|
tests/clients/test_client_interceptor.py | willtsai/python-sdk | 125 | 12602612 | # -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from dapr.clients.grpc._helpers import DaprClientInterceptor, _ClientCallDetails
class DaprClientInterceptorTests(unittest.TestCase):
def setUp(self):
self._fake_request = "fake request"
def fake_continuation(self, call_details, request):
return call_details
def test_intercept_unary_unary_single_header(self):
interceptor = DaprClientInterceptor([('api-token', 'test-token')])
call_details = _ClientCallDetails("method1", 10, None, None, None, None)
response = interceptor.intercept_unary_unary(
self.fake_continuation, call_details, self._fake_request)
self.assertIsNotNone(response)
self.assertEqual(1, len(response.metadata))
self.assertEqual([('api-token', 'test-token')], response.metadata)
def test_intercept_unary_unary_existing_metadata(self):
interceptor = DaprClientInterceptor([('api-token', 'test-token')])
call_details = _ClientCallDetails("method1", 10, [('header', 'value')], None, None, None)
response = interceptor.intercept_unary_unary(
self.fake_continuation, call_details, self._fake_request)
self.assertIsNotNone(response)
self.assertEqual(2, len(response.metadata))
self.assertEqual([('header', 'value'), ('api-token', 'test-token')], response.metadata)
|
src/masonite/validation/providers/ValidationProvider.py | cercos/masonite | 1,816 | 12602613 | """A Validation Service Provider."""
from ...providers import Provider
from .. import Validator, ValidationFactory, MessageBag
from ..commands.MakeRuleEnclosureCommand import MakeRuleEnclosureCommand
from ..commands.MakeRuleCommand import MakeRuleCommand
class ValidationProvider(Provider):
def __init__(self, application):
self.application = application
def register(self):
validator = Validator()
self.application.bind("validator", validator)
self.application.make("commands").add(
MakeRuleEnclosureCommand(self.application),
MakeRuleCommand(self.application),
)
MessageBag.get_errors = self._get_errors
self.application.make("view").share({"bag": MessageBag.view_helper})
validator.extend(ValidationFactory().registry)
def boot(self):
pass
def _get_errors(self):
request = self.application.make("request")
messages = []
for error, message in (
request.session.get_flashed_messages().get("errors", {}).items()
):
messages += message
return messages
|
EmmetNPP/emmet/context.py | chcg/npp | 192 | 12602623 | # coding=utf-8
import sys
import os
import os.path
import codecs
import json
import gc
import imp
import re
from file import File
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
is_python3 = sys.version_info[0] > 2
core_files = ['emmet-app.js', 'python-wrapper.js']
def should_use_unicode():
"""
    WinXP is unable to eval JS in a unicode object (while other OSes require it).
    This function checks whether we have to use unicode when reading files.
"""
ctx = PyV8.JSContext()
ctx.enter()
use_unicode = True
try:
ctx.eval(u'(function(){return;})()')
except:
use_unicode = False
ctx.leave()
return use_unicode
def make_path(filename):
return os.path.normpath(os.path.join(BASE_PATH, filename))
def js_log(message):
print(message)
def import_pyv8():
# Importing non-existing modules is a bit tricky in Python:
# if we simply call `import PyV8` and module doesn't exists,
# Python will cache this failed import and will always
# throw exception even if this module appear in PYTHONPATH.
# To prevent this, we have to manually test if
# PyV8.py(c) exists in PYTHONPATH before importing PyV8
if 'PyV8' in sys.modules and 'PyV8' not in globals():
# PyV8 was loaded by ST2, create global alias
globals()['PyV8'] = __import__('PyV8')
return
loaded = False
f, pathname, description = imp.find_module('PyV8')
bin_f, bin_pathname, bin_description = imp.find_module('_PyV8')
if f:
try:
imp.acquire_lock()
globals()['_PyV8'] = imp.load_module('_PyV8', bin_f, bin_pathname, bin_description)
globals()['PyV8'] = imp.load_module('PyV8', f, pathname, description)
imp.release_lock()
loaded = True
finally:
# Since we may exit via an exception, close fp explicitly.
if f:
f.close()
if bin_f:
bin_f.close()
if not loaded:
raise ImportError('No PyV8 module found')
class Context():
"""
Creates Emmet JS core context.
Before instantiating this class, make sure PyV8
is available in `sys.path`
@param files: Additional files to load with JS core
@param path: Path to Emmet extensions
@param contrib: Python objects to contribute to JS execution context
@param pyv8_path: Location of PyV8 binaries
"""
def __init__(self, files=[], ext_path=None, contrib=None, logger=None):
self.logger = logger
try:
import_pyv8()
except ImportError as e:
pass
self._ctx = None
self._contrib = contrib
self._should_load_extension = True
# detect reader encoding
self._use_unicode = None
self._core_files = [] + core_files + files
self._ext_path = None
self.set_ext_path(ext_path)
self._user_data = None
def log(self, message):
if self.logger:
self.logger(message)
def get_ext_path(self):
return self._ext_path
def set_ext_path(self, val):
try:
if val and val[:1] == '~':
val = os.path.expanduser(val)
val = os.path.abspath(val)
except Exception as e:
return
if val == self._ext_path:
return
self._ext_path = val
self.reset()
def load_extensions(self, path=None):
if path is None:
path = self._ext_path;
if path and os.path.isdir(path):
ext_files = []
self.log('Loading Emmet extensions from %s' % self._ext_path)
for dirname, dirnames, filenames in os.walk(self._ext_path):
for filename in filenames:
ext_files.append(os.path.join(dirname, filename))
self.js().locals.pyLoadExtensions(ext_files)
def js(self):
"Returns JS context"
if not self._ctx:
try:
import_pyv8()
except ImportError as e:
return None
if 'PyV8' not in sys.modules:
# Binary is not available yet
return None
if self._use_unicode is None:
self._use_unicode = should_use_unicode()
glue = u'\n' if self._use_unicode else '\n'
core_src = [self.read_js_file(make_path(f)) for f in self._core_files]
self._ctx = PyV8.JSContext()
self._ctx.enter()
self._ctx.eval(glue.join(core_src))
# load default snippets
self._ctx.locals.pyLoadSystemSnippets(self.read_js_file(make_path('snippets.json')))
# expose some methods
self._ctx.locals.log = js_log
self._ctx.locals.pyFile = File()
if self._contrib:
for k in self._contrib:
self._ctx.locals[k] = self._contrib[k]
if self._should_load_extension:
self._ctx.locals.pyResetUserData()
self._should_load_extension = False
self.load_extensions()
if self._user_data:
self._ctx.locals.pyLoadUserData(self._user_data)
self._user_data = None
return self._ctx
def load_user_data(self, data):
"Loads user data payload from JSON"
self._user_data = data
# self.js().locals.pyLoadUserData(data)
def reset(self):
"Resets JS execution context"
if self._ctx:
self._ctx.leave()
self._ctx = None
PyV8.JSEngine.collect()
gc.collect()
self._should_load_extension = True
def read_js_file(self, file_path):
if self._use_unicode:
f = codecs.open(file_path, 'r', 'utf-8')
else:
f = open(file_path, 'r')
content = f.read()
f.close()
return content
def eval(self, source):
self.js().eval(source)
def eval_js_file(self, file_path):
self.eval(self.read_js_file(file_path))
|
214 Shortest Palindrome.py | ChiFire/legend_LeetCode | 872 | 12602628 | <reponame>ChiFire/legend_LeetCode
"""
Given a string S, you are allowed to convert it to a palindrome by adding characters in front of it. Find and return the
shortest palindrome you can find by performing this transformation.
For example:
Given "aacecaaa", return "aaacecaaa".
Given "abcd", return "dcbabcd".
"""
__author__ = 'Daniel'
class Solution:
def shortestPalindrome(self, s):
"""
KMP
:type s: str
:rtype: str
"""
s_r = s[::-1]
l = len(s)
if l < 2:
return s
# construct T
T = [0 for _ in xrange(l+1)]
T[0] = -1
pos = 2
cnd = 0
while pos <= l:
if s[pos-1] == s[cnd]:
T[pos] = cnd+1
cnd += 1
pos += 1
elif T[cnd] != -1:
cnd = T[cnd]
else:
T[pos] = 0
cnd = 0
pos += 1
# search
i = 0
b = 0
while i+b < l:
if s[i] == s_r[i+b]:
i += 1
if i == l:
return s
elif T[i] != -1:
b = b+i-T[i]
i = T[i]
else:
b += 1
i = 0
# where it falls off
return s_r+s[i:]
if __name__ == "__main__":
assert Solution().shortestPalindrome("abcd") == "dcbabcd"
|
exoplanet-ml/experimental/beam/transit_search/prediction_fns.py | ritwik12/exoplanet-ml | 286 | 12602665 | <gh_stars>100-1000
# Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DoFns for making predictions on BLS detections with an AstroNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os.path
import apache_beam as beam
from apache_beam.metrics import Metrics
import numpy as np
import pandas as pd
import tensorflow as tf
from astronet import models
from astronet.data import preprocess
from tf_util import configdict
class MakePredictionsDoFn(beam.DoFn):
"""Generates predictions from a trained AstroNet model."""
def __init__(self, model_name, model_dir, config_name=None):
"""Initializes the DoFn.
Args:
model_name: Name of the model class.
model_dir: Directory containing a model checkpoint.
config_name: Optional name of the model configuration. If not specified,
the file 'config.json' in model_dir is used.
"""
# Look up the model class.
model_class = models.get_model_class(model_name)
# Find the latest checkpoint.
checkpoint_file = tf.train.latest_checkpoint(model_dir)
if not checkpoint_file:
raise ValueError("No checkpoint file found in: {}".format(model_dir))
# Get the model configuration.
if config_name:
config = models.get_model_config(model_name, config_name)
else:
with tf.gfile.Open(os.path.join(model_dir, "config.json")) as f:
config = json.load(f)
config = configdict.ConfigDict(config)
self.model_class = model_class
self.checkpoint_file = checkpoint_file
self.config = config
def start_bundle(self):
# Build the model.
g = tf.Graph()
with g.as_default():
example_placeholder = tf.placeholder(tf.string, shape=[])
parsed_features = tf.parse_single_example(
example_placeholder,
features={
feature_name: tf.FixedLenFeature([feature.length], tf.float32)
for feature_name, feature in self.config.inputs.features.items()
})
features = {}
for feature_name, value in parsed_features.items():
value = tf.expand_dims(value, 0) # Add batch dimension.
if self.config.inputs.features[feature_name].is_time_series:
features.setdefault("time_series_features", {})[feature_name] = value
else:
features.setdefault("aux_features", {})[feature_name] = value
model = self.model_class(
features=features,
labels=None,
hparams=self.config.hparams,
mode=tf.estimator.ModeKeys.PREDICT)
model.build()
saver = tf.train.Saver()
sess = tf.Session(graph=g)
saver.restore(sess, self.checkpoint_file)
tf.logging.info("Successfully loaded checkpoint %s at global step %d.",
self.checkpoint_file, sess.run(model.global_step))
self.example_placeholder = example_placeholder
self.model = model
self.session = sess
def finish_bundle(self):
self.session.close()
def process(self, inputs):
"""Generates predictions for a single light curve."""
lc = inputs["light_curve_for_predictions"]
time = np.array(lc.light_curve.time, dtype=np.float)
flux = np.array(lc.light_curve.flux, dtype=np.float)
norm_curve = np.array(lc.light_curve.norm_curve, dtype=np.float)
flux /= norm_curve # Normalize flux.
# Extract the TCE.
top_result = inputs["top_result"]
example = None
if top_result.HasField("fitted_params"):
tce = {
"tce_period": top_result.fitted_params.period,
"tce_duration": top_result.fitted_params.t0,
"tce_time0bk": top_result.fitted_params.duration,
}
try:
example = preprocess.generate_example_for_tce(time, flux, tce)
except ValueError:
Metrics.counter(self.__class__.__name__,
"generate-example-failures").inc()
if example is None:
prediction = -1
serialized_example = tf.train.Example().SerializeToString()
else:
serialized_example = example.SerializeToString()
prediction = self.session.run(
self.model.predictions,
feed_dict={self.example_placeholder: serialized_example})[0][0]
inputs["prediction"] = prediction
inputs["serialized_example"] = serialized_example
yield inputs
class ToCsvDoFn(beam.DoFn):
"""Converts predictions to CSV format."""
def __init__(self, planet_num=-1):
self.columns = [
("kepid", lambda inputs: inputs["kepler_id"]),
("planet_num", lambda inputs: inputs.get("planet_num", planet_num)),
("prediction", lambda inputs: inputs["prediction"]),
("period", lambda inputs: inputs["top_result"].result.period),
("duration", lambda inputs: inputs["top_result"].result.duration),
("epoch", lambda inputs: inputs["top_result"].result.epoch),
("score_method", lambda inputs: inputs["top_result"].score_method),
("score", lambda inputs: inputs["top_result"].score),
("depth", lambda inputs: inputs["top_result"].result.depth),
("baseline", lambda inputs: inputs["top_result"].result.baseline),
("complete_transits", lambda inputs: inputs["complete_transits"]),
("partial_transits", lambda inputs: inputs["partial_transits"]),
("nbins", lambda inputs: inputs["top_result"].result.nbins),
("bls_start",
lambda inputs: inputs["top_result"].result.bls_result.start),
("bls_width",
lambda inputs: inputs["top_result"].result.bls_result.width),
("bls_r", lambda inputs: inputs["top_result"].result.bls_result.r),
("bls_s", lambda inputs: inputs["top_result"].result.bls_result.s),
("bls_power",
lambda inputs: inputs["top_result"].result.bls_result.power),
("width_min",
lambda inputs: inputs["top_result"].result.options.width_min),
("width_max",
lambda inputs: inputs["top_result"].result.options.width_max),
("weight_min",
lambda inputs: inputs["top_result"].result.options.weight_min),
("weight_max",
lambda inputs: inputs["top_result"].result.options.weight_max),
]
def csv_header(self):
return ",".join([column[0] for column in self.columns])
def process(self, inputs):
df = pd.DataFrame([
collections.OrderedDict(
[(name, fn(inputs)) for name, fn in self.columns])
])
yield df.to_csv(header=False, index=False).strip()
|
run_mnist.py | odeonus/example_forgetting | 125 | 12602701 | from __future__ import print_function
import argparse
import numpy as np
import numpy.random as npr
import time
import os
import sys
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# Format time for printing purposes
def get_hms(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return h, m, s
# Setup basic CNN model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
if args.no_dropout:
x = F.relu(F.max_pool2d(self.conv2(x), 2))
else:
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
if not args.no_dropout:
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# Train model for one epoch
#
# example_stats: dictionary containing statistics accumulated over every presentation of example
#
def train(args, model, device, trainset, optimizer, epoch, example_stats):
train_loss = 0
correct = 0
total = 0
batch_size = args.batch_size
model.train()
# Get permutation to shuffle trainset
trainset_permutation_inds = npr.permutation(
np.arange(len(trainset.train_labels)))
for batch_idx, batch_start_ind in enumerate(
range(0, len(trainset.train_labels), batch_size)):
# Get trainset indices for batch
batch_inds = trainset_permutation_inds[batch_start_ind:
batch_start_ind + batch_size]
# Get batch inputs and targets, transform them appropriately
transformed_trainset = []
for ind in batch_inds:
transformed_trainset.append(trainset.__getitem__(ind)[0])
inputs = torch.stack(transformed_trainset)
targets = torch.LongTensor(
np.array(trainset.train_labels)[batch_inds].tolist())
# Map to available device
inputs, targets = inputs.to(device), targets.to(device)
# Forward propagation, compute loss, get predictions
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
_, predicted = torch.max(outputs.data, 1)
# Update statistics and loss
acc = predicted == targets
for j, index in enumerate(batch_inds):
# Get index in original dataset (not sorted by forgetting)
index_in_original_dataset = train_indx[index]
# Compute missclassification margin
output_correct_class = outputs.data[
j, targets[j].item()] # output for correct class
sorted_output, _ = torch.sort(outputs.data[j, :])
if acc[j]:
# Example classified correctly, highest incorrect class is 2nd largest output
output_highest_incorrect_class = sorted_output[-2]
else:
# Example misclassified, highest incorrect class is max output
output_highest_incorrect_class = sorted_output[-1]
margin = output_correct_class.item(
) - output_highest_incorrect_class.item()
# Add the statistics of the current training example to dictionary
index_stats = example_stats.get(index_in_original_dataset,
[[], [], []])
index_stats[0].append(loss[j].item())
index_stats[1].append(acc[j].sum().item())
index_stats[2].append(margin)
example_stats[index_in_original_dataset] = index_stats
# Update loss, backward propagate, update optimizer
loss = loss.mean()
train_loss += loss.item()
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
loss.backward()
optimizer.step()
sys.stdout.write('\r')
sys.stdout.write(
'| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%' %
(epoch, args.epochs, batch_idx + 1,
(len(trainset) // batch_size) + 1, loss.item(),
100. * correct.item() / total))
sys.stdout.flush()
# Add training accuracy to dict
index_stats = example_stats.get('train', [[], []])
index_stats[1].append(100. * correct.item() / float(total))
example_stats['train'] = index_stats
# Evaluate model predictions on heldout test data
#
# example_stats: dictionary containing statistics accumulated over every presentation of example
#
def test(args, model, device, testset, example_stats):
test_loss = 0
correct = 0
total = 0
test_batch_size = 32
model.eval()
for batch_idx, batch_start_ind in enumerate(
range(0, len(testset.test_labels), test_batch_size)):
# Get batch inputs and targets
transformed_testset = []
for ind in range(
batch_start_ind,
min(
len(testset.test_labels),
batch_start_ind + test_batch_size)):
transformed_testset.append(testset.__getitem__(ind)[0])
inputs = torch.stack(transformed_testset)
targets = torch.LongTensor(
np.array(testset.test_labels)[batch_start_ind:batch_start_ind +
test_batch_size].tolist())
# Map to available device
inputs, targets = inputs.to(device), targets.to(device)
# Forward propagation, compute loss, get predictions
outputs = model(inputs)
loss = criterion(outputs, targets)
loss = loss.mean()
test_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
# Add test accuracy to dict
acc = 100. * correct.item() / total
index_stats = example_stats.get('test', [[], []])
index_stats[1].append(100. * correct.item() / float(total))
example_stats['test'] = index_stats
print("\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%" %
(epoch, loss.item(), acc))
parser = argparse.ArgumentParser(description='training MNIST')
parser.add_argument(
'--dataset',
default='mnist',
help='dataset to use, can be mnist or permuted_mnist')
parser.add_argument(
'--batch_size',
type=int,
default=64,
metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument(
'--epochs',
type=int,
default=200,
metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--no_cuda',
action='store_true',
default=False,
help='disables CUDA training')
parser.add_argument(
'--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument(
'--sorting_file',
default="none",
help=
'name of a file containing order of examples sorted by a certain metric (default: "none", i.e. not sorted)'
)
parser.add_argument(
'--remove_n',
type=int,
default=0,
help='number of sorted examples to remove from training')
parser.add_argument(
'--keep_lowest_n',
type=int,
default=0,
help=
'number of sorted examples to keep that have the lowest metric score, equivalent to start index of removal; if a negative number given, remove random draw of examples'
)
parser.add_argument(
'--no_dropout', action='store_true', default=False, help='remove dropout')
parser.add_argument(
'--input_dir',
default='mnist_results/',
help='directory where to read sorting file from')
parser.add_argument(
'--output_dir', required=True, help='directory where to save results')
# Enter all arguments that you want to be in the filename of the saved output
ordered_args = [
'dataset', 'no_dropout', 'seed', 'sorting_file', 'remove_n',
'keep_lowest_n'
]
# Parse arguments and setup name of output file with forgetting stats
args = parser.parse_args()
args_dict = vars(args)
print(args_dict)
save_fname = '__'.join(
'{}_{}'.format(arg, args_dict[arg]) for arg in ordered_args)
# Set appropriate devices
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# Set random seed for initialization
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
npr.seed(args.seed)
# Setup transforms
all_transforms = [
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
]
if args.dataset == 'permuted_mnist':
pixel_permutation = torch.randperm(28 * 28)
all_transforms.append(
transforms.Lambda(
lambda x: x.view(-1, 1)[pixel_permutation].view(1, 28, 28)))
transform = transforms.Compose(all_transforms)
os.makedirs(args.output_dir, exist_ok=True)
# Load the appropriate train and test datasets
trainset = datasets.MNIST(
root='/tmp/data', train=True, download=True, transform=transform)
testset = datasets.MNIST(
root='/tmp/data', train=False, download=True, transform=transform)
# Get indices of examples that should be used for training
if args.sorting_file == 'none':
train_indx = np.array(range(len(trainset.train_labels)))
else:
try:
with open(
os.path.join(args.input_dir, args.sorting_file) + '.pkl',
'rb') as fin:
ordered_indx = pickle.load(fin)['indices']
except IOError:
with open(os.path.join(args.input_dir, args.sorting_file),
'rb') as fin:
ordered_indx = pickle.load(fin)['indices']
# Get the indices to remove from training
elements_to_remove = np.array(
ordered_indx)[args.keep_lowest_n:args.keep_lowest_n + args.remove_n]
# Remove the corresponding elements
train_indx = np.setdiff1d(
range(len(trainset.train_labels)), elements_to_remove)
# Remove remove_n number of examples from the train set at random
if args.keep_lowest_n < 0:
train_indx = npr.permutation(np.arange(len(
trainset.train_labels)))[:len(trainset.train_labels) - args.remove_n]
# Reassign train data and labels
trainset.train_data = trainset.train_data[train_indx, :, :]
trainset.train_labels = np.array(trainset.train_labels)[train_indx].tolist()
print('Training on ' + str(len(trainset.train_labels)) + ' examples')
# Setup model and optimizer
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
# Setup loss
criterion = nn.CrossEntropyLoss()
criterion.__init__(reduce=False)
# Initialize dictionary to save statistics for every example presentation
example_stats = {}
elapsed_time = 0
for epoch in range(args.epochs):
start_time = time.time()
train(args, model, device, trainset, optimizer, epoch, example_stats)
test(args, model, device, testset, example_stats)
epoch_time = time.time() - start_time
elapsed_time += epoch_time
print('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))
# Save the stats dictionary
fname = os.path.join(args.output_dir, save_fname)
with open(fname + "__stats_dict.pkl", "wb") as f:
pickle.dump(example_stats, f)
# Log the best train and test accuracy so far
with open(fname + "__best_acc.txt", "w") as f:
f.write('train test \n')
f.write(str(max(example_stats['train'][1])))
f.write(' ')
f.write(str(max(example_stats['test'][1])))
|