max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
tests/test_templatetags.py | adeweb-be/django-filebrowser-tinyMCEv5 | 107 | 12626342 | # coding: utf-8
from django.test import TestCase
from django.http import QueryDict
from filebrowser.templatetags.fb_tags import get_file_extensions
class GetFileExtensionsTemplateTagTests(TestCase):
def test_get_all(self):
self.assertEqual(
sorted(eval(get_file_extensions(''))),
sorted([
'.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.docx', '.mov',
'.wmv', '.mpeg', '.mpg', '.avi', '.rm', '.jpg', '.jpeg', '.gif', '.png',
'.tif', '.tiff', '.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p', '.m4v', '.webm'
]))
def test_get_filtered(self):
self.assertEqual(
get_file_extensions(QueryDict('type=image')),
"['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff']"
)
|
ctools/worker/learner/learner_hook.py | XinyuJing/DI-star | 267 | 12626343 | import numbers
import os
from abc import ABC, abstractmethod
from typing import Any, Dict
import torch
from easydict import EasyDict
from ctools.utils import allreduce
class Hook(ABC):
"""
Overview:
Abstract class for hooks
Interfaces:
__init__
Property:
name, priority
"""
def __init__(self, name: str, priority: float, **kwargs) -> None:
"""
Overview:
Base ``__init__`` method for hooks (called by subclasses via ``super()``); sets name and priority
Arguments:
- name (:obj:`str`): the name of hook
- priority (:obj:`float`): the priority in call_hook, lower value means higher priority
"""
self._name = name
assert priority >= 0, "invalid priority value: {}".format(priority)
self._priority = priority
@property
def name(self) -> str:
return self._name
@property
def priority(self) -> float:
return self._priority
@abstractmethod
def __call__(self, engine: Any) -> Any:
"""
Overview:
Should be overwritten by subclass.
Arguments:
- engine (:obj:`Any`): For LearnerHook, it should be BaseLearner.
"""
raise NotImplementedError
class LearnerHook(Hook):
"""
Overview:
Abstract class for hooks used in Learner. (``self.__call__`` should be implemented by subclass)
Interfaces:
__init__
Property:
name, priority, position
"""
positions = ['before_run', 'after_run', 'before_iter', 'after_iter']
def __init__(self, *args, position: str, **kwargs) -> None:
"""
Overview:
init LearnerHook
Arguments:
- position (:obj:`str`): the position to call hook in learner,\
must be in ['before_run', 'after_run', 'before_iter', 'after_iter']
"""
super().__init__(*args, **kwargs)
assert position in self.positions
self._position = position
@property
def position(self) -> str:
return self._position
class LrSchedulerHook(LearnerHook):
"""
Overview:
Hook used to set LrScheduler in learner
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: EasyDict = EasyDict(), **kwargs) -> None:
"""
Overview:
init LrSchedulerHook
Arguments:
- ext_args (:obj:`EasyDict`): extended_args, use ext_args.freq to set lr_freq
"""
super().__init__(*args, **kwargs)
if ext_args == {}:
self._freq = 1
else:
self._freq = ext_args.freq
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
step the lr_scheduler to get new learning rate in learner
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner to use lr_scheduler
"""
if engine.last_iter.val % self._freq == 0:
engine.lr_scheduler.step()
# for the normal case that all the parameters have the same lr
engine.log_buffer['cur_lr'] = engine.lr_scheduler.get_lr()[0]
class LoadCkptHook(LearnerHook):
"""
Overview:
Hook to load checkpoint
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: EasyDict = EasyDict(), **kwargs) -> None:
"""
Overview:
init LoadCkptHook
Arguments:
- ext_args (:obj:`EasyDict`): extended_args, use ext_args.freq to set load_ckpt_freq
"""
super().__init__(*args, **kwargs)
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Load check point
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner to load checkpoint to
"""
path = engine.load_path
if path == '': # not load
return
engine.checkpoint_manager.load(
path,
model=engine.agent.model,
optimizer=engine.optimizer,
last_iter=engine.last_iter,
logger_prefix='({})'.format(engine.name),
)
engine.info('{} load ckpt in {}'.format(engine.name, path))
class SaveCkptHook(LearnerHook):
"""
Overview:
Hook to save checkpoint
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: EasyDict = EasyDict(), **kwargs) -> None:
"""
Overview:
init SaveCkptHook
Arguments:
- ext_args (:obj:`EasyDict`): extended_args, use ext_args.freq to set save_ckpt_freq
"""
super().__init__(*args, **kwargs)
if ext_args == {}:
self._freq = 1
else:
self._freq = ext_args.freq
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Save check point in corresponding path, using ``engine.checkpoint_manager``
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner which needs to save checkpoint
"""
if engine.rank == 0 and engine.last_iter.val % self._freq == 0:
dirname = os.path.join(engine.save_path, 'ckpt')
if not os.path.exists(dirname):
try:
os.mkdir(dirname)
except FileExistsError:
pass
path = os.path.join(dirname, 'iteration_{}.pth.tar'.format(engine.last_iter.val))
engine.checkpoint_manager.save(
path,
model=engine.agent.model,
optimizer=engine.optimizer,
last_iter=engine.last_iter,
)
engine.last_ckpt_path = path
engine.info('{} save ckpt in {}'.format(engine.name, path))
class LogShowHook(LearnerHook):
"""
Overview:
Hook to show log
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: EasyDict = EasyDict(), **kwargs) -> None:
"""
Overview:
init LogShowHook
Arguments:
- ext_args (:obj:`EasyDict`): extended_args, use ext_args.freq to set freq
"""
super().__init__(*args, **kwargs)
if ext_args == {}:
self._freq = 1
else:
self._freq = ext_args.freq
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Show the log: update the record and tb_logger if rank is 0 and the iteration hits the show interval;
clear the log buffer for all learners regardless of rank
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner
"""
if engine.rank != 0: # only show log at rank 0
engine.log_buffer.clear() # reset log buffer
return
engine.record.update_var(engine.log_buffer)
engine.log_buffer.clear()
iters = engine.last_iter.val
frames = int(iters * engine._world_size * engine._cfg.learner.data.batch_size * engine._cfg.learner.unroll_len)
if iters % self._freq == 0:
engine.info("=== Training Iteration {} Result ===".format(iters))
engine.info(engine.record.get_star_text())
tb_keys = engine.tb_logger.scalar_var_names
engine.tb_logger.add_val_list(
engine.record.get_vars_tb_format(tb_keys, frames, var_type='scalar'), viz_type='scalar'
)
class LogReduceHook(LearnerHook):
"""
Overview:
Hook to reduce the distributed logs
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: EasyDict = EasyDict(), **kwargs) -> None:
"""
Overview:
init LogReduceHook
Arguments:
- ext_args (:obj:`EasyDict`): extended_args, use ext_args.freq to set log_reduce_freq
"""
super().__init__(*args, **kwargs)
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
reduce the logs from distributed learners
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner
"""
assert engine.use_distributed
def aggregate(data):
r"""
Overview:
aggregate the information from all ranks (usually via a synchronous allreduce)
Arguments:
- data (:obj:`dict`): data needs to be reduced.\
Could be dict, torch.Tensor, numbers.Integral or numbers.Real.
Returns:
- new_data (:obj:`dict`): data after reduce
"""
if isinstance(data, dict):
new_data = {k: aggregate(v) for k, v in data.items()}
elif isinstance(data, list) or isinstance(data, tuple):
new_data = [aggregate(t) for t in data]
elif isinstance(data, torch.Tensor):
new_data = data.clone().detach()
allreduce(new_data) # get data from other processes
elif isinstance(data, numbers.Integral) or isinstance(data, numbers.Real):
new_data = torch.scalar_tensor(data).reshape([1])
allreduce(new_data)
new_data = new_data.item()
else:
raise TypeError("invalid type in reduce: {}".format(type(data)))
return new_data
engine.log_buffer = aggregate(engine.log_buffer)
hook_mapping = {
'lr_scheduler': LrSchedulerHook,
'load_ckpt': LoadCkptHook,
'save_ckpt': SaveCkptHook,
'log_show': LogShowHook,
'log_reduce': LogReduceHook,
}
def register_learner_hook(name: str, hook_type: type) -> None:
"""
Overview:
Add a new LearnerHook class to hook_mapping, so you can build one instance with `build_learner_hook_by_cfg`.
You can reference
<https://gitlab.bj.sensetime.com/open-XLab/cell/ctools/blob/master/ctools/worker/learner/tests/test_base_learner.py#L81>
or see Example below
Arguments:
- name (:obj:`str`): name of the register hook
- hook_type (:obj:`type`): the register hook_type you implemented that realize LearnerHook
Examples:
>>> class HookToRegister(LearnerHook):
>>> def __init__(*args, **kargs):
>>> ...
>>> ...
>>> def __call__(*args, **kargs):
>>> ...
>>> ...
>>> ...
>>> register_learner_hook('name_of_hook', HookToRegister)
>>> ...
>>> hooks = build_learner_hook_by_cfg(cfg)
"""
assert issubclass(hook_type, LearnerHook)
hook_mapping[name] = hook_type
def build_learner_hook_by_cfg(cfg: EasyDict) -> dict:
"""
Overview:
Build the learner hooks in hook_mapping by config.
This function is often used to initialize `hooks` according to cfg,
while add_learner_hook() is often used to add an existing LearnerHook to `hooks`.
Arguments:
- cfg (:obj:`EasyDict`): the config dict wrapped by EasyDict, should be {'hook': [xxx, xxx]}
Returns:
- hooks (:obj:`dict`): key should be in ['before_run', 'after_run', 'before_iter', 'after_iter'],\
value should be a list containing all hooks in this position.
Note:
lower value means higher priority
"""
hooks = {k: [] for k in LearnerHook.positions}
for item in cfg.values():
priority = item.get('priority', 100)
pos = item.position
idx = 0
for i in reversed(range(len(hooks[pos]))):
if priority >= hooks[pos][i].priority:
idx = i + 1
break
ext_args = item.get('ext_args', {})
hook = hook_mapping[item.type](item.name, priority, position=pos, ext_args=ext_args)
hooks[pos].insert(idx, hook)
return hooks
def add_learner_hook(hooks: dict, hook: LearnerHook) -> None:
"""
Overview:
add a learner hook to hooks
Arguments:
- hooks (:obj:`dict`): you can reference build_learner_hook_by_cfg()'s return `hooks`.
- hook (:obj:`LearnerHook`): the LearnerHook which will be added to `hooks`
"""
position = hook.position
priority = hook.priority
idx = 0
for i in reversed(range(len(hooks[position]))):
if priority >= hooks[position][i].priority:
idx = i + 1
break
assert isinstance(hook, LearnerHook)
hooks[position].insert(idx, hook)
def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
"""
Overview:
merge two hook dicts, which must have the same keys; each value list is sorted by hook priority using a stable sort
Arguments:
- hooks1 (:obj:`dict`): hooks1 to be merged
- hooks2 (:obj:`dict`): hooks2 to be merged
Returns:
- new_hooks (:obj:`dict`): merged new hooks
.. note::
This merge function uses stable sort method without disturbing the same priority hook
"""
assert set(hooks1.keys()) == set(hooks2.keys())
new_hooks = {}
for k in hooks1.keys():
new_hooks[k] = sorted(hooks1[k] + hooks2[k], key=lambda x: x.priority)
return new_hooks
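# Illustrative usage sketch (not part of the original file): building hooks from a
# hypothetical config and then appending one more hook by hand. The cfg keys and values
# below are assumptions chosen to match hook_mapping and LearnerHook.positions above.
if __name__ == '__main__':
    demo_cfg = EasyDict({
        'load_ckpt': {'name': 'load_ckpt', 'type': 'load_ckpt', 'priority': 20, 'position': 'before_run'},
        'log_show': {'name': 'log_show', 'type': 'log_show', 'priority': 20, 'position': 'after_iter'},
    })
    hooks = build_learner_hook_by_cfg(demo_cfg)
    add_learner_hook(hooks, SaveCkptHook('save_ckpt', 40, position='after_iter', ext_args=EasyDict({'freq': 100})))
    print({k: [h.name for h in v] for k, v in hooks.items()})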
|
python/rikai/parquet/shuffler.py | chunyang/rikai | 111 | 12626383 | <filename>python/rikai/parquet/shuffler.py
# Copyright 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Generic, Optional, TypeVar
__all__ = ["RandomShuffler"]
Elem = TypeVar("Elem")
class RandomShuffler(Generic[Elem]):
"""Reservoir sampling-based shuffler to provide randomized access over elements.
:py:class:`RandomShuffler` maintains an internal buffer, and uses `reservoir sampling`_
to offer randomness with a uniform distribution. The buffer ``capacity`` does
not affect the probability distribution.
Parameters
----------
capacity : int, optional
The capacity of the internal random access buffer. Note that setting this value to
``1`` or ``0`` makes this :py:class:`RandomShuffler` a FIFO queue. Default
value: ``32``.
seed : int, optional
Random seed.
Example
-------
.. code-block:: python
def __iter__(self):
\"\"\"Provide random access over a Stream\"\"\"
shuffler = RandomShuffler(capacity=128)
for elem in stream:
shuffler.append(elem)
# Approximately maintain the shuffler at its capacity.
while shuffler.full():
yield shuffler.pop()
while shuffler:
yield shuffler.pop()
Notes
-----
- Setting ``capacity`` to ``1`` or ``0`` makes :py:class:`RandomShuffler` a FIFO queue.
- This class is not thread-safe.
References
----------
- `Reservoir Sampling`_
- Petastorm `Shuffling Buffer <https://github.com/uber/petastorm/blob/master/petastorm/reader_impl/shuffling_buffer.py>`_
.. _Reservoir Sampling: https://en.wikipedia.org/wiki/Reservoir_sampling
""" # noqa
DEFAULT_CAPACITY = 32
def __init__(
self, capacity: int = DEFAULT_CAPACITY, seed: Optional[int] = None
):
"""Construct a :py:class:`RandomShuffler`"""
self.capacity = capacity
self.seed = seed
self.buffer = []
random.seed(self.seed)
def __repr__(self) -> str:
return "RandomShuffler(capacity={})".format(self.capacity)
def __len__(self) -> int:
"""Returns the number of elements in the shuffler."""
return len(self.buffer)
def __bool__(self) -> bool:
"""Return True if this shuffler is not empty."""
return len(self.buffer) > 0
def full(self) -> bool:
"""Return True if this shuffler reaches to its capacity."""
return len(self) >= self.capacity
def append(self, elem: Elem):
"""Append a new element to the shuffler"""
self.buffer.append(elem)
def pop(self) -> Elem:
"""Pop out one random element from the shuffler.
Raises
------
IndexError
If the internal buffer is empty.
"""
if len(self.buffer) == 0:
raise IndexError("Buffer is empty")
idx = random.randrange(len(self.buffer))
item = self.buffer[idx]
self.buffer[idx] = self.buffer[-1]
self.buffer.pop()
return item
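# Illustrative usage sketch (not part of the original file): a streaming shuffle of a
# small range with a fixed seed, following the pattern shown in the class docstring.
if __name__ == "__main__":
    def _demo_shuffle(stream, capacity=8, seed=42):
        shuffler = RandomShuffler(capacity=capacity, seed=seed)
        for elem in stream:
            shuffler.append(elem)
            # keep the buffer approximately at its capacity while the stream is consumed
            while shuffler.full():
                yield shuffler.pop()
        # drain whatever is left once the stream ends
        while shuffler:
            yield shuffler.pop()
    print(list(_demo_shuffle(range(20))))  # the same 20 elements, in pseudo-random order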
|
tests/ssz/test_to_dict.py | muta6150/beacon_chain | 217 | 12626406 | <reponame>muta6150/beacon_chain<filename>tests/ssz/test_to_dict.py
import pytest
from ssz.ssz import (
to_dict
)
@pytest.mark.parametrize(
'value, result',
[
({}, {}),
({'a': 1, 'b': 2}, {'a': 1, 'b': 2}),
([], []),
([{'a': 1}, {'b': 2}], [{'a': 1}, {'b': 2}])
]
)
def test_to_dict(value, result):
assert to_dict(value) == result
@pytest.mark.parametrize(
'field_data',
[
[],
[('a', 'int64', 1), ('b', 'hash32', b'two')]
]
)
def test_object_to_dict(field_data):
class foo:
fields = {name: typ for name, typ, _ in field_data}
defaults = {name: value for name, _, value in field_data}
o = foo()
for name, _, value in field_data:
setattr(o, name, value)
assert to_dict(o) == {name: value for name, _, value in field_data}
|
tello_driver/launch/emulators_launch.py | crisdeodates/DJI-Tello_ros2_cpp | 114 | 12626442 | from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
# Launch two emulators and two drivers for testing
def generate_launch_description():
emulator_path = 'install/tello_driver/lib/tello_driver/tello_emulator'
localhost = '127.0.0.1'
dr1_cmd_port = 11001
dr2_cmd_port = 11002
em1_port = 12001
em2_port = 12002
dr1_data_port = 13001
dr2_data_port = 13002
dr1_video_port = 14001
dr2_video_port = 14002
dr1_params = [{
'drone_ip': localhost,
'drone_port': em1_port,
'command_port': dr1_cmd_port,
'data_port': dr1_data_port,
'video_port': dr1_video_port
}]
dr2_params = [{
'drone_ip': localhost,
'drone_port': em2_port,
'command_port': dr2_cmd_port,
'data_port': dr2_data_port,
'video_port': dr2_video_port
}]
return LaunchDescription([
ExecuteProcess(cmd=[emulator_path, 'em1', str(em1_port), str(dr1_data_port), str(dr1_video_port)],
output='screen'),
ExecuteProcess(cmd=[emulator_path, 'em2', str(em2_port), str(dr2_data_port), str(dr2_video_port)],
output='screen'),
Node(package='tello_driver', executable='tello_driver_main', node_name='dr1', namespace='dr1',
parameters=dr1_params, output='screen'),
Node(package='tello_driver', executable='tello_driver_main', node_name='dr2', namespace='dr2',
parameters=dr2_params, output='screen'),
])
|
content/public/PRESUBMIT.py | sarang-apps/darshan_browser | 575 | 12626449 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Content public presubmit script
See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def _CheckConstInterfaces(input_api, output_api):
# Matches 'virtual...const = 0;', 'virtual...const;' or 'virtual...const {}'
pattern = input_api.re.compile(r'virtual[^;]*const\s*(=\s*0)?\s*({}|;)',
input_api.re.MULTILINE)
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Do not add const to content/public '
'interfaces. See '
'https://www.chromium.org/developers/content-module/content-api',
files) ]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckConstInterfaces(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CheckConstInterfaces(input_api, output_api))
return results
|
src/losses/multi/__init__.py | xiongzhiyao/pytorch-segmentation | 359 | 12626469 | <gh_stars>100-1000
import torch.nn as nn
from .focal_loss import FocalLoss
from .lovasz_loss import LovaszSoftmax
from .ohem_loss import OhemCrossEntropy2d
from .softiou_loss import SoftIoULoss
class MultiClassCriterion(nn.Module):
def __init__(self, loss_type='CrossEntropy', **kwargs):
super().__init__()
if loss_type == 'CrossEntropy':
self.criterion = nn.CrossEntropyLoss(**kwargs)
elif loss_type == 'Focal':
self.criterion = FocalLoss(**kwargs)
elif loss_type == 'Lovasz':
self.criterion = LovaszSoftmax(**kwargs)
elif loss_type == 'OhemCrossEntropy':
self.criterion = OhemCrossEntropy2d(**kwargs)
elif loss_type == 'SoftIOU':
self.criterion = SoftIoULoss(**kwargs)
else:
raise NotImplementedError
def forward(self, preds, labels):
loss = self.criterion(preds, labels)
return loss
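# Illustrative usage sketch (not part of the original file). The 19-class shapes below are
# assumptions; they follow the usual (N, C, H, W) logits vs. (N, H, W) integer-label
# convention expected by nn.CrossEntropyLoss. Kept as comments because this is a package
# __init__ module with relative imports.
#
#   import torch
#   criterion = MultiClassCriterion(loss_type='CrossEntropy', ignore_index=255)
#   preds = torch.randn(2, 19, 64, 64)           # per-pixel class logits
#   labels = torch.randint(0, 19, (2, 64, 64))   # per-pixel class ids
#   loss = criterion(preds, labels)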
|
rclpy/test/test_validate_namespace.py | RoboStack/rclpy | 121 | 12626520 | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from rclpy.exceptions import InvalidNamespaceException
from rclpy.validate_namespace import validate_namespace
class TestValidateNamespace(unittest.TestCase):
def test_validate_namespace(self):
tests = [
'/my_ns',
'/',
]
for topic in tests:
# Will raise if invalid
validate_namespace(topic)
def test_validate_namespace_failures(self):
# namespace must not be empty
with self.assertRaisesRegex(InvalidNamespaceException, 'empty'):
validate_namespace('')
# namespace must start with /
with self.assertRaisesRegex(InvalidNamespaceException, 'must be absolute'):
validate_namespace('invalid_namespace')
if __name__ == '__main__':
unittest.main()
|
numba/core/unsafe/nrt.py | auderson/numba | 6,620 | 12626524 | <gh_stars>1000+
"""
Contains unsafe intrinsic that calls NRT C API
"""
from numba.core import types
from numba.core.typing import signature
from numba.core.extending import intrinsic
@intrinsic
def NRT_get_api(tyctx):
"""NRT_get_api()
Calls NRT_get_api() from the NRT C API
Returns LLVM Type i8* (void pointer)
"""
def codegen(cgctx, builder, sig, args):
return cgctx.nrt.get_nrt_api(builder)
sig = signature(types.voidptr)
return sig, codegen
|
tests/python_print_width/print_width.py | hixio-mh/plugin-python | 362 | 12626534 | def foo():
return "line_with_79_chars_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
return "line_with_80_chars_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
image_synthesis/data/imagenet_dataset.py | buxiangzhiren/VQ-Diffusion_office | 236 | 12626577 | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class ImageNetDataset(Dataset):
def __init__(self, data_root, input_file, phase = 'train', im_preprocessor_config=None):
self.transform = instantiate_from_config(im_preprocessor_config)
self.root = os.path.join(data_root, phase)
input_file = os.path.join(data_root, input_file)
temp_label = json.load(open('image_synthesis/data/imagenet_class_index.json', 'r'))
self.labels = {}
for i in range(1000):
self.labels[temp_label[str(i)][0]] = i
self.A_paths = []
self.A_labels = []
with open(input_file, 'r') as f:
temp_path = f.readlines()
for path in temp_path:
label = self.labels[path.split('/')[0]]
self.A_paths.append(os.path.join(self.root, path.strip()))
self.A_labels.append(label)
self.num = len(self.A_paths)
self.A_size = len(self.A_paths)
def __len__(self):
return self.num
def __getitem__(self, index):
try:
return self.load_img(index)
except:
return self.__getitem__(random.randint(0, self.__len__()-1))
def load_img(self, index):
A_path = self.A_paths[index % self.A_size]
A = load_img(A_path)
# if self.transform is not None:
A = self.transform(A)['image']
A_label = self.A_labels[index % self.A_size]
data = {
'image': np.transpose(A.astype(np.float32), (2, 0, 1)),
'label': A_label,
}
return data
|
examples/basic_workflow_parallel.py | densmirn/sdc | 540 | 12626581 | <filename>examples/basic_workflow_parallel.py
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit, prange
# Dataset for analysis
FNAME = "employees.csv"
# This function gets compiled by Numba* and multi-threaded
@njit(parallel=True)
def get_analyzed_data():
df = pd.read_csv(FNAME)
s_bonus = pd.Series(df['Bonus %'])
s_first_name = pd.Series(df['First Name'])
# Use explicit loop to compute the mean. It will be compiled as parallel loop
m = 0.0
for i in prange(s_bonus.size):
m += s_bonus.values[i]
m /= s_bonus.size
names = s_first_name.sort_values()
return m, names
# Printing names and their average bonus percent
mean_bonus, sorted_first_names = get_analyzed_data()
print(sorted_first_names)
print('Average Bonus %:', mean_bonus)
|
PythonMiddleware/extensions.py | chandlerette/Python-Middleware | 101 | 12626595 | <reponame>chandlerette/Python-Middleware
def getExtensionObjectFromString(strExtension):
try:
assetID,tempData=strExtension.split("$")
itemVER, tempData=tempData.split("@")
itemID,tempData=tempData.split(";")
return extensions(assetID,itemVER,itemID,tempData)
except: return None
class extensions:
def __init__(self, assetID, itemVER, itemID, data):
if assetID and itemVER and itemID:
self.assetID = assetID
self.itemVER = itemVER
self.itemID=itemID
self.data= "%s$%s@%s;%s" % (self.assetID,self.itemVER,self.itemID, data)
def string(self):
return self.data
def compareWithId(self,itemid):
try:
if(self.itemID==itemid):
return True
else:
return False
except:
return False
def compareWithVER(self,ver):
try:
if(self.itemVER==ver):
return True
except:
return False
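# Illustrative usage sketch (not part of the original file): round-tripping the
# "assetID$itemVER@itemID;data" wire format handled above; the values are hypothetical.
if __name__ == '__main__':
    ext = extensions('1.3.0', '1.0', '42', 'payload')
    assert ext.string() == '1.3.0$1.0@42;payload'
    parsed = getExtensionObjectFromString(ext.string())
    assert parsed is not None and parsed.compareWithId('42')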
|
setup.py | tombry/virlutils | 133 | 12626619 | <filename>setup.py
# coding: utf-8
from setuptools import setup, find_packages # noqa: H301
from virl import __version__
NAME = "virlutils"
CMLNAME = "cmlutils"
VERSION = __version__
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
def requirements(f):
with open(f, "r") as fd:
return fd.read()
def readme():
with open("README.md", "r") as f:
return f.read()
setup(
name=NAME,
version=VERSION,
description="A collection of utilities for interacting with Cisco VIRL/CML",
author="<NAME>", # With a big thanks to its original author, <NAME>
author_email="<EMAIL>",
url="https://github.com/CiscoDevNet/virlutils",
entry_points={"console_scripts": ["virl=virl.cli.main:virl", "cml=virl.cli.main:virl"]},
packages=find_packages(),
package_data={"virl": ["templates/**/*.j2", "swagger/templates/*", "swagger/static/*", "examples/plugins/*"]},
include_package_data=True,
install_requires=requirements("requirements.txt"),
long_description_content_type="text/markdown",
long_description=readme(),
test_suite="tests",
tests_require=requirements("test-requirements.txt"),
zip_safe=False,
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
atlas/foundations_rest_api/src/foundations_rest_api/config/configs.py | DeepLearnI/atlas | 296 | 12626629 | <gh_stars>100-1000
import yaml
import os
import pathlib
ATLAS = yaml.load(open(os.getenv('AUTH_CLIENT_CONFIG_PATH', pathlib.Path(os.path.abspath(__file__)).parent / 'auth_client_config.yaml')))
|
tests/nnet/activations/test_sigmoid.py | Zac-HD/MyGrad | 147 | 12626640 | import sys
import numpy as np
from mygrad.nnet.activations import sigmoid
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
@fwdprop_test_factory(
mygrad_func=sigmoid,
true_func=lambda x: 1 / (1 + np.exp(-x)),
num_arrays=1,
index_to_bnds={0: (-np.log(sys.float_info.max), None)},
)
def test_sigmoid_fwd():
pass
@backprop_test_factory(
mygrad_func=sigmoid,
true_func=lambda x: 1 / (1 + np.exp(-x)),
num_arrays=1,
index_to_bnds={0: (-np.log(sys.float_info.max), None)},
)
def test_sigmoid_bkwd():
pass
|
docker/s3tests/test_bucket_policy.py | yuchen-sun/chubaofs | 2,498 | 12626648 | <gh_stars>1000+
# Copyright 2020 The ChubaoFS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# -*- coding: utf-8 -*-
import env
import json
from base import S3TestCase, get_env_s3_client
POLICY = '{' \
'"Version": "2012-10-17", ' \
'"Statement": [{ ' \
'"Sid": "id-1",' \
'"Effect": "Allow",' \
'"Principal": {"AWS": ["arn:aws:iam::123456789012:root"]}, ' \
'"Action": ["s3:PutObject"], ' \
'"Resource": ["arn:aws:s3:::acl3/*" ] ' \
'}]}'
class PolicyTest(S3TestCase):
s3 = None
def __init__(self, case):
super(PolicyTest, self).__init__(case)
self.s3 = get_env_s3_client()
def test_policy_set(self):
# Get bucket policy configuration
self.assert_get_bucket_policy_result(
result=self.s3.get_bucket_policy(Bucket=env.BUCKET), policy=None)
# Put bucket policy configuration
self.assert_result_status_code(
result=self.s3.put_bucket_policy(Bucket=env.BUCKET, Policy=POLICY))
# Get bucket policy configuration
self.assert_get_bucket_policy_result(
result=self.s3.get_bucket_policy(Bucket=env.BUCKET), policy=json.loads(POLICY))
# Delete bucket policy configuration
self.assert_result_status_code(
result=self.s3.delete_bucket_policy(Bucket=env.BUCKET), status_code=204)
# Get bucket policy configuration
self.assert_get_bucket_policy_result(
result=self.s3.get_bucket_policy(Bucket=env.BUCKET), policy=None)
|
vendor/gx/ipfs/QmYkNhwAviNzN974MB3koxuBRhtbvCotnuQcugrPF96BPp/client_model/setup.py | u5surf/go-livepeer | 4,695 | 12626657 | <filename>vendor/gx/ipfs/QmYkNhwAviNzN974MB3koxuBRhtbvCotnuQcugrPF96BPp/client_model/setup.py
#!/usr/bin/python
from setuptools import setup
setup(
name = 'prometheus_client_model',
version = '0.0.1',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Data model artifacts for the Prometheus client.',
license = 'Apache License 2.0',
url = 'http://github.com/prometheus/client_model',
packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'],
package_dir = {'': 'python'},
requires = ['protobuf(==2.4.1)'],
platforms = 'Platform Independent',
classifiers = ['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Testing',
'Topic :: System :: Monitoring'])
|
experiments/faster_rcnn/rcnn_dota_e2e.py | Amberrferr/Faster_RCNN_for_DOTA | 344 | 12626678 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by <NAME>
# --------------------------------------------------------
import os
import sys
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
#os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..', '..', 'faster_rcnn'))
import train_end2end
import test_dota
if __name__ == "__main__":
# train_end2end.main()
train_end2end.main()
test_dota.main()
|
venv/lib/python3.8/site-packages/statsmodels/nonparametric/api.py | johncollinsai/post-high-frequency-data | 6,931 | 12626685 | __all__ = [
"KDEUnivariate",
"KDEMultivariate", "KDEMultivariateConditional", "EstimatorSettings",
"KernelReg", "KernelCensoredReg",
"lowess", "bandwidths",
"pdf_kernel_asym", "cdf_kernel_asym"
]
from .kde import KDEUnivariate
from .smoothers_lowess import lowess
from . import bandwidths
from .kernel_density import \
KDEMultivariate, KDEMultivariateConditional, EstimatorSettings
from .kernel_regression import KernelReg, KernelCensoredReg
from .kernels_asymmetric import pdf_kernel_asym, cdf_kernel_asym
|
src/pythae/models/adversarial_ae/__init__.py | clementchadebec/benchmark_VAE | 143 | 12626687 | <gh_stars>100-1000
"""Implementation of an Adversarial Autoencoder model as proposed in
(https://arxiv.org/abs/1511.05644). This model tries to make the posterior distribution match
the prior using adversarial training.
Available samplers
-------------------
.. autosummary::
~pythae.samplers.NormalSampler
~pythae.samplers.GaussianMixtureSampler
~pythae.samplers.TwoStageVAESampler
~pythae.samplers.MAFSampler
~pythae.samplers.IAFSampler
:nosignatures:
"""
from .adversarial_ae_config import Adversarial_AE_Config
from .adversarial_ae_model import Adversarial_AE
__all__ = ["Adversarial_AE", "Adversarial_AE_Config"]
|
gcp_variant_transforms/transforms/merge_header_definitions.py | tsa87/gcp-variant-transforms | 113 | 12626693 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam combiner function for merging VCF file header definitions."""
from typing import Dict, List # pylint: disable=unused-import
import apache_beam as beam
from gcp_variant_transforms.beam_io import vcf_header_io # pylint: disable=unused-import
from gcp_variant_transforms.libs import vcf_header_definitions_merger
# An alias for the header key constants to make referencing easier.
_VcfHeaderDefinitions = vcf_header_definitions_merger.VcfHeaderDefinitions
class _MergeDefinitionsFn(beam.CombineFn):
"""Combiner function for merging definitions."""
def __init__(self, definitions_merger):
# type: (vcf_header_definitions_merger.DefinitionsMerger) -> None
super().__init__()
self._definitions_merger = definitions_merger
def create_accumulator(self):
return vcf_header_definitions_merger.VcfHeaderDefinitions()
def add_input(self,
source, # type: _VcfHeaderDefinitions
to_merge # type: vcf_header_io.VcfHeader
):
# type: (...) -> _VcfHeaderDefinitions
return self.merge_accumulators(
[source,
vcf_header_definitions_merger.VcfHeaderDefinitions(
vcf_header=to_merge)])
def merge_accumulators(self, accumulators):
# type: (List[_VcfHeaderDefinitions]) -> _VcfHeaderDefinitions
merged_definitions = self.create_accumulator()
for to_merge in accumulators:
self._definitions_merger.merge(merged_definitions, to_merge)
return merged_definitions
def extract_output(self, merged_definitions):
# type: (_VcfHeaderDefinitions) -> _VcfHeaderDefinitions
return merged_definitions
class MergeDefinitions(beam.PTransform):
"""A PTransform to merge header definitions.
Reads a PCollection of `VcfHeader` and produces a PCollection of
`VcfHeaderDefinitions`.
"""
def __init__(self):
"""Initializes `MergeDefinitions` object."""
super().__init__()
self._definitions_merger = vcf_header_definitions_merger.DefinitionsMerger()
def expand(self, pcoll):
return pcoll | beam.CombineGlobally(
_MergeDefinitionsFn(self._definitions_merger)).without_defaults()
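# Illustrative pipeline sketch (not part of the original file): `headers` stands for a
# hypothetical PCollection of VcfHeader objects produced by an upstream header-reading
# transform; kept as a comment because building real inputs requires the VCF test fixtures.
#
#   merged_definitions = headers | 'MergeHeaderDefinitions' >> MergeDefinitions()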
|
pypeit/specobj.py | brackham/PypeIt | 107 | 12626695 | <reponame>brackham/PypeIt
"""
Module for the SpecObj classes
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import copy
import inspect
from IPython import embed
import numpy as np
from scipy import interpolate
from astropy import units
from linetools.spectra import xspectrum1d
from pypeit import msgs
from pypeit.core import flexure
from pypeit.core import parse
from pypeit.core import flux_calib
from pypeit.core.wavecal import wvutils
from pypeit import utils
from pypeit import datamodel
from pypeit.images import detector_container
def det_hdu_prefix(det):
return 'DET{:02d}-'.format(det)
class SpecObj(datamodel.DataContainer):
"""
Class to handle object spectra from a single exposure.
One generates one of these Objects for each spectrum in the exposure. They
are instantiated by the object finding routine, and then all spectral
extraction information for the object are assigned as attributes
Args:
pypeline (str): Name of the PypeIt pypeline method
Allowed options are: MultiSlit, Echelle, IFU
DET (int): Detector number
copy_dict (dict, optional): Used to set the entire internal dict of the object.
Only used in the copy() method so far.
objtype (str, optional):
Type of object ('unknown', 'standard', 'science')
slitid (int, optional):
Identifier for the slit (max=9999).
Multislit and IFU
specobj_dict (dict, optional):
Used in the objfind() method of extract.py to instantiate
orderindx (int, optional):
Running index for the order
ech_order (int, optional):
Physical order number
Attributes:
See datamodel and _init_internals()
"""
version = '1.1.4'
hdu_prefix = None
datamodel = {'TRACE_SPAT': dict(otype=np.ndarray, atype=float,
descr='Object trace along the spec (spatial pixel)'),
'FWHM': dict(otype=float, descr='Spatial FWHM of the object (pixels)'),
'FWHMFIT': dict(otype=np.ndarray,
descr='Spatial FWHM across the detector (pixels)'),
'OPT_WAVE': dict(otype=np.ndarray, atype=float,
descr='Optimal Wavelengths in vacuum (Angstroms)'),
'OPT_FLAM': dict(otype=np.ndarray, atype=float,
descr='Optimal flux (1e-17 erg/s/cm^2/Ang)'),
'OPT_FLAM_SIG': dict(otype=np.ndarray, atype=float,
descr='Optimal flux uncertainty (1e-17 erg/s/cm^2/Ang)'),
'OPT_FLAM_IVAR': dict(otype=np.ndarray, atype=float,
descr='Optimal flux inverse variance (1e-17 erg/s/cm^2/Ang)^-2'),
'OPT_COUNTS': dict(otype=np.ndarray, atype=float, descr='Optimal flux (counts)'),
'OPT_COUNTS_IVAR': dict(otype=np.ndarray, atype=float,
descr='Inverse variance of optimally extracted flux '
'using modelivar image (counts^2)'),
'OPT_COUNTS_SIG': dict(otype=np.ndarray, atype=float,
descr='Optimally extracted noise from IVAR (counts)'),
'OPT_COUNTS_NIVAR': dict(otype=np.ndarray, atype=float,
descr='Optimally extracted noise variance, sky+read '
'noise only (counts^2)'),
'OPT_MASK': dict(otype=np.ndarray, atype=np.bool_,
descr='Mask for optimally extracted flux. True=good'),
'OPT_COUNTS_SKY': dict(otype=np.ndarray, atype=float,
descr='Optimally extracted sky (counts)'),
'OPT_COUNTS_SIG_DET': dict(otype=np.ndarray, atype=float,
descr='Optimally extracted detector noise (counts)'),
'OPT_FRAC_USE': dict(otype=np.ndarray, atype=float,
descr='Fraction of pixels in the object profile subimage '
'used for this extraction'),
'OPT_CHI2': dict(otype=np.ndarray, atype=float,
descr='Reduced chi2 of the model fit for this spectral pixel'),
# TODO -- Confirm BOX_NPIX should be a float and not int!
'BOX_NPIX': dict(otype=np.ndarray, atype=float,
descr='Number of pixels used for the boxcar extraction; can be '
'fractional'),
'BOX_WAVE': dict(otype=np.ndarray, atype=float,
descr='Boxcar Wavelengths in vacuum (Angstroms)'),
'BOX_FLAM': dict(otype=np.ndarray, atype=float,
descr='Boxcar flux (erg/s/cm^2/Ang)'),
'BOX_FLAM_SIG': dict(otype=np.ndarray, atype=float,
descr='Boxcar flux uncertainty (1e-17 erg/s/cm^2/Ang)'),
'BOX_FLAM_IVAR': dict(otype=np.ndarray, atype=float,
descr='Boxcar flux inverse variance (1e-17 erg/s/cm^2/Ang)^-2'),
'BOX_COUNTS': dict(otype=np.ndarray, atype=float, descr='Boxcar flux (counts)'),
'BOX_COUNTS_IVAR': dict(otype=np.ndarray, atype=float,
descr='Inverse variance of optimally extracted flux '
'using modelivar image (counts^2)'),
'BOX_COUNTS_SIG': dict(otype=np.ndarray, atype=float,
descr='Boxcar extracted noise from IVAR (counts)'),
'BOX_COUNTS_NIVAR': dict(otype=np.ndarray, atype=float,
descr='Boxcar extracted noise variance, sky+read noise '
'only (counts^2)'),
'BOX_MASK': dict(otype=np.ndarray, atype=np.bool_,
descr='Mask for boxcar extracted flux. True=good'),
'BOX_COUNTS_SKY': dict(otype=np.ndarray, atype=float,
descr='Boxcar extracted sky (counts)'),
'BOX_COUNTS_SIG_DET': dict(otype=np.ndarray, atype=float,
descr='Boxcar extracted detector noise (counts)'),
'BOX_FRAC_USE': dict(otype=np.ndarray, atype=float,
descr='Fraction of pixels in the object profile subimage '
'used for this extraction'),
'BOX_CHI2': dict(otype=np.ndarray, atype=float,
descr='Reduced chi2 of the model fit for this spectral pixel'),
'BOX_RADIUS': dict(otype=float, descr='Size of boxcar radius (pixels)'),
#
'FLEX_SHIFT_GLOBAL': dict(otype=float, descr='Global shift of the spectrum to correct for spectral '
'flexure (pixels). This is based on the sky spectrum at '
'the center of the slit'),
'FLEX_SHIFT_LOCAL': dict(otype=float, descr='Local shift of the spectrum to correct for spectral '
'flexure (pixels). This should be a small correction to '
'the global value, and is based on the sky spectrum '
'extracted near the object'),
'FLEX_SHIFT_TOTAL': dict(otype=float, descr='Total shift of the spectrum to correct for spectral '
'flexure (pixels). This is the sum of the global and '
'local FLEX_SHIFT'),
'VEL_TYPE': dict(otype=str, descr='Type of heliocentric correction (if any)'),
'VEL_CORR': dict(otype=float,
descr='Relativistic velocity correction for wavelengths'),
# Detector
# TODO: Why are both det and detector attributes, isn't det in detector?
'DET': dict(otype=(int, np.integer), descr='Detector number'),
'DETECTOR': dict(otype=detector_container.DetectorContainer,
descr='Detector DataContainer'),
#
'PYPELINE': dict(otype=str, descr='Name of the PypeIt pipeline mode'),
'OBJTYPE': dict(otype=str, descr='PypeIt type of object (standard, science)'),
'SPAT_PIXPOS': dict(otype=(float, np.floating),
descr='Spatial location of the trace on detector (pixel) at half-way'),
'SPAT_FRACPOS': dict(otype=(float, np.floating),
descr='Fractional location of the object on the slit'),
'trace_spec': dict(otype=np.ndarray, atype=(int,np.integer),
descr='Array of pixels along the spectral direction'),
'maskwidth': dict(otype=(float, np.floating),
descr='Size (in units of fwhm) of the region used for local sky subtraction'),
# Slit and Object
'WAVE_RMS': dict(otype=(float, np.floating),
descr='RMS (pix) for the wavelength solution for this slit.'),
'SLITID': dict(otype=(int, np.integer),
descr='PypeIt slit ID (aka SPAT_ID).'),
'OBJID': dict(otype=(int, np.integer),
descr='Object ID for multislit data. Each object is given an index '
'for the slit it appears increasing from from left to right. '
'These are one based.'),
'NAME': dict(otype=str, descr='Name of the object following the naming model'),
'RA': dict(otype=float, descr='Right Ascension (J2000) decimal degree'),
'DEC': dict(otype=float, descr='Declination (J2000) decimal degree'),
'MASKDEF_ID': dict(otype=(int, np.integer), descr='Slitmask definition ID'),
'MASKDEF_OBJNAME': dict(otype=str, descr='Name of the object from the slitmask definition'),
'MASKDEF_EXTRACT': dict(otype=bool, descr='Boolean indicating if this is a forced extraction '
'at the expected location from slitmask design. '),
'hand_extract_flag': dict(otype=bool, descr='Boolean indicating if this is a forced extraction '
'at the location provided by the user. '),
#
'ECH_OBJID': dict(otype=(int, np.integer),
descr='Object ID for echelle data. Each object is given an '
'index in the order it appears increasing from from left '
'to right. These are one based.'),
'ECH_ORDERINDX': dict(otype=(int, np.integer),
descr='Order indx, analogous to SLITID for echelle. '
'Zero based.'),
'ECH_FRACPOS': dict(otype=(float, np.floating),
descr='Synced echelle fractional location of the object on '
'the slit'),
'ECH_ORDER': dict(otype=(int, np.integer), descr='Physical echelle order'),
'ECH_NAME': dict(otype=str,
descr='Name of the object for echelle data. Same as NAME above '
'but order numbers are omitted giving a unique name per '
'object.')}
def __init__(self, PYPELINE, DET, OBJTYPE='unknown',
SLITID=None, ECH_ORDER=None, ECH_ORDERINDX=None):
args, _, _, values = inspect.getargvalues(inspect.currentframe())
_d = dict([(k,values[k]) for k in args[1:]])
# Setup the DataContainer
datamodel.DataContainer.__init__(self, d=_d)
self.FLEX_SHIFT_GLOBAL = 0.
self.FLEX_SHIFT_LOCAL = 0.
self.FLEX_SHIFT_TOTAL = 0.
# Name
self.set_name()
@classmethod
def from_arrays(cls, PYPE_LINE:str, wave:np.ndarray,
counts:np.ndarray, ivar:np.ndarray, mode='OPT',
DET=1, SLITID=0, **kwargs):
# Instantiate
slf = cls(PYPE_LINE, DET, SLITID=SLITID)
# Add in arrays
for item, attr in zip((wave, counts, ivar),
['_WAVE', '_COUNTS', '_COUNTS_IVAR']):
setattr(slf, mode+attr, item.astype(float))
# Mask
slf[mode+'_MASK'] = slf[mode+'_COUNTS_IVAR'] > 0.
return slf
def _init_internals(self):
# Object finding
self.smash_peakflux = None
self.smash_nsig = None
# Hand
self.hand_extract_flag = False
self.hand_extract_spec = None
self.hand_extract_spat = None
self.hand_extract_det = None
self.hand_extract_fwhm = None
# Object profile
self.prof_nsigma = None
self.sign = 1.0
self.min_spat = None
self.max_spat = None
# Echelle
self.ech_frac_was_fit = None #
self.ech_snr = None #
def _bundle(self, **kwargs):
"""
Over-ride DataContainer._bundle() to deal with DETECTOR
Args:
kwargs:
Passed to DataContainer._bundle()
Returns:
list:
"""
_d = super(SpecObj, self)._bundle(**kwargs)
# Move DetectorContainer into its own HDU
if _d[0]['DETECTOR'] is not None:
_d.append(dict(detector=_d[0].pop('DETECTOR')))
# Return
return _d
def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None,
limit_hdus=None, force_to_bintbl=True):
"""
Over-ride :func:`pypeit.datamodel.DataContainer.to_hdu` to force to
a BinTableHDU
See that func for Args and Returns
"""
args, _, _, values = inspect.getargvalues(inspect.currentframe())
_d = dict([(k,values[k]) for k in args[1:]])
# Force
_d['force_to_bintbl'] = True
# Do it
return super(SpecObj, self).to_hdu(**_d)
@property
def slit_order(self):
if self.PYPELINE == 'Echelle':
return self.ECH_ORDER
elif self.PYPELINE == 'MultiSlit':
return self.SLITID
elif self.PYPELINE == 'IFU':
return self.SLITID
else:
msgs.error("Bad PYPELINE")
@property
def slit_orderindx(self):
if self.PYPELINE == 'Echelle':
return self.ECH_ORDERINDX
elif self.PYPELINE == 'MultiSlit':
return self.SLITID
elif self.PYPELINE == 'IFU':
return self.SLITID
else:
msgs.error("Bad PYPELINE")
@property
def mnx_wave(self):
"""Return min, max wavelength of the spectrum
Uses OPT_WAVE if present and then BOX_WAVE
Returns:
tuple: min, max (float)
"""
mnx = (0., 0.)
for pref in ['OPT', 'BOX']:
if self[pref+'_WAVE'] is not None:
mnx = self[pref+'_WAVE'].min(), self[pref+'_WAVE'].max()
if mnx[0] != 0.:
break
return mnx
@property
def med_s2n(self):
"""Return median S/N of the spectrum
Uses OPT_COUNTS if present and then BOX_COUNTS
Returns:
float
"""
SN = 0.
for pref in ['OPT', 'BOX']:
if self[pref+'_COUNTS'] is not None:
SN = np.median(self[pref+'_COUNTS'] * np.sqrt(self[pref+'_COUNTS_IVAR']))
if SN != 0.:
break
return SN
def set_name(self):
"""
Generate a unique index for this spectrum based on the
slit/order, its position and for multi-slit the detector.
Multi-slit
Each object is named by its:
- spatial position (pixel number) on the reduced image [SPAT]
- the slit number based on SPAT center of the slit or SlitMask ID [SLIT]
- the detector number [DET]
For example::
SPAT0176-SLIT0185-DET01
Echelle
Returns:
str:
"""
naming_model = {}
for skey in ['SPAT', 'SLIT', 'DET', 'SCI', 'OBJ', 'ORDER']:
naming_model[skey.lower()] = skey
if 'Echelle' in self.PYPELINE:
# ObjID
name = naming_model['obj']
ech_name = naming_model['obj']
if self['ECH_FRACPOS'] is None:
name += '----'
else:
# JFH TODO Why not just write it out with the decimal place. That is clearer than this??
name += '{:04d}'.format(int(np.rint(1000*self.ECH_FRACPOS)))
ech_name += '{:04d}'.format(int(np.rint(1000*self.ECH_FRACPOS)))
sdet = parse.get_dnum(self.DET, prefix=False)
name += '-{:s}{:s}'.format(naming_model['det'], sdet)
ech_name += '-{:s}{:s}'.format(naming_model['det'], sdet)
# Order number
name += '-'+naming_model['order']
name += '{:04d}'.format(self.ECH_ORDER)
self.ECH_NAME = ech_name
self.NAME = name
elif 'MultiSlit' in self.PYPELINE:
# Spat
name = naming_model['spat']
if self['SPAT_PIXPOS'] is None:
name += '----'
else:
name += '{:04d}'.format(int(np.rint(self.SPAT_PIXPOS)))
# Slit
name += '-'+naming_model['slit']
name += '{:04d}'.format(self.SLITID)
sdet = parse.get_dnum(self.DET, prefix=False)
name += '-{:s}{:s}'.format(naming_model['det'], sdet)
self.NAME = name
elif 'IFU' in self.PYPELINE:
# Spat
name = naming_model['spat']
if self['SPAT_PIXPOS'] is None:
name += '----'
else:
name += '{:04d}'.format(int(np.rint(self.SPAT_PIXPOS)))
# Slit
name += '-' + naming_model['slit']
name += '{:04d}'.format(self.SLITID)
sdet = parse.get_dnum(self.DET, prefix=False)
name += '-{:s}{:s}'.format(naming_model['det'], sdet)
self.NAME = name
else:
msgs.error("Bad PYPELINE")
def copy(self):
"""
Generate a copy of this object
Returns:
:class:`SpecObj`:
"""
# Return
return copy.deepcopy(self)
def apply_spectral_flexure(self, shift, sky_spec):
"""
Apply interpolation with the flexure dict
Args:
shift (float):
additive spectral flexure in pixels
sky_spec (`linetools.spectra.xspectrum1d.XSpectrum1D`_):
Sky Spectrum
Returns:
`linetools.spectra.xspectrum1d.XSpectrum1D`_: New sky
spectrum (mainly for QA)
"""
# Simple interpolation to apply
# Apply
for attr in ['BOX', 'OPT']:
if self[attr+'_WAVE'] is not None:
msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) +
msgs.newline() + "{0:s}".format(str(self.NAME)))
self[attr+'_WAVE'] = flexure.flexure_interp(shift, self[attr+'_WAVE']).copy()
# Shift sky spec too
twave = flexure.flexure_interp(shift, sky_spec.wavelength.value) * units.AA
new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, sky_spec.flux))
# Save - since flexure may have been applied/calculated twice, this needs to be additive
self.update_flex_shift(shift, flex_type='local')
# Return
return new_sky
def update_flex_shift(self, shift, flex_type='local'):
"""Store the total spectral flexure shift in pixels
Args:
shift (float):
additive spectral flexure in pixels
"""
if flex_type == 'global':
self.FLEX_SHIFT_GLOBAL = shift
elif flex_type == 'local':
self.FLEX_SHIFT_LOCAL = shift
else:
msgs.error("Spectral flexure type must be 'global' or 'local' only")
# Now update the total flexure
self.FLEX_SHIFT_TOTAL += shift
# TODO This should be a wrapper calling a core algorithm.
def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_correct=False,
airmass=None, longitude=None, latitude=None, extrap_sens=False):
"""
Apply a sensitivity function to our spectrum
FLAM, FLAM_SIG, and FLAM_IVAR are generated
Args:
wave_zp (float array)
Zeropoint wavelength array
zeropoint (float array):
zeropoint array
exptime (float):
Exposure time
tellmodel:
Telluric correction
extinct_correct:
If True, extinction correct
airmass (float, optional):
Airmass
longitude (float, optional):
longitude in degree for observatory
latitude:
latitude in degree for observatory
Used for extinction correction
extrap_sens (bool, optional):
Extrapolate the sensitivity function (instead of crashing out)
"""
# Loop on extraction modes
for attr in ['BOX', 'OPT']:
if self[attr+'_WAVE'] is None:
continue
msgs.info("Fluxing {:s} extraction for:".format(attr) + msgs.newline() + "{}".format(self))
wave = self[attr+'_WAVE']
# Interpolate the sensitivity function onto the wavelength grid of the data
sens_factor = flux_calib.get_sensfunc_factor(
wave, wave_zp, zeropoint, exptime, tellmodel=tellmodel, extinct_correct=extinct_correct,
airmass=airmass, longitude=longitude, latitude=latitude, extrap_sens=extrap_sens)
flam = self[attr+'_COUNTS']*sens_factor
flam_sig = sens_factor/np.sqrt(self[attr+'_COUNTS_IVAR'])
flam_ivar = self[attr+'_COUNTS_IVAR']/sens_factor**2
# Mask bad pixels
msgs.info(" Masking bad pixels")
msk = np.zeros_like(sens_factor).astype(bool)
msk[sens_factor <= 0.] = True
msk[self[attr+'_COUNTS_IVAR'] <= 0.] = True
flam[msk] = 0.
flam_sig[msk] = 0.
flam_ivar[msk] = 0.
# TODO JFH We need to update the mask here. I think we need a mask for the counts and a mask for the flam,
# since they can in principle be different. We are masking bad sensfunc locations.
# Finish
self[attr+'_FLAM'] = flam
self[attr+'_FLAM_SIG'] = flam_sig
self[attr+'_FLAM_IVAR'] = flam_ivar
def apply_helio(self, vel_corr, refframe):
"""
Apply a heliocentric correction
Wavelength arrays are modified in place
Args:
vel_corr (float):
refframe (str):
"""
# Apply
for attr in ['BOX', 'OPT']:
if self[attr+'_WAVE'] is not None:
msgs.info('Applying {0} correction to '.format(refframe)
+ '{0} extraction for object:'.format(attr)
+ msgs.newline() + "{0}".format(str(self.NAME)))
self[attr+'_WAVE'] *= vel_corr
# Record
self['VEL_TYPE'] = refframe
self['VEL_CORR'] = vel_corr
def to_arrays(self, extraction='OPT', fluxed=True):
"""
Convert spectrum into np.ndarray arrays
Args:
extraction (str): Extraction method to convert
fluxed:
Returns:
tuple: wave, flux, ivar, mask arrays
"""
swave = extraction+'_WAVE'
smask = extraction+'_MASK'
if self[swave] is None:
msgs.error("This object has not been extracted with extract={}.".format(extraction))
# Fluxed?
if fluxed:
sflux = extraction+'_FLAM'
sivar = extraction+'_FLAM_IVAR'
else:
sflux = extraction+'_COUNTS'
sivar = extraction+'_COUNTS_IVAR'
# Return
return self[swave], self[sflux], self[sivar], self[smask]
def to_xspec1d(self, **kwargs):
"""
Push the data in :class:`SpecObj` into an XSpectrum1D object
Returns:
linetools.spectra.xspectrum1d.XSpectrum1D: Spectrum object
"""
wave, flux, ivar, _ = self.to_arrays(**kwargs)
sig = np.sqrt(utils.inverse(ivar))
# Create
return xspectrum1d.XSpectrum1D.from_tuple((wave, flux, sig))
def ready_for_extraction(self):
""" Simple method to check all the items are filled
and ready for skysub and extraction.
Returns:
bool: True if all checks have passed
"""
required = ['TRACE_SPAT', 'SPAT_PIXPOS', 'SPAT_FRACPOS',
'trace_spec', 'OBJID', 'FWHM', 'maskwidth', 'NAME',
'SLITID', 'DET', 'PYPELINE', 'OBJTYPE']
if 'Echelle' in self.PYPELINE:
required += ['ECH_NAME']
passed = True
for key in required:
if self[key] is None:
msgs.warn("Item {} is missing from SpecObj. Failing vette".format(key))
msgs.warn('{}'.format(self))
passed = False
#
return passed
def __repr__(self):
""" Over-ride print representation
Returns:
str: Basics of the Data Container
"""
repr = '<{:s}: '.format(self.__class__.__name__)
# Image
rdict = {}
for attr in self.datamodel.keys():
if hasattr(self, attr) and getattr(self, attr) is not None:
# Special ones
if attr in ['DET', 'SLITID', 'SPAT_PIXPOS', 'NAME', 'RA',
'DEC', 'MASKDEF_ID', 'MASKDEF_OBJNAME', 'MASKDEF_EXTRACT']:
rdict[attr] = getattr(self,attr)
else:
rdict[attr] = True
else:
rdict[attr] = False
#repr += ' items={}'.format(rdict)
repr += ' items={'
for key in rdict.keys():
if rdict[key] is not False:
repr += '{}: {}\n'.format(key, rdict[key])
return repr + '>'
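# Illustrative usage sketch (not part of the original file): building a minimal boxcar
# SpecObj from plain arrays with the from_arrays classmethod; the wavelength range,
# detector and slit values below are arbitrary assumptions.
if __name__ == '__main__':
    demo_wave = np.linspace(4000., 7000., 1000)
    demo_counts = np.full_like(demo_wave, 100.)
    demo_ivar = np.full_like(demo_wave, 0.01)
    demo_sobj = SpecObj.from_arrays('MultiSlit', demo_wave, demo_counts, demo_ivar,
                                    mode='BOX', DET=1, SLITID=10)
    print(demo_sobj.NAME, demo_sobj.med_s2n, demo_sobj.mnx_wave)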
|
src/build/fuchsia/symbolizer.py | Chilledheart/naiveproxy | 14,668 | 12626721 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
from common import SDK_ROOT
from common import GetHostArchFromPlatform
from common import GetHostToolPathFromPlatform
def BuildIdsPaths(package_paths):
"""Generates build ids paths for symbolizer processes."""
return [
os.path.join(os.path.dirname(package_path), 'ids.txt')
for package_path in package_paths
]
def RunSymbolizer(input_fd, output_fd, ids_txt_paths):
"""Starts a symbolizer process.
input_fd: Input file to be symbolized.
output_fd: Output file for symbolizer stdout and stderr.
ids_txt_paths: Path to the ids.txt files which map build IDs to
unstripped binaries on the filesystem.
Returns a Popen object for the started process."""
symbolizer = GetHostToolPathFromPlatform('symbolizer')
symbolizer_cmd = [
symbolizer, '--omit-module-lines', '--build-id-dir',
os.path.join(SDK_ROOT, '.build-id')
]
for ids_txt in ids_txt_paths:
symbolizer_cmd.extend(['--ids-txt', ids_txt])
logging.debug('Running "%s".' % ' '.join(symbolizer_cmd))
return subprocess.Popen(symbolizer_cmd,
stdin=input_fd,
stdout=output_fd,
stderr=subprocess.STDOUT,
close_fds=True)
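# Illustrative usage sketch (not part of the original file): symbolizing a previously
# captured log file; the package path and log file names below are hypothetical.
if __name__ == '__main__':
    with open('fuchsia.log', 'r') as log_in, open('fuchsia.symbolized.log', 'w') as log_out:
        symbolizer_proc = RunSymbolizer(log_in, log_out,
                                        BuildIdsPaths(['out/gen/example_package.far']))
        symbolizer_proc.wait()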
|
securify/analyses/patterns/ast/shadowed_builtin_pattern.py | AlexandreH/securify2 | 258 | 12626726 | <reponame>AlexandreH/securify2
from typing import List
from securify.analyses.patterns.abstract_pattern import Severity, PatternMatch, MatchComment
from securify.analyses.patterns.ast.abstract_ast_pattern import AbstractAstPattern
from securify.analyses.patterns.ast.declaration_utils import DeclarationUtils
class ShadowedBuiltinPattern(DeclarationUtils, AbstractAstPattern):
name = "Shadowed Builtin"
description = "Reports declarations that shadow Solidity's builtin symbols."
severity = Severity.MEDIUM
tags = {}
def find_matches(self) -> List[PatternMatch]:
ast_root = self.get_ast_root()
def match_violation(name, tpe, node):
return self.match_violation().with_info(
MatchComment(f"{tpe.capitalize()} shadows builtin symbol '{name}'."),
*self.ast_node_info(node)
)
for decl_name, decl_type, decl_node in self.find_named_nodes(ast_root):
if decl_name in self.builtin_symbols:
yield match_violation(decl_name, decl_type, decl_node)
builtin_symbols = {
"assert", "require", "revert",
"blockhash", "block", "gasleft", "msg", "now", "tx", "abi",
"addmod", "mulmod",
"keccak256", "sha256", "sha3", "ripemd160", "ecrecover",
"this", "super",
"selfdestruct", "suicide",
"abstract", "after", "alias", "apply", "auto",
"case", "catch", "copyof",
"default", "define",
"final",
"immutable", "implements", "in", "inline",
"let",
"macro", "match", "mutable",
"null",
"of", "override",
"partial", "promise",
"reference", "relocatable",
"sealed", "sizeof", "static", "supports", "switch",
"try", "type", "typedef", "typeof",
"unchecked"
}
|
src/python/grapl-template-generator/grapl_template_generator/common_types.py | grapl-security/grapl | 291 | 12626738 | <reponame>grapl-security/grapl<gh_stars>100-1000
from typing import NewType
VersionConstraint = NewType("VersionConstraint", str)
|
usaspending_api/disaster/v2/views/object_class/loans.py | ststuck/usaspending-api | 217 | 12626782 | from typing import List
from decimal import Decimal
from django.db.models import F, Value, TextField, Min
from django.db.models.functions import Cast
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.helpers.orm_helpers import ConcatAll
from usaspending_api.disaster.v2.views.disaster_base import (
LoansPaginationMixin,
LoansMixin,
FabaOutlayMixin,
)
from usaspending_api.disaster.v2.views.elasticsearch_account_base import ElasticsearchAccountDisasterBase
from usaspending_api.references.models import ObjectClass
class ObjectClassLoansViewSet(LoansMixin, FabaOutlayMixin, LoansPaginationMixin, ElasticsearchAccountDisasterBase):
"""Provides insights on the Object Classes' loans from disaster/emergency funding per the requested filters"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/disaster/object_class/loans.md"
agg_key = "financial_accounts_by_award.object_class" # primary (tier-1) aggregation key
nested_nonzero_fields = {"obligation": "transaction_obligated_amount", "outlay": "gross_outlay_amount_by_award_cpe"}
query_fields = [
"major_object_class_name",
"major_object_class_name.contains",
"object_class_name",
"object_class_name.contains",
]
top_hits_fields = [
"financial_accounts_by_award.object_class_id",
"financial_accounts_by_award.major_object_class_name",
"financial_accounts_by_award.major_object_class",
"financial_accounts_by_award.object_class_name",
"financial_accounts_by_award.object_class",
]
@cache_response()
def post(self, request):
self.filters.update({"award_type_codes": ["07", "08"]})
self.has_children = True
return self.perform_elasticsearch_search(loans=True)
@property
def queryset(self):
query = self.construct_loan_queryset(
ConcatAll("object_class__major_object_class", Value(":"), "object_class__object_class"),
ObjectClass.objects.annotate(join_key=ConcatAll("major_object_class", Value(":"), "object_class")),
"join_key",
)
annotations = {
"major_code": F("major_object_class"),
"description": Min("object_class_name"),
"code": F("object_class"),
"id_": Cast(Min("id"), output_field=TextField()),
"major_description": Min("major_object_class_name"),
"obligation": query.obligation_column,
"outlay": query.outlay_column,
"total_budgetary_resources": query.face_value_of_loan_column,
"award_count": query.award_count_column,
}
return query.queryset.values("major_object_class", "object_class").annotate(**annotations).values(*annotations)
def build_elasticsearch_result(self, info_buckets: List[dict]) -> List[dict]:
temp_results = {}
child_results = []
for bucket in info_buckets:
child = self._build_child_json_result(bucket)
child_results.append(child)
for child in child_results:
result = self._build_json_result(child)
child.pop("parent_data")
if result["id"] in temp_results.keys():
temp_results[result["id"]] = {
"id": int(result["id"]),
"code": result["code"],
"description": result["description"],
"award_count": temp_results[result["id"]]["award_count"] + result["award_count"],
# the count of distinct awards contributing to the totals
"obligation": temp_results[result["id"]]["obligation"] + result["obligation"],
"outlay": temp_results[result["id"]]["outlay"] + result["outlay"],
"face_value_of_loan": bucket["count_awards_by_dim"]["sum_loan_value"]["value"],
"children": temp_results[result["id"]]["children"] + result["children"],
}
else:
temp_results[result["id"]] = result
results = [x for x in temp_results.values()]
return results
def _build_json_result(self, child):
return {
"id": child["parent_data"][1],
"code": child["parent_data"][1],
"description": child["parent_data"][0],
"award_count": child["award_count"],
# the count of distinct awards contributing to the totals
"obligation": child["obligation"],
"outlay": child["outlay"],
"face_value_of_loan": child["face_value_of_loan"],
"children": [child],
}
def _build_child_json_result(self, bucket: dict):
return {
"id": bucket["dim_metadata"]["hits"]["hits"][0]["_source"]["object_class_id"],
"code": bucket["key"],
"description": bucket["dim_metadata"]["hits"]["hits"][0]["_source"]["object_class_name"],
# the count of distinct awards contributing to the totals
"award_count": int(bucket["count_awards_by_dim"]["award_count"]["value"]),
**{
key: Decimal(bucket.get(f"sum_{val}", {"value": 0})["value"])
for key, val in self.nested_nonzero_fields.items()
},
"face_value_of_loan": bucket["count_awards_by_dim"]["sum_loan_value"]["value"],
"parent_data": [
bucket["dim_metadata"]["hits"]["hits"][0]["_source"]["major_object_class_name"],
bucket["dim_metadata"]["hits"]["hits"][0]["_source"]["major_object_class"],
],
}
|
statuspage/settings.py | bctransitapps/statuspage | 113 | 12626789 | <reponame>bctransitapps/statuspage
import os
import dj_database_url
import logging
logger = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PRODUCTION = os.environ.get('PRODUCTION', False)
STATUS_TICKET_URL = os.environ.get('STATUS_TICKET_URL', None)
STATUS_LOGO_URL = os.environ.get('STATUS_LOGO_URL', None)
STATUS_TITLE = os.environ.get('STATUS_TITLE', None)
STATUS_ANALYTICS = os.environ.get('STATUS_ANALYTICS', None)
SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL', '#engineering')
SLACK_TOKEN = os.environ.get('SLACK_TOKEN', None)
SLACK_USERNAME = os.environ.get('SLACK_USERNAME', 'STATUSBOT')
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
ALLOWED_HOSTS = [
'*',
]
if os.environ.get('PRODUCTION', False) in (True, 'True', 'TRUE', 'true', '1', 1):
PRODUCTION = True
DEBUG = False
else:
PRODUCTION = False
DEBUG = True
INTERNAL_IPS = (
'127.0.0.1',
)
ADMINS = (
('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': dj_database_url.config(default=os.environ.get('DATABASE_URL', "sqlite:///statuspage.db"))
}
TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = 'logout'
MEDIA_ROOT = 'media'
STATIC_ROOT = 'static'
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_PATH, '../')
STATICFILES_DIRS = (
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<KEY>'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'status.context_processors.analytics',
],
},
},
]
MIDDLEWARE = [
'status.middleware.LoginRequiredShimMiddleware',
'django.middleware.gzip.GZipMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
STRONGHOLD_DEFAULTS = True
STRONGHOLD_PUBLIC_URLS = (
r'^/api/',
)
ROOT_URLCONF = 'statuspage.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'statuspage.wsgi.application'
TEMPLATE_DIRS = (
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.humanize',
'debug_toolbar',
'django_extensions',
'bootstrap3',
'tastypie',
'avatar',
'gunicorn',
'stronghold',
'status',
)
try:
    MIDDLEWARE += (
# 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',
)
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
RAVEN_CONFIG = {
'dsn': os.environ.get('SENTRY_URL', None)
}
except Exception as e:
    logger.warning('Unable to load Raven: %s' % (e))
if os.environ.get('REDIS_URL', None):
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "%s/1" % (os.environ.get('REDIS_URL', None)),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
BROKER_URL = os.environ.get("REDIS_URL", None)
APPEND_SLASH = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
EMAIL_HOST = os.environ.get('MAIL_SERVER', None)
EMAIL_PORT = os.environ.get('MAIL_PORT', 25)
EMAIL_HOST_USER = os.environ.get('MAIL_USER', None)
EMAIL_HOST_PASSWORD = os.environ.get('MAIL_PASSWORD', None)
EMAIL_USE_TLS = os.environ.get('MAIL_TLS', False)
DEFAULT_FROM_EMAIL = os.environ.get('MAIL_FROM', '<EMAIL>')
|
aiida/parsers/plugins/diff_tutorial/parsers.py | azadoks/aiida-core | 180 | 12626795 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Parsers for DiffCalculation of plugin tutorial.
Register parsers via the "aiida.parsers" entry point in the setup.json file.
"""
# START PARSER HEAD
from aiida.engine import ExitCode
from aiida.orm import SinglefileData
from aiida.parsers.parser import Parser
from aiida.plugins import CalculationFactory
DiffCalculation = CalculationFactory('diff-tutorial')
class DiffParser(Parser):
# END PARSER HEAD
"""
Parser class for DiffCalculation.
"""
def parse(self, **kwargs):
"""
Parse outputs, store results in database.
:returns: non-zero exit code, if parsing fails
"""
output_filename = self.node.get_option('output_filename')
# Check that folder content is as expected
files_retrieved = self.retrieved.list_object_names()
files_expected = [output_filename]
# Note: set(A) <= set(B) checks whether A is a subset of B
if not set(files_expected) <= set(files_retrieved):
self.logger.error(f"Found files '{files_retrieved}', expected to find '{files_expected}'")
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
# add output file
self.logger.info(f"Parsing '{output_filename}'")
with self.retrieved.open(output_filename, 'rb') as handle:
output_node = SinglefileData(file=handle)
self.out('diff', output_node)
return ExitCode(0)
class DiffParserSimple(Parser):
"""
Simple Parser class for DiffCalculation.
"""
def parse(self, **kwargs):
"""
Parse outputs, store results in database.
"""
output_filename = self.node.get_option('output_filename')
# add output file
self.logger.info(f"Parsing '{output_filename}'")
with self.retrieved.open(output_filename, 'rb') as handle:
output_node = SinglefileData(file=handle)
self.out('diff', output_node)
return ExitCode(0)
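# Hypothetical registration sketch (not part of the original file): the parsers
# above are exposed through the "aiida.parsers" entry point group mentioned in
# the module docstring, e.g. in setup.json (entry point names are illustrative):
#   "entry_points": {
#       "aiida.parsers": [
#           "diff-tutorial = aiida.parsers.plugins.diff_tutorial.parsers:DiffParser",
#           "diff-tutorial.simple = aiida.parsers.plugins.diff_tutorial.parsers:DiffParserSimple"
#       ]
#   }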
|
app/grandchallenge/core/templatetags/dict_lookup.py | kaczmarj/grand-challenge.org | 101 | 12626812 | from django import template
register = template.Library()
@register.simple_tag
def get_dict_values(dictionary, key):
try:
return dictionary.get(key)
except AttributeError:
return None
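# Hypothetical usage sketch (not part of the original file), in a Django template
# that loads this tag library (the variable names below are placeholders):
#   {% load dict_lookup %}
#   {% get_dict_values my_dict "some_key" as value %}
#   {{ value }}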
|
networks/raw_rnn.py | Fred1991/VAE-GMVAE | 193 | 12626834 | <reponame>Fred1991/VAE-GMVAE
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 13 11:17:53 2018
@author: psanch
"""
import tensorflow as tf
import utils.constants as const
from utils.utils import get1toT
from networks.base_raw_rnn import BaseRawRNN
from networks.dense_net import DenseNet
import utils.utils as utils
class RawRNNConcat(BaseRawRNN):
def __init__(self, cell_type, state_dim, input_, max_time, output_dim, reuse, drop_rate_x=0.,
kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.01), var_shared=False):
super().__init__(input_, max_time, output_dim, cell_type, state_dim, reuse, kinit, bias_init)
self.rnn_input_dim = self.input_dim + self.output_dim
self.drop_rate_x = drop_rate_x
self.act_out_mean = None
self.act_out_var = tf.nn.softplus
self.var_shared = var_shared
self.output_mean, self.output_var, self.output_z = self.my_build()
def my_build(self):
output_list, state_list = self.build(self.get_loop_fn())
outputs_mean = output_list[0]
outputs_var = output_list[1]
outputs_z = output_list[2]
states_all_c = state_list[0]
states_all_h = state_list[1]
print('Means: ', outputs_mean.get_shape().as_list())
print('Vars: ', outputs_var.get_shape().as_list())
print('Sampled z: ', outputs_z.get_shape().as_list())
print('States c: ', states_all_c.get_shape().as_list())
print('States h: ', states_all_h.get_shape().as_list())
return outputs_mean, outputs_var, outputs_z
def get_output_step(self, cell_output):
with tf.variable_scope('mean', reuse=tf.AUTO_REUSE):
mean_net = DenseNet(input_=cell_output,
hidden_dim=-1,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=None,
act_out=self.act_out_mean,
reuse=tf.AUTO_REUSE,
kinit=self.kinit,
bias_init=self.bias_init)
mean = mean_net.output
with tf.variable_scope('var', reuse=tf.AUTO_REUSE):
if(self.var_shared):
var = utils.get_variable(self.output_dim, 'var')
var = tf.tile(var, [self.batch_size, 1])# [batch_size, var.dim]
else:
var_net = DenseNet(input_=cell_output,
hidden_dim=-1,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=None,
act_out=self.act_out_var,
reuse=tf.AUTO_REUSE,
kinit=self.kinit,
bias_init=self.bias_init)
var = var_net.output
eps = tf.random_normal((self.batch_size, self.output_dim), 0, 1, dtype=tf.float32)
current_z = tf.add(mean, tf.multiply(tf.sqrt(var), eps))
return mean, var, current_z
def get_next_input(self, x_time, current_z):
with tf.variable_scope('aux', reuse=tf.AUTO_REUSE):
aux_net = DenseNet(input_=current_z,
hidden_dim=-1,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=None,
act_out=tf.nn.sigmoid,
reuse=tf.AUTO_REUSE)
current_z = aux_net.output
return tf.concat([tf.layers.dropout(x_time, rate=self.drop_rate_x), current_z],1)
def get_loop_fn(self):
inputs_ta, output_ta = self.get_tensor_arrays(self.input_)
def loop_fn(time, cell_output, cell_state, loop_state):
elements_finished = (time >= self.max_time)
finished = tf.reduce_all(elements_finished)
if cell_output is None:
'''
time == 0, used for initialization before first call to cell
This is just to defined the desired shape of the tensors
'''
next_cell_state = self.cell.zero_state(self.batch_size, tf.float32)
'''
the emit_output in this case tells TF how future emits look
For the first call to loop_fn the emit_output corresponds to
the emit_structure which is then used to determine the size of
the zero_tensor for the emit_ta (defaults to cell.output_size).
'''
emit_output = tf.tuple([tf.zeros([self.output_dim]), tf.zeros([self.output_dim]),
tf.zeros([self.output_dim])])
# tf.zeros([config.batch_size, output_dim], dtype=tf.float32) # tf.zeros([output_dim])
next_loop_state = output_ta
'''
this is the initial step, i.e. there is no output from a previous time step, what we feed here
can highly depend on the data. In this case we just assign the actual input in the first time step.
'''
init_z = tf.zeros((self.batch_size, self.output_dim), dtype=tf.float32)
#init_z = tf.random_normal((config.batch_size, output_dim), 0, 1, dtype=tf.float32)
x_time = tf.layers.dropout(inputs_ta.read(time), rate= self.drop_rate_x)
next_in = tf.concat([x_time, init_z],1)
else:
'''
t > 0, called right after call to cell, i.e. cell_output is the output from time t-1.
                here you can do whatever you want with cell_output before assigning it to emit_output.
In this case, we don't do anything pass the last state to the next
'''
next_cell_state = cell_state
next_loop_state = self.get_next_loop_state(loop_state, cell_state, time)
'''Next Output'''
# cell_output = tf.Print(cell_output,[cell_output], message="cell_output")
mean, var, current_z = self.get_output_step(cell_output)
# current_z = tf.Print(current_z,[current_z], message="current z")
emit_output = tf.tuple([mean, var, current_z])
# tf.tuple([mean, var]) tf.concat([mean, var],1) cell_output mean
next_in = tf.cond(finished,
lambda: tf.zeros([self.batch_size, self.rnn_input_dim], dtype=tf.float32),
lambda: self.get_next_input(inputs_ta.read(time), current_z) )
next_input = tf.cond(finished,
lambda: tf.zeros([self.batch_size, self.rnn_input_dim], dtype=tf.float32),
lambda: next_in)
next_input.set_shape([None, self.rnn_input_dim])
return (finished, next_input, next_cell_state, emit_output, next_loop_state)
return loop_fn
'''
Inference Network for TVAE1
'''
'''
Generator Network for TVAE
'''
class RawRNNGener(BaseRawRNN):
def __init__(self, cell_type, state_dim, input_, max_time, output_dim, reuse,
kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.01), var_shared=False):
super().__init__(input_, max_time, output_dim, cell_type, state_dim, reuse, kinit, bias_init)
self.rnn_input_dim = self.output_dim
self.act_out_mean = None
self.act_out_var = tf.nn.softplus
self.var_shared = var_shared
self.is_sample = len(input_.get_shape().as_list())==2
self.is_time = not self.is_sample
self.output_mean, self.output_var, self.output_z = self.my_build()
def my_build(self):
loop_fn, inputs_ta = self.get_loop_fn()
output_list, state_list = self.build(loop_fn)
outputs_mean = output_list[0]
outputs_var = output_list[1]
outputs_z = output_list[2]
outputs_mean = get1toT(output_list[0], tf.zeros([self.batch_size, self.output_dim]), self.max_time)
outputs_var = get1toT(output_list[1], tf.ones([self.batch_size, self.output_dim]), self.max_time)
outputs_z = get1toT(output_list[2], self.input_, self.max_time)
if(self.is_sample):
outputs_z = get1toT(output_list[2], self.input_, self.max_time)
else:
outputs_z = get1toT(output_list[2], inputs_ta.read(0), self.max_time)
states_all_c = state_list[0]
states_all_h = state_list[1]
print('Means: ', outputs_mean.get_shape().as_list())
print('Vars: ', outputs_var.get_shape().as_list())
print('Sampled z: ', outputs_z.get_shape().as_list())
print('States c: ', states_all_c.get_shape().as_list())
print('States h: ', states_all_h.get_shape().as_list())
return outputs_mean, outputs_var, outputs_z
def get_output_step(self, cell_output):
with tf.variable_scope('mean', reuse=tf.AUTO_REUSE):
mean_net = DenseNet(input_=cell_output,
hidden_dim=-1,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=None,
act_out=self.act_out_mean,
reuse=tf.AUTO_REUSE,
kinit=self.kinit,
bias_init=self.bias_init)
mean = mean_net.output
with tf.variable_scope('var', reuse=tf.AUTO_REUSE):
if(self.var_shared):
var = utils.get_variable(self.output_dim, 'var')
var = tf.tile(var, [self.batch_size, 1])# [batch_size, var.dim]
else:
var_net = DenseNet(input_=cell_output,
hidden_dim=-1,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=None,
act_out=self.act_out_var,
reuse=tf.AUTO_REUSE,
kinit=self.kinit,
bias_init=self.bias_init)
var = var_net.output
eps = tf.random_normal((self.batch_size, self.output_dim), 0, 1, dtype=tf.float32)
current_z = tf.add(mean, tf.multiply(tf.sqrt(var), eps))
return mean, var, current_z
def get_next_input(self, x_time, current_z):
return
def get_loop_fn(self):
inputs_ta, output_ta = self.get_tensor_arrays(self.input_)
def loop_fn(time, cell_output, cell_state, loop_state):
elements_finished = (time >= self.max_time)
finished = tf.reduce_all(elements_finished)
if cell_output is None:
'''
time == 0, used for initialization before first call to cell
This is just to defined the desired shape of the tensors
'''
next_cell_state = self.cell.zero_state(self.batch_size, tf.float32)
'''
the emit_output in this case tells TF how future emits look
For the first call to loop_fn the emit_output corresponds to
the emit_structure which is then used to determine the size of
the zero_tensor for the emit_ta (defaults to cell.output_size).
'''
emit_output = tf.tuple([tf.zeros([self.output_dim]), tf.zeros([self.output_dim]),
tf.zeros([self.output_dim])])
# tf.zeros([config.batch_size, output_dim], dtype=tf.float32) # tf.zeros([output_dim])
next_loop_state = output_ta
'''
this is the initial step, i.e. there is no output from a previous time step, what we feed here
can highly depend on the data. In this case we just assign the actual input in the first time step.
'''
if(self.is_sample):
next_in = self.input_
else:
next_in = inputs_ta.read(time)
else:
'''
t > 0, called right after call to cell, i.e. cell_output is the output from time t-1.
                here you can do whatever you want with cell_output before assigning it to emit_output.
In this case, we don't do anything pass the last state to the next
'''
next_cell_state = cell_state
next_loop_state = self.get_next_loop_state(loop_state, cell_state, time)
'''Next Output'''
# cell_output = tf.Print(cell_output,[cell_output], message="cell_output")
mean, var, current_z = self.get_output_step(cell_output)
# current_z = tf.Print(current_z,[current_z], message="current z")
emit_output = tf.tuple([mean, var, current_z])
# tf.tuple([mean, var]) tf.concat([mean, var],1) cell_output mean
next_in = current_z
if(self.is_sample):
next_input = tf.cond(finished,
lambda: tf.zeros([self.batch_size, self.rnn_input_dim], dtype=tf.float32),
lambda: next_in)
else:
next_input = tf.cond(finished,
lambda: tf.zeros([self.batch_size, self.rnn_input_dim], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_input.set_shape([None, self.rnn_input_dim])
return (finished, next_input, next_cell_state, emit_output, next_loop_state)
return loop_fn, inputs_ta
|
hv6/hv6/spec/kernel/spec/invariants.py | ProKil/OS2018spring-projects-g10 | 132 | 12626901 | <filename>hv6/hv6/spec/kernel/spec/invariants.py
#
# Copyright 2017 Hyperkernel Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import z3
from libirpy import util
from libirpy import solver
import hv6py.kernel.spec.datatypes as dt
from helpers import (
is_fd_valid,
is_fn_valid,
is_pid_bounded,
is_pid_valid,
is_pn_valid,
is_status_live,
pn_has_owner_and_type,
)
__all__ = ['impl_invariants_c', 'impl_invariants_py', 'impl_invariants', 'spec_invariants']
def impl_invariants_c(ctx):
conj = []
pid = util.FreshBitVec('pid', dt.pid_t)
pn = util.FreshBitVec('pn', dt.pn_t)
fd = util.FreshBitVec('fd', dt.fd_t)
try:
old_solver = ctx.solver
old_globals = ctx.globals
def implies(ctx, a, b):
return z3.Implies(a, b)
def and_(ctx, *args):
return z3.And(*args)
def or_(ctx, *args):
return z3.Or(*args)
ctx.globals['@implies'] = implies
ctx.globals['@and2'] = and_
ctx.globals['@and3'] = and_
ctx.globals['@and4'] = and_
ctx.globals['@and5'] = and_
ctx.globals['@and6'] = and_
ctx.globals['@and7'] = and_
ctx.globals['@and8'] = and_
ctx.globals['@and9'] = and_
ctx.globals['@or2'] = or_
ctx.globals['@or3'] = or_
ctx.globals['@or4'] = or_
ctx.solver = solver.Solver()
ctx.solver.add(z3.BoolVal(False))
conj.append(z3.ForAll([pid], ctx.call('@inv_proc_owns_pns', pid)))
conj.append(z3.ForAll([pid], ctx.call(
'@inv_sleeping_proc_owns_ipc', pid)))
conj.append(z3.ForAll([pid], ctx.call(
'@inv_sleeping_proc_ipc_fd_valid_empty', pid)))
conj.append(z3.ForAll([pid], ctx.call('@inv_proc_pns_valid', pid)))
conj.append(z3.ForAll([pid], ctx.call('@inv_io_bitmap', pid)))
conj.append(z3.ForAll([pn], ctx.call('@inv_page_owner', pn)))
conj.append(z3.ForAll([pn], ctx.call('@inv_proc_unused_refs', pn)))
conj.append(z3.ForAll([pid, fd], ctx.call( '@inv_proc_fds_valid', pid, fd)))
conj.append(z3.ForAll([pn], ctx.call('@inv_page_freelist_valid', pn)))
conj.append(z3.ForAll([pid], ctx.call('@inv_proc_freelist_valid', pid)))
conj.append(ctx.call('@inv_current_valid'))
conj.append(ctx.call('@inv_current_running'))
finally:
ctx.solver = old_solver
ctx.globals = old_globals
return z3.And(*conj)
def impl_invariants_py(ctx):
conj = []
pid = util.FreshBitVec('pid', dt.pid_t)
pn = util.FreshBitVec('pn', dt.pn_t)
fd = util.FreshBitVec('fd', dt.fd_t)
# fn = util.FreshBitVec('fn', dt.fn_t)
# embryos, runnable or running processes own the pages in their structs
conj.append(z3.ForAll([pid], z3.Implies(
z3.Or(util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_EMBRYO,
util.global_field_element(
ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_RUNNING,
util.global_field_element(
ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_RUNNABLE,
util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_SLEEPING),
z3.And(
pn_has_owner_and_type(ctx, util.global_field_element(
ctx, '@proc_table', 'page_table_root', pid), pid, dt.page_type.PAGE_TYPE_X86_PML4),
pn_has_owner_and_type(ctx, util.global_field_element(
ctx, '@proc_table', 'hvm', pid), pid, dt.page_type.PAGE_TYPE_PROC_DATA),
pn_has_owner_and_type(ctx, util.global_field_element(
ctx, '@proc_table', 'stack', pid), pid, dt.page_type.PAGE_TYPE_PROC_DATA),
))))
# sleeping processes own their ipc_page
conj.append(z3.ForAll([pid], z3.Implies(z3.And(is_pid_valid(pid),
util.global_field_element(ctx, '@proc_table', 'state', pid) != dt.proc_state.PROC_ZOMBIE),
z3.Implies(util.global_field_element(ctx, '@proc_table', 'use_io_bitmap', pid) != 0,
z3.And(
is_pn_valid(util.global_field_element(ctx, '@proc_table', 'io_bitmap_a', pid)),
is_pn_valid(util.global_field_element(ctx, '@proc_table', 'io_bitmap_b', pid)),
pn_has_owner_and_type(ctx, util.global_field_element(ctx, '@proc_table', 'io_bitmap_a', pid), pid, dt.page_type.PAGE_TYPE_PROC_DATA),
pn_has_owner_and_type(ctx, util.global_field_element(ctx, '@proc_table', 'io_bitmap_b', pid), pid, dt.page_type.PAGE_TYPE_PROC_DATA))))))
# sleeping processes own their ipc_page
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_SLEEPING,
pn_has_owner_and_type(ctx, util.global_field_element(ctx, '@proc_table', 'ipc_page', pid), pid, dt.page_type.PAGE_TYPE_FRAME)))))
conj.append(z3.ForAll([pid],
z3.And(
is_pn_valid(util.global_field_element(
ctx, '@proc_table', 'page_table_root', pid)),
is_pn_valid(util.global_field_element(
ctx, '@proc_table', 'hvm', pid)),
is_pn_valid(util.global_field_element(ctx, '@proc_table', 'stack', pid)))))
# sleeping processes' ipc fd are empty if valid
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_SLEEPING,
z3.Implies(is_fd_valid(util.global_field_element(ctx, '@proc_table', 'ipc_fd', pid)),
util.global_field_element(ctx, '@proc_table', 'ofile', pid,
z3.ZeroExt(32, util.global_field_element(ctx, '@proc_table', 'ipc_fd', pid))) == z3.BitVecVal(0, dt.fn_t))))))
conj.append(z3.ForAll([pid],
z3.And(
is_pn_valid(util.global_field_element(
ctx, '@proc_table', 'page_table_root', pid)),
is_pn_valid(util.global_field_element(
ctx, '@proc_table', 'hvm', pid)),
is_pn_valid(util.global_field_element(ctx, '@proc_table', 'stack', pid)))))
# page has an owner <=> page is not free
conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
is_pid_valid(util.global_field_element(ctx, '@page_desc_table', 'pid', pn)) ==
(util.global_field_element(ctx, '@page_desc_table', 'type', pn) != dt.page_type.PAGE_TYPE_FREE))))
# unused procs have zero refcnt
conj.append(z3.ForAll([pid], z3.Implies(
z3.And(
is_pid_valid(pid),
util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_UNUSED),
z3.And(
util.global_field_element(ctx, '@proc_table', 'nr_children', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_fds', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_pages', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_dmapages', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_devs', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_ports', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_vectors', pid) == z3.BitVecVal(0, dt.size_t),
util.global_field_element(ctx, '@proc_table', 'nr_intremaps', pid) == z3.BitVecVal(0, dt.size_t)))))
# conj.append(z3.ForAll([pid, fd], z3.Implies(z3.And(is_pid_valid(pid), is_fd_valid(fd)),
# z3.Implies(
# util.global_field_element(ctx, '@proc_table', 'nr_fds', pid) == z3.BitVecVal(0, dt.size_t),
# z3.Not(is_fn_valid(util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd))))))))
# # unused procs have zero fds
# conj.append(z3.ForAll([pid, fd], z3.Implies(z3.And(is_pid_valid(pid), is_fd_valid(fd)),
# z3.Implies(
# util.global_field_element(ctx, '@proc_table', 'state', pid) == dt.proc_state.PROC_UNUSED,
# util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd)) == z3.BitVecVal(0, dt.fn_t)))))
# fds valid
conj.append(z3.ForAll([pid, fd], z3.Implies(z3.And(is_pid_valid(pid), is_fd_valid(fd)),
z3.Or(
util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd)) == z3.BitVecVal(0, dt.fn_t),
is_fn_valid(util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd)))))))
# # FD_NONE's refcount is 0
# conj.append(z3.ForAll([fn], z3.Implies(
# z3.And(
# is_fn_valid(fn),
# util.global_field_element(ctx, '@file_table', 'type', fn) == dt.file_type.FD_NONE),
# util.global_field_element(ctx, '@file_table', 'refcnt', fn) == z3.BitVecVal(0, dt.size_t))))
# # FD never points to FD_NONE
# conj.append(z3.ForAll([pid, fd], z3.Implies(
# z3.And(
# is_pid_valid(pid),
# is_fd_valid(fd),
# is_fn_valid(util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd)))),
# util.global_field_element(ctx, '@file_table', 'type',
# util.global_field_element(ctx, '@proc_table', 'ofile', pid, z3.ZeroExt(32, fd))) != dt.file_type.FD_NONE)))
# page freelist is well formed
conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
z3.And(
is_pn_valid(util.global_field_element(ctx, '@page_desc_table', ['link', 'prev'], pn)),
is_pn_valid(util.global_field_element(ctx, '@page_desc_table', ['link', 'next'], pn))))))
# ready queue is well formed
# don't use is_pid_valid as some ->prev and ->next are zeros
conj.append(z3.ForAll([pid], z3.Implies(is_pid_bounded(pid),
z3.And(
is_pid_bounded(util.global_field_element(ctx, '@proc_table', ['ready', 'prev'], pid)),
is_pid_bounded(util.global_field_element(ctx, '@proc_table', ['ready', 'next'], pid))))))
# Current is always a valid and running
conj.append(is_pid_valid(util.global_value(ctx, '@current')))
conj.append(util.global_field_element(ctx, '@proc_table', 'state', util.global_value(ctx, '@current')) == dt.proc_state.PROC_RUNNING)
return z3.And(*conj)
impl_invariants = impl_invariants_py
# Generic invariants
def spec_invariants(kernelstate):
conj = []
pid = util.FreshBitVec('pid', dt.pid_t)
pn = util.FreshBitVec('pn', dt.pn_t)
#
# procs' page table, hvm and stack are
#
# 1) valid
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.And(
is_pn_valid(kernelstate.procs[pid].page_table_root),
is_pn_valid(kernelstate.procs[pid].hvm),
is_pn_valid(kernelstate.procs[pid].stack)))))
# 2) owned by that proc
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(
is_status_live(kernelstate.procs[pid].state),
z3.And(
kernelstate.pages[kernelstate.procs[pid].page_table_root].owner == pid,
kernelstate.pages[kernelstate.procs[pid].hvm].owner == pid,
kernelstate.pages[kernelstate.procs[pid].stack].owner == pid)))))
# 3) have the correct type
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(
is_status_live(kernelstate.procs[pid].state),
z3.And(
kernelstate.pages[kernelstate.procs[pid].page_table_root].type == dt.page_type.PAGE_TYPE_X86_PML4,
kernelstate.pages[kernelstate.procs[pid].hvm].type == dt.page_type.PAGE_TYPE_PROC_DATA,
kernelstate.pages[kernelstate.procs[pid].stack].type == dt.page_type.PAGE_TYPE_PROC_DATA)))))
##
# Sleeping PROC's ipc_page is a frame owned by that pid
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(
kernelstate.procs[pid].state == dt.proc_state.PROC_SLEEPING,
z3.And(
is_pn_valid(kernelstate.procs[pid].ipc_page),
kernelstate.pages[kernelstate.procs[pid]
.ipc_page].type == dt.page_type.PAGE_TYPE_FRAME,
kernelstate.pages[kernelstate.procs[pid].ipc_page].owner == pid)))))
## Non-zombie procs with use_io_bitmaps own their (valid) bitmap pages
conj.append(z3.ForAll([pid],
z3.Implies(
z3.And(
is_pid_valid(pid),
kernelstate.procs[pid].use_io_bitmap,
kernelstate.procs[pid].state != dt.proc_state.PROC_ZOMBIE),
z3.And(
is_pn_valid(kernelstate.procs[pid].io_bitmap_a),
is_pn_valid(kernelstate.procs[pid].io_bitmap_b),
kernelstate.pages[kernelstate.procs[pid].io_bitmap_a].owner == pid,
kernelstate.pages[kernelstate.procs[pid].io_bitmap_b].owner == pid,
kernelstate.pages[kernelstate.procs[pid].io_bitmap_a].type == dt.page_type.PAGE_TYPE_PROC_DATA,
kernelstate.pages[kernelstate.procs[pid].io_bitmap_b].type == dt.page_type.PAGE_TYPE_PROC_DATA))))
# page has an owner <=> page is not free
conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
is_pid_valid(kernelstate.pages[pn].owner) == (kernelstate.pages[pn].type != dt.page_type.PAGE_TYPE_FREE))))
conj.append(z3.ForAll([pn], z3.Implies(is_pn_valid(pn),
z3.Implies(kernelstate.pages[pn].type == dt.page_type.PAGE_TYPE_FREE,
z3.Not(is_pid_valid(kernelstate.pages[pn].owner))))))
# a sleeping proc's ipc_fd is either invalid or empty
conj.append(z3.ForAll([pid], z3.Implies(z3.And(
is_pid_valid(pid),
kernelstate.procs[pid].state == dt.proc_state.PROC_SLEEPING),
z3.Or(z3.Not(is_fd_valid(kernelstate.procs[pid].ipc_fd)),
z3.Not(is_fn_valid(kernelstate.procs[pid].ofile(kernelstate.procs[pid].ipc_fd)))))))
##############
# Unused procs's refcount is all zero
# conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
# z3.Implies(kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED,
# z3.And(
# kernelstate.procs[pid].nr_pages(dt.NPAGE - 1) == z3.BitVecVal(0, dt.size_t))))))
# kernelstate.procs[pid].nr_children(dt.NPROC - 1) == z3.BitVecVal(0, dt.size_t),
# kernelstate.procs[pid].nr_fds(dt.NOFILE - 1) == z3.BitVecVal(0, dt.size_t),
# kernelstate.procs[pid].nr_devs(dt.NPCIDEV - 1) == z3.BitVecVal(0, dt.size_t))))))
# # unused procs don't have a parent
# conj.append(z3.ForAll([pid], z3.Implies(
# z3.And(
# is_pid_valid(pid),
# kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED),
# kernelstate.procs[pid].ppid == z3.BitVecVal(0, dt.pid_t))))
# # unused procs don't have fds
# conj.append(z3.ForAll([pid, fd], z3.Implies(
# z3.And(
# is_pid_valid(pid),
# kernelstate.procs[pid].state == dt.proc_state.PROC_UNUSED),
# z3.Not(is_fn_valid(kernelstate.procs[pid].ofile(fd))))))
# unused fn has refcount == 0
# conj.append(z3.ForAll([fn], z3.Implies(is_fn_valid(fn),
# z3.Implies(kernelstate.files[fn].type == dt.file_type.FD_NONE,
# kernelstate.files[fn].refcnt(
# z3.Concat(
# z3.BitVecVal(dt.NPROC - 1, dt.pid_t),
# z3.BitVecVal(dt.NOFILE - 1, dt.fd_t))) == z3.BitVecVal(0, dt.size_t)))))
##############
# disjointed-ness of memory regions
conj.append(z3.And(
z3.Extract(63, 40, z3.UDiv(kernelstate.pages_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGES) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PROC_TABLE) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGE_DESC_TABLE) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_FILE_TABLE) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.devices_ptr_to_int,util.i64(4096)) + dt.NPAGES_DEVICES) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.dmapages_ptr_to_int,util.i64(4096)) + dt.NDMAPAGE) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.pages_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.devices_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.Extract(63, 40, z3.UDiv(kernelstate.dmapages_ptr_to_int, util.i64(4096))) == z3.BitVecVal(0, 24),
z3.ULT(z3.UDiv(kernelstate.pages_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGES, z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096))),
z3.ULT(z3.UDiv(kernelstate.proc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PROC_TABLE, z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096))),
z3.ULT(z3.UDiv(kernelstate.page_desc_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_PAGE_DESC_TABLE, z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096))),
z3.ULT(z3.UDiv(kernelstate.file_table_ptr_to_int, util.i64(4096)) + dt.NPAGES_FILE_TABLE, z3.UDiv(kernelstate.devices_ptr_to_int, util.i64(4096))),
z3.ULT(z3.UDiv(kernelstate.devices_ptr_to_int, util.i64(4096)) + dt.NPCIDEV, z3.UDiv(kernelstate.dmapages_ptr_to_int, util.i64(4096))),
z3.ULT(z3.UDiv(kernelstate.dmapages_ptr_to_int, util.i64(4096)) + dt.NDMAPAGE, z3.UDiv(dt.PCI_START, util.i64(4096))),
))
# Current is a valid pid
conj.append(is_pid_valid(kernelstate.current))
# Current is always running
conj.append(kernelstate.procs[kernelstate.current].state == dt.proc_state.PROC_RUNNING),
# A running proc must be current
conj.append(z3.ForAll([pid], z3.Implies(is_pid_valid(pid),
z3.Implies(kernelstate.procs[pid].state == dt.proc_state.PROC_RUNNING,
pid == kernelstate.current))))
return z3.And(*conj)
|
hyperparameter_hunter/space/space_core.py | beyondacm/hyperparameter_hunter | 688 | 12626922 | """Defines utilities intended for internal use only, most notably
:class:`hyperparameter_hunter.space.space_core.Space`. These tools are used behind the scenes by
:class:`hyperparameter_hunter.optimization.protocol_core.BaseOptPro` to combine instances of
dimensions defined in :mod:`hyperparameter_hunter.space.dimensions` into a usable hyperparameter
search Space
Related
-------
:mod:`hyperparameter_hunter.space.dimensions`
Defines concrete descendants of :class:`hyperparameter_hunter.space.dimensions.Dimension`, which
are intended for direct use. :class:`hyperparameter_hunter.space.space_core.Space` is used
to combine these Dimension instances
Notes
-----
Many of the tools defined herein (although substantially modified) are based on those provided by
the excellent [Scikit-Optimize](https://github.com/scikit-optimize/scikit-optimize) library. See
:mod:`hyperparameter_hunter.optimization.backends.skopt` for a copy of SKOpt's license"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.space.dimensions import Dimension, Real, Integer, Categorical
from hyperparameter_hunter.utils.general_utils import short_repr
##################################################
# Import Miscellaneous Assets
##################################################
from functools import reduce
import numbers
import numpy as np
from sys import maxsize
##################################################
# Import Learning Assets
##################################################
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
NONE = object()
##################################################
# Utilities
##################################################
def check_dimension(dimension, transform=None):
"""Turn a provided dimension description into a dimension object. Checks that the provided
dimension falls into one of the supported types, listed below in the description of `dimension`
Parameters
----------
dimension: Tuple, list, or Dimension
Search space `Dimension`. May be any of the following:
* `(lower_bound, upper_bound)` tuple (`Real` or `Integer`)
* `(lower_bound, upper_bound, prior)` tuple (`Real`)
* List of categories (`Categorical`)
* `Dimension` instance (`Real`, `Integer` or `Categorical`)
transform: {"identity", "normalize", "onehot"} (optional)
* `Categorical` dimensions support "onehot" or "identity". See `Categorical` documentation
for more information
* `Real` and `Integer` dimensions support "identity" or "normalize". See `Real` or `Integer`
documentation for more information
Returns
-------
dimension: Dimension
Dimension instance created from the provided `dimension` description. If `dimension` is
already an instance of `Dimension`, it is returned unchanged"""
if isinstance(dimension, Dimension):
return dimension
if not isinstance(dimension, (list, tuple, np.ndarray)):
raise ValueError("Dimension has to be a list or tuple")
# `Dimension` subclasses define actual `transform` defaults - Only pass `transform` if not None
kwargs = dict(transform=transform) if transform else {}
if len(dimension) == 1:
return Categorical(dimension, **kwargs)
if len(dimension) == 2:
if any([isinstance(d, (str, bool)) or isinstance(d, np.bool_) for d in dimension]):
return Categorical(dimension, **kwargs)
elif all([isinstance(dim, numbers.Integral) for dim in dimension]):
return Integer(*dimension, **kwargs)
elif any([isinstance(dim, numbers.Real) for dim in dimension]):
return Real(*dimension, **kwargs)
if len(dimension) == 3:
# TODO: Below `any` should prolly be `all`
if any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and dimension[2] in [
"uniform",
"log-uniform",
]:
return Real(*dimension, **kwargs)
else:
return Categorical(dimension, **kwargs)
if len(dimension) > 3:
return Categorical(dimension, **kwargs)
raise ValueError(f"Invalid `dimension` {dimension}. See documentation for supported types")
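# Hypothetical usage sketch (not part of the original file): each supported
# description listed in the docstring maps to a concrete `Dimension` subclass.
#   check_dimension((1, 100))                      # -> Integer
#   check_dimension((1e-4, 1e-1, "log-uniform"))   # -> Real with log-uniform prior
#   check_dimension(["relu", "tanh", "sigmoid"])   # -> Categorical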
##################################################
# Space
##################################################
class Space:
def __init__(self, dimensions):
"""Initialize a search space from given specifications
Parameters
----------
dimensions: List
List of search space `Dimension` instances or representatives. Each search dimension
may be any of the following:
* `(lower_bound, upper_bound)` tuple (`Real` or `Integer`)
* `(lower_bound, upper_bound, prior)` tuple (`Real`)
* List of categories (`Categorical`)
* `Dimension` instance (`Real`, `Integer` or `Categorical`)
Notes
-----
The upper and lower bounds are inclusive for `Integer` dimensions"""
self.dimensions = [check_dimension(dim) for dim in dimensions]
def __eq__(self, other):
return all([a == b for a, b in zip(self.dimensions, other.dimensions)])
def __repr__(self):
dims = short_repr(self.dimensions, affix_size=15)
return "Space([{}])".format(",\n ".join(map(str, dims)))
def __iter__(self):
return iter(self.dimensions)
def __len__(self):
"""Determine the number of possible search points in :attr:`dimensions`
Returns
-------
search_space_size: Integer, or `sys.maxsize`
The number of different hyperparameter search points. If the hyperparameter search space
is infinitely large, `sys.maxsize` is returned to represent `np.inf`, which cannot
itself be returned because `__len__` is required to produce an int >= 0"""
if any(isinstance(_, Real) for _ in self.dimensions):
search_space_size = maxsize
else:
search_space_size = reduce(
lambda x, y: x * y,
[
(_.high - _.low + 1) if isinstance(_, Integer) else len(_.bounds)
for _ in self.dimensions
],
1,
)
return search_space_size
def __contains__(self, point):
"""Determine whether `point` fits within the bounds of the space
Parameters
----------
point: List
Search space point, expected to be of the same length as :attr:`dimensions`
Returns
-------
Boolean
True if `point` fits within :attr:`dimensions`. Else, False"""
for component, dim in zip(point, self.dimensions):
if component not in dim:
return False
return True
##################################################
# Core Methods
##################################################
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples. Samples are in the original (untransformed) space. They must be
transformed before being passed to a model or minimizer via :meth:`transform`
Parameters
----------
n_samples: Int, default=1
Number of samples to be drawn from the space
random_state: Int, RandomState, or None, default=None
Set random state to something other than None for reproducible results
Returns
-------
List
Randomly drawn samples from the original space. Will be a list of lists, of shape
(`n_samples`, :attr:`n_dims`)"""
rng = check_random_state(random_state)
#################### Draw ####################
columns = []
for dim in self.dimensions:
new_val = None
try:
if sp_version < (0, 16):
new_val = dim.rvs(n_samples=n_samples)
else:
new_val = dim.rvs(n_samples=n_samples, random_state=rng)
except TypeError: # `'<' not supported between instances of 'Version' and 'str'`
new_val = dim.rvs(n_samples=n_samples, random_state=rng)
finally:
columns.append(new_val)
#################### Transpose ####################
rows = []
# TODO: Use `np.transpose`? Might that screw up the dimension types (mostly `Categorical`)
for i in range(n_samples):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
def transform(self, data):
"""Transform samples from the original space into a warped space
Parameters
----------
data: List
Samples to transform. Should be of shape (<# samples>, :attr:`n_dims`)
Returns
-------
data_t: List
Samples transformed into a warped space. Will be of shape
(<# samples>, :attr:`transformed_n_dims`)
Notes
-----
Expected to be used to project samples into a suitable space for numerical optimization"""
#################### Pack by Dimension ####################
columns = [[] for _ in self.dimensions]
for i in range(len(data)):
for j in range(self.n_dims):
columns[j].append(data[i][j])
#################### Transform ####################
for j in range(self.n_dims):
columns[j] = self.dimensions[j].transform(columns[j])
#################### Repack as Array ####################
data_t = np.hstack([np.asarray(c).reshape((len(data), -1)) for c in columns])
return data_t
def inverse_transform(self, data_t):
"""Inverse transform samples from the warped space back to the original space
Parameters
----------
data_t: List
Samples to inverse transform. Should be of shape
(<# samples>, :attr:`transformed_n_dims`)
Returns
-------
List
Samples transformed back to the original space. Will be of shape
(<# samples>, :attr:`n_dims`)"""
#################### Inverse Transform ####################
columns = []
start = 0
for j in range(self.n_dims):
dim = self.dimensions[j]
offset = dim.transformed_size
if offset == 1:
columns.append(dim.inverse_transform(data_t[:, start]))
else:
columns.append(dim.inverse_transform(data_t[:, start : start + offset]))
start += offset
#################### Transpose ####################
rows = []
# TODO: Use `np.transpose`? Might that screw up the dimension types (mostly `Categorical`)
for i in range(len(data_t)):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
##################################################
# Descriptive Properties
##################################################
@property
def n_dims(self) -> int:
"""Dimensionality of the original space
Returns
-------
Int
Length of :attr:`dimensions`"""
return len(self.dimensions)
@property
def transformed_n_dims(self) -> int:
"""Dimensionality of the warped space
Returns
-------
Int
Sum of the `transformed_size` of all dimensions in :attr:`dimensions`"""
return sum([dim.transformed_size for dim in self.dimensions])
@property
def bounds(self):
"""The dimension bounds, in the original space
Returns
-------
List
Collection of the `bounds` of each dimension in :attr:`dimensions`"""
b = []
for dim in self.dimensions:
if dim.size == 1:
b.append(dim.bounds)
else:
b.extend(dim.bounds)
return b
@property
def transformed_bounds(self):
"""The dimension bounds, in the warped space
Returns
-------
List
Collection of the `transformed_bounds` of each dimension in :attr:`dimensions`"""
b = []
for dim in self.dimensions:
if dim.transformed_size == 1:
b.append(dim.transformed_bounds)
else:
b.extend(dim.transformed_bounds)
return b
@property
def is_real(self):
"""Whether :attr:`dimensions` contains exclusively `Real` dimensions
Returns
-------
Boolean
True if all dimensions in :attr:`dimensions` are `Real`. Else, False"""
return all([isinstance(dim, Real) for dim in self.dimensions])
@property
def is_categorical(self) -> bool:
"""Whether :attr:`dimensions` contains exclusively `Categorical` dimensions
Returns
-------
Boolean
True if all dimensions in :attr:`dimensions` are `Categorical`. Else, False"""
return all([isinstance(dim, Categorical) for dim in self.dimensions])
##################################################
# Helper Methods
##################################################
def names(self, use_location=True):
"""Retrieve the names, or locations of all dimensions in the hyperparameter search space
Parameters
----------
use_location: Boolean, default=True
If True and a dimension has a non-null attribute called 'location', its value will be
used instead of 'name'
Returns
-------
names: List
A list of strings or tuples, in which each value is the name or location of the
dimension at that index"""
names = []
for dimension in self.dimensions:
if use_location and hasattr(dimension, "location") and dimension.location:
names.append(dimension.location)
else:
names.append(dimension.name)
return names
def get_by_name(self, name, use_location=True, default=NONE):
"""Retrieve a single dimension by its name
Parameters
----------
name: Tuple, or str
Name of the dimension in :attr:`dimensions` to return
use_location: Boolean, default=True
If True and a dimension has a non-null attribute called "location", its value will be
used instead of that dimension's "name"
default: Any (optional)
If given and `name` is not found, `default` will be returned. Otherwise, `KeyError` will
be raised when `name` is not found
Returns
-------
Dimension
Dimension subclass in :attr:`dimensions`, whose "name" attribute is equal to `name`"""
for dimension in self.dimensions:
if use_location and getattr(dimension, "location", None) == name:
return dimension
elif dimension.name == name:
return dimension
if default != NONE:
return default
raise KeyError(f"{name} not found in dimensions")
def distance(self, point_a, point_b):
"""Compute distance between two points in this space. Both `point_a` and `point_b` are
expected to be of the same length as :attr:`dimensions`, with values corresponding to the
`Dimension` bounds of :attr:`dimensions`
Parameters
----------
point_a: List
First point
point_b: List
Second point
Returns
-------
Number
Distance between `point_a` and `point_b`"""
distance = 0.0
for a, b, dim in zip(point_a, point_b, self.dimensions):
distance += dim.distance(a, b)
return distance
def normalize_dimensions(dimensions):
"""Create a `Space` where all dimensions are instructed to be normalized to unit range. Note
that this doesn't *really* return normalized `dimensions`. It just returns the given
`dimensions`, with each one's `transform` set to the appropriate value, so that when each
dimension's :meth:`transform` is called, the dimensions are actually normalized
Parameters
----------
dimensions: List
List of search space dimensions. Each search dimension can be defined as any of the
following: 1) a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions).
2) A `(lower_bound, upper_bound, "prior")` tuple (for `Real` dimensions).
3) A list of categories (for `Categorical` dimensions).
4) An instance of a `Dimension` object (`Real`, `Integer`, or `Categorical`)
Returns
-------
:class:`hyperparameter_hunter.space.Space`
Hyperparameter space class instance, in which dimensions have been instructed to be
normalized to unit range upon invocation of the `transform` method
Raises
------
RuntimeError
If a processed element of `dimensions` is not one of: `Real`, `Integer`, `Categorical`
Notes
-----
The upper and lower bounds are inclusive for `Integer` dimensions"""
space = Space(dimensions)
transformed_dimensions = []
if space.is_categorical:
for dim in space:
# `skopt.utils.normalize_dimensions` makes comment on explicitly setting
# `transform="identity"`, so apparently there's a good reason for it...
# Using original `transform` fixes all-`Categorical`/`BayesianOptPro` bug and proper
# saved experiment result matching, but optimizer could be secretly misbehaving...
transformed_dimensions.append(
Categorical(dim.categories, dim.prior, transform=dim.transform_, name=dim.name)
# Categorical(dim.categories, dim.prior, transform="identity", name=dim.name)
)
else:
for dim in space.dimensions:
if isinstance(dim, Categorical):
transformed_dimensions.append(dim)
elif isinstance(dim, Real):
transformed_dimensions.append(
Real(dim.low, dim.high, dim.prior, transform="normalize", name=dim.name)
)
elif isinstance(dim, Integer):
transformed_dimensions.append(
Integer(dim.low, dim.high, transform="normalize", name=dim.name)
)
else:
raise RuntimeError(f"Unknown dimension type: {type(dim)}")
#################### Replace Lost Attributes ####################
if hasattr(dim, "location"):
transformed_dimensions[-1].location = dim.location
return Space(transformed_dimensions)
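if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original file): build a small
    # search space, draw a few random samples, and round-trip them through the
    # normalized (warped) representation. The dimension bounds are illustrative.
    space = normalize_dimensions(
        [Real(1e-4, 1e-1, prior="log-uniform"), Integer(2, 10), ["gini", "entropy"]]
    )
    samples = space.rvs(n_samples=3, random_state=32)
    warped = space.transform(samples)
    recovered = space.inverse_transform(warped)
    print(samples)
    print(recovered)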
|
amazon/paapi5_python_sdk/item_info.py | frenners/python-amazon-paapi | 121 | 12626927 | <reponame>frenners/python-amazon-paapi
# coding: utf-8
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
"""
ProductAdvertisingAPI
https://webservices.amazon.com/paapi5/documentation/index.html # noqa: E501
"""
import pprint
import re # noqa: F401
import six
from .by_line_info import ByLineInfo # noqa: F401,E501
from .classifications import Classifications # noqa: F401,E501
from .content_info import ContentInfo # noqa: F401,E501
from .content_rating import ContentRating # noqa: F401,E501
from .external_ids import ExternalIds # noqa: F401,E501
from .manufacture_info import ManufactureInfo # noqa: F401,E501
from .multi_valued_attribute import MultiValuedAttribute # noqa: F401,E501
from .product_info import ProductInfo # noqa: F401,E501
from .single_string_valued_attribute import SingleStringValuedAttribute # noqa: F401,E501
from .technical_info import TechnicalInfo # noqa: F401,E501
from .trade_in_info import TradeInInfo # noqa: F401,E501
class ItemInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'by_line_info': 'ByLineInfo',
'classifications': 'Classifications',
'content_info': 'ContentInfo',
'content_rating': 'ContentRating',
'external_ids': 'ExternalIds',
'features': 'MultiValuedAttribute',
'manufacture_info': 'ManufactureInfo',
'product_info': 'ProductInfo',
'technical_info': 'TechnicalInfo',
'title': 'SingleStringValuedAttribute',
'trade_in_info': 'TradeInInfo'
}
attribute_map = {
'by_line_info': 'ByLineInfo',
'classifications': 'Classifications',
'content_info': 'ContentInfo',
'content_rating': 'ContentRating',
'external_ids': 'ExternalIds',
'features': 'Features',
'manufacture_info': 'ManufactureInfo',
'product_info': 'ProductInfo',
'technical_info': 'TechnicalInfo',
'title': 'Title',
'trade_in_info': 'TradeInInfo'
}
def __init__(self, by_line_info=None, classifications=None, content_info=None, content_rating=None, external_ids=None, features=None, manufacture_info=None, product_info=None, technical_info=None, title=None, trade_in_info=None): # noqa: E501
"""ItemInfo - a model defined in Swagger""" # noqa: E501
self._by_line_info = None
self._classifications = None
self._content_info = None
self._content_rating = None
self._external_ids = None
self._features = None
self._manufacture_info = None
self._product_info = None
self._technical_info = None
self._title = None
self._trade_in_info = None
self.discriminator = None
if by_line_info is not None:
self.by_line_info = by_line_info
if classifications is not None:
self.classifications = classifications
if content_info is not None:
self.content_info = content_info
if content_rating is not None:
self.content_rating = content_rating
if external_ids is not None:
self.external_ids = external_ids
if features is not None:
self.features = features
if manufacture_info is not None:
self.manufacture_info = manufacture_info
if product_info is not None:
self.product_info = product_info
if technical_info is not None:
self.technical_info = technical_info
if title is not None:
self.title = title
if trade_in_info is not None:
self.trade_in_info = trade_in_info
@property
def by_line_info(self):
"""Gets the by_line_info of this ItemInfo. # noqa: E501
:return: The by_line_info of this ItemInfo. # noqa: E501
:rtype: ByLineInfo
"""
return self._by_line_info
@by_line_info.setter
def by_line_info(self, by_line_info):
"""Sets the by_line_info of this ItemInfo.
:param by_line_info: The by_line_info of this ItemInfo. # noqa: E501
:type: ByLineInfo
"""
self._by_line_info = by_line_info
@property
def classifications(self):
"""Gets the classifications of this ItemInfo. # noqa: E501
:return: The classifications of this ItemInfo. # noqa: E501
:rtype: Classifications
"""
return self._classifications
@classifications.setter
def classifications(self, classifications):
"""Sets the classifications of this ItemInfo.
:param classifications: The classifications of this ItemInfo. # noqa: E501
:type: Classifications
"""
self._classifications = classifications
@property
def content_info(self):
"""Gets the content_info of this ItemInfo. # noqa: E501
:return: The content_info of this ItemInfo. # noqa: E501
:rtype: ContentInfo
"""
return self._content_info
@content_info.setter
def content_info(self, content_info):
"""Sets the content_info of this ItemInfo.
:param content_info: The content_info of this ItemInfo. # noqa: E501
:type: ContentInfo
"""
self._content_info = content_info
@property
def content_rating(self):
"""Gets the content_rating of this ItemInfo. # noqa: E501
:return: The content_rating of this ItemInfo. # noqa: E501
:rtype: ContentRating
"""
return self._content_rating
@content_rating.setter
def content_rating(self, content_rating):
"""Sets the content_rating of this ItemInfo.
:param content_rating: The content_rating of this ItemInfo. # noqa: E501
:type: ContentRating
"""
self._content_rating = content_rating
@property
def external_ids(self):
"""Gets the external_ids of this ItemInfo. # noqa: E501
:return: The external_ids of this ItemInfo. # noqa: E501
:rtype: ExternalIds
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this ItemInfo.
:param external_ids: The external_ids of this ItemInfo. # noqa: E501
:type: ExternalIds
"""
self._external_ids = external_ids
@property
def features(self):
"""Gets the features of this ItemInfo. # noqa: E501
:return: The features of this ItemInfo. # noqa: E501
:rtype: MultiValuedAttribute
"""
return self._features
@features.setter
def features(self, features):
"""Sets the features of this ItemInfo.
:param features: The features of this ItemInfo. # noqa: E501
:type: MultiValuedAttribute
"""
self._features = features
@property
def manufacture_info(self):
"""Gets the manufacture_info of this ItemInfo. # noqa: E501
:return: The manufacture_info of this ItemInfo. # noqa: E501
:rtype: ManufactureInfo
"""
return self._manufacture_info
@manufacture_info.setter
def manufacture_info(self, manufacture_info):
"""Sets the manufacture_info of this ItemInfo.
:param manufacture_info: The manufacture_info of this ItemInfo. # noqa: E501
:type: ManufactureInfo
"""
self._manufacture_info = manufacture_info
@property
def product_info(self):
"""Gets the product_info of this ItemInfo. # noqa: E501
:return: The product_info of this ItemInfo. # noqa: E501
:rtype: ProductInfo
"""
return self._product_info
@product_info.setter
def product_info(self, product_info):
"""Sets the product_info of this ItemInfo.
:param product_info: The product_info of this ItemInfo. # noqa: E501
:type: ProductInfo
"""
self._product_info = product_info
@property
def technical_info(self):
"""Gets the technical_info of this ItemInfo. # noqa: E501
:return: The technical_info of this ItemInfo. # noqa: E501
:rtype: TechnicalInfo
"""
return self._technical_info
@technical_info.setter
def technical_info(self, technical_info):
"""Sets the technical_info of this ItemInfo.
:param technical_info: The technical_info of this ItemInfo. # noqa: E501
:type: TechnicalInfo
"""
self._technical_info = technical_info
@property
def title(self):
"""Gets the title of this ItemInfo. # noqa: E501
:return: The title of this ItemInfo. # noqa: E501
:rtype: SingleStringValuedAttribute
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ItemInfo.
:param title: The title of this ItemInfo. # noqa: E501
:type: SingleStringValuedAttribute
"""
self._title = title
@property
def trade_in_info(self):
"""Gets the trade_in_info of this ItemInfo. # noqa: E501
:return: The trade_in_info of this ItemInfo. # noqa: E501
:rtype: TradeInInfo
"""
return self._trade_in_info
@trade_in_info.setter
def trade_in_info(self, trade_in_info):
"""Sets the trade_in_info of this ItemInfo.
:param trade_in_info: The trade_in_info of this ItemInfo. # noqa: E501
:type: TradeInInfo
"""
self._trade_in_info = trade_in_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ItemInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ItemInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
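# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the generated file): the model is a plain
# attribute container, so it can be exercised without any API call. Because of
# the relative imports above, this block only runs when the file is executed
# as a module inside its package (e.g. with `python -m`).
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _info = ItemInfo()
    print(_info.to_dict())       # every swagger attribute defaults to None
    print(_info == ItemInfo())   # True: equality compares the attribute dicts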
|
pre_process_sysu.py | shuoyang129/Cross-Modal-Re-ID-baseline | 249 | 12626928 | import numpy as np
from PIL import Image
import pdb
import os
data_path = '/home/datasets/prml/computervision/re-id/sysu-mm01/ori_data'
rgb_cameras = ['cam1','cam2','cam4','cam5']
ir_cameras = ['cam3','cam6']
# load id info
file_path_train = os.path.join(data_path,'exp/train_id.txt')
file_path_val = os.path.join(data_path,'exp/val_id.txt')
with open(file_path_train, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
id_train = ["%04d" % x for x in ids]
with open(file_path_val, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
id_val = ["%04d" % x for x in ids]
# combine train and val split
id_train.extend(id_val)
files_rgb = []
files_ir = []
for id in sorted(id_train):
for cam in rgb_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_rgb.extend(new_files)
for cam in ir_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_ir.extend(new_files)
# relabel
pid_container = set()
for img_path in files_ir:
pid = int(img_path[-13:-9])
pid_container.add(pid)
pid2label = {pid:label for label, pid in enumerate(pid_container)}
fix_image_width = 144
fix_image_height = 288
def read_imgs(train_image):
train_img = []
train_label = []
for img_path in train_image:
# img
img = Image.open(img_path)
img = img.resize((fix_image_width, fix_image_height), Image.ANTIALIAS)
pix_array = np.array(img)
train_img.append(pix_array)
# label
pid = int(img_path[-13:-9])
pid = pid2label[pid]
train_label.append(pid)
return np.array(train_img), np.array(train_label)
# rgb imges
train_img, train_label = read_imgs(files_rgb)
np.save(data_path + 'train_rgb_resized_img.npy', train_img)
np.save(data_path + 'train_rgb_resized_label.npy', train_label)
# ir imges
train_img, train_label = read_imgs(files_ir)
np.save(data_path + 'train_ir_resized_img.npy', train_img)
np.save(data_path + 'train_ir_resized_label.npy', train_label)
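# ---------------------------------------------------------------------------
# Editor's verification sketch (not part of the original script): reload the
# arrays written above. `data_path` has no trailing slash, so the .npy files
# are written beside `ori_data` with that name fused into the filename; the
# same concatenation is reused here so the paths match.
# ---------------------------------------------------------------------------
check_img = np.load(data_path + 'train_rgb_resized_img.npy')
check_label = np.load(data_path + 'train_rgb_resized_label.npy')
print(check_img.shape, check_label.shape)  # expected: (N, 288, 144, 3) and (N,)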
|
Scripts/sims4communitylib/debug/interactions/object_break.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12626947 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any
from event_testing.results import TestResult
from interactions.context import InteractionContext
from objects.game_object import GameObject
from sims.sim import Sim
from sims4communitylib.classes.interactions.common_immediate_super_interaction import CommonImmediateSuperInteraction
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.utils.common_type_utils import CommonTypeUtils
from sims4communitylib.utils.objects.common_object_state_utils import CommonObjectStateUtils
class S4CLDebugObjectBreakInteraction(CommonImmediateSuperInteraction):
"""S4CLDebugObjectBreakInteraction(*_, **__)
Set the target Object to a broken state.
"""
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_mod_identity(cls) -> CommonModIdentity:
return ModInfo.get_identity()
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_log_identifier(cls) -> str:
return 's4cl_debug_break_object'
# noinspection PyMissingOrEmptyDocstring
@classmethod
def on_test(cls, interaction_sim: Sim, interaction_target: Any, interaction_context: InteractionContext, **kwargs) -> TestResult:
if interaction_target is None or CommonTypeUtils.is_sim_or_sim_info(interaction_target):
cls.get_log().debug('Failed, Target is None.')
return TestResult.NONE
interaction_target: GameObject = interaction_target
if CommonObjectStateUtils.is_broken(interaction_target):
cls.get_log().debug('Failed, the Object is already broken.')
return TestResult.NONE
cls.get_log().debug('Success, can break object.')
return TestResult.TRUE
# noinspection PyMissingOrEmptyDocstring
def on_started(self, interaction_sim: Sim, interaction_target: GameObject) -> bool:
return CommonObjectStateUtils.break_object(interaction_target)
|
tests/nlu/tokenizers/test_jieba_tokenizer.py | Next-Trends/rasa | 3,603 | 12626960 | import logging
from typing import Dict, Optional
from _pytest.logging import LogCaptureFixture
from _pytest.tmpdir import TempPathFactory
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer
import pytest
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.nlu.constants import TEXT, INTENT
def create_jieba(config: Optional[Dict] = None) -> JiebaTokenizer:
config = config if config else {}
return JiebaTokenizer.create(
{**JiebaTokenizer.get_default_config(), **config}, None, None, None
)
@pytest.mark.parametrize(
"text, expected_tokens, expected_indices",
[
(
"我想去吃兰州拉面",
["我", "想", "去", "吃", "兰州", "拉面"],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 6), (6, 8)],
),
(
"Micheal你好吗?",
["Micheal", "你好", "吗", "?"],
[(0, 7), (7, 9), (9, 10), (10, 11)],
),
],
)
def test_jieba(text, expected_tokens, expected_indices):
tk = create_jieba()
tokens = tk.tokenize(Message(data={TEXT: text}), attribute=TEXT)
assert [t.text for t in tokens] == expected_tokens
assert [t.start for t in tokens] == [i[0] for i in expected_indices]
assert [t.end for t in tokens] == [i[1] for i in expected_indices]
def test_jieba_load_and_persist_dictionary(
tmp_path_factory: TempPathFactory,
default_model_storage: ModelStorage,
default_execution_context: ExecutionContext,
caplog: LogCaptureFixture,
):
dictionary_directory = tmp_path_factory.mktemp("dictionaries")
dictionary_path = dictionary_directory / "dictionary_1"
dictionary_contents = """
创新办 3 i
云计算 5
凱特琳 nz
台中
"""
dictionary_path.write_text(dictionary_contents, encoding="utf-8")
component_config = {"dictionary_path": dictionary_directory}
resource = Resource("jieba")
tk = JiebaTokenizer.create(
{**JiebaTokenizer.get_default_config(), **component_config},
default_model_storage,
resource,
default_execution_context,
)
tk.process_training_data(TrainingData([Message(data={TEXT: ""})]))
# The dictionary has not been persisted yet.
with caplog.at_level(logging.DEBUG):
JiebaTokenizer.load(
{**JiebaTokenizer.get_default_config(), **component_config},
default_model_storage,
resource,
default_execution_context,
)
assert any(
"Failed to load JiebaTokenizer from model storage." in message
for message in caplog.messages
)
tk.persist()
# Check the persisted dictionary matches the original file.
with default_model_storage.read_from(resource) as resource_dir:
contents = (resource_dir / "dictionary_1").read_text(encoding="utf-8")
assert contents == dictionary_contents
# Delete original files to show that we read from the model storage.
dictionary_path.unlink()
dictionary_directory.rmdir()
JiebaTokenizer.load(
{**JiebaTokenizer.get_default_config(), **component_config},
default_model_storage,
resource,
default_execution_context,
)
tk.process([Message(data={TEXT: ""})])
@pytest.mark.parametrize(
"text, expected_tokens",
[
("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]),
("Forecast for LUNCH", ["Forecast for LUNCH"]),
],
)
def test_custom_intent_symbol(text, expected_tokens):
component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"}
tk = create_jieba(component_config)
message = Message(data={TEXT: text})
message.set(INTENT, text)
tk.process_training_data(TrainingData([message]))
assert [t.text for t in message.get(TOKENS_NAMES[INTENT])] == expected_tokens
|
Chapter24/solver.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 621 | 12626976 | <reponame>haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition
#!/usr/bin/env python3
"""
Solver using MCTS and trained model
"""
import time
import argparse
import random
import logging
import datetime
import collections
import csv
from tqdm import tqdm
import seaborn as sns
import matplotlib.pylab as plt
import torch
from libcube import cubes
from libcube import model
from libcube import mcts
log = logging.getLogger("solver")
DataPoint = collections.namedtuple("DataPoint", field_names=(
'start_dt', 'stop_dt', 'duration', 'depth', 'scramble', 'is_solved', 'solve_steps', 'sol_len_naive', 'sol_len_bfs',
'depth_max', 'depth_mean'
))
DEFAULT_MAX_SECONDS = 60
PLOT_MAX_DEPTHS = 50
PLOT_TASKS = 20
def generate_task(env, depth):
res = []
prev_a = None
for _ in range(depth):
a = env.sample_action(prev_action=prev_a)
res.append(a.value)
prev_a = a
return res
def gather_data(cube_env, net, max_seconds, max_steps, max_depth, samples_per_depth, batch_size, device):
"""
Try to solve lots of cubes to get data
:param cube_env: CubeEnv
:param net: model to be used
:param max_seconds: time limit per cube in seconds
    :param max_steps: limit of steps, if not None it supersedes max_seconds
:param max_depth: maximum depth of scramble
:param samples_per_depth: how many cubes of every depth to generate
:param device: torch.device
:return: list DataPoint entries
"""
result = []
try:
for depth in range(1, max_depth+1):
solved_count = 0
for task_idx in tqdm(range(samples_per_depth)):
start_dt = datetime.datetime.utcnow()
task = generate_task(cube_env, depth)
tree, solution = solve_task(cube_env, task, net, cube_idx=task_idx, max_seconds=max_seconds,
max_steps=max_steps, device=device, quiet=True, batch_size=batch_size)
is_solved = solution is not None
stop_dt = datetime.datetime.utcnow()
duration = (stop_dt - start_dt).total_seconds()
scramble = " ".join(map(str, task))
tree_depth_stats = tree.get_depth_stats()
sol_len_naive, sol_len_bfs = -1, -1
if is_solved:
sol_len_naive = len(solution)
sol_len_bfs = len(tree.find_solution())
data_point = DataPoint(start_dt=start_dt, stop_dt=stop_dt, duration=duration, depth=depth,
scramble=scramble, is_solved=is_solved, solve_steps=len(tree),
sol_len_naive=sol_len_naive, sol_len_bfs=sol_len_bfs,
depth_max=tree_depth_stats['max'], depth_mean=tree_depth_stats['mean'])
result.append(data_point)
if is_solved:
solved_count += 1
log.info("Depth %d processed, solved %d/%d (%.2f%%)", depth, solved_count, samples_per_depth,
100.0*solved_count/samples_per_depth)
except KeyboardInterrupt:
log.info("Interrupt received, got %d data samples, use them", len(result))
return result
def save_output(data, output_file):
with open(output_file, "wt", encoding='utf-8') as fd:
writer = csv.writer(fd)
writer.writerow(['start_dt', 'stop_dt', 'duration', 'depth', 'scramble', 'is_solved', 'solve_steps',
'sol_len_naive', 'sol_len_bfs', 'tree_depth_max', 'tree_depth_mean'])
for dp in data:
writer.writerow([
dp.start_dt.isoformat(),
dp.stop_dt.isoformat(),
dp.duration,
dp.depth,
dp.scramble,
int(dp.is_solved),
dp.solve_steps,
dp.sol_len_naive,
dp.sol_len_bfs,
dp.depth_max,
dp.depth_mean
])
def solve_task(env, task, net, cube_idx=None, max_seconds=DEFAULT_MAX_SECONDS, max_steps=None,
device=torch.device("cpu"), quiet=False, batch_size=1):
if not quiet:
log_prefix = "" if cube_idx is None else "cube %d: " % cube_idx
log.info("%sGot task %s, solving...", log_prefix, task)
cube_state = env.scramble(map(env.action_enum, task))
tree = mcts.MCTS(env, cube_state, net, device=device)
step_no = 0
ts = time.time()
while True:
if batch_size > 1:
solution = tree.search_batch(batch_size)
else:
solution = tree.search()
if solution:
if not quiet:
log.info("On step %d we found goal state, unroll. Speed %.2f searches/s",
step_no, (step_no*batch_size) / (time.time() - ts))
log.info("Tree depths: %s", tree.get_depth_stats())
bfs_solution = tree.find_solution()
log.info("Solutions: naive %d, bfs %d", len(solution), len(bfs_solution))
log.info("BFS: %s", bfs_solution)
log.info("Naive: %s", solution)
# tree.dump_solution(solution)
# tree.dump_solution(bfs_solution)
# tree.dump_root()
# log.info("Tree: %s", tree)
return tree, solution
step_no += 1
if max_steps is not None:
if step_no > max_steps:
if not quiet:
log.info("Maximum amount of steps has reached, cube wasn't solved. "
"Did %d searches, speed %.2f searches/s",
step_no, (step_no*batch_size) / (time.time() - ts))
log.info("Tree depths: %s", tree.get_depth_stats())
return tree, None
elif time.time() - ts > max_seconds:
if not quiet:
log.info("Time is up, cube wasn't solved. Did %d searches, speed %.2f searches/s..",
step_no, (step_no*batch_size) / (time.time() - ts))
log.info("Tree depths: %s", tree.get_depth_stats())
return tree, None
def produce_plots(data, prefix, max_seconds, max_steps):
data_solved = [(dp.depth, int(dp.is_solved)) for dp in data]
data_steps = [(dp.depth, dp.solve_steps) for dp in data if dp.is_solved]
if max_steps is not None:
suffix = "(steps limit %d)" % max_steps
else:
suffix = "(time limit %d secs)" % max_seconds
sns.set()
d, v = zip(*data_solved)
plot = sns.lineplot(d, v)
plot.set_title("Solve ratio per depth %s" % suffix)
plot.get_figure().savefig(prefix + "-solve_vs_depth.png")
plt.clf()
d, v = zip(*data_steps)
plot = sns.lineplot(d, v)
plot.set_title("Steps to solve per depth %s" % suffix)
plot.get_figure().savefig(prefix + "-steps_vs_depth.png")
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env", required=True, help="Type of env to train, supported types=%s" % cubes.names())
parser.add_argument("-m", "--model", required=True, help="Model file to load, has to match env type")
parser.add_argument("--max-time", type=int, default=DEFAULT_MAX_SECONDS,
help="Limit in seconds for each task, default=%s" % DEFAULT_MAX_SECONDS)
parser.add_argument("--max-steps", type=int, help="Limit amount of MCTS searches to be done. "
"If specified, superseeds --max-time")
parser.add_argument("--max-depth", type=int, default=PLOT_MAX_DEPTHS,
help="Maximum depth for plots and data, default=%s" % PLOT_MAX_DEPTHS)
parser.add_argument("--samples", type=int, default=PLOT_TASKS,
help="Count of tests of each depth, default=%s" % PLOT_TASKS)
parser.add_argument("-b", "--batch", type=int, default=1, help="Batch size to use during the search, default=1")
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
parser.add_argument("--seed", type=int, default=42, help="Seed to use, if zero, no seed used. default=42")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-i", "--input", help="Text file with permutations to read cubes to solve, "
"possibly produced by gen_cubes.py")
group.add_argument("-p", "--perm", help="Permutation in form of actions list separated by comma")
group.add_argument("-r", "--random", metavar="DEPTH", type=int, help="Generate random scramble of given depth")
group.add_argument("--plot", metavar="PREFIX", help="Produce plots of model solve accuracy")
group.add_argument("-o", "--output", help="Write test result into csv file with given name")
args = parser.parse_args()
if args.seed:
random.seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
cube_env = cubes.get(args.env)
log.info("Using environment %s", cube_env)
assert isinstance(cube_env, cubes.CubeEnv) # just to help pycharm understand type
net = model.Net(cube_env.encoded_shape, len(cube_env.action_enum)).to(device)
net.load_state_dict(torch.load(args.model, map_location=lambda storage, loc: storage))
net.eval()
log.info("Network loaded from %s", args.model)
if args.random is not None:
task = generate_task(cube_env, args.random)
solve_task(cube_env, task, net, max_seconds=args.max_time, max_steps=args.max_steps, device=device,
batch_size=args.batch)
elif args.perm is not None:
task = list(map(int, args.perm.split(',')))
solve_task(cube_env, task, net, max_seconds=args.max_time, max_steps=args.max_steps, device=device,
batch_size=args.batch)
elif args.input is not None:
log.info("Processing scrambles from %s", args.input)
count = 0
solved = 0
with open(args.input, 'rt', encoding='utf-8') as fd:
for idx, l in enumerate(fd):
task = list(map(int, l.strip().split(',')))
_, solution = solve_task(cube_env, task, net, cube_idx=idx, max_seconds=args.max_time,
max_steps=args.max_steps, device=device, batch_size=args.batch)
if solution is not None:
solved += 1
count += 1
log.info("Solved %d out of %d cubes, which is %.2f%% success ratio", solved, count, 100*solved / count)
elif args.plot is not None:
log.info("Produce plots with prefix %s", args.plot)
data = gather_data(cube_env, net, args.max_time, args.max_steps, args.max_depth, args.samples,
args.batch, device)
        produce_plots(data, args.plot, args.max_time, args.max_steps)
elif args.output is not None:
data = gather_data(cube_env, net, args.max_time, args.max_steps, args.max_depth, args.samples,
args.batch, device)
save_output(data, args.output)
pass
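# ---------------------------------------------------------------------------
# Editor's note (not part of the original script): typical invocations of the
# flags defined above; the environment name and model path are hypothetical.
#   ./solver.py -e cube3x3 -m saves/best.dat -r 10 --max-time 120
#   ./solver.py -e cube3x3 -m saves/best.dat --max-depth 30 --samples 10 -o results.csv
# ---------------------------------------------------------------------------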
|
Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/vaultfiles.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1,290 | 12626987 | <filename>Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/vaultfiles.py
# -*- coding: utf-8 -*-
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
import os
class VaultFiles(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'vaultfiles', 'windows', dpapi_used=True)
def run(self):
pwd_found = []
if constant.user_dpapi and constant.user_dpapi.unlocked:
main_vault_directory = os.path.join(constant.profile['APPDATA'], u'..', u'Local', u'Microsoft', u'Vault')
main_vault_directory = os.path.abspath(main_vault_directory)
if os.path.exists(main_vault_directory):
for vault_directory in os.listdir(main_vault_directory):
cred = constant.user_dpapi.decrypt_vault(os.path.join(main_vault_directory, vault_directory))
if cred:
pwd_found.append(cred)
return pwd_found
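# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): inside LaZagne this
# module is driven by the framework's dispatcher after the user's DPAPI
# material has been unlocked. Called directly, it simply returns an empty list
# until `constant.user_dpapi` has been populated elsewhere.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative only
    print(VaultFiles().run())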
|
wgan_gp_loss.py | deepsound-project/pggan-pytorch | 115 | 12626988 | import torch
from torch.autograd import Variable, grad
mixing_factors = None
grad_outputs = None
def mul_rowwise(a, b):
s = a.size()
return (a.view(s[0], -1) * b).view(s)
def calc_gradient_penalty(D, real_data, fake_data, iwass_lambda, iwass_target):
global mixing_factors, grad_outputs
if mixing_factors is None or real_data.size(0) != mixing_factors.size(0):
mixing_factors = torch.cuda.FloatTensor(real_data.size(0), 1)
mixing_factors.uniform_()
mixed_data = Variable(mul_rowwise(real_data, 1 - mixing_factors) + mul_rowwise(fake_data, mixing_factors), requires_grad=True)
mixed_scores = D(mixed_data)
if grad_outputs is None or mixed_scores.size(0) != grad_outputs.size(0):
grad_outputs = torch.cuda.FloatTensor(mixed_scores.size())
grad_outputs.fill_(1.)
gradients = grad(outputs=mixed_scores, inputs=mixed_data,
grad_outputs=grad_outputs,
create_graph=True, retain_graph=True,
only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - iwass_target) ** 2) * iwass_lambda / (iwass_target ** 2)
return gradient_penalty
def wgan_gp_D_loss(D, G, real_images_in, fake_latents_in,
iwass_lambda = 10.0,
iwass_epsilon = 0.001,
iwass_target = 1.0,
return_all = True):
D.zero_grad()
G.zero_grad()
real_data_v = Variable(real_images_in)
# train with real
D_real = D(real_data_v)
D_real_loss = -D_real + D_real ** 2 * iwass_epsilon
# train with fake
noisev = Variable(fake_latents_in, volatile=True) # totally freeze netG
fake = Variable(G(noisev).data)
inputv = fake
D_fake = D(inputv)
D_fake_loss = D_fake
# train with gradient penalty
gradient_penalty = calc_gradient_penalty(D, real_data_v.data, fake.data, iwass_lambda, iwass_target)
gp = gradient_penalty
# gp.backward()
D_cost = (D_fake_loss + D_real_loss + gp).mean()
if return_all:
return D_cost, D_real_loss, D_fake_loss
return D_cost
def wgan_gp_G_loss(G, D, fake_latents_in):
G.zero_grad()
noisev = Variable(fake_latents_in)
G_new = G(noisev)
D_new = -D(G_new)
G_cost = D_new.mean()
return G_cost |
demo/memory_tree/aloi_script.py | Ark-kun/vowpal_wabbit | 4,332 | 12627001 | import os
import time
import numpy as np
# for shot in available_shots.iterkeys():
print("## perform experiments on aloi ##")
num_of_classes = 1000
leaf_example_multiplier = 4 # 8
shots = 100
lr = 0.001
bits = 29
alpha = 0.1 # 0.3
passes = 3 # 3 #5
use_oas = False
dream_at_update = 0
learn_at_leaf = True # turn on leaf at leaf actually works better
num_queries = 5 # int(np.log(passes*num_of_classes*shots))
loss = "squared"
dream_repeats = 3
online = False
tree_node = int(
2
* passes
* (
num_of_classes
* shots
/ (np.log(num_of_classes * shots) / np.log(2) * leaf_example_multiplier)
)
)
train_data = "aloi_train.vw"
test_data = "aloi_test.vw"
if os.path.exists(train_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))
if os.path.exists(test_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))
saved_model = "{}.vw".format(train_data)
print("## Training...")
start = time.time()
command_train = f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf' if learn_at_leaf else ''} --max_number_of_labels {num_of_classes} --dream_at_update {dream_at_update} --dream_repeats {dream_repeats} {'--oas' if use_oas else ''} {'--online' if online else ''} --leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"
print(command_train)
os.system(command_train)
train_time = time.time() - start
# test:
print("## Testing...")
start = time.time()
os.system("../../build/vowpalwabbit/vw {} -i {}".format(test_data, saved_model))
test_time = time.time() - start
print("## train time {}, and test time {}".format(train_time, test_time))
|
download_model.py | AnnLyma/gpt-2 | 105 | 12627007 | import os
import sys
import requests
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser(
description='Pre-encode text files into tokenized training set.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vocab', action='store_true', help='Download only encoder.json, hparams.json, and vocab.bpe?')
parser.add_argument('models', metavar='MODEL', type=str, default=['117M'], nargs='*', help='Pretrained model name(s)')
def main(args):
for model in tqdm(args.models):
subdir = os.path.join('models', model)
if not os.path.exists(subdir):
os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows
vocab_files = ['encoder.json','hparams.json','vocab.bpe']
files = vocab_files + (['checkpoint','model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta'] if not args.vocab else [])
for filename in tqdm(files):
r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/" + subdir + "/" + filename, stream=True)
with open(os.path.join(subdir, filename), 'wb') as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
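# ---------------------------------------------------------------------------
# Editor's note (not part of the original script): example invocations using
# the arguments defined above ('117M' is the script's default model name):
#   python download_model.py 117M
#   python download_model.py --vocab 117M
# ---------------------------------------------------------------------------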
|
examples/fun.py | rparini/numdifftools | 181 | 12627008 | <gh_stars>100-1000
import numdifftools as nd
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-2, 2, 100)
for i in range(0, 10):
df = nd.Derivative(np.tanh, n=i)
y = df(x)
plt.plot(x, y/np.abs(y).max())
plt.axis('off')
plt.axis('tight')
plt.savefig("fun.png")
plt.clf()
|
geopyspark/geotrellis/union.py | geotrellis/geotrellis-python | 182 | 12627015 | <filename>geopyspark/geotrellis/union.py
from geopyspark import get_spark_context
from geopyspark.geotrellis import LayerType, check_layers
from geopyspark.geotrellis.layer import RasterLayer, TiledRasterLayer
__all__ = ['union']
def union(layers):
"""Unions togther two or more ``RasterLayer``\s or ``TiledRasterLayer``\s.
All layers must have the same ``layer_type``. If the layers are ``TiledRasterLayer``\s,
then all of the layers must also have the same :class:`~geopyspark.geotrellis.TileLayout`
and ``CRS``.
Note:
If the layers to be unioned share one or more keys, then the resulting layer will contain
duplicates of that key. One copy for each instance of the key.
Args:
layers ([:class:`~geopyspark.RasterLayer`] or [:class:`~geopyspark.TiledRasterLayer`] or (:class:`~geopyspark.RasterLayer`) or (:class:`~geopyspark.TiledRasterLayer`)): A
            collection of two or more ``RasterLayer``\s or ``TiledRasterLayer``\s to be unioned together.
Returns:
:class:`~geopyspark.RasterLayer` or :class:`~geopyspark.TiledRasterLayer`
"""
if len(layers) == 1:
raise ValueError("union can only be performed on 2 or more layers")
base_layer = layers[0]
base_layer_type = base_layer.layer_type
check_layers(base_layer, base_layer_type, layers)
pysc = get_spark_context()
if isinstance(base_layer, RasterLayer):
if base_layer_type == LayerType.SPATIAL:
result = pysc._gateway.jvm.geopyspark.geotrellis.ProjectedRasterLayer.unionLayers(pysc._jsc.sc(),
[x.srdd for x in layers])
else:
result = pysc._gateway.jvm.geopyspark.geotrellis.TemporalRasterLayer.unionLayers(pysc._jsc.sc(),
[x.srdd for x in layers])
return RasterLayer(base_layer_type, result)
else:
if base_layer_type == LayerType.SPATIAL:
result = pysc._gateway.jvm.geopyspark.geotrellis.SpatialTiledRasterLayer.unionLayers(pysc._jsc.sc(),
[x.srdd for x in layers])
else:
result = pysc._gateway.jvm.geopyspark.geotrellis.TemporalTiledRasterLayer.unionLayers(pysc._jsc.sc(),
[x.srdd for x in layers])
return TiledRasterLayer(base_layer_type, result)
|
h2o-py/tests/testdir_jira/pyunit_hexdev_29_import_types.py | ahmedengu/h2o-3 | 6,098 | 12627019 | <filename>h2o-py/tests/testdir_jira/pyunit_hexdev_29_import_types.py
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
################################################################################
##
## Verifying that Python can define features as categorical or continuous on import
##
################################################################################
def continuous_or_categorical():
df_hex = h2o.import_file(pyunit_utils.locate("smalldata/jira/hexdev_29.csv"), col_types=["enum"]*3)
df_hex.summary()
assert (df_hex['h1'].isfactor())
assert (df_hex['h2'].isfactor())
assert (df_hex['h3'].isfactor())
if __name__ == "__main__":
pyunit_utils.standalone_test(continuous_or_categorical)
else:
continuous_or_categorical()
|
detect_secrets_server/core/usage/common/options.py | zynga-jpetersen/detect-secrets-server | 110 | 12627020 | <reponame>zynga-jpetersen/detect-secrets-server<gh_stars>100-1000
import os
from abc import ABCMeta
from .. import s3
from .storage import get_storage_options
class CommonOptions(object):
"""There are some common flags between the various different subparsers, that
we don't want to display in the main help section.
This contains those flags.
"""
__metaclass__ = ABCMeta
def __init__(self, subparser, action):
self.parser = subparser.add_parser(action)
self._add_common_arguments()
def add_arguments(self):
self.add_local_flag()
return self
def add_local_flag(self):
self.parser.add_argument(
'-L',
'--local',
action='store_true',
help=(
'Indicates that the repo argument is a locally stored '
'repository (rather than a git URL to be cloned).'
),
)
return self
def _add_common_arguments(self):
self.parser.add_argument(
'-s',
'--storage',
choices=get_storage_options(),
default='file',
help=(
'Determines the datastore to use for storing metadata.'
),
)
self.parser.add_argument(
'--root-dir',
type=str,
nargs=1,
default=['~/.detect-secrets-server'],
help=(
'Specify location to clone git repositories to. This '
'folder will also hold any metadata tracking files, if '
'no other persistent storage option is selected. '
'Default: ~/.detect-secrets-server'
),
)
s3.S3Options(self.parser).add_arguments()
@staticmethod
def consolidate_args(args):
args.root_dir = os.path.abspath(
os.path.expanduser(args.root_dir[0])
)
s3.S3Options.consolidate_args(args)
|
alipay/aop/api/domain/MiniEntityBindVO.py | antopen/alipay-sdk-python-all | 213 | 12627051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MiniContentProperty import MiniContentProperty
class MiniEntityBindVO(object):
def __init__(self):
self._entity_id = None
self._principal_id = None
self._property_list = None
@property
def entity_id(self):
return self._entity_id
@entity_id.setter
def entity_id(self, value):
self._entity_id = value
@property
def principal_id(self):
return self._principal_id
@principal_id.setter
def principal_id(self, value):
self._principal_id = value
@property
def property_list(self):
return self._property_list
@property_list.setter
def property_list(self, value):
if isinstance(value, list):
self._property_list = list()
for i in value:
if isinstance(i, MiniContentProperty):
self._property_list.append(i)
else:
self._property_list.append(MiniContentProperty.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.entity_id:
if hasattr(self.entity_id, 'to_alipay_dict'):
params['entity_id'] = self.entity_id.to_alipay_dict()
else:
params['entity_id'] = self.entity_id
if self.principal_id:
if hasattr(self.principal_id, 'to_alipay_dict'):
params['principal_id'] = self.principal_id.to_alipay_dict()
else:
params['principal_id'] = self.principal_id
if self.property_list:
if isinstance(self.property_list, list):
for i in range(0, len(self.property_list)):
element = self.property_list[i]
if hasattr(element, 'to_alipay_dict'):
self.property_list[i] = element.to_alipay_dict()
if hasattr(self.property_list, 'to_alipay_dict'):
params['property_list'] = self.property_list.to_alipay_dict()
else:
params['property_list'] = self.property_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MiniEntityBindVO()
if 'entity_id' in d:
o.entity_id = d['entity_id']
if 'principal_id' in d:
o.principal_id = d['principal_id']
if 'property_list' in d:
o.property_list = d['property_list']
return o
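# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the generated file): round-trip between a
# response-style dict and the model object. The field values are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _payload = {"entity_id": "E-0001", "principal_id": "P-0001", "property_list": []}
    _vo = MiniEntityBindVO.from_alipay_dict(_payload)
    print(_vo.to_alipay_dict())  # the empty property_list is omitted from the output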
|
src/models/sequence/ss/linear_system_recurrence.py | dumpmemory/state-spaces | 513 | 12627071 | """ Earlier version of LSSL module that uses pure recurrence (with variable step sizes) """
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
device = torch.device("cuda")
class LinearSystem(nn.Module):
def __init__(
self,
N,
transition,
C,
D,
):
"""
N: the order of the HiPPO projection
dt: discretization step size - should be roughly inverse to the length of the sequence
C: (..., M, N)
D: (..., M)
"""
super().__init__()
self.N = N
self.transition = transition
self.C = C
self.D = D
def forward(self, dt, u, x_=None):
"""
u : (length, ...)
x : (..., N)
Returns
y : (length, ..., M)
"""
if x_ is None:
x_ = u.new_zeros(u.shape[1:] + (self.N,))
ys = []
for dt_, u_ in zip(dt, u):
x_ = self.transition.bilinear(dt_, x_, u_) # (..., N)
y = (self.C @ x_.unsqueeze(-1)).squeeze(
-1
) # TODO can use sum instead of matmul if M = 1
ys.append(y)
y = torch.stack(ys, dim=0)
v = u.unsqueeze(-1) * self.D # (L, ..., M)
y = y + v # (L, ..., M)
return y, x_
def adjoint_input(self, dy, dt):
"""Computes adjoint to the input u
dy: (L, ..., M)
dt: (L, ...)
"""
# Compute dx_
dx_ = torch.sum(dy[-1].unsqueeze(-1) * self.C, dim=-2) # (..., N)
dyC = (self.C.transpose(-1, -2) @ dy.unsqueeze(-1)).squeeze(
-1
) # C^T dy (L, ..., N)
dyD = torch.sum(dy * self.D, dim=-1) # D^T dy (L, ...)
du = []
for dt_, dyC_ in zip(dt.flip(0), dyC.flip(0)):
dx_ = self.transition.inverse_mult(dx_, dt_ / 2, transpose=True) # (..., N)
du_ = torch.sum(self.transition.B * dx_, dim=-1) # (...)
du_ = dt_ * du_ # (...)
dx_ = (
self.transition.forward_mult(dx_, dt_ / 2, transpose=True) + dyC_
) # (..., N)
du.append(du_)
du = torch.stack(du, dim=0) # (L, ...)
du = du.flip(0)
du = du + dyD
return du
def adjoint_projection(self, dy, dt, u):
"""Computes adjoint to the projection parameters C, D
dy: (L, ..., M)
u: (L, ...)
dt: (L, ...)
"""
dC = torch.zeros_like(self.C)
x_ = u.new_zeros(u.shape[1:] + (self.N,))
for dt_, u_, dy_ in zip(dt, u, dy):
x_ = self.transition.bilinear(dt_, x_, u_) # (..., N)
dC_ = dy_.unsqueeze(-1) * x_.unsqueeze(-2) # (..., M, N)
dC += dC_.view((-1,) + self.C.shape).sum(dim=0) # (M, N)
dD = dy * u.unsqueeze(-1) # (L, ..., M)
dD = dD.view((-1,) + self.D.shape).sum(dim=0) # (M,)
return dC, dD
class LinearSystemStepsize(nn.Module):
def __init__(
self,
N,
transition,
C,
D,
):
"""
N: the order of the HiPPO projection
dt: discretization step size - should be roughly inverse to the length of the sequence
"""
super().__init__()
self.N = N
self.transition = transition
self.C = C
self.D = D
def forward(self, dt, u, x=None):
"""
u : (length, ...)
x : (..., N)
Returns
y : (length, ..., M)
"""
v = u.unsqueeze(-1) * self.D # (L, ..., M)
if x is None:
x = u.new_zeros(u.shape[1:] + (self.N,))
ys = []
for dt_, u_ in zip(dt, u):
x = self.transition.bilinear(dt_, x, u_) # (..., N)
y = (self.C @ x.unsqueeze(-1)).squeeze(
-1
) # TODO can use sum instead of matmul if M = 1
ys.append(y)
y = torch.stack(ys, dim=0)
y = y + v # (L, ..., M)
return y, x
def adjoint(self, dy, x_, dt, u):
"""
gradient:
dy: (L, ..., M)
state:
# dx_: (..., N)
x: (..., N)
cached arguments:
dt: (L, ...)
u: (L, ...)
"""
dx_ = torch.sum(dy[-1].unsqueeze(-1) * self.C, dim=-2) # (..., N)
dyC = (self.C.transpose(-1, -2) @ dy.unsqueeze(-1)).squeeze(
-1
) # C^T dy (L, ..., N)
dyD = torch.sum(dy * self.D, dim=-1) # D^T dy (L, ...)
dC = torch.zeros_like(self.C)
dD = torch.zeros_like(self.D)
du = []
ddt = []
for dt_, dyC_, u_, dy_ in zip(dt.flip(0), dyC.flip(0), u.flip(0), dy.flip(0)):
# dy_: (..., M)
# x_: (..., N)
# u_, dt_: (...)
dC_ = dy_.unsqueeze(-1) * x_.unsqueeze(-2) # (..., M, N)
dC += dC_.view((-1,) + self.C.shape).sum(dim=0) # (M, N)
dD_ = dy_ * u_.unsqueeze(-1) # (..., M)
dD += dD_.view((-1,) + self.D.shape).sum(dim=0) # (M,)
dx_ = self.transition.inverse_mult(dx_, dt_ / 2, transpose=True) # (..., N)
# Compute du
du_ = torch.sum(self.transition.B * dx_, dim=-1) # (...)
du_ = dt_ * du_ # (...)
du.append(du_)
x_prev = self.transition.bilinear(-dt_, x_, u_) # (..., N)
ddt_ = self.transition.quadratic(dx_, 0.5 * (x_prev + x_)) # (...)
ddt_ = ddt_ + torch.sum(self.transition.B * dx_, dim=-1) * u_
ddt.append(ddt_) # (...)
x_ = x_prev
dx_ = (
self.transition.forward_mult(dx_, dt_ / 2, transpose=True) + dyC_
) # (..., N)
du = torch.stack(du, dim=0).flip(0) # (L, ...)
du = du + dyD
ddt = torch.stack(ddt, dim=0).flip(0) # (L, ...)
# Sanity check
# print(f"{x_=}") # should be 0 (initial state)
return du, ddt, dC, dD
class LinearSystemFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, dt, u, C, D, transition):
"""
dt : (L, ...)
u : (L, ...)
C : (M, N)
D : (M,)
transition: Transition objective implementing forward_mult, inverse_mult, bilinear, quadratic
Returns:
y : (L, ..., M)
"""
ctx.transition = transition
ctx.save_for_backward(dt, u, C, D)
with torch.no_grad():
if x is None:
x = u.new_zeros(u.shape[1:] + (transition.N,))
ys = []
for dt_, u_ in zip(dt, u):
# breakpoint()
x = transition.bilinear(dt_, x, u_) # (..., N)
y = (C @ x.unsqueeze(-1)).squeeze(
-1
) # TODO can use sum instead of matmul if M = 1
ys.append(y)
y = torch.stack(ys, dim=0)
# breakpoint()
v = u.unsqueeze(-1) * D # (L, ..., M)
y = y + v # (L, ..., M)
return y
@staticmethod
def backward(ctx, dy):
"""Computes adjoint to the input u
dy: (L, ..., M)
"""
dt, u, C, D = ctx.saved_tensors
transition = ctx.transition
with torch.no_grad():
# Compute dx_
dx_ = torch.sum(dy[-1].unsqueeze(-1) * C, dim=-2) # (..., N)
# Compute du
dyC = (C.transpose(-1, -2) @ dy.unsqueeze(-1)).squeeze(
-1
) # C^T dy (L, ..., N)
dyD = torch.sum(dy * D, dim=-1) # D^T dy (L, ...)
du = []
for dt_, dyC_ in zip(dt.flip(0), dyC.flip(0)):
dx_ = transition.inverse_mult(dx_, dt_ / 2, transpose=True) # (..., N)
du_ = torch.sum(transition.B * dx_, dim=-1) # (...)
du_ = dt_ * du_ # (...)
dx_ = (
transition.forward_mult(dx_, dt_ / 2, transpose=True) + dyC_
) # (..., N)
du.append(du_)
du = torch.stack(du, dim=0) # (L, ...)
du = du.flip(0)
du = du + dyD
# Compute dC, dD
dC = torch.zeros_like(C)
x_ = u.new_zeros(u.shape[1:] + (transition.N,))
for dt_, u_, dy_ in zip(dt, u, dy):
x_ = transition.bilinear(dt_, x_, u_) # (..., N)
dC_ = dy_.unsqueeze(-1) * x_.unsqueeze(-2) # (..., M, N)
dC += dC_.view((-1,) + C.shape).sum(dim=0) # (M, N)
dD = dy * u.unsqueeze(-1) # (L, ..., M)
dD = dD.view((-1,) + D.shape).sum(dim=0) # (M,)
if not ctx.needs_input_grad[0]:
dx_ = None
return dx_, None, du, dC, dD, None
linearsystem = LinearSystemFunction.apply
class LinearSystemStepsizeFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, dt, u, C, D, transition):
"""
dt : (L, ...)
u : (L, ...)
C : (M, N)
D : (M,)
transition: Transition objective implementing forward_mult, inverse_mult, bilinear, quadratic
Returns:
y : (L, ..., M)
"""
ctx.transition = transition
# ctx.save_for_backward(dt, u, C, D)
v = u.unsqueeze(-1) * D # (L, ..., M)
if x is None:
x = u.new_zeros(u.shape[1:] + (transition.N,))
ys = []
for dt_, u_ in zip(dt, u):
x = transition.bilinear(dt_, x, u_) # (..., N)
y = (C @ x.unsqueeze(-1)).squeeze(
-1
) # TODO can use sum instead of matmul if M = 1
ys.append(y)
y = torch.stack(ys, dim=0)
y = y + v # (L, ..., M)
ctx.save_for_backward(dt, u, C, D, x)
return y
@staticmethod
def backward(ctx, dy):
"""
gradient:
dy: (L, ..., M)
state:
# dx_: (..., N)
x: (..., N)
cached arguments:
dt: (L, ...)
u: (L, ...)
"""
# dt, u, C, D = ctx.saved_tensors
dt, u, C, D, x_ = ctx.saved_tensors
transition = ctx.transition
# Compute dx_
dx_ = torch.sum(dy[-1].unsqueeze(-1) * C, dim=-2) # (..., N)
dyC = (C.transpose(-1, -2) @ dy.unsqueeze(-1)).squeeze(-1) # C^T dy (L, ..., N)
dyD = torch.sum(dy * D, dim=-1) # D^T dy (L, ...)
dC = torch.zeros_like(C)
dD = torch.zeros_like(D)
du = []
ddt = []
for dt_, dyC_, u_, dy_ in zip(dt.flip(0), dyC.flip(0), u.flip(0), dy.flip(0)):
# dy_: (..., M)
# x_: (..., N)
# u_, dt_: (...)
dC_ = dy_.unsqueeze(-1) * x_.unsqueeze(-2) # (..., M, N)
dC += dC_.view((-1,) + C.shape).sum(dim=0) # (M, N)
dD_ = dy_ * u_.unsqueeze(-1) # (..., M)
dD += dD_.view((-1,) + D.shape).sum(dim=0) # (M,)
dx_ = transition.inverse_mult(dx_, dt_ / 2, transpose=True) # (..., N)
# Compute du
du_ = torch.sum(transition.B * dx_, dim=-1) # (...)
du_ = dt_ * du_ # (...)
du.append(du_)
x_prev = transition.bilinear(-dt_, x_, u_) # (..., N)
ddt_ = transition.quadratic(dx_, 0.5 * (x_prev + x_)) # (...)
ddt_ = ddt_ + torch.sum(transition.B * dx_, dim=-1) * u_
ddt.append(ddt_) # (...)
x_ = x_prev
dx_ = (
transition.forward_mult(dx_, dt_ / 2, transpose=True) + dyC_
) # (..., N)
du = torch.stack(du, dim=0).flip(0) # (L, ...)
du = du + dyD
ddt = torch.stack(ddt, dim=0).flip(0) # (L, ...)
# Sanity check
# print(f"{x_=}") # should be 0 (initial state)
if not ctx.needs_input_grad[0]:
dx_ = None
return dx_, ddt, du, dC, dD, None
linearsystemstepsize = LinearSystemStepsizeFunction.apply
def _abs_err(x, y):
x_ = x.detach().cpu().numpy()
y_ = y.detach().cpu().numpy()
return (y_ - x_) / x_
def test_linear_system(L, batch, dim, N, M, stepsize=False):
from models.hippo import transition # for testing
# Define A, B, C, D
A = torch.eye(N)
B = torch.ones(N)
C = torch.ones(dim, M, N, requires_grad=True).to(device)
D = torch.ones(dim, M, requires_grad=True).to(device)
C.retain_grad()
D.retain_grad()
# Create u and dt
u = torch.arange(L, dtype=torch.float, requires_grad=True).to(device)
u = u.unsqueeze(-1).unsqueeze(-1).repeat((1, batch, dim)) # (L, B, D)
u.retain_grad()
dt = torch.ones(L, batch, dim) * 0.001 # for LegT
# dt = torch.ones_like(u, requires_grad=True).to(device) * 0.001 # for LegT
# dt = torch.ones_like(u, requires_grad=True).to(device) * 0.1 # for LagT
# dt.retain_grad()
# Construct model
transition = transition.ManualAdaptiveTransition(N, A, B).to(device)
# transition = transition.ConstantBilinearTransition(N, A, B, dt[0]).to(device)
# transition = transition.LegTAdaptiveTransition(N).to(device)
# transition = transition.LagTCumsumAdaptiveTransition(N).to(device)
dt = dt.to(device)
if stepsize:
hippo = LinearSystemStepsize(N, transition, C, D) # .to(device)
dt.requires_grad_(True)
dt.retain_grad()
else:
hippo = LinearSystem(N, transition, C, D) # .to(device)
# Autograd
if stepsize:
y, x = hippo.forward(dt, u)
else:
y, x = hippo.forward(dt, u)
x.retain_grad()
y.retain_grad()
z = y.sum()
z.backward(retain_graph=True)
# print(f"{y=}")
# Manual adjoint
if stepsize:
du, ddt, dC, dD = hippo.adjoint(y.grad, x, dt, u)
print("du", u.grad, "\nerror", _abs_err(u.grad, du))
print("ddt", dt.grad, "\nerror", _abs_err(dt.grad, ddt))
print("dC", C.grad, "\nerror", _abs_err(C.grad, dC))
print("dD", D.grad, "\nerror", _abs_err(D.grad, dD))
print("Function vs Module abs error")
u.grad.zero_()
dt.grad.zero_()
C.grad.zero_()
D.grad.zero_()
y_ = linearsystemstepsize(None, dt, u, C, D, transition)
print(f"y", y_ - y)
y_.sum().backward()
print("du", u.grad - du)
print("ddt", dt.grad - ddt)
print("dC", C.grad - dC)
print("dD", D.grad - dD)
else:
du = hippo.adjoint_input(y.grad, dt)
dC, dD = hippo.adjoint_projection(y.grad, dt, u)
print("du", u.grad, "\nerror", _abs_err(u.grad, du))
print("dC", C.grad, "\nerror", _abs_err(C.grad, dC))
print("dD", D.grad, "\nerror", _abs_err(D.grad, dD))
print("Function vs Module abs error")
u.grad.zero_()
C.grad.zero_()
D.grad.zero_()
y_ = linearsystem(None, dt, u, C, D, transition)
print(f"y", y_ - y)
y_.sum().backward()
print("du", u.grad - du)
print("dC", C.grad - dC)
print("dD", D.grad - dD)
if __name__ == "__main__":
L = 8
B = 1
D = 2
N = 8
M = 1
test_linear_system(L, B, D, N, M, False)
# test_linear_system(L, B, D, N, M, True)
|
hyppo/ksample/_utils.py | zdbzdb1212/hyppo | 116 | 12627072 | import numpy as np
from ..tools import contains_nan
class _CheckInputs:
def __init__(self, inputs, indep_test=None, reps=None):
self.inputs = inputs
self.reps = reps
self.indep_test = indep_test
def __call__(self):
self._check_ndarray_inputs()
for i in self.inputs:
contains_nan(i)
self.inputs = self.check_dim()
self.inputs = self._convert_inputs_float64()
self._check_indep_test()
self._check_min_samples()
return self.inputs
def _check_ndarray_inputs(self):
if len(self.inputs) < 2:
raise ValueError("there must be at least 2 inputs")
for i in self.inputs:
if not isinstance(i, np.ndarray):
raise ValueError("x and y must be ndarrays")
def check_dim(self):
# check if inputs are ndarrays
new_inputs = []
dims = []
for i in self.inputs:
# convert arrays of type (n,) to (n, 1)
if i.ndim == 1:
i = i[:, np.newaxis]
elif i.ndim != 2:
raise ValueError(
"Expected a 2-D array `i`, found shape " "{}".format(i.shape)
)
dims.append(i.shape[1])
new_inputs.append(i)
self._check_nd_ksampletest(dims)
return new_inputs
def _check_nd_ksampletest(self, dims):
if len(set(dims)) > 1:
raise ValueError(
"Shape mismatch, inputs must have shape " "[n, p] and [m, p]."
)
def _convert_inputs_float64(self):
return [np.asarray(i).astype(np.float64) for i in self.inputs]
def _check_indep_test(self):
tests = ["cca", "dcorr", "hhg", "rv", "hsic", "mgc", "kmerf"]
if self.indep_test not in tests and self.indep_test is not None:
raise ValueError("indep_test must be in {}".format(tests))
def _check_min_samples(self):
for i in self.inputs:
if i.shape[0] <= 3:
raise ValueError("Number of samples is too low")
def k_sample_transform(inputs, test_type="normal"):
"""
Computes a `k`-sample transform of the inputs.
    For :math:`k` groups, this creates two matrices: the first vertically stacks the
    inputs.
    In order to use this function, the inputs must have the same number of dimensions
    :math:`p` and can have varying numbers of samples :math:`n`. The second output is a
    label matrix that one-hot encodes the groups. The outputs are thus ``(N, p)`` and
    ``(N, k)`` where `N` is the total number of samples. In the case where the test is
    a random forest based test, it creates an ``(N, 1)`` array whose entries are
    values from 0 to :math:`k - 1`, one group label per sample.
Parameters
----------
inputs : list of ndarray
A list of the inputs. All inputs must be ``(n, p)`` where `n` is the number
of samples and `p` is the number of dimensions. `n` can vary between samples,
but `p` must be the same among all the samples.
test_type : {"normal", "rf"}, default: "normal"
        Whether to one-hot encode the inputs ("normal") or use a one-dimensional
categorical encoding ("rf").
Returns
-------
u : ndarray
The matrix of concatenated inputs of shape ``(N, p)``.
v : ndarray
The label matrix of shape ``(N, k)`` ("normal") or ``(N, 1)`` ("rf").
"""
n_inputs = len(inputs)
u = np.vstack(inputs)
if np.var(u) == 0:
raise ValueError("Test cannot be run, the inputs have 0 variance")
if test_type == "rf":
v = np.concatenate(
[np.repeat(i, inputs[i].shape[0]) for i in range(n_inputs)], axis=0
)
elif test_type == "normal":
if n_inputs == 2:
n1 = inputs[0].shape[0]
n2 = inputs[1].shape[0]
v = np.vstack([np.zeros((n1, 1)), np.ones((n2, 1))])
else:
vs = []
for i in range(n_inputs):
n = inputs[i].shape[0]
encode = np.zeros(shape=(n, n_inputs))
encode[:, i] = np.ones(shape=n)
vs.append(encode)
v = np.concatenate(vs)
else:
raise ValueError("test_type must be normal or rf")
return u, v
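# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): stacking two groups
# with the same dimensionality but different sample sizes.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _rng = np.random.default_rng(0)
    _x, _y = _rng.normal(size=(10, 3)), _rng.normal(loc=1.0, size=(15, 3))
    _u, _v = k_sample_transform([_x, _y])
    print(_u.shape, _v.shape)  # (25, 3) and (25, 1); _v labels the two groups 0/1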
|
demos/vis/streamlit/covid19starterkit.py | carlboudreau007/ecosys | 245 | 12627076 | <reponame>carlboudreau007/ecosys
import pyTigerGraph as tg
import streamlit as st
import pandas as pd
import flat_table
import altair as alt
import plotly.figure_factory as ff
from bokeh.plotting import figure
import plotly.express as px
import plotly.graph_objects as go
st.title('Dynamically Visualize South Korea COVID-19 data using TigerGraph and Streamlit')
min_age, max_age = st.slider("Select Age Range", 0, 104, [10, 20])
sex = st.multiselect('Sex', ['male', 'female'])
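# ---------------------------------------------------------------------------
# Editor's note (not part of the original script): the original file uses a
# `graph` object without ever defining it. A pyTigerGraph connection along
# these lines is assumed; the host, graph name, and credentials below are
# placeholders. Run the app with: streamlit run covid19starterkit.py
# ---------------------------------------------------------------------------
graph = tg.TigerGraphConnection(
    host="https://your-instance.i.tgcloud.io",  # placeholder
    graphname="MyGraph",                        # placeholder
    username="tigergraph",                      # placeholder
    password="<PASSWORD>",                      # placeholder
)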
results = graph.runInstalledQuery("streamlit")
df = pd.DataFrame(results[0]["s2"])
data = flat_table.normalize(df) # Cleaning uo the data
data = data[['v_id', 'attributes.Age', 'attributes.Sex', 'attributes.Location.latitude', 'attributes.Location.longitude']]
if(len(sex)==1): # Filtering the data based on the sex filter input
data = data[data['attributes.Sex']==sex[0]]
data = data[data['attributes.Age'].between(left=min_age, right=max_age)] # Filtering the data based on age input
# grabbing location data for map
locations = data[['attributes.Location.latitude', 'attributes.Location.longitude']]
locations = locations.rename({'attributes.Location.latitude': 'lat', 'attributes.Location.longitude': 'lon'}, axis=1)
st.map(locations) # Using the streamlit map widget with locations input
gender_data = data['attributes.Sex']
age = data['attributes.Age']
s = age.value_counts()
age = pd.DataFrame({'Age':s.index, 'Count':s.values})
st.write(data)
st.write('Bar chart of Male and Females')
st.bar_chart(gender_data)
st.write('Scatter plot of Age counts')
fig = px.scatter(age, x="Age", y="Count")
st.plotly_chart(fig)
|
lib/datasets/test_utils.py | rainwangphy/AutoDL-Projects | 385 | 12627083 | ##################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2019 #
##################################################
import os
def test_imagenet_data(imagenet):
total_length = len(imagenet)
assert total_length == 1281166 or total_length == 50000, 'The length of ImageNet is wrong : {}'.format(total_length)
map_id = {}
for index in range(total_length):
path, target = imagenet.imgs[index]
folder, image_name = os.path.split(path)
_, folder = os.path.split(folder)
if folder not in map_id:
map_id[folder] = target
else:
assert map_id[folder] == target, 'Class : {} is not {}'.format(folder, target)
assert image_name.find(folder) == 0, '{} is wrong.'.format(path)
print ('Check ImageNet Dataset OK')
|
doc/sphinxext/gallery_generator.py | zaxtax/arviz | 1,159 | 12627098 | """
Sphinx plugin to run example scripts and create a gallery page.
Modified from the seaborn project, which modified the mpld3 project.
Also inspired in bokeh's bokeh_gallery sphinxext.
"""
import glob
import os
import os.path as op
import re
import shutil
import token
import tokenize
from typing import Optional
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import image
from arviz.rcparams import rc_context
from arviz import rcParams
matplotlib.use("Agg")
MPL_RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
**API documentation:** {api_name}
.. tab-set::
.. tab-item:: Matplotlib
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
BOKEH_RST_TEMPLATE = """
.. tab-item:: Bokeh
.. bokeh-plot:: {absfname}
:source-position: none
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
RST_TEMPLATES = {"matplotlib": MPL_RST_TEMPLATE, "bokeh": BOKEH_RST_TEMPLATE}
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
span.figure-label {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
.figure .gallery-figure-title p {{
position: relative;
top: 170px;
color: black;
visibility: visible;
text-align: center !important;
line-height: normal;
}}
.figure .gallery-figure-title span {{
top: 170px;
position: relative;
visibility: visible;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctrees_contents}
"""
CONTENTS_ENTRY_TEMPLATE = (
".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{htmlfilename}>\n"
" <img src=../_static/{thumbfilename}>\n"
" <span class='figure-label'>\n"
" <p>{sphinx_tag}</p>\n"
" </span>\n"
' <span class="gallery-figure-title">\n'
" <p>{title}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
""
)
def create_thumbnail(infile, thumbfile, width=275, height=275, cx=0.5, cy=0.5, border=4):
im = image.imread(infile)
rows, cols = im.shape[:2]
size = min(rows, cols)
if size == cols:
xslice = slice(0, size)
ymin = min(max(0, int(cy * rows - size // 2)), rows - size)
yslice = slice(ymin, ymin + size)
else:
yslice = slice(0, size)
xmin = min(max(0, int(cx * cols - size // 2)), cols - size)
xslice = slice(xmin, xmin + size)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi, constrained_layout=True)
ax = fig.add_axes([0, 0, 1, 1], aspect="auto", frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect="auto", resample=True, interpolation="bilinear")
fig.savefig(thumbfile, dpi=dpi)
plt.close(fig)
def indent(s, N=4):
"""indent a string"""
return s.replace("\n", "\n" + N * " ")
class ExampleGenerator:
"""Tools for generating an example page from a file"""
_title: Optional[str]
def __init__(self, filename, target_dir, backend, thumb_dir, target_dir_orig):
self.filename = filename
self.target_dir = target_dir
self.thumb_dir = thumb_dir
self.backend = backend
self.thumbloc = 0.5, 0.5
self._title = None
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir_orig, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if not op.exists(outfilename) or (op.getmtime(outfilename) < op.getmtime(filename)):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def title(self) -> str:
if self._title is not None:
return self._title
return self.modulename
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self) -> str:
return op.splitext(self.fname)[0]
@property
def basename(self) -> str:
return self.modulename.split("_", 1)[1]
@property
def pyfilename(self):
return self.modulename + ".py"
@property
def rstfilename(self):
return self.basename + ".rst"
@property
def htmlfilename(self):
return self.basename + ".html"
@property
def pngfilename(self):
pngfile = self.modulename + ".png"
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.basename + "_thumb.png"
return pngfile
@property
def apiname(self):
with open(op.join(self.target_dir, self.pyfilename), "r") as file:
regex = r"az\.(plot\_[a-z_]+)\("
name = re.findall(regex, file.read())
apitext = name[0] if name else ""
return (
":func:`~arviz.{apitext}`".format(apitext=apitext)
if apitext
else "No API Documentation available"
)
@property
def sphinxtag(self):
return f"example_{self.basename}"
@property
def pagetitle(self):
return self.docstring.strip().split("\n")[0].strip()
def extract_docstring(self):
"""Extract a module-level docstring"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith("#!"):
lines.pop(0)
start_row = 1
docstring = ""
first_par = ""
line_iter = lines.__iter__()
tokens = tokenize.generate_tokens(lambda: next(line_iter))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ("NEWLINE", "COMMENT", "NL", "INDENT", "DEDENT"):
continue
elif tok_type == "STRING":
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = "\n".join(line.rstrip() for line in docstring.split("\n")).split(
"\n\n"
)
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
title: Optional[str] = None
ex_title: str = ""
for line in docstring.split("\n"):
# we've found everything we need...
if thumbloc and title and ex_title != "":
break
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
continue
m = re.match(r"^_example_title: (.*)$", line)
if m:
title = m.group(1)
continue
# capture the first non-empty line of the docstring as title
if ex_title == "":
ex_title = line
assert ex_title != ""
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n") if not l.startswith("_thumb")])
if title is not None:
docstring = "\n".join(
[l for l in docstring.split("\n") if not l.startswith("_example_title")]
)
else:
title = ex_title
self._title = title
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row # pylint: disable=undefined-loop-variable
def exec_file(self):
# pylint: disable=exec-used
print("running {0}".format(self.filename))
plt.close("all")
if self.backend == "matplotlib":
thumbfile = op.join(self.thumb_dir, self.thumbfilename)
cx, cy = self.thumbloc
pngfile = op.join(self.target_dir, self.pngfilename)
my_globals = {"plt": plt}
with open(self.filename, "r") as fp:
code_text = fp.read()
code_text = re.sub(r"(plt\.show\S+)", "", code_text)
exec(compile(code_text, self.filename, "exec"), my_globals)
fig = plt.gcf()
fig.canvas.draw()
fig.savefig(pngfile, dpi=75)
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
elif self.backend == "bokeh":
with open(self.filename, "r") as fp:
code_text = fp.read()
with rc_context(rc={"plot.bokeh.show": False}):
exec(code_text)
def toctree_entry(self):
return " ./{}\n\n".format(op.join(op.splitext(self.htmlfilename)[0]))
def contents_entry(self) -> str:
return CONTENTS_ENTRY_TEMPLATE.format(
backend=self.backend,
htmlfilename=self.htmlfilename,
thumbfilename=self.thumbfilename,
sphinx_tag=self.sphinxtag,
title=self.title,
)
def main(app):
working_dir = os.getcwd()
os.chdir(app.builder.srcdir)
static_dir = op.join(app.builder.srcdir, "..", "build", "_static")
target_dir_orig = op.join(app.builder.srcdir, "examples")
backends = ("matplotlib", "bokeh")
backend_prefixes = ("mpl", "bokeh")
toctrees_contents = ""
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
path_dict = {}
for backend in backends:
target_dir = op.join(target_dir_orig, backend)
image_dir = op.join(target_dir, "_images")
source_dir = op.abspath(op.join(app.builder.srcdir, "..", "..", "examples", backend))
if not op.exists(source_dir):
os.makedirs(source_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
path_dict[backend] = {
"source_dir": source_dir,
"target_dir": target_dir,
"image_dir": image_dir,
}
toctree = "\n\n.. toctree::\n :hidden:\n\n"
contents = "\n\n"
# Write individual example files
files = sorted(glob.glob(op.join(path_dict["matplotlib"]["source_dir"], "*.py")))
for filename in files:
base_filename = op.split(filename)[1].split("_", 1)[1]
example_contents = ""
for backend, prefix in zip(backends, backend_prefixes):
source_dir = path_dict[backend]["source_dir"]
target_dir = path_dict[backend]["target_dir"]
expected_filename = op.join(source_dir, f"{prefix}_{base_filename}")
if not op.exists(expected_filename):
if backend == "matplotlib":
raise ValueError("All examples must have a matplotlib counterpart.")
continue
ex = ExampleGenerator(
expected_filename, target_dir, backend, thumb_dir, target_dir_orig
)
shutil.copyfile(expected_filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATES[backend].format(
sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=op.join(backend, ex.pyfilename),
absfname=op.join(target_dir, ex.pyfilename),
img_file=op.join(backend, ex.pngfilename),
api_name=ex.apiname,
)
example_contents += output
with open(op.join(target_dir_orig, ex.rstfilename), "w") as f:
f.write(example_contents)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
toctrees_contents = "\n".join((toctree, contents))
toctrees_contents += """.. raw:: html\n\n <div style="clear: both"></div>"""
# write index file
index_file = op.join(target_dir, "..", "index.rst")
with open(index_file, "w") as index:
index.write(
INDEX_TEMPLATE.format(
sphinx_tag="example_gallery",
toctrees_contents=toctrees_contents,
examples_source=source_dir,
)
)
os.chdir(working_dir)
def setup(app):
app.connect("builder-inited", main)
|
sina/spider/Ajax_weibo.py | rua-aaa/awesome-python-login-model | 5,857 | 12627117 | # -*- coding: utf-8 -*-
from urllib.parse import urlencode
import requests, pymysql
from pyquery import PyQuery as pq
from selenium import webdriver
from time import sleep
# Connect to the database
connection = pymysql.connect(host='localhost',
port=3306,
user='root',
passwd='<PASSWORD>',
db='python',
charset='utf8')
cursor = connection.cursor()
sql = "USE python;"
cursor.execute(sql)
connection.commit()
base_url = 'https://m.weibo.cn/api/container/getIndex?'
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/2145291155',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
def create_sheet(bozhu):
try:
weibo = '''
CREATE TABLE weibo(
ID VARCHAR (255) NOT NULL PRIMARY KEY,
text VARCHAR (255),
attitudes VARCHAR (255),
comments VARCHAR (255),
reposts VARCHAR (255)
)
'''
# 序号 INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
cursor.execute(weibo)
connection.commit()
except:
pass
def url_get():
    # # Cookies are kept automatically; no need to maintain their contents manually
# cookies = {}
# s = requests.session()
# with open('E:\example\豆瓣读书爬虫\cookie.txt')as file:
# raw_cookies = file.read()
# for line in raw_cookies.split(';'):
# key, value = line.split('=', 1)
# cookies[key] = value
    # # Fill in the request headers
# header = {'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
# }
    # # GET request; decode the response
# response = s.get(url=xl_url, headers=header,cookies=cookies)
browser = webdriver.PhantomJS()
browser.get(url='https://m.weibo.cn/')
wb_name = browser.find_element_by_class_name("W_input")
    wb_name.send_keys(input('Enter the blogger ID: '))
sleep(10)
    search = browser.find_element_by_css_selector('.W_ficon.ficon_search.S_ficon')  # class-name locators cannot contain spaces, so use a CSS selector
search.click()
sleep(5)
bz_num = browser.find_element_by_class_name('name_txt')
bz_num.click()
sleep(5)
    # A new page was opened; switch to the new window handle
handles = browser.window_handles
browser.switch_to_window(handles[1])
# https://m.weibo.cn/api/container/getIndex?type=uid&value=2145291155&containerid=1076032145291155
# Build the request URL
def get_page(page):
    # Query-string parameters
params = {
'type': 'uid',
'value': '2145291155',
'containerid': '1076032145291155',
'page': page
}
    # Use urlencode() to turn the params dict into the GET query string
url = base_url + urlencode(params)
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
# print(response.json())
return response.json()
except requests.ConnectionError as e:
print('Error', e.args)
# Parse the response and store the data in the database
def parse_page(json):
if json:
items = json.get('data').get('cards')
for index, item in enumerate(items):
if page == 1 and index == 1:
continue
else:
item = item.get('mblog')
# weibo = {}
# weibo['id'] = item.get('id')
# weibo['text'] =
# weibo['attitudes'] = item.get('attitudes_count')
# weibo['comments'] = item.get('comments_count')
# weibo['reposts'] = item.get('reposts_count')
weibo = []
weibo.append(item.get('id'))
weibo.append(pq(item.get('text')).text())
weibo.append(item.get('attitudes_count'))
weibo.append(item.get('comments_count'))
weibo.append(item.get('reposts_count'))
                # Duplicate rows are detected via the primary key and skipped; MySQL only raises a warning
try:
sql = '''INSERT INTO weibo (ID,text,attitudes,comments,reposts)
VALUES (%s,%s,%s,%s,%s) '''
cursor.execute(sql, weibo)
connection.commit()
except:
pass
yield weibo
if __name__ == '__main__':
for page in range(1, 17):
json = get_page(page)
results = parse_page(json)
for result in results:
print(result)
cursor.close()
# Can crawl all weibo posts of any specified blogger, creating a table named after the blogger to store the data separately
# Uses selenium + PhantomJS to fetch the corresponding blogger's homepage link
|
src/demo.py | championway/pygoturn | 132 | 12627122 | import os
import argparse
import torch
import cv2
from test import GOTURN
args = None
parser = argparse.ArgumentParser(description='GOTURN Testing')
parser.add_argument('-w', '--model-weights',
type=str, help='path to pretrained model')
parser.add_argument('-d', '--data-directory',
default='../data/OTB/Man', type=str,
help='path to video frames')
parser.add_argument('-s', '--save-directory',
default='../result',
type=str, help='path to save directory')
def axis_aligned_iou(boxA, boxB):
# make sure that x1,y1,x2,y2 of a box are valid
assert(boxA[0] <= boxA[2])
assert(boxA[1] <= boxA[3])
assert(boxB[0] <= boxB[2])
assert(boxB[1] <= boxB[3])
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def save(im, bb, gt_bb, idx):
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
bb = [int(val) for val in bb] # GOTURN output
gt_bb = [int(val) for val in gt_bb] # groundtruth box
# plot GOTURN predictions with red rectangle
im = cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]),
(0, 0, 255), 2)
# plot annotations with white rectangle
im = cv2.rectangle(im, (gt_bb[0], gt_bb[1]), (gt_bb[2], gt_bb[3]),
(255, 255, 255), 2)
save_path = os.path.join(args.save_directory, str(idx)+'.jpg')
cv2.imwrite(save_path, im)
def main(args):
cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if cuda else 'cpu')
tester = GOTURN(args.data_directory,
args.model_weights,
device)
if os.path.exists(args.save_directory):
print('Save directory %s already exists' % (args.save_directory))
else:
os.makedirs(args.save_directory)
# save initial frame with bounding box
save(tester.img[0][0], tester.prev_rect, tester.prev_rect, 1)
tester.model.eval()
# loop through sequence images
for i in range(tester.len):
# get torch input tensor
sample = tester[i]
# predict box
bb = tester.get_rect(sample)
gt_bb = tester.gt[i]
tester.prev_rect = bb
# save current image with predicted rectangle and gt box
im = tester.img[i][1]
save(im, bb, gt_bb, i+2)
# print stats
print('frame: %d, IoU = %f' % (
i+2, axis_aligned_iou(gt_bb, bb)))
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
utest/run/test_process.py | adrianyorke/RIDE | 775 | 12627146 | <reponame>adrianyorke/RIDE<gh_stars>100-1000
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from robotide.run.process import Process
from nose.tools import assert_equal
SCRIPT = os.path.join(os.path.dirname(__file__),
'process_test_scripts.py').replace(' ', '<SPACE>')
class TestProcess(unittest.TestCase):
def test_command_as_string(self):
initial_command = 'python hupu count_args a1 a2<SPACE>2<SPACE>1 a3<SPACE>'
processed_command = Process(initial_command)._command
assert_equal(len(processed_command), len(initial_command.split()))
assert_equal(processed_command[4], 'a2 2 1')
def test_writing_to_stderr(self):
self.proc = self._create_process('python %s stderr' % SCRIPT)
assert_equal(self.proc.get_output(wait_until_finished=True),
'This is stderr\n')
def _create_process(self, command):
proc = Process(command)
proc.start()
return proc
if __name__ == '__main__':
unittest.main()
|
demos/cnn_function_caller.py | bancopoppa/adds | 135 | 12627165 | """
Function caller for the CNN experiments.
-- <EMAIL>
"""
# pylint: disable=arguments-differ
import os
from time import sleep
# Local
from opt.nn_function_caller import NNFunctionCaller
from cg.cifar import run_tensorflow_cifar
import traceback
_MAX_TRIES = 3
_SLEEP_BETWEEN_TRIES_SECS = 3
def get_default_cnn_tf_params():
""" Default MLP training parameters for tensorflow. """
return {
'trainBatchSize':32,
'valiBatchSize':32,
'trainNumStepsPerLoop':4000,
'valiNumStepsPerLoop':313,
'numLoops':20,
'learningRate':0.005
}
class CNNFunctionCaller(NNFunctionCaller):
""" Function caller to be used in the MLP experiments. """
def __init__(self, *args, **kwargs):
super(CNNFunctionCaller, self).__init__(*args, **kwargs)
# Load data
self.data_file_str = self.train_params.data_dir
# Check tf_params
if not hasattr(self.train_params, 'tf_params'):
self.train_params.tf_params = get_default_cnn_tf_params()
def _eval_validation_score(self, nn, qinfo, noisy=False):
# pylint: disable=unused-argument
# pylint: disable=bare-except
""" Evaluates the validation score. """
os.environ['CUDA_VISIBLE_DEVICES'] = str(qinfo.worker_id)
num_tries = 0
succ_eval = False
# self.reporter.writeln('Evaluating %s on GPU %d.'%(nn, qinfo.worker_id))
while num_tries < _MAX_TRIES and not succ_eval:
try:
vali_error = run_tensorflow_cifar.compute_validation_error(nn, self.data_file_str,
qinfo.worker_id, self.train_params.tf_params, self.tmp_dir)
succ_eval = True
sleep(_SLEEP_BETWEEN_TRIES_SECS)
except:
num_tries += 1
self.reporter.writeln('********* Failed on try %d with gpu %d.'%(
num_tries, qinfo.worker_id))
traceback.print_exc()
traceback.print_exc(file=self.reporter.out)
return vali_error
|
tests/terraform/checks/resource/gcp/test_GoogleKMSKeyRotationPeriod.py | cclauss/checkov | 4,013 | 12627183 | import unittest
import hcl2
from checkov.terraform.checks.resource.gcp.GoogleKMSRotationPeriod import check
from checkov.common.models.enums import CheckResult
class TestGoogleKMSKeyRotationPeriod(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "google_kms_crypto_key" "key" {
name = "crypto-key-example"
key_ring = google_kms_key_ring.keyring.id
lifecycle {
prevent_destroy = true
}
}
""")
resource_conf = hcl_res['resource'][0]['google_kms_crypto_key']['key']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "google_kms_crypto_key" "key" {
name = "crypto-key-example"
key_ring = google_kms_key_ring.keyring.id
rotation_period = "100000s"
lifecycle {
prevent_destroy = true
}
}
""")
resource_conf = hcl_res['resource'][0]['google_kms_crypto_key']['key']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
livelossplot/outputs/bokeh_plot.py | Bartolo1024/livelossplot | 1,239 | 12627201 | from typing import List, Dict, Tuple
from livelossplot.main_logger import MainLogger, LogItem
from livelossplot.outputs.base_output import BaseOutput
class BokehPlot(BaseOutput):
"""Simple plugin for a bokeh framework"""
def __init__(
self,
max_cols: int = 2,
skip_first: int = 2,
cell_size: Tuple[int, int] = (400, 300),
output_file: str = './bokeh_output.html'
):
"""
Args:
max_cols: max number of charts in one row
skip_first: flag, skip first log
cell_size: size of one chart
output_file: file to save the output
"""
from bokeh import plotting, io, palettes
self.plotting = plotting
self.io = io
self.plot_width, self.plot_height = cell_size
self.max_cols = max_cols
self.skip_first = skip_first # think about it
self.figures = {}
self.is_notebook = False
self.output_file = output_file
self.colors = palettes.Category10[10]
def send(self, logger: MainLogger) -> None:
"""Draw figures with metrics and show"""
log_groups = logger.grouped_log_history()
new_grid_plot = False
for idx, (group_name, group_logs) in enumerate(log_groups.items(), start=1):
fig = self.figures.get(group_name)
if not fig:
fig = self.plotting.figure(title=group_name)
new_grid_plot = True
self.figures[group_name] = self._draw_metric_subplot(fig, group_logs)
if new_grid_plot:
self._create_grid_plot()
if self.is_notebook:
self.io.push_notebook(handle=self.target)
else:
self.plotting.save(self.grid)
def _draw_metric_subplot(self, fig, group_logs: Dict[str, List[LogItem]]):
"""
Args:
fig: bokeh Figure
group_logs: groups with list of log items
Notes:
            the return type annotation (-> self.plotting.Figure) is omitted because bokeh is imported locally
            earlier versions skipped the first few log points; that step is omitted here
"""
from bokeh.models import ColumnDataSource, HoverTool
for i, (name, logs) in enumerate(group_logs.items()):
if len(logs) > 0:
source = ColumnDataSource(
data={
'step': [log.step for log in logs],
'value': [log.value for log in logs],
}
)
fig.line(x='step', y='value', color=self.colors[i], legend_label=name, source=source)
fig.add_tools(
HoverTool(
tooltips=[
('step', '@step'),
('value', '@value{0.3f}'),
],
formatters={
'step': 'printf',
'value': 'printf',
},
mode='vline'
)
)
return fig
def _create_grid_plot(self):
rows = []
row = []
for idx, fig in enumerate(self.figures.values(), start=1):
row.append(fig)
if idx % self.max_cols == 0:
rows.append(row)
row = []
self.grid = self.plotting.gridplot(
rows, sizing_mode='scale_width', plot_width=self.plot_width, plot_height=self.plot_height
)
self.target = self.plotting.show(self.grid, notebook_handle=self.is_notebook)
def _set_output_mode(self, mode: str):
"""Set notebook or script mode"""
self.is_notebook = mode == 'notebook'
if self.is_notebook:
self.io.output_notebook()
else:
self.io.output_file(self.output_file)
|
scripts/readme_example_human_microbiome.py | ptallada/pysparkling | 260 | 12627209 | from pysparkling import Context
by_subject_rdd = Context().textFile(
's3n://human-microbiome-project/DEMO/HM16STR/46333/by_subject/*'
)
print(by_subject_rdd.takeSample(True, 1))
|
src/python/pants/option/options_bootstrapper_test.py | yoav-orca/pants | 1,806 | 12627229 | <reponame>yoav-orca/pants<gh_stars>1000+
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from functools import partial
from pathlib import Path
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.option.option_value_container import OptionValueContainer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.scope import ScopeInfo
from pants.util.contextutil import temporary_file, temporary_file_path
from pants.util.logging import LogLevel
class TestOptionsBootstrapper:
@staticmethod
def _config_path(path: str | None) -> list[str]:
if path is None:
return ["--pants-config-files=[]"]
return [f"--pants-config-files=['{path}']"]
def assert_bootstrap_options(
self,
*,
config: dict[str, str] | None = None,
env: dict[str, str] | None = None,
args: list[str] | None = None,
**expected_entries,
) -> None:
with temporary_file(binary_mode=False) as fp:
fp.write("[DEFAULT]\n")
if config:
for k, v in config.items():
fp.write(f"{k} = {repr(v)}\n")
fp.close()
args = [*self._config_path(fp.name), *(args or [])]
bootstrapper = OptionsBootstrapper.create(env=env or {}, args=args, allow_pantsrc=False)
vals = bootstrapper.get_bootstrap_options().for_global_scope()
vals_dict = {k: getattr(vals, k) for k in expected_entries}
assert expected_entries == vals_dict
def test_bootstrap_seed_values(self) -> None:
def assert_seed_values(
*,
config: dict[str, str] | None = None,
env: dict[str, str] | None = None,
args: list[str] | None = None,
workdir: str | None = None,
distdir: str | None = None,
) -> None:
self.assert_bootstrap_options(
config=config,
env=env,
args=args,
pants_workdir=workdir or os.path.join(get_buildroot(), ".pants.d"),
pants_distdir=distdir or os.path.join(get_buildroot(), "dist"),
)
# Check for valid default seed values
assert_seed_values()
# Check getting values from config, env and args.
assert_seed_values(
config={"pants_workdir": "/from_config/.pants.d"},
workdir="/from_config/.pants.d",
)
assert_seed_values(args=["--pants-distdir=/from_args/dist"], distdir="/from_args/dist")
# Check that args > env > config.
assert_seed_values(
config={
"pants_workdir": "/from_config/.pants.d",
"pants_distdir": "/from_config/dist",
},
args=["--pants-distdir=/from_args/dist"],
workdir="/from_config/.pants.d",
distdir="/from_args/dist",
)
# Check that unrelated args and config don't confuse us.
assert_seed_values(
config={
"pants_workdir": "/from_config/.pants.d",
"pants_distdir": "/from_config/dist",
"unrelated": "foo",
},
env={
"PANTS_DISTDIR": "/from_env/dist",
"PANTS_NO_RELATIONSHIP": "foo",
},
args=["--pants-distdir=/from_args/dist", "--foo=bar", "--baz"],
workdir="/from_config/.pants.d",
distdir="/from_args/dist",
)
def test_bootstrap_bool_option_values(self) -> None:
# Check the default.
self.assert_bootstrap_options(pantsrc=True)
assert_pantsrc_is_false = partial(self.assert_bootstrap_options, pantsrc=False)
assert_pantsrc_is_false(args=["--no-pantsrc"])
assert_pantsrc_is_false(config={"pantsrc": "false"})
assert_pantsrc_is_false(env={"PANTS_PANTSRC": "False"})
def test_create_bootstrapped_options(self) -> None:
# Check that we can set a bootstrap option from a cmd-line flag and have that interpolate
# correctly into regular config.
with temporary_file(binary_mode=False) as fp:
fp.write(
dedent(
"""
[foo]
bar = "%(pants_workdir)s/baz"
[fruit]
apple = "%(pants_distdir)s/banana"
"""
)
)
fp.close()
args = ["--pants-workdir=/qux"] + self._config_path(fp.name)
bootstrapper = OptionsBootstrapper.create(
env={"PANTS_DISTDIR": "/pear"}, args=args, allow_pantsrc=False
)
opts = bootstrapper.full_options_for_scopes(
known_scope_infos=[
ScopeInfo(""),
ScopeInfo("foo"),
ScopeInfo("fruit"),
]
)
# So we don't choke on these on the cmd line.
opts.register("", "--pants-workdir")
opts.register("", "--pants-config-files")
opts.register("foo", "--bar")
opts.register("fruit", "--apple")
assert "/qux/baz" == opts.for_scope("foo").bar
assert "/pear/banana" == opts.for_scope("fruit").apple
def test_bootstrapped_options_ignore_irrelevant_env(self) -> None:
included = "PANTS_DISTDIR"
excluded = "NON_PANTS_ENV"
bootstrapper = OptionsBootstrapper.create(
env={excluded: "pear", included: "banana"}, args=[], allow_pantsrc=False
)
assert included in bootstrapper.env
assert excluded not in bootstrapper.env
def test_create_bootstrapped_multiple_pants_config_files(self) -> None:
"""When given multiple config files, the later files should take precedence when options
conflict."""
def create_options_bootstrapper(*config_paths: str) -> OptionsBootstrapper:
return OptionsBootstrapper.create(
env={},
args=[f"--pants-config-files={cp}" for cp in config_paths],
allow_pantsrc=False,
)
def assert_config_read_correctly(
options_bootstrapper: OptionsBootstrapper,
*,
expected_worker_count: int,
) -> None:
options = options_bootstrapper.full_options_for_scopes(
known_scope_infos=[
ScopeInfo(""),
ScopeInfo("compile_apt"),
ScopeInfo("fruit"),
],
)
# So we don't choke on these on the cmd line.
options.register("", "--pants-config-files", type=list)
options.register("", "--config-override", type=list)
options.register("compile_apt", "--worker-count")
options.register("fruit", "--apple")
assert str(expected_worker_count) == options.for_scope("compile_apt").worker_count
assert "red" == options.for_scope("fruit").apple
with temporary_file(binary_mode=False) as fp1, temporary_file(binary_mode=False) as fp2:
fp1.write(
dedent(
"""\
[compile_apt]
worker_count = 1
[fruit]
apple = "red"
"""
)
)
fp2.write(
dedent(
"""\
[compile_apt]
worker_count = 2
"""
)
)
fp1.close()
fp2.close()
assert_config_read_correctly(
create_options_bootstrapper(fp1.name),
expected_worker_count=1,
)
assert_config_read_correctly(
create_options_bootstrapper(fp1.name, fp2.name),
expected_worker_count=2,
)
assert_config_read_correctly(
create_options_bootstrapper(fp2.name, fp1.name),
expected_worker_count=1,
)
def test_options_pantsrc_files(self) -> None:
def create_options_bootstrapper(*config_paths: str) -> OptionsBootstrapper:
return OptionsBootstrapper.create(
env={},
args=[f"--pantsrc-files={cp}" for cp in config_paths],
allow_pantsrc=True,
)
with temporary_file(binary_mode=False) as fp:
fp.write(
dedent(
"""
[resolver]
resolver = "coursier"
"""
)
)
fp.close()
bootstrapped_options = create_options_bootstrapper(fp.name)
opts_single_config = bootstrapped_options.full_options_for_scopes(
known_scope_infos=[
ScopeInfo(""),
ScopeInfo("resolver"),
]
)
opts_single_config.register("", "--pantsrc-files", type=list)
opts_single_config.register("resolver", "--resolver")
assert "coursier" == opts_single_config.for_scope("resolver").resolver
def test_full_options_caching(self) -> None:
with temporary_file_path() as config:
args = self._config_path(config)
bootstrapper = OptionsBootstrapper.create(env={}, args=args, allow_pantsrc=False)
opts1 = bootstrapper.full_options_for_scopes(
known_scope_infos=[
ScopeInfo(""),
ScopeInfo("foo"),
]
)
opts2 = bootstrapper.full_options_for_scopes(
known_scope_infos=[
ScopeInfo("foo"),
ScopeInfo(""),
]
)
assert opts1 is opts2
opts3 = bootstrapper.full_options_for_scopes(
known_scope_infos=[
ScopeInfo(""),
ScopeInfo("foo"),
ScopeInfo(""),
]
)
assert opts1 is opts3
opts4 = bootstrapper.full_options_for_scopes(known_scope_infos=[ScopeInfo("")])
assert opts1 is not opts4
opts5 = bootstrapper.full_options_for_scopes(known_scope_infos=[ScopeInfo("")])
assert opts4 is opts5
assert opts1 is not opts5
def test_bootstrap_short_options(self) -> None:
def parse_options(*args: str) -> OptionValueContainer:
full_args = [*args, *self._config_path(None)]
return (
OptionsBootstrapper.create(env={}, args=full_args, allow_pantsrc=False)
.get_bootstrap_options()
.for_global_scope()
)
# No short options passed - defaults presented.
vals = parse_options()
assert vals.logdir is None
assert LogLevel.INFO == vals.level
# Unrecognized short options passed and ignored - defaults presented.
vals = parse_options("-_UnderscoreValue", "-^")
assert vals.logdir is None
assert LogLevel.INFO == vals.level
vals = parse_options("-d/tmp/logs", "-ldebug")
assert "/tmp/logs" == vals.logdir
assert LogLevel.DEBUG == vals.level
def test_bootstrap_options_passthrough_dup_ignored(self) -> None:
def parse_options(*args: str) -> OptionValueContainer:
full_args = [*args, *self._config_path(None)]
return (
OptionsBootstrapper.create(env={}, args=full_args, allow_pantsrc=False)
.get_bootstrap_options()
.for_global_scope()
)
vals = parse_options("main", "args", "-d/tmp/frogs", "--", "-d/tmp/logs")
assert "/tmp/frogs" == vals.logdir
vals = parse_options("main", "args", "--", "-d/tmp/logs")
assert vals.logdir is None
def test_bootstrap_options_explicit_config_path(self) -> None:
def config_path(*args, **env):
return OptionsBootstrapper.get_config_file_paths(env, args)
assert ["/foo/bar/pants.toml"] == config_path(
"main", "args", "--pants-config-files=['/foo/bar/pants.toml']"
)
assert ["/from/env1", "/from/env2"] == config_path(
"main", "args", PANTS_CONFIG_FILES="['/from/env1', '/from/env2']"
)
assert ["/from/flag"] == config_path(
"main",
"args",
"-x",
"--pants-config-files=['/from/flag']",
"goal",
"--other-flag",
PANTS_CONFIG_FILES="['/from/env']",
)
# Test appending to the default.
assert [f"{get_buildroot()}/pants.toml", "/from/env", "/from/flag"] == config_path(
"main",
"args",
"-x",
"--pants-config-files=+['/from/flag']",
"goal",
"--other-flag",
PANTS_CONFIG_FILES="+['/from/env']",
)
# Test replacing the default, then appending.
assert ["/from/env", "/from/flag"] == config_path(
"main",
"args",
"-x",
"--pants-config-files=+['/from/flag']",
"goal",
"--other-flag",
PANTS_CONFIG_FILES="['/from/env']",
)
assert ["/from/flag"] == config_path(
"main",
"args",
"-x",
"--pants-config-files=['/from/flag']",
"goal",
"--other-flag",
PANTS_CONFIG_FILES="+['/from/env']",
)
def test_setting_pants_config_in_config(self, tmp_path: Path) -> None:
# Test that setting pants_config in the config file has no effect.
config1 = tmp_path / "config1"
config2 = tmp_path / "config2"
config1.write_text(f"[DEFAULT]\npants_config_files = ['{config2}']\nlogdir = 'logdir1'\n")
config2.write_text("[DEFAULT]\nlogdir = 'logdir2'\n")
ob = OptionsBootstrapper.create(
env={}, args=[f"--pants-config-files=['{config1.as_posix()}']"], allow_pantsrc=False
)
logdir = ob.get_bootstrap_options().for_global_scope().logdir
assert "logdir1" == logdir
def test_alias_pyupgrade(self, tmp_path: Path) -> None:
config = tmp_path / "config"
config.write_text(
dedent(
"""\
[cli.alias]
pyupgrade = "--backend-packages=pants.backend.python.lint.pyupgrade fmt"
"""
)
)
config_arg = f"--pants-config-files=['{config.as_posix()}']"
ob = OptionsBootstrapper.create(env={}, args=[config_arg, "pyupgrade"], allow_pantsrc=False)
assert (
config_arg,
"--backend-packages=pants.backend.python.lint.pyupgrade",
"fmt",
) == ob.args
assert (
"./pants",
config_arg,
"--backend-packages=pants.backend.python.lint.pyupgrade",
) == ob.bootstrap_args
|
src/tfi/doc/example_code_generators/__init__.py | ajbouh/tfi | 160 | 12627246 | from tfi.doc.example_code_generators.json import Json as _Json
from tfi.doc.example_code_generators.tensorflow_grpc import TensorFlowGrpc as _TensorFlowGrpc
_EXAMPLE_CODE_GENERATORS = {}
def example_code_generator(name):
return _EXAMPLE_CODE_GENERATORS[name]
def _register_code_generator(ex):
_EXAMPLE_CODE_GENERATORS[ex.name] = ex
_register_code_generator(_Json())
# _register_code_generator(_Python())
_register_code_generator(_TensorFlowGrpc())
|
external/mesh-fusion/librender/test.py | FooVizDevs/occupancy_networks | 801 | 12627261 | import pyrender
import numpy as np
from matplotlib import pyplot
import math
# render settings
img_h = 480
img_w = 480
fx = 480.
fy = 480.
cx = 240
cy = 240
def model():
# note that xx is height here!
xx = -0.2
yy = -0.2
zz = -0.2
v000 = (xx, yy, zz) # 0
v001 = (xx, yy, zz + 0.4) # 1
v010 = (xx, yy + 0.4, zz) # 2
v011 = (xx, yy + 0.4, zz + 0.4) # 3
v100 = (xx + 0.4, yy, zz) # 4
v101 = (xx + 0.4, yy, zz + 0.4) # 5
v110 = (xx + 0.4, yy + 0.4, zz) # 6
v111 = (xx + 0.4, yy + 0.4, zz + 0.4) # 7
f1 = [0, 2, 4]
f2 = [4, 2, 6]
f3 = [1, 3, 5]
f4 = [5, 3, 7]
f5 = [0, 1, 2]
f6 = [1, 3, 2]
f7 = [4, 5, 7]
f8 = [4, 7, 6]
f9 = [4, 0, 1]
f10 = [4, 5, 1]
f11 = [2, 3, 6]
f12 = [3, 7, 6]
vertices = []
vertices.append(v000)
vertices.append(v001)
vertices.append(v010)
vertices.append(v011)
vertices.append(v100)
vertices.append(v101)
vertices.append(v110)
vertices.append(v111)
faces = []
faces.append(f1)
faces.append(f2)
faces.append(f3)
faces.append(f4)
faces.append(f5)
faces.append(f6)
faces.append(f7)
faces.append(f8)
faces.append(f9)
faces.append(f10)
faces.append(f11)
faces.append(f12)
return vertices, faces
def render(vertices, faces):
x = 0
y = math.pi/4
z = 0
R_x = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
R_y = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
R_z = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
R = R_z.dot(R_y.dot(R_x))
np_vertices = np.array(vertices).astype(np.float64)
np_vertices = R.dot(np_vertices.T).T
np_vertices[:, 2] += 1.5
np_faces = np.array(faces).astype(np.float64)
np_faces += 1
depthmap, mask, img = pyrender.render(np_vertices.T.copy(), np_faces.T.copy(), np.array([fx, fy, cx, cy]), np.array([1., 2.]), np.array([img_h, img_w], dtype=np.int32))
pyplot.imshow(depthmap)
pyplot.show()
pyplot.imshow(img)
pyplot.show()
if __name__ == '__main__':
vertices, faces = model()
render(vertices, faces)
|
TouTiao/toutiao.py | wangbl11/ECommerceCrawlers | 3,469 | 12627267 | import requests
from requests.exceptions import ConnectionError
from lxml import etree
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
import pandas as pd
from urllib.parse import quote
import re
from fake_useragent import UserAgent
import random
base_url = 'https://www.toutiao.com/api/search/content/'
timestamp = int(time.time()*1000)
ua = UserAgent(verify_ssl=False)
article_url_list = []
csv_name = pd.read_csv("typhoon_toutiao.csv")
page_urls = ["http://dev.kdlapi.com/testproxy",
"https://dev.kdlapi.com/testproxy",
]
# Tunnel proxy server
tunnel_host = "tps189.kdlapi.com"
tunnel_port = "15818"
# Tunnel username and password
tid = "t17888082960619"
password = "<PASSWORD>"
proxies = {
"http": "http://%s:%s@%s:%s/" % (tid, password, tunnel_host, tunnel_port),
"https": "https://%s:%s@%s:%s/" % (tid, password, tunnel_host, tunnel_port)
}
# Avoid duplicates
constract_list = []
# Collect all article URLs from each search result page
def get_article_urls(name):
decde = quote(name)
referer = 'https://www.toutiao.com/search/?keyword='+decde
    for offset in range(0, 120, 20):  # search results span about 10 pages, so stop at 120; sometimes there are fewer
params = {
'aid': 24,
'app_name': 'web_search',
'offset': offset,
'format': 'json',
'keyword': name,
'autoload': 'true',
'count': 20,
'en_qc': 1,
'cur_tab': 1,
'from': 'search_tab',
'pd': 'synthesis',
'timestamp': timestamp
}
headers = {
'cookie': 'tt_webid=6781305717874820616; WEATHER_CITY=%E5%8C%97%E4%BA%AC; tt_webid=6781305717874820616; s_v_web_id=59cfa658a89df645e8a82f1618a81bd0; __tasessionId=g8ptymp5v1579144106433',
'user-agent': ua.random,
'x-requested-with': 'XMLHttpRequest',
'referer': referer,
}
html = requests.get(url=base_url, params=params,
headers=headers, proxies=proxies)
result = list(html.json().get('data'))
for item in result:
            article_url = item.get('article_url') # extract each article's URL
if article_url and len(article_url) < 100 and (".mp4" not in article_url) and "toutiao.com" in article_url:
if '/group/' in article_url:
article_url = article_url.replace(
'/group/', '/a').replace('http://', 'https://www.')
article_url_list.append(article_url)
print(article_url)
def request_AND_storage(name):
filename = name+".csv"
try:
get_article_urls(name)
except Exception as e:
print(e)
browser = webdriver.Chrome()
time.sleep(2)
for url in article_url_list:
print(url)
try:
browser.get(url)
time.sleep(1)
text_res = browser.find_element_by_xpath(
'//div[@class="article-box"]')
print(text_res)
text_res = text_res.text
print(text_res)
with open(filename, 'a', encoding='utf-8') as f:
writer = csv.writer(f)
L = [name, text_res]
writer.writerow(L)
except:
continue
browser.close()
if __name__ == '__main__':
try:
request_AND_storage('武汉疫情')
article_url_list = []
time.sleep(10)
except Exception as e:
print(e)
article_url_list = []
time.sleep(1)
|
demo3/wiki/__init__.py | DerekDick/CrawlerDemos | 1,061 | 12627272 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-5-29
@author: Chine
'''
import os
import re
import urlparse
from datetime import datetime
from cola.core.urls import UrlPatterns, Url
from cola.core.parsers import Parser
from cola.core.opener import MechanizeOpener
from cola.core.errors import DependencyNotInstalledError
from cola.core.config import Config
from cola.job import JobDescription
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
from bs4 import BeautifulSoup
except ImportError:
raise DependencyNotInstalledError('BeautifulSoup4')
try:
from dateutil.parser import parse
except ImportError:
raise DependencyNotInstalledError('python-dateutil')
try:
from mongoengine import connect, DoesNotExist, \
Document, StringField, DateTimeField
except ImportError:
raise DependencyNotInstalledError('mongoengine')
get_user_conf = lambda s: os.path.join(os.path.dirname(os.path.abspath(__file__)), s)
user_conf = get_user_conf('test.yaml')
if not os.path.exists(user_conf):
user_conf = get_user_conf('wiki.yaml')
user_config = Config(user_conf)
starts = [start.url for start in user_config.job.starts]
mongo_host = user_config.job.mongo.host
mongo_port = user_config.job.mongo.port
db_name = user_config.job.db
connect(db_name, host=mongo_host, port=mongo_port)
class WikiDocument(Document):
title = StringField()
content = StringField()
last_update = DateTimeField()
class WikiParser(Parser):
def __init__(self, opener=None, url=None, **kw):
super(WikiParser, self).__init__(opener=opener, url=url, **kw)
if self.opener is None:
self.opener = MechanizeOpener()
self.html_comment_reg = re.compile(r'<!--[^-]+-->', re.DOTALL)
self.en_time_reg = re.compile(r'\d{1,2} [A-Z][a-z]{2,} \d{4} at \d{1,2}:\d{1,2}')
self.zh_time_reg = re.compile(ur'\d{4}年\d{1,2}月\d{1,2}日 \(.+\) \d{1,2}:\d{1,2}')
def store(self, title, content, last_update):
try:
doc = WikiDocument.objects.get(title=title)
if last_update > doc.last_update:
doc.content = content
doc.last_update = last_update
doc.update(upsert=True)
except DoesNotExist:
doc = WikiDocument(title=title, content=content, last_update=last_update)
doc.save()
def _extract(self, soup):
if soup.head is None:
return None, None, None
title = soup.head.title.text
if '-' in title:
title = title.split('-')[0].strip()
content = soup.find('div', attrs={'id': 'mw-content-text', 'class': 'mw-content-ltr'})
while content.table is not None:
content.table.extract()
content = content.text
last_update_str = soup.find('li', attrs={'id': 'footer-info-lastmod'}).text
last_update = None
match_en_time = self.en_time_reg.search(last_update_str)
if match_en_time:
last_update = match_en_time.group()
last_update = parse(last_update)
match_zh_time = self.zh_time_reg.search(last_update_str)
if match_zh_time:
last_update = match_zh_time.group()
last_update = re.sub(r'\([^\)]+\)\s', '', last_update)
last_update = last_update.replace(u'年', '-').replace(u'月', '-').replace(u'日', '')
last_update = parse(last_update)
if last_update is None:
last_update = datetime.now()
return title, content, last_update
def parse(self, url=None):
url = url or self.url
lang = url.strip('http://').split('.', 1)[0]
br = self.opener.browse_open(url)
html = br.response().read()
html = self.html_comment_reg.sub('', html)
soup = BeautifulSoup(html)
title, content, last_update = self._extract(soup)
if title is None:
return
title = title + ' ' + lang
self.store(title, content, last_update)
def _is_same(out_url):
return out_url.rsplit('#', 1)[0] == url
for link in br.links():
if link.url.startswith('http://'):
out_url = link.url
if not _is_same(out_url):
yield out_url
else:
out_url = urlparse.urljoin(link.base_url, link.url)
if not _is_same(out_url):
yield out_url
url_patterns = UrlPatterns(
Url(r'^http://(zh|en).wikipedia.org/wiki/[^(:|/)]+$', 'wiki_page', WikiParser)
)
def get_job_desc():
return JobDescription('wikipedia crawler', url_patterns, MechanizeOpener,
user_config, starts)
if __name__ == "__main__":
from cola.context import Context
ctx = Context(local_mode=True)
ctx.run_job(os.path.dirname(os.path.abspath(__file__))) |
docs/the_guide/first.2.py | asnt/moderngl | 916 | 12627371 | import moderngl
ctx = moderngl.create_standalone_context()
prog = ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec3 in_color;
out vec3 v_color;
void main() {
v_color = in_color;
gl_Position = vec4(in_vert, 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
in vec3 v_color;
out vec3 f_color;
void main() {
f_color = v_color;
}
''',
)
|
tests/test_04_dxf_high_level_structs/test_420_load_dxf_file.py | jkjt/ezdxf | 515 | 12627372 | # Copyright (c) 2019-2020 <NAME>
# License: MIT License
import pytest
import ezdxf
@pytest.fixture(scope="module", params=["R12", "R2000"])
def dxf(request, tmpdir_factory):
doc = ezdxf.new()
msp = doc.modelspace()
msp.add_line((0, 0), (1, 0))
psp = doc.layout()
psp.add_circle((0, 0), 1)
filename = tmpdir_factory.mktemp(request.param).join("test.dxf")
doc.dxfversion = request.param
doc.saveas(filename)
return filename
def test_load_dxf(dxf):
doc = ezdxf.readfile(dxf)
msp = doc.modelspace()
assert len(msp) == 1
assert msp[0].dxftype() == "LINE"
psp = doc.layout()
assert len(psp) == 1
assert psp[0].dxftype() == "CIRCLE"
|
testPyCallJs.py | icarito/guy | 194 | 12627379 | #!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
import guy
class TestPyCallJs(guy.Guy):
"""
<script>
function myjsmethod(a,b) {
document.body.innerHTML+= `sync call (${a},${b})<br>`;
return Math.random();
}
async function myLONGjsmethodAsync(a,b) {
document.body.innerHTML+= `async call long (${a},${b})...`;
await new Promise(r => setTimeout(r, 2000));
document.body.innerHTML+= `...ok<br>`;
return Math.random();
}
async function myjsmethodAsync(a,b) {
document.body.innerHTML+= `async call (${a},${b})<br>`;
return Math.random();
}
function myKAPUTTjsmethod() {
callInError(); // raise an exception on js side
}
async function myKAPUTTjsmethodAsync() {
callInError(); // raise an exception on js side
}
</script>
<button onclick="self.test_ok()">call js ok</button>
<button onclick="self.test_ok_async()">call async js ok</button>
<button onclick="self.test_long_async()">call async long js ok</button>
<button onclick="self.test_NF()">call js not found</button>
<button onclick="self.test_ko()">call js ko</button>
<button onclick="self.test_ko_async()">call async js ko</button>
<button onclick="self.test_prompt()">test promt()</button>
<br/>
"""
size=(500, 300) # set the size of the client window
async def test_prompt(self):
name = await self.js.prompt("What's your name ?")
print("==========js returns=========>",name)
return "ok prompt"
async def test_ok(self):
r=await self.js.myjsmethod("Python World!",42)
print("==========js returns=========>",r)
return "ok sync"
async def test_ok_async(self):
r=await self.js.myjsmethodAsync("Python World!",44)
print("==========js returns=========>",r)
return "ok async"
async def test_long_async(self):
r=await self.js.myLONGjsmethodAsync("Python World!",45)
print("==========js returns=========>",r)
return "ok async"
async def test_NF(self):
r=await self.js.myUNDECLAREDjsmethod()
print("==========js returns=========>",r)
return "nf"
async def test_ko(self):
r=await self.js.myKAPUTTjsmethod()
print("==========js returns=========>",r)
return "ko"
async def test_ko_async(self):
r=await self.js.myKAPUTTjsmethodAsync()
print("==========js returns=========>",r)
return "ko"
if __name__ == "__main__":
TestPyCallJs().run(log=True) |
ubteacher/data/dataset_mapper.py | Feobi1999/unbiased-teacher | 306 | 12627397 | <reponame>Feobi1999/unbiased-teacher
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from PIL import Image
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from ubteacher.data.detection_utils import build_strong_augmentation
class DatasetMapperTwoCropSeparate(DatasetMapper):
"""
This customized mapper produces two augmented images from a single image
instance. This mapper makes sure that the two augmented images have the same
cropping and thus the same size.
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
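    Example (a minimal sketch; ``cfg`` and ``dataset_dict`` are assumed to come
    from a standard Detectron2 config/dataset setup, not from this file):
        mapper = DatasetMapperTwoCropSeparate(cfg, is_train=True)
        strong_dict, weak_dict = mapper(dataset_dict)
        # strong_dict["image"] holds the strongly augmented view and
        # weak_dict["image"] the weakly augmented view of the same crop;
        # both carry the same "instances" ground truth.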
"""
def __init__(self, cfg, is_train=True):
self.augmentation = utils.build_augmentation(cfg, is_train)
# include crop into self.augmentation
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.compute_tight_boxes = True
else:
self.compute_tight_boxes = False
self.strong_augmentation = build_strong_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = cfg.MODEL.MASK_ON
self.mask_format = cfg.INPUT.MASK_FORMAT
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
# fmt: on
if self.keypoint_on and is_train:
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
aug_input = T.StandardAugInput(image, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image_weak_aug, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image_weak_aug.shape[:2] # h, w
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
if self.compute_tight_boxes and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
bboxes_d2_format = utils.filter_empty_instances(instances)
dataset_dict["instances"] = bboxes_d2_format
# apply strong augmentation
        # We use torchvision augmentation, which is not compatible with
        # detectron2's numpy image format. Thus, we need to
        # convert to PIL format first.
image_pil = Image.fromarray(image_weak_aug.astype("uint8"), "RGB")
image_strong_aug = np.array(self.strong_augmentation(image_pil))
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image_strong_aug.transpose(2, 0, 1))
)
dataset_dict_key = copy.deepcopy(dataset_dict)
dataset_dict_key["image"] = torch.as_tensor(
np.ascontiguousarray(image_weak_aug.transpose(2, 0, 1))
)
assert dataset_dict["image"].size(1) == dataset_dict_key["image"].size(1)
assert dataset_dict["image"].size(2) == dataset_dict_key["image"].size(2)
return (dataset_dict, dataset_dict_key)
|
env/Lib/site-packages/plotly/express/data/__init__.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 11,750 | 12627425 | <reponame>andresgreen-byte/Laboratorio-1--Inversion-de-Capital
"""Built-in datasets for demonstration, educational and test purposes.
"""
from __future__ import absolute_import
from plotly.data import *
__all__ = [
"carshare",
"election",
"election_geojson",
"experiment",
"gapminder",
"iris",
"medals_wide",
"medals_long",
"stocks",
"tips",
"wind",
]
|
rpython/jit/backend/ppc/test/test_loop_unroll.py | nanjekyejoannah/pypy | 381 | 12627430 | <filename>rpython/jit/backend/ppc/test/test_loop_unroll.py
import py
from rpython.jit.backend.ppc.test.support import JitPPCMixin
from rpython.jit.metainterp.test import test_loop_unroll
class TestLoopSpec(JitPPCMixin, test_loop_unroll.LoopUnrollTest):
# for the individual tests see
# ====> ../../../metainterp/test/test_loop.py
pass
|
example/tests/unit/test_pagination.py | anehx/django-rest-framework-json-api | 1,011 | 12627438 | <reponame>anehx/django-rest-framework-json-api
from collections import OrderedDict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework.utils.urls import replace_query_param
from rest_framework_json_api import pagination
factory = APIRequestFactory()
class TestLimitOffset:
"""
Unit tests for `pagination.JsonApiLimitOffsetPagination`.
"""
def setup(self):
class ExamplePagination(pagination.JsonApiLimitOffsetPagination):
default_limit = 10
max_limit = 15
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
self.base_url = "http://testserver/"
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_test_request(self, arguments):
return Request(factory.get("/", arguments))
def test_valid_offset_limit(self):
"""
Basic test, assumes offset and limit are given.
"""
offset = 10
limit = 5
count = len(self.queryset)
last_offset = (count // limit) * limit
next_offset = 15
prev_offset = 5
request = self.get_test_request(
{
self.pagination.limit_query_param: limit,
self.pagination.offset_query_param: offset,
}
)
base_url = replace_query_param(
self.base_url, self.pagination.limit_query_param, limit
)
last_url = replace_query_param(
base_url, self.pagination.offset_query_param, last_offset
)
first_url = base_url
next_url = replace_query_param(
base_url, self.pagination.offset_query_param, next_offset
)
prev_url = replace_query_param(
base_url, self.pagination.offset_query_param, prev_offset
)
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_offset = offset + limit
expected_content = {
"results": list(range(offset + 1, next_offset + 1)),
"links": OrderedDict(
[
("first", first_url),
("last", last_url),
("next", next_url),
("prev", prev_url),
]
),
"meta": {
"pagination": OrderedDict(
[
("count", count),
("limit", limit),
("offset", offset),
]
)
},
}
assert queryset == list(range(offset + 1, next_offset + 1))
assert content == expected_content
|
pytorch/wrapper/bilateralfilter/setup.py | Pandinosaurus/rloss | 185 | 12627451 | #File: setup.py
#!/usr/bin/python
from distutils.core import setup, Extension
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
pht_module = Extension('_bilateralfilter',
sources=['bilateralfilter_wrap.cxx',
'bilateralfilter.cpp',
'permutohedral.cpp'
],
extra_compile_args = ["-fopenmp"],
include_dirs = [numpy_include]
)
setup(name = 'bilateralfilter',
version = '0.1',
author = 'SWIG Docs',
description = 'Simple swig pht from docs',
ext_modules = [pht_module],
py_modules = ['bilateralfilter'],
)
|
srcs/python/kungfu/tensorflow/optimizers/sma_sgd.py | Pandinosaurus/KungFu | 291 | 12627471 | import tensorflow as tf
from kungfu.tensorflow.compat import _tf_assign
from kungfu.tensorflow.ops import current_cluster_size, group_all_reduce
from .core import (_create_kungfu_keras_optimizer, _create_kungfu_optimizer,
_KungFuAlgorithm)
def SynchronousAveragingOptimizer(optimizer,
name=None,
alpha=0.1,
use_locking=False,
with_keras=False):
"""SynchronousAveragingOptimizer implements the [SMA]_ algorithm.
[EA-SGD]_ proposed using model averaging to train deep learning models and proved its convergence.
[SMA]_ further improves on the [EA-SGD]_ results and shows that model averaging can benefit small-batch training
and achieve fast convergence compared to synchronous SGD.
.. [EA-SGD] Deep learning with Elastic Averaging SGD, NIPS 2015, `EA-SGD Paper <https://arxiv.org/abs/1412.6651>`_
.. [SMA] CrossBow: Scaling Deep Learning with Small Batch Sizes on Multi-GPU Servers, VLDB 2019, `SMA Paper <http://www.vldb.org/pvldb/vol12/p1399-koliousis.pdf>`_
Arguments:
optimizer {tf.train.Optimizer, tf.keras.optimizers.Optimizer} -- Optimizer to use for computing gradients and applying updates.
Keyword Arguments:
- name {str} -- name prefix for the operations created when applying gradients. Defaults to "KungFu" followed by the provided optimizer type. (default: {None})
- alpha {float} -- the ratio of a central model during averaging (Check the SMA and EA-SGD papers for its intuition). (default: {0.1})
- use_locking {bool} -- Whether to use locking when updating variables. (default: {False})
- with_keras {bool} -- Runs with pure Keras or not (default: {False})
Raises:
TypeError: Wrapped optimizer is not a subclass of tf.train.Optimizer or tf.keras.optimizers.Optimizer
Returns:
optimizer {tf.train.Optimizer, tf.keras.optimizers.Optimizer} -- KungFu distributed optimizer
"""
sma_algo = _SynchronousAveraging(alpha)
if not with_keras:
return _create_kungfu_optimizer(optimizer, sma_algo, name, use_locking)
else:
return _create_kungfu_keras_optimizer(optimizer, sma_algo)
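# Usage sketch (illustrative only, not part of the original module). It assumes a
# TF1-style graph, a KungFu-launched multi-worker job, and a `loss` tensor defined
# elsewhere:
#
#   opt = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
#   opt = SynchronousAveragingOptimizer(opt, alpha=0.1)
#   train_op = opt.minimize(loss)  # each worker steps locally; models are averaged every iteration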
class _SynchronousAveraging(_KungFuAlgorithm):
def __init__(self, alpha):
self._num_workers = current_cluster_size()
self._alpha = alpha
def apply_gradients(self, apply_grads_func, grads_and_vars, **kwargs):
gradients, variables = list(zip(*grads_and_vars))
# filter out grad == None
filtered_variables = [
var for (grad, var) in list(zip(gradients, variables))
if grad is not None
]
# It is important to apply model averaging every iteration [2]
sum_vars = group_all_reduce(filtered_variables)
avg_vars = [g / self._num_workers for g in sum_vars]
# TODO: Apply momentum to the averaged model [2]
assign_ops = [
_tf_assign(v, (1 - self._alpha) * v + self._alpha * avg_v)
for v, avg_v in zip(filtered_variables, avg_vars)
]
# We need to re-zip gradients and variables, as grads_and_vars can only be unzipped once.
new_grads_and_vars = zip(gradients, variables)
# We can overlap model averaging and local SGD [2].
with tf.control_dependencies(assign_ops):
return apply_grads_func(new_grads_and_vars, **kwargs)
|
search_relax/dataset.py | latstars/DADA | 160 | 12627497 | <reponame>latstars/DADA
import torch
import torchvision
from operation import apply_augment
from torch.utils.data import SubsetRandomSampler, Sampler, Subset, ConcatDataset
from sklearn.model_selection import StratifiedShuffleSplit
from primitives import sub_policies
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torchvision import transforms
from PIL import Image
from imagenet import ImageNet
from operation import Lighting
import os
import numpy as np
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class SubsetSampler(Sampler):
r"""Samples elements from a given list of indices, without replacement.
Arguments:
indices (sequence): a sequence of indices
"""
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return (i for i in self.indices)
def __len__(self):
return len(self.indices)
class AugmentDataset(torch.utils.data.Dataset):
def __init__(self, dataset, pre_transforms, after_transforms, valid_transforms, ops_names, search, magnitudes):
super(AugmentDataset, self).__init__()
self.dataset = dataset
self.pre_transforms = pre_transforms
self.after_transforms = after_transforms
self.valid_transforms = valid_transforms
self.ops_names = ops_names
self.search = search
self.magnitudes = magnitudes
def __getitem__(self, index):
if self.search:
# start_time = time.time()
img, target = self.dataset.__getitem__(index)
img = self.pre_transforms(img)
magnitude = self.magnitudes.clamp(0, 1)[self.weights_index.item()]
sub_policy = self.ops_names[self.weights_index.item()]
probability_index = self.probabilities_index[self.weights_index.item()]
image = img
for i, ops_name in enumerate(sub_policy):
if probability_index[i].item() != 0.0:
image = apply_augment(image, ops_name, magnitude[i])
image = self.after_transforms(image)
return image, target
# outs = [None for i in range(2**len(sub_policy)) ]
# def dfs(image, index, depth):
# if depth == len(sub_policy):
# # print(index)
# outs[index] = self.after_transforms(image)
# return
# dfs(image, index, depth+1)
# new_image = apply_augment(image, sub_policy[depth], magnitude[depth])
# dfs(new_image, (1<<depth) + index, depth+1)
# dfs(img, 0, 0)
# image = img
# for i, ops_name in enumerate(sub_policy):
# image = apply_augment(image, ops_name, magnitude[i])
# image = self.after_transforms(image)
# print(self.magnitudes)
# print(self.weights_index)
# end_time = time.time()
# print("%f" % (end_time - start_time))
# return tuple(outs), target
else:
img, target = self.dataset.__getitem__(index)
if self.valid_transforms is not None:
img = self.valid_transforms(img)
return img, target
def __len__(self):
return self.dataset.__len__()
_IMAGENET_PCA = {
'eigval': [0.2175, 0.0188, 0.0045],
'eigvec': [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
}
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
def num_class(dataset):
return {
'cifar10': 10,
'reduced_cifar10': 10,
'cifar10.1': 10,
'cifar100': 100,
'reduced_cifar100': 100,
'svhn': 10,
'reduced_svhn': 10,
'imagenet': 1000,
'reduced_imagenet': 120,
}[dataset]
def get_dataloaders(dataset, batch, num_workers, dataroot, ops_names, magnitudes, cutout, cutout_length, split=0.5, split_idx=0, target_lb=-1):
if 'cifar' in dataset or 'svhn' in dataset:
transform_train_pre = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
])
transform_train_after = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
])
elif 'imagenet' in dataset:
transform_train_pre = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.08, 1.0), interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
])
transform_train_after = transforms.Compose([
transforms.ToTensor(),
Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
transform_test = transforms.Compose([
transforms.Resize(256, interpolation=Image.BICUBIC),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
else:
raise ValueError('dataset=%s' % dataset)
if cutout and cutout_length != 0:
transform_train_after.transforms.append(CutoutDefault(cutout_length))
if dataset == 'cifar10':
total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=None)
# testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=None)
elif dataset == 'reduced_cifar10':
total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=None)
sss = StratifiedShuffleSplit(n_splits=1, test_size=46000, random_state=0) # 4000 trainset
sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
train_idx, valid_idx = next(sss)
targets = [total_trainset.targets[idx] for idx in train_idx]
total_trainset = Subset(total_trainset, train_idx)
total_trainset.targets = targets
# testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=None)
elif dataset == 'cifar100':
total_trainset = torchvision.datasets.CIFAR100(root=dataroot, train=True, download=True, transform=None)
# testset = torchvision.datasets.CIFAR100(root=dataroot, train=False, download=True, transform=transform_test)
elif dataset == 'reduced_cifar100':
total_trainset = torchvision.datasets.CIFAR100(root=dataroot, train=True, download=True, transform=None)
sss = StratifiedShuffleSplit(n_splits=1, test_size=46000, random_state=0) # 4000 trainset
sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
train_idx, valid_idx = next(sss)
targets = [total_trainset.targets[idx] for idx in train_idx]
total_trainset = Subset(total_trainset, train_idx)
total_trainset.targets = targets
# testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=None)
elif dataset == 'svhn':
trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=None)
extraset = torchvision.datasets.SVHN(root=dataroot, split='extra', download=True, transform=None)
total_trainset = ConcatDataset([trainset, extraset])
# testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
elif dataset == 'reduced_svhn':
total_trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=None)
sss = StratifiedShuffleSplit(n_splits=1, test_size=73257-1000, random_state=0) # 1000 trainset
# sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
sss = sss.split(list(range(len(total_trainset))), total_trainset.labels)
train_idx, valid_idx = next(sss)
# targets = [total_trainset.targets[idx] for idx in train_idx]
targets = [total_trainset.labels[idx] for idx in train_idx]
total_trainset = Subset(total_trainset, train_idx)
# total_trainset.targets = targets
total_trainset.labels = targets
total_trainset.targets = targets
# testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
elif dataset == 'imagenet':
total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=None)
# testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test)
# compatibility
total_trainset.targets = [lb for _, lb in total_trainset.samples]
elif dataset == 'reduced_imagenet':
# randomly chosen indices
# idx120 = sorted(random.sample(list(range(1000)), k=120))
idx120 = [16, 23, 52, 57, 76, 93, 95, 96, 99, 121, 122, 128, 148, 172, 181, 189, 202, 210, 232, 238, 257, 258, 259, 277, 283, 289, 295, 304, 307, 318, 322, 331, 337, 338, 345, 350, 361, 375, 376, 381, 388, 399, 401, 408, 424, 431, 432, 440, 447, 462, 464, 472, 483, 497, 506, 512, 530, 541, 553, 554, 557, 564, 570, 584, 612, 614, 619, 626, 631, 632, 650, 657, 658, 660, 674, 675, 680, 682, 691, 695, 699, 711, 734, 736, 741, 754, 757, 764, 769, 770, 780, 781, 787, 797, 799, 811, 822, 829, 830, 835, 837, 842, 843, 845, 873, 883, 897, 900, 902, 905, 913, 920, 925, 937, 938, 940, 941, 944, 949, 959]
total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=None)
testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=None)
# compatibility
total_trainset.targets = [lb for _, lb in total_trainset.samples]
sss = StratifiedShuffleSplit(n_splits=1, test_size=len(total_trainset) - 50000, random_state=0) # 4000 trainset
sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
train_idx, valid_idx = next(sss)
# filter out
# train_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, train_idx))
# valid_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, valid_idx))
# test_idx = list(filter(lambda x: testset.samples[x][1] in idx120, range(len(testset))))
train_idx = list(filter(lambda x: total_trainset.targets[x] in idx120, train_idx))
valid_idx = list(filter(lambda x: total_trainset.targets[x] in idx120, valid_idx))
test_idx = list(filter(lambda x: testset.samples[x][1] in idx120, range(len(testset))))
targets = [idx120.index(total_trainset.targets[idx]) for idx in train_idx]
for idx in range(len(total_trainset.samples)):
if total_trainset.samples[idx][1] not in idx120:
continue
total_trainset.samples[idx] = (total_trainset.samples[idx][0], idx120.index(total_trainset.samples[idx][1]))
total_trainset = Subset(total_trainset, train_idx)
total_trainset.targets = targets
for idx in range(len(testset.samples)):
if testset.samples[idx][1] not in idx120:
continue
testset.samples[idx] = (testset.samples[idx][0], idx120.index(testset.samples[idx][1]))
testset = Subset(testset, test_idx)
print('reduced_imagenet train=', len(total_trainset))
elif dataset == 'reduced_imagenet':
# randomly chosen indices
idx120 = [904, 385, 759, 884, 784, 844, 132, 214, 990, 786, 979, 582, 104, 288, 697, 480, 66, 943, 308, 282, 118, 926, 882, 478, 133, 884, 570, 964, 825, 656, 661, 289, 385, 448, 705, 609, 955, 5, 703, 713, 695, 811, 958, 147, 6, 3, 59, 354, 315, 514, 741, 525, 685, 673, 657, 267, 575, 501, 30, 455, 905, 860, 355, 911, 24, 708, 346, 195, 660, 528, 330, 511, 439, 150, 988, 940, 236, 803, 741, 295, 111, 520, 856, 248, 203, 147, 625, 589, 708, 201, 712, 630, 630, 367, 273, 931, 960, 274, 112, 239, 463, 355, 955, 525, 404, 59, 981, 725, 90, 782, 604, 323, 418, 35, 95, 97, 193, 690, 869, 172]
total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=None)
# testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test)
# compatibility
total_trainset.targets = [lb for _, lb in total_trainset.samples]
# sss = StratifiedShuffleSplit(n_splits=1, test_size=len(total_trainset) - 6000, random_state=0) # 4000 trainset
# sss = StratifiedShuffleSplit(n_splits=1, test_size=0, random_state=0) # 4000 trainset
# sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
# train_idx, valid_idx = next(sss)
# print(len(train_idx), len(valid_idx))
# filter out
# train_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, train_idx))
# valid_idx = list(filter(lambda x: total_trainset.labels[x] in idx120, valid_idx))
# # test_idx = list(filter(lambda x: testset.samples[x][1] in idx120, range(len(testset))))
train_idx = list(range(len(total_trainset)))
filter_train_idx = list(filter(lambda x: total_trainset.targets[x] in idx120, train_idx))
# valid_idx = list(filter(lambda x: total_trainset.targets[x] in idx120, valid_idx))
# test_idx = list(filter(lambda x: testset.samples[x][1] in idx120, range(len(testset))))
# print(len(filter_train_idx))
targets = [idx120.index(total_trainset.targets[idx]) for idx in filter_train_idx]
sss = StratifiedShuffleSplit(n_splits=1, test_size=len(filter_train_idx) - 6000, random_state=0) # 4000 trainset
sss = sss.split(list(range(len(filter_train_idx))), targets)
train_idx, valid_idx = next(sss)
train_idx = [filter_train_idx[x] for x in train_idx]
valid_idx = [filter_train_idx[x] for x in valid_idx]
targets = [idx120.index(total_trainset.targets[idx]) for idx in train_idx]
for idx in range(len(total_trainset.samples)):
if total_trainset.samples[idx][1] not in idx120:
continue
total_trainset.samples[idx] = (total_trainset.samples[idx][0], idx120.index(total_trainset.samples[idx][1]))
total_trainset = Subset(total_trainset, train_idx)
total_trainset.targets = targets
# for idx in range(len(testset.samples)):
# if testset.samples[idx][1] not in idx120:
# continue
# testset.samples[idx] = (testset.samples[idx][0], idx120.index(testset.samples[idx][1]))
# testset = Subset(testset, test_idx)
print('reduced_imagenet train=', len(total_trainset))
else:
raise ValueError('invalid dataset name=%s' % dataset)
train_sampler = None
if split > 0.0:
sss = StratifiedShuffleSplit(n_splits=5, test_size=split, random_state=0)
sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
for _ in range(split_idx + 1):
train_idx, valid_idx = next(sss)
if target_lb >= 0:
train_idx = [i for i in train_idx if total_trainset.targets[i] == target_lb]
valid_idx = [i for i in valid_idx if total_trainset.targets[i] == target_lb]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetSampler(valid_idx)
# if horovod:
# import horovod.torch as hvd
# train_sampler = torch.utils.data.distributed.DistributedSampler(train_sampler, num_replicas=hvd.size(), rank=hvd.rank())
else:
valid_sampler = SubsetSampler([])
# if horovod:
# import horovod.torch as hvd
# train_sampler = torch.utils.data.distributed.DistributedSampler(valid_sampler, num_replicas=hvd.size(), rank=hvd.rank())
train_data = AugmentDataset(total_trainset, transform_train_pre, transform_train_after, transform_test, ops_names, True, magnitudes)
valid_data = AugmentDataset(total_trainset, transform_train_pre, transform_train_after, transform_test, ops_names, False, magnitudes)
trainloader = torch.utils.data.DataLoader(
train_data, batch_size=batch, shuffle=False,
sampler=train_sampler, drop_last=False,
pin_memory=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(
valid_data, batch_size=batch,
# sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
sampler=valid_sampler, drop_last=False,
pin_memory=True, num_workers=num_workers)
# trainloader = torch.utils.data.DataLoader(
# total_trainset, batch_size=batch, shuffle=True if train_sampler is None else False, num_workers=32, pin_memory=True,
# sampler=train_sampler, drop_last=True)
# validloader = torch.utils.data.DataLoader(
# total_trainset, batch_size=batch, shuffle=False, num_workers=16, pin_memory=True,
# sampler=valid_sampler, drop_last=False)
# testloader = torch.utils.data.DataLoader(
# testset, batch_size=batch, shuffle=False, num_workers=32, pin_memory=True,
# drop_last=False
# )
print(len(train_data))
return trainloader, validloader
|
shop/cascade/extensions.py | 2000-ion/TIDPP-Lab3 | 2,160 | 12627506 | from django.utils.translation import gettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.plugin_base import TransparentContainer
from shop.cascade.plugin_base import ShopPluginBase
class ShopExtendableMixin:
"""
Add this mixin class to the list of ``model_mixins``, in the plugin class wishing to use extensions.
"""
@property
def left_extension(self):
if self.child_plugin_instances is None:
return
result = [cp for cp in self.child_plugin_instances if cp.plugin_type == 'ShopLeftExtension']
if result:
return result[0]
@property
def right_extension(self):
if self.child_plugin_instances is None:
return
result = [cp for cp in self.child_plugin_instances if cp.plugin_type == 'ShopRightExtension']
if result:
return result[0]
class LeftRightExtensionMixin:
"""
Plugin classes wishing to use extensions shall inherit from this class.
"""
@classmethod
def get_child_classes(cls, slot, page, instance=None):
child_classes = ['ShopLeftExtension', 'ShopRightExtension', None]
# allow only one left and one right extension
for child in instance.get_children():
child_classes.remove(child.plugin_type)
return child_classes
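# Usage sketch (illustrative only; MyCartPlugin and every attribute other than the two
# mixins are assumptions). A plugin opts into left/right extensions by inheriting from
# LeftRightExtensionMixin and adding ShopExtendableMixin to its model_mixins:
#
#   class MyCartPlugin(LeftRightExtensionMixin, ShopPluginBase):
#       name = _("My Cart")
#       model_mixins = (ShopExtendableMixin,)
#       allow_children = True
#       render_template = 'myshop/cart.html'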
class ShopLeftExtension(TransparentContainer, ShopPluginBase):
name = _("Left Extension")
require_parent = True
parent_classes = ('ShopCartPlugin', 'ShopOrderViewsPlugin')
allow_children = True
render_template = 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(ShopLeftExtension)
class ShopRightExtension(TransparentContainer, ShopPluginBase):
name = _("Right Extension")
require_parent = True
parent_classes = ('ShopCartPlugin', 'ShopOrderViewsPlugin')
allow_children = True
render_template = 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(ShopRightExtension)
|
rasa/graph_components/providers/nlu_training_data_provider.py | fintzd/rasa | 9,701 | 12627514 | from __future__ import annotations
from typing import Dict, Text, Any
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.nlu.training_data.training_data import (
TrainingData,
DEFAULT_TRAINING_DATA_OUTPUT_PATH,
)
class NLUTrainingDataProvider(GraphComponent):
"""Provides NLU training data during training."""
def __init__(
self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource,
) -> None:
"""Creates a new NLU training data provider."""
self._config = config
self._model_storage = model_storage
self._resource = resource
@classmethod
def get_default_config(cls) -> Dict[Text, Any]:
"""Returns the default config for NLU training data provider."""
return {"persist": False, "language": None}
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> NLUTrainingDataProvider:
"""Creates a new NLU training data provider."""
return cls(config, model_storage, resource)
def _persist(self, training_data: TrainingData) -> None:
"""Persists NLU training data to model storage."""
with self._model_storage.write_to(self._resource) as resource_directory:
training_data.persist(
dir_name=str(resource_directory),
filename=DEFAULT_TRAINING_DATA_OUTPUT_PATH,
)
def provide(self, importer: TrainingDataImporter,) -> TrainingData:
"""Provides nlu training data during training."""
if "language" in self._config:
training_data = importer.get_nlu_data(language=self._config["language"])
else:
training_data = importer.get_nlu_data()
if self._config["persist"]:
self._persist(training_data)
return training_data
|
tests/acceptance/test_decorators.py | rspadim/aiocache | 213 | 12627516 | <filename>tests/acceptance/test_decorators.py
import asyncio
import pytest
import random
from unittest import mock
from aiocache import cached, cached_stampede, multi_cached
async def return_dict(keys=None):
ret = {}
for value, key in enumerate(keys or [pytest.KEY, pytest.KEY_1]):
ret[key] = str(value)
return ret
async def stub(*args, key=None, seconds=0, **kwargs):
await asyncio.sleep(seconds)
if key:
return str(key)
return str(random.randint(1, 50))
class TestCached:
@pytest.fixture(autouse=True)
def default_cache(self, mocker, cache):
mocker.patch("aiocache.decorators._get_cache", return_value=cache)
@pytest.mark.asyncio
async def test_cached_ttl(self, cache):
@cached(ttl=1, key=pytest.KEY)
async def fn():
return str(random.randint(1, 50))
resp1 = await fn()
resp2 = await fn()
assert await cache.get(pytest.KEY) == resp1 == resp2
await asyncio.sleep(1)
assert await cache.get(pytest.KEY) is None
@pytest.mark.asyncio
async def test_cached_key_builder(self, cache):
def build_key(f, self, a, b):
return "{}_{}_{}_{}".format(self, f.__name__, a, b)
@cached(key_builder=build_key)
async def fn(self, a, b=2):
return "1"
await fn("self", 1, 3)
assert await cache.exists(build_key(fn, "self", 1, 3)) is True
class TestCachedStampede:
@pytest.fixture(autouse=True)
def default_cache(self, mocker, cache):
mocker.patch("aiocache.decorators._get_cache", return_value=cache)
@pytest.mark.asyncio
async def test_cached_stampede(self, mocker, cache):
mocker.spy(cache, "get")
mocker.spy(cache, "set")
decorator = cached_stampede(ttl=10, lease=2)
await asyncio.gather(decorator(stub)(0.5), decorator(stub)(0.5))
cache.get.assert_called_with("acceptance.test_decoratorsstub(0.5,)[]")
assert cache.get.call_count == 4
cache.set.assert_called_with("acceptance.test_decoratorsstub(0.5,)[]", mock.ANY, ttl=10)
assert cache.set.call_count == 1
@pytest.mark.asyncio
async def test_locking_dogpile_lease_expiration(self, mocker, cache):
mocker.spy(cache, "get")
mocker.spy(cache, "set")
decorator = cached_stampede(ttl=10, lease=3)
await asyncio.gather(
decorator(stub)(1, seconds=1),
decorator(stub)(1, seconds=2),
decorator(stub)(1, seconds=3),
)
assert cache.get.call_count == 6
assert cache.set.call_count == 3
@pytest.mark.asyncio
async def test_locking_dogpile_task_cancellation(self, mocker, cache):
@cached_stampede()
async def cancel_task():
raise asyncio.CancelledError()
with pytest.raises(asyncio.CancelledError):
await cancel_task()
class TestMultiCachedDecorator:
@pytest.fixture(autouse=True)
def default_cache(self, mocker, cache):
mocker.patch("aiocache.decorators._get_cache", return_value=cache)
@pytest.mark.asyncio
async def test_multi_cached(self, cache):
multi_cached_decorator = multi_cached("keys")
default_keys = {pytest.KEY, pytest.KEY_1}
await multi_cached_decorator(return_dict)(keys=default_keys)
for key in default_keys:
assert await cache.get(key) is not None
@pytest.mark.asyncio
async def test_keys_without_kwarg(self, cache):
@multi_cached("keys")
async def fn(keys):
return {pytest.KEY: 1}
await fn([pytest.KEY])
assert await cache.exists(pytest.KEY) is True
@pytest.mark.asyncio
async def test_multi_cached_key_builder(self, cache):
def build_key(key, f, self, keys, market="ES"):
return "{}_{}_{}".format(f.__name__, key, market)
@multi_cached(keys_from_attr="keys", key_builder=build_key)
async def fn(self, keys, market="ES"):
return {pytest.KEY: 1, pytest.KEY_1: 2}
await fn("self", keys=[pytest.KEY, pytest.KEY_1])
assert await cache.exists("fn_" + pytest.KEY + "_ES") is True
assert await cache.exists("fn_" + pytest.KEY_1 + "_ES") is True
@pytest.mark.asyncio
async def test_fn_with_args(self, cache):
@multi_cached("keys")
async def fn(keys, *args):
assert len(args) == 1
return {pytest.KEY: 1}
await fn([pytest.KEY], "arg")
assert await cache.exists(pytest.KEY) is True
@pytest.mark.asyncio
async def test_double_decorator(self, cache):
def dummy_d(fn):
async def wrapper(*args, **kwargs):
await fn(*args, **kwargs)
return wrapper
@dummy_d
@multi_cached("keys")
async def fn(keys):
return {pytest.KEY: 1}
await fn([pytest.KEY])
assert await cache.exists(pytest.KEY) is True
|
bro-scripts/adversaries/hurricane-panda/rogue-dns/dynamic/scrape-alexa.py | kingtuna/cs-bro | 131 | 12627519 | <gh_stars>100-1000
# Rudimentary script to collect domains in the Alexa top 500
# This script can be run as often as needed to refresh the list of domains
# CrowdStrike 2015
# <EMAIL>
import requests
import bs4
# File containing Alexa top 500 domains
# This file name and path is referenced in the Bro script and can be modified
f = open('alexa_domains.txt','w')
f.write('#fields\talexa\n')
# Alexa's top 500 domains are spread across 20 pages
# To change the number of domains collected (top 50, top 250), modify the range
for num in range(0,20):
site = "http://www.alexa.com/topsites/global;" + str(num)
page = requests.get(site)
soup = bs4.BeautifulSoup(page.text)
for link in soup.find_all('a'):
if 'siteinfo' in str(link):
f.write((link.get('href')).split("/")[2] + "\n" )
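# Resulting file layout (sketch; the domains shown are placeholders, not real output):
# a '#fields\talexa' header followed by one domain per line, matching the single-field
# format the Bro script referenced above reads.
#
#   #fields<TAB>alexa
#   example-domain-1.com
#   example-domain-2.net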
|
tests/test_variable_visitor.py | thejcannon/darglint | 405 | 12627523 | import ast
from unittest import (
TestCase,
)
from darglint.analysis.variable_visitor import (
VariableVisitor,
)
from .utils import (
reindent,
)
class VariableVisitorTests(TestCase):
def assertFound(self, program, *variables):
"""Assert that the return was found.
Args:
program: The program to run the analysis on.
variables: The variables which we expect to have found
(or empty, if we expect none.)
Returns:
The visitor, in case you want to do more analysis.
"""
function = ast.parse(reindent(program)).body[0]
visitor = VariableVisitor()
visitor.visit(function)
self.assertEqual(sorted({
x.id for x in visitor.variables
}), sorted(variables))
return visitor
def test_no_variables(self):
program = '''
def f(x):
return x * 2
'''
self.assertFound(program)
def test_one_variables(self):
program = '''
def f(x):
y = x * 2
return y
'''
self.assertFound(program, 'y')
def test_many_variables(self):
program = '''
def f(x):
y = 2 * x
pi = 3.1415
something = 'cat'
return something * int(y * pi)
'''
self.assertFound(program, 'y', 'pi', 'something')
def test_no_variables_in_method(self):
program = '''
class X:
def f(self, x):
self.x = x * 2
return self.x
'''
self.assertFound(program)
def test_one_variable_in_method(self):
program = '''
class X:
def f(self, x):
y = x * 2
self.x = y
return y
'''
self.assertFound(program, 'y')
def test_many_variables_in_method(self):
program = '''
class X:
def f(self, x):
y = 2 * x
pi = 3.1415
something = 'cat'
self.msg = something * int(y * pi)
return self.msg
'''
self.assertFound(program, 'y', 'pi', 'something')
|
py4web/server_adapters.py | macneiln/py4web | 133 | 12627532 | import logging
from ombott.server_adapters import ServerAdapter
try:
from .utils.wsservers import *
except ImportError:
wsservers_list = []
__all__ = [
"geventWebSocketServer",
"wsgirefThreadingServer",
"rocketServer",
] + wsservers_list
def geventWebSocketServer():
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.logging import create_logger
class GeventWebSocketServer(ServerAdapter):
def run(self, handler):
server = pywsgi.WSGIServer(
(self.host, self.port),
handler,
handler_class=WebSocketHandler,
**self.options
)
if not self.quiet:
server.logger = create_logger("geventwebsocket.logging")
server.logger.setLevel(logging.INFO)
server.logger.addHandler(logging.StreamHandler())
server.serve_forever()
return GeventWebSocketServer
def wsgirefThreadingServer():
# https://www.electricmonk.nl/log/2016/02/15/multithreaded-dev-web-server-for-the-python-bottle-web-framework/
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
from socketserver import ThreadingMixIn
import socket
from concurrent.futures import ThreadPoolExecutor # pip install futures
class WSGIRefThreadingServer(ServerAdapter):
def run(self, app):
class PoolMixIn(ThreadingMixIn):
def process_request(self, request, client_address):
self.pool.submit(
self.process_request_thread, request, client_address
)
class ThreadingWSGIServer(PoolMixIn, WSGIServer):
daemon_threads = True
pool = ThreadPoolExecutor(max_workers=40)
class Server:
def __init__(
self, server_address=("127.0.0.1", 8000), handler_cls=None
):
self.wsgi_app = None
self.listen, self.port = server_address
self.handler_cls = handler_cls
def set_app(self, app):
self.wsgi_app = app
def get_app(self):
return self.wsgi_app
def serve_forever(self):
self.server = make_server(
self.listen,
self.port,
self.wsgi_app,
ThreadingWSGIServer,
self.handler_cls,
)
self.server.serve_forever()
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get("handler_class", FixedHandler)
server_cls = Server
if ":" in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, "address_family") == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
srv.serve_forever()
return WSGIRefThreadingServer
def rocketServer():
try:
from rocket3 import Rocket3 as Rocket
except ImportError:
from .rocket3 import Rocket3 as Rocket
import logging.handlers
class RocketServer(ServerAdapter):
def run(self, app):
if not self.quiet:
log = logging.getLogger("Rocket")
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
server = Rocket((self.host, self.port), "wsgi", dict(wsgi_app=app))
server.start()
return RocketServer
|
dataset/kitti.py | lebionick/stereo-transformer | 410 | 12627578 | # Authors: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
#
# Copyright (c) 2020. Johns Hopkins University - All rights reserved.
import os
import numpy as np
import torch.utils.data as data
from PIL import Image
from albumentations import Compose
from natsort import natsorted
from dataset.preprocess import augment, normalization
from dataset.stereo_albumentation import RGBShiftStereo, RandomBrightnessContrastStereo, random_crop
class KITTIBaseDataset(data.Dataset):
def __init__(self, datadir, split='train'):
super(KITTIBaseDataset, self).__init__()
self.datadir = datadir
self.split = split
if split == 'train' or split == 'validation' or split == 'validation_all':
self.sub_folder = 'training/'
elif split == 'test':
self.sub_folder = 'testing/'
# to be set by child classes
self.left_fold = None
self.right_fold = None
self.disp_fold = None
self._augmentation()
def _read_data(self):
assert self.left_fold is not None
self.left_data = natsorted([os.path.join(self.datadir, self.sub_folder, self.left_fold, img) for img in
os.listdir(os.path.join(self.datadir, self.sub_folder, self.left_fold)) if
img.find('_10') > -1])
self.right_data = [img.replace(self.left_fold, self.right_fold) for img in self.left_data]
self.disp_data = [img.replace(self.left_fold, self.disp_fold) for img in self.left_data]
self._split_data()
def _split_data(self):
train_val_frac = 0.95
# split data
if len(self.left_data) > 1:
if self.split == 'train':
self.left_data = self.left_data[:int(len(self.left_data) * train_val_frac)]
self.right_data = self.right_data[:int(len(self.right_data) * train_val_frac)]
self.disp_data = self.disp_data[:int(len(self.disp_data) * train_val_frac)]
elif self.split == 'validation':
self.left_data = self.left_data[int(len(self.left_data) * train_val_frac):]
self.right_data = self.right_data[int(len(self.right_data) * train_val_frac):]
self.disp_data = self.disp_data[int(len(self.disp_data) * train_val_frac):]
def _augmentation(self):
if self.split == 'train':
self.transformation = Compose([
RGBShiftStereo(always_apply=True, p_asym=0.5),
RandomBrightnessContrastStereo(always_apply=True, p_asym=0.5)
])
elif self.split == 'validation' or self.split == 'test' or self.split == 'validation_all':
self.transformation = None
else:
raise Exception("Split not recognized")
def __len__(self):
return len(self.left_data)
def __getitem__(self, idx):
input_data = {}
# left
left_fname = self.left_data[idx]
left = np.array(Image.open(left_fname)).astype(np.uint8)
input_data['left'] = left
# right
right_fname = self.right_data[idx]
right = np.array(Image.open(right_fname)).astype(np.uint8)
input_data['right'] = right
# disp
if not self.split == 'test': # no disp for test files
disp_fname = self.disp_data[idx]
disp = np.array(Image.open(disp_fname)).astype(np.float) / 256.
input_data['disp'] = disp
input_data['occ_mask'] = np.zeros_like(disp).astype(np.bool)
if self.split == 'train':
input_data = random_crop(200, 640, input_data, self.split)
input_data = augment(input_data, self.transformation)
else:
input_data = normalization(**input_data)
return input_data
class KITTI2015Dataset(KITTIBaseDataset):
def __init__(self, datadir, split='train'):
super(KITTI2015Dataset, self).__init__(datadir, split)
self.left_fold = 'image_2/'
self.right_fold = 'image_3/'
self.disp_fold = 'disp_occ_0/' # we read disp data with occlusion since we compute occ directly
self._read_data()
class KITTI2012Dataset(KITTIBaseDataset):
def __init__(self, datadir, split='train'):
super(KITTI2012Dataset, self).__init__(datadir, split)
self.left_fold = 'colored_0/'
self.right_fold = 'colored_1/'
self.disp_fold = 'disp_occ/' # we read disp data with occlusion since we compute occ directly
self._read_data()
class KITTIDataset(KITTIBaseDataset):
"""
Merged KITTI dataset with 2015 and 2012 data
"""
def __init__(self, datadir, split='train'):
super(KITTIDataset, self).__init__(datadir, split)
self.left_fold_2015 = 'image_2'
self.right_fold_2015 = 'image_3'
self.disp_fold_2015 = 'disp_occ_0' # we read disp data with occlusion since we compute occ directly
self.preprend_2015 = '2015'
self.left_fold_2012 = 'colored_0'
self.right_fold_2012 = 'colored_1'
self.disp_fold_2012 = 'disp_occ' # we read disp data with occlusion since we compute occ directly
self.preprend_2012 = '2012'
self._read_data()
def _read_data(self):
assert self.left_fold_2015 is not None
assert self.left_fold_2012 is not None
left_data_2015 = [os.path.join(self.datadir, self.preprend_2015, self.sub_folder, self.left_fold_2015, img) for
img in os.listdir(os.path.join(self.datadir, '2015', self.sub_folder, self.left_fold_2015)) if
img.find('_10') > -1]
left_data_2015 = natsorted(left_data_2015)
right_data_2015 = [img.replace(self.left_fold_2015, self.right_fold_2015) for img in left_data_2015]
disp_data_2015 = [img.replace(self.left_fold_2015, self.disp_fold_2015) for img in left_data_2015]
left_data_2012 = [os.path.join(self.datadir, self.preprend_2012, self.sub_folder, self.left_fold_2012, img) for
img in os.listdir(os.path.join(self.datadir, '2012', self.sub_folder, self.left_fold_2012)) if
img.find('_10') > -1]
left_data_2012 = natsorted(left_data_2012)
right_data_2012 = [img.replace(self.left_fold_2012, self.right_fold_2012) for img in left_data_2012]
disp_data_2012 = [img.replace(self.left_fold_2012, self.disp_fold_2012) for img in left_data_2012]
self.left_data = natsorted(left_data_2015 + left_data_2012)
self.right_data = natsorted(right_data_2015 + right_data_2012)
self.disp_data = natsorted(disp_data_2015 + disp_data_2012)
self._split_data()
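# Usage sketch (not part of the original module). The path below is a placeholder and
# is assumed to follow the KITTI 2015 layout used above (training/image_2, image_3,
# disp_occ_0); run with the repository root on the path so the `dataset` imports resolve.
if __name__ == "__main__":
    train_set = KITTI2015Dataset("/path/to/kitti2015", split="train")
    loader = data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=4)
    sample = next(iter(loader))  # dict with 'left', 'right', 'disp', 'occ_mask' tensors
    print(sample["left"].shape, sample["disp"].shape)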
|
tests/RunTests/PythonTests/test2011_033.py | maurizioabba/rose | 488 | 12627579 | <reponame>maurizioabba/rose
# test if stmts and exps
if 1:
print "one"
if 2:
print "two"
else:
print "three"
if 0:
print "four"
else:
print "five"
if 1:
print "six"
elif 2:
print "seven"
if 0:
print "eight"
elif 2:
print "nine"
if 0+0:
print "ten"
elif 0*0:
print "eleven"
else:
print "twelve"
print 1 if 1 else 2
print 3 if 1 and 2 else 4
print 5 if 0 else 6
print 6*7 if 0 and 1 else 8*9
print 10*11 if 1 and 2 else 12*13
|
src/metrics/size.py | MohammedAljahdali/shrinkbench | 345 | 12627598 | """Model size metrics
"""
import numpy as np
from . import nonzero, dtype2bits
def model_size(model, as_bits=False):
"""Returns absolute and nonzero model size
Arguments:
model {torch.nn.Module} -- Network to compute model size over
Keyword Arguments:
as_bits {bool} -- Whether to account for the size of dtype
Returns:
int -- Total number of weight & bias params
int -- Of total_params, exactly how many are nonzero
"""
total_params = 0
nonzero_params = 0
for tensor in model.parameters():
t = np.prod(tensor.shape)
nz = nonzero(tensor.detach().cpu().numpy())
if as_bits:
bits = dtype2bits[tensor.dtype]
t *= bits
nz *= bits
total_params += t
nonzero_params += nz
return int(total_params), int(nonzero_params)
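# Usage sketch (not part of the original module; the torchvision import is an
# assumption made for the demo -- any torch.nn.Module works):
#
#   from torchvision.models import resnet18
#   total, nonzero = model_size(resnet18())
#   print(f"params={total} nonzero={nonzero} sparsity={1 - nonzero / total:.2%}")
#
# With as_bits=True both counts are reported in bits, accounting for each tensor's dtype.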
|
utest/test/keywords/test_webdrivercreator_service_log_path.py | hugovk/SeleniumLibrary | 792 | 12627603 | <filename>utest/test/keywords/test_webdrivercreator_service_log_path.py
import os
from collections import namedtuple
import pytest
from mockito import mock, when, unstub, ANY
from selenium import webdriver
from SeleniumLibrary.keywords import WebDriverCreator
from SeleniumLibrary.utils import WINDOWS
@pytest.fixture(scope="module")
def creator():
curr_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.abspath(os.path.join(curr_dir, "..", "..", "output_dir"))
creator = WebDriverCreator(output_dir)
Creator = namedtuple("Creator", "creator, output_dir")
return Creator(creator, output_dir)
def teardown_function():
unstub()
def test_no_log_file(creator):
assert creator.creator._get_log_path(None) is None
def test_log_file_with_rf_file_separator(creator):
log_file = "C:\\path\\to\\own_name.txt" if WINDOWS else "/path/to/own_name.txt"
file_name = creator.creator._get_log_path(log_file)
log_file = log_file.replace("/", os.sep)
assert file_name == log_file
def test_log_file_with_index(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
file_name = creator.creator._get_log_path(log_file)
assert file_name == log_file.format(index="1")
def test_log_file_with_index_exist(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
with open(
os.path.join(creator.output_dir, log_file.format(index="1")), "w"
) as file:
file.close()
file_name = creator.creator._get_log_path(log_file)
assert file_name == log_file.format(index="2")
def test_create_chrome_with_service_log_path_none(creator):
expected_webdriver = mock()
when(webdriver).Chrome(
options=None, service_log_path=None, executable_path="chromedriver"
).thenReturn(expected_webdriver)
driver = creator.creator.create_chrome({}, None, service_log_path=None)
assert driver == expected_webdriver
def test_create_chrome_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
expected_webdriver = mock()
when(webdriver).Chrome(
options=None, service_log_path=log_file, executable_path="chromedriver"
).thenReturn(expected_webdriver)
driver = creator.creator.create_chrome({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_headlesschrome_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
expected_webdriver = mock()
options = mock()
when(webdriver).ChromeOptions().thenReturn(options)
when(webdriver).Chrome(
options=options, service_log_path=log_file, executable_path="chromedriver"
).thenReturn(expected_webdriver)
driver = creator.creator.create_headless_chrome({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_firefox_with_service_log_path_none(creator):
log_file = os.path.join(creator.output_dir, "geckodriver-1.log")
expected_webdriver = mock()
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
when(webdriver).Firefox(
options=None,
firefox_profile=profile,
executable_path="geckodriver",
service_log_path=log_file,
).thenReturn(expected_webdriver)
driver = creator.creator.create_firefox({}, None, None, service_log_path=None)
assert driver == expected_webdriver
def test_create_firefox_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
expected_webdriver = mock()
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
when(webdriver).Firefox(
options=None,
firefox_profile=profile,
executable_path="geckodriver",
service_log_path=log_file,
).thenReturn(expected_webdriver)
driver = creator.creator.create_firefox(
{}, None, ff_profile_dir=None, service_log_path=log_file
)
assert driver == expected_webdriver
def test_create_headlessfirefox_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "firefox-{index}.log")
expected_webdriver = mock()
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
options = mock()
when(webdriver).FirefoxOptions().thenReturn(options)
when(webdriver).Firefox(
options=options,
firefox_profile=profile,
service_log_path=log_file,
executable_path="geckodriver",
).thenReturn(expected_webdriver)
driver = creator.creator.create_headless_firefox(
{}, None, ff_profile_dir=None, service_log_path=log_file
)
assert driver == expected_webdriver
def test_create_firefox_from_create_driver(creator):
log_file = os.path.join(creator.output_dir, "firefox-1.log")
expected_webdriver = mock()
profile = mock()
when(webdriver).FirefoxProfile().thenReturn(profile)
options = mock()
when(webdriver).FirefoxOptions().thenReturn(options)
executable_path = "geckodriver"
when(creator.creator)._get_executable_path(ANY).thenReturn(executable_path)
when(webdriver).Firefox(
options=None,
firefox_profile=profile,
service_log_path=log_file,
executable_path=executable_path,
).thenReturn(expected_webdriver)
driver = creator.creator.create_driver(
"firefox ", {}, remote_url=None, profile_dir=None, service_log_path=log_file
)
assert driver == expected_webdriver
def test_create_ie_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "ie-1.log")
expected_webdriver = mock()
when(webdriver).Ie(
options=None, service_log_path=log_file, executable_path="IEDriverServer.exe"
).thenReturn(expected_webdriver)
driver = creator.creator.create_ie({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_edge_with_service_log_path_real_path(creator):
executable_path = "MicrosoftWebDriver.exe"
log_file = os.path.join(creator.output_dir, "ie-1.log")
expected_webdriver = mock()
when(creator.creator)._has_options(ANY).thenReturn(False)
when(webdriver).Edge(
service_log_path=log_file, executable_path=executable_path
).thenReturn(expected_webdriver)
driver = creator.creator.create_edge({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_opera_with_service_log_path_real_path(creator):
executable_path = "operadriver"
log_file = os.path.join(creator.output_dir, "ie-1.log")
expected_webdriver = mock()
when(webdriver).Opera(
options=None, service_log_path=log_file, executable_path=executable_path
).thenReturn(expected_webdriver)
driver = creator.creator.create_opera({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_safari_no_support_for_service_log_path(creator):
log_file = os.path.join(creator.output_dir, "ie-1.log")
expected_webdriver = mock()
executable_path = "/usr/bin/safaridriver"
when(webdriver).Safari(executable_path=executable_path).thenReturn(
expected_webdriver
)
driver = creator.creator.create_safari({}, None, service_log_path=log_file)
assert driver == expected_webdriver
def test_create_phantomjs_with_service_log_path_real_path(creator):
log_file = os.path.join(creator.output_dir, "ie-1.log")
expected_webdriver = mock()
executable_path = "phantomjs"
when(webdriver).PhantomJS(
service_log_path=log_file, executable_path=executable_path
).thenReturn(expected_webdriver)
driver = creator.creator.create_phantomjs({}, None, service_log_path=log_file)
assert driver == expected_webdriver
|
notebooks-text-format/flax_intro.py | arpitvaghela/probml-notebooks | 166 | 12627610 | <reponame>arpitvaghela/probml-notebooks<gh_stars>100-1000
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/mlp/flax_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="rF208fIxvq8m"
# # Introduction to neural networks using Flax
#
#
#
# Flax / Linen is a neural net library, built on top of JAX, "designed to offer an implicit variable management API to save the user from having to manually thread thousands of variables through a complex tree of functions." To handle both current and future JAX transforms (configured and composed in any way), Linen Modules are defined as explicit functions of the form
# $$
# f(v_{in}, x) \rightarrow v_{out}, y
# $$
# Where $v_{in}$ is the collection of variables (eg. parameters) and PRNG state used by the model, $v_{out}$ the mutated output variable collections, $x$ the input data and $y$ the output data. We illustrate this below. Our tutorial is based on the official [flax intro](https://flax.readthedocs.io/en/latest/notebooks/flax_basics.html) and [linen colab](https://github.com/google/flax/blob/master/docs/notebooks/linen_intro.ipynb). Details are in the [flax source code](https://flax.readthedocs.io/en/latest/_modules/index.html). Note: please be sure to read our [JAX tutorial](https://github.com/probml/pyprobml/blob/master/book1/intro/jax_intro.ipynb) first.
#
# + id="uRAzAXYXvztz"
import numpy as np
#np.set_printoptions(precision=3)
np.set_printoptions(formatter={'float': lambda x: "{0:0.5f}".format(x)})
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="1ob8P9ALvkcM" outputId="573415d3-eba0-4d6a-e51a-674aea067217"
# Install the latest JAXlib version.
# #!pip install --upgrade -q pip jax jaxlib
# + id="68kI74E1vvEI" colab={"base_uri": "https://localhost:8080/"} outputId="4525fc58-0c43-4909-dad9-1172ef516cbf"
import jax
from jax import lax, random, numpy as jnp
key = random.PRNGKey(0)
# + id="N3dXu6XY6U0H"
from typing import Any, Callable, Dict, Iterator, Mapping, Optional, Sequence, Tuple
# Useful type aliases
Array = jnp.ndarray
PRNGKey = Array
Batch = Mapping[str, np.ndarray]
OptState = Any
# + colab={"base_uri": "https://localhost:8080/"} id="7pcNcE9_Qj_l" outputId="107ed9cb-1d4b-46ab-8032-60836e4b083e"
# Install Flax at head:
# !pip install --upgrade -q git+https://github.com/google/flax.git
# + id="s80k9sonQfDi"
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
from flax import optim
from jax.config import config
config.enable_omnistaging() # Linen requires enabling omnistaging
# + [markdown] id="aGrUFJYxjyL7"
# # MLP in vanilla JAX
#
# We construct a simple MLP with L hidden layers (relu activation), and scalar output (linear activation).
#
# Note: JAX and Flax, like NumPy, are row-based systems, meaning that vectors are represented as row vectors and not column vectors.
#
# + id="mWQGVJMP0VMB"
# We define the parameter initializers using a signature that is flax-compatible
# https://flax.readthedocs.io/en/latest/_modules/jax/_src/nn/initializers.html
def weights_init(key, shape, dtype=jnp.float32):
return random.normal(key, shape, dtype)
#return jnp.ones(shape, dtype)
def bias_init(key, shape, dtype=jnp.float32):
return jnp.zeros(shape, dtype)
def relu(a):
return jnp.maximum(a, 0)
# + id="GepkhhTh-9b-"
# A minimal MLP class
class MLP0():
features: Sequence[int] # number of features in each layer
def __init__(self, features): # class constructor
self.features = features
def init(self, key, x): # initialize parameters
in_size = np.shape(x)[1]
sizes = np.concatenate( ([in_size], self.features) )
nlayers = len(sizes)
params = {}
for i in range(nlayers-1):
in_size = sizes[i]
out_size = sizes[i+1]
subkey1, subkey2, key = random.split(key, num=3)
W = weights_init(subkey1, (in_size, out_size) )
b = bias_init(subkey2, out_size)
params[f'W{i}'] = W
params[f'b{i}'] = b
return params
def apply(self, params, x): # forwards pass
activations = x
nhidden_layers = len(self.features)-1
for i in range(nhidden_layers):
W = params[f'W{i}'];
b = params[f'b{i}'];
outputs = jnp.dot(activations, W) + b
activations = relu(outputs)
# for final layer, no activation function
i = nhidden_layers
outputs = jnp.dot(activations, params[f'W{i}']) + params[f'b{i}']
return outputs
# + colab={"base_uri": "https://localhost:8080/"} id="8jFS4SNO0V_I" outputId="72d9d449-742e-45d1-9641-a4f3a6390e26"
key = random.PRNGKey(0)
D = 3
N = 2
x = random.normal(key, (N,D,))
layer_sizes = [3,1] # 1 hidden layer of size 3, 1 scalar output
model0 = MLP0(layer_sizes)
params0 = model0.init(key, x)
print('params')
for k,v in params0.items():
print(k, v.shape)
print(v)
y0 = model0.apply(params0, x)
print('\noutput')
print(y0)
# + [markdown] id="rBtPT-drBkGA"
# # Our first flax model
#
# Here we recreate the vanilla model in flax. Since we don't specify how the parameters are initialized, the behavior will not be identical to the vanilla model --- we will fix this below, but for now, we focus on model construction.
#
# We see that the model is a subclass of `nn.Module`, which is a subclass of Python's dataclass. The child class (written by the user) must define a `__call__(inputs)` method, which applies the function to the input, and a `setup()` method, which creates the modules inside this model.
#
# The module (parent) class defines two main methods: `model.apply(variables, input)`, that applies the function to the input (and variables) to generate an output; and `model.init(key, input)`, that initializes the variables and returns them as a "frozen dictionary". This dictionary can contain multiple *kinds* of variables. In the example below, the only kind are parameters, which are immutable variables (that will usually get updated in an external optimization loop, as we show later). The parameters are automatically named after the corresponding module (here, dense0, dense1, etc). In this example, both modules are dense layers, so their parameters are a weight matrix (called 'kernel') and a bias vector.
#
# The hyper-parameters (in this case, the size of each layer) are stored as attributes of the class, and are specified when the module is constructed.
# + id="3zueDo1r0Qav"
class MLP(nn.Module):
features: Sequence[int]
default_attr: int = 42
def setup(self):
print('setup')
self.layers = [nn.Dense(feat) for feat in self.features]
def __call__(self, inputs):
print('call')
x = inputs
for i, lyr in enumerate(self.layers):
x = lyr(x)
if i != len(self.layers) - 1:
x = nn.relu(x)
return x
# + colab={"base_uri": "https://localhost:8080/"} id="OoYDn8lX7_ZH" outputId="9aed2723-3248-417b-9a25-408ef49763d7"
key = random.PRNGKey(0)
D = 3
N = 2
x = random.normal(key, (N,D,))
layer_sizes = [3,1] # 1 hidden layer of size 3, 1 scalar output
print('calling constructor')
model = MLP(layer_sizes) # just initialize attributes of the object
print('OUTPUT')
print(model)
print('\ncalling init')
variables = model.init(key, x) # calls setup then __call___
print('OUTPUT')
print(variables)
print('Calling apply')
y = model.apply(variables, x) # calls setup then __call___
print(y)
# + [markdown] id="5lwM1j1WksDG"
# # Compact modules
#
# To reduce the amount of boilerplate code, flax makes it possible to define a module just by writing the `__call__` method, avoiding the need to write a `setup` function. The corresponding layers will be created when the `init` function is called, so the input shape can be inferred lazily (when passed an input).
# + colab={"base_uri": "https://localhost:8080/"} id="Akq_iXXdktwb" outputId="2827db63-b8a0-4500-dd78-bd6b49f72065"
class MLP(nn.Module):
features: Sequence[int]
@nn.compact
def __call__(self, inputs):
x = inputs
for i, feat in enumerate(self.features):
x = nn.Dense(feat)(x)
if i != len(self.features) - 1:
x = nn.relu(x)
return x
model = MLP(layer_sizes)
print(model)
params = model.init(key, x)
print(params)
y = model.apply(params, x)
print(y)
# + [markdown] id="dNiuZ54yB7Gj"
# # Explicit parameter initialization
#
# We can control the initialization of the random parameters in each submodule by specifying an init function. Below we show how to initialize our MLP to match the vanilla JAX model. We then check both methods give the same outputs.
# + id="5W_lEFsU4t04"
def make_const_init(x):
def init_params(key, shape, dtype=jnp.float32):
return x
return init_params
class MLP_init(nn.Module):
features: Sequence[int]
params_init: Dict
def setup(self):
nlayers = len(self.features)
layers = []
for i in range(nlayers):
W = self.params_init[f'W{i}'];
b = self.params_init[f'b{i}'];
weights_init = make_const_init(W)
bias_init = make_const_init(b)
layer = nn.Dense(self.features[i], kernel_init=weights_init, bias_init=bias_init)
layers.append(layer)
self.layers = layers
def __call__(self, inputs):
x = inputs
for i, lyr in enumerate(self.layers):
x = lyr(x)
if i != len(self.layers) - 1:
x = nn.relu(x)
return x
# + colab={"base_uri": "https://localhost:8080/"} id="R27PhzrLY_zJ" outputId="adaba9d1-90a4-444b-95be-cef4da0768fe"
params_init = params0
model = MLP_init(layer_sizes, params_init)
print(model)
variables = model.init(key, x)
params = variables['params']
print(params)
W0 = params0['W0']
W = params['layers_0']['kernel']
assert np.allclose(W, W0)
y = model.apply(variables, x)
print(y)
assert np.allclose(y, y0)
# + [markdown] id="Rf8avaA_nGJ1"
# # Creating your own modules
#
# Now we illustrate how to create a module with its own parameters, instead of relying on composing built-in primitives. As an example, we write our own dense layer class.
# + colab={"base_uri": "https://localhost:8080/"} id="WUJ98XpSnS8F" outputId="ccdc09ef-f87f-4234-d64e-0bd583406851"
class SimpleDense(nn.Module):
features: int # num output features for this layer
kernel_init: Callable = nn.initializers.lecun_normal()
bias_init: Callable = nn.initializers.zeros
@nn.compact
def __call__(self, inputs):
features_in = inputs.shape[-1] # infer shape from input
features_out = self.features
kernel = self.param('kernel', self.kernel_init, (features_in, features_out))
bias = self.param('bias', self.bias_init, (features_out,))
outputs = jnp.dot(inputs, kernel) + bias
return outputs
model = SimpleDense(features=3)
print(model)
vars = model.init(key, x)
print(vars)
y = model.apply(vars, x)
print(y)
# + [markdown] id="BJpBh933-GTW"
# # Stochastic layers
#
# Some layers may need a source of randomness. If so, we must pass them a PRNG key in the `init` and `apply` functions, in addition to the PRNG key used for parameter initialization. We illustrate this below using dropout. We construct two versions: one stochastic (for training), and one deterministic (for evaluation).
# + colab={"base_uri": "https://localhost:8080/"} id="tMSpLucO-Yfj" outputId="9726f1f3-ca25-4946-f9da-29692b1b034c"
class Block(nn.Module):
features: int
training: bool
@nn.compact
def __call__(self, inputs):
x = nn.Dense(self.features)(inputs)
x = nn.Dropout(rate=0.5)(x, deterministic=not self.training)
return x
N = 1; D = 2;
x = random.uniform(key, (N,D))
model = Block(features=3, training=True)
key = random.PRNGKey(0)
variables = model.init({'params': key, 'dropout': key}, x)
#variables = model.init(key, x) # cannot share the rng
print('variables', variables)
# Apply stochastic model
for i in range(2):
key, subkey = random.split(key)
y = model.apply(variables, x, rngs={'dropout': subkey})
print(f'train output {i}, ', y)
# Now make a deterministic version
eval_model = Block(features=3, training=False)
key = random.PRNGKey(0)
#variables = eval_model.init({'params': key, 'dropout': key}, x)
for i in range(2):
key, subkey = random.split(key)
y = eval_model.apply(variables, x, rngs={'dropout': subkey})
print(f'eval output {i}, ', y)
# + [markdown] id="CHieB2aAumdg"
# # Stateful layers
#
# In addition to parameters, linen modules can contain other kinds of variables, which may be mutable as we illustrate below.
# Indeed, parameters are just a special case of variable.
# In particular, this line
# ```
# p = self.param('param_name', init_fn, shape, dtype)
# ```
# is a convenient shorthand for this:
# ```
# p = self.variable('params', 'param_name', lambda s, d: init_fn(self.make_rng('params'), s, d), shape, dtype).value
# ```
#
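# + [markdown]
# As a minimal sketch of this equivalence (the module and parameter names below are illustrative and not part of the original notebook), both forms below place a parameter in the 'params' collection:
# +
class ParamVsVariable(nn.Module):
    @nn.compact
    def __call__(self, x):
        D = x.shape[-1]
        # the usual shorthand: stored under variables['params']['w_short']
        w_short = self.param('w_short', nn.initializers.lecun_normal(), (D, D))
        # the explicit form: same collection, drawing the init rng ourselves
        w_long = self.variable(
            'params', 'w_long',
            lambda shape: nn.initializers.lecun_normal()(self.make_rng('params'), shape),
            (D, D)).value
        return x @ (w_short + w_long)

print(ParamVsVariable().init(key, x))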
# + [markdown] id="EAQxx2Tu8xln"
# ## Example: counter
# + colab={"base_uri": "https://localhost:8080/"} id="BeGNa8zaut41" outputId="eb8923e0-ed62-46f9-f11b-80dec053e31a"
class Counter(nn.Module):
@nn.compact
def __call__(self):
# variable(collection, name, init_fn, *init_args)
counter1 = self.variable('counter', 'count1', lambda: jnp.zeros((), jnp.int32))
counter2 = self.variable('counter', 'count2', lambda: jnp.zeros((), jnp.int32))
is_initialized = self.has_variable('counter', 'count1')
if is_initialized:
counter1.value += 1
counter2.value += 2
return counter1.value, counter2.value
model = Counter()
print(model)
init_variables = model.init(key) # calls the __call__ method
print('initialized variables:\n', init_variables)
counter = init_variables['counter']['count1']
print('counter 1 value', counter)
y, mutated_variables = model.apply(init_variables, mutable=['counter'])
print('mutated variables:\n', mutated_variables)
print('output:\n', y)
# + [markdown] id="1IaC2RT1v65t"
# ## Combining mutable variables and immutable parameters
#
# We can combine mutable variables with immutable parameters.
# As an example, consider a simplified version of batch normalization, which
# computes the running mean of its inputs, and adds an optimizable offset (bias) term.
#
#
# + id="NXP19telv_Y_"
class BiasAdderWithRunningMean(nn.Module):
decay: float = 0.99
@nn.compact
def __call__(self, x):
is_initialized = self.has_variable('params', 'bias')
# variable(collection, name, init_fn, *init_args)
ra_mean = self.variable('batch_stats', 'mean', lambda s: jnp.zeros(s), x.shape[1:])
dummy_mutable = self.variable('mutables', 'dummy', lambda s: 42, 0)
# param(name, init_fn, *init_args)
bias = self.param('bias', lambda rng, shape: jnp.ones(shape), x.shape[1:])
if is_initialized:
ra_mean.value = self.decay * ra_mean.value + (1.0 - self.decay) * jnp.mean(x, axis=0, keepdims=True)
return x - ra_mean.value + bias
# + [markdown] id="x_WsMGY8xA_x"
#
# The initial variables are:
# params = (bias=1), batch_stats=(mean=0)
#
# If we pass in x=ones(N,D), the running average becomes
# $$
# 0.99*0 + (1-0.99)*1 = 0.01
# $$
# and the output becomes
# $$
# 1 - 0.01 + 1 = 1.99
# $$
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="dvXKCE8yxiTu" outputId="4ddb1117-32a0-481d-d8bb-876010d0e821"
key = random.PRNGKey(0)
N = 2
D = 5
x = jnp.ones((N,D))
model = BiasAdderWithRunningMean()
variables = model.init(key, x)
print('initial variables:\n', variables)
nonstats, stats = variables.pop('batch_stats')
print('nonstats', nonstats)
print('stats', stats)
# + colab={"base_uri": "https://localhost:8080/"} id="Ytr2_w9U12PT" outputId="30555a51-9b09-4ef2-8222-98c9b52e4a47"
y, mutables = model.apply(variables, x, mutable=['batch_stats'])
print('output', y)
print('mutables', mutables)
# + [markdown] id="B1g2GW3f3B-Z"
# To call the function with the updated batch stats, we have to stitch together the new mutated state with the old state, as shown below.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="cpBb21A72Bdj" outputId="d5ce7521-90c4-48a0-a180-4f02be5fe5f8"
variables = unfreeze(nonstats)
print(variables)
variables['batch_stats'] = mutables['batch_stats']
variables = freeze(variables)
print(variables)
# + [markdown] id="sa7nH74Y5-Lg"
# If we pass in x=2*ones(N,D), the running average gets updated to
# $$
# 0.99 * 0.01 + (1-0.99) * 2.0 = 0.0299
# $$
# and the output becomes
# $$
# 2 - 0.0299 + 1 = 2.9701
# $$
# + colab={"base_uri": "https://localhost:8080/"} id="t2dF2si51QN5" outputId="6177ee1c-41b7-40e6-d954-d0c1c85b0180"
x = 2*jnp.ones((N,D))
y, mutables = model.apply(variables, x, mutable=['batch_stats'])
print('output', y)
print('batch_stats', mutables)
assert np.allclose(y, 2.9701)
assert np.allclose(mutables['batch_stats']['mean'], 0.0299)
# + [markdown] id="cnBmgGxOoPKU"
# # Optimization
#
# Flax has several built-in (first-order) optimizers, as we illustrate below on a random linear function. (Note that we can also fit a model defined in flax using some other kind of optimizer, such as that provided by the [optax library](https://github.com/deepmind/optax).)
# + colab={"base_uri": "https://localhost:8080/"} id="OTHgj_pMra3H" outputId="d142c2bb-c725-47e6-8cba-5b55f9f16b48"
D = 5
key = jax.random.PRNGKey(0)
params = {'w': jax.random.normal(key, (D,))}
print(params)
x = jax.random.normal(key, (D,))
def loss(params):
w = params['w']
return jnp.dot(x, w)
loss_grad_fn = jax.value_and_grad(loss)
v, g = loss_grad_fn(params)
print(v)
print(g)
# + id="7KdmBHa8oWFY" colab={"base_uri": "https://localhost:8080/"} outputId="3695c5f5-339c-4e27-d80e-30d100ddae66"
from flax import optim
optimizer_def = optim.Momentum(learning_rate=0.1, beta=0.9)
print(optimizer_def)
optimizer = optimizer_def.create(params)
print(optimizer)
# + colab={"base_uri": "https://localhost:8080/"} id="1JpgauX_ox_w" outputId="c649c61c-93ba-41a0-a834-2c254a53a243"
for i in range(10):
params = optimizer.target
loss_val, grad = loss_grad_fn(params)
optimizer = optimizer.apply_gradient(grad)
params = optimizer.target
print('step {}, loss {:0.3f}, params {}'.format(i, loss_val, params))
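# + [markdown]
# As a sketch of the optax alternative mentioned above (this cell is not part of the original notebook and assumes the `optax` package is installed), the same loss can be minimized with an optax optimizer:
# +
import optax

tx = optax.sgd(learning_rate=0.1, momentum=0.9)
opt_state = tx.init(params)
for i in range(10):
    loss_val, grad = loss_grad_fn(params)
    updates, opt_state = tx.update(grad, opt_state)
    params = optax.apply_updates(params, updates)
    print('step {}, loss {:0.3f}'.format(i, loss_val))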
# + [markdown] id="_ITTDWT2ECxC"
# # Worked example: MLP for MNIST
#
# We demonstrate how to fit a shallow MLP to MNIST using Flax.
# We use this function:
# https://github.com/probml/pyprobml/blob/master/scripts/fit_flax.py
# To allow us to edit this file locally (in colab), and push commits back to github, we sync this colab with github. (For details see [this colab](https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/intro/colab_intro.ipynb), the cell labeled "Working with github".)
#
#
# + [markdown] id="vavamofruHS_"
# ## Import code
# + colab={"base_uri": "https://localhost:8080/"} id="L3KR7bMQWQ2A" outputId="6d6f555c-4da0-46dd-975c-ce7daeb24564"
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="3OhoJQUnuE1Q" outputId="d40b0ab5-8fb3-4286-a925-51e41be70d40"
from google.colab import drive
drive.mount('/content/drive')
# !ls /content/drive/MyDrive/ssh/
# + colab={"base_uri": "https://localhost:8080/"} id="ItaIH9dyocZA" outputId="aebdafd4-9c3c-4bb5-a9a7-f72dbe21f979"
# !rm -rf probml_tools*.*
# !wget https://raw.githubusercontent.com/probml/pyprobml/master/scripts/probml_tools.py
import probml_tools as pml
# + colab={"base_uri": "https://localhost:8080/"} id="rUYY3-N4uWYh" outputId="a9e455f0-0b49-4429-ad1d-689f5a6936cc"
# !rm -rf pyprobml
pml.git_ssh("git clone https://github.com/probml/pyprobml.git")
# + id="fjUVc9yKvFFP"
# %load_ext autoreload
# %autoreload 2
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="OQ7d5hfulh7r" outputId="744eedb7-72d1-4912-fe55-bd79c3fbd3fe"
from google.colab import files
files.view('/content/pyprobml/scripts/fit_flax.py')
# + colab={"base_uri": "https://localhost:8080/"} id="gKnFd4MFu_jE" outputId="0d715cf0-8b5c-495d-a1c4-4357d3fe1038"
import pyprobml.scripts.fit_flax as ff
ff.test()
# + [markdown] id="AMjR542vpGAQ"
# Edit the file, then commit changes.
# + colab={"base_uri": "https://localhost:8080/"} id="YJXwfqz0-_XJ" outputId="01fe932f-10f2-4486-c8bd-80ef64c06e68"
# If made any local changes to fit_flax.py, save them to github
# %cd /content/pyprobml
pml.git_ssh("git add scripts; git commit -m 'push from colab'; git push")
# %cd /content
# + [markdown] id="E_xSZi3v03pC"
# ## Data
# + colab={"base_uri": "https://localhost:8080/"} id="l4uNqjBIW0we" outputId="566ff9c6-ca6f-42a7-dbf4-abf2c78f6d53"
def process_record(batch):
image = batch['image']
label = batch['label']
# flatten image to vector
shape = image.get_shape().as_list()
D = np.prod(shape) # no batch dimension
image = tf.reshape(image, (D,))
# rescale to -1..+1
image = tf.cast(image, dtype=tf.float32)
image = ((image / 255.) - .5) * 2.
# convert to standard names
return {'X': image, 'y': label}
def load_mnist(split, batch_size):
dataset, info = tfds.load("mnist", split=split, with_info=True)
dataset = dataset.map(process_record)
if split=="train":
dataset = dataset.shuffle(10*batch_size, seed=0)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = tfds.as_numpy(dataset) # leave TF behind
num_examples = info.splits[split].num_examples
return iter(dataset), num_examples
batch_size = 100
train_iter, num_train = load_mnist("train", batch_size)
test_iter, num_test = load_mnist("test", batch_size)
num_epochs = 3
num_steps = num_train // batch_size
print(f'{num_epochs} epochs with batch size {batch_size} will take {num_steps} steps')
batch = next(train_iter)
print(batch['X'].shape)
print(batch['y'].shape)
# + [markdown] id="rLiWUSjR05BQ"
# ## Model
#
#
# + id="cLwAwqd4Nzvy"
class Model(nn.Module):
nhidden: int
nclasses: int
@nn.compact
def __call__(self, x):
if self.nhidden > 0:
x = nn.Dense(self.nhidden)(x)
x = nn.relu(x)
x = nn.Dense(self.nclasses)(x) # logits
x = nn.log_softmax(x) # log probabilities
return x
# + [markdown] id="9JsVFGfU628j"
# ## Training loop
#
# + colab={"base_uri": "https://localhost:8080/", "height": 497} id="KDAJthPTvxI7" outputId="3f277974-c0db-4c0e-c39d-04648ae16f8f"
model = Model(nhidden = 128, nclasses=10)
rng = jax.random.PRNGKey(0)
num_steps = 200
params, history = ff.fit_model(
model, rng, num_steps, train_iter, test_iter, print_every=20)
display(history)
# + colab={"base_uri": "https://localhost:8080/", "height": 278} id="RWv2Sspl8EAN" outputId="88ac8b9a-9bc6-4921-8e0c-71ec0d61210d"
plt.figure()
plt.plot(history['step'], history['test_accuracy'], 'o-', label='test accuracy')
plt.xlabel('num. minibatches')
plt.legend()
plt.show()
# + id="oWe69Z51Q3Kz"
|
python/oneflow/compatible/single_client/test/ops/test_broadcast_to_compatible_with.py | wangyuyue/oneflow | 3,285 | 12627619 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _of_broadcast_to_compatible_with(x, compatible_shape, x_shape=None):
assert isinstance(compatible_shape, (list, tuple))
if x_shape is None:
x_shape = x.shape
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.ListNumpy.Placeholder(shape=x_shape, dtype=flow.float)
):
compatible_var = [
flow.get_variable(
"compatible_var_{}".format(i),
shape=cp_shape,
dtype=flow.float,
initializer=flow.random_normal_initializer(),
trainable=False,
)
for (i, cp_shape) in enumerate(compatible_shape)
]
return flow.broadcast_to_compatible_with(x_def, compatible_var)
return broadcast_to_compatible_with_fn([x]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_shape=None, a_shape=None, b_shape=None
):
if x_shape is None:
x_shape = x.shape
if a_shape is None:
a_shape = a.shape
if b_shape is None:
b_shape = b.shape
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.ListNumpy.Placeholder(x_shape, dtype=flow.float),
a_def: oft.ListNumpy.Placeholder(a_shape, dtype=flow.float),
b_def: oft.ListNumpy.Placeholder(b_shape, dtype=flow.float),
):
return flow.broadcast_to_compatible_with(
x_def, [flow.identity(a_def), flow.identity(b_def)]
)
return broadcast_to_compatible_with_fn([x], [a], [b]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_grad(x, compatible_shape, dx_watcher):
assert isinstance(compatible_shape, (list, tuple))
assert callable(dx_watcher)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(type="train", function_config=func_config)
def broadcast_to_compatible_with_fn(
x_def: oft.Numpy.Placeholder(x.shape, dtype=flow.float)
):
x_var = flow.get_variable(
"x_var",
shape=x.shape,
dtype=flow.float,
initializer=flow.constant_initializer(0),
trainable=True,
)
compatible_var = [
flow.get_variable(
"compatible_var_{}".format(i),
shape=cp_shape,
dtype=flow.float,
initializer=flow.random_normal_initializer(),
trainable=False,
)
for (i, cp_shape) in enumerate(compatible_shape)
]
x_var = x_var + x_def
y = flow.broadcast_to_compatible_with(x_var, compatible_var)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(y)
flow.watch_diff(x_var, dx_watcher)
return y
return broadcast_to_compatible_with_fn(x).get().numpy()
@flow.unittest.skip_unless_1n1d()
class TestBroadcastToCompatibleWith(flow.unittest.TestCase):
def test_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((5, 2)).astype(np.float32)
compatible_shape = [[4, 5, 2], [4, 5, 1]]
ret = _of_broadcast_to_compatible_with(x, compatible_shape)
expected_ret = np.broadcast_to(x, [4, 5, 2])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((10, 6)).astype(np.float32)
x_static_shape = (15, 6)
a = np.random.standard_normal((3, 10, 6)).astype(np.float32)
a_static_shape = (3, 15, 6)
b = np.random.standard_normal((3, 10, 1)).astype(np.float32)
b_static_shape = (3, 15, 1)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [3, 10, 6])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with_case_2(test_case):
x = np.random.standard_normal((20, 1, 1)).astype(np.float32)
x_static_shape = (23, 1, 1)
a = np.random.standard_normal((11, 1)).astype(np.float32)
a_static_shape = (15, 1)
b = np.random.standard_normal((7,)).astype(np.float32)
b_static_shape = (8,)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [20, 11, 7])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_broadcast_to_compatible_with_grad(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[7, 1, 4], [5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_grad_case_2(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[1, 7, 5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [1, 7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_no_broadcast(test_case):
x = np.random.standard_normal((9, 9, 6)).astype(np.float32)
x_static_shape = (10, 9, 6)
compatible_shape = [[6], [9, 1]]
ret = _of_broadcast_to_compatible_with(x, compatible_shape, x_static_shape)
test_case.assertTrue(np.array_equal(x, ret))
if __name__ == "__main__":
unittest.main()
|
key.py | Chasbob/TwitchPlaysX | 256 | 12627635 | <reponame>Chasbob/TwitchPlaysX
# For Windows
# http://stackoverflow.com/questions/1823762/sendkeys-for-python-3-1-on-windows
# https://stackoverflow.com/a/38888131
import win32api
import win32con
import win32gui
import time, sys
keyDelay = 0.1
# https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
keymap = {
"Up": win32con.VK_UP,
"Left": win32con.VK_LEFT,
"Down": win32con.VK_DOWN,
"Right": win32con.VK_RIGHT,
"b": 0x42, # ord("B"),
"a": 0x41, # ord("A"),
"y": 0x59, # ord("Y"), # for DS
"x": 0x58, # ord("X"), # for DS
"s": 0x53, # ord("S"), # Start
"e": 0x45, # ord("E"), # Select
}
# this way has to keep window in focus
def sendKey(button):
win32api.keybd_event(keymap[button], 0, 0, 0)
time.sleep(keyDelay)
win32api.keybd_event(keymap[button], 0, win32con.KEYEVENTF_KEYUP, 0)
def SimpleWindowCheck(windowName):
window = None
try:
window = win32gui.FindWindow(windowName, None)
except win32gui.error:
try:
window = win32gui.FindWindow(None, windowName)
except win32gui.error:
return False
else:
return window
else:
return window
if __name__ == "__main__":
windowName = sys.argv[1]
key = sys.argv[2]
winId = SimpleWindowCheck(windowName)
# winId = None
if not (winId):
windowList = []
def enumHandler(hwnd, list):
if windowName in win32gui.GetWindowText(hwnd):
list.append(hwnd)
win32gui.EnumWindows(enumHandler, windowList)
# only the first id, may need to try the others
winId = windowList[0]
# can check with this
for hwnd in windowList:
hwndChild = win32gui.GetWindow(hwnd, win32con.GW_CHILD)
# print("window title/id/child id: ", win32gui.GetWindowText(hwnd), "/", hwnd, "/", hwndChild)
win32gui.ShowWindow(winId, win32con.SW_SHOWNORMAL)
win32gui.SetForegroundWindow(winId)
sendKey(key) |
example_problems/tutorial/bit_edit_to_zero/old/gen/valida.py | DottaPaperella/TALight | 409 | 12627653 | <reponame>DottaPaperella/TALight
#!/usr/bin/env python2
|
examples/Backup.py | compose-x/troposphere | 4,573 | 12627654 | <gh_stars>1000+
from troposphere import Template, backup
from troposphere.iam import Role
template = Template("AWS Backup")
template.set_version()
backup_vault = template.add_resource(
backup.BackupVault(
"Vault",
BackupVaultName="my-backup-vault",
BackupVaultTags=dict(
Project="Project",
Environment="Environment",
Classifier="Classifier",
),
# EncryptionKeyArn="KmsKeyId",
)
)
vault_arn = "arn:aws:backup:ca-central-1:111112222212:backup-vault:TestVault"
copy_action = backup.CopyActionResourceType(
DestinationBackupVaultArn=vault_arn,
Lifecycle=backup.LifecycleResourceType(DeleteAfterDays=31),
)
backup_plan = template.add_resource(
backup.BackupPlan(
"Backup",
BackupPlan=backup.BackupPlanResourceType(
BackupPlanName="BackupPlan",
BackupPlanRule=[
backup.BackupRuleResourceType(
TargetBackupVault=backup_vault.ref(),
Lifecycle=backup.LifecycleResourceType(DeleteAfterDays=31),
RecoveryPointTags=dict(
Project="Project",
Environment="Environment",
Classifier="Classifier",
),
RuleName="Rule 1",
ScheduleExpression="cron(0 0/12 * * ? *)",
CopyActions=[copy_action],
)
],
),
BackupPlanTags=dict(
Project="Project",
Environment="Environment",
Classifier="Classifier",
),
)
)
service_role = template.add_resource(
Role(
"BackupServiceRole",
AssumeRolePolicyDocument={
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": ["backup.amazonaws.com"]},
"Action": ["sts:AssumeRole"],
}
]
},
ManagedPolicyArns=[
(
"arn:aws:iam::aws:policy/service-role/"
"AWSBackupServiceRolePolicyForBackup"
),
(
"arn:aws:iam::aws:policy/service-role/"
"AWSBackupServiceRolePolicyForRestores"
),
],
)
)
template.add_resource(
backup.BackupSelection(
"StorageBackupSelectionByTags",
BackupSelection=backup.BackupSelectionResourceType(
IamRoleArn=service_role.get_att("Arn"),
ListOfTags=[
backup.ConditionResourceType(
ConditionKey="Backup",
ConditionType="STRINGEQUALS",
ConditionValue="True",
)
],
SelectionName="MySelection",
),
BackupPlanId=backup_plan.ref(),
)
)
print(template.to_json())
|
data_management/databases/make_detection_db_for_viewing.py | dnarqq/WildHack | 402 | 12627669 | #
# make_detection_db_for_viewing.py
#
# Given a .json file with ground truth bounding boxes, and a .p file containing detections for the same images,
# creates a new .json file with separate classes for ground truth and detection, suitable for viewing in the Visipedia
# annotation tool.
#
#%% Imports and constants
import json
import pickle
import uuid
detection_file = '/ai4efs/models/object_detection/inception_resnet_v2_atrous/train_on_eccv_18_and_imerit_2/predictions/ss_test.p'
gt_db = '/ai4efs/annotations/modified_annotations/imerit_ss_annotations_1.json'
output_file = '/ai4efs/models/object_detection/inception_resnet_v2_atrous/train_on_eccv_18_and_imerit_2/predictions/ss_test_detection_db.json'
#%% Main function
def make_detection_db(detection_file, gt_db, det_thresh=0.9):
    with open(detection_file, 'rb') as f:
detection_results = pickle.load(f)
with open(gt_db,'r') as f:
data = json.load(f)
images = data['images']
# im_id_to_im = {im['id']:im for im in images}
for im in images:
im['id'] = im['id'].split('/')[-1]
print(images[0])
annotations = data['annotations']
for ann in annotations:
ann['image_id'] = ann['image_id'].split('/')[-1]
# make new categories to distinguish between ground truth and detections
categories = [{'name': 'gt', 'id': 0},{'name':'det','id':1}]
# update all gt annotations to be class "gt"
for ann in annotations:
ann['category_id'] = 0
# collect all detections by image
per_image_detections = {detection_results['images'][idx] :{'bboxes': detection_results['detections'][idx], 'scores': detection_results['detection_scores'][idx], 'labels':detection_results['detection_labels'][idx]} for idx in range(len(detection_results['images']))}
# keep any detection with score above det_thresh
    for im, dets in per_image_detections.items():
for idx in range(len(dets['bboxes'])):
if dets['scores'][idx] >= det_thresh:
new_ann = {}
new_ann['image_id'] = im.split('/')[-1]
new_ann['category_id'] = 1 #category "det" for detection
#need to convert bbox from [x1,y1,x2,y2] to [x,y,w,h]
bbox = dets['bboxes'][idx]
bbox[2] = bbox[2] - bbox[0]
bbox[3] = bbox[3] - bbox[1]
new_ann['bbox'] = bbox
new_ann['score'] = float(dets['scores'][idx])
new_ann['id'] = str(uuid.uuid1())
annotations.append(new_ann)
# add "info" and "licenses" for annotation tools to function
info = data['info']
    info['description'] = 'detections above {:0.2f}'.format(det_thresh)
licenses = []
# create new db
new_data = {}
new_data['images'] = images
new_data['categories'] = categories
new_data['annotations'] = annotations
new_data['licenses'] = licenses
new_data['info'] = info
return new_data
#%% Command-line handling
if __name__ == '__main__':
new_data = make_detection_db(detection_file, gt_db)
with open(output_file,'w') as f:
json.dump(new_data,f)
|
loaner/web_app/backend/api/auth.py | gng-demo/travisfix | 175 | 12627687 | <filename>loaner/web_app/backend/api/auth.py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to enforce authentication on endpoints.method.
Each user of the API methods has one or more roles, and these roles grant
specific permissions. For API methods with a permission specified, this
auth.method decorator checks that a user has at least one role
with that permission. For methods with no permission specified, it simply checks
that the user is logged in and a member of the domain. Users that are
superadmins have all permissions by default.
Usage:
-----
The following method will execute if the current user is authenticated properly.
@auth.method(
chrome_message.ChromeRequest,
chrome_message.ChromeResponse,
name='heartbeat',
path='heartbeat',
http_method='GET')
def do_something(self, request):
...
The following method will execute if the current user has a role with the
permission "view."
# configuration of an endpoints method with enforced permission.
@loaner_endpoints.authed_method(
chrome_message.ChromeRequest,
chrome_message.ChromeResponse,
name='heartbeat',
path='heartbeat',
http_method='GET',
permission='view')
def do_something(self, request):
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
import endpoints
from loaner.web_app import constants
from loaner.web_app.backend.lib import user as user_lib
from loaner.web_app.backend.models import config_model
from loaner.web_app.backend.models import user_model
_FORBIDDEN_MSG = (
'Permission check failed. Please make sure you are logged in and have the '
'correct permission to perform this action.')
def method(*args, **kwargs):
"""Configures an endpoint method and enforces permissions."""
def auth_method_decorator(auth_function):
"""Decorator for auth_method."""
permission = kwargs.pop('permission', None)
auth_function = _check_auth(permission)(auth_function)
return endpoints.method(*args, **kwargs)(auth_function)
return auth_method_decorator
def _check_auth(permission):
"""Auth check for method calls."""
def auth_check_decorator(function_without_auth_check):
"""Decorator that adds an auth check to method calls."""
@functools.wraps(function_without_auth_check)
def wrapper(*args, **kwargs):
"""Wrapper for auth check decorator."""
if constants.ON_LOCAL:
logging.info(
'Application is running locally. Skipping all auth checks.')
return function_without_auth_check(*args, **kwargs)
# Get logged in user.
try:
user_email = user_lib.get_user_email()
except user_lib.UserNotFound as err:
raise endpoints.UnauthorizedException(str(err))
# Only allow domain users.
_forbid_non_domain_users(user_email)
datastore_user = user_model.User.get_user(user_email)
# If the user is not a superadmin, we need to check and see if the
# application is in maintenance mode.
if not datastore_user.superadmin:
_is_maintenance_mode()
# If there are no specified permissions, continue with the function.
if not permission:
return function_without_auth_check(*args, **kwargs)
# If there are permissions get the datastore user and compare permissions.
if datastore_user.superadmin or (
permission in datastore_user.get_permissions()):
return function_without_auth_check(*args, **kwargs)
# Logged in user does not have correct permissions.
raise endpoints.ForbiddenException(_FORBIDDEN_MSG)
return wrapper
return auth_check_decorator
def _forbid_non_domain_users(user_email):
"""Checks to make sure that the user is a domain user.
Args:
user_email: str, the user email.
Raises:
UnauthorizedException: An error will occur when user is not from app domain.
"""
if user_email.split('@')[1] not in constants.APP_DOMAINS:
raise endpoints.UnauthorizedException(
'{} is not an authorized user for one of the domains: {}'.format(
user_email, ', '.join(constants.APP_DOMAINS)))
def _is_maintenance_mode():
"""Checks to see if the application is under maintenance.
Raises:
endpoints.InternalServerErrorException: If the application is currently
under maintenance.
"""
if (constants.MAINTENANCE or not
config_model.Config.get('bootstrap_completed')):
raise endpoints.InternalServerErrorException(
'The application is currently undergoing maintenance.')
|
napari/utils/events/containers/_set.py | MaksHess/napari | 1,345 | 12627689 | <filename>napari/utils/events/containers/_set.py
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterable, Iterator, MutableSet, TypeVar
from ....utils.events import EmitterGroup
from ....utils.translations import trans
_T = TypeVar("_T")
if TYPE_CHECKING:
from pydantic.fields import ModelField
class EventedSet(MutableSet[_T]):
"""An unordered collection of unique elements.
Parameters
----------
data : iterable, optional
Elements to initialize the set with.
Events
------
changed (added: Set[_T], removed: Set[_T])
Emitted when the set changes, includes item(s) that have been added
and/or removed from the set.
"""
events: EmitterGroup
def __init__(self, data: Iterable[_T] = ()):
_events = {'changed': None}
# For inheritance: If the mro already provides an EmitterGroup, add...
if hasattr(self, 'events') and isinstance(self.events, EmitterGroup):
self.events.add(**_events)
else:
# otherwise create a new one
self.events = EmitterGroup(source=self, **_events)
self._set: set[_T] = set()
self.update(data)
# #### START Required Abstract Methods
def __contains__(self, x: Any) -> bool:
return x in self._set
def __iter__(self) -> Iterator[_T]:
return iter(self._set)
def __len__(self) -> int:
return len(self._set)
def _pre_add_hook(self, value):
# for subclasses to potentially check value before adding
return value
def _emit_change(self, added=set(), removed=set()):
# provides a hook for subclasses to update internal state before emit
self.events.changed(added=added, removed=removed)
def add(self, value: _T) -> None:
"""Add an element to the set, if not already present."""
if value not in self:
value = self._pre_add_hook(value)
self._set.add(value)
self._emit_change(added={value}, removed={})
def discard(self, value: _T) -> None:
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
if value in self:
self._set.discard(value)
self._emit_change(added={}, removed={value})
# #### END Required Abstract Methods
# methods inherited from Set:
# __le__, __lt__, __eq__, __ne__, __gt__, __ge__, __and__, __or__,
# __sub__, __xor__, and isdisjoint
# methods inherited from MutableSet:
# clear, pop, remove, __ior__, __iand__, __ixor__, and __isub__
# The rest are for parity with builtins.set:
def clear(self) -> None:
if self._set:
values = set(self)
self._set.clear()
self._emit_change(added={}, removed=values)
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self._set)})"
def update(self, others: Iterable[_T] = ()) -> None:
"""Update this set with the union of this set and others"""
to_add = set(others).difference(self._set)
if to_add:
to_add = {self._pre_add_hook(i) for i in to_add}
self._set.update(to_add)
self._emit_change(added=set(to_add), removed={})
def copy(self) -> EventedSet[_T]:
"""Return a shallow copy of this set."""
return type(self)(self._set)
def difference(self, others: Iterable[_T] = ()) -> EventedSet[_T]:
"""Return set of all elements that are in this set but not other."""
return type(self)(self._set.difference(others))
def difference_update(self, others: Iterable[_T] = ()) -> None:
"""Remove all elements of another set from this set."""
to_remove = self._set.intersection(others)
if to_remove:
self._set.difference_update(to_remove)
self._emit_change(added={}, removed=set(to_remove))
def intersection(self, others: Iterable[_T] = ()) -> EventedSet[_T]:
"""Return all elements that are in both sets as a new set."""
return type(self)(self._set.intersection(others))
def intersection_update(self, others: Iterable[_T] = ()) -> None:
"""Remove all elements of in this set that are not present in other."""
self.difference_update(self._set.symmetric_difference(others))
def issubset(self, others: Iterable[_T]) -> bool:
"""Returns whether another set contains this set or not"""
return self._set.issubset(others)
def issuperset(self, others: Iterable[_T]) -> bool:
"""Returns whether this set contains another set or not"""
return self._set.issuperset(others)
def symmetric_difference(self, others: Iterable[_T]) -> EventedSet[_T]:
"""Returns set of elements that are in exactly one of the sets"""
return type(self)(self._set.symmetric_difference(others))
def symmetric_difference_update(self, others: Iterable[_T]) -> None:
"""Update set to the symmetric difference of itself and another.
This will remove any items in this set that are also in `other`, and
add any items in others that are not present in this set.
"""
to_add = set(others).difference(self._set)
to_remove = self._set.intersection(others)
self._set.difference_update(to_remove)
self._set.update(to_add)
self._emit_change(added=to_add, removed=to_remove)
def union(self, others: Iterable[_T] = ()) -> EventedSet[_T]:
"""Return a set containing the union of sets"""
return type(self)(self._set.union(others))
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v, field: ModelField):
"""Pydantic validator."""
from pydantic.utils import sequence_like
if not sequence_like(v):
raise TypeError(
trans._(
'Value is not a valid sequence: {value}',
deferred=True,
value=v,
)
)
if not field.sub_fields:
return cls(v)
type_field = field.sub_fields[0]
errors = []
for i, v_ in enumerate(v):
_valid_value, error = type_field.validate(v_, {}, loc=f'[{i}]')
if error:
errors.append(error)
if errors:
from pydantic import ValidationError
raise ValidationError(errors, cls) # type: ignore
return cls(v)
def _json_encode(self):
"""Return an object that can be used by json.dumps."""
return list(self)
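

# A minimal usage sketch (hypothetical snippet, not part of napari's test suite), assuming the
# connected callback receives an event object exposing the `added` and `removed` sets:
#
#   s = EventedSet([1, 2])
#   s.events.changed.connect(lambda e: print(e.added, e.removed))
#   s.add(3)       # emits changed(added={3}, removed={})
#   s.discard(1)   # emits changed(added={}, removed={1})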
|
neptune/new/internal/utils/runningmode.py | Raalsky/neptune-client | 254 | 12627696 | #
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
def in_interactive():
"""Based on: https://stackoverflow.com/a/2356427/1565454"""
return hasattr(sys, "ps1")
def in_notebook():
"""Based on: https://stackoverflow.com/a/22424821/1565454"""
try:
from IPython import get_ipython
ipy = get_ipython()
if (
ipy is None
or not hasattr(ipy, "config")
or not isinstance(ipy.config, dict)
or "IPKernelApp" not in ipy.config
):
return False
except ImportError:
return False
return True
|
homeassistant/components/netgear/button.py | MrDelik/core | 30,023 | 12627713 | """Support for Netgear Button."""
from collections.abc import Callable, Coroutine
from dataclasses import dataclass
from typing import Any
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, KEY_COORDINATOR, KEY_ROUTER
from .router import NetgearRouter, NetgearRouterEntity
@dataclass
class NetgearButtonEntityDescriptionRequired:
"""Required attributes of NetgearButtonEntityDescription."""
action: Callable[[NetgearRouter], Callable[[], Coroutine[Any, Any, None]]]
@dataclass
class NetgearButtonEntityDescription(
ButtonEntityDescription, NetgearButtonEntityDescriptionRequired
):
"""Class describing Netgear button entities."""
BUTTONS = [
NetgearButtonEntityDescription(
key="reboot",
name="Reboot",
device_class=ButtonDeviceClass.RESTART,
entity_category=EntityCategory.CONFIG,
action=lambda router: router.async_reboot,
)
]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up button for Netgear component."""
router = hass.data[DOMAIN][entry.entry_id][KEY_ROUTER]
coordinator = hass.data[DOMAIN][entry.entry_id][KEY_COORDINATOR]
async_add_entities(
NetgearRouterButtonEntity(coordinator, router, entity_description)
for entity_description in BUTTONS
)
class NetgearRouterButtonEntity(NetgearRouterEntity, ButtonEntity):
"""Netgear Router button entity."""
entity_description: NetgearButtonEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
router: NetgearRouter,
entity_description: NetgearButtonEntityDescription,
) -> None:
"""Initialize a Netgear device."""
super().__init__(coordinator, router)
self.entity_description = entity_description
self._name = f"{router.device_name} {entity_description.name}"
self._unique_id = f"{router.serial_number}-{entity_description.key}"
async def async_press(self) -> None:
"""Triggers the button press service."""
async_action = self.entity_description.action(self._router)
await async_action()
@callback
def async_update_device(self) -> None:
"""Update the Netgear device."""
|
notebooks-text-format/flow_2d_mlp.py | arpitvaghela/probml-notebooks | 166 | 12627722 | <reponame>arpitvaghela/probml-notebooks
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/flow_2d_mlp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="UNsR5KGRU3HI"
# # Mapping a 2d standard Gaussian to a more complex distribution using an invertible MLP
#
# Author: <NAME>
#
# Based on the example by <NAME> from
# https://blog.evjang.com/2018/01/nf1.html
#
# Reproduces Figure 23.1 of the book *Probabilistic Machine Learning: Advanced Topics* by <NAME>
# + [markdown] id="ygG6LSeF4m2t"
# ## Imports and definitions
# + id="aGna32BcyeTI"
from typing import Sequence
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import optax
Array = jnp.ndarray
PRNGKey = Array
prng = hk.PRNGSequence(42)
# + [markdown] id="EdCdDC-A4qdn"
# ## Create flow model
# + id="JibqwcduyKTU"
class Parameter(hk.Module):
"""Helper Haiku module for defining model parameters."""
def __init__(self,
module_name: str,
param_name: str,
shape: Sequence[int],
init: hk.initializers.Initializer):
"""Initializer.
Args:
module_name: name of the module.
param_name: name of the parameter.
shape: shape of the parameter.
init: initializer of the parameter value.
"""
super().__init__(name=module_name)
self._param = hk.get_parameter(param_name, shape=shape, init=init)
def __call__(self) -> Array:
return self._param
class LeakyRelu(distrax.Lambda):
"""Leaky ReLU elementwise bijector."""
def __init__(self, slope: Array):
"""Initializer.
Args:
slope: the slope for x < 0. Must be positive.
"""
forward = lambda x: jnp.where(x >= 0., x, x * slope)
inverse = lambda y: jnp.where(y >= 0., y, y / slope)
forward_log_det_jacobian = lambda x: jnp.where(x >= 0., 0., jnp.log(slope))
inverse_log_det_jacobian = lambda y: jnp.where(y >= 0., 0., -jnp.log(slope))
super().__init__(
forward=forward,
inverse=inverse,
forward_log_det_jacobian=forward_log_det_jacobian,
inverse_log_det_jacobian=inverse_log_det_jacobian,
event_ndims_in=0)
def make_model() -> distrax.Transformed:
"""Creates the flow model."""
num_layers = 6
layers = []
for _ in range(num_layers - 1):
# Each intermediate layer is an affine transformation followed by a leaky
# ReLU nonlinearity.
matrix = Parameter(
'affine',
'matrix',
shape=[2, 2],
init=hk.initializers.Identity())()
bias = Parameter(
'affine',
'bias',
shape=[2],
init=hk.initializers.TruncatedNormal(2.))()
affine = distrax.UnconstrainedAffine(matrix, bias)
slope = Parameter('nonlinearity', 'slope', shape=[2], init=jnp.ones)()
nonlinearity = distrax.Block(LeakyRelu(slope), 1)
layers.append(distrax.Chain([nonlinearity, affine]))
# The final layer is just an affine transformation.
matrix = Parameter(
'affine',
'matrix',
shape=[2, 2],
init=hk.initializers.Identity())()
bias = Parameter(
'affine',
'bias',
shape=[2],
init=jnp.zeros)()
affine = distrax.UnconstrainedAffine(matrix, bias)
layers.append(affine)
flow = distrax.Chain(layers[::-1])
base = distrax.MultivariateNormalDiag(
loc=jnp.zeros(2),
scale_diag=jnp.ones(2))
return distrax.Transformed(base, flow)
@hk.without_apply_rng
@hk.transform
def model_log_prob(x: Array) -> Array:
model = make_model()
return model.log_prob(x)
@hk.without_apply_rng
@hk.transform
def model_sample(key: PRNGKey, num_samples: int) -> Array:
model = make_model()
return model.sample(seed=key, sample_shape=[num_samples])
# + [markdown] id="uq8dBvLz6aVK"
# ## Define target distribution
# + colab={"height": 281} id="V9SGQ83H1DO4" outputId="4f4f7100-5d2a-44e2-9b51-86f6e2e9f517"
def target_sample(key: PRNGKey, num_samples: int) -> Array:
"""Generates samples from target distribution.
Args:
key: a PRNG key.
num_samples: number of samples to generate.
Returns:
An array of shape [num_samples, 2] containing the samples.
"""
key1, key2 = jax.random.split(key)
x = 0.6 * jax.random.normal(key1, [num_samples])
y = 0.8 * x ** 2 + 0.2 * jax.random.normal(key2, [num_samples])
return jnp.concatenate([y[:, None], x[:, None]], axis=-1)
# Plot samples from target distribution.
data = target_sample(next(prng), num_samples=1000)
plt.plot(data[:, 0], data[:, 1], '.', color='red', label='Target')
plt.axis('equal')
plt.title('Samples from target distribution')
plt.legend();
# + [markdown] id="zPFHR0Sd8joE"
# ## Train model
# + colab={"height": 281} id="gsnjWDi90tw1" outputId="0791fd77-a7e5-4d28-a272-6e8a2267bf4d"
# Initialize model parameters.
params = model_sample.init(next(prng), next(prng), num_samples=1)
# Plot samples from the untrained model.
x = target_sample(next(prng), num_samples=1000)
y = model_sample.apply(params, next(prng), num_samples=1000)
plt.plot(x[:, 0], x[:, 1], '.', color='red', label='Target')
plt.plot(y[:, 0], y[:, 1], '.', color='green', label='Model')
plt.axis('equal')
plt.title('Samples from untrained model')
plt.legend();
# + id="ZRQaTdDN1F7K" outputId="f551b6c7-698d-457d-89f6-440b535d5a82"
# Loss function is negative log likelihood.
loss_fn = jax.jit(lambda params, x: -jnp.mean(model_log_prob.apply(params, x)))
# Optimizer.
optimizer = optax.adam(1e-3)
opt_state = optimizer.init(params)
# Training loop.
for i in range(5000):
data = target_sample(next(prng), num_samples=100)
loss, g = jax.value_and_grad(loss_fn)(params, data)
updates, opt_state = optimizer.update(g, opt_state)
params = optax.apply_updates(params, updates)
if i % 100 == 0:
print(f'Step {i}, loss = {loss:.3f}')
# + colab={"height": 281} id="VMuj1oH11MOu" outputId="0aeb750e-e4d3-453a-ca75-9f572c383a5e"
# Plot samples from the trained model.
x = target_sample(next(prng), num_samples=1000)
y = model_sample.apply(params, next(prng), num_samples=1000)
plt.plot(x[:, 0], x[:, 1], '.', color='red', label='Target')
plt.plot(y[:, 0], y[:, 1], '.', color='green', label='Model')
plt.axis('equal')
plt.title('Samples from trained model')
plt.legend();
# + [markdown] id="XAlCxXqq_cqj"
# ## Create plot with intermediate distributions
# + id="_8kGzlUO1Oli"
@hk.without_apply_rng
@hk.transform
def model_sample_intermediate(key: PRNGKey, num_samples: int) -> Array:
model = make_model()
samples = []
x = model.distribution.sample(seed=key, sample_shape=[num_samples])
samples.append(x)
for layer in model.bijector.bijectors[::-1]:
x = layer.forward(x)
samples.append(x)
return samples
xs = model_sample_intermediate.apply(params, next(prng), num_samples=2000)
# + colab={"height": 237} id="NbjnETx-1Q67" outputId="89171275-4371-40a8-875c-96fec3119f59"
plt.rcParams['figure.figsize'] = [2 * len(xs), 3]
fig, axs = plt.subplots(1, len(xs))
fig.tight_layout()
color = xs[0][:, 1]
cm = plt.cm.get_cmap('gnuplot2')
for i, (x, ax) in enumerate(zip(xs, axs)):
ax.scatter(x[:, 0], x[:, 1], s=10, cmap=cm, c=color)
ax.axis('equal')
if i == 0:
title = 'Base distribution'
else:
title = f'Layer {i}'
ax.set_title(title)
|
wrapcache/adapter/MemcachedAdapter.py | Max1993Liu/wrapcache | 117 | 12627742 | #-*-coding: utf-8 -*-
'''
Memcached Adapter object.
'''
from wrapcache.adapter.BaseAdapter import BaseAdapter
from wrapcache.adapter.CacheException import CacheExpiredException, DBNotSetException
class MemcachedAdapter(BaseAdapter):
'''
use for memcached cache
'''
def __init__(self, timeout = -1):
super(MemcachedAdapter, self).__init__(timeout = timeout)
if not MemcachedAdapter.db:
MemcachedAdapter.db = None
def _check_db_instanse(self):
if MemcachedAdapter.db == None:
raise DBNotSetException('memcached instanse not set, use MemcachedAdapter.db = memcache_instance before use.')
def get(self, key):
self._check_db_instanse()
value = MemcachedAdapter.db.get(key)
if value == None:
raise CacheExpiredException(key)
return value
def set(self, key, value):
MemcachedAdapter.db.set(key, value, time = self.timeout)
return True
def remove(self, key):
try:
v = self.get(key)
MemcachedAdapter.db.delete(key)
return v
		except CacheExpiredException:
return False
def flush(self):
self._check_db_instanse()
MemcachedAdapter.db.flush_all()
return True
if __name__ == '__main__':
import unittest, memcache, time
class TestCase(unittest.TestCase):
def setUp(self):
#init redis instance
self.test_class = MemcachedAdapter(timeout = 3)
def tearDown(self):
pass
def test_memory_adapter(self):
# test redis error
self.assertRaises(DBNotSetException, self.test_class.get, 'test_key')
memcache_inst = memcache.Client(['10.246.14.164:11211'])
			MemcachedAdapter.db = memcache_inst  # initialize the decorator cache
			self.assertRaises(CacheExpiredException, self.test_class.get, 'test_key')  # cannot connect to this host
memcache_inst = memcache.Client(['10.246.14.165:11211'])
			MemcachedAdapter.db = memcache_inst  # initialize the decorator cache
key = 'test_key_1'
value = str(time.time())
#test set / get
self.test_class.set(key, value)
self.assertEqual(self.test_class.get(key).decode('utf-8'), value)
#test remove
self.test_class.set(key, value)
self.test_class.remove(key)
self.assertRaises(CacheExpiredException, self.test_class.get, key)
#test flush
self.test_class.set(key, value)
self.test_class.flush()
self.assertRaises(CacheExpiredException, self.test_class.get, key)
unittest.main() |
plugins/modules/oci_resource_manager_job.py | slmjy/oci-ansible-collection | 108 | 12627746 | <reponame>slmjy/oci-ansible-collection<filename>plugins/modules/oci_resource_manager_job.py
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_resource_manager_job
short_description: Manage a Job resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Job resource in Oracle Cloud Infrastructure
- For I(state=present), creates a job.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
stack_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stack that is associated with the current job.
- Required for create using I(state=present).
type: str
display_name:
description:
- Description of the job.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
operation:
description:
- Terraform-specific operation to execute.
type: str
job_operation_details:
description:
- ""
type: dict
suboptions:
operation:
description:
- Terraform-specific operation to execute.
type: str
choices:
- "IMPORT_TF_STATE"
- "APPLY"
- "PLAN"
- "DESTROY"
required: true
tf_state_base64_encoded:
description:
- Base64-encoded state file
- Required when operation is 'IMPORT_TF_STATE'
type: str
terraform_advanced_options:
description:
- ""
- Applicable when operation is one of ['DESTROY', 'APPLY', 'PLAN']
type: dict
suboptions:
is_refresh_required:
description:
- "Specifies whether to refresh the state for each resource before running the job (operation).
Refreshing the state can affect performance. Consider setting to `false` if the configuration includes several resources.
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
- Applicable when operation is 'APPLY'
type: bool
parallelism:
description:
- "Limits the number of concurrent Terraform operations when L(walking the
graph,https://www.terraform.io/docs/internals/graph.html#walking-the-graph).
Use this parameter to help debug Terraform issues or to accomplish certain special use cases.
A higher value might cause resources to be throttled.
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
- Applicable when operation is 'APPLY'
type: int
detailed_log_level:
description:
- "Enables detailed logs at the specified verbosity for running the job (operation).
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
- Applicable when operation is 'APPLY'
type: str
choices:
- "ERROR"
- "WARN"
- "INFO"
- "DEBUG"
- "TRACE"
execution_plan_strategy:
description:
- Specifies the source of the execution plan to apply.
Use `AUTO_APPROVED` to run the job without an execution plan.
- Applicable when operation is 'APPLY'
- Required when operation is 'DESTROY'
type: str
execution_plan_job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of a plan job, for use when specifying
`FROM_PLAN_JOB_ID` as the `executionPlanStrategy`.
- Applicable when operation is 'APPLY'
type: str
apply_job_plan_resolution:
description:
- ""
type: dict
suboptions:
plan_job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) that specifies the most recently executed plan
job.
type: str
is_use_latest_job_id:
description:
- Specifies whether to use the L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the most recently run
plan job.
`True` if using the latest job L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). Must be a plan job
that completed successfully.
type: bool
is_auto_approved:
description:
- Specifies whether to use the configuration directly, without reference to a Plan job.
`True` if using the configuration directly. Note that it is not necessary
for a Plan job to have run successfully.
type: bool
freeform_tags:
description:
- "Free-form tags associated with this resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the job.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
is_forced:
description:
- Indicates whether a forced cancellation is requested for the job while it was running.
A forced cancellation can result in an incorrect state file.
For example, the state file might not reflect the exact state of the provisioned resources.
type: bool
state:
description:
- The state of the Job.
- Use I(state=present) to create or update a Job.
- Use I(state=absent) to delete a Job.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create job
oci_resource_manager_job:
# required
stack_id: "ocid1.stack.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
operation: operation_example
job_operation_details:
# required
operation: IMPORT_TF_STATE
tf_state_base64_encoded: tf_state_base64_encoded_example
apply_job_plan_resolution:
# optional
plan_job_id: "ocid1.planjob.oc1..xxxxxxEXAMPLExxxxxx"
is_use_latest_job_id: true
is_auto_approved: true
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update job
oci_resource_manager_job:
# required
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update job using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_job:
# required
display_name: display_name_example
# optional
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete job
oci_resource_manager_job:
# required
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
# optional
is_forced: true
- name: Delete job using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_job:
# required
display_name: display_name_example
state: absent
"""
RETURN = """
job:
description:
- Details of the Job resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the job.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
stack_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stack that is associated with the job.
returned: on success
type: str
sample: "ocid1.stack.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which the job's associated stack
resides.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The job's display name.
returned: on success
type: str
sample: display_name_example
operation:
description:
- The type of job executing.
returned: on success
type: str
sample: PLAN
job_operation_details:
description:
- ""
returned: on success
type: complex
contains:
operation:
description:
- Terraform-specific operation to execute.
returned: on success
type: str
sample: APPLY
terraform_advanced_options:
description:
- ""
returned: on success
type: complex
contains:
is_refresh_required:
description:
- "Specifies whether to refresh the state for each resource before running the job (operation).
Refreshing the state can affect performance. Consider setting to `false` if the configuration includes several resources.
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
returned: on success
type: bool
sample: true
parallelism:
description:
- "Limits the number of concurrent Terraform operations when L(walking the
graph,https://www.terraform.io/docs/internals/graph.html#walking-the-graph).
Use this parameter to help debug Terraform issues or to accomplish certain special use cases.
A higher value might cause resources to be throttled.
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
returned: on success
type: int
sample: 56
detailed_log_level:
description:
- "Enables detailed logs at the specified verbosity for running the job (operation).
Used with the following operations: `PLAN`, `APPLY`, `DESTROY`."
returned: on success
type: str
sample: ERROR
execution_plan_strategy:
description:
- Specifies the source of the execution plan to apply.
Use `AUTO_APPROVED` to run the job without an execution plan.
returned: on success
type: str
sample: FROM_PLAN_JOB_ID
execution_plan_job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the plan job that contains the execution
plan used for this job,
or `null` if no execution plan was used.
returned: on success
type: str
sample: "ocid1.executionplanjob.oc1..xxxxxxEXAMPLExxxxxx"
apply_job_plan_resolution:
description:
- ""
returned: on success
type: complex
contains:
plan_job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) that specifies the most recently executed plan
job.
returned: on success
type: str
sample: "ocid1.planjob.oc1..xxxxxxEXAMPLExxxxxx"
is_use_latest_job_id:
description:
- Specifies whether to use the L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the most recently
run plan job.
`True` if using the latest job L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). Must be a plan job
that completed successfully.
returned: on success
type: bool
sample: true
is_auto_approved:
description:
- Specifies whether to use the configuration directly, without reference to a Plan job.
`True` if using the configuration directly. Note that it is not necessary
for a Plan job to have run successfully.
returned: on success
type: bool
sample: true
resolved_plan_job_id:
description:
- Deprecated. Use the property `executionPlanJobId` in `jobOperationDetails` instead.
The plan job L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) that was used (if this was an apply job and
was not auto-approved).
returned: on success
type: str
sample: "ocid1.resolvedplanjob.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- "The date and time when the job was created.
Format is defined by RFC3339.
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_finished:
description:
- "The date and time when the job stopped running, irrespective of whether the job ran successfully.
Format is defined by RFC3339.
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- Current state of the specified job.
For more information about job lifecycle states in Resource Manager, see
L(Key Concepts,https://docs.cloud.oracle.com/iaas/Content/ResourceManager/Concepts/resourcemanager.htm#concepts__JobStates).
returned: on success
type: str
sample: ACCEPTED
failure_details:
description:
- ""
returned: on success
type: complex
contains:
code:
description:
- Job failure reason.
returned: on success
type: str
sample: INTERNAL_SERVICE_ERROR
message:
description:
- A human-readable error string.
returned: on success
type: str
sample: message_example
cancellation_details:
description:
- ""
returned: on success
type: complex
contains:
is_forced:
description:
- Indicates whether a forced cancellation was requested for the job while it was running.
A forced cancellation can result in an incorrect state file.
For example, the state file might not reflect the exact state of the provisioned resources.
returned: on success
type: bool
sample: true
working_directory:
description:
- File path to the directory from which Terraform runs.
If not specified, the root directory is used.
This parameter is ignored for the `configSourceType` value of `COMPARTMENT_CONFIG_SOURCE`.
returned: on success
type: str
sample: working_directory_example
variables:
description:
- "Terraform variables associated with this resource.
Maximum number of variables supported is 250.
The maximum size of each variable, including both name and value, is 8192 bytes.
Example: `{\\"CompartmentId\\": \\"compartment-id-value\\"}`"
returned: on success
type: dict
sample: {}
config_source:
description:
- ""
returned: on success
type: complex
contains:
config_source_record_type:
description:
- The type of configuration source to use for the Terraform configuration.
returned: on success
type: str
sample: ZIP_UPLOAD
configuration_source_provider_id:
description:
- Unique identifier (L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm))
for the Git configuration source.
returned: on success
type: str
sample: "ocid1.configurationsourceprovider.oc1..xxxxxxEXAMPLExxxxxx"
repository_url:
description:
- The URL of the Git repository.
returned: on success
type: str
sample: repository_url_example
branch_name:
description:
- The name of the branch within the Git repository.
returned: on success
type: str
sample: branch_name_example
commit_id:
description:
- The unique identifier (SHA-1 hash) of the individual change to the Git repository.
returned: on success
type: str
sample: "ocid1.commit.oc1..xxxxxxEXAMPLExxxxxx"
region:
description:
- "The name of the bucket's region.
Example: `PHX`"
returned: on success
type: str
sample: us-phoenix-1
namespace:
description:
- The Object Storage namespace that contains the bucket.
returned: on success
type: str
sample: namespace_example
bucket_name:
description:
- The name of the bucket that contains the Terraform configuration files.
returned: on success
type: str
sample: bucket_name_example
freeform_tags:
description:
- "Free-form tags associated with this resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"stack_id": "ocid1.stack.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"operation": "PLAN",
"job_operation_details": {
"operation": "APPLY",
"terraform_advanced_options": {
"is_refresh_required": true,
"parallelism": 56,
"detailed_log_level": "ERROR"
},
"execution_plan_strategy": "FROM_PLAN_JOB_ID",
"execution_plan_job_id": "ocid1.executionplanjob.oc1..xxxxxxEXAMPLExxxxxx"
},
"apply_job_plan_resolution": {
"plan_job_id": "ocid1.planjob.oc1..xxxxxxEXAMPLExxxxxx",
"is_use_latest_job_id": true,
"is_auto_approved": true
},
"resolved_plan_job_id": "ocid1.resolvedplanjob.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_finished": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "ACCEPTED",
"failure_details": {
"code": "INTERNAL_SERVICE_ERROR",
"message": "message_example"
},
"cancellation_details": {
"is_forced": true
},
"working_directory": "working_directory_example",
"variables": {},
"config_source": {
"config_source_record_type": "ZIP_UPLOAD",
"configuration_source_provider_id": "ocid1.configurationsourceprovider.oc1..xxxxxxEXAMPLExxxxxx",
"repository_url": "repository_url_example",
"branch_name": "branch_name_example",
"commit_id": "ocid1.commit.oc1..xxxxxxEXAMPLExxxxxx",
"region": "us-phoenix-1",
"namespace": "namespace_example",
"bucket_name": "bucket_name_example"
},
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.resource_manager import ResourceManagerClient
from oci.resource_manager.models import CreateJobDetails
from oci.resource_manager.models import UpdateJobDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class JobHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "job_id"
def get_module_resource_id(self):
return self.module.params.get("job_id")
def get_get_fn(self):
return self.client.get_job
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_job, job_id=self.module.params.get("job_id"),
)
def get_required_kwargs_for_list(self):
return dict()
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["stack_id", "display_name"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(self.client.list_jobs, **kwargs)
def get_create_model_class(self):
return CreateJobDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_job,
call_fn_args=(),
call_fn_kwargs=dict(create_job_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateJobDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_job,
call_fn_args=(),
call_fn_kwargs=dict(
job_id=self.module.params.get("job_id"),
update_job_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.cancel_job,
call_fn_args=(),
call_fn_kwargs=dict(
job_id=self.module.params.get("job_id"),
is_forced=self.module.params.get("is_forced"),
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
JobHelperCustom = get_custom_class("JobHelperCustom")
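# Method resolution order places the optional hand-written customizations ahead
# of the generated helper, so JobHelperCustom can override any operation above.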
class ResourceHelper(JobHelperCustom, JobHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
stack_id=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
operation=dict(type="str"),
job_operation_details=dict(
type="dict",
options=dict(
operation=dict(
type="str",
required=True,
choices=["IMPORT_TF_STATE", "APPLY", "PLAN", "DESTROY"],
),
tf_state_base64_encoded=dict(type="str"),
terraform_advanced_options=dict(
type="dict",
options=dict(
is_refresh_required=dict(type="bool"),
parallelism=dict(type="int"),
detailed_log_level=dict(
type="str",
choices=["ERROR", "WARN", "INFO", "DEBUG", "TRACE"],
),
),
),
execution_plan_strategy=dict(type="str"),
execution_plan_job_id=dict(type="str"),
),
),
apply_job_plan_resolution=dict(
type="dict",
options=dict(
plan_job_id=dict(type="str"),
is_use_latest_job_id=dict(type="bool"),
is_auto_approved=dict(type="bool"),
),
),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
job_id=dict(aliases=["id"], type="str"),
is_forced=dict(type="bool"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="job",
service_client_class=ResourceManagerClient,
namespace="resource_manager",
)
result = dict(changed=False)
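    # Dispatch on the requested state: the *_using_name variants handle the
    # OCI_USE_NAME_AS_IDENTIFIER flow, otherwise job_id drives update/delete,
    # and state=present without an existing job falls through to create.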
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
python/jittor/test/test_conv_tuner.py | Exusial/jittor | 2,571 | 12627774
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import os
import numpy as np
from jittor import compile_extern
# TODO: compare with pytorch
from jittor.test.test_log import find_log_with_re
if jt.has_cuda:
from jittor.compile_extern import cublas_ops, cudnn_ops
else:
cublas_ops = cudnn_ops = None
def conv_nchw(x, in_planes, out_planes, kernel_size, padding, stride = 1, dilation=1, init_method=None, w_ = None):
Kw = kernel_size
Kh = kernel_size
_C = in_planes
Kc = out_planes
N,C,H,W = x.shape
assert C==_C
if w_ is None:
assert 0
else:
w = w_
oh = (H-Kh*dilation+dilation-1+padding*2)//stride+1
ow = (W-Kw*dilation+dilation-1+padding*2)//stride+1
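    # reindex builds an im2col-style view: for each output pixel (i3, i4) and
    # kernel offset (i5, i6) it gathers the matching input element; indices that
    # fall into the padding region are out of range and filled with zeros.
    # The broadcast-multiply and sum over (C, Kh, Kw) below complete the convolution.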
xx = x.reindex([N,Kc,C,oh,ow,Kh,Kw], [
'i0', # Nid
'i2', # Cid
f'i3*{stride}-{padding}+i5*{dilation}', # Hid+Khid
f'i4*{stride}-{padding}+i6*{dilation}', # Wid+KWid
])
ww = w.broadcast(xx.shape, [0,3,4])
yy = xx*ww
y = yy.sum([2,5,6]) # C, Kh, Kw
return y
def conv_nhwc(x, in_planes, out_planes, kernel_size, padding, stride = 1, dilation=1, init_method=None, w_ = None):
Kw = kernel_size
Kh = kernel_size
_C = in_planes
Kc = out_planes
N,H,W,C = x.shape
assert C==_C
if w_ is None:
assert 0
else:
w = w_
oh = (H-Kh*dilation+dilation-1+padding*2)//stride+1
ow = (W-Kw*dilation+dilation-1+padding*2)//stride+1
xx = x.reindex([N,Kc,C,oh,ow,Kh,Kw], [
'i0', # Nid
f'i3*{stride}-{padding}+i5*{dilation}', # Hid+Khid
f'i4*{stride}-{padding}+i6*{dilation}', # Wid+KWid
'i2', # Cid
])
ww = w.broadcast(xx.shape, [0,3,4])
yy = xx*ww
y = yy.sum([2,5,6]) # C, Kh, Kw
return y
def test_nhwc(x, w, stride, padding, dilation):
out_planes, in_planes, kernel_size, _ = w.shape
return conv_nhwc(x, in_planes, out_planes, kernel_size, padding, stride=stride, dilation=dilation, w_=w)
def test_nchw(x, w, stride, padding, dilation):
out_planes, in_planes, kernel_size, _ = w.shape
return conv_nchw(x, in_planes, out_planes, kernel_size, padding, stride=stride, dilation=dilation, w_=w)
def check_forward(xshape, wshape, stride, padding, dilation, use_cuda, nhwc):
if nhwc:
test_func = test_nhwc
else:
test_func = test_nchw
if use_cuda == 1:
op_name = "cudnn_conv"
else:
op_name = "mkl_conv"
with jt.log_capture_scope(use_cuda=use_cuda, enable_tuner=1,
log_v=0, log_vprefix="op.cc=100,conv_tuner=1000", compile_options={"test":266}
) as raw_log:
x = jt.random(xshape)
w = jt.random(wshape)
y = test_func(x, w, stride, padding, dilation)
y.sync()
with jt.flag_scope(use_cuda=0, enable_tuner=0,
compile_options={"test":255}):
cy = test_func(x, w, stride, padding, dilation)
cy.sync()
logs = find_log_with_re(raw_log, "(Jit op key (not )?found: " + op_name + ".*)")
assert len(logs)==1 and "oihw" in logs[0][0], logs
assert np.allclose(y.data, cy.data)
def check_backward(xshape, wshape, stride, padding, dilation, use_cuda, nhwc):
if nhwc:
test_func = test_nhwc
else:
test_func = test_nchw
if use_cuda == 1:
op_name = "cudnn_conv"
else:
op_name = "mkl_conv"
with jt.log_capture_scope(use_cuda=use_cuda, enable_tuner=1,
log_v=1, log_vprefix="op.cc=1000,exe=1000,conv_t=1000", compile_options={"test":244}
) as raw_log:
x = jt.random(xshape)
w = jt.random(wshape)
y = test_func(x, w, stride, padding, dilation)
loss = y.mean()
dx, dw = jt.grad(loss, [x, w])
jt.sync([y, loss, dx, dw])
with jt.flag_scope(use_cuda=0, enable_tuner=0, compile_options={"test":233}):
cy = test_func(x, w, stride, padding, dilation)
closs = cy.mean()
cdx, cdw = jt.grad(closs, [x, w])
jt.sync([cy, closs, cdx, cdw])
logs = find_log_with_re(raw_log, "(Jit op key (not )?found: " + op_name + ".*)")
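    # Three fused conv ops are expected: the forward pass plus the gradients
    # w.r.t. the input and the weight.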
assert len(logs)==3 and "oihw" in logs[0][0], (logs)
assert np.allclose(y.data, cy.data, 1e-3)
assert np.allclose(dw.data, cdw.data, 1e-3), (dw.data, cdw.data)
assert np.allclose(dx.data, cdx.data, 1e-3), (dx.data, cdx.data, np.abs(cdx.data).max(), np.abs(dx.data - cdx.data).max())
class TestConvTuner(unittest.TestCase):
def test_forward(self):
for dilation in [1,2,3]:
check_forward([10,100,100,3], [5,3,3,3], 2, 0, dilation, 0, True)
check_forward([10,40,50,4], [5,4,5,5], 1, 1, dilation, 0, True)
check_forward([10,40,50,4], [5,4,4,4], 3, 1, dilation, 0, True)
check_forward([10,3,100,100], [5,3,3,3], 2, 0, dilation, 0, False)
check_forward([10,4,40,50], [5,4,5,5], 1, 1, dilation, 0, False)
check_forward([10,4,40,50], [5,4,4,4], 3, 1, dilation, 0, False)
def test_backward(self):
for dilation in [1,2,3]:
check_backward([10,3,100,100], [5,3,3,3], 2, 0, dilation, 0, False)
check_backward([10,4,40,50], [5,4,5,5], 1, 1, dilation, 0, False)
check_backward([10,4,40,50], [5,4,4,4], 3, 1, dilation, 0, False)
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
def test_forward_cuda(self):
for dilation in [1,2,3]:
check_forward([10,100,100,3], [5,3,3,3], 2, 0, dilation, 1, True)
check_forward([10,40,50,4], [5,4,5,5], 1, 1, dilation, 1, True)
check_forward([10,40,50,4], [5,4,4,4], 3, 1, dilation, 1, True)
check_forward([10,3,100,100], [5,3,3,3], 2, 0, dilation, 1, False)
check_forward([10,4,40,50], [5,4,5,5], 1, 1, dilation, 1, False)
check_forward([10,4,40,50], [5,4,4,4], 3, 1, dilation, 1, False)
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
def test_backward_cuda(self):
for dilation in [1,2,3]:
check_backward([10,3,100,100], [5,3,3,3], 2, 0, dilation, 1, False)
check_backward([10,4,40,50], [5,4,5,5], 1, 1, dilation, 1, False)
check_backward([10,4,40,50], [5,4,4,4], 3, 1, dilation, 1, False)
if __name__ == "__main__":
unittest.main()
|
myia/operations/macro_resolve.py | strint/myia | 222 | 12627781
"""Implementation of the 'resolve' operation."""
from ..lib import Constant, MyiaNameError, MyiaTypeError, Namespace, macro
@macro
async def resolve(info, r_data, r_item):
"""Perform static name resolution on a Namespace."""
data_v, item_v = await info.build_all(r_data, r_item)
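    # build_all evaluates both argument references to concrete compile-time
    # values so they can be validated and looked up statically below.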
if not isinstance(data_v, Namespace): # pragma: no cover
raise MyiaTypeError(
f"data argument to resolve must be Namespace," f" not {data_v}"
)
if not isinstance(item_v, str): # pragma: no cover
raise MyiaTypeError(
f"item argument to resolve must be a string," f" not {item_v}."
)
try:
resolved = data_v[item_v]
except NameError:
raise MyiaNameError(f"Cannot resolve name '{item_v}'")
return Constant(resolved)
__operation_defaults__ = {
"name": "resolve",
"registered_name": "resolve",
"mapping": resolve,
"python_implementation": None,
}
|
lingvo/core/gshard_layers_test.py | allenwang28/lingvo | 2,611 | 12627787
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Test code for gshard_layers."""
from lingvo import compat as tf
from lingvo.core import gshard_builder
from lingvo.core import gshard_layers
from lingvo.core import test_utils
import numpy as np
FLAGS = tf.flags.FLAGS
class CausalDepthwiseConv1DLayerTest(test_utils.TestCase):
def _GetRefParams(self, kernel_size, dim):
builder = gshard_builder.MoEBuilder.Params().Set(
model_dim=dim).Instantiate()
return builder.DepthwiseConvAutoregressive('conv', kernel_size)
def _GetParams(self, kernel_size, dim):
p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
name='conv',
kernel_size=kernel_size,
model_dims=dim,
compatible_with_mtf_ckpt=True)
return p
def _GetInputs(self, batch, seqlen, dim):
np.random.seed(None)
return tf.convert_to_tensor(
np.random.rand(batch, seqlen, dim).astype(np.float32))
def testEqualToDepthwiseConvAutoregressive(self):
b, seqlen, d, k = 2, 8, 4, 3
with tf.variable_scope('ref'):
ref_l = self._GetRefParams(k, d).Instantiate()
with tf.variable_scope('act'):
exp_l = self._GetParams(k, d).Instantiate()
inputs = self._GetInputs(b, seqlen, d)
# [b, t, d]
ref_out = ref_l.FProp(ref_l.theta, inputs)
# [b, t, d]
act_out = exp_l.FProp(exp_l.theta, inputs)
init_op = tf.global_variables_initializer()
with self.session(use_gpu=False) as sess:
sess.run(init_op)
expected, actual = sess.run([ref_out, act_out])
self.assertAllClose(expected, actual)
class Conv1DStateLayerTest(test_utils.TestCase):
def _GetParams(self, kernel_size, dim):
p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
name='conv', kernel_size=kernel_size, model_dims=dim)
p.state_layer = gshard_layers.Conv1DStateLayer.Params().Set(
shape=[None, None, dim])
return p
def _GetInputs(self, batch, seqlen, dim):
np.random.seed(None)
np_inputs = np.random.rand(batch, seqlen, dim).astype(np.float32)
tf.logging.info(f'np_inputs: {np_inputs}')
return tf.convert_to_tensor(np_inputs)
def testSingleStep(self):
b, seqlen, dim, k, beam = 2, 8, 2, 3, 1
inputs = self._GetInputs(b, seqlen * beam, dim)
l = self._GetParams(k, dim).Instantiate()
# Normal Fprop with a len=seqlen sequence.
outputs = l.FProp(l.theta, inputs)
state0 = gshard_layers.StateLayer.InitState(l, [b, beam, k])
tf.logging.info(f'state0: {repr(state0)}')
all_outputs = []
state_t = state0
theta_t = l.theta.DeepCopy()
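    # Re-run the convolution one step at a time, threading the recurrent state
    # through theta between steps; the concatenated per-step outputs should
    # match the full-sequence FProp above.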
for i in range(seqlen):
inputs_t = inputs[:, i:i + 1 * beam, :]
# Copies state to theta.
theta_t = gshard_layers.StateLayer.UpdateTheta(l, theta_t, state_t, t=i)
tf.logging.info(f'theta_{i}: {repr(theta_t)}')
# Updates theta inplace.
out_t = l.FProp(theta_t, inputs_t)
# Copies theta to state.
state_t = gshard_layers.StateLayer.UpdateState(l, theta_t, state_t)
tf.logging.info(f'state_{i}: {repr(state_t)}')
all_outputs.append(out_t)
# seqlen steps of FProp(), each with len=1.
concat_step_outputs = tf.concat(all_outputs, axis=1)
init_op = tf.global_variables_initializer()
with self.session(use_gpu=False) as sess:
sess.run(init_op)
expected, actual = sess.run([outputs, concat_step_outputs])
print(f'expected: {expected}')
print(f'actual: {actual}')
self.assertAllClose(expected, actual)
if __name__ == '__main__':
tf.test.main()
|