max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
third_party/google-endpoints/endpoints/test/test_util.py | tingshao/catapult | 2,151 | 12612410 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for API modules.
Classes:
ModuleInterfaceTest: Test framework for developing public modules.
"""
# pylint: disable=g-bad-name
import __future__
import json
import os
import types
def AssertDictEqual(expected, actual, testcase):
"""Utility method to dump diffs if the dictionaries aren't equal.
Args:
expected: dict, the expected results.
actual: dict, the actual results.
testcase: unittest.TestCase, the test case this assertion is used within.
"""
if expected != actual:
testcase.assertMultiLineEqual(
json.dumps(expected, indent=2, sort_keys=True),
json.dumps(actual, indent=2, sort_keys=True))
class ModuleInterfaceTest(object):
r"""Test to ensure module interface is carefully constructed.
A module interface is the set of public objects listed in the module __all__
attribute. Modules that will be used by the public should have this interface
carefully declared. At all times, the __all__ attribute should contain only objects
intended to be used by the public; other objects in the module should be
considered implementation details.
Protected attributes (those beginning with '_') and other imported modules
should not be part of this set of variables. An exception is for variables
that begin and end with '__' which are implicitly part of the interface
(eg. __name__, __file__, __all__ itself, etc.).
Modules that are imported into the tested module are an exception and may
be left out of the __all__ definition. The test is done by checking the value
of what would otherwise be a public name and not allowing it to be exported
if it is an instance of a module. Modules that are explicitly exported are
for the time being not permitted.
To use this test class a module should define a new class that inherits first
from ModuleInterfaceTest and then from unittest.TestCase. No other tests
should be added to this test case, making the order of inheritance less
important, but if setUp for some reason is overridden, it is important that
ModuleInterfaceTest is first in the list so that its setUp method is
invoked.
Multiple inheritance is required so that ModuleInterfaceTest is not itself
a test, and is not itself executed as one.
The test class is expected to have the following class attributes defined:
MODULE: A reference to the module that is being validated for interface
correctness.
Example:
Module definition (hello.py):
import sys
__all__ = ['hello']
def _get_outputter():
return sys.stdout
def hello():
_get_outputter().write('Hello\n')
Test definition:
import test_util
import unittest
import hello
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
unittest.TestCase):
MODULE = hello
class HelloTest(unittest.TestCase):
... Test 'hello' module ...
def main(unused_argv):
unittest.main()
if __name__ == '__main__':
app.run()
"""
def setUp(self):
"""Set up makes sure that MODULE and IMPORTED_MODULES is defined.
This is a basic configuration test for the test itself so does not
get it's own test case.
"""
if not hasattr(self, 'MODULE'):
self.fail(
"You must define 'MODULE' on ModuleInterfaceTest sub-class %s." %
type(self).__name__)
def testAllExist(self):
"""Test that all attributes defined in __all__ exist."""
missing_attributes = []
for attribute in self.MODULE.__all__:
if not hasattr(self.MODULE, attribute):
missing_attributes.append(attribute)
if missing_attributes:
self.fail('%s of __all__ are not defined in module.' %
missing_attributes)
def testAllExported(self):
"""Test that all public attributes not imported are in __all__."""
missing_attributes = []
for attribute in dir(self.MODULE):
if not attribute.startswith('_'):
if attribute not in self.MODULE.__all__:
attribute_value = getattr(self.MODULE, attribute)
if isinstance(attribute_value, types.ModuleType):
continue
# pylint: disable=protected-access
if isinstance(attribute_value, __future__._Feature):
continue
missing_attributes.append(attribute)
if missing_attributes:
self.fail('%s are not modules and not defined in __all__.' %
missing_attributes)
def testNoExportedProtectedVariables(self):
"""Test that there are no protected variables listed in __all__."""
protected_variables = []
for attribute in self.MODULE.__all__:
if attribute.startswith('_'):
protected_variables.append(attribute)
if protected_variables:
self.fail('%s are protected variables and may not be exported.' %
protected_variables)
def testNoExportedModules(self):
"""Test that no modules exist in __all__."""
exported_modules = []
for attribute in self.MODULE.__all__:
try:
value = getattr(self.MODULE, attribute)
except AttributeError:
# This is a different error case tested for in testAllExist.
pass
else:
if isinstance(value, types.ModuleType):
exported_modules.append(attribute)
if exported_modules:
self.fail('%s are modules and may not be exported.' % exported_modules)
class DevServerTest(object):
@staticmethod
def setUpDevServerEnv(server_software_key='SERVER_SOFTWARE',
server_software_value='Development/2.0.0'):
original_env_value = os.environ.get(server_software_key)
os.environ[server_software_key] = server_software_value
return server_software_key, original_env_value
@staticmethod
def restoreEnv(server_software_key, server_software_value):
if server_software_value is None:
os.environ.pop(server_software_key, None)
else:
os.environ[server_software_key] = server_software_value
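# Illustrative sketch (not part of the original module): how a unittest.TestCase
# could use DevServerTest to fake the dev-server environment and restore it
# afterwards. The test class and assertion are hypothetical; only
# setUpDevServerEnv/restoreEnv come from this module.
import unittest


class _DevServerEnvExampleTest(unittest.TestCase):

  def test_runs_under_dev_server(self):
    key, original = DevServerTest.setUpDevServerEnv()
    try:
      self.assertTrue(os.environ[key].startswith('Development/'))
    finally:
      DevServerTest.restoreEnv(key, original)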
|
spikeinterface/core/baserecording.py | JuliaSprenger/spikeinterface | 116 | 12612414 | from typing import List, Union
from pathlib import Path
import warnings
import numpy as np
from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface
from .base import BaseExtractor, BaseSegment
from .core_tools import write_binary_recording, write_memory_recording
from warnings import warn
class BaseRecording(BaseExtractor):
"""
Abstract class representing a multichannel timeseries (or block of raw ephys traces),
possibly made of several segments.
Internally handles a list of RecordingSegment objects.
"""
_main_annotations = ['is_filtered']
_main_properties = ['group', 'location', 'gain_to_uV', 'offset_to_uV']
_main_features = []  # recordings do not handle features
def __init__(self, sampling_frequency: float, channel_ids: List, dtype):
BaseExtractor.__init__(self, channel_ids)
self.is_dumpable = True
self._sampling_frequency = sampling_frequency
self._dtype = np.dtype(dtype)
self._recording_segments: List[BaseRecordingSegment] = []
# initialize main annotation and properties
self.annotate(is_filtered=False)
def __repr__(self):
clsname = self.__class__.__name__
nseg = self.get_num_segments()
nchan = self.get_num_channels()
sf_khz = self.get_sampling_frequency() / 1000.
duration = self.get_total_duration()
txt = f'{clsname}: {nchan} channels - {nseg} segments - {sf_khz:0.1f}kHz - {duration:0.3f}s'
if 'file_paths' in self._kwargs:
txt += '\n file_paths: {}'.format(self._kwargs['file_paths'])
if 'file_path' in self._kwargs:
txt += '\n file_path: {}'.format(self._kwargs['file_path'])
return txt
def get_num_segments(self):
return len(self._recording_segments)
def add_recording_segment(self, recording_segment):
# todo: check channel count and sampling frequency
self._recording_segments.append(recording_segment)
recording_segment.set_parent_extractor(self)
def get_sampling_frequency(self):
return self._sampling_frequency
@property
def channel_ids(self):
return self._main_ids
def get_channel_ids(self):
return self._main_ids
def get_num_channels(self):
return len(self.get_channel_ids())
def get_dtype(self):
return self._dtype
def get_num_samples(self, segment_index=None):
segment_index = self._check_segment_index(segment_index)
return self._recording_segments[segment_index].get_num_samples()
get_num_frames = get_num_samples
def get_total_samples(self):
s = 0
for segment_index in range(self.get_num_segments()):
s += self.get_num_samples(segment_index)
return s
def get_total_duration(self):
duration = self.get_total_samples() / self.get_sampling_frequency()
return duration
def get_traces(self,
segment_index: Union[int, None] = None,
start_frame: Union[int, None] = None,
end_frame: Union[int, None] = None,
channel_ids: Union[List, None] = None,
order: Union[str, None] = None,
return_scaled=False,
):
segment_index = self._check_segment_index(segment_index)
channel_indices = self.ids_to_indices(channel_ids, prefer_slice=True)
rs = self._recording_segments[segment_index]
traces = rs.get_traces(start_frame=start_frame, end_frame=end_frame, channel_indices=channel_indices)
if order is not None:
assert order in ["C", "F"]
traces = np.asanyarray(traces, order=order)
if return_scaled:
if not self.has_scaled_traces():
raise ValueError('This recording does not support return_scaled=True (gain_to_uV and '
'offset_to_uV properties are needed)')
else:
gains = self.get_property('gain_to_uV')
offsets = self.get_property('offset_to_uV')
gains = gains[channel_indices].astype('float32')
offsets = offsets[channel_indices].astype('float32')
traces = traces.astype('float32') * gains + offsets
return traces
def has_scaled_traces(self):
if self.get_property('gain_to_uV') is None or self.get_property('offset_to_uV') is None:
return False
else:
return True
def is_filtered(self):
# is_filtered is handled via the 'is_filtered' annotation
return self._annotations.get('is_filtered', False)
def get_times(self, segment_index=None):
"""
Get time vector for a recording segment.
If the segment has a time_vector, then it is returned. Otherwise
a time_vector is constructed on the fly with sampling frequency.
If t_start is defined and the time vector is constructed on the fly,
the first time will be t_start. Otherwise it will start from 0.
"""
segment_index = self._check_segment_index(segment_index)
rs = self._recording_segments[segment_index]
times = rs.get_times()
return times
def has_time_vector(self, segment_index=None):
"""
Check if the segment of the recording has a time vector.
"""
segment_index = self._check_segment_index(segment_index)
rs = self._recording_segments[segment_index]
d = rs.get_times_kwargs()
return d['time_vector'] is not None
def set_times(self, times, segment_index=None, with_warning=True):
"""
Set times for a recording segment.
"""
segment_index = self._check_segment_index(segment_index)
rs = self._recording_segments[segment_index]
assert times.ndim == 1, 'Time must have ndim=1'
assert rs.get_num_samples() == times.shape[0], 'times have wrong shape'
rs.t_start = None
rs.time_vector = times.astype('float64')
if with_warning:
warnings.warn('Setting times with Recording.set_times() is not recommended because '
'times are not always propagated across preprocessing. '
'Use this carefully!')
_job_keys = ['n_jobs', 'total_memory', 'chunk_size', 'chunk_memory', 'progress_bar', 'verbose']
def _save(self, format='binary', **save_kwargs):
"""
This function replaces the old CacheRecordingExtractor, but enables more engines
for caching results. At the moment only 'binary' with memmap is supported.
We plan to add other engines, such as zarr and NWB.
"""
# handle t_starts
t_starts = []
has_time_vectors = []
for segment_index, rs in enumerate(self._recording_segments):
d = rs.get_times_kwargs()
t_starts.append(d['t_start'])
has_time_vectors.append(d['time_vector'] is not None)
if all(t_start is None for t_start in t_starts):
t_starts = None
if format == 'binary':
# TODO save properties as npz!!!!!
folder = save_kwargs['folder']
file_paths = [folder / f'traces_cached_seg{i}.raw' for i in range(self.get_num_segments())]
dtype = save_kwargs.get('dtype', None)
if dtype is None:
dtype = self.get_dtype()
job_kwargs = {k: save_kwargs[k] for k in self._job_keys if k in save_kwargs}
write_binary_recording(self, file_paths=file_paths, dtype=dtype, **job_kwargs)
from .binaryrecordingextractor import BinaryRecordingExtractor
cached = BinaryRecordingExtractor(file_paths=file_paths, sampling_frequency=self.get_sampling_frequency(),
num_chan=self.get_num_channels(), dtype=dtype,
t_starts=t_starts, channel_ids=self.get_channel_ids(), time_axis=0,
file_offset=0, gain_to_uV=self.get_channel_gains(),
offset_to_uV=self.get_channel_offsets())
elif format == 'memory':
job_kwargs = {k: save_kwargs[k] for k in self._job_keys if k in save_kwargs}
traces_list = write_memory_recording(self, dtype=None, **job_kwargs)
from .numpyextractors import NumpyRecording
cached = NumpyRecording(traces_list, self.get_sampling_frequency(), t_starts=t_starts,
channel_ids=self.channel_ids)
elif format == 'zarr':
# TODO implement a format based on zarr
raise NotImplementedError
elif format == 'nwb':
# TODO implement a format based on NWB
raise NotImplementedError
else:
raise ValueError(f'format {format} not supported')
if self.get_property('contact_vector') is not None:
probegroup = self.get_probegroup()
cached.set_probegroup(probegroup)
for segment_index, rs in enumerate(self._recording_segments):
d = rs.get_times_kwargs()
time_vector = d['time_vector']
if time_vector is not None:
cached._recording_segments[segment_index].time_vector = time_vector
return cached
def _extra_metadata_from_folder(self, folder):
# load probe
folder = Path(folder)
if (folder / 'probe.json').is_file():
probegroup = read_probeinterface(folder / 'probe.json')
self.set_probegroup(probegroup, in_place=True)
# load time vector if any
for segment_index, rs in enumerate(self._recording_segments):
time_file = folder / f'times_cached_seg{segment_index}.npy'
if time_file.is_file():
time_vector = np.load(time_file)
rs.time_vector = time_vector
def _extra_metadata_to_folder(self, folder):
# save probe
if self.get_property('contact_vector') is not None:
probegroup = self.get_probegroup()
write_probeinterface(folder / 'probe.json', probegroup)
# save time vector if any
for segment_index, rs in enumerate(self._recording_segments):
d = rs.get_times_kwargs()
time_vector = d['time_vector']
if time_vector is not None:
np.save(folder / f'times_cached_seg{segment_index}.npy', time_vector)
def set_probe(self, probe, group_mode='by_probe', in_place=False):
"""
Thin wrapper around set_probes() for the case of a single Probe.
"""
assert isinstance(probe, Probe), 'must give Probe'
probegroup = ProbeGroup()
probegroup.add_probe(probe)
return self.set_probes(probegroup, group_mode=group_mode, in_place=in_place)
def set_probegroup(self, probegroup, group_mode='by_probe', in_place=False):
return self.set_probes(probegroup, group_mode=group_mode, in_place=in_place)
def set_probes(self, probe_or_probegroup, group_mode='by_probe', in_place=False):
"""
Attach a Probe to a recording.
For this Probe.device_channel_indices is used to link contacts to recording channels.
If some contacts of the Probe are not connected (device_channel_indices=-1)
then the recording is "sliced" and only connected channels are kept.
The probe order is not kept. Channel ids are re-ordered to match the channel_ids of the recording.
Parameters
----------
probe_or_probegroup: Probe, list of Probe, or ProbeGroup
The probe(s) to be attached to the recording
group_mode: str
'by_probe' or 'by_shank'. Adds grouping property to the recording based on the probes ('by_probe')
or the shanks ('by_shank')
in_place: bool
False by default.
Useful internally when an extractor calls self.set_probegroup(probe)
Returns
-------
sub_recording: BaseRecording
A view of the recording (ChannelSliceRecording or clone or itself)
"""
from spikeinterface import ChannelSliceRecording
assert group_mode in ('by_probe', 'by_shank'), "'group_mode' can be 'by_probe' or 'by_shank'"
# handle several input possibilities
if isinstance(probe_or_probegroup, Probe):
probegroup = ProbeGroup()
probegroup.add_probe(probe_or_probegroup)
elif isinstance(probe_or_probegroup, ProbeGroup):
probegroup = probe_or_probegroup
elif isinstance(probe_or_probegroup, list):
assert all([isinstance(e, Probe) for e in probe_or_probegroup])
probegroup = ProbeGroup()
for probe in probe_or_probegroup:
probegroup.add_probe(probe)
else:
raise ValueError('must give Probe or ProbeGroup or list of Probe')
# handle not connected channels
assert all(probe.device_channel_indices is not None for probe in probegroup.probes), \
'Probe must have device_channel_indices'
# this is a structured array (dataframe-like, with compound fields) that holds all contact attributes
arr = probegroup.to_numpy(complete=True)
# keep only connected contacts (device_channel_indices != -1)
keep = arr['device_channel_indices'] >= 0
if np.any(~keep):
warn('The given probes have unconnected contacts: they are removed')
arr = arr[keep]
inds = arr['device_channel_indices']
order = np.argsort(inds)
inds = inds[order]
# check
if np.max(inds) >= self.get_num_channels():
raise ValueError('The given Probe has "device_channel_indices" that exceed the number of channels in the recording')
new_channel_ids = self.get_channel_ids()[inds]
arr = arr[order]
arr['device_channel_indices'] = np.arange(arr.size, dtype='int64')
# create recording : channel slice or clone or self
if in_place:
if not np.array_equal(new_channel_ids, self.get_channel_ids()):
raise Exception('set_probes(in_place=True) requires the probe to use all recording channels')
sub_recording = self
else:
if np.array_equal(new_channel_ids, self.get_channel_ids()):
sub_recording = self.clone()
else:
sub_recording = ChannelSliceRecording(self, new_channel_ids)
# store the structured contact array in the 'contact_vector' property
sub_recording.set_property('contact_vector', arr, ids=None)
# planar_contour is saved in annotations
for probe_index, probe in enumerate(probegroup.probes):
contour = probe.probe_planar_contour
if contour is not None:
sub_recording.set_annotation(f'probe_{probe_index}_planar_contour', contour, overwrite=True)
# duplicate positions to "locations" property
ndim = probegroup.ndim
locations = np.zeros((arr.size, ndim), dtype='float64')
for i, dim in enumerate(['x', 'y', 'z'][:ndim]):
locations[:, i] = arr[dim]
sub_recording.set_property('location', locations, ids=None)
# handle groups
groups = np.zeros(arr.size, dtype='int64')
if group_mode == 'by_probe':
for group, probe_index in enumerate(np.unique(arr['probe_index'])):
mask = arr['probe_index'] == probe_index
groups[mask] = group
elif group_mode == 'by_shank':
assert all(probe.shank_ids is not None for probe in probegroup.probes), \
'shank_ids is None in probe, you cannot group by shank'
for group, a in enumerate(np.unique(arr[['probe_index', 'shank_ids']])):
mask = (arr['probe_index'] == a['probe_index']) & (arr['shank_ids'] == a['shank_ids'])
groups[mask] = group
sub_recording.set_property('group', groups, ids=None)
return sub_recording
def get_probe(self):
probes = self.get_probes()
assert len(probes) == 1, 'There are several probes. Use .get_probes() or .get_probegroup()'
return probes[0]
def get_probes(self):
probegroup = self.get_probegroup()
return probegroup.probes
def get_probegroup(self):
arr = self.get_property('contact_vector')
if arr is None:
positions = self.get_property('location')
if positions is None:
raise ValueError('There is no Probe attached to this recording. Use set_probe(...)')
else:
warn('There is no Probe attached to this recording. Creating a dummy one with contact positions')
ndim = positions.shape[1]
probe = Probe(ndim=ndim)
probe.set_contacts(positions=positions, shapes='circle', shape_params={'radius': 5})
probe.set_device_channel_indices(np.arange(self.get_num_channels(), dtype='int64'))
# probe.create_auto_shape()
probegroup = ProbeGroup()
probegroup.add_probe(probe)
else:
probegroup = ProbeGroup.from_numpy(arr)
for probe_index, probe in enumerate(probegroup.probes):
contour = self.get_annotation(f'probe_{probe_index}_planar_contour')
if contour is not None:
probe.set_planar_contour(contour)
return probegroup
def set_dummy_probe_from_locations(self, locations, shape="circle", shape_params={"radius": 1}):
probe = Probe()
probe.set_contacts(locations, shapes=shape, shape_params=shape_params)
probe.set_device_channel_indices(np.arange(self.get_num_channels()))
self.set_probe(probe, in_place=True)
def set_channel_locations(self, locations, channel_ids=None):
if self.get_property('contact_vector') is not None:
raise ValueError('set_channel_locations(..) destroys the probe description; prefer set_probes(..)')
self.set_property('location', locations, ids=channel_ids)
def get_channel_locations(self, channel_ids=None, locations_2d=True):
if channel_ids is None:
channel_ids = self.get_channel_ids()
channel_indices = self.ids_to_indices(channel_ids)
if self.get_property('contact_vector') is not None:
probe = self.get_probe()
return probe.contact_positions[channel_indices]
else:
location = self.get_property('location')
if location is None:
raise Exception('there is no channel location')
location = np.asarray(location)[channel_indices]
return location
def clear_channel_locations(self, channel_ids=None):
if channel_ids is None:
n = self.get_num_channels()
else:
n = len(channel_ids)
locations = np.zeros((n, 2)) * np.nan
self.set_property('location', locations, ids=channel_ids)
def set_channel_groups(self, groups, channel_ids=None):
if 'probes' in self._annotations:
warn('set_channel_groups(..) destroys the probe description. Using set_probe(...) is preferable')
self._annotations.pop('probes')
self.set_property('group', groups, ids=channel_ids)
def get_channel_groups(self, channel_ids=None):
groups = self.get_property('group', ids=channel_ids)
return groups
def clear_channel_groups(self, channel_ids=None):
if channel_ids is None:
n = self.get_num_channels()
else:
n = len(channel_ids)
groups = np.zeros(n, dtype='int64')
self.set_property('group', groups, ids=channel_ids)
def set_channel_gains(self, gains, channel_ids=None):
if np.isscalar(gains):
gains = [gains] * self.get_num_channels()
self.set_property('gain_to_uV', gains, ids=channel_ids)
def get_channel_gains(self, channel_ids=None):
return self.get_property('gain_to_uV', ids=channel_ids)
def set_channel_offsets(self, offsets, channel_ids=None):
if np.isscalar(offsets):
offsets = [offsets] * self.get_num_channels()
self.set_property('offset_to_uV', offsets, ids=channel_ids)
def get_channel_offsets(self, channel_ids=None):
return self.get_property('offset_to_uV', ids=channel_ids)
def get_channel_property(self, channel_id, key):
values = self.get_property(key)
v = values[self.id_to_index(channel_id)]
return v
def channel_slice(self, channel_ids, renamed_channel_ids=None):
from spikeinterface import ChannelSliceRecording
sub_recording = ChannelSliceRecording(self, channel_ids, renamed_channel_ids=renamed_channel_ids)
return sub_recording
def frame_slice(self, start_frame, end_frame):
from spikeinterface import FrameSliceRecording
sub_recording = FrameSliceRecording(self, start_frame=start_frame, end_frame=end_frame)
return sub_recording
def split_by(self, property='group', outputs='dict'):
assert outputs in ('list', 'dict')
from .channelslicerecording import ChannelSliceRecording
values = self.get_property(property)
if values is None:
raise ValueError(f'property {property} is not set')
if outputs == 'list':
recordings = []
elif outputs == 'dict':
recordings = {}
for value in np.unique(values):
inds, = np.nonzero(values == value)
new_channel_ids = self.get_channel_ids()[inds]
subrec = ChannelSliceRecording(self, new_channel_ids)
if outputs == 'list':
recordings.append(subrec)
elif outputs == 'dict':
recordings[value] = subrec
return recordings
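# Illustrative sketch (not part of spikeinterface): attach a small dummy Probe
# to an in-memory recording and split it by the 'group' property. The
# NumpyRecording and probeinterface calls mirror the ones used elsewhere in
# this file; everything else (sizes, positions) is made up for the example.
def _example_attach_probe():
    from .numpyextractors import NumpyRecording
    traces = np.zeros((1000, 4), dtype='float32')
    rec = NumpyRecording([traces], 30000.)
    probe = Probe(ndim=2)
    positions = np.array([[0., 0.], [0., 20.], [0., 40.], [0., 60.]])
    probe.set_contacts(positions=positions, shapes='circle', shape_params={'radius': 5})
    probe.set_device_channel_indices(np.arange(4))
    rec_with_probe = rec.set_probe(probe, group_mode='by_probe')
    # the probe populates the 'location' and 'group' properties
    assert rec_with_probe.get_channel_locations().shape == (4, 2)
    groups = rec_with_probe.split_by('group')  # dict: group value -> sub-recording
    return rec_with_probe, groups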
class BaseRecordingSegment(BaseSegment):
"""
Abstract class representing a multichannel timeseries, or block of raw ephys traces
"""
def __init__(self, sampling_frequency=None, t_start=None, time_vector=None):
# sampling_frequency and time_vector are exclusive
if sampling_frequency is None:
assert time_vector is not None, "Pass either 'sampling_frequency' or 'time_vector'"
assert time_vector.ndim == 1, "time_vector should be a 1D array"
if time_vector is None:
assert sampling_frequency is not None, "Pass either 'sampling_frequency' or 'time_vector'"
self.sampling_frequency = sampling_frequency
self.t_start = t_start
self.time_vector = time_vector
BaseSegment.__init__(self)
def get_times(self):
if self.time_vector is not None:
return self.time_vector
else:
time_vector = np.arange(self.get_num_samples(), dtype='float64')
time_vector /= self.sampling_frequency
if self.t_start is not None:
time_vector += self.t_start
return time_vector
def get_times_kwargs(self):
# useful for other internal RecordingSegment
d = dict(sampling_frequency=self.sampling_frequency, t_start=self.t_start,
time_vector=self.time_vector)
return d
def sample_index_to_time(self, sample_ind):
"""
Transform sample index into time in seconds
"""
if self.time_vector is None:
time_s = sample_ind / self.sampling_frequency
if self.t_start is not None:
time_s += self.t_start
else:
time_s = self.time_vector[sample_ind]
return time_s
def time_to_sample_index(self, time_s):
"""
Transform time in seconds into sample index
"""
if self.time_vector is None:
if self.t_start is None:
sample_index = time_s * self.sampling_frequency
else:
sample_index = (time_s - self.t_start) * self.sampling_frequency
else:
sample_index = np.searchsorted(self.time_vector, time_s, side='right') - 1
return int(sample_index)
def get_num_samples(self) -> int:
"""Returns the number of samples in this signal segment
Returns:
SampleIndex: Number of samples in the signal segment
"""
# must be implemented in subclass
raise NotImplementedError
def get_traces(self,
start_frame: Union[int, None] = None,
end_frame: Union[int, None] = None,
channel_indices: Union[List, None] = None,
) -> np.ndarray:
"""
Return the raw traces, optionally for a subset of samples and/or channels
Parameters
----------
start_frame: (Union[int, None], optional)
start sample index, or zero if None. Defaults to None.
end_frame: (Union[int, None], optional)
end_sample, or number of samples if None. Defaults to None.
channel_indices: (Union[List, None], optional)
Indices of channels to return, or all channels if None. Defaults to None.
Returns
-------
traces: np.ndarray
Array of traces, num_samples x num_channels
"""
# must be implemented in subclass
raise NotImplementedError
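# Illustrative sketch (not part of spikeinterface): time/sample conversion for
# a segment that only knows its sampling frequency and t_start (no explicit
# time_vector). The numbers are arbitrary.
def _example_segment_times():
    seg = BaseRecordingSegment(sampling_frequency=30000., t_start=10.)
    # 0.5 s after t_start corresponds to sample 15000
    assert seg.time_to_sample_index(10.5) == 15000
    # the reverse mapping adds t_start back
    assert seg.sample_index_to_time(15000) == 10.5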
|
tests/chainerx_tests/unit_tests/test_device.py | zaltoprofen/chainer | 3,705 | 12612427 | import copy
import pickle
import pytest
import chainerx
_devices_data = [
{'index': 0},
{'index': 1},
]
@pytest.fixture(params=_devices_data)
def device_data1(request):
return request.param
@pytest.fixture(params=_devices_data)
def device_data2(request):
return request.param
@pytest.fixture
def device_instance1(request, device_data1):
return chainerx.get_global_default_context().get_device(
'native', device_data1['index'])
@pytest.fixture
def device_instance2(request, device_data2):
return chainerx.get_global_default_context().get_device(
'native', device_data2['index'])
@pytest.fixture
def cache_restore_device(request):
device = chainerx.get_default_device()
def restore_device():
chainerx.set_default_device(device)
request.addfinalizer(restore_device)
def test_creation():
ctx = chainerx.get_global_default_context()
backend = ctx.get_backend('native')
device = backend.get_device(0)
assert device.name == 'native:0'
assert device.backend is backend
assert device.context is ctx
assert device.index == 0
device = backend.get_device(1)
assert device.name == 'native:1'
assert device.backend is backend
assert device.context is ctx
assert device.index == 1
def test_synchronize():
ctx = chainerx.get_global_default_context()
device = ctx.get_device('native', 0)
device.synchronize()
@pytest.mark.usefixtures('cache_restore_device')
def test_default_device(device_instance1):
device = device_instance1
chainerx.set_default_device(device)
assert chainerx.get_default_device() is device
@pytest.mark.usefixtures('cache_restore_device')
def test_default_device_with_name(device_instance1):
device = device_instance1
chainerx.set_default_device(device.name)
assert chainerx.get_default_device() is device
@pytest.mark.usefixtures('cache_restore_device')
def test_eq(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
device1_1 = device1.backend.get_device(device1.index)
device1_2 = device1.backend.get_device(device1.index)
device2_1 = device2.backend.get_device(device2.index)
assert device1_1 == device1_2
assert device1_1 != device2_1
assert not (device1_1 != device1_2)
assert not (device1_1 == device2_1)
@pytest.mark.usefixtures('cache_restore_device')
def test_using_device(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
chainerx.set_default_device(device1)
with chainerx.using_device(device2) as scope:
assert chainerx.get_default_device() is device2
assert scope.device is device2
scope = chainerx.using_device(device2)
assert chainerx.get_default_device() == device1
assert scope.device is device2
with scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
assert chainerx.get_default_device() == device1
assert scope.device is device2
@pytest.mark.usefixtures('cache_restore_device')
def test_using_device_with_name(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
chainerx.set_default_device(device1)
with chainerx.using_device(device2.name) as scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
with chainerx.using_device(device2.backend.name, device2.index) as scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_device_pickle(device):
s = pickle.dumps(device)
device2 = pickle.loads(s)
assert device is device2
# TODO(niboshi): Add deepcopy test with arbitrary context
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_device_deepcopy(device):
device2 = copy.deepcopy(device)
assert device is device2
|
terrascript/data/hcp.py | mjuenema/python-terrascript | 507 | 12612431 | <filename>terrascript/data/hcp.py
# terrascript/data/hcp.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:10 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.hcp
#
# instead of
#
# >>> import terrascript.data.hashicorp.hcp
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.hashicorp.hcp import *
|
niapy/algorithms/algorithm.py | altaregos/NiaPy | 202 | 12612485 | # encoding=utf8
import logging
import multiprocessing
import threading
import numpy as np
from numpy.random import default_rng
from niapy.util.array import objects_to_array
logging.basicConfig()
logger = logging.getLogger('niapy.util.utility')
logger.setLevel('INFO')
__all__ = [
'Algorithm',
'Individual',
'default_individual_init',
'default_numpy_init'
]
def default_numpy_init(task, population_size, rng, **_kwargs):
r"""Initialize starting population that is represented with `numpy.ndarray` with shape `(population_size, task.dimension)`.
Args:
task (Task): Optimization task.
population_size (int): Number of individuals in population.
rng (numpy.random.Generator): Random number generator.
Returns:
Tuple[numpy.ndarray, numpy.ndarray[float]]:
1. New population with shape `(population_size, task.D)`.
2. New population function/fitness values.
"""
pop = rng.uniform(task.lower, task.upper, (population_size, task.dimension))
fpop = np.apply_along_axis(task.eval, 1, pop)
return pop, fpop
def default_individual_init(task, population_size, rng, individual_type=None, **_kwargs):
r"""Initialize `population_size` individuals of type `individual_type`.
Args:
task (Task): Optimization task.
population_size (int): Number of individuals in population.
rng (numpy.random.Generator): Random number generator.
individual_type (Optional[Individual]): Class of individual in population.
Returns:
Tuple[numpy.ndarray[Individual], numpy.ndarray[float]:
1. Initialized individuals.
2. Initialized individuals function/fitness values.
"""
pop = objects_to_array([individual_type(task=task, rng=rng, e=True) for _ in range(population_size)])
return pop, np.asarray([x.f for x in pop])
class Algorithm:
r"""Class for implementing algorithms.
Date:
2018
Author:
<NAME>
License:
MIT
Attributes:
Name (List[str]): List of names for algorithm.
rng (numpy.random.Generator): Random generator.
population_size (int): Population size.
initialization_function (Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Type of individuals used in population, default value is None for Numpy arrays.
"""
Name = ['Algorithm', 'AAA']
def __init__(self, population_size=50, initialization_function=default_numpy_init, individual_type=None,
seed=None, *args, **kwargs):
r"""Initialize algorithm and create name for an algorithm.
Args:
population_size (Optional[int]): Population size.
initialization_function (Optional[Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Individual type used in population, default is Numpy array.
seed (Optional[int]): Starting seed for random generator.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
self.population_size = population_size
self.initialization_function = initialization_function
self.individual_type = individual_type
self.rng = default_rng(seed)
self.exception = None
@staticmethod
def info():
r"""Get algorithm information.
Returns:
str: Algorithm information.
"""
return '''Basic algorithm. No implementation!!!'''
def set_parameters(self, population_size=50, initialization_function=default_numpy_init, individual_type=None,
*args, **kwargs):
r"""Set the parameters/arguments of the algorithm.
Args:
population_size (Optional[int]): Population size.
initialization_function (Optional[Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Individual type used in population, default is Numpy array.
See Also:
* :func:`niapy.algorithms.default_numpy_init`
* :func:`niapy.algorithms.default_individual_init`
"""
self.population_size = population_size
self.initialization_function = initialization_function
self.individual_type = individual_type
def get_parameters(self):
r"""Get parameters of the algorithm.
Returns:
Dict[str, Any]:
* Parameter name (str): Represents a parameter name
* Value of parameter (Any): Represents the value of the parameter
"""
return {
'population_size': self.population_size,
'initialization_function': self.initialization_function,
'individual_type': self.individual_type
}
def random(self, size=None):
r"""Get random distribution of shape size in range from 0 to 1.
Args:
size (Union[None, int, Iterable[int]]): Shape of returned random distribution.
Returns:
Union[numpy.ndarray[float], float]: Random number or numbers :math:`\in [0, 1]`.
"""
return self.rng.random(size)
def uniform(self, low, high, size=None):
r"""Get uniform random distribution of shape size in range from "low" to "high".
Args:
low (Union[float, Iterable[float]]): Lower bound.
high (Union[float, Iterable[float]]): Upper bound.
size (Union[None, int, Iterable[int]]): Shape of returned uniform random distribution.
Returns:
Union[numpy.ndarray[float], float]: Array of numbers :math:`\in [\mathit{Lower}, \mathit{Upper}]`.
"""
return self.rng.uniform(low, high, size)
def normal(self, loc, scale, size=None):
r"""Get normal random distribution of shape size with mean "loc" and standard deviation "scale".
Args:
loc (float): Mean of the normal random distribution.
scale (float): Standard deviation of the normal random distribution.
size (Union[int, Iterable[int]]): Shape of returned normal random distribution.
Returns:
Union[numpy.ndarray[float], float]: Array of numbers.
"""
return self.rng.normal(loc, scale, size)
def standard_normal(self, size=None):
r"""Get standard normal distribution of shape size.
Args:
size (Union[int, Iterable[int]]): Shape of returned standard normal distribution.
Returns:
Union[numpy.ndarray[float], float]: Random generated numbers or one random generated number :math:`\in [0, 1]`.
"""
return self.rng.standard_normal(size)
def integers(self, low, high=None, size=None, skip=None):
r"""Get discrete uniform (integer) random distribution of D shape in range from "low" to "high".
Args:
low (Union[int, Iterable[int]]): Lower integer bound.
If high is None, then 0 is used as the lower bound and this value is used as the exclusive upper bound.
high (Union[int, Iterable[int]]): One above upper integer bound.
size (Union[None, int, Iterable[int]]): shape of returned discrete uniform random distribution.
skip (Union[None, int, Iterable[int], numpy.ndarray[int]]): numbers to skip.
Returns:
Union[int, numpy.ndarray[int]]: Random generated integer number.
"""
r = self.rng.integers(low, high, size)
return r if skip is None or r not in skip else self.integers(low, high, size, skip)
@staticmethod
def get_best(population, population_fitness, best_x=None, best_fitness=np.inf):
r"""Get the best individual for population.
Args:
population (numpy.ndarray): Current population.
population_fitness (numpy.ndarray): Current populations fitness/function values of aligned individuals.
best_x (Optional[numpy.ndarray]): Best individual.
best_fitness (float): Fitness value of best individual.
Returns:
Tuple[numpy.ndarray, float]:
1. Coordinates of best solution.
2. Best fitness/function value.
"""
ib = np.argmin(population_fitness)
if isinstance(population_fitness, (float, int)) and best_fitness >= population_fitness:
best_x, best_fitness = population, population_fitness
elif isinstance(population_fitness, (np.ndarray, list)) and best_fitness >= population_fitness[ib]:
best_x, best_fitness = population[ib], population_fitness[ib]
return (best_x.x.copy() if isinstance(best_x, Individual) else best_x.copy()), best_fitness
def init_population(self, task):
r"""Initialize starting population of optimization algorithm.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, Dict[str, Any]]:
1. New population.
2. New population fitness values.
3. Additional arguments.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
pop, fpop = self.initialization_function(task=task, population_size=self.population_size, rng=self.rng,
individual_type=self.individual_type)
return pop, fpop, {}
def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):
r"""Core functionality of algorithm.
This function is called on every algorithm iteration.
Args:
task (Task): Optimization task.
population (numpy.ndarray): Current population coordinates.
population_fitness (numpy.ndarray): Current population fitness value.
best_x (numpy.ndarray): Current generation best individuals coordinates.
best_fitness (float): current generation best individuals fitness value.
**params (Dict[str, Any]): Additional arguments for algorithms.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
1. New populations coordinates.
2. New populations fitness values.
3. New global best position/solution
4. New global best fitness/objective value
5. Additional arguments of the algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.iteration_generator`
"""
return population, population_fitness, best_x, best_fitness, params
def iteration_generator(self, task):
r"""Run the algorithm for a single iteration and return the best solution.
Args:
task (Task): Task with bounds and objective function for optimization.
Returns:
Generator[Tuple[numpy.ndarray, float], None, None]: Generator getting new/old optimal global values.
Yields:
Tuple[numpy.ndarray, float]:
1. New population best individuals coordinates.
2. Fitness value of the best solution.
See Also:
* :func:`niapy.algorithms.Algorithm.init_population`
* :func:`niapy.algorithms.Algorithm.run_iteration`
"""
pop, fpop, params = self.init_population(task)
xb, fxb = self.get_best(pop, fpop)
if task.stopping_condition():
yield xb, fxb
while True:
pop, fpop, xb, fxb, params = self.run_iteration(task, pop, fpop, xb, fxb, **params)
yield xb, fxb
def run_task(self, task):
r"""Start the optimization.
Args:
task (Task): Task with bounds and objective function for optimization.
Returns:
Tuple[numpy.ndarray, float]:
1. Best individuals components found in optimization process.
2. Best fitness value found in optimization process.
See Also:
* :func:`niapy.algorithms.Algorithm.iteration_generator`
"""
algo, xb, fxb = self.iteration_generator(task), None, np.inf
while not task.stopping_condition():
xb, fxb = next(algo)
task.next_iter()
return xb, fxb
def run(self, task):
r"""Start the optimization.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, float]:
1. Best individuals components found in optimization process.
2. Best fitness value found in optimization process.
See Also:
* :func:`niapy.algorithms.Algorithm.run_task`
"""
try:
r = self.run_task(task)
return r[0], r[1] * task.optimization_type.value
except BaseException as e:
if threading.current_thread() == threading.main_thread() and multiprocessing.current_process().name == 'MainProcess':
raise e
self.exception = e
return None, None
def bad_run(self):
r"""Check if some exceptions where thrown when the algorithm was running.
Returns:
bool: True if some error where detected at runtime of the algorithm, otherwise False
"""
return self.exception is not None
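# Illustrative sketch (not part of niapy): a minimal Algorithm subclass showing
# how run_iteration() is meant to be overridden. It perturbs the population
# with uniform noise and keeps improvements. This is not an algorithm shipped
# with niapy; it only exercises the hooks documented above.
class _ExampleRandomWalk(Algorithm):
    Name = ['ExampleRandomWalk', 'ERW']

    def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):
        # propose candidates around the current population and clip to the bounds
        candidates = np.clip(population + self.uniform(-0.1, 0.1, population.shape),
                             task.lower, task.upper)
        candidates_fitness = np.apply_along_axis(task.eval, 1, candidates)
        improved = candidates_fitness < population_fitness
        population[improved] = candidates[improved]
        population_fitness[improved] = candidates_fitness[improved]
        best_x, best_fitness = self.get_best(population, population_fitness, best_x, best_fitness)
        return population, population_fitness, best_x, best_fitness, params
# Typical use would be: _ExampleRandomWalk(population_size=20).run(some_task)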
class Individual:
r"""Class that represents one solution in population of solutions.
Date:
2018
Author:
<NAME>
License:
MIT
Attributes:
x (numpy.ndarray): Coordinates of individual.
f (float): Function/fitness value of individual.
"""
def __init__(self, x=None, task=None, e=True, rng=None, **kwargs):
r"""Initialize new individual.
Parameters:
task (Optional[Task]): Optimization task.
rng (Optional[numpy.random.Generator]): Random generator.
x (Optional[numpy.ndarray]): Individuals components.
e (Optional[bool]): True to evaluate the individual on initialization. Default value is True.
"""
self.f = task.optimization_type.value * np.inf if task is not None else np.inf
if x is not None:
self.x = x if isinstance(x, np.ndarray) else np.asarray(x)
elif task is not None:
self.generate_solution(task, default_rng(rng))
if e and task is not None:
self.evaluate(task, rng)
def generate_solution(self, task, rng):
r"""Generate new solution.
Generate new solution for this individual and set it to ``self.x``.
This method uses ``rng`` for getting random numbers.
For generating random components ``rng`` and ``task`` is used.
Args:
task (Task): Optimization task.
rng (numpy.random.Generator): Random numbers generator object.
"""
self.x = rng.uniform(task.lower, task.upper, task.dimension)
def evaluate(self, task, rng=None):
r"""Evaluate the solution.
Evaluate solution ``this.x`` with the help of task.
Task is used for repairing the solution and then evaluating it.
Args:
task (Task): Objective function object.
rng (Optional[numpy.random.Generator]): Random generator.
See Also:
* :func:`niapy.task.Task.repair`
"""
self.x = task.repair(self.x, rng=rng)
self.f = task.eval(self.x)
def copy(self):
r"""Return a copy of self.
Method returns copy of ``this`` object so it is safe for editing.
Returns:
Individual: Copy of self.
"""
return Individual(x=self.x.copy(), e=False, f=self.f)
def __eq__(self, other):
r"""Compare the individuals for equalities.
Args:
other (Union[Any, numpy.ndarray]): Object that we want to compare this object to.
Returns:
bool: `True` if equal or `False` if not equal.
"""
if isinstance(other, np.ndarray):
for e in other:
if self == e:
return True
return False
return np.array_equal(self.x, other.x) and self.f == other.f
def __str__(self):
r"""Print the individual with the solution and objective value.
Returns:
str: String representation of self.
"""
return '%s -> %s' % (self.x, self.f)
def __getitem__(self, i):
r"""Get the value of i-th component of the solution.
Args:
i (int): Position of the solution component.
Returns:
Any: Value of ith component.
"""
return self.x[i]
def __setitem__(self, i, v):
r"""Set the value of i-th component of the solution to v value.
Args:
i (int): Position of the solution component.
v (Any): Value to set to i-th component.
"""
self.x[i] = v
def __len__(self):
r"""Get the length of the solution or the number of components.
Returns:
int: Number of components.
"""
return len(self.x)
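# Illustrative sketch (not part of niapy): an Individual built from explicit
# coordinates. Without a Task its fitness stays at infinity until evaluate()
# is called with one.
def _example_individual():
    ind = Individual(x=[0.0, 1.0, 2.0])
    assert len(ind) == 3 and ind[1] == 1.0
    assert ind.f == np.inf
    assert ind == ind.copy()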
|
dbaas/dbaas/features.py | didindinn/database-as-a-service | 303 | 12612502 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
LDAP_ENABLED = settings.LDAP_ENABLED
|
PythonNetwork/venv/Lib/site-packages/pip/_vendor/packaging/tags.py | Moldovandreii/RepetitionCount | 2,160 | 12612543 | <filename>PythonNetwork/venv/Lib/site-packages/pip/_vendor/packaging/tags.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import
import distutils.util
try:
from importlib.machinery import EXTENSION_SUFFIXES
except ImportError: # pragma: no cover
import imp
EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
del imp
import collections
import logging
import os
import platform
import re
import struct
import sys
import sysconfig
import warnings
from ._typing import TYPE_CHECKING, cast
if TYPE_CHECKING: # pragma: no cover
from typing import (
Dict,
FrozenSet,
IO,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
GlibcVersion = Tuple[int, int]
logger = logging.getLogger(__name__)
INTERPRETER_SHORT_NAMES = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
} # type: Dict[str, str]
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
}
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50) # type: Dict[int, int]
glibcVersion = collections.namedtuple("Version", ["major", "minor"])
class Tag(object):
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter, abi, platform):
# type: (str, str, str) -> None
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
# that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
@property
def interpreter(self):
# type: () -> str
return self._interpreter
@property
def abi(self):
# type: () -> str
return self._abi
@property
def platform(self):
# type: () -> str
return self._platform
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, Tag):
return NotImplemented
return (
(self.platform == other.platform)
and (self.abi == other.abi)
and (self.interpreter == other.interpreter)
)
def __hash__(self):
# type: () -> int
return self._hash
def __str__(self):
# type: () -> str
return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
def __repr__(self):
# type: () -> str
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag):
# type: (str) -> FrozenSet[Tag]
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
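# Illustrative sketch (not part of packaging): a compressed tag set expands
# into every interpreter/ABI/platform combination.
def _example_parse_tag():
    tags = parse_tag("cp38.cp39-abi3-manylinux1_x86_64")
    assert tags == frozenset(
        {Tag("cp38", "abi3", "manylinux1_x86_64"), Tag("cp39", "abi3", "manylinux1_x86_64")}
    )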
def _warn_keyword_parameter(func_name, kwargs):
# type: (str, Dict[str, bool]) -> bool
"""
Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
"""
if not kwargs:
return False
elif len(kwargs) > 1 or "warn" not in kwargs:
kwargs.pop("warn", None)
arg = next(iter(kwargs.keys()))
raise TypeError(
"{}() got an unexpected keyword argument {!r}".format(func_name, arg)
)
return kwargs["warn"]
def _get_config_var(name, warn=False):
# type: (str, bool) -> Union[int, str, None]
value = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string):
# type: (str) -> str
return string.replace(".", "_").replace("-", "_")
def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version, warn=False):
# type: (PythonVersion, bool) -> List[str]
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append("cp{version}".format(version=version))
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version=None, # type: Optional[PythonVersion]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
If python_version only specifies a major version then user-provided ABIs and
the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
warn = _warn_keyword_parameter("cpython_tags", kwargs)
if not python_version:
python_version = sys.version_info[:2]
interpreter = "cp{}".format(_version_nodot(python_version[:2]))
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or _platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
yield tag
for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
yield tag
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
def _generic_abi():
# type: () -> Iterator[str]
abi = sysconfig.get_config_var("SOABI")
if abi:
yield _normalize_string(abi)
def generic_tags(
interpreter=None, # type: Optional[str]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
warn = _warn_keyword_parameter("generic_tags", kwargs)
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
platforms = list(platforms or _platform_tags())
abis = list(abis)
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version):
# type: (PythonVersion) -> Iterator[str]
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield "py{version}".format(version=_version_nodot(py_version[:2]))
yield "py{major}".format(major=py_version[0])
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
def compatible_tags(
python_version=None, # type: Optional[PythonVersion]
interpreter=None, # type: Optional[str]
platforms=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Iterator[Tag]
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or _platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
# type: (str, bool) -> str
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version, cpu_arch):
# type: (MacVersion, str) -> List[str]
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
def mac_platforms(version=None, arch=None):
# type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver() # type: ignore
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
else:
version = version
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
if (10, 0) <= version and version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
)
if version >= (11, 0):
# Starting with Mac OS 11, each yearly release bumps the major version
# number. The minor versions are now the midyear updates.
for major_version in range(version[0], 10, -1):
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
)
if version >= (11, 0) and arch == "x86_64":
# Mac OS 11 on x86_64 is compatible with binaries from previous releases.
# Arm64 support was introduced in 11.0, so no Arm binaries from previous
# releases exist.
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
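# Illustrative helper (a minimal sketch): pinning both parameters keeps the
# result independent of the machine the code runs on.
def _example_mac_platforms():
    # The first tag is the most specific one, "macosx_10_15_x86_64", followed by
    # the alternate binary formats for 10.15 and then progressively older 10.x
    # releases down to 10.0.
    return list(mac_platforms(version=(10, 15), arch="x86_64"))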
# From PEP 513, PEP 600
def _is_manylinux_compatible(name, arch, glibc_version):
# type: (str, str, GlibcVersion) -> bool
sys_glibc = _get_glibc_version()
if sys_glibc < glibc_version:
return False
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
except ImportError:
pass
else:
if hasattr(_manylinux, "manylinux_compatible"):
result = _manylinux.manylinux_compatible(
glibc_version[0], glibc_version[1], arch
)
if result is not None:
return bool(result)
else:
if glibc_version == (2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
return bool(_manylinux.manylinux1_compatible)
if glibc_version == (2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
return bool(_manylinux.manylinux2010_compatible)
if glibc_version == (2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
return bool(_manylinux.manylinux2014_compatible)
return True
def _glibc_version_string():
# type: () -> Optional[str]
# Returns glibc version string, or None if not using glibc.
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _glibc_version_string_confstr():
# type: () -> Optional[str]
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
"CS_GNU_LIBC_VERSION"
)
assert version_string is not None
_, version = version_string.split() # type: Tuple[str, str]
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes():
# type: () -> Optional[str]
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
    # cannot proceed, so we bail on our attempt.
try:
# Note: typeshed is wrong here so we are ignoring this line.
process_namespace = ctypes.CDLL(None) # type: ignore
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version() # type: str
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _parse_glibc_version(version_str):
# type: (str) -> Tuple[int, int]
# Parse glibc version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str,
RuntimeWarning,
)
return -1, -1
return (int(m.group("major")), int(m.group("minor")))
_glibc_version = [] # type: List[Tuple[int, int]]
def _get_glibc_version():
# type: () -> Tuple[int, int]
if _glibc_version:
return _glibc_version[0]
version_str = _glibc_version_string()
if version_str is None:
_glibc_version.append((-1, -1))
else:
_glibc_version.append(_parse_glibc_version(version_str))
return _glibc_version[0]
# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader(object):
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
class _InvalidELFFileHeader(ValueError):
"""
An invalid ELF file header was found.
"""
ELF_MAGIC_NUMBER = 0x7F454C46
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFDATA2LSB = 1
ELFDATA2MSB = 2
EM_386 = 3
EM_S390 = 22
EM_ARM = 40
EM_X86_64 = 62
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
def __init__(self, file):
# type: (IO[bytes]) -> None
def unpack(fmt):
# type: (str) -> int
try:
(result,) = struct.unpack(
fmt, file.read(struct.calcsize(fmt))
) # type: (int, )
except struct.error:
raise _ELFFileHeader._InvalidELFFileHeader()
return result
self.e_ident_magic = unpack(">I")
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_class = unpack("B")
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_data = unpack("B")
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_version = unpack("B")
self.e_ident_osabi = unpack("B")
self.e_ident_abiversion = unpack("B")
self.e_ident_pad = file.read(7)
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
self.e_type = unpack(format_h)
self.e_machine = unpack(format_h)
self.e_version = unpack(format_i)
self.e_entry = unpack(format_p)
self.e_phoff = unpack(format_p)
self.e_shoff = unpack(format_p)
self.e_flags = unpack(format_i)
self.e_ehsize = unpack(format_h)
self.e_phentsize = unpack(format_h)
self.e_phnum = unpack(format_h)
self.e_shentsize = unpack(format_h)
self.e_shnum = unpack(format_h)
self.e_shstrndx = unpack(format_h)
def _get_elf_header():
# type: () -> Optional[_ELFFileHeader]
try:
with open(sys.executable, "rb") as f:
elf_header = _ELFFileHeader(f)
except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
return None
return elf_header
def _is_linux_armhf():
# type: () -> bool
# hard-float ABI can be detected from the ELF header of the running
# process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_ARM
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABIMASK
) == elf_header.EF_ARM_ABI_VER5
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
) == elf_header.EF_ARM_ABI_FLOAT_HARD
return result
def _is_linux_i686():
# type: () -> bool
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_386
return result
def _have_compatible_manylinux_abi(arch):
# type: (str) -> bool
if arch == "armv7l":
return _is_linux_armhf()
if arch == "i686":
return _is_linux_i686()
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
def _manylinux_tags(linux, arch):
# type: (str, str) -> Iterator[str]
# Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = glibcVersion(2, 16)
if arch in {"x86_64", "i686"}:
# On x86/i686 also oldest glibc to be supported is (2, 5).
too_old_glibc2 = glibcVersion(2, 4)
current_glibc = glibcVersion(*_get_glibc_version())
glibc_max_list = [current_glibc]
# We can assume compatibility across glibc major versions.
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
#
# Build a list of maximum glibc versions so that we can
# output the canonical list of all glibc from current_glibc
# down to too_old_glibc2, including all intermediary versions.
for glibc_major in range(current_glibc.major - 1, 1, -1):
glibc_max_list.append(glibcVersion(glibc_major, _LAST_GLIBC_MINOR[glibc_major]))
for glibc_max in glibc_max_list:
if glibc_max.major == too_old_glibc2.major:
min_minor = too_old_glibc2.minor
else:
# For other glibc major versions oldest supported is (x, 0).
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = (glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
if _is_manylinux_compatible(tag, arch, glibc_version):
yield linux.replace("linux", tag)
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_manylinux_compatible(legacy_tag, arch, glibc_version):
yield linux.replace("linux", legacy_tag)
def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
# type: (bool) -> Iterator[str]
linux = _normalize_string(distutils.util.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
_, arch = linux.split("_", 1)
if _have_compatible_manylinux_abi(arch):
for tag in _manylinux_tags(linux, arch):
yield tag
yield linux
def _generic_platforms():
# type: () -> Iterator[str]
yield _normalize_string(distutils.util.get_platform())
def _platform_tags():
# type: () -> Iterator[str]
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name():
# type: () -> str
"""
Returns the name of the running interpreter.
"""
try:
name = sys.implementation.name # type: ignore
except AttributeError: # pragma: no cover
# Python 2.7 compatibility.
name = platform.python_implementation().lower()
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(**kwargs):
# type: (bool) -> str
"""
Returns the version of the running interpreter.
"""
warn = _warn_keyword_parameter("interpreter_version", kwargs)
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version):
# type: (PythonVersion) -> str
return "".join(map(str, version))
def sys_tags(**kwargs):
# type: (bool) -> Iterator[Tag]
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
warn = _warn_keyword_parameter("sys_tags", kwargs)
interp_name = interpreter_name()
if interp_name == "cp":
for tag in cpython_tags(warn=warn):
yield tag
else:
for tag in generic_tags():
yield tag
for tag in compatible_tags():
yield tag
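# Illustrative output (environment dependent, so only the shape is sketched
# here): on a hypothetical CPython 3.9 glibc-based x86_64 Linux host the
# iterator starts with the most specific tags, e.g.
#   cp39-cp39-manylinux_2_17_x86_64, ..., cp39-cp39-linux_x86_64, ...
# and ends with the broad py*-none-any fallbacks.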
|
keras_retinanet/utils/config.py | kukuruza/keras-retinanet | 124 | 12612554 |
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import configparser
import numpy as np
import keras
from ..utils.anchors import AnchorParameters
def read_config_file(config_path):
config = configparser.ConfigParser()
with open(config_path, 'r') as file:
config.read_file(file)
assert 'anchor_parameters' in config, \
"Malformed config file. Verify that it contains the anchor_parameters section."
config_keys = set(config['anchor_parameters'])
default_keys = set(AnchorParameters.default.__dict__.keys())
assert config_keys <= default_keys, \
"Malformed config file. These keys are not valid: {}".format(config_keys - default_keys)
return config
def parse_anchor_parameters(config):
ratios = np.array(list(map(float, config['anchor_parameters']['ratios'].split(' '))), keras.backend.floatx())
scales = np.array(list(map(float, config['anchor_parameters']['scales'].split(' '))), keras.backend.floatx())
sizes = list(map(int, config['anchor_parameters']['sizes'].split(' ')))
strides = list(map(int, config['anchor_parameters']['strides'].split(' ')))
return AnchorParameters(sizes, strides, ratios, scales)
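# Illustrative config accepted by the two helpers above (the numeric values are
# placeholders, not recommended settings):
#
#   [anchor_parameters]
#   sizes   = 32 64 128 256 512
#   strides = 8 16 32 64 128
#   ratios  = 0.5 1 2
#   scales  = 1 1.2 1.6
#
# Typical usage, assuming the snippet is saved as config.ini:
#   config = read_config_file('config.ini')
#   anchor_params = parse_anchor_parameters(config)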
|
pykafka/partition.py | Instamojo/pykafka | 1,174 | 12612583 | """
Author: <NAME>, <NAME>
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["Partition"]
import datetime as dt
import logging
import time
import weakref
from .common import OffsetType, EPOCH
from .exceptions import LeaderNotFoundError
from .protocol import PartitionOffsetRequest
log = logging.getLogger(__name__)
class Partition(object):
"""
A Partition is an abstraction over the kafka concept of a partition.
A kafka partition is a logical division of the logs for a topic. Its
messages are totally ordered.
"""
def __init__(self, topic, id_, leader, replicas, isr):
"""Instantiate a new Partition
:param topic: The topic to which this Partition belongs
:type topic: :class:`pykafka.topic.Topic`
:param id_: The identifier for this partition
:type id_: int
:param leader: The broker that is currently acting as the leader for
this partition.
:type leader: :class:`pykafka.broker.Broker`
:param replicas: A list of brokers containing this partition's replicas
:type replicas: Iterable of :class:`pykafka.broker.Broker`
:param isr: The current set of in-sync replicas for this partition
:type isr: Iterable of :class:`pykafka.broker.Broker`
"""
self._id = id_
self._leader = leader
self._replicas = replicas
self._isr = isr
self._topic = weakref.ref(topic)
def __repr__(self):
return "<{module}.{name} at {id_} (id={my_id})>".format(
module=self.__class__.__module__,
name=self.__class__.__name__,
id_=hex(id(self)),
my_id=self._id,
)
def __lt__(self, other):
return self._id < other._id
@property
def id(self):
"""The identifying int for this partition, unique within its topic"""
return self._id
@property
def leader(self):
"""The broker currently acting as leader for this partition"""
return self._leader
@property
def replicas(self):
"""The list of brokers currently holding replicas of this partition"""
return self._replicas
@property
def isr(self):
"""The current list of in-sync replicas for this partition"""
return self._isr
@property
def topic(self):
"""The topic to which this partition belongs"""
return self._topic()
def fetch_offset_limit(self, offsets_before, max_offsets=1):
"""Use the Offset API to find a limit of valid offsets
for this partition.
:param offsets_before: Return an offset from before
this timestamp (in milliseconds). Deprecated::2.7,3.6: do not use int
:type offsets_before: `datetime.datetime` or int
:param max_offsets: The maximum number of offsets to return
:type max_offsets: int
"""
if isinstance(offsets_before, dt.datetime):
offsets_before = round((offsets_before - EPOCH).total_seconds() * 1000)
for i in range(self.topic._cluster._max_connection_retries):
if i > 0:
log.debug("Retrying offset limit fetch")
time.sleep(i * 2)
request = PartitionOffsetRequest(
self.topic.name, self.id, offsets_before, max_offsets
)
res = self._leader.request_offset_limits([request])
limit = res.topics[self.topic.name][self._id][0]
if len(limit) > 0:
return limit
def latest_available_offset(self):
"""Get the offset of the next message that would be appended to this partition"""
return self.fetch_offset_limit(OffsetType.LATEST)[0]
def earliest_available_offset(self):
"""Get the earliest offset for this partition."""
return self.fetch_offset_limit(OffsetType.EARLIEST)[0]
def __hash__(self):
return hash((self.topic, self.id))
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self == other
def update(self, brokers, metadata):
"""Update this partition with fresh metadata.
:param brokers: Brokers on which partitions exist
:type brokers: List of :class:`pykafka.broker.Broker`
:param metadata: Metadata for the partition
:type metadata: :class:`pykafka.protocol.PartitionMetadata`
"""
try:
# Check leader
if metadata.leader != self._leader.id:
log.info('Updating leader for %s from broker %s to broker %s', self,
self._leader.id, metadata.leader)
self._leader = brokers[metadata.leader]
# Check Replicas
if sorted(r.id for r in self.replicas) != sorted(metadata.replicas):
log.info('Updating replicas list for %s', self)
self._replicas = [brokers[b] for b in metadata.replicas]
# Check In-Sync-Replicas
if sorted(i.id for i in self.isr) != sorted(metadata.isr):
log.info('Updating in sync replicas list for %s', self)
self._isr = [brokers[b] for b in metadata.isr]
except KeyError:
raise LeaderNotFoundError("Replica for partition %s not available. This is "
"probably because none of its replicas are "
"available.", self.id)
|
tests/test_encode.py | Narasimha1997/blurhash-python | 105 | 12612590 | from __future__ import absolute_import
import pytest
from blurhash import encode
def test_encode_file():
with open('tests/pic2.png', 'rb') as image_file:
result = encode(image_file, 4, 3)
assert result == 'LlMF%n00%#MwS|WCWEM{R*bbWBbH'
def test_encode_with_filename():
result = encode('tests/pic2.png', 4, 3)
assert result == 'LlMF%n00%#MwS|WCWEM{R*bbWBbH'
def test_encode_black_and_white_picture():
result = encode('tests/pic2_bw.png', 4, 3)
assert result == 'LjIY5?00?bIUofWBWBM{WBofWBj['
def test_invalid_image():
with pytest.raises(IOError):
encode('README.md', 4, 3)
def test_file_does_not_exist():
with pytest.raises(IOError):
encode('pic404.png', 4, 3)
def test_invalid_x_components():
with pytest.raises(ValueError):
encode('tests/pic2.png', 10, 3)
with pytest.raises(ValueError):
encode('tests/pic2.png', 0, 3)
def test_invalid_y_components():
with pytest.raises(ValueError):
encode('tests/pic2.png', 4, 10)
with pytest.raises(ValueError):
encode('tests/pic2.png', 4, 0)
|
dask/hashing.py | aeisenbarth/dask | 9,684 | 12612598 |
import binascii
import hashlib
hashers = [] # In decreasing performance order
# Timings on a largish array:
# - CityHash is 2x faster than MurmurHash
# - xxHash is slightly slower than CityHash
# - MurmurHash is 8x faster than SHA1
# - SHA1 is significantly faster than all other hashlib algorithms
try:
import cityhash # `python -m pip install cityhash`
except ImportError:
pass
else:
# CityHash disabled unless the reference leak in
# https://github.com/escherba/python-cityhash/pull/16
# is fixed.
if cityhash.__version__ >= "0.2.2":
def _hash_cityhash(buf):
"""
            Produce a 16-byte hash of *buf* using CityHash.
"""
h = cityhash.CityHash128(buf)
return h.to_bytes(16, "little")
hashers.append(_hash_cityhash)
try:
import xxhash # `python -m pip install xxhash`
except ImportError:
pass
else:
def _hash_xxhash(buf):
"""
        Produce an 8-byte hash of *buf* using xxHash.
"""
return xxhash.xxh64(buf).digest()
hashers.append(_hash_xxhash)
try:
import mmh3 # `python -m pip install mmh3`
except ImportError:
pass
else:
def _hash_murmurhash(buf):
"""
        Produce a 16-byte hash of *buf* using MurmurHash.
"""
return mmh3.hash_bytes(buf)
hashers.append(_hash_murmurhash)
def _hash_sha1(buf):
"""
    Produce a 20-byte hash of *buf* using SHA1.
"""
return hashlib.sha1(buf).digest()
hashers.append(_hash_sha1)
def hash_buffer(buf, hasher=None):
"""
Hash a bytes-like (buffer-compatible) object. This function returns
a good quality hash but is not cryptographically secure. The fastest
available algorithm is selected. A fixed-length bytes object is returned.
"""
if hasher is not None:
try:
return hasher(buf)
except (TypeError, OverflowError):
# Some hash libraries may have overly-strict type checking,
# not accepting all buffers
pass
for hasher in hashers:
try:
return hasher(buf)
except (TypeError, OverflowError):
pass
raise TypeError(f"unsupported type for hashing: {type(buf)}")
def hash_buffer_hex(buf, hasher=None):
"""
Same as hash_buffer, but returns its result in hex-encoded form.
"""
h = hash_buffer(buf, hasher)
s = binascii.b2a_hex(h)
return s.decode()
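# Small illustrative check (a sketch, not part of the library's tests): the
# generic result depends on which optional hash library is installed, so the
# stable SHA1 fallback is pinned explicitly for the well-known test vector.
def _example_hash_buffer_hex():
    # SHA1("abc") is the classic test vector below.
    assert hash_buffer_hex(b"abc", hasher=_hash_sha1) == (
        "a9993e364706816aba3e25717850c26c9cd0d89d"
    )
    # Without an explicit hasher the fastest available implementation is used,
    # so only the type of the result (a hex string) should be relied upon.
    return hash_buffer_hex(b"abc")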
|
rqd/rqd/rqmachine.py | winter3030/OpenCue | 329 | 12612611 |
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Machine information access module."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=wrong-import-position
from future import standard_library
standard_library.install_aliases()
# pylint: enable=wrong-import-position
from builtins import str
from builtins import range
from builtins import object
import ctypes
import errno
import logging as log
import math
import os
import platform
import re
import subprocess
import sys
import tempfile
import time
import traceback
# pylint: disable=import-error,wrong-import-position
if platform.system() in ('Linux', 'Darwin'):
import resource
elif platform.system() == "win32":
import win32api
# pylint: enable=import-error,wrong-import-position
import psutil
import rqd.compiled_proto.host_pb2
import rqd.compiled_proto.report_pb2
import rqd.rqconstants
import rqd.rqexceptions
import rqd.rqswap
import rqd.rqutil
KILOBYTE = 1024
class Machine(object):
"""Gathers information about the machine and resources"""
def __init__(self, rqCore, coreInfo):
"""Machine class initialization
@type rqCore: rqd.rqcore.RqCore
@param rqCore: Main RQD Object, used to access frames and nimby states
@type coreInfo: rqd.compiled_proto.report_pb2.CoreDetail
@param coreInfo: Object contains information on the state of all cores
"""
self.__rqCore = rqCore
self.__coreInfo = coreInfo
self.__tasksets = set()
self.__gpusets = set()
if platform.system() == 'Linux':
self.__vmstat = rqd.rqswap.VmStat()
self.state = rqd.compiled_proto.host_pb2.UP
self.__renderHost = rqd.compiled_proto.report_pb2.RenderHost()
self.__initMachineTags()
self.__initMachineStats()
self.__bootReport = rqd.compiled_proto.report_pb2.BootReport()
# pylint: disable=no-member
self.__bootReport.core_info.CopyFrom(self.__coreInfo)
# pylint: enable=no-member
self.__hostReport = rqd.compiled_proto.report_pb2.HostReport()
# pylint: disable=no-member
self.__hostReport.core_info.CopyFrom(self.__coreInfo)
# pylint: enable=no-member
self.__pidHistory = {}
self.setupHT()
self.setupGpu()
def isNimbySafeToRunJobs(self):
"""Returns False if nimby should be triggered due to resource limits"""
if platform.system() == "Linux":
self.updateMachineStats()
# pylint: disable=no-member
if self.__renderHost.free_mem < rqd.rqconstants.MINIMUM_MEM:
return False
if self.__renderHost.free_swap < rqd.rqconstants.MINIMUM_SWAP:
return False
# pylint: enable=no-member
return True
def isNimbySafeToUnlock(self):
"""Returns False if nimby should not unlock due to resource limits"""
if not self.isNimbySafeToRunJobs():
return False
if self.getLoadAvg() / self.__coreInfo.total_cores > rqd.rqconstants.MAXIMUM_LOAD:
return False
return True
# pylint: disable=no-self-use
@rqd.rqutil.Memoize
def isDesktop(self):
"""Returns True if machine starts in run level 5 (X11)
by checking /etc/inittab. False if not."""
if rqd.rqconstants.OVERRIDE_IS_DESKTOP:
return True
if platform.system() == "Linux" and os.path.exists(rqd.rqconstants.PATH_INITTAB):
inittabFile = open(rqd.rqconstants.PATH_INITTAB, "r")
for line in inittabFile:
if line.startswith("id:5:initdefault:"):
return True
if os.path.islink(rqd.rqconstants.PATH_INIT_TARGET):
if os.path.realpath(rqd.rqconstants.PATH_INIT_TARGET).endswith('graphical.target'):
return True
return False
def isUserLoggedIn(self):
"""Returns whether a user is logged into the machine RQD is running on."""
# For non-headless systems, first check to see if there
# is a user logged into the display.
displayNums = []
try:
displayRe = re.compile(r'X(\d+)')
for displays in os.listdir('/tmp/.X11-unix'):
m = displayRe.match(displays)
if not m:
continue
displayNums.append(int(m.group(1)))
except OSError as e:
if e.errno != errno.ENOENT:
raise
if displayNums:
# Check `who` output for a user associated with a display, like:
#
# (unknown) :0 2017-11-07 18:21 (:0)
#
# In this example, the user is '(unknown)'.
for line in subprocess.check_output(['/usr/bin/who']).splitlines():
for displayNum in displayNums:
if '(:{})'.format(displayNum) in line:
cols = line.split()
# Acceptlist a user called '(unknown)' as this
# is what shows up when gdm is running and
# showing a login screen.
if cols[0] != '(unknown)':
log.warning('User %s logged into display :%s', cols[0], displayNum)
return True
# When there is a display, the above code is considered
# the authoritative check for a logged in user. The
# code below gives false positives on a non-headless
# system.
return False
# These process names imply a user is logged in.
names = {'kdesktop', 'gnome-session', 'startkde'}
for proc in psutil.process_iter():
procName = proc.name()
for name in names:
if name in procName:
return True
return False
def rssUpdate(self, frames):
"""Updates the rss and maxrss for all running frames"""
if platform.system() != 'Linux':
return
pids = {}
for pid in os.listdir("/proc"):
if pid.isdigit():
try:
with open("/proc/%s/stat" % pid, "r") as statFile:
statFields = statFile.read().split()
# See "man proc"
pids[pid] = {
"session": statFields[5],
"vsize": statFields[22],
"rss": statFields[23],
# These are needed to compute the cpu used
"utime": statFields[13],
"stime": statFields[14],
"cutime": statFields[15],
"cstime": statFields[16],
# The time in jiffies the process started
# after system boot.
"start_time": statFields[21],
}
# pylint: disable=broad-except
except Exception:
log.exception('failed to read stat file for pid %s', pid)
# pylint: disable=too-many-nested-blocks
try:
now = int(time.time())
pidData = {"time": now}
bootTime = self.getBootTime()
values = list(frames.values())
for frame in values:
if frame.pid > 0:
session = str(frame.pid)
rss = 0
vsize = 0
pcpu = 0
for pid, data in pids.items():
if data["session"] == session:
try:
rss += int(data["rss"])
vsize += int(data["vsize"])
# jiffies used by this process, last two means that dead
# children are counted
totalTime = int(data["utime"]) + \
int(data["stime"]) + \
int(data["cutime"]) + \
int(data["cstime"])
# Seconds of process life, boot time is already in seconds
seconds = now - bootTime - \
float(data["start_time"]) / rqd.rqconstants.SYS_HERTZ
if seconds:
if pid in self.__pidHistory:
# Percent cpu using decaying average, 50% from 10 seconds
# ago, 50% from last 10 seconds:
oldTotalTime, oldSeconds, oldPidPcpu = \
self.__pidHistory[pid]
# checking if already updated data
if seconds != oldSeconds:
pidPcpu = ((totalTime - oldTotalTime) /
float(seconds - oldSeconds))
pcpu += (oldPidPcpu + pidPcpu) / 2 # %cpu
pidData[pid] = totalTime, seconds, pidPcpu
else:
pidPcpu = totalTime / seconds
pcpu += pidPcpu
pidData[pid] = totalTime, seconds, pidPcpu
# pylint: disable=broad-except
except Exception as e:
log.warning(
'Failure with pid rss update due to: %s at %s',
e, traceback.extract_tb(sys.exc_info()[2]))
rss = (rss * resource.getpagesize()) // 1024
vsize = int(vsize/1024)
frame.rss = rss
frame.maxRss = max(rss, frame.maxRss)
if 'GPU_LIST' in frame.runFrame.attributes:
usedGpuMemory = 0
for unitId in frame.runFrame.attributes.get('GPU_LIST').split(','):
usedGpuMemory += self.getGpuMemoryUsed(unitId)
frame.usedGpuMemory = usedGpuMemory
frame.maxUsedGpuMemory = max(usedGpuMemory, frame.maxUsedGpuMemory)
if os.path.exists(frame.runFrame.log_dir_file):
stat = os.stat(frame.runFrame.log_dir_file).st_mtime
frame.lluTime = int(stat)
frame.vsize = vsize
frame.maxVsize = max(vsize, frame.maxVsize)
frame.runFrame.attributes["pcpu"] = str(pcpu)
# Store the current data for the next check
self.__pidHistory = pidData
# pylint: disable=broad-except
except Exception as e:
log.exception('Failure with rss update due to: %s', e)
def getLoadAvg(self):
"""Returns average number of processes waiting to be served
for the last 1 minute multiplied by 100."""
if platform.system() == "Linux":
loadAvgFile = open(rqd.rqconstants.PATH_LOADAVG, "r")
loadAvg = int(float(loadAvgFile.read().split()[0]) * 100)
if self.__enabledHT():
loadAvg = loadAvg // 2
loadAvg = loadAvg + rqd.rqconstants.LOAD_MODIFIER
loadAvg = max(loadAvg, 0)
return loadAvg
return 0
@rqd.rqutil.Memoize
def getBootTime(self):
"""Returns epoch when the system last booted"""
if platform.system() == "Linux":
statFile = open(rqd.rqconstants.PATH_STAT, "r")
for line in statFile:
if line.startswith("btime"):
return int(line.split()[1])
return 0
@rqd.rqutil.Memoize
def getGpuCount(self):
"""Returns the total gpu's on the machine"""
return self.__getGpuValues()['count']
@rqd.rqutil.Memoize
def getGpuMemoryTotal(self):
"""Returns the total gpu memory in kb for CUE_GPU_MEMORY"""
return self.__getGpuValues()['total']
def getGpuMemoryFree(self):
"""Returns the available gpu memory in kb for CUE_GPU_MEMORY"""
return self.__getGpuValues()['free']
def getGpuMemoryUsed(self, unitId):
"""Returns the available gpu memory in kb for CUE_GPU_MEMORY"""
usedMemory = self.__getGpuValues()['used']
return usedMemory[unitId] if unitId in usedMemory else 0
# pylint: disable=attribute-defined-outside-init
def __resetGpuResults(self):
self.gpuResults = {'count': 0, 'total': 0, 'free': 0, 'used': {}, 'updated': 0}
def __getGpuValues(self):
if not hasattr(self, 'gpuNotSupported'):
if not hasattr(self, 'gpuResults'):
self.__resetGpuResults()
if not rqd.rqconstants.ALLOW_GPU:
self.gpuNotSupported = True
return self.gpuResults
if self.gpuResults['updated'] > int(time.time()) - 60:
return self.gpuResults
try:
nvidia_smi = subprocess.getoutput(
'nvidia-smi --query-gpu=memory.total,memory.free,count'
' --format=csv,noheader')
total = 0
free = 0
count = 0
unitId = 0
for line in nvidia_smi.splitlines():
# Example "16130 MiB, 16103 MiB, 8"
# 1 MiB = 1048.576 KB
l = line.split()
unitTotal = math.ceil(int(l[0]) * 1048.576)
unitFree = math.ceil(int(l[2]) * 1048.576)
total += unitTotal
free += unitFree
count = int(l[-1])
self.gpuResults['used'][str(unitId)] = unitTotal - unitFree
unitId += 1
self.gpuResults['total'] = int(total)
self.gpuResults['free'] = int(free)
self.gpuResults['count'] = count
self.gpuResults['updated'] = int(time.time())
# pylint: disable=broad-except
except Exception as e:
self.gpuNotSupported = True
self.__resetGpuResults()
log.warning(
'Failed to query nvidia-smi due to: %s at %s',
e, traceback.extract_tb(sys.exc_info()[2]))
else:
self.__resetGpuResults()
return self.gpuResults
def __getSwapout(self):
if platform.system() == "Linux":
try:
return str(int(self.__vmstat.getRecentPgoutRate()))
# pylint: disable=broad-except
except Exception:
return str(0)
return str(0)
@rqd.rqutil.Memoize
def getTimezone(self):
"""Returns the desired timezone"""
if time.tzname[0] == 'IST':
return 'IST'
return 'PST8PDT'
@rqd.rqutil.Memoize
def getHostname(self):
"""Returns the machine's fully qualified domain name"""
return rqd.rqutil.getHostname()
@rqd.rqutil.Memoize
def getPathEnv(self):
"""Returns the correct path environment for the given machine"""
if platform.system() == 'Linux':
return '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
return ''
@rqd.rqutil.Memoize
def getTempPath(self):
"""Returns the correct mcp path for the given machine"""
if platform.system() == "win32":
return win32api.GetTempPath()
if os.path.isdir("/mcp/"):
return "/mcp/"
return '%s/' % tempfile.gettempdir()
def reboot(self):
"""Reboots the machine immediately"""
if platform.system() == "Linux":
log.warning("Rebooting machine")
            subprocess.Popen(['/usr/bin/sudo', '/sbin/reboot', '-f'])
# pylint: disable=no-member
def __initMachineTags(self):
"""Sets the hosts tags"""
self.__renderHost.tags.append("rqdv-%s" % rqd.rqconstants.VERSION)
if rqd.rqconstants.RQD_TAGS:
for tag in rqd.rqconstants.RQD_TAGS.split():
self.__renderHost.tags.append(tag)
# Tag with desktop if it is a desktop
if self.isDesktop():
self.__renderHost.tags.append("desktop")
if platform.system() == 'Windows':
self.__renderHost.tags.append("windows")
return
if os.uname()[-1] in ("i386", "i686"):
self.__renderHost.tags.append("32bit")
elif os.uname()[-1] == "x86_64":
self.__renderHost.tags.append("64bit")
self.__renderHost.tags.append(os.uname()[2].replace(".EL.spi", "").replace("smp", ""))
def testInitMachineStats(self, pathCpuInfo):
"""Initializes machine stats outside of normal startup process. Used for testing."""
self.__initMachineStats(pathCpuInfo=pathCpuInfo)
return self.__renderHost, self.__coreInfo
def __initMachineStats(self, pathCpuInfo=None):
"""Updates static machine information during initialization"""
self.__renderHost.name = self.getHostname()
self.__renderHost.boot_time = self.getBootTime()
self.__renderHost.facility = rqd.rqconstants.DEFAULT_FACILITY
self.__renderHost.attributes['SP_OS'] = rqd.rqconstants.SP_OS
self.updateMachineStats()
__numProcs = __totalCores = 0
if platform.system() == "Linux" or pathCpuInfo is not None:
# Reads static information for mcp
mcpStat = os.statvfs(self.getTempPath())
self.__renderHost.total_mcp = mcpStat.f_blocks * mcpStat.f_frsize // KILOBYTE
# Reads static information from /proc/cpuinfo
with open(pathCpuInfo or rqd.rqconstants.PATH_CPUINFO, "r") as cpuinfoFile:
singleCore = {}
procsFound = []
for line in cpuinfoFile:
lineList = line.strip().replace("\t","").split(": ")
# A normal entry added to the singleCore dictionary
if len(lineList) >= 2:
singleCore[lineList[0]] = lineList[1]
# The end of a processor block
elif lineList == ['']:
# Check for hyper-threading
hyperthreadingMultiplier = (int(singleCore.get('siblings', '1'))
// int(singleCore.get('cpu cores', '1')))
__totalCores += rqd.rqconstants.CORE_VALUE
if "core id" in singleCore \
and "physical id" in singleCore \
and not singleCore["physical id"] in procsFound:
procsFound.append(singleCore["physical id"])
__numProcs += 1
elif "core id" not in singleCore:
__numProcs += 1
singleCore = {}
# An entry without data
elif len(lineList) == 1:
singleCore[lineList[0]] = ""
else:
hyperthreadingMultiplier = 1
if platform.system() == 'Windows':
# Windows memory information
stat = self.getWindowsMemory()
TEMP_DEFAULT = 1048576
self.__renderHost.total_mcp = TEMP_DEFAULT
self.__renderHost.total_mem = int(stat.ullTotalPhys / 1024)
self.__renderHost.total_swap = int(stat.ullTotalPageFile / 1024)
# Windows CPU information
logical_core_count = psutil.cpu_count(logical=True)
actual_core_count = psutil.cpu_count(logical=False)
hyperthreadingMultiplier = logical_core_count // actual_core_count
__totalCores = logical_core_count * rqd.rqconstants.CORE_VALUE
__numProcs = 1 # TODO: figure out how to count sockets in Python
# All other systems will just have one proc/core
if not __numProcs or not __totalCores:
__numProcs = 1
__totalCores = rqd.rqconstants.CORE_VALUE
if rqd.rqconstants.OVERRIDE_MEMORY is not None:
log.warning("Manually overriding the total memory")
self.__renderHost.total_mem = rqd.rqconstants.OVERRIDE_MEMORY
if rqd.rqconstants.OVERRIDE_CORES is not None:
log.warning("Manually overriding the number of reported cores")
__totalCores = rqd.rqconstants.OVERRIDE_CORES * rqd.rqconstants.CORE_VALUE
if rqd.rqconstants.OVERRIDE_PROCS is not None:
log.warning("Manually overriding the number of reported procs")
__numProcs = rqd.rqconstants.OVERRIDE_PROCS
# Don't report/reserve cores added due to hyperthreading
__totalCores = __totalCores // hyperthreadingMultiplier
self.__coreInfo.idle_cores = __totalCores
self.__coreInfo.total_cores = __totalCores
self.__renderHost.num_procs = __numProcs
self.__renderHost.cores_per_proc = __totalCores // __numProcs
if hyperthreadingMultiplier > 1:
self.__renderHost.attributes['hyperthreadingMultiplier'] = str(hyperthreadingMultiplier)
def getWindowsMemory(self):
"""Gets information on system memory, Windows compatible version."""
# From
# http://stackoverflow.com/questions/2017545/get-memory-usage-of-computer-in-windows-with-python
if not hasattr(self, '__windowsStat'):
class MEMORYSTATUSEX(ctypes.Structure):
"""Represents Windows memory information."""
_fields_ = [("dwLength", ctypes.c_uint),
("dwMemoryLoad", ctypes.c_uint),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),]
def __init__(self):
# have to initialize this to the size of MEMORYSTATUSEX
self.dwLength = 2*4 + 7*8 # size = 2 ints, 7 longs
super(MEMORYSTATUSEX, self).__init__()
self.__windowsStat = MEMORYSTATUSEX()
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(self.__windowsStat))
return self.__windowsStat
def updateMacMemory(self):
"""Updates the internal store of memory available, macOS compatible version."""
memsizeOutput = subprocess.getoutput('sysctl hw.memsize').strip()
memsizeRegex = re.compile(r'^hw.memsize: (?P<totalMemBytes>[\d]+)$')
memsizeMatch = memsizeRegex.match(memsizeOutput)
if memsizeMatch:
self.__renderHost.total_mem = int(memsizeMatch.group('totalMemBytes')) // 1024
else:
self.__renderHost.total_mem = 0
vmStatLines = subprocess.getoutput('vm_stat').split('\n')
lineRegex = re.compile(r'^(?P<field>.+):[\s]+(?P<pages>[\d]+).$')
vmStats = {}
for line in vmStatLines[1:-2]:
match = lineRegex.match(line)
if match:
vmStats[match.group('field')] = int(match.group('pages')) * 4096
freeMemory = vmStats.get("Pages free", 0) // 1024
inactiveMemory = vmStats.get("Pages inactive", 0) // 1024
self.__renderHost.free_mem = freeMemory + inactiveMemory
swapStats = subprocess.getoutput('sysctl vm.swapusage').strip()
swapRegex = re.compile(r'^.* free = (?P<freeMb>[\d]+)M .*$')
swapMatch = swapRegex.match(swapStats)
if swapMatch:
self.__renderHost.free_swap = int(float(swapMatch.group('freeMb')) * 1024)
else:
self.__renderHost.free_swap = 0
def updateMachineStats(self):
"""Updates dynamic machine information during runtime"""
if platform.system() == "Linux":
# Reads dynamic information for mcp
mcpStat = os.statvfs(self.getTempPath())
self.__renderHost.free_mcp = (mcpStat.f_bavail * mcpStat.f_bsize) // KILOBYTE
# Reads dynamic information from /proc/meminfo
with open(rqd.rqconstants.PATH_MEMINFO, "r") as fp:
for line in fp:
if line.startswith("MemFree"):
freeMem = int(line.split()[1])
elif line.startswith("SwapFree"):
freeSwapMem = int(line.split()[1])
elif line.startswith("Cached"):
cachedMem = int(line.split()[1])
elif line.startswith("MemTotal"):
self.__renderHost.total_mem = int(line.split()[1])
self.__renderHost.free_swap = freeSwapMem
self.__renderHost.free_mem = freeMem + cachedMem
self.__renderHost.num_gpus = self.getGpuCount()
self.__renderHost.total_gpu_mem = self.getGpuMemoryTotal()
self.__renderHost.free_gpu_mem = self.getGpuMemoryFree()
self.__renderHost.attributes['swapout'] = self.__getSwapout()
elif platform.system() == 'Darwin':
self.updateMacMemory()
elif platform.system() == 'Windows':
TEMP_DEFAULT = 1048576
stats = self.getWindowsMemory()
self.__renderHost.free_mcp = TEMP_DEFAULT
self.__renderHost.free_swap = int(stats.ullAvailPageFile / 1024)
self.__renderHost.free_mem = int(stats.ullAvailPhys / 1024)
self.__renderHost.num_gpus = self.getGpuCount()
self.__renderHost.total_gpu_mem = self.getGpuMemoryTotal()
self.__renderHost.free_gpu_mem = self.getGpuMemoryFree()
# Updates dynamic information
self.__renderHost.load = self.getLoadAvg()
self.__renderHost.nimby_enabled = self.__rqCore.nimby.active
self.__renderHost.nimby_locked = self.__rqCore.nimby.locked
self.__renderHost.state = self.state
def getHostInfo(self):
"""Updates and returns the renderHost struct"""
self.updateMachineStats()
return self.__renderHost
def getHostReport(self):
"""Updates and returns the hostReport struct"""
self.__hostReport.host.CopyFrom(self.getHostInfo())
self.__hostReport.ClearField('frames')
for frameKey in self.__rqCore.getFrameKeys():
try:
info = self.__rqCore.getFrame(frameKey).runningFrameInfo()
self.__hostReport.frames.extend([info])
except KeyError:
pass
self.__hostReport.core_info.CopyFrom(self.__rqCore.getCoreInfo())
return self.__hostReport
def getBootReport(self):
"""Updates and returns the bootReport struct"""
self.__bootReport.host.CopyFrom(self.getHostInfo())
return self.__bootReport
def __enabledHT(self):
return 'hyperthreadingMultiplier' in self.__renderHost.attributes
def setupHT(self):
""" Setup rqd for hyper-threading """
if self.__enabledHT():
self.__tasksets = set(range(self.__coreInfo.total_cores // 100))
def setupGpu(self):
""" Setup rqd for Gpus """
self.__gpusets = set(range(self.getGpuCount()))
def reserveHT(self, reservedCores):
""" Reserve cores for use by taskset
taskset -c 0,1,8,9 COMMAND
        Not thread safe, use with locking.
@type reservedCores: int
@param reservedCores: The total physical cores reserved by the frame.
@rtype: string
@return: The cpu-list for taskset -c
"""
if not self.__enabledHT():
return None
if reservedCores % 100:
log.debug('Taskset: Can not reserveHT with fractional cores')
return None
log.debug('Taskset: Requesting reserve of %d', (reservedCores // 100))
if len(self.__tasksets) < reservedCores // 100:
err = ('Not launching, insufficient hyperthreading cores to reserve '
'based on reservedCores')
log.critical(err)
raise rqd.rqexceptions.CoreReservationFailureException(err)
tasksets = []
for _ in range(reservedCores // 100):
core = self.__tasksets.pop()
tasksets.append(str(core))
tasksets.append(str(core + self.__coreInfo.total_cores // 100))
log.debug('Taskset: Reserving cores - %s', ','.join(tasksets))
return ','.join(tasksets)
# pylint: disable=inconsistent-return-statements
def releaseHT(self, reservedHT):
""" Release cores used by taskset
Format: 0,1,8,9
Not thread safe, use with locking.
@type: string
@param: The cpu-list used for taskset to release. ex: '0,8,1,9'
"""
if not self.__enabledHT():
return None
log.debug('Taskset: Releasing cores - %s', reservedHT)
for core in reservedHT.split(','):
if int(core) < self.__coreInfo.total_cores // 100:
self.__tasksets.add(int(core))
def reserveGpus(self, reservedGpus):
""" Reserve gpus
@type reservedGpus: int
@param reservedGpus: The total gpus reserved by the frame.
@rtype: string
@return: The gpu-list. ex: '0,1,8,9'
"""
if len(self.__gpusets) < reservedGpus:
err = 'Not launching, insufficient GPUs to reserve based on reservedGpus'
log.critical(err)
raise rqd.rqexceptions.CoreReservationFailureException(err)
gpusets = []
for _ in range(reservedGpus):
gpu = self.__gpusets.pop()
gpusets.append(str(gpu))
return ','.join(gpusets)
def releaseGpus(self, reservedGpus):
""" Release gpus
@type: string
@param: The gpu-list to release. ex: '0,1,8,9'
"""
log.debug('GPU set: Releasing gpu - %s', reservedGpus)
for gpu in reservedGpus.split(','):
if int(gpu) < self.getGpuCount():
self.__gpusets.add(int(gpu))
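# Worked example of the hyper-threading bookkeeping above (hypothetical host
# with 8 physical cores and hyper-threading, i.e. total_cores == 800):
# reserveHT(200) pops two physical core ids and pairs each with its
# hyper-threaded sibling (id + 8), returning a cpu-list such as "0,8,1,9" for
# `taskset -c`; releaseHT("0,8,1,9") puts those physical cores back in the pool.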
|
sld-api-backend/api_v1/endpoints/activity_logs.py | terjekv/Stack-Lifecycle-Deployment | 115 | 12612612 |
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, HTTPException
from schemas import schemas
from security import deps
from crud import activityLogs as crud_activity
from crud import user as crud_users
#from fastapi_limiter import FastAPILimiter
#from fastapi_limiter.depends import RateLimiter
#import aioredis
router = APIRouter()
# @router.on_event("startup")
# async def startup():
# redis = await aioredis.create_redis_pool("redis://redis:6379")
# FastAPILimiter.init(redis)
@router.get("/id/{username}")
async def get_activity_logs_by_username(
username: str,
current_user: schemas.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db)):
if not crud_users.is_superuser(db, current_user):
raise HTTPException(status_code=403, detail="Not enough permissions")
if not crud_users.is_master(db, current_user):
squad = current_user.squad
return crud_activity.get_activity_by_username_squad(db=db, username=username, squad=squad)
return crud_activity.get_activity_by_username(db, username=username)
@ router.get("/all")
async def get_all_activity_logs(
current_user: schemas.User = Depends(deps.get_current_active_user),
skip: int = 0,
limit: int = 100,
db: Session = Depends(deps.get_db)):
if not crud_users.is_superuser(db, current_user):
raise HTTPException(status_code=403, detail="Not enough permissions")
try:
if not crud_users.is_master(db, current_user):
squad = current_user.squad
result = crud_activity.get_all_activity_by_squad(
db=db, squad=squad, skip=skip, limit=limit)
return result
result = crud_activity.get_all_activity(db=db, skip=skip, limit=limit)
return result
except Exception as err:
raise HTTPException(
status_code=400,
detail=f"{err}")
|
metrics/parser.py | manipopopo/TC-ResNet | 185 | 12612619 | from abc import ABC, ABCMeta
from metrics.base import DataStructure
class MetricDataParserBase(ABC):
@classmethod
def parse_build_data(cls, data):
"""
Args:
data: dictionary which will be passed to InputBuildData
"""
data = cls._validate_build_data(data)
data = cls._process_build_data(data)
return data
@classmethod
def parse_non_tensor_data(cls, data):
"""
Args:
data: dictionary which will be passed to InputDataStructure
"""
input_data = cls._validate_non_tensor_data(data)
output_data = cls._process_non_tensor_data(input_data)
return output_data
@classmethod
def _validate_build_data(cls, data):
"""
        Specify assertions that tensor data should contain
Args:
data: dictionary
Return:
InputDataStructure
"""
return cls.InputBuildData(data)
@classmethod
def _validate_non_tensor_data(cls, data):
"""
        Specify assertions that non-tensor data should contain
Args:
data: dictionary
Return:
InputDataStructure
"""
return cls.InputNonTensorData(data)
"""
Override these two functions if needed.
"""
@classmethod
def _process_build_data(cls, data):
"""
        Process data so that the following metrics can use it
Args:
data: InputBuildData
Return:
OutputBuildData
"""
# default function is just passing data
return cls.OutputBuildData(data.to_dict())
@classmethod
def _process_non_tensor_data(cls, data):
"""
        Process data so that the following metrics can use it
Args:
data: InputNonTensorData
Return:
OutputNonTensorData
"""
# default function is just passing data
return cls.OutputNonTensorData(data.to_dict())
"""
Belows should be implemented when inherit.
"""
class InputBuildData(DataStructure, metaclass=ABCMeta):
pass
class OutputBuildData(DataStructure, metaclass=ABCMeta):
pass
class InputNonTensorData(DataStructure, metaclass=ABCMeta):
pass
class OutputNonTensorData(DataStructure, metaclass=ABCMeta):
pass
class AudioDataParser(MetricDataParserBase):
class InputBuildData(DataStructure):
_keys = [
"dataset_split_name",
"label_names",
"losses", # Dict | loss_key -> Tensor
"learning_rate",
"wavs",
]
class OutputBuildData(DataStructure):
_keys = [
"dataset_split_name",
"label_names",
"losses",
"learning_rate",
"wavs",
]
class InputNonTensorData(DataStructure):
_keys = [
"dataset_split_name",
"label_names",
"predictions_onehot",
"labels_onehot",
]
class OutputNonTensorData(DataStructure):
_keys = [
"dataset_split_name",
"label_names",
"predictions_onehot",
"labels_onehot",
"predictions",
"labels",
]
@classmethod
def _process_non_tensor_data(cls, data):
predictions = data.predictions_onehot.argmax(axis=-1)
labels = data.labels_onehot.argmax(axis=-1)
return cls.OutputNonTensorData({
"dataset_split_name": data.dataset_split_name,
"label_names": data.label_names,
"predictions_onehot": data.predictions_onehot,
"labels_onehot": data.labels_onehot,
"predictions": predictions,
"labels": labels,
})
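# Usage sketch (illustrative; assumes DataStructure accepts a plain dict holding
# exactly the listed keys and exposes them as attributes, which is how the
# methods above use it):
#
#   import numpy as np
#   parsed = AudioDataParser.parse_non_tensor_data({
#       "dataset_split_name": "valid",
#       "label_names": ["yes", "no"],
#       "predictions_onehot": np.array([[0.9, 0.1], [0.2, 0.8]]),
#       "labels_onehot": np.array([[1.0, 0.0], [0.0, 1.0]]),
#   })
#   parsed.predictions  # argmax over the last axis -> array([0, 1])
#   parsed.labels       # -> array([0, 1])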
|
changes/api/patch_details.py | vault-the/changes | 443 | 12612645 | from __future__ import absolute_import
from flask import request, Response
from changes.api.base import APIView
from changes.models.patch import Patch
class PatchDetailsAPIView(APIView):
def get(self, patch_id):
patch = Patch.query.get(patch_id)
if patch is None:
return '', 404
if request.args.get('raw'):
return Response(patch.diff, mimetype='text/plain')
return self.respond(patch)
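# Usage sketch (the URL prefix is illustrative; route registration lives
# elsewhere): GET .../patches/<patch_id>/ returns the serialized patch, while
# adding any truthy `raw` query parameter (e.g. ?raw=1) returns the bare diff
# as text/plain.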
|
tests/cluecode/data/ics/markdown-markdown-extensions/codehilite.py | s4-2/scancode-toolkit | 1,511 | 12612646 |
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [<NAME>](http://achinghead.com/).
Project website: <http://www.freewisdom.org/project/python-markdown/CodeHilite>
|
src/3rdparty/keyvi/python/tests/index/index_test.py | pombredanne/keyvi-server | 199 | 12612649 | # -*- coding: utf-8 -*-
# Usage: py.test tests
from keyvi.index import Index, ReadOnlyIndex
import os
import random
import shutil
import tempfile
import gc
def test_open_index():
test_dir = os.path.join(tempfile.gettempdir(), "index_open_index")
try:
if not os.path.exists(test_dir):
os.mkdir(test_dir)
index = Index(os.path.join(test_dir, "index"))
index.Set("a", "{}")
del index
# required for pypy to ensure deletion/destruction of the index object
gc.collect()
index = Index(os.path.join(test_dir, "index"))
assert "a" in index
del index
finally:
shutil.rmtree(test_dir, ignore_errors=True)
def test_some_indexing():
test_dir = os.path.join(tempfile.gettempdir(), "index_some_indexing")
iterations = 10000
split = 2000
try:
if not os.path.exists(test_dir):
os.mkdir(test_dir)
index = Index(os.path.join(test_dir, "index"))
for i in range (0, iterations):
index.Set("key-{}".format(i), "value-{}".format(i))
index.Flush()
for i in range (split, iterations):
assert "key-{}".format(i) in index
index.Delete("key-{}".format(i))
index.Flush()
for i in range (0, split):
assert "key-{}".format(i) in index
for i in range (split, iterations):
assert not "key-{}".format(i) in index
del index
finally:
shutil.rmtree(test_dir, ignore_errors=True)
def test_bulk_add():
test_dir = os.path.join(tempfile.gettempdir(), "index_bulk_add")
iterations = 10
chunk_size = 1000
try:
if not os.path.exists(test_dir):
os.mkdir(test_dir)
index = Index(os.path.join(test_dir, "index"))
key_values = []
for i in range (0, chunk_size * iterations):
key_values.append(("key-{}".format(i), "value-{}".format(i)))
if i % chunk_size == 0:
index.MSet(key_values)
key_values = []
index.MSet(key_values)
index.Flush()
for i in range(0, 50):
assert "key-{}".format(random.randrange(0, chunk_size * iterations)) in index
del index
finally:
shutil.rmtree(test_dir, ignore_errors=True)
def test_get_fuzzy():
test_dir = os.path.join(tempfile.gettempdir(), "index_test_fuzzy")
try:
if not os.path.exists(test_dir):
os.mkdir(test_dir)
write_index = Index(os.path.join(test_dir, "index"))
write_index.Set("apple", "{}")
write_index.Set("apples", "{}")
write_index.Set("banana", "{}")
write_index.Set("orange", "{}")
write_index.Set("avocado", "{}")
write_index.Set("peach", "{}")
write_index.Flush()
read_only_index = ReadOnlyIndex(os.path.join(test_dir, "index"))
for index in [write_index, read_only_index]:
matches = list(index.GetFuzzy("appe", 1, 2))
assert len(matches) == 1
assert u'apple' == matches[0].GetMatchedString()
matches = list(index.GetFuzzy("appes", 2, 2))
assert len(matches) == 2
assert u'apple' == matches[0].GetMatchedString()
assert u'apples' == matches[1].GetMatchedString()
matches = list(index.GetFuzzy("apples", 1, 2))
assert len(matches) == 2
assert u'apple' == matches[0].GetMatchedString()
assert u'apples' == matches[1].GetMatchedString()
matches = list(index.GetFuzzy("atocao", 2, 1))
assert len(matches) == 1
assert u'avocado' == matches[0].GetMatchedString()
write_index.Delete("avocado")
write_index.Flush()
matches = list(write_index.GetFuzzy("atocao", 2, 1))
assert len(matches) == 0
del write_index
del read_only_index
finally:
shutil.rmtree(test_dir, ignore_errors=True)
def test_get_near():
test_dir = os.path.join(tempfile.gettempdir(), "index_test_near")
try:
if not os.path.exists(test_dir):
os.mkdir(test_dir)
write_index = Index(os.path.join(test_dir, "index"))
# the following geohashes are created from openstreetmap coordinates and translated using a geohash encoder
write_index.Set("u21xj502gs79", "{'city' : 'Kobarid', 'country': 'si'}")
write_index.Set("u21xk2uxkhh2", "{'city' : 'Trnovo ob soci', 'country': 'si'}")
write_index.Set("u21x75n34qrp", "{'city' : 'Srpnecia', 'country': 'si'}")
write_index.Set("u21x6v1nx0c3", "{'city' : 'Zaga', 'country': 'si'}")
write_index.Set("u21xs20w9ssu", "{'city' : 'Cezsoca', 'country': 'si'}")
write_index.Set("u21x6yx5cqy6", "{'city' : 'Log Cezsoski', 'country': 'si'}")
write_index.Set("u21xs7ses4s3", "{'city' : 'Bovec', 'country': 'si'}")
write_index.Flush()
read_only_index = ReadOnlyIndex(os.path.join(test_dir, "index"))
for index in [write_index, read_only_index]:
# some coordinate nearby, greedy false, so it prefers as close as possible
matches = list(index.GetNear("u21xjjhhymt7", 4))
assert len(matches) == 1
assert u'u21xj502gs79' == matches[0].GetMatchedString()
assert u"{'city' : 'Kobarid', 'country': 'si'}" == matches[0].GetValue()
# greedy match, still closest should be the 1st match
matches = list(index.GetNear("u21xjjhhymt7", 4, True))
assert len(matches) == 7
assert u'u21xj502gs79' == matches[0].GetMatchedString()
assert u"{'city' : 'Kobarid', 'country': 'si'}" == matches[0].GetValue()
            # a match near both Bovec and Cezsoca, but closer to Cezsoca
matches = list(index.GetNear("u21xs20w9ssu", 5))
assert len(matches) == 1
assert u'u21xs20w9ssu' == matches[0].GetMatchedString()
assert u"{'city' : 'Cezsoca', 'country': 'si'}" == matches[0].GetValue()
# greedy should return Bovec, but not the other locations due to the prefix
matches = list(index.GetNear("u21xs20w9ssu", 5, True))
assert len(matches) == 2
assert u'u21xs20w9ssu' == matches[0].GetMatchedString()
assert u"{'city' : 'Cezsoca', 'country': 'si'}" == matches[0].GetValue()
assert u'u21xs7ses4s3' == matches[1].GetMatchedString()
assert u"{'city' : 'Bovec', 'country': 'si'}" == matches[1].GetValue()
del write_index
del read_only_index
finally:
shutil.rmtree(test_dir, ignore_errors=True)
|
tests/contrib/test_strava.py | kerryhatcher/flask-dance | 836 | 12612651 | import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.strava import make_strava_blueprint, strava
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
@pytest.fixture
def make_app():
"A callable to create a Flask app with the Strava provider"
def _make_app(*args, **kwargs):
app = Flask(__name__)
app.secret_key = "whatever"
blueprint = make_strava_blueprint(*args, **kwargs)
app.register_blueprint(blueprint)
return app
return _make_app
def test_blueprint_factory():
strava_bp = make_strava_blueprint(
client_id="foo", client_secret="bar", scope="identity", redirect_to="index"
)
assert isinstance(strava_bp, OAuth2ConsumerBlueprint)
assert strava_bp.session.scope == "identity"
assert strava_bp.session.base_url == "https://www.strava.com/api/v3"
assert strava_bp.session.client_id == "foo"
assert strava_bp.client_secret == "bar"
assert (
strava_bp.authorization_url == "https://www.strava.com/api/v3/oauth/authorize"
)
assert strava_bp.token_url == "https://www.strava.com/api/v3/oauth/token"
def test_load_from_config(make_app):
app = make_app()
app.config["STRAVA_OAUTH_CLIENT_ID"] = "foo"
app.config["STRAVA_OAUTH_CLIENT_SECRET"] = "bar"
resp = app.test_client().get("/strava")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
@responses.activate
def test_context_local(make_app):
responses.add(responses.GET, "https://google.com")
# set up two apps with two different set of auth tokens
app1 = make_app(
"foo1",
"bar1",
redirect_to="url1",
storage=MemoryStorage({"access_token": "<PASSWORD>"}),
)
app2 = make_app(
"foo2",
"bar2",
redirect_to="url2",
storage=MemoryStorage({"access_token": "<PASSWORD>"}),
)
# outside of a request context, referencing functions on the `strava` object
# will raise an exception
with pytest.raises(RuntimeError):
strava.get("https://google.com")
# inside of a request context, `strava` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
strava.get("https://google.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
strava.get("https://google.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
|
tests/issues/test_issue_033.py | RodrigoDeRosa/related | 190 | 12612658 | import related
@related.immutable()
class Child(object):
name = related.StringField(default="!")
@related.immutable()
class Model(object):
# non-child fields
sequence_field = related.SequenceField(str, default=set())
set_field = related.SetField(str, default=[])
mapping_field = related.MappingField(Child, "name", default={})
# child fields
child_list = related.ChildField(list, default=list)
child_set = related.ChildField(set, default=set)
child_dict = related.ChildField(dict, default=dict)
child_obj = related.ChildField(Child, default=Child)
def test_sequence_field_ok():
obj_1 = Model()
obj_1.sequence_field.append("a")
obj_1.sequence_field.append("b")
obj_1.sequence_field.append("c")
obj_1.sequence_field.append("c")
assert len(obj_1.sequence_field) == 4
assert len(Model().sequence_field) == 0
def test_set_field_ok():
obj_1 = Model()
obj_1.set_field.add("a")
obj_1.set_field.add("b")
obj_1.set_field.add("c")
obj_1.set_field.add("c")
assert len(obj_1.set_field) == 3
assert len(Model().set_field) == 0
def test_mapping_field_ok():
obj_1 = Model()
obj_1.mapping_field.add(Child("a"))
obj_1.mapping_field.add(Child("b"))
obj_1.mapping_field.add(Child("c"))
obj_1.mapping_field.add(Child("c"))
assert len(obj_1.mapping_field) == 3
assert len(Model().mapping_field) == 0
def test_child_list_ok():
obj_1 = Model()
obj_1.child_list.append(Child("a"))
obj_1.child_list.append(Child("b"))
obj_1.child_list.append(Child("c"))
obj_1.child_list.append(Child("c"))
assert len(obj_1.child_list) == 4
assert len(Model().child_list) == 0
def test_child_set_ok():
obj_1 = Model()
obj_1.child_set.add(Child("a"))
obj_1.child_set.add(Child("b"))
obj_1.child_set.add(Child("c"))
obj_1.child_set.add(Child("c"))
assert len(obj_1.child_set) == 3
assert len(Model().child_set) == 0
def test_child_dict_ok():
obj_1 = Model()
obj_1.child_dict['a'] = Child("a")
obj_1.child_dict['b'] = Child("b")
obj_1.child_dict['c'] = Child("c")
assert len(obj_1.child_dict) == 3
assert len(Model().child_dict) == 0
def test_child_obj_ok():
obj_1 = Model(child_obj=Child("a"))
assert obj_1.child_obj == Child("a")
assert Model().child_obj != obj_1.child_obj
assert Model().child_obj == Child()
|
utils/smpl.py | SomaNonaka/HandMesh | 131 | 12612669 | <reponame>SomaNonaka/HandMesh
import numpy as np
import torch
import os.path as osp
import json
# from config import cfg
import sys
from smplpytorch.pytorch.smpl_layer import SMPL_Layer
class SMPL(object):
def __init__(self, root):
self.root = root
self.layer = {'neutral': self.get_layer(), 'male': self.get_layer('male'), 'female': self.get_layer('female')}
self.vertex_num = 6890
self.face = self.layer['neutral'].th_faces.numpy()
self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy()
# add nose, L/R eye, L/R ear
self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx
nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1)
left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1)
right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1)
left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1)
right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1, -1)
self.joint_regressor = np.concatenate(
(self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot))
self.joint_num = 29 # original: 24. manually add nose, L/R eye, L/R ear
self.joints_name = (
'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe',
'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear')
self.flip_pairs = (
(1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28))
self.skeleton = (
(0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
(17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15),
(24, 25), (24, 26), (25, 27), (26, 28))
self.root_joint_idx = self.joints_name.index('Pelvis')
def get_layer(self, gender='neutral'):
return SMPL_Layer(gender=gender, model_root=osp.join(self.root, 'template'))
|
gabbi/tests/test_use_prior_test.py | scottwallacesh/gabbi | 145 | 12612722 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test use_prior_test directive.
"""
import copy
import unittest
from six.moves import mock
from gabbi import case
class UsePriorTest(unittest.TestCase):
@staticmethod
def make_test_case(use_prior_test=None):
http_case = case.HTTPTestCase('test_request')
http_case.test_data = copy.copy(case.BASE_TEST)
if use_prior_test is not None:
http_case.test_data['use_prior_test'] = use_prior_test
return http_case
@mock.patch('gabbi.case.HTTPTestCase._run_test')
def test_use_prior_true(self, m_run_test):
http_case = self.make_test_case(True)
http_case.has_run = False
http_case.prior = self.make_test_case(True)
http_case.prior.run = mock.MagicMock(unsafe=True)
http_case.prior.has_run = False
http_case.test_request()
http_case.prior.run.assert_called_once()
@mock.patch('gabbi.case.HTTPTestCase._run_test')
def test_use_prior_false(self, m_run_test):
http_case = self.make_test_case(False)
http_case.has_run = False
http_case.prior = self.make_test_case(True)
http_case.prior.run = mock.MagicMock(unsafe=True)
http_case.prior.has_run = False
http_case.test_request()
http_case.prior.run.assert_not_called()
@mock.patch('gabbi.case.HTTPTestCase._run_test')
def test_use_prior_default_true(self, m_run_test):
http_case = self.make_test_case()
http_case.has_run = False
http_case.prior = self.make_test_case(True)
http_case.prior.run = mock.MagicMock(unsafe=True)
http_case.prior.has_run = False
http_case.test_request()
http_case.prior.run.assert_called_once()
|
vispy/visuals/tests/test_ellipse.py | hmaarrfk/vispy | 2,617 | 12612747 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Tests for EllipseVisual
All images are of size (100,100) to keep a small file size
"""
from vispy.scene import visuals, transforms
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
@requires_application()
def test_circle_draw():
"""Test drawing circles without transform using EllipseVisual"""
with TestingCanvas() as c:
ellipse = visuals.Ellipse(center=(75, 35, 0), radius=20,
color=(1, 0, 0, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/circle1.png')
ellipse.parent = None
ellipse = visuals.Ellipse(center=(75, 35, 0), radius=20,
color=(1, 0, 0, 1),
border_color=(0, 1, 1, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/circle2.png')
ellipse.parent = None
ellipse = visuals.Ellipse(center=(75, 35, 0), radius=20,
border_color=(0, 1, 1, 1),
parent=c.scene)
# low corr here because borders have some variability
# esp. w/HiDPI
assert_image_approved(c.render(), 'visuals/circle3.png',
min_corr=0.7)
@requires_application()
def test_ellipse_draw():
"""Test drawing transformed ellipses using EllipseVisual"""
with TestingCanvas() as c:
ellipse = visuals.Ellipse(center=(0., 0.), radius=(20, 15),
color=(0, 0, 1, 1),
parent=c.scene)
ellipse.transform = transforms.STTransform(scale=(2.0, 3.0),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/ellipse1.png')
ellipse.parent = None
ellipse = visuals.Ellipse(center=(0., 0.), radius=(20, 15),
color=(0, 0, 1, 1),
border_color=(1, 0, 0, 1),
parent=c.scene)
ellipse.transform = transforms.STTransform(scale=(2.0, 3.0),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/ellipse2.png')
ellipse.parent = None
ellipse = visuals.Ellipse(center=(0., 0.), radius=(20, 15),
border_color=(1, 0, 0, 1),
parent=c.scene)
ellipse.transform = transforms.STTransform(scale=(2.0, 3.0),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/ellipse3.png',
min_corr=0.7)
@requires_application()
def test_arc_draw1():
"""Test drawing arcs using EllipseVisual"""
with TestingCanvas() as c:
ellipse = visuals.Ellipse(center=(50., 50.), radius=(20, 15),
start_angle=150., span_angle=120.,
color=(0, 0, 1, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/arc1.png')
ellipse.parent = None
ellipse = visuals.Ellipse(center=(50., 50.), radius=(20, 15),
start_angle=150., span_angle=120.,
border_color=(1, 0, 0, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/arc2.png',
min_corr=0.6)
@requires_application()
def test_reactive_draw():
"""Test reactive ellipse attributes"""
with TestingCanvas() as c:
ellipse = visuals.Ellipse(center=[75, 35, 0.], radius=[20, 15],
color='yellow',
parent=c.scene)
ellipse.center = [70, 40, 0.]
assert_image_approved(c.render(), 'visuals/reactive_ellipse1.png')
ellipse.radius = 25
assert_image_approved(c.render(), 'visuals/reactive_ellipse2.png')
ellipse.color = 'red'
assert_image_approved(c.render(), 'visuals/reactive_ellipse3.png')
ellipse.border_color = 'yellow'
assert_image_approved(c.render(), 'visuals/reactive_ellipse4.png')
ellipse.start_angle = 140.
assert_image_approved(c.render(), 'visuals/reactive_ellipse5.png')
ellipse.span_angle = 100.
assert_image_approved(c.render(), 'visuals/reactive_ellipse6.png')
ellipse.num_segments = 10.
assert_image_approved(c.render(), 'visuals/reactive_ellipse7.png')
run_tests_if_main()
|
zentral/contrib/monolith/ppd.py | janheise/zentral | 634 | 12612748 | <filename>zentral/contrib/monolith/ppd.py
import zlib
KEYWORDS = {
"ModelName": ("model_name", False),
"ShortNickName": ("short_nick_name", False),
"Manufacturer": ("manufacturer", False),
"FileVersion": ("file_version", False),
"Product": ("product", True),
"PCFileName": ("pc_file_name", False),
}
def read_ppd_file(file_obj):
content = file_obj.read()
try:
content = zlib.decompress(content, 16 + zlib.MAX_WBITS)
except Exception:
return content, False
else:
return content, True
def iter_ppd(content, encoding=None):
for line in content.splitlines():
line = line.strip()
if not line or line.startswith(b"*%") or line == b"*End":
# comment
continue
try:
keyword, value = line.split(b" ", 1)
except Exception:
# strange line ?
continue
keyword = keyword.strip(b"*").strip(b":")
value = value.strip().strip(b'"')
if encoding:
yield keyword.decode(encoding), value.decode(encoding)
else:
yield keyword, value
def get_ppd_information(file_obj):
d = {}
content, d["file_compressed"] = read_ppd_file(file_obj)
encoding = None
for keyword, value in iter_ppd(content):
if keyword == b"LanguageEncoding":
if value == b"ISOLatin1":
encoding = "latin-1"
break
else:
raise NotImplementedError("Unknown encoding type")
for keyword, value in iter_ppd(content, encoding=encoding):
try:
attr, is_list = KEYWORDS[keyword]
except KeyError:
continue
value = value.strip().strip('"')
if is_list:
d.setdefault(attr, []).append(value.strip("(").strip(")"))
else:
d[attr] = value
return d
if __name__ == "__main__":
import sys
import pprint
pprint.pprint(get_ppd_information(open(sys.argv[1], "rb")))
|
empire/server/modules/powershell/collection/SharpChromium.py | chenxiangfang/Empire | 2,541 | 12612818 | from __future__ import print_function
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
module_source = main_menu.installPath + "/data/module_source/collection/Get-SharpChromium.ps1"
if obfuscate:
data_util.obfuscate_module(moduleSource=module_source, obfuscationCommand=obfuscation_command)
module_source = module_source.replace("module_source", "obfuscated_module_source")
try:
f = open(module_source, 'r')
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
module_code = f.read()
f.close()
script = module_code
script_end = " Get-SharpChromium"
#check type
if params['Type'].lower() not in ['all','logins','history','cookies']:
print(helpers.color("[!] Invalid value of Type, use default value: all"))
params['Type']='all'
script_end += " -Type "+params['Type']
#check domain
if params['Domains'].lower() != '':
if params['Type'].lower() != 'cookies':
print(helpers.color("[!] Domains can only be used with Type cookies"))
else:
script_end += " -Domains ("
for domain in params['Domains'].split(','):
script_end += "'" + domain + "',"
script_end = script_end[:-1]
script_end += ")"
outputf = params.get("OutputFunction", "Out-String")
script_end += f" | {outputf} | " + '%{$_ + \"`n\"};"`n' + str(module.name.split("/")[-1]) + ' completed!"'
if obfuscate:
script_end = helpers.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=obfuscation_command)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
|
service/util/auto_task.py | mutouxia/kamiFaka | 717 | 12612825 | from sqlalchemy.orm import query
from service.database.models import TempOrder
from service.api.db import db
from datetime import datetime,timedelta
def clean_tmp_order():
orders = TempOrder.query.all()
c_now = datetime.utcnow()+timedelta(hours=8)
# del_list = []
if orders:
with db.auto_commit_db():
for i in orders:
if (c_now - i.to_date()['updatetime']).days > 5:
# del_list.append(i)
db.session.delete(i)
|
inference.py | gonglinyuan/StackingBERT | 104 | 12612839 | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference with a trained model (or an ensemble of models) and write the predicted labels to the output file.
"""
import os
import torch
from fairseq import options, progress_bar, tasks, utils
def main(parsed_args):
assert parsed_args.path is not None, '--path required for evaluation!'
print(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
print('| loading model(s) from {}'.format(parsed_args.path))
models, args = utils.load_ensemble_for_inference(parsed_args.path.split(':'), task)
args.__dict__.update(parsed_args.__dict__)
print(args)
task.args = args
# Load dataset splits
task.load_dataset(args.gen_subset)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.make_generation_fast_()
if use_cuda:
model.cuda()
if args.fp16:
model.half()
assert len(models) > 0
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens or 10000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
num_shards=args.num_shards,
shard_id=args.shard_id,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
with progress_bar.build_progress_bar(args, itr, no_progress_bar='simple') as progress:
with open(args.output, 'w', encoding='utf-8') as fo:
ans = torch.empty(len(task.dataset(args.gen_subset)), dtype=torch.long)
with torch.no_grad():
for model in models:
model.eval()
for sample in progress:
if use_cuda:
sample = utils.move_to_cuda(sample)
probs = torch.stack([model.get_normalized_probs(model(**sample['net_input']), log_probs=False)
for model in models], dim=0).sum(dim=0)
for i, y in torch.stack([sample['id'], probs.argmax(dim=-1)], dim=1):
ans[i] = y
for y in ans:
fo.write(f"{y}\n")
if __name__ == '__main__':
parser = options.get_inference_parser()
args = options.parse_args_and_arch(parser)
main(args)
|
parsers/test/test_IN_AP.py | hybridcattt/electricitymap-contrib | 1,582 | 12612854 | <gh_stars>1000+
import unittest
from requests import Session
from requests_mock import Adapter, ANY
from pkg_resources import resource_string
from parsers import IN_AP
class Test_IN_AP(unittest.TestCase):
def setUp(self):
self.session = Session()
self.adapter = Adapter()
self.session.mount('https://', self.adapter)
response_text = resource_string("parsers.test.mocks", "IN_AP.html")
self.adapter.register_uri(ANY, ANY, text=str(response_text))
def test_fetch_production(self):
try:
data = IN_AP.fetch_production('IN-AP', self.session)
self.assertIsNotNone(data)
self.assertEqual(data['zoneKey'], 'IN-AP')
self.assertEqual(data['source'], 'core.ap.gov.in')
self.assertIsNotNone(data['datetime'])
self.assertIsNotNone(data['production'])
self.assertIsNotNone(data['storage'])
except Exception as ex:
self.fail(
"IN_AP.fetch_production() raised Exception: {0}".format(ex))
def test_fetch_consumption(self):
try:
data = IN_AP.fetch_consumption('IN-AP', self.session)
self.assertIsNotNone(data)
self.assertEqual(data['zoneKey'], 'IN-AP')
self.assertEqual(data['source'], 'core.ap.gov.in')
self.assertIsNotNone(data['datetime'])
self.assertIsNotNone(data['consumption'])
except Exception as ex:
self.fail(
"IN_AP.fetch_consumption() raised Exception: {0}".format(ex))
if __name__ == '__main__':
unittest.main()
|
Wallstreetbets.py | dave-knight/wallstreetbets-sentiment-analysis | 146 | 12612907 | '''*****************************************************************************
Purpose: To analyze the sentiments of r/wallstreetbets
This program uses Vader SentimentIntensityAnalyzer to calculate the ticker compound value.
You can change multiple parameters to suit your needs. See below under "set program parameters."
Implementation:
    Sets are used to check whether a ticker is valid: set membership ("x in s")
    is O(1), compared to O(n) for a list.
Limitations:
    The current implementation depends mainly on the defined parameters:
    it completely ignores heavily downvoted comments, so at times the most mentioned
    ticker may itself be heavily downvoted; you can change that via the upvotes variable.
Author: github:asad70
-------------------------------------------------------------------
****************************************************************************'''
import praw
from data import *
import time
import pandas as pd
import matplotlib.pyplot as plt
import squarify
from nltk.sentiment.vader import SentimentIntensityAnalyzer
class WallStreetBets:
def __init__(self, current_time, c_analyzed, posts, picks, top_picks, symbols, titles, picks_ayz, scores):
self.current_time = current_time
self.c_analyzed = c_analyzed
self.posts = posts
self.picks = picks
self.top_picks = top_picks
self.symbols = symbols
self.titles = titles
self.picks_ayz = picks_ayz
self.scores = scores
def analyze(clientid, clientsecret, usernme, passwrd):
# set the program parameters
limit = 500 # define the limit
upvotes = 2 # define # of upvotes, comment is considered if upvotes exceed this #
picks = 10 # define # of picks here, prints as "Top ## picks are:"
picks_ayz = 5 # define # of picks for sentiment analysis
post_flairs = ['Daily Discussion', 'Weekend Discussion', 'Discussion'] # posts flairs to search
posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {}
start_time = time.time()
reddit = reddit_login("Comment Extraction", clientid, clientsecret, usernme, passwrd)
subreddit = configure_reddit_subreddit(reddit)
hot_python = subreddit.hot() # sorting posts by hot
extract_comments_symbols(hot_python, post_flairs, titles, posts, limit, upvotes, tickers, a_comments, count, c_analyzed)
# sorts the dictionary
symbols = dict(sorted(tickers.items(), key=lambda item: item[1], reverse = True))
top_picks = list(symbols.keys())[0:picks]
current_time = (time.time() - start_time)
scores = apply_sentiment_analysis(symbols, picks_ayz, a_comments)
WSB = WallStreetBets(current_time, c_analyzed, posts, picks, top_picks, symbols, titles, picks_ayz, scores)
print_results(WSB)
print_sentiment_analysis(WSB)
return WSB
def reddit_login(user, clientid, clientsecret, usernme, passwrd):
return praw.Reddit(user_agent=user,
client_id=clientid,
client_secret=clientsecret,
username=usernme,
password=<PASSWORD>)
def configure_reddit_subreddit(reddit):
return reddit.subreddit('wallstreetbets')
def extract_comments_symbols(hot_python, post_flairs, titles, posts, limit, upvotes, tickers, a_comments, count, c_analyzed):
# Extracting comments, symbols from subreddit
for submission in hot_python:
if submission.link_flair_text in post_flairs:
submission.comment_sort = 'new'
comments = submission.comments
titles.append(submission.title)
posts += 1
submission.comments.replace_more(limit=limit)
for comment in comments:
c_analyzed += 1
if comment.score > upvotes:
split = comment.body.split(" ")
for word in split:
word = word.replace("$", "")
# upper = ticker, length of ticker <= 5, excluded words,
if word.isupper() and len(word) <= 5 and word not in blacklist and word in us:
if word in tickers:
tickers[word] += 1
a_comments[word].append(comment.body)
count += 1
else:
tickers[word] = 1
a_comments[word] = [comment.body]
count += 1
def apply_sentiment_analysis(symbols, picks_ayz, a_comments):
# Applying Sentiment Analysis
scores = {}
vader = SentimentIntensityAnalyzer()
picks_sentiment = list(symbols.keys())[0:picks_ayz]
for symbol in picks_sentiment:
stock_comments = a_comments[symbol]
for cmnt in stock_comments:
score = vader.polarity_scores(cmnt)
if symbol in scores:
for key, values in score.items():
scores[symbol][key] += score[key]
else:
scores[symbol] = score
# calculating avg.
for key in score:
scores[symbol][key] = scores[symbol][key] / symbols[symbol]
scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key] )
return scores
def print_results(WSB):
# print top picks
print("It took {t:.2f} seconds to analyze {c} comments in {p} posts.\n".format(t=WSB.current_time, c=WSB.c_analyzed, p=WSB.posts))
print("Posts analyzed:")
for i in WSB.titles: print(i)
print(f"\n{WSB.picks} most mentioned picks: ")
times = []
top = []
for i in WSB.top_picks:
print(f"{i}: {WSB.symbols[i]}")
times.append(WSB.symbols[i])
top.append(f"{i}: {WSB.symbols[i]}")
def print_sentiment_analysis(WSB):
# printing sentiment analysis
print(f"\nSentiment analysis of top {WSB.picks_ayz} picks:")
df = pd.DataFrame(WSB.scores)
df.index = ['Bearish', 'Neutral', 'Bullish', 'Total/Compound']
df = df.T
print(df)
# Date Visualization
# most mentioned picks
# squarify.plot(sizes=times, label=top, alpha=.7 )
# plt.axis('off')
# plt.title(f"{picks} most mentioned picks")
# plt.show()
# Sentiment analysis
# df = df.astype(float)
# colors = ['red', 'springgreen', 'forestgreen', 'coral']
# df.plot(kind = 'bar', color=colors, title=f"Sentiment analysis of top {picks_ayz} picks:")
# plt.show()
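# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how analyze() might be invoked; the credential strings
# below are placeholders for your own Reddit API values.
if __name__ == '__main__':
    wsb = analyze(
        clientid='YOUR_CLIENT_ID',          # placeholder
        clientsecret='YOUR_CLIENT_SECRET',  # placeholder
        usernme='YOUR_REDDIT_USERNAME',     # placeholder
        passwrd='YOUR_REDDIT_PASSWORD',     # placeholder
    )
    print(wsb.top_picks)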
|
Configuration/Generator/python/Hadronizer_TuneEE5C_13TeV_madgraph_differentPDF_MPIoff_herwigpp_cff.py | ckamtsikis/cmssw | 852 | 12612959 | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.HerwigppDefaults_cfi import *
from Configuration.Generator.HerwigppUE_EE_5C_cfi import *
from Configuration.Generator.HerwigppPDF_CTEQ6_LO_cfi import * # Import CTEQ6L PDF as shower pdf
from Configuration.Generator.HerwigppPDF_NNPDF30_NLO_cfi import herwigppPDFSettingsBlock as herwigppHardPDFSettingsBlock # Import NNPDF30 NLO as PDF of the hard subprocess
from Configuration.Generator.HerwigppEnergy_13TeV_cfi import *
from Configuration.Generator.HerwigppLHEFile_cfi import *
from Configuration.Generator.HerwigppMECorrections_cfi import *
from Configuration.Generator.HerwigppMPI_SwitchOff_cfi import *
# Showering LO MadGraph5_aMC@NLO LHE files with a different PDF for the hard subprocess
############ WARNING ######
# This option should only be used with LO MadGraph5_aMC@NLO LHE files.
# For NLO, MC@NLO-matched LHE files this most likely results in a phase-space mismatch
############ WARNING ######
generator = cms.EDFilter("ThePEGHadronizerFilter",
herwigDefaultsBlock,
herwigppUESettingsBlock,
herwigppPDFSettingsBlock,
herwigppHardPDFSettingsBlock, # Implementing renamed NNPDF30 config block
herwigppEnergySettingsBlock,
herwigppLHEFileSettingsBlock,
herwigppMECorrectionsSettingsBlock,
herwigppMPISettingsBlock,
configFiles = cms.vstring(),
parameterSets = cms.vstring(
'hwpp_cmsDefaults',
'hwpp_ue_EE5C',
'hwpp_cm_13TeV',
'hwpp_pdf_CTEQ6L1', # Shower PDF matching with the tune
'hwpp_pdf_NNPDF30NLO_Hard', # PDF of hard subprocess
'hwpp_LHE_MadGraph_DifferentPDFs', ### WARNING ### Use this option only with LO MadGraph5_aMC@NLO LHE files
'hwpp_MECorr_Off', # Switch off ME corrections while showering LHE files as recommended by Herwig++ authors
'hwpp_mpi_switchOff',
),
crossSection = cms.untracked.double(-1),
filterEfficiency = cms.untracked.double(1.0),
)
ProductionFilterSequence = cms.Sequence(generator)
|
tests/redirects_tests/tests.py | agarwalutkarsh554/django | 61,676 | 12612960 | from django.conf import settings
from django.contrib.redirects.middleware import RedirectFallbackMiddleware
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.http import (
HttpResponse, HttpResponseForbidden, HttpResponseRedirect,
)
from django.test import TestCase, modify_settings, override_settings
@modify_settings(MIDDLEWARE={'append': 'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, ROOT_URLCONF='redirects_tests.urls', SITE_ID=1)
class RedirectTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.site = Site.objects.get(pk=settings.SITE_ID)
def test_model(self):
r1 = Redirect.objects.create(site=self.site, old_path='/initial', new_path='/new_target')
self.assertEqual(str(r1), "/initial ---> /new_target")
def test_redirect(self):
Redirect.objects.create(site=self.site, old_path='/initial', new_path='/new_target')
response = self.client.get('/initial')
self.assertRedirects(response, '/new_target', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash(self):
Redirect.objects.create(site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial')
self.assertRedirects(response, '/new_target/', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash_and_query_string(self):
Redirect.objects.create(site=self.site, old_path='/initial/?foo', new_path='/new_target/')
response = self.client.get('/initial?foo')
self.assertRedirects(response, '/new_target/', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_not_found_with_append_slash(self):
"""
Exercise the second Redirect.DoesNotExist branch in
RedirectFallbackMiddleware.
"""
response = self.client.get('/test')
self.assertEqual(response.status_code, 404)
def test_redirect_shortcircuits_non_404_response(self):
"""RedirectFallbackMiddleware short-circuits on non-404 requests."""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_response_gone(self):
"""When the redirect target is '', return a 410"""
Redirect.objects.create(site=self.site, old_path='/initial', new_path='')
response = self.client.get('/initial')
self.assertEqual(response.status_code, 410)
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sites_not_installed(self):
def get_response(request):
return HttpResponse()
msg = (
'You cannot use RedirectFallbackMiddleware when '
'django.contrib.sites is not installed.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
RedirectFallbackMiddleware(get_response)
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
# Use HTTP responses different from the defaults
response_gone_class = HttpResponseForbidden
response_redirect_class = HttpResponseRedirect
@modify_settings(MIDDLEWARE={'append': 'redirects_tests.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.site = Site.objects.get(pk=settings.SITE_ID)
def test_response_gone_class(self):
Redirect.objects.create(site=self.site, old_path='/initial/', new_path='')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 403)
def test_response_redirect_class(self):
Redirect.objects.create(site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 302)
|
tests/importer/onnx_/basic/test_constantofshape.py | xhuohai/nncase | 510 | 12612979 | # Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto, numpy_helper
from onnx_test_runner import OnnxTestRunner
def _make_module(in_shape, value):
inputs = []
outputs = []
initializers = []
attributes_dict = {}
# input
input_const = helper.make_tensor(
'input',
TensorProto.INT64,
dims=[len(in_shape)],
vals=in_shape
)
inputs.append('input')
initializers.append(input_const)
type = TensorProto.FLOAT
if value is not None:
type = value[0]
tensor = onnx.helper.make_tensor("value", type, [1], [value[1]])
attributes_dict['value'] = tensor
# output
output = helper.make_tensor_value_info('output', type, in_shape)
outputs.append('output')
node = onnx.helper.make_node(
'ConstantOfShape',
inputs=inputs,
outputs=outputs,
**attributes_dict
)
nodes = []
nodes.append(node)
graph_def = helper.make_graph(
nodes,
'test-model',
[],
[output],
initializer=initializers)
model_def = helper.make_model(graph_def, producer_name='kendryte')
return model_def
in_shapes = [
[1, 3, 16, 16]
]
values = [
None,
[TensorProto.FLOAT, 0],
[TensorProto.FLOAT, 1],
]
@pytest.mark.parametrize('in_shape', in_shapes)
@pytest.mark.parametrize('value', values)
def test_constantofshape(in_shape, value, request):
model_def = _make_module(in_shape, value)
runner = OnnxTestRunner(request.node.name)
model_file = runner.from_onnx_helper(model_def)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_constantofshape.py'])
|
h/exceptions.py | tgiardina/rpp-h | 2,103 | 12612992 | <filename>h/exceptions.py<gh_stars>1000+
class InvalidUserId(Exception):
"""The userid does not meet the expected pattern."""
def __init__(self, user_id):
super().__init__(f"User id '{user_id}' is not valid")
class RealtimeMessageQueueError(Exception):
"""A message could not be sent to the realtime Rabbit queue."""
def __init__(self):
super().__init__("Could not queue message")
|
apps/webssh/websocket.py | crazypenguin/devops | 300 | 12613035 | from channels.generic.websocket import WebsocketConsumer
from .ssh import SSH
from django.conf import settings
from django.http.request import QueryDict
from django.utils.six import StringIO
import django.utils.timezone as timezone
from devops.settings import TMP_DIR
from server.models import RemoteUserBindHost
from webssh.models import TerminalLog
from django.db.models import Q
import os
import json
import re
import time
try:
session_exipry_time = settings.CUSTOM_SESSION_EXIPRY_TIME
except BaseException:
session_exipry_time = 60 * 30
def terminal_log(user, hostname, ip, protocol, port, username, cmd, detail, address, useragent, start_time):
event = TerminalLog()
event.user = user
event.hostname = hostname
event.ip = ip
event.protocol = protocol
event.port = port
event.username = username
event.cmd = cmd
event.detail = detail
event.address = address
event.useragent = useragent
event.start_time = start_time
event.save()
class WebSSH(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.message = {'status': 0, 'message': None}
"""
status:
        0: the ssh connection and the websocket are both working normally
        1: an unknown error occurred; close the ssh and websocket connections
        2: authentication/authorization problem (not logged in, login expired, host missing or disabled); close the websocket
        message:
        when status is 1, message holds the specific error message
        when status is 0, message holds the data returned by ssh; the frontend reads it and writes it to the terminal page
"""
self.session = None
self.remote_host = None
self.start_time = None
def connect(self):
"""
        Open the websocket connection and try to connect to the ssh host using the parameters passed from the frontend
:return:
"""
self.accept()
self.start_time = timezone.now()
self.session = self.scope.get('session', None)
        if not self.session.get('islogin', None):  # not logged in: disconnect the websocket immediately
self.message['status'] = 2
self.message['message'] = 'You are not login in...'
message = json.dumps(self.message)
self.send(message)
self.close(3001)
self.check_login()
query_string = self.scope.get('query_string').decode()
ssh_args = QueryDict(query_string=query_string, encoding='utf-8')
width = ssh_args.get('width')
height = ssh_args.get('height')
width = int(width)
height = int(height)
auth = None
ssh_key_name = '123456'
hostid = int(ssh_args.get('hostid'))
try:
            if not self.session['issuperuser']:  # for ordinary users, check whether they are bound to this host or have permission for it
hosts = RemoteUserBindHost.objects.filter(
Q(id=hostid),
Q(user__username=self.session['username']) | Q(group__user__username=self.session['username']),
).distinct()
if not hosts:
self.message['status'] = 2
self.message['message'] = 'Host is not exist...'
message = json.dumps(self.message)
self.send(message)
self.close(3001)
self.remote_host = RemoteUserBindHost.objects.get(id=hostid)
if not self.remote_host.enabled:
try:
self.message['status'] = 2
self.message['message'] = 'Host is disabled...'
message = json.dumps(self.message)
self.send(message)
self.close(3001)
except BaseException:
pass
except BaseException:
self.message['status'] = 2
self.message['message'] = 'Host is not exist...'
message = json.dumps(self.message)
self.send(message)
self.close(3001)
host = self.remote_host.ip
port = self.remote_host.port
user = self.remote_host.remote_user.username
passwd = self.remote_host.remote_user.password
timeout = 15
self.ssh = SSH(websocker=self, message=self.message)
ssh_connect_dict = {
'host': host,
'user': user,
'port': port,
'timeout': timeout,
'pty_width': width,
'pty_height': height,
'password': <PASSWORD>,
}
if auth == 'key':
ssh_key_file = os.path.join(TMP_DIR, ssh_key_name)
with open(ssh_key_file, 'r') as f:
ssh_key = f.read()
string_io = StringIO()
string_io.write(ssh_key)
string_io.flush()
string_io.seek(0)
ssh_connect_dict['ssh_key'] = string_io
os.remove(ssh_key_file)
self.ssh.connect(**ssh_connect_dict)
if self.remote_host.remote_user.enabled:
            if self.session.get('issuperuser', None):  # only superusers may use the su switch feature
if self.remote_host.remote_user.superusername:
self.ssh.su_root(
self.remote_host.remote_user.superusername,
self.remote_host.remote_user.superpassword,
0.3,
)
def disconnect(self, close_code):
try:
if close_code == 3001:
pass
else:
self.ssh.close()
except:
pass
finally:
            # strip color escape sequences from the captured output
self.ssh.res = re.sub(r'(\[\d{2};\d{2}m|\[0m)', '', self.ssh.res)
            # print('command: ')
            # print(self.ssh.cmd)
            # print('result: ')
            # print(res)
user_agent = None
for i in self.scope['headers']:
if i[0].decode('utf-8') == 'user-agent':
user_agent = i[1].decode('utf-8')
break
if self.ssh.cmd:
terminal_log(
self.session.get('username'),
self.remote_host.hostname,
self.remote_host.ip,
self.remote_host.get_protocol_display(),
self.remote_host.port,
self.remote_host.remote_user.username,
self.ssh.cmd,
self.ssh.res,
self.scope['client'][0],
user_agent,
self.start_time,
)
def receive(self, text_data=None, bytes_data=None):
data = json.loads(text_data)
if type(data) == dict:
if data['data'] and '\r' in data['data']:
self.check_login()
status = data['status']
if status == 0:
data = data['data']
self.ssh.shell(data)
else:
cols = data['cols']
rows = data['rows']
self.ssh.resize_pty(cols=cols, rows=rows)
def check_login(self):
lasttime = int(self.scope['session']['lasttime'])
now = int(time.time())
if now - lasttime > session_exipry_time:
self.message['status'] = 2
self.message['message'] = 'Your login is expired...'
message = json.dumps(self.message)
self.send(message)
self.close(3001)
else:
self.scope['session']['lasttime'] = now
self.scope["session"].save()
|
migrations/versions/6ea7dc05f496_fix_typo_in_history_detail.py | cwinfo/PowerDNS-Admin | 109 | 12613036 | """Fix typo in history detail
Revision ID: 6ea7dc05f496
Revises: <KEY>
Create Date: 2022-05-10 10:16:58.784497
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6ea7dc05f496'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
history_table = sa.sql.table('history',
sa.Column('detail', sa.Text),
)
def upgrade():
op.execute(
history_table.update()
.where(history_table.c.detail.like('%"add_rrests":%'))
.values({
'detail': sa.func.replace(
sa.func.replace(history_table.c.detail, '"add_rrests":', '"add_rrsets":'),
'"del_rrests":', '"del_rrsets":'
)
})
)
def downgrade():
op.execute(
history_table.update()
.where(history_table.c.detail.like('%"add_rrsets":%'))
.values({
'detail': sa.func.replace(
sa.func.replace(history_table.c.detail, '"add_rrsets":', '"add_rrests":'),
'"del_rrsets":', '"del_rrests":'
)
})
)
|
Networking-Test-Kit/LSL/lslStreamTest_FFTplot.py | thiago-roque07/OpenBCI_GUI | 500 | 12613046 | <gh_stars>100-1000
### Run the OpenBCI GUI
### Set Networking mode to LSL, FFT data type, and # Chan to 125
### Thanks to @Sentdex - Nov 2019
from pylsl import StreamInlet, resolve_stream
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib import style
from collections import deque
last_print = time.time()
fps_counter = deque(maxlen=150)
duration = 5
# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'FFT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
channel_data = {}
for i in range(duration): # how many iterations. Eventually this would be a while True
for i in range(16): # each of the 16 channels here
sample, timestamp = inlet.pull_sample()
if i not in channel_data:
channel_data[i] = sample
else:
channel_data[i].append(sample)
fps_counter.append(time.time() - last_print)
last_print = time.time()
cur_raw_hz = 1/(sum(fps_counter)/len(fps_counter))
print(cur_raw_hz)
for chan in channel_data:
plt.plot(channel_data[chan][:60])
plt.show() |
app/queues.py | riQQ/mtgatracker | 240 | 12613053 | <gh_stars>100-1000
import multiprocessing
all_die_queue = multiprocessing.Queue()
block_read_queue = multiprocessing.Queue()
json_blob_queue = multiprocessing.Queue()
game_state_change_queue = multiprocessing.Queue()
decklist_change_queue = multiprocessing.Queue()
general_output_queue = multiprocessing.Queue()
collection_state_change_queue = multiprocessing.Queue() |
HAN/test_han.py | RandolphVI/Text-Pairs-Relation-Classification | 150 | 12613063 | <gh_stars>100-1000
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
import numpy as np
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
def test_han():
"""Test HAN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load data
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args, args.test_file, word2idx)
# Load han model
OPTION = dh._option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_front = graph.get_operation_by_name("input_x_front").outputs[0]
input_x_behind = graph.get_operation_by_name("input_x_behind").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/topKPreds").outputs[0]
predictions = graph.get_operation_by_name("output/topKPreds").outputs[1]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output nodes
output_node_names = "output/topKPreds"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-han-{0}.pb".format(MODEL), as_text=False)
# Generate batches for one epoch
batches_test = dh.batch_iter(list(create_input_data(test_data)), args.batch_size, 1, shuffle=False)
# Collect the predictions here
test_counter, test_loss = 0, 0.0
true_labels = []
predicted_labels = []
predicted_scores = []
for batch_test in batches_test:
x_f, x_b, y_onehot = zip(*batch_test)
feed_dict = {
input_x_front: x_f,
input_x_behind: x_b,
input_y: y_onehot,
dropout_keep_prob: 1.0,
is_training: False
}
batch_predicted_scores, batch_predicted_labels, batch_loss \
= sess.run([scores, predictions, loss], feed_dict)
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in batch_predicted_scores:
predicted_scores.append(j[0])
for k in batch_predicted_labels:
predicted_labels.append(k[0])
test_loss = test_loss + batch_loss
test_counter = test_counter + 1
test_loss = float(test_loss / test_counter)
# Calculate Precision & Recall & F1
test_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
test_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
logger.info("All Test Dataset: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(test_loss, test_acc, test_pre, test_rec, test_F1, test_auc))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", front_data_id=test_data['f_id'],
behind_data_id=test_data['b_id'], true_labels=true_labels,
predict_labels=predicted_labels, predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_han()
|
algorithms/strings/decode_string.py | zhengli0817/algorithms | 22,426 | 12613081 | # Given an encoded string, return its decoded string.
# The encoding rule is: k[encoded_string], where the encoded_string
# inside the square brackets is being repeated exactly k times.
# Note that k is guaranteed to be a positive integer.
# You may assume that the input string is always valid; No extra white spaces,
# square brackets are well-formed, etc.
# Furthermore, you may assume that the original data does not contain any
# digits and that digits are only for those repeat numbers, k.
# For example, there won't be input like 3a or 2[4].
# Examples:
# s = "3[a]2[bc]", return "aaabcbc".
# s = "3[a2[c]]", return "accaccacc".
# s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
def decode_string(s):
"""
:type s: str
:rtype: str
"""
stack = []; cur_num = 0; cur_string = ''
for c in s:
if c == '[':
stack.append((cur_string, cur_num))
cur_string = ''
cur_num = 0
elif c == ']':
prev_string, num = stack.pop()
cur_string = prev_string + num * cur_string
elif c.isdigit():
cur_num = cur_num*10 + int(c)
else:
cur_string += c
return cur_string
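# --- Illustrative sanity check (not part of the original file) ---
# Reproduces the examples from the comment block at the top of this file.
if __name__ == "__main__":
    assert decode_string("3[a]2[bc]") == "aaabcbc"
    assert decode_string("3[a2[c]]") == "accaccacc"
    assert decode_string("2[abc]3[cd]ef") == "abcabccdcdcdef"
    print("decode_string examples OK")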
|
tests/opentracer/utils.py | p7g/dd-trace-py | 308 | 12613103 | <reponame>p7g/dd-trace-py
from ddtrace.opentracer import Tracer
def init_tracer(service_name, dd_tracer, scope_manager=None):
"""A method that emulates what a user of OpenTracing would call to
initialize a Datadog opentracer.
It accepts a Datadog tracer that should be the same one used for testing.
"""
ot_tracer = Tracer(service_name, dd_tracer=dd_tracer, scope_manager=scope_manager)
return ot_tracer
|
observations/r/acf1.py | hajime9652/observations | 199 | 12613136 | <reponame>hajime9652/observations
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def acf1(path):
"""Aberrant Crypt Foci in Rat Colons
Numbers of aberrant crypt foci (ACF) in the section 1 of the colons of
22 rats subjected to a single dose of the carcinogen azoxymethane (AOM),
sacrificed at 3 different times.
This data frame contains the following columns:
count
The number of ACF observed in section 1 of each rat colon
endtime
Time of sacrifice, in weeks following injection of AOM
<NAME>, Faculty of Human Ecology, University of Manitoba,
Winnipeg, Canada.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `acf1.csv`.
Returns:
Tuple of np.ndarray `x_train` with 22 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'acf1.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/ACF1.csv'
maybe_download_and_extract(path, url,
save_file_name='acf1.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
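# Illustrative usage (not part of the original file); the target directory is a
# placeholder, and the CSV is downloaded into it on first use:
#   x_train, metadata = acf1('~/observations_data')
#   x_train.shape -> (22, 2)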
|
locations/spiders/federal_savings_bank.py | boomerwv1/alltheplaces | 297 | 12613154 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class FederalSavingsBankSpider(scrapy.Spider):
name = "federal_savings_bank"
allowed_domains = ['thefederalsavingsbank.com']
start_urls = [
'https://www.thefederalsavingsbank.com/sitemap.xml',
]
def parse(self, response):
response.selector.remove_namespaces()
for url in response.xpath("//loc/text()").extract():
if "/our-locations/" in url:
yield scrapy.Request(url, callback=self.parse_store)
def parse_store(self, response):
properties = {
'ref': re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1),
'name': response.xpath('//h1/text()').extract_first(),
'addr_full': response.xpath('//*[@class="lpo_street"]/text()').extract_first(),
'city': response.xpath('//*[@class="lpo_city"]/text()').extract_first(),
'state': response.xpath('//*[@class="lpo_state"]/text()').extract_first(),
'postcode': response.xpath('//*[@class="lpo_zip"]/text()').extract_first(),
'country': "US",
'phone': response.xpath('//*[@class="lpo_phone"]/a/text()').extract_first(),
'website': response.url
}
yield GeojsonPointItem(**properties)
|
tests/prioritization/test_prioritization_rules.py | trumanw/ScaffoldGraph | 121 | 12613157 | <filename>tests/prioritization/test_prioritization_rules.py
"""
scaffoldgraph tests.prioritization.test_prioritization_rules
"""
import pytest
from scaffoldgraph.prioritization.prioritization_rules import *
class MockScaffoldFilterRule(BaseScaffoldFilterRule):
def filter(self, child, parents):
return parents[1:]
@property
def name(self):
return 'mock'
def test_prioritization_rules():
"""Test abstract ruletypes cannot be initialized."""
with pytest.raises(TypeError):
BaseScaffoldFilterRule()
with pytest.raises(TypeError):
ScaffoldFilterRule()
with pytest.raises(TypeError):
ScaffoldMinFilterRule()
with pytest.raises(TypeError):
ScaffoldMaxFilterRule()
def test_base_rule_subclass():
"""Test base class can be subclassed"""
mock = MockScaffoldFilterRule()
parents = [0, 1, 2, 3, 4]
assert mock.name == 'mock'
assert str(mock) == 'mock'
assert mock.filter(None, parents) == [1, 2, 3, 4]
assert mock(None, parents) == mock.filter(None, parents)
assert repr(mock) == '<MockScaffoldFilterRule at {}>'.format(hex(id(mock)))
def test_subclassing():
assert issubclass(ScaffoldFilterRule, BaseScaffoldFilterRule)
assert issubclass(ScaffoldMaxFilterRule, BaseScaffoldFilterRule)
assert issubclass(ScaffoldMinFilterRule, BaseScaffoldFilterRule)
|
pysph/examples/gas_dynamics/noh.py | nauaneed/pysph | 293 | 12613163 | <reponame>nauaneed/pysph
"""Example for the Noh's cylindrical implosion test. (10 minutes)
"""
# NumPy and standard library imports
import numpy
# PySPH base and carray imports
from pysph.base.utils import get_particle_array as gpa
from pysph.solver.application import Application
from pysph.sph.scheme import GasDScheme, SchemeChooser, ADKEScheme, GSPHScheme
from pysph.sph.wc.crksph import CRKSPHScheme
from pysph.base.nnps import DomainManager
# problem constants
dim = 2
gamma = 5.0/3.0
gamma1 = gamma - 1.0
# scheme constants
alpha1 = 1.0
alpha2 = 0.1
beta = 2.0
kernel_factor = 1.5
# numerical constants
dt = 1e-3
tf = 0.6
# domain and particle spacings
xmin = ymin = -1.0
xmax = ymax = 1.0
nx = ny = 100
dx = (xmax-xmin)/nx
dxb2 = 0.5 * dx
# initial values
h0 = kernel_factor*dx
rho0 = 1.0
m0 = dx*dx * rho0
vr = -1.0
class NohImplosion(Application):
def create_particles(self):
x, y = numpy.mgrid[
xmin:xmax:dx, ymin:ymax:dx]
# positions
x = x.ravel()
y = y.ravel()
rho = numpy.ones_like(x) * rho0
m = numpy.ones_like(x) * m0
h = numpy.ones_like(x) * h0
u = numpy.ones_like(x)
v = numpy.ones_like(x)
sin, cos, arctan = numpy.sin, numpy.cos, numpy.arctan2
for i in range(x.size):
theta = arctan(y[i], x[i])
u[i] = vr*cos(theta)
v[i] = vr*sin(theta)
fluid = gpa(
name='fluid', x=x, y=y, m=m, rho=rho, h=h, u=u, v=v, p=1e-12,
e=2.5e-11, h0=h.copy()
)
self.scheme.setup_properties([fluid])
print("Noh's problem with %d particles "
% (fluid.get_number_of_particles()))
return [fluid, ]
def create_domain(self):
return DomainManager(
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
mirror_in_x=True, mirror_in_y=True
)
def create_scheme(self):
mpm = GasDScheme(
fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
kernel_factor=kernel_factor, alpha1=alpha1, alpha2=alpha2,
beta=beta, adaptive_h_scheme="mpm",
update_alpha1=True, update_alpha2=True, has_ghosts=True
)
crksph = CRKSPHScheme(
fluids=['fluid'], dim=2, rho0=0, c0=0, nu=0, h0=0, p0=0,
gamma=gamma, cl=2, has_ghosts=True
)
gsph = GSPHScheme(
fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
kernel_factor=1.5,
g1=0.25, g2=0.5, rsolver=7, interpolation=1, monotonicity=2,
interface_zero=True, hybrid=False, blend_alpha=2.0,
niter=40, tol=1e-6, has_ghosts=True
)
adke = ADKEScheme(
fluids=['fluid'], solids=[], dim=dim, gamma=gamma,
alpha=1, beta=1, k=1.0, eps=0.8, g1=0.5, g2=0.5,
has_ghosts=True)
s = SchemeChooser(
default='crksph', crksph=crksph, mpm=mpm, adke=adke, gsph=gsph
)
s.configure_solver(dt=dt, tf=tf, adaptive_timestep=False)
return s
def configure_scheme(self):
s = self.scheme
if self.options.scheme == 'mpm':
s.configure(kernel_factor=1.2)
s.configure_solver(
dt=dt, tf=tf, adaptive_timestep=True, pfreq=50
)
elif self.options.scheme == 'crksph':
s.configure_solver(
dt=dt, tf=tf, adaptive_timestep=False, pfreq=50
)
elif self.options.scheme == 'gsph':
s.configure_solver(
dt=dt, tf=tf, adaptive_timestep=False, pfreq=50
)
elif self.options.scheme == 'adke':
s.configure_solver(
dt=dt, tf=tf, adaptive_timestep=False, pfreq=50
)
def post_process(self):
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
except ImportError:
print("Post processing requires matplotlib.")
return
if self.rank > 0 or len(self.output_files) == 0:
return
import os
from pysph.solver.utils import load
outfile = self.output_files[-1]
data = load(outfile)
pa = data['arrays']['fluid']
x = pa.x
y = pa.y
rho = pa.rho
p = pa.p
r = numpy.sqrt(x**2 + y**2)
# exact solutions
vs = 1.0/3.0 # shock radial velocity
rs = vs * tf # position of shock
ri = numpy.linspace(0, rs, 10)
ro = numpy.linspace(rs, xmax, 100)
re = numpy.concatenate((ri, ro))
rho_e1 = numpy.ones_like(ri) * ((gamma + 1) / (gamma - 1))**dim
rho_e2 = rho0 * (1 + tf / ro)**(dim - 1)
rho_e = numpy.concatenate((rho_e1, rho_e2))
p_e1 = vs * rho_e1
p_e2 = numpy.zeros_like(ro)
p_e = numpy.concatenate((p_e1, p_e2))
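        # Reference values behind the exact profile above (standard Noh solution,
        # gamma = 5/3, dim = 2): the shock moves outward at vs = 1/3, so inside
        # r < rs the gas is at rest with rho = ((gamma + 1)/(gamma - 1))**dim = 16
        # and p = vs * rho = 16/3, while outside the shock the infalling gas has
        # rho = rho0 * (1 + t/r)**(dim - 1) and essentially zero pressure.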
pyplot.scatter(r, p, s=1)
pyplot.xlabel('r')
pyplot.ylabel('P')
pyplot.plot(re, p_e, color='r', lw=1)
pyplot.legend(
['exact', self.options.scheme]
)
fname = os.path.join(self.output_dir, 'pressure.png')
pyplot.savefig(fname, dpi=300)
pyplot.close('all')
pyplot.scatter(r, rho, s=1)
pyplot.xlabel('r')
pyplot.ylabel(r'$\rho$')
pyplot.plot(re, rho_e, color='r', lw=1)
pyplot.legend(
['exact', self.options.scheme]
)
fname = os.path.join(self.output_dir, 'density.png')
pyplot.savefig(fname, dpi=300)
pyplot.close('all')
if __name__ == '__main__':
app = NohImplosion()
app.run()
app.post_process()
|
utils/time_tools.py | jingmouren/QuantResearch | 623 | 12613172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
def convert_date_input(input_str, default_date=None):
"""
convert date input
    :param input_str: e.g. 3y, 2m, 1w, 5d, or yyyy-mm-dd
    :param default_date: datetime.date, returned when the input cannot be parsed
    :return: datetime.datetime (or default_date on a parse failure)
"""
ret_date = datetime.today()
try:
if 'Y' in input_str or 'y' in input_str:
yr = int(input_str[:-1])
ret_date = ret_date.replace(year=ret_date.year+yr)
elif 'M' in input_str or 'm' in input_str:
mth = int(input_str[:-1])
total_mth = ret_date.month + mth
            # keep the month in 1..12 (a plain total_mth % 12 would yield 0 for December)
            nm = (total_mth - 1) % 12 + 1
            ny = (total_mth - nm) // 12
ret_date = ret_date.replace(year=ret_date.year+ny)
ret_date = ret_date.replace(month=nm)
elif 'W' in input_str or 'w' in input_str:
wks = int(input_str[:-1])
ret_date = ret_date + timedelta(days=7*wks)
elif 'D' in input_str or 'd' in input_str:
ds = int(input_str[:-1])
ret_date = ret_date + timedelta(days=ds)
else:
ret_date = datetime.strptime(input_str, '%Y-%m-%d')
except:
# ret_date = ret_date + timedelta(days=-5 * 365)
ret_date = default_date
return ret_date
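# Illustrative usage (a sketch; exact datetimes depend on when it is called):
# convert_date_input('1y') -> the same calendar date one year from today
# convert_date_input('2w') -> today + 14 days
# convert_date_input('2021-01-31') -> datetime(2021, 1, 31, 0, 0)
# convert_date_input('oops', default_date=some_date) -> some_date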
def locate_week():
today = datetime.today()
return [today + timedelta(days=i) for i in range(0 - today.weekday(), 7 - today.weekday())] # week of today, then intersect with datetimeindex
|
homeassistant/components/insteon/climate.py | learn-home-automation/core | 22,481 | 12613177 | """Support for Insteon thermostat."""
from __future__ import annotations
from pyinsteon.constants import ThermostatMode
from pyinsteon.operating_flag import CELSIUS
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import SIGNAL_ADD_ENTITIES
from .insteon_entity import InsteonEntity
from .utils import async_add_insteon_entities
COOLING = 1
HEATING = 2
DEHUMIDIFYING = 3
HUMIDIFYING = 4
TEMPERATURE = 10
HUMIDITY = 11
SYSTEM_MODE = 12
FAN_MODE = 13
COOL_SET_POINT = 14
HEAT_SET_POINT = 15
HUMIDITY_HIGH = 16
HUMIDITY_LOW = 17
HVAC_MODES = {
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_HEAT_COOL,
}
FAN_MODES = {4: HVAC_MODE_AUTO, 8: HVAC_MODE_FAN_ONLY}
SUPPORTED_FEATURES = (
SUPPORT_FAN_MODE
| SUPPORT_TARGET_HUMIDITY
| SUPPORT_TARGET_TEMPERATURE
| SUPPORT_TARGET_TEMPERATURE_RANGE
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Insteon climate entities from a config entry."""
@callback
def async_add_insteon_climate_entities(discovery_info=None):
"""Add the Insteon entities for the platform."""
async_add_insteon_entities(
hass,
CLIMATE_DOMAIN,
InsteonClimateEntity,
async_add_entities,
discovery_info,
)
signal = f"{SIGNAL_ADD_ENTITIES}_{CLIMATE_DOMAIN}"
async_dispatcher_connect(hass, signal, async_add_insteon_climate_entities)
async_add_insteon_climate_entities()
class InsteonClimateEntity(InsteonEntity, ClimateEntity):
"""A Class for an Insteon climate entity."""
@property
def supported_features(self):
"""Return the supported features for this entity."""
return SUPPORTED_FEATURES
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self._insteon_device.properties[CELSIUS].value:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def current_humidity(self) -> int | None:
"""Return the current humidity."""
return self._insteon_device.groups[HUMIDITY].value
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HVAC_MODES[self._insteon_device.groups[SYSTEM_MODE].value]
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return list(HVAC_MODES.values())
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self._insteon_device.groups[TEMPERATURE].value
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
if self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.HEAT:
return self._insteon_device.groups[HEAT_SET_POINT].value
if self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.COOL:
return self._insteon_device.groups[COOL_SET_POINT].value
return None
@property
def target_temperature_high(self) -> float | None:
"""Return the highbound target temperature we try to reach."""
if self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.AUTO:
return self._insteon_device.groups[COOL_SET_POINT].value
return None
@property
def target_temperature_low(self) -> float | None:
"""Return the lowbound target temperature we try to reach."""
if self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.AUTO:
return self._insteon_device.groups[HEAT_SET_POINT].value
return None
@property
def fan_mode(self) -> str | None:
"""Return the fan setting."""
return FAN_MODES[self._insteon_device.groups[FAN_MODE].value]
@property
def fan_modes(self) -> list[str] | None:
"""Return the list of available fan modes."""
return list(FAN_MODES.values())
@property
def target_humidity(self) -> int | None:
"""Return the humidity we try to reach."""
high = self._insteon_device.groups[HUMIDITY_HIGH].value
low = self._insteon_device.groups[HUMIDITY_LOW].value
# May not be loaded yet so return a default if required
return (high + low) / 2 if high and low else None
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return 1
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if self._insteon_device.groups[COOLING].value:
return CURRENT_HVAC_COOL
if self._insteon_device.groups[HEATING].value:
return CURRENT_HVAC_HEAT
if self._insteon_device.groups[FAN_MODE].value == ThermostatMode.FAN_ALWAYS_ON:
return CURRENT_HVAC_FAN
return CURRENT_HVAC_IDLE
@property
def extra_state_attributes(self):
"""Provide attributes for display on device card."""
attr = super().extra_state_attributes
humidifier = "off"
if self._insteon_device.groups[DEHUMIDIFYING].value:
humidifier = "dehumidifying"
if self._insteon_device.groups[HUMIDIFYING].value:
humidifier = "humidifying"
attr["humidifier"] = humidifier
return attr
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.HEAT:
await self._insteon_device.async_set_heat_set_point(target_temp)
elif self._insteon_device.groups[SYSTEM_MODE].value == ThermostatMode.COOL:
await self._insteon_device.async_set_cool_set_point(target_temp)
else:
await self._insteon_device.async_set_heat_set_point(target_temp_low)
await self._insteon_device.async_set_cool_set_point(target_temp_high)
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
mode = list(FAN_MODES)[list(FAN_MODES.values()).index(fan_mode)]
await self._insteon_device.async_set_mode(mode)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
mode = list(HVAC_MODES)[list(HVAC_MODES.values()).index(hvac_mode)]
await self._insteon_device.async_set_mode(mode)
async def async_set_humidity(self, humidity):
"""Set new humidity level."""
change = humidity - self.target_humidity
high = self._insteon_device.groups[HUMIDITY_HIGH].value + change
low = self._insteon_device.groups[HUMIDITY_LOW].value + change
await self._insteon_device.async_set_humidity_low_set_point(low)
await self._insteon_device.async_set_humidity_high_set_point(high)
async def async_added_to_hass(self):
"""Register INSTEON update events."""
await super().async_added_to_hass()
await self._insteon_device.async_read_op_flags()
for group in (
COOLING,
HEATING,
DEHUMIDIFYING,
HUMIDIFYING,
HEAT_SET_POINT,
FAN_MODE,
SYSTEM_MODE,
TEMPERATURE,
HUMIDITY,
HUMIDITY_HIGH,
HUMIDITY_LOW,
):
self._insteon_device.groups[group].subscribe(self.async_entity_update)
|
navec/train/ctl/merge.py | FreedomSlow/navec | 115 | 12613201 | <reponame>FreedomSlow/navec
import sys
from itertools import groupby
from heapq import (
heappush,
heappop,
)
from ..glove import (
load_glove_vocab,
format_glove_vocab,
load_glove_cooc,
format_glove_cooc
)
def merge_vocabs(args):
iters = [load_glove_vocab(_) for _ in args.paths]
records = merge(iters)
records = sum_groups(records)
lines = format_glove_vocab(records)
sys.stdout.buffer.writelines(lines)
def merge_coocs(args):
vocab = load_glove_vocab(args.vocab)
ids = dict(vocab_ids(vocab))
pairs = parse_pairs(args.pairs)
iters = [
load_decoded_cooc(*_)
for _ in pairs
]
records = merge(iters)
records = sum_groups(records)
records = encode_cooc(records, ids)
stream = format_glove_cooc(records)
sys.stdout.buffer.writelines(stream)
def vocab_words(records):
for word, count in records:
yield word
def vocab_ids(records):
for id, (word, count) in enumerate(records):
yield word, id
def decode_cooc(records, words):
# in glove cooc ids start from 1
for (source, target), weight in records:
yield (words[source - 1], words[target - 1]), weight
def encode_cooc(records, ids):
for (source, target), weight in records:
yield (ids[source] + 1, ids[target] + 1), weight
def parse_pairs(pairs):
for pair in pairs:
yield pair.split(':', 1)
def load_decoded_cooc(cooc, vocab):
records = load_glove_cooc(cooc)
vocab = load_glove_vocab(vocab)
words = list(vocab_words(vocab))
return decode_cooc(records, words)
##########
#
# MERGE
#
########
SENTINEL = None
def append_sentinel(items, sentinel=SENTINEL):
for item in items:
yield item
yield sentinel
def merge(iters):
iters = [append_sentinel(_) for _ in iters]
buffer = []
for index, records in enumerate(iters):
key, value = next(records)
heappush(buffer, (key, index, value))
while buffer:
key, index, value = heappop(buffer)
yield key, value
item = next(iters[index])
if item is not SENTINEL:
key, value = item
heappush(buffer, (key, index, value))
def first(pair):
return pair[0]
def second(pair):
return pair[1]
def sum_groups(records):
for key, group in groupby(records, first):
count = sum(second(_) for _ in group)
yield key, count
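# Illustrative usage (a sketch): merge() assumes each input iterator yields
# (key, value) pairs already sorted by key; sum_groups() then collapses equal keys.
# merged = merge([iter([('a', 1), ('c', 3)]), iter([('a', 2), ('b', 5)])])
# list(sum_groups(merged)) -> [('a', 3), ('b', 5), ('c', 3)]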
|
reports/site/site_address.py | FreeQster/reports | 104 | 12613202 | # site_address.py
# Make sure to add `geocoder` to your `local_requirements.txt` and make sure it is installed in your Python venv.
import geocoder
from extras.reports import Report
from dcim.models import Site
class checkSiteAddress(Report):
description = "Check if site has a physical address and/or geolocation information"
def test_site_address(self):
for site in Site.objects.all():
if site.physical_address:
self.log_success(site)
else:
self.log_failure(site, site.name)
def test_site_geo(self):
for site in Site.objects.all():
if site.latitude and site.longitude:
self.log_success(site)
else:
if site.physical_address:
g = geocoder.osm(site.physical_address)
if g:
self.log_warning(site, f'Missing geo location - possible ({round(g.x,6)}, {round(g.y,6)})')
else:
self.log_warning(site, f'Missing geo location ({site.latitude}, {site.longitude})')
else:
self.log_failure(site, f'Missing geo location ({site.latitude}, {site.longitude})')
|
GreedyInfoMax/vision/models/Resnet_Encoder.py | Spijkervet/Greedy_InfoMax | 288 | 12613214 | <reponame>Spijkervet/Greedy_InfoMax
import torch.nn as nn
import torch.nn.functional as F
import torch
from GreedyInfoMax.vision.models import InfoNCE_Loss, Supervised_Loss
from GreedyInfoMax.utils import model_utils
class PreActBlockNoBN(nn.Module):
"""Pre-activation version of the BasicBlock."""
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlockNoBN, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1
)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes, self.expansion * planes, kernel_size=1, stride=stride
)
)
def forward(self, x):
out = F.relu(x)
shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
out = self.conv1(out)
out = F.relu(out)
out = self.conv2(out)
out += shortcut
return out
class PreActBottleneckNoBN(nn.Module):
"""Pre-activation version of the original Bottleneck module."""
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneckNoBN, self).__init__()
# self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1)
# self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
# self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes, self.expansion * planes, kernel_size=1, stride=stride
)
)
def forward(self, x):
out = F.relu(x)
shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
out = self.conv1(out)
out = self.conv2(F.relu(out))
out = self.conv3(F.relu(out))
out += shortcut
return out
class ResNet_Encoder(nn.Module):
def __init__(
self,
opt,
block,
num_blocks,
filter,
encoder_num,
patch_size=16,
input_dims=3,
calc_loss=False,
):
super(ResNet_Encoder, self).__init__()
self.encoder_num = encoder_num
self.opt = opt
self.patchify = True
self.overlap = 2
self.calc_loss = calc_loss
self.patch_size = patch_size
self.filter = filter
self.model = nn.Sequential()
if encoder_num == 0:
self.model.add_module(
"Conv1",
nn.Conv2d(
input_dims, self.filter[0], kernel_size=5, stride=1, padding=2
),
)
self.in_planes = self.filter[0]
self.first_stride = 1
elif encoder_num > 2:
self.in_planes = self.filter[0] * block.expansion
self.first_stride = 2
else:
self.in_planes = (self.filter[0] // 2) * block.expansion
self.first_stride = 2
for idx in range(len(num_blocks)):
self.model.add_module(
"layer {}".format((idx)),
self._make_layer(
block, self.filter[idx], num_blocks[idx], stride=self.first_stride
),
)
self.first_stride = 2
## loss module is always present, but only gets used when training GreedyInfoMax modules
if self.opt.loss == 0:
self.loss = InfoNCE_Loss.InfoNCE_Loss(
opt,
in_channels=self.in_planes,
out_channels=self.in_planes
)
elif self.opt.loss == 1:
self.loss = Supervised_Loss.Supervised_Loss(opt, self.in_planes, True)
else:
raise Exception("Invalid option")
if self.opt.weight_init:
self.initialize()
def initialize(self):
for m in self.modules():
if isinstance(m, (nn.Conv2d,)):
model_utils.makeDeltaOrthogonal(
m.weight, nn.init.calculate_gain("relu")
)
elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d)):
m.momentum = 0.3
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, n_patches_x, n_patches_y, label, patchify_right_now=True):
if self.patchify and self.encoder_num == 0 and patchify_right_now:
x = (
x.unfold(2, self.patch_size, self.patch_size // self.overlap)
.unfold(3, self.patch_size, self.patch_size // self.overlap)
.permute(0, 2, 3, 1, 4, 5)
)
n_patches_x = x.shape[1]
n_patches_y = x.shape[2]
x = x.reshape(
x.shape[0] * x.shape[1] * x.shape[2], x.shape[3], x.shape[4], x.shape[5]
)
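            # The two unfold calls above cut each image into a grid of overlapping
            # patch_size x patch_size patches with stride patch_size // self.overlap;
            # the reshape stacks all patches along the batch dimension so the encoder
            # processes every patch independently.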
z = self.model(x)
out = F.adaptive_avg_pool2d(z, 1)
out = out.reshape(-1, n_patches_x, n_patches_y, out.shape[1])
out = out.permute(0, 3, 1, 2).contiguous()
accuracy = torch.zeros(1)
if self.calc_loss and self.opt.loss == 0:
loss = self.loss(out, out)
elif self.calc_loss and self.opt.loss == 1:
loss, accuracy = self.loss(out, label)
else:
loss = None
return out, z, loss, accuracy, n_patches_x, n_patches_y
|
numba/np/ufunc/__init__.py | auderson/numba | 6,620 | 12613225 | <reponame>auderson/numba<filename>numba/np/ufunc/__init__.py
# -*- coding: utf-8 -*-
from numba.np.ufunc.decorators import Vectorize, GUVectorize, vectorize, guvectorize
from numba.np.ufunc._internal import PyUFunc_None, PyUFunc_Zero, PyUFunc_One
from numba.np.ufunc import _internal, array_exprs
from numba.np.ufunc.parallel import (threading_layer, get_num_threads,
set_num_threads, _get_thread_id)
if hasattr(_internal, 'PyUFunc_ReorderableNone'):
PyUFunc_ReorderableNone = _internal.PyUFunc_ReorderableNone
del _internal, array_exprs
def _init():
def init_cuda_vectorize():
from numba.cuda.vectorizers import CUDAVectorize
return CUDAVectorize
def init_cuda_guvectorize():
from numba.cuda.vectorizers import CUDAGUFuncVectorize
return CUDAGUFuncVectorize
Vectorize.target_registry.ondemand['cuda'] = init_cuda_vectorize
GUVectorize.target_registry.ondemand['cuda'] = init_cuda_guvectorize
_init()
del _init
|
frappe/website/doctype/personal_data_download_request/test_personal_data_download_request.py | naderelabed/frappe | 3,755 | 12613259 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import frappe
import unittest
import json
from frappe.website.doctype.personal_data_download_request.personal_data_download_request import get_user_data
from frappe.contacts.doctype.contact.contact import get_contact_name
from frappe.core.doctype.user.user import create_contact
class TestRequestPersonalData(unittest.TestCase):
def setUp(self):
create_user_if_not_exists(email='<EMAIL>')
def tearDown(self):
frappe.db.delete("Personal Data Download Request")
def test_user_data_creation(self):
user_data = json.loads(get_user_data('<EMAIL>'))
contact_name = get_contact_name('<EMAIL>')
expected_data = {'Contact': frappe.get_all('Contact', {"name": contact_name}, ["*"])}
expected_data = json.loads(json.dumps(expected_data, default=str))
self.assertEqual({'Contact': user_data['Contact']}, expected_data)
def test_file_and_email_creation(self):
frappe.set_user('<EMAIL>')
download_request = frappe.get_doc({
"doctype": 'Personal Data Download Request',
'user': '<EMAIL>'
})
download_request.save(ignore_permissions=True)
frappe.set_user('Administrator')
file_count = frappe.db.count('File', {
'attached_to_doctype':'Personal Data Download Request',
'attached_to_name': download_request.name
})
self.assertEqual(file_count, 1)
email_queue = frappe.get_all('Email Queue',
fields=['message'],
order_by="creation DESC",
limit=1)
self.assertTrue("Subject: Download Your Data" in email_queue[0].message)
frappe.db.delete("Email Queue")
def create_user_if_not_exists(email, first_name = None):
frappe.delete_doc_if_exists("User", email)
user = frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0],
"birth_date": frappe.utils.now_datetime()
}).insert(ignore_permissions=True)
create_contact(user=user)
|
alphamind/data/standardize.py | rongliang-tech/alpha-mind | 186 | 12613270 | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import numpy as np
from alphamind.utilities import aggregate
from alphamind.utilities import array_index
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_mean
from alphamind.utilities import simple_sqrsum
from alphamind.utilities import simple_std
from alphamind.utilities import transform
def standardize(x: np.ndarray, groups: np.ndarray = None, ddof=1) -> np.ndarray:
if groups is not None:
groups = group_mapping(groups)
mean_values = transform(groups, x, 'mean')
std_values = transform(groups, x, 'std', ddof)
return (x - mean_values) / np.maximum(std_values, 1e-8)
else:
return (x - simple_mean(x, axis=0)) / np.maximum(simple_std(x, axis=0, ddof=ddof), 1e-8)
def projection(x: np.ndarray, groups: np.ndarray = None, axis=1) -> np.ndarray:
if groups is not None and axis == 0:
groups = group_mapping(groups)
projected = transform(groups, x, 'project')
return projected
else:
return x / simple_sqrsum(x, axis=axis).reshape((-1, 1))
class Standardizer(object):
def __init__(self, ddof: int = 1):
self.ddof = ddof
self.mean = None
self.std = None
self.labels = None
def fit(self, x: np.ndarray, groups: np.ndarray = None):
if groups is not None:
group_index = group_mapping(groups)
self.mean = aggregate(group_index, x, 'mean')
self.std = aggregate(group_index, x, 'std', self.ddof)
self.labels = np.unique(groups)
else:
self.mean = simple_mean(x, axis=0)
self.std = simple_std(x, axis=0, ddof=self.ddof)
def transform(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
if groups is not None:
index = array_index(self.labels, groups)
return (x - self.mean[index]) / np.maximum(self.std[index], 1e-8)
else:
return (x - self.mean) / np.maximum(self.std, 1e-8)
def __call__(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
return standardize(x, groups, self.ddof)
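# Illustrative usage (a sketch, assuming simple_mean/simple_std behave like their
# numpy counterparts with ddof=1):
# x = np.array([[1.0], [2.0], [3.0]])
# standardize(x) -> approximately [[-1.0], [0.0], [1.0]]
# s = Standardizer(); s.fit(x); s.transform(x) -> the same standardized values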
|
solutions/problem_197.py | ksvr444/daily-coding-problem | 1,921 | 12613275 | <gh_stars>1000+
def rotate_index(arr, k, src_ind, src_num, count=0):
if count == len(arr):
return
des_ind = (src_ind + k) % len(arr)
des_num = arr[des_ind]
arr[des_ind] = src_num
rotate_index(arr, k, des_ind, des_num, count + 1)
def rotate_k(arr, k):
if k < 1:
return arr
start = 0
rotate_index(arr, k, start, arr[start])
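# Note: the single-cycle walk above visits every index only when
# gcd(k, len(arr)) == 1, which holds for the length-5 test cases below.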
# Tests
arr = [1, 2, 3, 4, 5]
rotate_k(arr, 2)
assert arr == [4, 5, 1, 2, 3]
rotate_k(arr, 2)
assert arr == [2, 3, 4, 5, 1]
rotate_k(arr, 4)
assert arr == [3, 4, 5, 1, 2]
|
docs_src/handling_errors/tutorial006.py | Aryabhata-Rootspring/fastapi | 53,007 | 12613302 | <reponame>Aryabhata-Rootspring/fastapi
from fastapi import FastAPI, HTTPException
from fastapi.exception_handlers import (
http_exception_handler,
request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException
app = FastAPI()
@app.exception_handler(StarletteHTTPException)
async def custom_http_exception_handler(request, exc):
print(f"OMG! An HTTP error!: {repr(exc)}")
return await http_exception_handler(request, exc)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc):
print(f"OMG! The client sent invalid data!: {exc}")
return await request_validation_exception_handler(request, exc)
@app.get("/items/{item_id}")
async def read_item(item_id: int):
if item_id == 3:
raise HTTPException(status_code=418, detail="Nope! I don't like 3.")
return {"item_id": item_id}
|
google/ads/googleads/v8/common/__init__.py | wxxlouisa/google-ads-python | 285 | 12613334 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = (
"AdAssetPolicySummary",
"AdImageAsset",
"AdMediaBundleAsset",
"AdScheduleInfo",
"AdTextAsset",
"AdVideoAsset",
"AddressInfo",
"AffiliateLocationFeedItem",
"AgeRangeInfo",
"AppAdInfo",
"AppEngagementAdInfo",
"AppFeedItem",
"AppPaymentModelInfo",
"AssetInteractionTarget",
"BasicUserListInfo",
"BidModifierSimulationPoint",
"BidModifierSimulationPointList",
"BookOnGoogleAsset",
"BudgetCampaignAssociationStatus",
"BudgetSimulationPoint",
"BudgetSimulationPointList",
"BusinessNameFilter",
"CallAdInfo",
"CallFeedItem",
"CalloutAsset",
"CalloutFeedItem",
"CarrierInfo",
"ClickLocation",
"CombinedAudienceInfo",
"CombinedRuleUserListInfo",
"Commission",
"ConceptGroup",
"ContentLabelInfo",
"CpcBidSimulationPoint",
"CpcBidSimulationPointList",
"CpvBidSimulationPoint",
"CpvBidSimulationPointList",
"CriterionCategoryAvailability",
"CriterionCategoryChannelAvailability",
"CriterionCategoryLocaleAvailability",
"CrmBasedUserListInfo",
"CustomAffinityInfo",
"CustomAudienceInfo",
"CustomIntentInfo",
"CustomParameter",
"CustomerMatchUserListMetadata",
"DateRange",
"DateSpecificRuleUserListInfo",
"DeviceInfo",
"DisplayCallToAction",
"DisplayUploadAdInfo",
"DynamicAffiliateLocationSetFilter",
"DynamicLocationSetFilter",
"EnhancedCpc",
"ExpandedDynamicSearchAdInfo",
"ExpandedTextAdInfo",
"ExplorerAutoOptimizerSetting",
"ExpressionRuleUserListInfo",
"FinalAppUrl",
"FrequencyCapEntry",
"FrequencyCapKey",
"GenderInfo",
"GeoPointInfo",
"GmailAdInfo",
"GmailTeaser",
"HistoricalMetricsOptions",
"HotelAdInfo",
"HotelAdvanceBookingWindowInfo",
"HotelCalloutFeedItem",
"HotelCheckInDateRangeInfo",
"HotelCheckInDayInfo",
"HotelCityInfo",
"HotelClassInfo",
"HotelCountryRegionInfo",
"HotelDateSelectionTypeInfo",
"HotelIdInfo",
"HotelLengthOfStayInfo",
"HotelStateInfo",
"ImageAdInfo",
"ImageAsset",
"ImageDimension",
"ImageFeedItem",
"IncomeRangeInfo",
"InteractionTypeInfo",
"IpBlockInfo",
"ItemAttribute",
"Keyword",
"KeywordAnnotations",
"KeywordConcept",
"KeywordInfo",
"KeywordPlanAggregateMetricResults",
"KeywordPlanAggregateMetrics",
"KeywordPlanDeviceSearches",
"KeywordPlanHistoricalMetrics",
"KeywordThemeInfo",
"LanguageInfo",
"LeadFormAsset",
"LeadFormDeliveryMethod",
"LeadFormField",
"LeadFormSingleChoiceAnswers",
"LegacyAppInstallAdInfo",
"LegacyResponsiveDisplayAdInfo",
"ListingDimensionInfo",
"ListingGroupInfo",
"ListingScopeInfo",
"LocalAdInfo",
"LocationFeedItem",
"LocationGroupInfo",
"LocationInfo",
"LogicalUserListInfo",
"LogicalUserListOperandInfo",
"ManualCpc",
"ManualCpm",
"ManualCpv",
"MatchingFunction",
"MaximizeConversionValue",
"MaximizeConversions",
"MediaBundleAsset",
"Metrics",
"MobileAppCategoryInfo",
"MobileApplicationInfo",
"MobileDeviceInfo",
"Money",
"MonthlySearchVolume",
"OfflineUserAddressInfo",
"Operand",
"OperatingSystemVersionInfo",
"ParentalStatusInfo",
"PercentCpc",
"PercentCpcBidSimulationPoint",
"PercentCpcBidSimulationPointList",
"PlacementInfo",
"PolicyTopicConstraint",
"PolicyTopicEntry",
"PolicyTopicEvidence",
"PolicyValidationParameter",
"PolicyViolationKey",
"PreferredContentInfo",
"PriceFeedItem",
"PriceOffer",
"ProductBiddingCategoryInfo",
"ProductBrandInfo",
"ProductChannelExclusivityInfo",
"ProductChannelInfo",
"ProductConditionInfo",
"ProductCustomAttributeInfo",
"ProductImage",
"ProductItemIdInfo",
"ProductTypeInfo",
"ProductVideo",
"PromotionAsset",
"PromotionFeedItem",
"ProximityInfo",
"RealTimeBiddingSetting",
"ResponsiveDisplayAdControlSpec",
"ResponsiveDisplayAdInfo",
"ResponsiveSearchAdInfo",
"RuleBasedUserListInfo",
"Segments",
"ShoppingComparisonListingAdInfo",
"ShoppingProductAdInfo",
"ShoppingSmartAdInfo",
"SimilarUserListInfo",
"SitelinkAsset",
"SitelinkFeedItem",
"SmartCampaignAdInfo",
"StoreAttribute",
"StoreSalesMetadata",
"StoreSalesThirdPartyMetadata",
"StructuredSnippetAsset",
"StructuredSnippetFeedItem",
"TagSnippet",
"TargetCpa",
"TargetCpaSimulationPoint",
"TargetCpaSimulationPointList",
"TargetCpm",
"TargetImpressionShare",
"TargetImpressionShareSimulationPoint",
"TargetImpressionShareSimulationPointList",
"TargetRestriction",
"TargetRestrictionOperation",
"TargetRoas",
"TargetRoasSimulationPoint",
"TargetRoasSimulationPointList",
"TargetSpend",
"TargetingSetting",
"TextAdInfo",
"TextAsset",
"TextLabel",
"TextMessageFeedItem",
"TopicInfo",
"TransactionAttribute",
"UnknownListingDimensionInfo",
"UrlCollection",
"UserAttribute",
"UserData",
"UserIdentifier",
"UserInterestInfo",
"UserListActionInfo",
"UserListDateRuleItemInfo",
"UserListInfo",
"UserListLogicalRuleInfo",
"UserListNumberRuleItemInfo",
"UserListRuleInfo",
"UserListRuleItemGroupInfo",
"UserListRuleItemInfo",
"UserListStringRuleItemInfo",
"Value",
"VideoAdInfo",
"VideoBumperInStreamAdInfo",
"VideoNonSkippableInStreamAdInfo",
"VideoOutstreamAdInfo",
"VideoResponsiveAdInfo",
"VideoTrueViewDiscoveryAdInfo",
"VideoTrueViewInStreamAdInfo",
"WebhookDelivery",
"WebpageConditionInfo",
"WebpageInfo",
"WebpageSampleInfo",
"YearMonth",
"YearMonthRange",
"YouTubeChannelInfo",
"YouTubeVideoInfo",
"YoutubeVideoAsset",
)
|
networkx/algorithms/operators/__init__.py | jebogaert/networkx | 10,024 | 12613353 | from networkx.algorithms.operators.all import *
from networkx.algorithms.operators.binary import *
from networkx.algorithms.operators.product import *
from networkx.algorithms.operators.unary import *
|
tests/meltano/core/job/test_stale_job_failer.py | siilats/meltano | 122 | 12613355 | from datetime import datetime, timedelta
import pytest
from meltano.core.job import Job
from meltano.core.job.stale_job_failer import StaleJobFailer
class TestStaleJobFailer:
@pytest.fixture
def live_job(self, session):
job = Job(job_id="test")
job.start()
job.save(session)
return job
@pytest.fixture
def stale_job(self, session):
job = Job(job_id="test")
job.start()
job.last_heartbeat_at = datetime.utcnow() - timedelta(minutes=10)
job.save(session)
return job
@pytest.fixture
def other_stale_job(self, session):
job = Job(job_id="other")
job.start()
job.last_heartbeat_at = datetime.utcnow() - timedelta(minutes=10)
job.save(session)
return job
@pytest.fixture
def complete_job(self, session):
job = Job(job_id="other")
job.start()
job.success()
job.save(session)
return job
def test_fail_stale_jobs(
self, live_job, stale_job, other_stale_job, complete_job, session
):
assert stale_job.is_stale()
assert other_stale_job.is_stale()
failer = StaleJobFailer()
failer.fail_stale_jobs(session)
session.refresh(live_job)
session.refresh(stale_job)
session.refresh(other_stale_job)
session.refresh(complete_job)
# Leaves non-stale jobs alone
assert live_job.is_running()
assert complete_job.is_complete()
# Marks all stale jobs as failed
assert stale_job.has_error()
assert not stale_job.is_stale()
assert other_stale_job.has_error()
assert not other_stale_job.is_stale()
def test_fail_stale_jobs_with_job_id(
self, live_job, stale_job, other_stale_job, complete_job, session
):
assert stale_job.is_stale()
assert other_stale_job.is_stale()
failer = StaleJobFailer(job_id=stale_job.job_id)
failer.fail_stale_jobs(session)
session.refresh(live_job)
session.refresh(stale_job)
session.refresh(other_stale_job)
session.refresh(complete_job)
# Leaves non-stale jobs alone
assert live_job.is_running()
assert complete_job.is_complete()
# Marks stale jobs with the job ID as failed
assert stale_job.has_error()
assert not stale_job.is_stale()
# Leaves stale jobs with a different job ID alone
assert other_stale_job.is_stale()
|
lib/python/treadmill/spawn/utils.py | vrautela/treadmill | 133 | 12613365 | <filename>lib/python/treadmill/spawn/utils.py
"""Treadmill spawn utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import zlib
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
def get_user_safe(path):
"""Gets the user of the given path.
"""
try:
return utils.get_username(os.stat(path).st_uid)
except (OSError, KeyError):
_LOGGER.warning('Could not get user of path %r', path)
return None
def format_bucket(bucket):
"""Formats the bucket to a string.
:params ``int`` bucket:
Bucket number
:returns:
``str`` - Formatted (0 padded) bucket number.
"""
return '{:06d}'.format(bucket)
def get_bucket_for_name(name, buckets_nb):
"""Gets the bucket for the given name.
:params ``str`` name:
Name of the instance.
:params ``int`` buckets_nb:
Number of buckets
"""
return format_bucket(zlib.crc32(name.encode()) % buckets_nb)
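# Illustrative examples (a sketch):
# format_bucket(42) -> '000042'
# get_bucket_for_name('some-instance#0001', 256) -> a zero-padded bucket id
# between '000000' and '000255', stable for a given name.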
def get_instance_path(path, spawn_paths):
"""Gets the instance path for the app.
"""
name = os.path.basename(path)
if name.endswith('.yml'):
name = name[:-4]
bucket = get_bucket_for_name(name, spawn_paths.buckets)
job_path = os.path.join(spawn_paths.jobs_dir, name)
bucket_path = os.path.join(spawn_paths.running_dir, bucket)
running_path = os.path.join(bucket_path, name)
return job_path, bucket_path, running_path
|
tools/accuracy_checker/openvino/tools/accuracy_checker/adapters/mask_rcnn_with_text.py | TolyaTalamanov/open_model_zoo | 2,201 | 12613382 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .mask_rcnn import MaskRCNNAdapter
from ..config import StringField, NumberField
from ..representation import TextDetectionPrediction
class MaskRCNNWithTextAdapter(MaskRCNNAdapter):
__provider__ = 'mask_rcnn_with_text'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'classes_out': StringField(
description="Name of output layer with information about classes.",
optional=False
),
'scores_out': StringField(
description="Name of output layer with bbox scores.",
optional=True
),
'boxes_out': StringField(
description="Name of output layer with bboxes.",
optional=False
),
'raw_masks_out': StringField(
description='Name of output layer with raw instances masks.',
optional=False
),
'texts_out': StringField(
description='Name of output layer with texts.',
optional=False
),
'confidence_threshold': NumberField(
description='Confidence threshold that is used to filter out detected instances.',
optional=False
),
})
return parameters
def configure(self):
self.classes_out = self.get_value_from_config('classes_out')
self.scores_out = self.get_value_from_config('scores_out')
self.boxes_out = self.get_value_from_config('boxes_out')
self.num_detections_out = self.get_value_from_config('num_detections_out')
self.raw_masks_out = self.get_value_from_config('raw_masks_out')
self.texts_out = self.get_value_from_config('texts_out')
self.confidence_threshold = self.get_value_from_config('confidence_threshold')
self.mask_processor = self.mask_to_result if not self.scores_out else self.mask_to_result_old
self.outputs_verified = False
def select_output_blob(self, outputs):
super().select_output_blob(outputs)
self.texts_out = self.check_output_name(self.texts_out, outputs)
def process(self, raw, identifiers, frame_meta):
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.outputs_verified:
self.select_output_blob(raw_outputs)
classes = raw_outputs[self.classes_out]
if self.scores_out:
valid_detections_mask = classes > 0
scores = raw_outputs[self.scores_out][valid_detections_mask]
else:
scores = raw_outputs[self.boxes_out][:, 4]
valid_detections_mask = scores > 0
scores = scores[valid_detections_mask]
classes = classes[valid_detections_mask].astype(np.uint32)
boxes = raw_outputs[self.boxes_out][valid_detections_mask, :4]
raw_masks = raw_outputs[self.raw_masks_out][valid_detections_mask]
texts = raw_outputs[self.texts_out][valid_detections_mask]
confidence_filter = scores > self.confidence_threshold
classes = classes[confidence_filter]
boxes = boxes[confidence_filter]
texts = texts[confidence_filter]
raw_masks = raw_masks[confidence_filter]
text_filter = texts != ''
classes = classes[text_filter]
boxes = boxes[text_filter]
texts = texts[text_filter]
raw_masks = raw_masks[text_filter]
results = []
for identifier, image_meta in zip(identifiers, frame_meta):
im_scale_x, im_scale_y = image_meta['scale_x'], image_meta['scale_y']
img_h, img_w = image_meta['image_size'][:2]
boxes[:, :4] /= np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y])
boxes[:, 0:4:2] = np.clip(boxes[:, 0:4:2], 0, img_w - 1)
boxes[:, 1:4:2] = np.clip(boxes[:, 1:4:2], 0, img_h - 1)
segms = self.mask_processor(
boxes,
classes,
raw_masks,
num_classes=1,
mask_thr_binary=0.5,
img_size=(img_h, img_w)
)
rectangles = self.masks_to_rects(segms[0])
results.append(
TextDetectionPrediction(identifier, points=rectangles, description=texts))
return results
@staticmethod
def masks_to_rects(masks):
rects = []
for mask in masks:
decoded_mask = mask.astype(np.uint8)
contours = cv2.findContours(decoded_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour = sorted(contours, key=lambda x: -cv2.contourArea(x))[0]
xys = cv2.boxPoints(cv2.minAreaRect(contour))
rects.append(xys)
return rects
@staticmethod
def mask_to_result(det_bboxes,
det_labels,
det_masks,
num_classes,
mask_thr_binary=0.5,
img_size=None):
masks = det_masks
bboxes = det_bboxes[:, :4]
labels = det_labels
cls_masks = [[] for _ in range(num_classes)]
for bbox, label, mask in zip(bboxes, labels, masks):
x0, y0, x1, y1 = bbox
src_points = np.float32([[0, 0], [0, mask.shape[0]], [mask.shape[1], mask.shape[0]]]) - 0.5
dst_points = np.float32([[x0, y0], [x0, y1], [x1, y1]]) - 0.5
transform_matrix = cv2.getAffineTransform(src_points, dst_points)
mask = cv2.warpAffine(mask, transform_matrix, img_size[::-1])
mask = (mask >= mask_thr_binary).astype(np.uint8)
cls_masks[label].append(mask)
return cls_masks
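    # Note on mask_to_result above: each fixed-resolution instance mask is warped onto
    # its detected box with an affine transform, thresholded at mask_thr_binary, and
    # collected per class as a full-image binary mask.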
@staticmethod
def mask_to_result_old(det_bboxes,
det_labels,
det_masks,
num_classes,
mask_thr_binary=0.5,
img_size=None):
def expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def segm_postprocess(box, raw_cls_mask, im_h, im_w, full_image_mask=False, encode=False):
# Add zero border to prevent upsampling artifacts on segment borders.
raw_cls_mask = np.pad(raw_cls_mask, ((1, 1), (1, 1)), 'constant', constant_values=0)
extended_box = expand_boxes(box[np.newaxis, :], raw_cls_mask.shape[0] / (raw_cls_mask.shape[0] - 2.0))[
0]
extended_box = extended_box.astype(int)
w, h = np.maximum(extended_box[2:] - extended_box[:2] + 1, 1) # pylint: disable=E0633
x0, y0 = np.clip(extended_box[:2], a_min=0, a_max=[im_w, im_h])
x1, y1 = np.clip(extended_box[2:] + 1, a_min=0, a_max=[im_w, im_h])
raw_cls_mask = cv2.resize(raw_cls_mask, (w, h)) > 0.5
mask = raw_cls_mask.astype(np.uint8)
if full_image_mask:
# Put an object mask in an image mask.
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
mask_start_y = y0 - extended_box[1]
mask_end_y = y1 - extended_box[1]
mask_start_x = x0 - extended_box[0]
mask_end_x = x1 - extended_box[0]
im_mask[y0:y1, x0:x1] = mask[mask_start_y:mask_end_y, mask_start_x:mask_end_x]
else:
original_box = box.astype(int)
x0, y0 = np.clip(original_box[:2], a_min=0, a_max=[im_w, im_h])
x1, y1 = np.clip(original_box[2:] + 1, a_min=0, a_max=[im_w, im_h])
im_mask = np.ascontiguousarray(
mask[(y0 - original_box[1]):(y1 - original_box[1]), (x0 - original_box[0]):(x1 - original_box[0])]
)
return im_mask
masks = []
per_obj_raw_masks = []
for cls, raw_mask in zip(det_labels, det_masks):
per_obj_raw_masks.append(raw_mask[cls, ...])
for box, raw_cls_mask in zip(det_bboxes, per_obj_raw_masks):
masks.append(segm_postprocess(box, raw_cls_mask, *img_size, True, False))
return [masks]
|
run-debug-webhooks.py | MiniCodeMonkey/machine | 101 | 12613391 | #!/usr/bin/env python
''' Run openaddr.ci.web.app in Flask debug mode.
'''
from openaddr.ci.web import app
if __name__ == '__main__':
app.run(debug=True)
|
ocr/tess/split_wide_boxes.py | susannahsoon/oldperth | 302 | 12613395 | <filename>ocr/tess/split_wide_boxes.py
#!/usr/bin/env python
"""Split boxes that are suspicously wide.
This maps box file --> box file.
"""
import copy
import sys
from box import BoxLine, load_box_file
def split_box(box):
"""Returns a list of (possibly just one) boxes, with appropriate widths."""
w = box.right - box.left
h = box.top - box.bottom
assert h > 0
if w < 21: return [box] # probably just a single letter.
if h > w: return [box] # maybe it's just large, not wide
num_ways = int(round(w / 12.0))
assert num_ways > 1, w
boxes = []
for i in range(0, num_ways):
b = copy.deepcopy(box)
b.left = box.left + int(round((1.0 * i / num_ways * w)))
b.right = box.left + int(round((1.0 * (i + 1) / num_ways * w)))
boxes.append(b)
return boxes
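# Illustrative example (a sketch): a box 40 px wide and 12 px tall gives
# num_ways = round(40 / 12.0) = 3, so split_box returns three boxes of widths
# 13, 14 and 13 px covering the original span.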
def split_boxes(boxes):
out_boxes = []
for box in boxes:
out_boxes += split_box(box)
return out_boxes
if __name__ == '__main__':
for path in sys.argv[1:]:
boxes = load_box_file(path)
out_boxes = split_boxes(boxes)
out_path = path.replace('.box', '.split.box')
open(out_path, 'w').write('\n'.join(str(x) for x in out_boxes))
|
backend/setup.py | jtimberlake/cloud-inquisitor | 462 | 12613404 | import setuptools
setuptools.setup(
name='cloud_inquisitor',
version='3.0.0',
entry_points={
'console_scripts': [
'cloud-inquisitor = cloud_inquisitor.cli:cli'
],
'cloud_inquisitor.plugins.commands': [
'auth = cloud_inquisitor.plugins.commands.auth:Auth',
'import-saml = cloud_inquisitor.plugins.commands.saml:ImportSAML',
'list_plugins = cloud_inquisitor.plugins.commands.plugins:ListPlugins',
'scheduler = cloud_inquisitor.plugins.commands.scheduler:Scheduler',
'setup = cloud_inquisitor.plugins.commands.setup:Setup',
'userdata = cloud_inquisitor.plugins.commands.userdata:UserData',
'worker = cloud_inquisitor.plugins.commands.scheduler:Worker',
],
'cloud_inquisitor.plugins.notifiers': [
'email_notify = cloud_inquisitor.plugins.notifiers.email:EmailNotifier',
'slack_notify = cloud_inquisitor.plugins.notifiers.slack:SlackNotifier',
],
'cloud_inquisitor.plugins.types': [
'ami_type = cloud_inquisitor.plugins.types.resources:AMI',
'beanstalk_type = cloud_inquisitor.plugins.types.resources:BeanStalk',
'cloudfrontdist_type = cloud_inquisitor.plugins.types.resources:CloudFrontDist',
'dnsrecord_type = cloud_inquisitor.plugins.types.resources:DNSRecord',
'dnszone_type = cloud_inquisitor.plugins.types.resources:DNSZone',
'ebssnapshot_type = cloud_inquisitor.plugins.types.resources:EBSSnapshot',
'ebsvolume_type = cloud_inquisitor.plugins.types.resources:EBSVolume',
'ec2instance_type = cloud_inquisitor.plugins.types.resources:EC2Instance',
'rdsinstance_type = cloud_inquisitor.plugins.types.resources:RDSInstance',
's3bucket_type = cloud_inquisitor.plugins.types.resources:S3Bucket',
'vpc_type = cloud_inquisitor.plugins.types.resources:VPC'
],
'cloud_inquisitor.plugins.types.accounts': [
'AWS = cloud_inquisitor.plugins.types.accounts:AWSAccount',
'DNS: AXFR = cloud_inquisitor.plugins.types.accounts:AXFRAccount',
'DNS: CloudFlare = cloud_inquisitor.plugins.types.accounts:CloudFlareAccount',
],
'cloud_inquisitor.plugins.schedulers': [],
'cloud_inquisitor.plugins.views': [
'account_details = cloud_inquisitor.plugins.views.accounts:AccountDetail',
'account_imex = cloud_inquisitor.plugins.views.accounts:AccountImportExport',
'account_list = cloud_inquisitor.plugins.views.accounts:AccountList',
'auditlog_get = cloud_inquisitor.plugins.views.auditlog:AuditLogGet',
'auditlog_list = cloud_inquisitor.plugins.views.auditlog:AuditLogList',
'config = cloud_inquisitor.plugins.views.config:ConfigGet',
'config_import_export = cloud_inquisitor.plugins.views.config:ConfigImportExport',
'config_list = cloud_inquisitor.plugins.views.config:ConfigList',
'config_namespace_get = cloud_inquisitor.plugins.views.config:NamespaceGet',
'config_namespace_list = cloud_inquisitor.plugins.views.config:Namespaces',
'email = cloud_inquisitor.plugins.views.emails:EmailGet',
'email_list = cloud_inquisitor.plugins.views.emails:EmailList',
'log = cloud_inquisitor.plugins.views.logs:Logs',
'log_details = cloud_inquisitor.plugins.views.logs:LogDetails',
'metadata = cloud_inquisitor.plugins.views.metadata:MetaData',
'password_reset = cloud_inquisitor.plugins.views.users:PasswordReset',
'role_get = cloud_inquisitor.plugins.views.roles:RoleGet',
'role_list = cloud_inquisitor.plugins.views.roles:RoleList',
'search = cloud_inquisitor.plugins.views.search:Search',
'stats = cloud_inquisitor.plugins.views.stats:StatsGet',
'template_get = cloud_inquisitor.plugins.views.templates:TemplateGet',
'template_list = cloud_inquisitor.plugins.views.templates:TemplateList',
'user_details = cloud_inquisitor.plugins.views.users:UserDetails',
'user_list = cloud_inquisitor.plugins.views.users:UserList',
]
},
packages=setuptools.find_packages(
exclude=[
'*.tests',
'*.tests.*',
'tests.*',
'tests'
]
),
include_package_data=True,
zip_safe=False,
# Requirements for setup and installation
setup_requires=['setuptools_scm'],
install_requires=[
'Flask-Compress~=1.4',
'Flask-Migrate~=2.1',
'Flask-RESTful~=0.3',
'Flask-SQLAlchemy~=2.3',
'Flask-Script~=2.0',
'Flask~=0.12',
'Jinja2~=2.9',
'MarkupSafe~=1.0',
'PyJWT~=1.5',
'SQLAlchemy~=1.1',
'argon2-cffi~=16.3',
'boto3~=1.9',
'click~=6.7',
'enum34~=1.1',
'flake8-comprehensions~=1.4',
'flake8-deprecated~=1.2',
'flake8-pep3101~=1.1',
'flake8-quotes~=0.9',
'flake8~=3.3',
'gunicorn~=19.7',
'ipython~=6.2',
'moto~=1.3',
'munch~=2.1',
'mysqlclient~=1.3',
'pyexcel-xlsx~=0.5',
'pytest~=5.0',
'pytest-cov~=2.6',
'rainbow-logging-handler~=2.2',
'requests~=2.19',
'slackclient~=1.0',
'sqlservice~=0.20',
],
# Metadata
description='Tool to enforce ownership and data security within cloud environments',
long_description='Please see https://github.com/RiotGames/cloud-inquisitor for more information',
author='<NAME> Security',
author_email='<EMAIL>',
url='https://github.com/RiotGames/cloud-inquisitor',
license='Apache 2.0',
classifiers=[
# Current project status
'Development Status :: 4 - Beta',
# Audience
'Intended Audience :: System Administrators',
'Intended Audience :: Information Technology',
# License information
'License :: OSI Approved :: Apache Software License',
# Supported python versions
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
# Frameworks used
'Framework :: Flask',
# Supported OS's
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
# Extra metadata
'Environment :: Console',
'Natural Language :: English',
'Topic :: Security',
'Topic :: Utilities',
],
keywords='cloud security',
)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMayonaizeshrimpWordpressCom.py | fake-name/ReadableWebProxy | 193 | 12613411 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractMayonaizeshrimpWordpressCom.py
def extractMayonaizeshrimpWordpressCom(item):
'''
Parser for 'mayonaizeshrimp.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Love Switch', 'Love Switch', 'translated'),
('You Think It’s Fine to Just Summon Me to Another World? Huh?', 'You Think It’s Fine to Just Summon Me to Another World? Huh?', 'translated'),
('Impregnable ≪Dreadnought≫', 'Impregnable ≪Dreadnought≫', 'translated'),
('No Fatigue', 'No Fatigue: 24-jikan Tatakaeru Otoko no Tenseitan', 'translated'),
('Isekai GM', 'The GM Has Logged Into A Different World', 'translated'),
('Master\'s Smile', 'Master\'s Smile', 'translated'),
('heibon', 'E? Heibon Desu yo??', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('YTIF ', 'You Think It\'s Fine to Just Summon Me to Another World? Huh?', 'translated'),
('ToK ', 'Tower of Karma', 'translated'),
('Isekai GM ', 'The GM Has Logged Into A Different World', 'translated'),
('LHA chapter ', 'The Little Hero of Alcatar', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
python-exercises/while_loop.py | nakonovalov/Python-for-beginners | 158 | 12613414 | <filename>python-exercises/while_loop.py
# A simple while loop example
user_input = input('Hey how are you ')
while user_input != 'stop copying me':
print(user_input)
user_input = input()
else:
print('UGHH Fine') |
src/plot.py | steveandpeggyb/walking-salesman | 1,760 | 12613427 | <reponame>steveandpeggyb/walking-salesman
import matplotlib.pyplot as plt
import matplotlib as mpl
def plot_network(cities, neurons, name='diagram.png', ax=None):
"""Plot a graphical representation of the problem"""
mpl.rcParams['agg.path.chunksize'] = 10000
if not ax:
fig = plt.figure(figsize=(5, 5), frameon = False)
axis = fig.add_axes([0,0,1,1])
axis.set_aspect('equal', adjustable='datalim')
plt.axis('off')
axis.scatter(cities['x'], cities['y'], color='red', s=4)
axis.plot(neurons[:,0], neurons[:,1], 'r.', ls='-', color='#0063ba', markersize=2)
plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
plt.close()
else:
ax.scatter(cities['x'], cities['y'], color='red', s=4)
ax.plot(neurons[:,0], neurons[:,1], 'r.', ls='-', color='#0063ba', markersize=2)
return ax
def plot_route(cities, route, name='diagram.png', ax=None):
"""Plot a graphical representation of the route obtained"""
mpl.rcParams['agg.path.chunksize'] = 10000
if not ax:
fig = plt.figure(figsize=(5, 5), frameon = False)
axis = fig.add_axes([0,0,1,1])
axis.set_aspect('equal', adjustable='datalim')
plt.axis('off')
axis.scatter(cities['x'], cities['y'], color='red', s=4)
route = cities.reindex(route)
route.loc[route.shape[0]] = route.iloc[0]
axis.plot(route['x'], route['y'], color='purple', linewidth=1)
plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
plt.close()
else:
ax.scatter(cities['x'], cities['y'], color='red', s=4)
route = cities.reindex(route)
route.loc[route.shape[0]] = route.iloc[0]
ax.plot(route['x'], route['y'], color='purple', linewidth=1)
return ax
|
examples/adaptive_hgb/adaptive_hgb_client.py | cuiboyuan/plato | 135 | 12613437 | """
A federated learning client with support for Adaptive hierarchical gradient blending.
"""
import copy
import logging
import os
import time
from dataclasses import dataclass
from plato.clients import base, simple
from plato.config import Config
from plato.models.multimodal import blending
from plato.samplers import registry as samplers_registry
# simple.Client
# arguments: model=None, datasource=None, algorithm=None, trainer=None
# One can either set these four parameters at initialization, or the client will
# define them itself based on the configuration file.
# The following methods are required by the client:
# - configure: register the trainer and the algorithm for this client
# - load_data: obtain the trainset and testset from the datasource
# - load_payload: the algorithm is called to load the server's model onto this client
# - train: self.trainer.train runs the local training stage
@dataclass
class Report(base.Report):
"""Report from a simple client, to be sent to the federated learning server."""
training_time: float
data_loading_time: float
delta_o: float
delta_g: float
class Client(simple.Client):
"""A federated learning client with support for Adaptive gradient blending.
"""
def __init__(self,
model=None,
datasource=None,
algorithm=None,
trainer=None):
super().__init__()
self.model = model
self.datasource = datasource
self.algorithm = algorithm
self.trainer = trainer
self.trainset = None # Training dataset
        self.evalset = None  # Validation/evaluation dataset
self.testset = None # Testing dataset
self.sampler = None
self.modality_sampler = None
self.data_loading_time = None
self.data_loading_time_sent = False
model_name = Config().trainer.model_name
filename = f"{model_name}_{self.client_id}_{Config().params['run_id']}.pth"
save_dir = os.path.join("learningModels",
"client_" + str(self.client_id))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
self.recored_model_path = os.path.join(save_dir, filename)
self.optimal_blending_weights = dict()
# for example of the self.optimal_blending_weights:
# {"RGB": 0.24, "Flow": 0.48, "Audio": 0.11, "Fused"; 17}
def record_model(self):
""" Save the client's model to the memory """
self.trainer.save_model(filename=self.recored_model_path)
def load_recorded_model(self):
""" Loaded the saved model """
self.trainer.load_model(filename=self.recored_model_path)
def load_data(self) -> None:
"""Generating data and loading them onto this client."""
data_loading_start_time = time.time()
logging.info("[Client #%d] Loading its data source...", self.client_id)
self.data_loaded = True
logging.info("[Client #%d] Dataset size: %s", self.client_id,
self.datasource.num_train_examples())
# Setting up the data sampler
self.sampler = samplers_registry.get(self.datasource, self.client_id)
# Setting up the modality sampler
self.modality_sampler = samplers_registry.multimodal_get(
datasource=self.datasource, client_id=self.client_id)
# PyTorch uses samplers when loading data with a data loader
self.trainset = self.datasource.get_train_set(
self.modality_sampler.get())
        self.evalset = self.datasource.get_val_set()
if Config().clients.do_test:
# Set the testset if local testing is needed
self.testset = self.datasource.get_test_set()
self.data_loading_time = time.time() - data_loading_start_time
def local_global_gradient_blending(self, local_model,
global_eval_avg_loses,
global_eval_subtrain_avg_losses,
local_eval_avg_losses,
local_eval_subtrain_avg_losses):
""" Blend the gradients for the received global model and the local model """
# eval_avg_losses, eval_subtrainset_avg_losses,
# local_train_avg_losses, local_eval_avg_losses
# obtain the global model directly as the global_model is actually the self.model
# global_model = self.model
# the existing modules should be the networks for modalities and the fusion net
# For example: ["RGB", "Flow", "Audio", "Fused"]
existing_modules_names = global_eval_avg_loses.keys()
for module_nm in existing_modules_names:
md_global_eval_loss = global_eval_avg_loses[module_nm]
md_global_eval_trainset_loss = global_eval_subtrain_avg_losses[
module_nm]
md_local_eval_loss = local_eval_avg_losses[module_nm]
md_local_eval_trainset_loss = local_eval_subtrain_avg_losses[
module_nm]
local_global_ogr = blending.OGR_n2N(
n_eval_avg_loss=md_local_eval_loss,
n_train_avg_loss=md_local_eval_trainset_loss,
N_eval_avg_loss=md_global_eval_loss,
N_train_avg_loss=md_global_eval_trainset_loss)
# merge the corresponding local module, global module
global_module_wt = self.model.module_name_net_mapper[
module_nm].weight.data
local_module_wt = local_model.module_name_net_mapper[
module_nm].weight.data
merged_module_wt = local_global_ogr * local_module_wt + (
1 - local_global_ogr) * global_module_wt
            # Assign the blended weights directly to self.model (the global copy)
            # so we do not need to keep a third set of module weights in memory
self.model.assing_weights(module_name=module_nm,
weights=merged_module_wt)
    def load_payload(self, server_payload) -> None:
        """Loading the server model onto this client."""
        # In general, the previous local model could be obtained directly with
        # local_model = self.model, because the weights from the server have not
        # yet been assigned to self.model. We instead reload the weights from the
        # saved file, in case the client's resources were released after it was
        # stopped.
        self.load_recorded_model()
local_model = copy.deepcopy(self.model)
self.algorithm.load_weights(server_payload)
        # blend the local and global models using the OGR (overfitting-generalization ratio)
eval_avg_losses, eval_subtrainset_avg_losses, \
local_eval_avg_losses, \
local_train_avg_losses = self.trainer.obtain_local_global_ogr_items(
                trainset=self.trainset, evalset=self.valset)
self.local_global_gradient_blending(
local_model=local_model,
            global_eval_avg_losses=eval_avg_losses,
global_eval_subtrain_avg_losses=eval_subtrainset_avg_losses,
local_eval_avg_losses=local_eval_avg_losses,
local_eval_subtrain_avg_losses=local_train_avg_losses)
self.optimal_blending_weights = self.adaptive_gradient_blending_weights(
eval_avg_losses=eval_avg_losses,
eval_train_avg_losses=eval_subtrainset_avg_losses,
local_eval_avg_losses=local_eval_avg_losses,
local_train_avg_losses=local_train_avg_losses)
def adaptive_gradient_blending_weights(self, eval_avg_losses,
eval_train_avg_losses,
local_eval_avg_losses,
local_train_avg_losses):
""" Obtain the gradient blending weights """
modalities_losses_n = {
"eval": local_eval_avg_losses,
"train": local_train_avg_losses
}
modalities_losses_N = {
"eval": eval_avg_losses,
"train": eval_train_avg_losses
}
optimal_weights = blending.get_optimal_gradient_blend_weights(
modalities_losses_n, modalities_losses_N)
return optimal_weights
def obtain_delta_og(self):
""" Compute the overfitting-generalization-ratio """
start_eval_loss = self.trainer.global_losses_trajectory["eval"][0]
start_train_loss = self.trainer.global_losses_trajectory["train"][0]
end_eval_loss = self.trainer.global_losses_trajectory["eval"][-1]
end_train_loss = self.trainer.global_losses_trajectory["train"][-1]
delta_o = blending.compute_delta_overfitting_O(
n_eval_avg_loss=start_eval_loss,
n_train_avg_loss=start_train_loss,
N_eval_avg_loss=end_eval_loss,
N_train_avg_loss=end_train_loss)
delta_g = blending.compute_delta_generalization(
eval_avg_loss_n=start_eval_loss, eval_avg_loss_N=end_eval_loss)
return delta_o, delta_g
async def train(self):
"""The machine learning training workload on a client."""
training_start_time = time.time()
logging.info("[Client #%d] Started training.", self.client_id)
# Perform model training
        if not self.trainer.train(self.trainset, self.valset, self.sampler,
                                  self.optimal_blending_weights):
# Training failed
await self.sio.disconnect()
# Extract model weights and biases
weights = self.algorithm.extract_weights()
# Obtain the delta O and delta G
delta_o, delta_g = self.obtain_delta_og()
# Generate a report for the server, performing model testing if applicable
if Config().clients.do_test:
accuracy = self.trainer.test(self.testset)
if accuracy == 0:
# The testing process failed, disconnect from the server
await self.sio.disconnect()
logging.info("[Client #{:d}] Test accuracy: {:.2f}%".format(
self.client_id, 100 * accuracy))
else:
accuracy = 0
training_time = time.time() - training_start_time
data_loading_time = 0
if not self.data_loading_time_sent:
data_loading_time = self.data_loading_time
self.data_loading_time_sent = True
return Report(self.sampler.trainset_size(), accuracy, training_time,
data_loading_time, delta_o, delta_g), weights
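# Editor's note: a minimal, self-contained sketch (not part of the original module) of the
# weight-merging rule used in local_global_gradient_blending() above. Plain floats stand in
# for torch tensors, and the OGR value is a hypothetical stand-in for blending.OGR_n2N().
def _merge_module_weights_sketch():
    local_module_wt, global_module_wt = 0.8, 0.2   # illustrative per-module weights
    local_global_ogr = 0.25                        # assumed ratio in [0, 1]
    merged_module_wt = (local_global_ogr * local_module_wt
                        + (1 - local_global_ogr) * global_module_wt)
    return merged_module_wt                        # 0.25 * 0.8 + 0.75 * 0.2 = 0.35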
|
tests/Issue11/issue11.py | ryogrid/shellbags | 128 | 12613475 | #!/usr/bin/python
import os
import sys
# from http://stackoverflow.com/a/9806045/87207
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentparentdir = os.path.dirname(parentdir)
sys.path.append(parentparentdir)
from ShellItems import SHITEMLIST
sys.path.pop()
def hex_dump(data):
"""
see http://code.activestate.com/recipes/142812/
"""
byte_format = {}
for c in xrange(256):
if c > 126:
byte_format[c] = '.'
elif len(repr(chr(c))) == 3 and chr(c):
byte_format[c] = chr(c)
else:
byte_format[c] = '.'
def format_bytes(s):
return "".join([byte_format[ord(c)] for c in s])
def dump(src, length=16):
N = 0
result = ''
while src:
s, src = src[:length], src[length:]
hexa = ' '.join(["%02X" % ord(x) for x in s])
s = format_bytes(s)
result += "%04X %-*s %s\n" % (N, length * 3, hexa, s)
N += length
return result
return dump(data)
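# Editor's note (not part of the original script): hex_dump() returns one line per 16 bytes,
# formatted as "<offset> <hex bytes> <printable chars>", e.g. a 4-byte input "ABC1" yields a
# single line starting with "0000 41 42 43 31" and ending with "ABC1".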
def test(filename):
with open(filename) as f:
t = f.read()
print hex_dump(t)
l = SHITEMLIST(t, 0, False)
for index, item in enumerate(l.items()):
print "item:", index
print "type:", item.__class__.__name__
print "name:", item.name()
print "mtime:", item.m_date()
def main():
import sys
hive = sys.argv[1]
import hashlib
m = hashlib.md5()
with open(hive, 'rb') as f:
m.update(f.read())
if m.hexdigest() != "14f997a39bb131ff2a03aa3c62dc32ea":
print "Please use the binary file with MD5 14f997a39bb131ff2a03aa3c62dc32ea"
sys.exit(-1)
test(hive)
if __name__ == "__main__":
main()
|
sympy/printing/python.py | ethankward/sympy | 445 | 12613481 | <reponame>ethankward/sympy<gh_stars>100-1000
from __future__ import print_function, division
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
"""A printer which converts an expression into its Python interpretation."""
def __init__(self, settings=None):
super(PythonPrinter, self).__init__(settings)
self.symbols = []
self.functions = []
# Create print methods for classes that should use StrPrinter instead
# of ReprPrinter.
for name in STRPRINT:
f_name = "_print_%s" % name
f = getattr(StrPrinter, f_name)
setattr(PythonPrinter, f_name, f)
def _print_Function(self, expr):
func = expr.func.__name__
if not hasattr(sympy, func) and not func in self.functions:
self.functions.append(func)
return StrPrinter._print_Function(self, expr)
    # procedure (!) for defining symbols which have to be defined in print_python()
def _print_Symbol(self, expr):
symbol = self._str(expr)
if symbol not in self.symbols:
self.symbols.append(symbol)
return StrPrinter._print_Symbol(self, expr)
def _print_module(self, expr):
raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
"""Return Python interpretation of passed expression
(can be passed to the exec() function without any modifications)"""
printer = PythonPrinter(settings)
exprp = printer.doprint(expr)
result = ''
# Returning found symbols and functions
renamings = {}
for symbolname in printer.symbols:
newsymbolname = symbolname
# Escape symbol names that are reserved python keywords
if kw.iskeyword(newsymbolname):
while True:
newsymbolname += "_"
if (newsymbolname not in printer.symbols and
newsymbolname not in printer.functions):
renamings[sympy.Symbol(
symbolname)] = sympy.Symbol(newsymbolname)
break
result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'
for functionname in printer.functions:
newfunctionname = functionname
# Escape function names that are reserved python keywords
if kw.iskeyword(newfunctionname):
while True:
newfunctionname += "_"
if (newfunctionname not in printer.symbols and
newfunctionname not in printer.functions):
renamings[sympy.Function(
functionname)] = sympy.Function(newfunctionname)
break
result += newfunctionname + ' = Function(\'' + functionname + '\')\n'
if renamings:
exprp = expr.subs(renamings)
result += 'e = ' + printer._str(exprp)
return result
def print_python(expr, **settings):
"""Print output of python() function"""
print(python(expr, **settings))
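# Editor's note: a small usage sketch, not part of the original module; it uses sympy's
# public symbols() helper together with the python() function defined above.
def _python_printer_example():  # pragma: no cover
    x, y = sympy.symbols("x y")
    # Produces source such as:
    #   x = Symbol('x')
    #   y = Symbol('y')
    #   e = x + y
    return python(x + y)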
|
data_collection/gazette/spiders/sc_corupa.py | kaiocp/querido-diario | 454 | 12613521 | <gh_stars>100-1000
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCorupaSpider(FecamGazetteSpider):
name = "sc_corupa"
FECAM_QUERY = "cod_entidade:78"
TERRITORY_ID = "4204509"
|
backend/src/baserow/test_utils/fixtures/row.py | ashishdhngr/baserow | 839 | 12613552 | from baserow.contrib.database.rows.handler import RowHandler
class RowFixture:
def create_row_for_many_to_many_field(
self, table, field, values, user, model=None, **kwargs
):
"""
This is a helper function for creating a row with a many-to-many field that
preserves the order of the elements that are being passed in as a list. This is
done by creating the row with the first element in the list and successively
updating the row for each additional element in the list, mimicking how the
relationships would be added when using the frontend.
Iteration steps:
Example list: [1, 2, 3]
First = create the row with: [1]
Second = update the row with: [1, 2]
Final = update the row with: [1, 2, 3]
"""
field_id = f"field_{field.id}"
row_handler = RowHandler()
if model is None:
model = table.get_model()
# If the values list is empty, we create an empty row and return that row.
if len(values) == 0:
return row_handler.create_row(
user=user, table=table, model=model, values={field_id: values}
)
row = None
for index, value in enumerate(values):
values_to_update = values[: index + 1]
if index == 0:
row = row_handler.create_row(
user=user,
table=table,
model=model,
values={field_id: values_to_update},
)
else:
row = row_handler.update_row(
user=user,
table=table,
model=model,
row_id=row.id,
values={field_id: values_to_update},
)
return row
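# Editor's note: a self-contained sketch (not part of the original fixture) of the
# incremental create-then-update sequence documented in the docstring above.
def _incremental_values_example(values):
    """Return the successive value lists the fixture sends, e.g. [1, 2, 3] ->
    [[1], [1, 2], [1, 2, 3]]."""
    return [values[: index + 1] for index in range(len(values))]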
|
rpython/jit/backend/x86/test/test_calling_convention.py | nanjekyejoannah/pypy | 381 | 12613555 | <filename>rpython/jit/backend/x86/test/test_calling_convention.py
from rpython.jit.backend.test.calling_convention_test import CallingConvTests
from rpython.jit.backend.x86 import codebuf
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.backend.x86.regloc import eax, esp
class TestCallingConv(CallingConvTests):
def make_function_returning_stack_pointer(self):
mc = codebuf.MachineCodeBlockWrapper()
mc.MOV(eax, esp)
mc.ADD_ri(eax.value, WORD)
mc.RET()
return mc.materialize(self.cpu, [])
def get_alignment_requirements(self):
return 16
|
homeassistant/components/alarmdecoder/sensor.py | learn-home-automation/core | 22,481 | 12613556 | <reponame>learn-home-automation/core
"""Support for AlarmDecoder sensors (Shows Panel Display)."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import SIGNAL_PANEL_MESSAGE
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> bool:
"""Set up for AlarmDecoder sensor."""
entity = AlarmDecoderSensor()
async_add_entities([entity])
return True
class AlarmDecoderSensor(SensorEntity):
"""Representation of an AlarmDecoder keypad."""
_attr_icon = "mdi:alarm-check"
_attr_name = "Alarm Panel Display"
_attr_should_poll = False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_PANEL_MESSAGE, self._message_callback
)
)
def _message_callback(self, message):
if self._attr_native_value != message.text:
self._attr_native_value = message.text
self.schedule_update_ha_state()
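# Editor's note (not part of the original platform): _message_callback() above is invoked
# when another part of the AlarmDecoder integration dispatches a keypad message, roughly:
#
#   from homeassistant.helpers.dispatcher import async_dispatcher_send
#   async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE, message)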
|
route/recent_record_reset.py | k0000k/openNAMU | 126 | 12613558 | <filename>route/recent_record_reset.py
from .tool.func import *
def recent_record_reset_2(conn, name):
curs = conn.cursor()
if admin_check() != 1:
return re_error('/error/3')
if flask.request.method == 'POST':
admin_check(None, 'record reset ' + name)
curs.execute(db_change("delete from history where ip = ?"), [name])
conn.commit()
return redirect('/record/' + url_pas(name))
else:
return easy_minify(flask.render_template(skin_check(),
imp = [name, wiki_set(), wiki_custom(), wiki_css(['(' + load_lang('record_reset') + ')', 0])],
data = '''
<form method="post">
<span>''' + load_lang('history_delete_warning') + '''</span>
<hr class="main_hr">
<button type="submit">''' + load_lang('reset') + '''</button>
</form>
''',
menu = [['record/' + url_pas(name), load_lang('return')]]
)) |
lektor/constants.py | yagebu/lektor | 4,104 | 12613561 | # Special value that identifies a target to the primary alt
PRIMARY_ALT = "_primary"
|
pdm/models/auth.py | houbie/pdm | 1,731 | 12613566 | from typing import List, Optional, Tuple
import click
from pdm._types import Source
from pdm.exceptions import PdmException
from pdm.models.pip_shims import MultiDomainBasicAuth
try:
import keyring
except ModuleNotFoundError:
keyring = None # type: ignore
class PdmBasicAuth(MultiDomainBasicAuth):
"""A custom auth class that differs from Pip's implementation in the
following ways:
- It shows an error message when credentials are not provided or correct.
"""
def __init__(
self, prompting: bool = True, index_urls: Optional[List[str]] = None
) -> None:
super().__init__(prompting=True, index_urls=index_urls)
self._real_prompting = prompting
def _prompt_for_password(
self, netloc: str
) -> Tuple[Optional[str], Optional[str], bool]:
if not self._real_prompting:
raise PdmException(
f"The credentials for {netloc} are incorrect. "
"Please run the command with `-v` option."
)
return super()._prompt_for_password(netloc)
def _should_save_password_to_keyring(self) -> bool:
if keyring is None:
click.secho(
"The provided credentials will not be saved into your system.\n"
"You can enable this by installing keyring:\n"
" pipx inject pdm keyring\n"
"or: pip install --user keyring",
err=True,
fg="yellow",
)
return super()._should_save_password_to_keyring()
def make_basic_auth(sources: List[Source], prompting: bool) -> PdmBasicAuth:
return PdmBasicAuth(prompting, [source["url"] for source in sources])
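# Editor's note: a minimal usage sketch (not part of the original module); the source entry
# below is a placeholder matching the expected {"name": ..., "url": ...} shape.
def _make_basic_auth_example():  # pragma: no cover
    sources = [{"name": "pypi", "url": "https://pypi.org/simple"}]
    return make_basic_auth(sources, prompting=True)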
|
pygsheets/authorization.py | samamorgan/pygsheets | 1,346 | 12613567 | <filename>pygsheets/authorization.py
# -*- coding: utf-8 -*-.
import os
import json
import warnings
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow, InstalledAppFlow
from google.auth.transport.requests import Request
from pygsheets.client import Client
try:
input = raw_input
except NameError:
pass
def _get_user_authentication_credentials(client_secret_file, scopes, credential_directory=None, local=False):
"""Returns user credentials."""
if credential_directory is None:
credential_directory = os.getcwd()
elif credential_directory == 'global':
home_dir = os.path.expanduser('~')
credential_directory = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_directory):
os.makedirs(credential_directory)
else:
pass
credentials_path = os.path.join(credential_directory, 'sheets.googleapis.com-python.json') # TODO Change hardcoded name?
credentials = None
if os.path.exists(credentials_path):
# expect these to be valid. may expire at some point, but should be refreshed by google api client...
credentials = Credentials.from_authorized_user_file(credentials_path, scopes=scopes)
if credentials:
if credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
if local:
flow = InstalledAppFlow.from_client_secrets_file(client_secret_file, scopes)
credentials = flow.run_local_server()
else:
flow = Flow.from_client_secrets_file(client_secret_file, scopes=scopes,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL and finish the authentication flow: {}'.format(auth_url))
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
credentials = flow.credentials
# Save the credentials for the next run
credentials_as_dict = {
'token': credentials.token,
'refresh_token': credentials.refresh_token,
'id_token': credentials.id_token,
'token_uri': credentials.token_uri,
'client_id': credentials.client_id,
'client_secret': credentials.client_secret
}
with open(credentials_path, 'w') as file:
file.write(json.dumps(credentials_as_dict))
return credentials
_SCOPES = ('https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive')
_deprecated_keyword_mapping = {
'outh_file': 'client_secret',
'outh_creds_store': 'credentials_directory',
'service_file': 'service_account_file',
'credentials': 'custom_credentials'
}
def authorize(client_secret='client_secret.json',
service_account_file=None,
service_account_env_var=None,
credentials_directory='',
scopes=_SCOPES,
custom_credentials=None,
local=False,
**kwargs):
"""Authenticate this application with a google account.
See general authorization documentation for details on how to attain the necessary files.
:param client_secret: Location of the oauth2 credentials file.
:param service_account_file: Location of a service account file.
:param service_account_env_var: Use an environment variable to provide service account credentials.
:param credentials_directory: Location of the token file created by the OAuth2 process. Use 'global' to store in
global location, which is OS dependent. Default None will store token file in
                                  current working directory. Please note that this overrides your client secret.
:param custom_credentials: A custom or pre-made credentials object. Will ignore all other params.
:param scopes: The scopes for which the authentication applies.
    :param local: If local then a browser will be opened to authenticate
:param kwargs: Parameters to be handed into the client constructor.
:returns: :class:`Client`
.. warning::
        The `credentials_directory` overrides `client_secret`. So you might be accidentally using a different credential
than intended, if you are using global `credentials_directory` in more than one script.
"""
for key in kwargs:
if key in ['outh_file', 'outh_creds_store', 'service_file', 'credentials']:
warnings.warn('The argument {} is deprecated. Use {} instead.'.format(key, _deprecated_keyword_mapping[key])
, category=DeprecationWarning)
client_secret = kwargs.pop('outh_file', client_secret)
service_account_file = kwargs.pop('service_file', service_account_file)
credentials_directory = kwargs.pop('outh_creds_store', credentials_directory)
custom_credentials = kwargs.pop('credentials', custom_credentials)
if custom_credentials is not None:
credentials = custom_credentials
elif service_account_env_var is not None:
service_account_info = json.loads(os.environ[service_account_env_var])
credentials = service_account.Credentials.from_service_account_info(
service_account_info, scopes=scopes)
elif service_account_file is not None:
credentials = service_account.Credentials.from_service_account_file(service_account_file, scopes=scopes)
else:
credentials = _get_user_authentication_credentials(client_secret, scopes, credentials_directory, local)
return Client(credentials, **kwargs)
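# Editor's note: a usage sketch (not part of the original module); the file names are
# placeholders for the caller's own credential files.
def _authorize_example():  # pragma: no cover
    # OAuth2 user flow, opening a local browser for consent:
    client = authorize(client_secret="client_secret.json", local=True)
    # Alternatively, a service account:
    # client = authorize(service_account_file="service_account.json")
    return client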
|
bibliopixel/layout/cutter.py | rec/leds | 253 | 12613575 | """
Cut matrices by row or column and apply operations to them.
"""
class Cutter:
"""
Base class that pre-calculates cuts and can use them to
apply a function to the layout.
Each "cut" is a row or column, depending on the value of by_row.
The entries are iterated forward or backwards, depending on the
value of forward.
"""
def __init__(self, layout, by_row=True):
self.layout = layout
cuts = layout.height if by_row else layout.width
cutter = self.cut_row if by_row else self.cut_column
self.cuts = [cutter(i) for i in range(cuts)]
def apply(self, function):
"""
For each row or column in cuts, read a list of its colors,
apply the function to that list of colors, then write it back
to the layout.
"""
for cut in self.cuts:
value = self.read(cut)
function(value)
self.write(cut, value)
class Slicer(Cutter):
"""
Implementation of Cutter that uses slices of the underlying colorlist.
Does not work if the Matrix layout is serpentine or has any reflections
or rotations.
"""
def cut_row(self, i):
return slice(self.layout.width * i, self.layout.width * (i + 1))
def cut_column(self, i):
return slice(i, None, self.layout.width)
def read(self, cut):
return self.layout.color_list[cut]
def write(self, cut, value):
self.layout.color_list[cut] = value
class Indexer(Cutter):
"""
Slower implementation of Cutter that uses lists of indices and the
Matrix interface.
"""
def cut_row(self, i):
return [(column, i) for column in range(self.layout.width)]
def cut_column(self, i):
return [(i, row) for row in range(self.layout.height)]
def read(self, cut):
return [self.layout.get(*i) for i in cut]
def write(self, cut, value):
for i, v in zip(cut, value):
self.layout.set(*i, color=v)
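# Editor's note: a self-contained sketch (not part of the original module). _FakeLayout is a
# minimal stand-in for a non-serpentine Matrix layout, just enough for Slicer to operate on.
def _slicer_example():
    class _FakeLayout:
        width, height = 2, 2
        color_list = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 1)]
    layout = _FakeLayout()
    # Mirror every row in place.
    Slicer(layout, by_row=True).apply(lambda row: row.reverse())
    return layout.color_list  # [(0, 1, 0), (1, 0, 0), (1, 1, 1), (0, 0, 1)]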
|
h2o-py/h2o/estimators/anovaglm.py | MikolajBak/h2o-3 | 6,098 | 12613613 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
import h2o
from h2o.base import Keyed
from h2o.frame import H2OFrame
from h2o.expr import ExprNode
from h2o.expr import ASTId
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OANOVAGLMEstimator(H2OEstimator):
"""
ANOVA for Generalized Linear Model
H2O ANOVAGLM is used to calculate Type III SS which is used to evaluate the contributions of individual predictors
and their interactions to a model. Predictors or interactions with negligible contributions to the model will have
high p-values while those with more contributions will have low p-values.
"""
algo = "anovaglm"
supervised_learning = True
def __init__(self,
model_id=None, # type: Optional[Union[None, str, H2OEstimator]]
training_frame=None, # type: Optional[Union[None, str, H2OFrame]]
seed=-1, # type: int
response_column=None, # type: Optional[str]
ignored_columns=None, # type: Optional[List[str]]
ignore_const_cols=True, # type: bool
score_each_iteration=False, # type: bool
offset_column=None, # type: Optional[str]
weights_column=None, # type: Optional[str]
family="auto", # type: Literal["auto", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson", "gamma", "tweedie", "negativebinomial"]
tweedie_variance_power=0.0, # type: float
tweedie_link_power=1.0, # type: float
theta=0.0, # type: float
solver="irlsm", # type: Literal["auto", "irlsm", "l_bfgs", "coordinate_descent_naive", "coordinate_descent", "gradient_descent_lh", "gradient_descent_sqerr"]
missing_values_handling="mean_imputation", # type: Literal["mean_imputation", "skip", "plug_values"]
plug_values=None, # type: Optional[Union[None, str, H2OFrame]]
compute_p_values=True, # type: bool
standardize=True, # type: bool
non_negative=False, # type: bool
max_iterations=0, # type: int
link="family_default", # type: Literal["family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"]
prior=0.0, # type: float
alpha=None, # type: Optional[List[float]]
lambda_=[0.0], # type: List[float]
lambda_search=False, # type: bool
stopping_rounds=0, # type: int
stopping_metric="auto", # type: Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"]
early_stopping=False, # type: bool
stopping_tolerance=0.001, # type: float
balance_classes=False, # type: bool
class_sampling_factors=None, # type: Optional[List[float]]
max_after_balance_size=5.0, # type: float
max_runtime_secs=0.0, # type: float
save_transformed_framekeys=False, # type: bool
highest_interaction_term=0, # type: int
nparallelism=4, # type: int
type=0, # type: int
):
"""
:param model_id: Destination id for this model; auto-generated if not specified.
Defaults to ``None``.
:type model_id: Union[None, str, H2OEstimator], optional
:param training_frame: Id of the training data frame.
Defaults to ``None``.
:type training_frame: Union[None, str, H2OFrame], optional
:param seed: Seed for pseudo random number generator (if applicable)
Defaults to ``-1``.
:type seed: int
:param response_column: Response variable column.
Defaults to ``None``.
:type response_column: str, optional
:param ignored_columns: Names of columns to ignore for training.
Defaults to ``None``.
:type ignored_columns: List[str], optional
:param ignore_const_cols: Ignore constant columns.
Defaults to ``True``.
:type ignore_const_cols: bool
:param score_each_iteration: Whether to score during each iteration of model training.
Defaults to ``False``.
:type score_each_iteration: bool
:param offset_column: Offset column. This will be added to the combination of columns before applying the link
function.
Defaults to ``None``.
:type offset_column: str, optional
:param weights_column: Column with observation weights. Giving some observation a weight of zero is equivalent
to excluding it from the dataset; giving an observation a relative weight of 2 is equivalent to repeating
that row twice. Negative weights are not allowed. Note: Weights are per-row observation weights and do
not increase the size of the data frame. This is typically the number of times a row is repeated, but
non-integer values are supported as well. During training, rows with higher weights matter more, due to
the larger loss function pre-factor. If you set weight = 0 for a row, the returned prediction frame at
that row is zero and this is incorrect. To get an accurate prediction, remove all rows with weight == 0.
Defaults to ``None``.
:type weights_column: str, optional
:param family: Family. Use binomial for classification with logistic regression, others are for regression
problems.
Defaults to ``"auto"``.
:type family: Literal["auto", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson", "gamma",
"tweedie", "negativebinomial"]
:param tweedie_variance_power: Tweedie variance power
Defaults to ``0.0``.
:type tweedie_variance_power: float
:param tweedie_link_power: Tweedie link power
Defaults to ``1.0``.
:type tweedie_link_power: float
:param theta: Theta
Defaults to ``0.0``.
:type theta: float
        :param solver: AUTO will set the solver based on given data and the other parameters. IRLSM is fast on
            problems with a small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
datasets with many columns.
Defaults to ``"irlsm"``.
:type solver: Literal["auto", "irlsm", "l_bfgs", "coordinate_descent_naive", "coordinate_descent",
"gradient_descent_lh", "gradient_descent_sqerr"]
:param missing_values_handling: Handling of missing values. Either MeanImputation, Skip or PlugValues.
Defaults to ``"mean_imputation"``.
:type missing_values_handling: Literal["mean_imputation", "skip", "plug_values"]
:param plug_values: Plug Values (a single row frame containing values that will be used to impute missing values
of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
Defaults to ``None``.
:type plug_values: Union[None, str, H2OFrame], optional
:param compute_p_values: Request p-values computation, p-values work only with IRLSM solver and no
regularization
Defaults to ``True``.
:type compute_p_values: bool
:param standardize: Standardize numeric columns to have zero mean and unit variance
Defaults to ``True``.
:type standardize: bool
:param non_negative: Restrict coefficients (not intercept) to be non-negative
Defaults to ``False``.
:type non_negative: bool
:param max_iterations: Maximum number of iterations
Defaults to ``0``.
:type max_iterations: int
:param link: Link function.
Defaults to ``"family_default"``.
:type link: Literal["family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"]
:param prior: Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
and the mean of response does not reflect reality.
Defaults to ``0.0``.
:type prior: float
:param alpha: Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS'; 0.5
otherwise.
Defaults to ``None``.
:type alpha: List[float], optional
:param lambda_: Regularization strength
Defaults to ``[0.0]``.
:type lambda_: List[float]
:param lambda_search: Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
Defaults to ``False``.
:type lambda_search: bool
:param stopping_rounds: Early stopping based on convergence of stopping_metric. Stop if simple moving average of
length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Defaults to ``0``.
:type stopping_rounds: int
:param stopping_metric: Metric to use for early stopping (AUTO: logloss for classification, deviance for
            regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing can only be
used in GBM and DRF with the Python client.
Defaults to ``"auto"``.
:type stopping_metric: Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group",
"misclassification", "mean_per_class_error", "custom", "custom_increasing"]
:param early_stopping: Stop early when there is no more relative improvement on train or validation (if
provided).
Defaults to ``False``.
:type early_stopping: bool
:param stopping_tolerance: Relative tolerance for metric-based stopping criterion (stop if relative improvement
is not at least this much)
Defaults to ``0.001``.
:type stopping_tolerance: float
:param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
Defaults to ``False``.
:type balance_classes: bool
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not
specified, sampling factors will be automatically computed to obtain class balance during training.
Requires balance_classes.
Defaults to ``None``.
:type class_sampling_factors: List[float], optional
:param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be
less than 1.0). Requires balance_classes.
Defaults to ``5.0``.
:type max_after_balance_size: float
:param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
Defaults to ``0.0``.
:type max_runtime_secs: float
:param save_transformed_framekeys: true to save the keys of transformed predictors and interaction column.
Defaults to ``False``.
:type save_transformed_framekeys: bool
:param highest_interaction_term: Limit the number of interaction terms, if 2 means interaction between 2 columns
only, 3 for three columns and so on... Default to 2.
Defaults to ``0``.
:type highest_interaction_term: int
:param nparallelism: Number of models to build in parallel. Default to 4. Adjust according to your system.
Defaults to ``4``.
:type nparallelism: int
:param type: Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
Defaults to ``0``.
:type type: int
"""
super(H2OANOVAGLMEstimator, self).__init__()
self._parms = {}
self._id = self._parms['model_id'] = model_id
self.training_frame = training_frame
self.seed = seed
self.response_column = response_column
self.ignored_columns = ignored_columns
self.ignore_const_cols = ignore_const_cols
self.score_each_iteration = score_each_iteration
self.offset_column = offset_column
self.weights_column = weights_column
self.family = family
self.tweedie_variance_power = tweedie_variance_power
self.tweedie_link_power = tweedie_link_power
self.theta = theta
self.solver = solver
self.missing_values_handling = missing_values_handling
self.plug_values = plug_values
self.compute_p_values = compute_p_values
self.standardize = standardize
self.non_negative = non_negative
self.max_iterations = max_iterations
self.link = link
self.prior = prior
self.alpha = alpha
self.lambda_ = lambda_
self.lambda_search = lambda_search
self.stopping_rounds = stopping_rounds
self.stopping_metric = stopping_metric
self.early_stopping = early_stopping
self.stopping_tolerance = stopping_tolerance
self.balance_classes = balance_classes
self.class_sampling_factors = class_sampling_factors
self.max_after_balance_size = max_after_balance_size
self.max_runtime_secs = max_runtime_secs
self.save_transformed_framekeys = save_transformed_framekeys
self.highest_interaction_term = highest_interaction_term
self.nparallelism = nparallelism
self.type = type
self._parms["_rest_version"] = 3
@property
def training_frame(self):
"""
Id of the training data frame.
Type: ``Union[None, str, H2OFrame]``.
"""
return self._parms.get("training_frame")
@training_frame.setter
def training_frame(self, training_frame):
self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame')
@property
def seed(self):
"""
Seed for pseudo random number generator (if applicable)
Type: ``int``, defaults to ``-1``.
"""
return self._parms.get("seed")
@seed.setter
def seed(self, seed):
assert_is_type(seed, None, int)
self._parms["seed"] = seed
@property
def response_column(self):
"""
Response variable column.
Type: ``str``.
"""
return self._parms.get("response_column")
@response_column.setter
def response_column(self, response_column):
assert_is_type(response_column, None, str)
self._parms["response_column"] = response_column
@property
def ignored_columns(self):
"""
Names of columns to ignore for training.
Type: ``List[str]``.
"""
return self._parms.get("ignored_columns")
@ignored_columns.setter
def ignored_columns(self, ignored_columns):
assert_is_type(ignored_columns, None, [str])
self._parms["ignored_columns"] = ignored_columns
@property
def ignore_const_cols(self):
"""
Ignore constant columns.
Type: ``bool``, defaults to ``True``.
"""
return self._parms.get("ignore_const_cols")
@ignore_const_cols.setter
def ignore_const_cols(self, ignore_const_cols):
assert_is_type(ignore_const_cols, None, bool)
self._parms["ignore_const_cols"] = ignore_const_cols
@property
def score_each_iteration(self):
"""
Whether to score during each iteration of model training.
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("score_each_iteration")
@score_each_iteration.setter
def score_each_iteration(self, score_each_iteration):
assert_is_type(score_each_iteration, None, bool)
self._parms["score_each_iteration"] = score_each_iteration
@property
def offset_column(self):
"""
Offset column. This will be added to the combination of columns before applying the link function.
Type: ``str``.
"""
return self._parms.get("offset_column")
@offset_column.setter
def offset_column(self, offset_column):
assert_is_type(offset_column, None, str)
self._parms["offset_column"] = offset_column
@property
def weights_column(self):
"""
Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the
dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data
frame. This is typically the number of times a row is repeated, but non-integer values are supported as well.
During training, rows with higher weights matter more, due to the larger loss function pre-factor. If you set
weight = 0 for a row, the returned prediction frame at that row is zero and this is incorrect. To get an
accurate prediction, remove all rows with weight == 0.
Type: ``str``.
"""
return self._parms.get("weights_column")
@weights_column.setter
def weights_column(self, weights_column):
assert_is_type(weights_column, None, str)
self._parms["weights_column"] = weights_column
@property
def family(self):
"""
Family. Use binomial for classification with logistic regression, others are for regression problems.
Type: ``Literal["auto", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson", "gamma",
"tweedie", "negativebinomial"]``, defaults to ``"auto"``.
"""
return self._parms.get("family")
@family.setter
def family(self, family):
assert_is_type(family, None, Enum("auto", "gaussian", "binomial", "fractionalbinomial", "quasibinomial", "poisson", "gamma", "tweedie", "negativebinomial"))
self._parms["family"] = family
@property
def tweedie_variance_power(self):
"""
Tweedie variance power
Type: ``float``, defaults to ``0.0``.
"""
return self._parms.get("tweedie_variance_power")
@tweedie_variance_power.setter
def tweedie_variance_power(self, tweedie_variance_power):
assert_is_type(tweedie_variance_power, None, numeric)
self._parms["tweedie_variance_power"] = tweedie_variance_power
@property
def tweedie_link_power(self):
"""
Tweedie link power
Type: ``float``, defaults to ``1.0``.
"""
return self._parms.get("tweedie_link_power")
@tweedie_link_power.setter
def tweedie_link_power(self, tweedie_link_power):
assert_is_type(tweedie_link_power, None, numeric)
self._parms["tweedie_link_power"] = tweedie_link_power
@property
def theta(self):
"""
Theta
Type: ``float``, defaults to ``0.0``.
"""
return self._parms.get("theta")
@theta.setter
def theta(self, theta):
assert_is_type(theta, None, numeric)
self._parms["theta"] = theta
@property
def solver(self):
"""
        AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems with a small
        number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for datasets with many columns.
Type: ``Literal["auto", "irlsm", "l_bfgs", "coordinate_descent_naive", "coordinate_descent",
"gradient_descent_lh", "gradient_descent_sqerr"]``, defaults to ``"irlsm"``.
"""
return self._parms.get("solver")
@solver.setter
def solver(self, solver):
assert_is_type(solver, None, Enum("auto", "irlsm", "l_bfgs", "coordinate_descent_naive", "coordinate_descent", "gradient_descent_lh", "gradient_descent_sqerr"))
self._parms["solver"] = solver
@property
def missing_values_handling(self):
"""
Handling of missing values. Either MeanImputation, Skip or PlugValues.
Type: ``Literal["mean_imputation", "skip", "plug_values"]``, defaults to ``"mean_imputation"``.
"""
return self._parms.get("missing_values_handling")
@missing_values_handling.setter
def missing_values_handling(self, missing_values_handling):
assert_is_type(missing_values_handling, None, Enum("mean_imputation", "skip", "plug_values"))
self._parms["missing_values_handling"] = missing_values_handling
@property
def plug_values(self):
"""
Plug Values (a single row frame containing values that will be used to impute missing values of the
training/validation frame, use with conjunction missing_values_handling = PlugValues)
Type: ``Union[None, str, H2OFrame]``.
"""
return self._parms.get("plug_values")
@plug_values.setter
def plug_values(self, plug_values):
self._parms["plug_values"] = H2OFrame._validate(plug_values, 'plug_values')
@property
def compute_p_values(self):
"""
Request p-values computation, p-values work only with IRLSM solver and no regularization
Type: ``bool``, defaults to ``True``.
"""
return self._parms.get("compute_p_values")
@compute_p_values.setter
def compute_p_values(self, compute_p_values):
assert_is_type(compute_p_values, None, bool)
self._parms["compute_p_values"] = compute_p_values
@property
def standardize(self):
"""
Standardize numeric columns to have zero mean and unit variance
Type: ``bool``, defaults to ``True``.
"""
return self._parms.get("standardize")
@standardize.setter
def standardize(self, standardize):
assert_is_type(standardize, None, bool)
self._parms["standardize"] = standardize
@property
def non_negative(self):
"""
Restrict coefficients (not intercept) to be non-negative
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("non_negative")
@non_negative.setter
def non_negative(self, non_negative):
assert_is_type(non_negative, None, bool)
self._parms["non_negative"] = non_negative
@property
def max_iterations(self):
"""
Maximum number of iterations
Type: ``int``, defaults to ``0``.
"""
return self._parms.get("max_iterations")
@max_iterations.setter
def max_iterations(self, max_iterations):
assert_is_type(max_iterations, None, int)
self._parms["max_iterations"] = max_iterations
@property
def link(self):
"""
Link function.
Type: ``Literal["family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"]``, defaults to
``"family_default"``.
"""
return self._parms.get("link")
@link.setter
def link(self, link):
assert_is_type(link, None, Enum("family_default", "identity", "logit", "log", "inverse", "tweedie", "ologit"))
self._parms["link"] = link
@property
def prior(self):
"""
Prior probability for y==1. To be used only for logistic regression iff the data has been sampled and the mean
of response does not reflect reality.
Type: ``float``, defaults to ``0.0``.
"""
return self._parms.get("prior")
@prior.setter
def prior(self, prior):
assert_is_type(prior, None, numeric)
self._parms["prior"] = prior
@property
def alpha(self):
"""
Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for alpha
represents Lasso regression, a value of 0 produces Ridge regression, and anything in between specifies the
amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS'; 0.5 otherwise.
Type: ``List[float]``.
"""
return self._parms.get("alpha")
@alpha.setter
def alpha(self, alpha):
# For `alpha` and `lambda` the server reports type float[], while in practice simple floats are also ok
assert_is_type(alpha, None, numeric, [numeric])
self._parms["alpha"] = alpha
@property
def lambda_(self):
"""
Regularization strength
Type: ``List[float]``, defaults to ``[0.0]``.
"""
return self._parms.get("lambda")
@lambda_.setter
def lambda_(self, lambda_):
assert_is_type(lambda_, None, numeric, [numeric])
self._parms["lambda"] = lambda_
@property
def lambda_search(self):
"""
Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("lambda_search")
@lambda_search.setter
def lambda_search(self, lambda_search):
assert_is_type(lambda_search, None, bool)
self._parms["lambda_search"] = lambda_search
@property
def stopping_rounds(self):
"""
Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
Type: ``int``, defaults to ``0``.
"""
return self._parms.get("stopping_rounds")
@stopping_rounds.setter
def stopping_rounds(self, stopping_rounds):
assert_is_type(stopping_rounds, None, int)
self._parms["stopping_rounds"] = stopping_rounds
@property
def stopping_metric(self):
"""
        Metric to use for early stopping (AUTO: logloss for classification, deviance for regression and anomaly_score
for Isolation Forest). Note that custom and custom_increasing can only be used in GBM and DRF with the Python
client.
Type: ``Literal["auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group",
"misclassification", "mean_per_class_error", "custom", "custom_increasing"]``, defaults to ``"auto"``.
"""
return self._parms.get("stopping_metric")
@stopping_metric.setter
def stopping_metric(self, stopping_metric):
assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "aucpr", "lift_top_group", "misclassification", "mean_per_class_error", "custom", "custom_increasing"))
self._parms["stopping_metric"] = stopping_metric
@property
def early_stopping(self):
"""
Stop early when there is no more relative improvement on train or validation (if provided).
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("early_stopping")
@early_stopping.setter
def early_stopping(self, early_stopping):
assert_is_type(early_stopping, None, bool)
self._parms["early_stopping"] = early_stopping
@property
def stopping_tolerance(self):
"""
Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
Type: ``float``, defaults to ``0.001``.
"""
return self._parms.get("stopping_tolerance")
@stopping_tolerance.setter
def stopping_tolerance(self, stopping_tolerance):
assert_is_type(stopping_tolerance, None, numeric)
self._parms["stopping_tolerance"] = stopping_tolerance
@property
def balance_classes(self):
"""
Balance training data class counts via over/under-sampling (for imbalanced data).
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("balance_classes")
@balance_classes.setter
def balance_classes(self, balance_classes):
assert_is_type(balance_classes, None, bool)
self._parms["balance_classes"] = balance_classes
@property
def class_sampling_factors(self):
"""
Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will
be automatically computed to obtain class balance during training. Requires balance_classes.
Type: ``List[float]``.
"""
return self._parms.get("class_sampling_factors")
@class_sampling_factors.setter
def class_sampling_factors(self, class_sampling_factors):
assert_is_type(class_sampling_factors, None, [float])
self._parms["class_sampling_factors"] = class_sampling_factors
@property
def max_after_balance_size(self):
"""
Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires
balance_classes.
Type: ``float``, defaults to ``5.0``.
"""
return self._parms.get("max_after_balance_size")
@max_after_balance_size.setter
def max_after_balance_size(self, max_after_balance_size):
assert_is_type(max_after_balance_size, None, float)
self._parms["max_after_balance_size"] = max_after_balance_size
@property
def max_runtime_secs(self):
"""
Maximum allowed runtime in seconds for model training. Use 0 to disable.
Type: ``float``, defaults to ``0.0``.
"""
return self._parms.get("max_runtime_secs")
@max_runtime_secs.setter
def max_runtime_secs(self, max_runtime_secs):
assert_is_type(max_runtime_secs, None, numeric)
self._parms["max_runtime_secs"] = max_runtime_secs
@property
def save_transformed_framekeys(self):
"""
true to save the keys of transformed predictors and interaction column.
Type: ``bool``, defaults to ``False``.
"""
return self._parms.get("save_transformed_framekeys")
@save_transformed_framekeys.setter
def save_transformed_framekeys(self, save_transformed_framekeys):
assert_is_type(save_transformed_framekeys, None, bool)
self._parms["save_transformed_framekeys"] = save_transformed_framekeys
@property
def highest_interaction_term(self):
"""
Limit the number of interaction terms, if 2 means interaction between 2 columns only, 3 for three columns and so
on... Default to 2.
Type: ``int``, defaults to ``0``.
"""
return self._parms.get("highest_interaction_term")
@highest_interaction_term.setter
def highest_interaction_term(self, highest_interaction_term):
assert_is_type(highest_interaction_term, None, int)
self._parms["highest_interaction_term"] = highest_interaction_term
@property
def nparallelism(self):
"""
Number of models to build in parallel. Default to 4. Adjust according to your system.
Type: ``int``, defaults to ``4``.
"""
return self._parms.get("nparallelism")
@nparallelism.setter
def nparallelism(self, nparallelism):
assert_is_type(nparallelism, None, int)
self._parms["nparallelism"] = nparallelism
@property
def type(self):
"""
Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
Type: ``int``, defaults to ``0``.
"""
return self._parms.get("type")
@type.setter
def type(self, type):
assert_is_type(type, None, int)
self._parms["type"] = type
@property
def Lambda(self):
"""DEPRECATED. Use ``self.lambda_`` instead"""
return self._parms["lambda"] if "lambda" in self._parms else None
@Lambda.setter
def Lambda(self, value):
self._parms["lambda"] = value
def result(self):
"""
Get result frame that contains information about the model building process like for maxrglm and anovaglm.
:return: the H2OFrame that contains information about the model building process like for maxrglm and anovaglm.
"""
return H2OFrame._expr(expr=ExprNode("result", ASTId(self.key)))._frame(fill_cache=True)
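# Editor's note: a usage sketch, not part of the generated module; the file path and column
# names below are placeholders, and an H2O cluster must be reachable via h2o.init().
def _anovaglm_usage_example():  # pragma: no cover
    h2o.init()
    train = h2o.import_file("train.csv")
    model = H2OANOVAGLMEstimator(family="gaussian", lambda_=0.0, highest_interaction_term=2)
    model.train(x=["C1", "C2"], y="response", training_frame=train)
    return model.result()  # frame summarizing per-predictor / interaction contributions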
|
tests/xontribs/test_jedi.py | meramsey/xonsh | 4,716 | 12613616 | """Tests for the Jedi completer xontrib"""
import sys
import pytest
import importlib
from unittest.mock import MagicMock, call
from tests.tools import skip_if_on_windows, skip_if_on_darwin
from xonsh.xontribs import find_xontrib
from xonsh.completers.tools import RichCompletion
from xonsh.parsers.completion_context import CompletionContext, PythonContext
@pytest.fixture
def jedi_mock(monkeypatch):
jedi_mock = MagicMock()
jedi_mock.__version__ = "0.16.0"
jedi_mock.Interpreter().complete.return_value = []
jedi_mock.reset_mock()
monkeypatch.setitem(sys.modules, "jedi", jedi_mock)
yield jedi_mock
@pytest.fixture
def completer_mock(monkeypatch, xession):
completer_mock = MagicMock()
# so that args will be passed
def comp(args):
completer_mock(args)
monkeypatch.setitem(xession.aliases, "completer", comp)
yield completer_mock
@pytest.fixture
def jedi_xontrib(monkeypatch, source_path, jedi_mock, completer_mock):
monkeypatch.syspath_prepend(source_path)
spec = find_xontrib("jedi")
yield importlib.import_module(spec.name)
del sys.modules[spec.name]
def test_completer_added(jedi_xontrib, xession):
assert "xontrib.jedi" in sys.modules
assert "python" not in xession.completers
assert "python_mode" not in xession.completers
assert "jedi_python" in xession.completers
@pytest.mark.parametrize(
"context",
[
CompletionContext(python=PythonContext("10 + x", 6)),
],
)
@pytest.mark.parametrize("version", ["new", "old"])
def test_jedi_api(jedi_xontrib, jedi_mock, version, context, xession):
if version == "old":
jedi_mock.__version__ = "0.15.0"
jedi_mock.Interpreter().completions.return_value = []
jedi_mock.reset_mock()
jedi_xontrib.complete_jedi(context)
extra_namespace = {"__xonsh__": xession}
try:
extra_namespace["_"] = _
except NameError:
pass
namespaces = [{}, extra_namespace]
line = context.python.multiline_code
end = context.python.cursor_index
if version == "new":
assert jedi_mock.Interpreter.call_args_list == [call(line, namespaces)]
assert jedi_mock.Interpreter().complete.call_args_list == [call(1, end)]
else:
assert jedi_mock.Interpreter.call_args_list == [
call(line, namespaces, line=1, column=end)
]
assert jedi_mock.Interpreter().completions.call_args_list == [call()]
def test_multiline(jedi_xontrib, jedi_mock, monkeypatch):
complete_document = "xx = 1\n1 + x"
jedi_xontrib.complete_jedi(
CompletionContext(
python=PythonContext(complete_document, len(complete_document))
)
)
assert jedi_mock.Interpreter.call_args_list[0][0][0] == complete_document
assert jedi_mock.Interpreter().complete.call_args_list == [
call(2, 5) # line (one-indexed), column (zero-indexed)
]
@pytest.mark.parametrize(
"completion, rich_completion",
[
(
# from jedi when code is 'x' and xx=3
(
"instance",
"xx",
"x",
"int(x=None, /) -> int",
("instance", "instance int"),
),
RichCompletion(
"xx", display="xx", description="instance int", prefix_len=1
),
),
(
# from jedi when code is 'xx=3\nx'
("statement", "xx", "x", None, ("instance", "instance int")),
RichCompletion(
"xx", display="xx", description="instance int", prefix_len=1
),
),
(
# from jedi when code is 'x.' and x=3
(
"function",
"from_bytes",
"from_bytes",
"from_bytes(bytes, byteorder, *, signed=False)",
("function", "def __get__"),
),
RichCompletion(
"from_bytes",
display="from_bytes()",
description="from_bytes(bytes, byteorder, *, signed=False)",
),
),
(
# from jedi when code is 'x=3\nx.'
("function", "imag", "imag", None, ("instance", "instance int")),
RichCompletion("imag", display="imag", description="instance int"),
),
(
# from '(3).from_bytes(byt'
("param", "bytes=", "es=", None, ("instance", "instance Sequence")),
RichCompletion(
"bytes=",
display="bytes=",
description="instance Sequence",
prefix_len=3,
),
),
(
# from 'x.from_bytes(byt' when x=3
("param", "bytes=", "es=", None, None),
RichCompletion(
"bytes=", display="bytes=", description="param", prefix_len=3
),
),
(
# from 'import colle'
("module", "collections", "ctions", None, ("module", "module collections")),
RichCompletion(
"collections",
display="collections",
description="module collections",
prefix_len=5,
),
),
(
# from 'NameErr'
(
"class",
"NameError",
"or",
"NameError(*args: object)",
("class", "class NameError"),
),
RichCompletion(
"NameError",
display="NameError",
description="NameError(*args: object)",
prefix_len=7,
),
),
(
# from 'a["' when a={'name':None}
("string", '"name"', 'name"', None, None),
RichCompletion('"name"', display='"name"', description="string"),
),
(
# from 'open("/etc/pass'
("path", 'passwd"', 'wd"', None, None),
RichCompletion(
'passwd"', display='passwd"', description="path", prefix_len=4
),
),
(
# from 'cla'
("keyword", "class", "ss", None, None),
RichCompletion(
"class", display="class", description="keyword", prefix_len=3
),
),
],
)
def test_rich_completions(jedi_xontrib, jedi_mock, completion, rich_completion):
comp_type, comp_name, comp_complete, sig, inf = completion
comp_mock = MagicMock()
comp_mock.type = comp_type
comp_mock.name = comp_name
comp_mock.complete = comp_complete
if sig:
sig_mock = MagicMock()
sig_mock.to_string.return_value = sig
comp_mock.get_signatures.return_value = [sig_mock]
else:
comp_mock.get_signatures.return_value = []
if inf:
inf_type, inf_desc = inf
inf_mock = MagicMock()
inf_mock.type = inf_type
inf_mock.description = inf_desc
comp_mock.infer.return_value = [inf_mock]
else:
comp_mock.infer.return_value = []
jedi_xontrib.XONSH_SPECIAL_TOKENS = []
jedi_mock.Interpreter().complete.return_value = [comp_mock]
completions = jedi_xontrib.complete_jedi(
CompletionContext(python=PythonContext("", 0))
)
assert len(completions) == 1
(ret_completion,) = completions
assert isinstance(ret_completion, RichCompletion)
assert ret_completion == rich_completion
assert ret_completion.display == rich_completion.display
assert ret_completion.description == rich_completion.description
def test_special_tokens(jedi_xontrib):
assert jedi_xontrib.complete_jedi(
CompletionContext(python=PythonContext("", 0))
).issuperset(jedi_xontrib.XONSH_SPECIAL_TOKENS)
assert jedi_xontrib.complete_jedi(
CompletionContext(python=PythonContext("@", 1))
) == {"@", "@(", "@$("}
assert jedi_xontrib.complete_jedi(
CompletionContext(python=PythonContext("$", 1))
) == {"$[", "${", "$("}
@skip_if_on_darwin
@skip_if_on_windows
def test_no_command_path_completion(jedi_xontrib, completion_context_parse):
assert jedi_xontrib.complete_jedi(completion_context_parse("./", 2)) is None
assert jedi_xontrib.complete_jedi(completion_context_parse("~/", 2)) is None
assert jedi_xontrib.complete_jedi(completion_context_parse("./e", 3)) is None
assert jedi_xontrib.complete_jedi(completion_context_parse("/usr/bin/", 9)) is None
assert (
jedi_xontrib.complete_jedi(completion_context_parse("/usr/bin/e", 10)) is None
)
|
chapter6_operation_management/condition_based_pattern/src/api_composition_proxy/routers/routers.py | sudabon/ml-system-in-actions | 133 | 12613617 | import asyncio
import base64
import io
import logging
import uuid
from typing import Any, Dict, List
import grpc
import httpx
from fastapi import APIRouter
from PIL import Image
from src.api_composition_proxy.backend import request_tfserving
from src.api_composition_proxy.backend.data import Data
from src.api_composition_proxy.configurations import ModelConfigurations, ServiceConfigurations
from tensorflow_serving.apis import prediction_service_pb2_grpc
logger = logging.getLogger(__name__)
router = APIRouter()
channel = grpc.insecure_channel(ServiceConfigurations.grpc)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
@router.get("/health")
def health() -> Dict[str, str]:
return {"health": "ok"}
@router.get("/label")
def label() -> List[str]:
return ModelConfigurations.labels
@router.get("/metadata")
def metadata() -> Dict[str, Any]:
return {
"data_type": "str",
"data_structure": "(1,1)",
"data_sample": "base64 encoded image file",
"prediction_type": "float32",
"prediction_structure": f"(1,{len(ModelConfigurations.labels)})",
"prediction_sample": "[0.07093159, 0.01558308, 0.01348537, ...]",
}
@router.get("/health/pred")
async def health_pred() -> Dict[str, Any]:
logger.info(f"GET redirect to: /health")
async with httpx.AsyncClient() as ac:
serving_address = (
f"http://{ServiceConfigurations.rest}/v1/models/{ModelConfigurations.model_spec_name}/versions/0/metadata"
)
logger.info(f"health pred : {serving_address}")
r = await ac.get(serving_address)
logger.info(f"health pred res: {r}")
if r.status_code == 200:
return {"health": "ok"}
else:
return {"health": "ng"}
@router.get("/predict/test")
def predict_test() -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
logger.info(f"{job_id} TEST GET redirect to: /predict/test")
image = Data().image_data
bytes_io = io.BytesIO()
image.save(bytes_io, format=image.format)
bytes_io.seek(0)
r = request_tfserving.request_grpc(
stub=stub,
image=bytes_io.read(),
model_spec_name=ModelConfigurations.model_spec_name,
signature_name=ModelConfigurations.signature_name,
timeout_second=ModelConfigurations.timeout_second,
)
logger.info(f"{job_id} prediction: {r}")
return r
@router.post("/predict")
def predict(data: Data) -> Dict[str, Any]:
job_id = str(uuid.uuid4())[:6]
logger.info(f"{job_id} POST redirect to: /predict")
image = base64.b64decode(str(data.image_data))
bytes_io = io.BytesIO(image)
image_data = Image.open(bytes_io)
image_data.save(bytes_io, format=image_data.format)
bytes_io.seek(0)
r = request_tfserving.request_grpc(
stub=stub,
image=bytes_io.read(),
model_spec_name=ModelConfigurations.model_spec_name,
signature_name=ModelConfigurations.signature_name,
timeout_second=ModelConfigurations.timeout_second,
)
logger.info(f"{job_id} prediction: {r}")
return r
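# --- Hedged usage sketch (not part of the original router) ---------------------
# Illustrates how a client could call the /predict endpoint above. The service
# address/port and the "image_data" JSON field name are assumptions for
# illustration only; adjust them to the actual deployment and Data schema.
def _example_client_request(image_path: str, endpoint: str = "http://localhost:8000/predict") -> Dict[str, Any]:
    with open(image_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    # httpx is already imported at module level; a plain synchronous call is enough here
    response = httpx.post(endpoint, json={"image_data": encoded})
    response.raise_for_status()
    return response.json()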
|
tests/hwsim/wlantest.py | majacQ/fragattacks | 1,104 | 12613626 | # Python class for controlling wlantest
# Copyright (c) 2013-2019, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import re
import os
import posixpath
import time
import subprocess
import logging
import wpaspy
logger = logging.getLogger()
class Wlantest:
remote_host = None
setup_params = None
exe_thread = None
exe_res = []
monitor_mod = None
setup_done = False
@classmethod
def stop_remote_wlantest(cls):
if cls.exe_thread is None:
# Local flow - no need for remote operations
return
cls.remote_host.execute(["killall", "-9", "wlantest"])
cls.remote_host.thread_wait(cls.exe_thread, 5)
cls.exe_thread = None
cls.exe_res = []
@classmethod
def reset_remote_wlantest(cls):
cls.stop_remote_wlantest()
cls.remote_host = None
cls.setup_params = None
cls.exe_thread = None
cls.exe_res = []
cls.monitor_mod = None
cls.setup_done = False
@classmethod
def start_remote_wlantest(cls):
if cls.remote_host is None:
# Local flow - no need for remote operations
return
if cls.exe_thread is not None:
raise Exception("Cannot start wlantest twice")
log_dir = cls.setup_params['log_dir']
ifaces = re.split('; | |, ', cls.remote_host.ifname)
ifname = ifaces[0]
exe = cls.setup_params["wlantest"]
tc_name = cls.setup_params["tc_name"]
base_log_name = tc_name + "_wlantest_" + \
cls.remote_host.name + "_" + ifname
log_file = posixpath.join(log_dir, base_log_name + ".log")
pcap_file = posixpath.join(log_dir, base_log_name + ".pcapng")
cmd = "{} -i {} -n {} -c -dtN -L {}".format(exe, ifname,
pcap_file, log_file)
cls.remote_host.add_log(log_file)
cls.remote_host.add_log(pcap_file)
cls.exe_thread = cls.remote_host.thread_run(cmd.split(), cls.exe_res)
# Give wlantest a chance to start working
time.sleep(1)
@classmethod
def register_remote_wlantest(cls, host, setup_params, monitor_mod):
if cls.remote_host is not None:
raise Exception("Cannot register remote wlantest twice")
cls.remote_host = host
cls.setup_params = setup_params
cls.monitor_mod = monitor_mod
status, buf = host.execute(["which", setup_params['wlantest']])
if status != 0:
raise Exception(host.name + " - wlantest: " + buf)
status, buf = host.execute(["which", setup_params['wlantest_cli']])
if status != 0:
raise Exception(host.name + " - wlantest_cli: " + buf)
@classmethod
def chan_from_wpa(cls, wpa, is_p2p=False):
if cls.monitor_mod is None:
return
m = cls.monitor_mod
return m.setup(cls.remote_host, [m.get_monitor_params(wpa, is_p2p)])
@classmethod
def setup(cls, wpa, is_p2p=False):
if wpa:
cls.chan_from_wpa(wpa, is_p2p)
cls.start_remote_wlantest()
cls.setup_done = True
def __init__(self):
if not self.setup_done:
raise Exception("Cannot create Wlantest instance before setup()")
if os.path.isfile('../../wlantest/wlantest_cli'):
self.wlantest_cli = '../../wlantest/wlantest_cli'
else:
self.wlantest_cli = 'wlantest_cli'
def cli_cmd(self, params):
if self.remote_host is not None:
exe = self.setup_params["wlantest_cli"]
ret = self.remote_host.execute([exe] + params)
if ret[0] != 0:
raise Exception("wlantest_cli failed")
return ret[1]
else:
return subprocess.check_output([self.wlantest_cli] + params).decode()
def flush(self):
res = self.cli_cmd(["flush"])
if "FAIL" in res:
raise Exception("wlantest_cli flush failed")
def relog(self):
res = self.cli_cmd(["relog"])
if "FAIL" in res:
raise Exception("wlantest_cli relog failed")
def add_passphrase(self, passphrase):
res = self.cli_cmd(["add_passphrase", passphrase])
if "FAIL" in res:
raise Exception("wlantest_cli add_passphrase failed")
def add_wepkey(self, key):
res = self.cli_cmd(["add_wepkey", key])
if "FAIL" in res:
raise Exception("wlantest_cli add_key failed")
def info_bss(self, field, bssid):
res = self.cli_cmd(["info_bss", field, bssid])
if "FAIL" in res:
raise Exception("Could not get BSS info from wlantest for " + bssid)
return res
def get_bss_counter(self, field, bssid):
try:
res = self.cli_cmd(["get_bss_counter", field, bssid])
except Exception as e:
return 0
if "FAIL" in res:
return 0
return int(res)
def clear_bss_counters(self, bssid):
self.cli_cmd(["clear_bss_counters", bssid])
def info_sta(self, field, bssid, addr):
res = self.cli_cmd(["info_sta", field, bssid, addr])
if "FAIL" in res:
raise Exception("Could not get STA info from wlantest for " + addr)
return res
def get_sta_counter(self, field, bssid, addr):
res = self.cli_cmd(["get_sta_counter", field, bssid, addr])
if "FAIL" in res:
raise Exception("wlantest_cli command failed")
return int(res)
def clear_sta_counters(self, bssid, addr):
res = self.cli_cmd(["clear_sta_counters", bssid, addr])
if "FAIL" in res:
raise Exception("wlantest_cli command failed")
def tdls_clear(self, bssid, addr1, addr2):
self.cli_cmd(["clear_tdls_counters", bssid, addr1, addr2])
def get_tdls_counter(self, field, bssid, addr1, addr2):
res = self.cli_cmd(["get_tdls_counter", field, bssid, addr1, addr2])
if "FAIL" in res:
raise Exception("wlantest_cli command failed")
return int(res)
def require_ap_pmf_mandatory(self, bssid):
res = self.info_bss("rsn_capab", bssid)
if "MFPR" not in res:
raise Exception("AP did not require PMF")
if "MFPC" not in res:
raise Exception("AP did not enable PMF")
res = self.info_bss("key_mgmt", bssid)
if "PSK-SHA256" not in res:
raise Exception("AP did not enable SHA256-based AKM for PMF")
def require_ap_pmf_optional(self, bssid):
res = self.info_bss("rsn_capab", bssid)
if "MFPR" in res:
raise Exception("AP required PMF")
if "MFPC" not in res:
raise Exception("AP did not enable PMF")
def require_ap_no_pmf(self, bssid):
res = self.info_bss("rsn_capab", bssid)
if "MFPR" in res:
raise Exception("AP required PMF")
if "MFPC" in res:
raise Exception("AP enabled PMF")
def require_sta_pmf_mandatory(self, bssid, addr):
res = self.info_sta("rsn_capab", bssid, addr)
if "MFPR" not in res:
raise Exception("STA did not require PMF")
if "MFPC" not in res:
raise Exception("STA did not enable PMF")
def require_sta_pmf(self, bssid, addr):
res = self.info_sta("rsn_capab", bssid, addr)
if "MFPC" not in res:
raise Exception("STA did not enable PMF")
def require_sta_no_pmf(self, bssid, addr):
res = self.info_sta("rsn_capab", bssid, addr)
if "MFPC" in res:
raise Exception("STA enabled PMF")
def require_sta_key_mgmt(self, bssid, addr, key_mgmt):
res = self.info_sta("key_mgmt", bssid, addr)
if key_mgmt not in res:
raise Exception("Unexpected STA key_mgmt")
def get_tx_tid(self, bssid, addr, tid):
res = self.cli_cmd(["get_tx_tid", bssid, addr, str(tid)])
if "FAIL" in res:
raise Exception("wlantest_cli command failed")
return int(res)
def get_rx_tid(self, bssid, addr, tid):
res = self.cli_cmd(["get_rx_tid", bssid, addr, str(tid)])
if "FAIL" in res:
raise Exception("wlantest_cli command failed")
return int(res)
def get_tid_counters(self, bssid, addr):
tx = {}
rx = {}
for tid in range(0, 17):
tx[tid] = self.get_tx_tid(bssid, addr, tid)
rx[tid] = self.get_rx_tid(bssid, addr, tid)
return [tx, rx]
class WlantestCapture:
def __init__(self, ifname, output, netns=None):
self.cmd = None
self.ifname = ifname
if os.path.isfile('../../wlantest/wlantest'):
bin = '../../wlantest/wlantest'
else:
bin = 'wlantest'
logger.debug("wlantest[%s] starting" % ifname)
args = [bin, '-e', '-i', ifname, '-w', output]
if netns:
args = ['ip', 'netns', 'exec', netns] + args
self.cmd = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def __del__(self):
if self.cmd:
self.close()
def close(self):
logger.debug("wlantest[%s] stopping" % self.ifname)
self.cmd.terminate()
res = self.cmd.communicate()
if len(res[0]) > 0:
logger.debug("wlantest[%s] stdout: %s" % (self.ifname,
res[0].decode().strip()))
if len(res[1]) > 0:
logger.debug("wlantest[%s] stderr: %s" % (self.ifname,
res[1].decode().strip()))
self.cmd = None
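# --- Hedged usage sketch (not part of the original module) ---------------------
# Shows the typical local (non-remote) flow: start a capture on a monitor
# interface, query a counter, and stop the capture. The interface name, BSSID,
# passphrase and counter field below are placeholders for illustration only.
def _example_local_usage(ifname="hwsim0", output="/tmp/wlantest.pcapng",
                         bssid="02:00:00:00:03:00", field="auth"):
    capture = WlantestCapture(ifname, output)
    Wlantest.setup(None)             # local flow; no remote wlantest registered
    wt = Wlantest()
    wt.flush()                       # requires a running wlantest instance
    wt.add_passphrase("12345678")
    count = wt.get_bss_counter(field, bssid)  # returns 0 on failure
    capture.close()
    return count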
|
lib/python2.7/site-packages/sklearn/gaussian_process/correlation_models.py | wfehrnstrom/harmonize | 6,989 | 12613636 | # -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# (mostly translation, see implementation details)
# License: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.abs(np.asarray(d, dtype=np.float64))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
n
theta, d --> r(theta, d) = 1 if sum |d_i| == 0
i = 1
0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
def linear(theta, d):
"""
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
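# --- Hedged usage sketch (not part of the original module) ---------------------
# Evaluates a few of the correlation models above on toy componentwise distances,
# showing both the isotropic (single theta) and anisotropic (one theta per
# feature) parameterizations. Values are illustrative only.
def _example_usage():
    d = np.array([[0.0, 0.0],
                  [0.5, 1.0],
                  [1.0, 2.0]])                        # (n_eval=3, n_features=2)
    iso = squared_exponential([1.0], d)               # single length-scale parameter
    aniso = squared_exponential([1.0, 0.1], d)        # one theta per feature
    abs_exp = absolute_exponential([1.0], d)
    cub = cubic([0.5], d)
    gen = generalized_exponential([1.0, 1.0, 1.5], d) # theta has n_features + 1 entries (theta_1..n, p)
    return iso, aniso, abs_exp, cub, gen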
|
xdfile/ccxml2xd.py | jmviz/xd | 179 | 12613663 | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
import string
import re
from lxml import etree
import xdfile
from xdfile.utils import escape, consecutive, xml_escape_table, rev_xml_escape_table, error
HEADER_RENAMES = {
'Creator': 'Author'
}
# data is bytes()
def parse_ccxml(data, filename):
content = data.decode('utf-8', errors='replace')
content = escape(content, xml_escape_table)
content = consecutive(content)
content = re.sub(r'(=["]{2}([^"]+?)["]{2})+',r'=""\2""', content) # Replace double quotes
content_xml = content.encode('utf-8')
ns = {
'puzzle': 'http://crossword.info/xml/rectangular-puzzle'
}
try:
root = etree.fromstring(content_xml)
except Exception as e:
error('Exception %s' % e)
error(content)
        exit(1)
# init crossword
grid = root.xpath('//puzzle:crossword/puzzle:grid', namespaces=ns)
if not grid:
return None
grid = grid[0]
rows = int(grid.attrib['height'])
cols = int(grid.attrib['width'])
xd = xdfile.xdfile('', filename)
# add metadata
for metadata in root.xpath('//puzzle:metadata', namespaces=ns)[0]:
text = metadata.text and metadata.text.strip()
        title = re.sub(r'\{[^\}]*\}', '', metadata.tag.title())
title = escape(title, rev_xml_escape_table)
if text:
text = escape(text, rev_xml_escape_table)
xd.set_header(HEADER_RENAMES.get(title, title), text)
# add puzzle
puzzle = []
for i in range(rows):
puzzle.append([" "] * cols)
for cell in grid.xpath('./puzzle:cell', namespaces=ns):
x = int(cell.attrib['x']) - 1
y = int(cell.attrib['y']) - 1
if 'solution' in cell.attrib:
value = cell.attrib['solution']
if 'type' in cell.attrib and cell.attrib['type'] == 'block':
value = xdfile.BLOCK_CHAR
puzzle[y][x] = value
xd.grid = ["".join(row) for row in puzzle]
# add clues
word_map = {}
for word in root.xpath('//puzzle:crossword/puzzle:word', namespaces=ns):
word_map[word.attrib['id']] = (word.attrib['x'], word.attrib['y'])
for clues in root.xpath('//puzzle:crossword/puzzle:clues', namespaces=ns):
type = clues.xpath('./puzzle:title', namespaces=ns)[0]
type = "".join(chr(x) for x in etree.tostring(type, method='text').upper() if chr(x) in string.ascii_uppercase)
type = type[0]
for clue in clues.xpath('./puzzle:clue', namespaces=ns):
word_id = clue.attrib['word']
number = int(clue.attrib['number'])
text = "|".join(clue.itertext()).strip()
text = escape(text, rev_xml_escape_table)
solution = get_solution(word_id, word_map, puzzle)
xd.clues.append(((type, number), text, solution))
return xd
def get_solution(word_id, word_map, puzzle):
def get_numbers_in_range(range_as_string, separator):
start, end = (int(num) for num in range_as_string.split(separator))
# reduce 1 to stick to a 0-based index list
start = start - 1
end = end - 1
return list(range(start, end + 1))
x, y = word_map[word_id]
word = ''
if '-' in x:
word = (puzzle[int(y) - 1][i] for i in get_numbers_in_range(x, '-'))
elif '-' in y:
word = (puzzle[i][int(x) - 1] for i in get_numbers_in_range(y, '-'))
else:
        word = puzzle[int(y) - 1][int(x) - 1]  # single cell; grid is indexed as puzzle[row][col] = puzzle[y][x]
return ''.join(word)
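# --- Hedged sketch (not part of the original module) ---------------------------
# Demonstrates how get_solution() reads words out of the grid: word_map values are
# (x, y) coordinate strings, where a range such as "1-3" marks the axis the word
# runs along. The tiny grid and word ids below are illustrative only.
def _example_get_solution():
    puzzle = [list("CAT"),
              list("ARE"),
              list("TEN")]
    word_map = {
        "A1": ("1-3", "1"),  # across: columns 1-3 of row 1 -> "CAT"
        "D1": ("1", "1-3"),  # down: rows 1-3 of column 1 -> "CAT"
    }
    return get_solution("A1", word_map, puzzle), get_solution("D1", word_map, puzzle)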
if __name__ == "__main__":
xdfile.main_parse(parse_ccxml)
|
scale/util/exceptions.py | kaydoh/scale | 121 | 12613672 | """Defines utility exceptions"""
from util.validation import ValidationError
class FileDoesNotExist(Exception):
"""Exception indicating an attempt was made to access a file that no longer exists
"""
pass
class InvalidBrokerUrl(Exception):
"""Exception indicating the broker URL does not meet the format requirements"""
pass
class ServiceAccountAuthFailure(Exception):
"""Exception indicating failure of request to login or communicate with DCOS using service account"""
pass
class InvalidAWSCredentials(Exception):
"""Exception indicating missing credentials required to successfully authenticate to AWS"""
pass
class RollbackTransaction(Exception):
"""Exception that can be thrown and swallowed to explicitly rollback a transaction"""
pass
class ScaleLogicBug(Exception):
"""Exception that indicates a critical Scale logic bug has occurred"""
pass
class TerminatedCommand(Exception):
"""Exception that can be thrown to indicate that a Scale command recieved a SIGTERM signal"""
pass
class UnbalancedBrackets(Exception):
"""Exception thrown when a string is provided that contains unbalanced curly brackets"""
pass
class ValidationException(Exception):
"""Exception indicating there was a validation error
"""
def __init__(self, name, description):
"""Constructor
:param name: The name of the validation error
:type name: string
:param description: The description of the validation error
:type description: string
"""
super(ValidationException, self).__init__(description)
self.error = ValidationError(name, description)
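# --- Hedged sketch (not part of the original module) ---------------------------
# Illustrates raising and handling a ValidationException; the error name and
# description used here are placeholders.
def _example_validation_error():
    try:
        raise ValidationException('MISSING_FIELD', 'The "name" field is required')
    except ValidationException as ex:
        return ex.error  # the ValidationError carrying the name/description pair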
|
tests/filters/test_base_filter.py | mokeyish/thumbor | 6,837 | 12613684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
from preggy import expect
from tornado.testing import gen_test
import thumbor.filters
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.filters import BaseFilter, FiltersFactory, filter_method
from thumbor.importer import Importer
FILTER_PARAMS_DATA = [
{
"type": BaseFilter.Number,
"values": [
("1", 1),
("10", 10),
("99", 99),
("-1", -1),
("-10", -10),
("010", 10),
(" 1 ", 1),
("0", 0),
],
"invalid_values": ["x", "x10", "10x", "- 1", ""],
},
{
"type": BaseFilter.PositiveNumber,
"values": [
("1", 1),
("10", 10),
("99", 99),
(" 1 ", 1),
("010", 10),
("0", 0),
],
"invalid_values": ["-1", "x", "x10", "10x", ""],
},
{
"type": BaseFilter.PositiveNonZeroNumber,
"values": [("1", 1), ("10", 10), ("99", 99), (" 1 ", 1), ("010", 10)],
"invalid_values": ["-1", "x", "x10", "10x", "0", ""],
},
{
"type": BaseFilter.NegativeNumber,
"values": [("-1", -1), ("-10", -10), (" -9 ", -9), ("-0", 0)],
"invalid_values": ["x", "x10", "10x", "- 1", ""],
},
{
"type": BaseFilter.DecimalNumber,
"values": [
("1", 1.0),
("10", 10.0),
("99", 99.0),
("-1", -1.0),
("-10", -10.0),
("010", 10.0),
(" 1 ", 1.0),
("1.0", 1.0),
("10.12", 10.12),
("9.9", 9.9),
("-1.1", -1.1),
(" -10.2 ", -10.2),
(" 1 ", 1.0),
(".11", 0.11),
("0.111", 0.111),
("0", 0.0),
],
"invalid_values": ["x", "x10", "10x", "- 1.1", "", "."],
},
{
"type": BaseFilter.String,
"values": [
("a", "a"),
("bbbb", "bbbb"),
(" cccc ", "cccc"),
(" cc:cc ", "cc:cc"),
("'a,b'", "a,b"),
],
"invalid_values": ["", ",", ",,,,"],
},
{
"type": BaseFilter.Boolean,
"values": [
("1", True),
("True", True),
("true", True),
("0", False),
("False", False),
("false", False),
(" True ", True),
],
"invalid_values": ["", "x", "TRUE", "111"],
},
{
"type": r"\dx\d",
"values": [("1x1", "1x1"), (" 9x9 ", "9x9")],
"invalid_values": ["a", ",", "9 x 9"],
},
]
class FilterParamsTestCase(TestCase):
def test_with_valid_values_should_correctly_parse_value(self):
for params in FILTER_PARAMS_DATA:
for test_data, expected_data in params["values"]:
BaseFilter.compile_regex({"name": "x", "params": [params["type"]]})
filter_instance = BaseFilter("x(%s)" % test_data)
expect(filter_instance.params[0]).to_equal(expected_data)
def test_with_invalid_values_should_correctly_parse_value(self):
for params in FILTER_PARAMS_DATA:
for test_data in params["invalid_values"]:
BaseFilter.compile_regex({"name": "x", "params": [params["type"]]})
filter_instance = BaseFilter("x(%s)" % test_data)
expect(filter_instance.params).to_be_null()
class MyFilter(BaseFilter):
@filter_method(BaseFilter.Number, BaseFilter.DecimalNumber)
async def my_filter(self, value1, value2):
return (value1, value2)
class StringFilter(BaseFilter):
@filter_method(BaseFilter.String)
async def my_string_filter(self, value):
return value
class EmptyFilter(BaseFilter):
@filter_method()
async def my_empty_filter(self):
return "ok"
class InvalidFilter(BaseFilter):
async def my_invalid_filter(self, value):
return value
class DoubleStringFilter(BaseFilter):
@filter_method(BaseFilter.String, BaseFilter.String)
async def my_string_filter(self, value1, value2):
return (value1, value2)
class OptionalParamFilter(BaseFilter):
@filter_method(BaseFilter.String, BaseFilter.String)
async def my_optional_filter(self, value1, value2="not provided"):
return (value1, value2)
class PreLoadFilter(BaseFilter):
phase = thumbor.filters.PHASE_PRE_LOAD
@filter_method(BaseFilter.String)
async def my_pre_load_filter(self, value):
return value
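# --- Hedged sketch (not part of the original tests) -----------------------------
# Shows how a compiled filter class parses an expression string outside of the
# test cases, using the MyFilter example defined above. The expression is
# illustrative only.
async def _example_parse_filter_expression():
    MyFilter.pre_compile()
    instance = MyFilter("my_filter(2, 3.5)")
    # params are coerced according to the types declared in @filter_method,
    # so instance.params is expected to be [2, 3.5] here
    return await instance.run()  # -> [(2, 3.5)]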
class BaseFilterTestCase(TestCase):
def setUp(self):
super().setUp()
self.context = self.get_context()
self.factory = FiltersFactory(
[MyFilter, StringFilter, OptionalParamFilter, PreLoadFilter]
)
self.runner = self.get_runner()
def get_runner(self):
return None
def get_context(self):
def is_multiple():
return False
cfg = Config()
importer = Importer(cfg)
importer.import_modules()
context = Context(config=cfg, importer=importer)
context.modules.engine.is_multiple = is_multiple
return context
class RunnerWithParametersFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(
self.context,
"my_string_filter(aaaa):my_string_filter(bbb):my_pre_load_filter(ccc)",
)
def test_runner_with_parameters_should_create_two_instances(self):
post_instances = self.runner.filter_instances[
thumbor.filters.PHASE_POST_TRANSFORM
]
pre_instances = self.runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(2)
expect(post_instances[0].__class__).to_equal(StringFilter)
expect(post_instances[1].__class__).to_equal(StringFilter)
expect(len(pre_instances)).to_equal(1)
expect(pre_instances[0].__class__).to_equal(PreLoadFilter)
@gen_test
async def test_running_post_filters_should_run_only_post_filters(self):
await self.runner.apply_filters(thumbor.filters.PHASE_POST_TRANSFORM)
post_instances = self.runner.filter_instances[
thumbor.filters.PHASE_POST_TRANSFORM
]
pre_instances = self.runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(0)
expect(len(pre_instances)).to_equal(1)
@gen_test
async def test_running_pre_filters_should_run_only_pre_filters(self):
await self.runner.apply_filters(thumbor.filters.PHASE_POST_TRANSFORM)
await self.runner.apply_filters(thumbor.filters.PHASE_PRE_LOAD)
post_instances = self.runner.filter_instances[
thumbor.filters.PHASE_POST_TRANSFORM
]
pre_instances = self.runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(post_instances)).to_equal(0)
expect(len(pre_instances)).to_equal(0)
def test_invalid_filter(self):
InvalidFilter.pre_compile()
expect(hasattr(InvalidFilter, "runnable_method")).to_be_false()
def test_valid_filter_creates_a_runnable_method(self):
MyFilter.pre_compile()
expect(MyFilter.runnable_method).to_equal(MyFilter.my_filter)
@gen_test
async def test_valid_filter_sets_correct_result_value(self):
filter_instance = MyFilter("my_filter(1, -1.1)")
result = await filter_instance.run()
expect(result).to_equal([(1, -1.1)])
@gen_test
async def test_invalid_number_throws_an_error(self):
filter_instance = MyFilter("my_invalid_filter(x, 1)")
result = await filter_instance.run()
expect(hasattr(result, "result")).to_be_false()
@gen_test
async def test_double_string_filter_sets_correct_values(self):
DoubleStringFilter.pre_compile()
filter_instance = DoubleStringFilter("my_string_filter(a, b)")
result = await filter_instance.run()
expect(result).to_equal([("a", "b")])
@gen_test
async def test_with_strings_with_commas_sets_correct_values(self):
DoubleStringFilter.pre_compile()
tests = [
("my_string_filter(a,'b, c')", [("a", "b, c")]),
("my_string_filter('a,b', c)", [("a,b", "c")]),
("my_string_filter('ab', c)", [("ab", "c")]),
("my_string_filter('ab,', c)", [("ab,", "c")]),
("my_string_filter('ab,', ',c')", [("ab,", ",c")]),
("my_string_filter('ab, c)", [("'ab", "c")]),
("my_string_filter('ab, c',d)", [("ab, c", "d")]),
("my_string_filter('a,b, c)", None),
("my_string_filter('a,b, c')", None),
]
for test, expected in tests:
filter_instance = DoubleStringFilter(test)
result = await filter_instance.run()
expect(result).to_equal(expected)
@gen_test
async def test_with_empty_filter_should_call_filter(self):
EmptyFilter.pre_compile()
filter_instance = EmptyFilter("my_empty_filter()")
result = await filter_instance.run()
expect(result).to_equal(["ok"])
class WithOneValidParamFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(
self.context, "my_filter(1, 0a):my_string_filter(aaaa)"
)
def test_should_create_one_instance(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(StringFilter)
class WithParameterContainingColonsFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(
self.context, "my_string_filter(aaaa):my_string_filter(aa:aa)"
)
def test_should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(2)
expect(instances[0].__class__).to_equal(StringFilter)
expect(instances[1].__class__).to_equal(StringFilter)
    def test_should_understand_parameters(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(instances[0].params).to_equal(["aaaa"])
expect(instances[1].params).to_equal(["aa:aa"])
class WithValidParamsFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(
self.context, "my_filter(1, 0):my_string_filter(aaaa)"
)
def test_should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(2)
expect(instances[0].__class__).to_equal(MyFilter)
expect(instances[1].__class__).to_equal(StringFilter)
@gen_test
async def test_when_running_should_create_two_instances(self):
result = []
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
for instance in instances:
result.append(await instance.run())
expect(result[0]).to_equal([(1, 0.0)])
expect(result[1]).to_equal(["aaaa"])
class WithOptionalParamFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(self.context, "my_optional_filter(aa, bb)")
def test_should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(OptionalParamFilter)
@gen_test
async def test_should_understand_parameters(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(await instances[0].run()).to_equal([("aa", "bb")])
class WithOptionalParamsInOptionalFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(self.context, "my_optional_filter(aa)")
def test_should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(OptionalParamFilter)
@gen_test
async def test_should_understand_parameters(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
result = await instances[0].run()
expect(result).to_equal([("aa", "not provided")])
class WithInvalidOptionalFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(self.context, "my_optional_filter()")
def test_should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_POST_TRANSFORM]
expect(len(instances)).to_equal(0)
class WithPreLoadFilterTestCase(BaseFilterTestCase):
def get_runner(self):
return self.factory.create_instances(self.context, "my_pre_load_filter(aaaa)")
def should_create_two_instances(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(len(instances)).to_equal(1)
expect(instances[0].__class__).to_equal(PreLoadFilter)
    def should_understand_parameters(self):
instances = self.runner.filter_instances[thumbor.filters.PHASE_PRE_LOAD]
expect(instances[0].params).to_equal(["aaaa"])
|
modelci/utils/docker_api_utils.py | FerdinandZhong/ML-Model-CI | 170 | 12613686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Date: 10/3/2020
Docker Container API utilization.
"""
from docker.errors import ImageNotFound
def check_container_status(docker_client, name):
"""Check an existed container running status and health.
Args:
docker_client (docker.client.DockerClient):
name (str): Name of the container.
Returns:
"""
state = docker_client.containers.get(name).attrs.get('State')
return state is not None and state.get('Status') == 'running'
def list_containers(docker_client, filters):
return docker_client.containers.list(all=True, filters=filters)
def get_image(docker_client, name, logger):
"""Get Docker image.
Args:
docker_client (docker.client.DockerClient): Docker client instance.
name (str): Image name.
logger (modelci.utils.Logger): logger instance.
Returns:
docker.models.images.Image: Docker image.
"""
try:
image = docker_client.images.get(name)
except ImageNotFound:
logger.info(f'pulling {name}...')
image = docker_client.images.pull(name)
return image
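# --- Hedged usage sketch (not part of the original module) ---------------------
# Typical call pattern against a local Docker daemon; the image and container
# names below are placeholders, and any logger with an .info() method will do.
def _example_usage(logger):
    import docker
    client = docker.from_env()
    image = get_image(client, 'alpine:latest', logger)
    running = check_container_status(client, 'my-serving-container')
    return image, running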
|
ddn/pytorch/node.py | pmorerio/ddn | 161 | 12613703 | # DEEP DECLARATIVE NODES
# Defines the PyTorch interface for data processing nodes and declarative nodes
#
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import torch
from torch.autograd import grad
import warnings
class AbstractNode:
"""Minimal interface for generic data processing node
that produces an output vector given an input vector.
"""
def __init__(self):
"""Create a node"""
self.b = None
self.m = None
self.n = None
def solve(self, *xs):
"""Computes the output of the node given the inputs.
The second returned object provides context for computing the gradient
if necessary. Otherwise it is None.
"""
raise NotImplementedError()
return None, None
def gradient(self, *xs, y=None, v=None, ctx=None):
"""Computes the vector--Jacobian product of the node for given inputs xs
and, optional, output y, gradient vector v and context cxt.
If y or ctx is not provided then they are recomputed from x as needed.
Implementation must return a tuple.
"""
raise NotImplementedError()
return None
def _expand_as_batch(self, x):
"""Helper function to replicate tensor along a new batch dimension
without allocating new memory.
Arguments:
x: (...) Torch tensor,
input tensor
Return Values:
batched tensor: (b, ...) Torch tensor
"""
return x.expand(self.b, *x.size())
class AbstractDeclarativeNode(AbstractNode):
"""A general deep declarative node defined by unconstrained parameterized
optimization problems of the form
minimize (over y) f(x, y)
where x is given (as a vector) and f is a scalar-valued function.
Derived classes must implement the `objective` and `solve` functions.
"""
def __init__(self, eps=1e-12, gamma=None, chunk_size=None):
"""Create a declarative node
"""
super().__init__()
self.eps = eps # tolerance to check if optimality conditions satisfied
self.gamma = gamma # damping factor: H <-- H + gamma * I
self.chunk_size = chunk_size # input is divided into chunks of at most chunk_size (None = infinity)
def objective(self, *xs, y):
"""Evaluates the objective function on a given input-output pair.
Multiple input tensors can be passed as arguments, but the final
argument must be the output tensor.
"""
warnings.warn("objective function not implemented")
return None
def solve(self, *xs):
"""Solves the optimization problem
y in argmin_u f(x, u)
and returns two outputs. The first is the optimal solution y and the
second contains the context for computing the gradient, such as the
Lagrange multipliers in the case of a constrained problem, or None
if no context is available/needed.
Multiple input tensors can be passed as arguments.
"""
raise NotImplementedError()
# Todo: LBFGS fall-back solver
return None, None
def gradient(self, *xs, y=None, v=None, ctx=None):
"""Computes the vector--Jacobian product, that is, the gradient of the
loss function with respect to the problem parameters. The returned
gradient is a tuple of batched Torch tensors. Can be overridden by the
derived class to provide a more efficient implementation.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor or None,
batch of minima of the objective function
v: (b, ...) Torch tensor or None,
batch of gradients of the loss function with respect to the
problem output J_Y(x,y)
ctx: dictionary of contextual information used for computing the
gradient
Return Values:
gradients: ((b, ...), ...) tuple of Torch tensors or Nones,
batch of gradients of the loss function with respect to the
problem parameters;
strictly, returns the vector--Jacobian products J_Y(x,y) * y'(x)
"""
xs, xs_split, xs_sizes, y, v, ctx = self._gradient_init(xs, y, v, ctx)
fY, fYY, fXY = self._get_objective_derivatives(xs, y)
if not self._check_optimality_cond(fY):
warnings.warn(
"Non-zero objective function gradient at y:\n{}".format(
fY.detach().squeeze().cpu().numpy()))
# Form H:
H = fYY
H = 0.5 * (H + H.transpose(1, 2)) # Ensure that H is symmetric
if self.gamma is not None:
H += self.gamma * torch.eye(
self.m, dtype=H.dtype, device=H.device).unsqueeze(0)
# Solve u = -H^-1 v:
v = v.reshape(self.b, -1, 1)
u = self._solve_linear_system(H, -1.0 * v) # bxmx1
u = u.squeeze(-1) # bxm
# ToDo: check for NaN values in u
# Compute -b_i^T H^-1 v (== b_i^T u) for all i:
gradients = []
for x_split, x_size, n in zip(xs_split, xs_sizes, self.n):
if isinstance(x_split[0], torch.Tensor) and x_split[0].requires_grad:
gradient = []
for Bi in fXY(x_split):
gradient.append(torch.einsum('bmc,bm->bc', (Bi, u)))
gradient = torch.cat(gradient, dim=-1) # bxn
gradients.append(gradient.reshape(x_size))
else:
gradients.append(None)
return tuple(gradients)
def jacobian(self, *xs, y=None, ctx=None):
"""Computes the Jacobian, that is, the derivative of the output with
respect to the problem parameters. The returned Jacobian is a tuple of
batched Torch tensors. Can be overridden by the derived class to provide
a more efficient implementation.
Note: this function is highly inefficient so should be used for learning
purposes only (computes the vector--Jacobian product multiple times).
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor or None,
batch of minima of the objective function
ctx: dictionary of contextual information used for computing the
gradient
Return Values:
jacobians: ((b, ...), ...) tuple of Torch tensors or Nones,
batch of Jacobians of the loss function with respect to the
problem parameters
"""
v = torch.zeros_like(y) # v: bxm1xm2x...
b = v.size(0)
v = v.reshape(b, -1) # v: bxm
m = v.size(-1)
jacobians = [[] for x in xs]
for i in range(m):
v[:, i] = 1.0
gradients = self.gradient(*xs, y=y, v=v.reshape_as(y), ctx=ctx)
v[:, i] = 0.0
for j in range(len(xs)):
jacobians[j].append(gradients[j])
jacobians = [torch.stack(jacobian, dim=1).reshape(
y.shape + xs[i].shape[1:]) if (jacobian[0] is not None
) else None for i, jacobian in enumerate(jacobians)]
return tuple(jacobians)
def _gradient_init(self, xs, y, v, ctx):
# Compute optimal value if have not already done so:
if y is None:
y, ctx = torch.no_grad()(self.solve)(*xs)
y.requires_grad = True
# Set incoming gradient v = J_Y(x,y) to one if not specified:
if v is None:
v = torch.ones_like(y)
self.b = y.size(0)
self.m = y.reshape(self.b, -1).size(-1)
# Split each input x into a tuple of n//chunk_size tensors of size (b, chunk_size):
# Required since gradients can only be computed wrt individual
# tensors, not slices of a tensor. See:
# https://discuss.pytorch.org/t/how-to-calculate-gradients-wrt-one-of-inputs/24407
xs_split, xs_sizes, self.n = self._split_inputs(xs)
xs = self._cat_inputs(xs_split, xs_sizes)
return xs, xs_split, xs_sizes, y, v, ctx
@torch.enable_grad()
def _split_inputs(self, xs):
"""Split inputs into a sequence of tensors by input dimension
For each input x in xs, generates a tuple of n//chunk_size tensors of size (b, chunk_size)
"""
xs_split, xs_sizes, xs_n = [], [], []
for x in xs: # Loop over input tuple
if isinstance(x, torch.Tensor) and x.requires_grad:
if self.chunk_size is None:
xs_split.append((x.reshape(self.b, -1),))
else:
xs_split.append(x.reshape(self.b, -1).split(self.chunk_size, dim=-1))
xs_sizes.append(x.size())
xs_n.append(x.reshape(self.b, -1).size(-1))
else:
xs_split.append((x,))
xs_sizes.append(None) # May not be a tensor
xs_n.append(None)
return tuple(xs_split), tuple(xs_sizes), tuple(xs_n)
@torch.enable_grad()
def _cat_inputs(self, xs_split, xs_sizes):
"""Concatenate inputs from a sequence of tensors
"""
xs = []
for x_split, x_size in zip(xs_split, xs_sizes): # Loop over input tuple
if x_size is None:
xs.append(x_split[0])
else:
xs.append(torch.cat(x_split, dim=-1).reshape(x_size))
return tuple(xs)
def _get_objective_derivatives(self, xs, y):
# Evaluate objective function at (xs,y):
f = torch.enable_grad()(self.objective)(*xs, y=y) # b
# Compute partial derivative of f wrt y at (xs,y):
fY = grad(f, y, grad_outputs=torch.ones_like(f), create_graph=True)[0]
fY = torch.enable_grad()(fY.reshape)(self.b, -1) # bxm
if not fY.requires_grad: # if fY is independent of y
fY.requires_grad = True
# Compute second-order partial derivative of f wrt y at (xs,y):
fYY = self._batch_jacobian(fY, y) # bxmxm
fYY = fYY.detach() if fYY is not None else y.new_zeros(
self.b, self.m, self.m)
# Create function that returns generator expression for fXY given input:
fXY = lambda x: (fXiY.detach()
if fXiY is not None else torch.zeros_like(fY).unsqueeze(-1)
for fXiY in (self._batch_jacobian(fY, xi) for xi in x))
return fY, fYY, fXY
def _check_optimality_cond(self, fY):
"""Checks that the problem's 1st-order optimality condition is satisfied
"""
return torch.allclose(fY, torch.zeros_like(fY), rtol=0.0, atol=self.eps)
def _solve_linear_system(self, A, B):
"""Solves linear system AX = B.
If B is a tuple (B1, B2, ...), returns tuple (X1, X2, ...).
Otherwise returns X.
"""
B_sizes = None
# If B is a tuple, concatenate into single tensor:
if isinstance(B, (tuple, list)):
B_sizes = list(map(lambda x: x.size(-1), B))
B = torch.cat(B, dim=-1)
# Ensure B is 2D (bxmxn):
if len(B.size()) == 2:
B = B.unsqueeze(-1)
try: # Batchwise Cholesky solve
A_decomp = torch.cholesky(A, upper=False)
X = torch.cholesky_solve(B, A_decomp, upper=False) # bxmxn
except: # Revert to loop if batchwise solve fails
X = torch.zeros_like(B)
for i in range(A.size(0)):
try: # Cholesky solve
A_decomp = torch.cholesky(A[i, ...], upper=False)
X[i, ...] = torch.cholesky_solve(B[i, ...], A_decomp,
upper=False) # mxn
except: # Revert to LU solve
X[i, ...], _ = torch.solve(B[i, ...], A[i, ...]) # mxn
if B_sizes is not None:
X = X.split(B_sizes, dim=-1)
return X
@torch.enable_grad()
def _batch_jacobian(self, y, x, create_graph=False):
"""Compute Jacobian of y with respect to x and reduce over batch
dimension.
Arguments:
y: (b, m1, m2, ...) Torch tensor,
batch of output tensors
x: (b, n1, n2, ...) Torch tensor,
batch of input tensors
create_graph: Boolean
if True, graph of the derivative will be constructed,
allowing the computation of higher order derivative products
Return Values:
jacobian: (b, m, n) Torch tensor,
batch of Jacobian matrices, collecting the partial derivatives
of y with respect to x
m = product(m_i)
n = product(n_i)
Assumption:
If x is not in graph for y[:, 0], then x is not in the graph for
y[:, i], for all i
"""
y = y.reshape(self.b, -1) # bxm
m = y.size(-1)
n = x.reshape(self.b, -1).size(-1)
jacobian = y.new_zeros(self.b, m, n) # bxmxn
for i in range(m):
grad_outputs = torch.zeros_like(y, requires_grad=False) # bxm
grad_outputs[:, i] = 1.0
yiX, = grad(y, x, grad_outputs=grad_outputs, retain_graph=True,
create_graph=create_graph, allow_unused=True) # bxn1xn2x...
if yiX is None: # grad returns None instead of zero
return None # If any are None, all are None
jacobian[:, i:(i+1), :] = yiX.reshape(self.b, -1).unsqueeze(1) # bx1xn
return jacobian # bxmxn
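# --- Hedged sketch (not part of the original file) ------------------------------
# A minimal unconstrained declarative node, y(x) = argmin_u ||u - x||^2, whose
# closed-form solution is y = x. It only illustrates the objective/solve interface
# and how gradient() is called; the class and function names are illustrative.
class _ExampleSquaredErrorNode(AbstractDeclarativeNode):
    def objective(self, x, y):
        # f(x, y) = ||y - x||^2, reduced over all non-batch dimensions
        return ((y - x) ** 2).reshape(x.size(0), -1).sum(dim=-1)
    def solve(self, x):
        # the minimizer is y = x; no extra context is needed
        return x.detach().clone(), None
def _example_gradient_check():
    # Since y(x) = x, the vector--Jacobian product should equal v itself.
    x = torch.randn(2, 3, requires_grad=True)
    node = _ExampleSquaredErrorNode()
    y, ctx = node.solve(x)
    y.requires_grad = True
    return node.gradient(x, y=y, v=torch.ones_like(y), ctx=ctx)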
class EqConstDeclarativeNode(AbstractDeclarativeNode):
"""A general deep declarative node defined by a parameterized optimization
problem with at least one (non-linear) equality constraint of the form
minimize (over y) f(x, y)
subject to h_i(x, y) = 0
where x is given (as a vector) and f and h_i are scalar-valued functions.
Derived classes must implement the `objective`, `equality_constraints` and
`solve` functions.
"""
def __init__(self, eps=1e-12, gamma=None, chunk_size=None):
"""Create an equality constrained declarative node
"""
        super().__init__(eps=eps, gamma=gamma, chunk_size=chunk_size)
def equality_constraints(self, *xs, y):
"""Evaluates the equality constraint functions on a given input-output
pair. Multiple input tensors can be passed as arguments, but the final
argument must be the output tensor.
"""
warnings.warn("equality constraint function not implemented")
return None
def solve(self, *xs):
"""Solves the optimization problem
y in argmin_u f(x, u) subject to h_i(x, u) = 0
and returns the vector y. Optionally, also returns the Lagrange
multipliers associated with the equality constraints where the
Lagrangian is defined as
L(x, y, nu) = f(x, y) - sum_i ctx['nu'][i] * h_i(x, y)
Otherwise, should return None as second return variable.
If the calling function only cares about the optimal solution
(and not the context) then call as
y_star, _ = self.solve(x)
Multiple input tensors can be passed as arguments.
"""
raise NotImplementedError()
return None, None
def gradient(self, *xs, y=None, v=None, ctx=None):
"""Computes the vector--Jacobian product, that is, the gradient of the
loss function with respect to the problem parameters. The returned
gradient is a tuple of batched Torch tensors. Can be overridden by the
derived class to provide a more efficient implementation.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor or None,
batch of minima of the objective function
v: (b, ...) Torch tensor or None,
batch of gradients of the loss function with respect to the
problem output J_Y(x,y)
ctx: dictionary of contextual information used for computing the
gradient
Return Values:
gradients: ((b, ...), ...) tuple of Torch tensors or Nones,
batch of gradients of the loss function with respect to the
problem parameters;
strictly, returns the vector--Jacobian products J_Y(x,y) * y'(x)
"""
xs, xs_split, xs_sizes, y, v, ctx = self._gradient_init(xs, y, v, ctx)
fY, fYY, fXY = self._get_objective_derivatives(xs, y)
hY, hYY, hXY, hX = self._get_constraint_derivatives(xs, y)
nu = self._get_nu(fY, hY) if (ctx is None or 'nu' not in ctx
) else self._ensure2d(ctx['nu'])
if not self._check_optimality_cond(fY, hY, nu):
warnings.warn("Non-zero Lagrangian gradient at y:\n{}\n"
"fY: {}, hY: {}, nu: {}".format((fY - torch.einsum('ab,abc->ac',
(nu, hY))).detach().squeeze().cpu().numpy(),
fY.detach().squeeze().cpu().numpy(),
hY.detach().squeeze().cpu().numpy(),
nu.detach().squeeze().cpu().numpy()))
# Form H:
H = fYY - sum(torch.einsum('b,bmn->bmn', (nu[:, i], hiYY))
for i, hiYY in enumerate(hYY))
H = 0.5 * (H + H.transpose(1, 2)) # Ensure that H is symmetric
if self.gamma is not None:
H += self.gamma * torch.eye(
self.m, dtype=H.dtype, device=H.device).unsqueeze(0)
# Solve u = -H^-1 v (bxm) and t = H^-1 A^T (bxmxp):
A = hY.detach() # Shares storage with hY
v = v.reshape(self.b, -1, 1) # bxmx1
u, t = self._solve_linear_system(H, (-1.0 * v, A.transpose(-2, -1)))
u = u.squeeze(-1) # bxm
# ToDo: check for NaN values in u and t
# Solve s = (A H^-1 A^T)^-1 A H^-1 v = -(A t)^-1 A u:
s = self._solve_linear_system(torch.einsum('bpm,bmq->bpq', (A, t)),
torch.einsum('bpm,bm->bp', (A, -1.0 * u))) # bxpx1
s = s.squeeze(-1) # bxp
# ToDo: check for NaN values in s
# Compute u + ts:
uts = u + torch.einsum('bmp,bp->bm', (t, s)) # bxm
# Compute Bi^T (u + ts) - Ci^T s for all i:
gradients = []
for x_split, x_size, n in zip(xs_split, xs_sizes, self.n):
if isinstance(x_split[0],torch.Tensor) and x_split[0].requires_grad:
gradient = []
for i, Bi in enumerate(fXY(x_split)):
Bi -= sum(torch.einsum('b,bmc->bmc', (nu[:, j], hjXiY))
for j, hjXiY in enumerate(hXY(x_split[i])))
g = torch.einsum('bmc,bm->bc', (Bi, uts))
Ci = hX(x_split[i])
if Ci is not None:
g -= torch.einsum('bpc,bp->bc', (Ci, s))
gradient.append(g)
gradient = torch.cat(gradient, dim=-1) # bxn
gradients.append(gradient.reshape(x_size))
else:
gradients.append(None)
return tuple(gradients)
def _get_constraint_derivatives(self, xs, y):
# Evaluate constraint function(s) at (xs,y):
h = torch.enable_grad()(self._get_constraint_set)(xs, y) # bxp
# Compute partial derivative of h wrt y at (xs,y):
hY = self._batch_jacobian(h, y, create_graph=True) # bxpxm
if not hY.requires_grad: # if hY is independent of y
hY.requires_grad = True
# Compute 2nd-order partial derivative of h wrt y at (xs,y):
p = h.size(-1)
hYY = (hiYY.detach() for hiYY in (
self._batch_jacobian(torch.enable_grad()(hY.select)(1, i), y)
for i in range(p)
) if hiYY is not None)
# Compute 2nd-order partial derivative of hj wrt y and xi at (xs,y):
hXY = lambda x: (hiXY.detach() for hiXY in (
self._batch_jacobian(torch.enable_grad()(hY.select)(1, i), x)
for i in range(p)
) if hiXY is not None)
# Compute partial derivative of h wrt xi at (xs,y):
def hX(x):
hXi = self._batch_jacobian(h, x, create_graph=False)
return None if hXi is None else hXi.detach()
return hY, hYY, hXY, hX
def _get_constraint_set(self, xs, y):
"""Filters constraints.
"""
# ToDo: remove duplicate constraints (first-order identical)
h = self.equality_constraints(*xs, y=y)
if h is not None:
h = self._ensure2d(h)
if not self._check_equality_constraints(h):
warnings.warn("Constraints not satisfied exactly:\n{}".format(
h.detach().squeeze().cpu().numpy()))
return h
def _get_nu(self, fY, hY):
"""Compute nu (ie lambda) if not provided by the problem's solver.
That is, solve: hY^T nu = fY^T.
"""
p = hY.size(1)
nu = fY.new_zeros(self.b, p)
for i in range(self.b): # loop over batch
solution,_ = torch.lstsq(fY[i, :].unsqueeze(-1), hY[i, :, :].t())
nu[i, :] = solution[:p, :].squeeze() # extract first p values
return nu
def _check_equality_constraints(self, h):
"""Check that the problem's constraints are satisfied.
"""
return torch.allclose(h, torch.zeros_like(h), rtol=0.0, atol=self.eps)
def _check_optimality_cond(self, fY, hY=None, nu=None):
"""Checks that the problem's first-order optimality condition is
satisfied.
"""
if hY is None:
return super()._check_optimality_cond(fY)
nu = self._get_nu(fY, hY) if (nu is None) else nu
# Check for invalid Lagrangian (gradient of constraint zero at optimum)
if torch.allclose(hY, torch.zeros_like(hY), rtol=0.0, atol=self.eps):
warnings.warn(
"Gradient of constraint function vanishes at the optimum.")
return True
LY = fY - torch.einsum('ab,abc->ac', (nu, hY)) # bxm - bxp * bxpxm
return torch.allclose(LY, torch.zeros_like(fY), rtol=0.0, atol=self.eps)
def _ensure2d(self, x):
return x.unsqueeze(-1) if len(x.size()) == 1 else x
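# --- Hedged sketch (not part of the original file) ------------------------------
# A toy equality-constrained node projecting x onto the unit sphere:
#     y(x) = argmin_u ||u - x||^2  subject to  ||u||^2 - 1 = 0
# with the closed-form solution y = x / ||x||. Names are illustrative only; the
# Lagrange multiplier is left to be recovered inside gradient().
class _ExampleSphereProjectionNode(EqConstDeclarativeNode):
    def objective(self, x, y):
        return ((y - x) ** 2).sum(dim=-1)
    def equality_constraints(self, x, y):
        return (y ** 2).sum(dim=-1) - 1.0
    def solve(self, x):
        y = x / x.norm(p=2, dim=-1, keepdim=True)
        return y.detach(), None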
class IneqConstDeclarativeNode(EqConstDeclarativeNode):
"""A general deep declarative node defined by a parameterized optimization
problem with at least one (non-linear) inequality constraint of the form
minimize (over y) f(x, y)
subject to h_i(x, y) == 0
g_i(x, y) <= 0
where x is given (as a vector) and f, h_i and g_i are scalar-valued
functions. Derived classes must implement the `objective`,
`inequality_constraints` and `solve` functions.
"""
def __init__(self, eps=1e-12, gamma=None, chunk_size=None):
"""Create an inequality constrained declarative node
"""
        super().__init__(eps=eps, gamma=gamma, chunk_size=chunk_size)
def equality_constraints(self, *xs, y):
"""Evaluates the equality constraint functions on a given input-output
pair. Multiple input tensors can be passed as arguments, but the final
argument must be the output tensor.
"""
return None
def inequality_constraints(self, *xs, y):
"""Evaluates the inequality constraint functions on a given input-output
pair. Multiple input tensors can be passed as arguments, but the final
argument must be the output tensor.
"""
warnings.warn("inequality constraint function not implemented")
return None
def gradient(self, *xs, y=None, v=None, ctx=None):
"""Computes the vector--Jacobian product, that is, the gradient of the
loss function with respect to the problem parameters. The returned
gradient is a tuple of batched Torch tensors. Can be overridden by the
derived class to provide a more efficient implementation.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor or None,
batch of minima of the objective function
v: (b, ...) Torch tensor or None,
batch of gradients of the loss function with respect to the
problem output J_Y(x,y)
ctx: dictionary of contextual information used for computing the
gradient
Return Values:
gradients: ((b, ...), ...) tuple of Torch tensors or Nones,
batch of gradients of the loss function with respect to the
problem parameters;
strictly, returns the vector--Jacobian products J_Y(x,y) * y'(x)
"""
xs, xs_split, xs_sizes, y, v, ctx = self._gradient_init(xs, y, v, ctx)
# Collect batch indices such that each sub-batch will have the same
# number of active constraints:
indices_list, unconstrained = self._get_uniform_indices(xs, y)
# If all batch elements have same number of active constraints:
if indices_list is None:
if unconstrained:
gradients = AbstractDeclarativeNode.gradient(self,
*xs, y=y, v=v, ctx=ctx)
else:
gradients = EqConstDeclarativeNode.gradient(self,
*xs, y=y, v=v, ctx=ctx)
else: # Otherwise, loop over uniform batch subsets:
gradients = [torch.zeros_like(x)
if x.requires_grad else None for x in xs]
for indices in indices_list:
xs_subset = tuple([x.index_select(0, indices).requires_grad_()
for x in xs])
y_subset = y.index_select(0, indices).requires_grad_()
v_subset = v.index_select(0, indices)
ctx_subset = None if ctx is None else {
key : value.index_select(0, indices)
if isinstance(value, torch.Tensor) else value
for key, value in ctx.items()}
if unconstrained:
gradients_subset = AbstractDeclarativeNode.gradient(self,
*xs_subset, y=y_subset, v=v_subset, ctx=ctx_subset)
                    unconstrained = False # Only first subset is unconstrained
else:
gradients_subset = EqConstDeclarativeNode.gradient(self,
*xs_subset, y=y_subset, v=v_subset, ctx=ctx_subset)
# Insert gradients into correct locations:
for i in range(len(gradients)):
if gradients[i] is not None:
gradients[i][indices, ...] = gradients_subset[i]
gradients = tuple(gradients)
return gradients
def _get_uniform_indices(self, xs, y):
"""Collects batch indices such that each subset will have the same
number of active constraints.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor,
batch of minima of the objective function
Return values:
indices_list: [(k1), (k2), ...] list of Torch tensors or None,
list of variable-length index tensors
unconstrained: bool,
true if first subset has no active constraints
"""
h = self.equality_constraints(*xs, y=y) # bxp or None
p = 0 if h is None else self._ensure2d(h).size(-1)
g = self.inequality_constraints(*xs, y=y) # bxq
if g is None:
indices_list = None
unconstrained = True if p == 0 else False
else:
g = self._ensure2d(g)
q = torch.stack([gi.isclose(torch.zeros_like(gi),
rtol=0.0, atol=self.eps).long().sum() for gi in g])
q_sorted, indices = q.sort()
q_unique, counts = q_sorted.unique_consecutive(return_counts=True)
indices_list = indices.split(counts.split(1)) if (
q_unique.size(-1) > 1) else None
unconstrained = True if (p + q_unique[0] == 0) else False
return indices_list, unconstrained
def _get_constraint_set(self, xs, y):
"""Filters constraints.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor,
batch of minima of the objective function
Return values:
constraint_set: (b, p) Torch tensor,
tensor of active constraints
Assumptions:
batch has a uniform number of active constraints
"""
# ToDo: remove duplicate constraints (first-order identical)
constraint_set = None
h = self.equality_constraints(*xs, y=y) # bxp or None
if h is not None:
h = self._ensure2d(h)
if not self._check_equality_constraints(h):
warnings.warn(
"Equality constraints not satisfied exactly:\n{}".format(
h.detach().squeeze().cpu().numpy()))
constraint_set = h # bxp
g = self.inequality_constraints(*xs, y=y) # bxq
if g is not None:
g = self._ensure2d(g)
if not self._check_inequality_constraints(g):
warnings.warn(
"Inequality constraints not satisfied exactly:\n{}".format(
g.detach().squeeze().cpu().numpy()))
# Identify active constraints:
mask = g.isclose(torch.zeros_like(g), rtol=0.0, atol=self.eps)
g = g.masked_select(mask).reshape(self.b, -1) if mask.any() else None
if h is None:
constraint_set = g # bxq
elif g is not None:
constraint_set = torch.cat((h, g), dim=-1) # bx(p+q)
return constraint_set
def _check_inequality_constraints(self, g):
"""Check that the problem's constraints are satisfied."""
return torch.all(g <= self.eps)
class LinEqConstDeclarativeNode(EqConstDeclarativeNode):
"""A deep declarative node defined by a linear equality constrained
parameterized optimization problem of the form:
minimize (over y) f(x, y)
subject to A y = d
where x is given, and A and d are independent of x. Derived classes must
implement the objective and solve functions.
"""
def __init__(self, eps=1e-12, gamma=None, chunk_size=None):
"""Create a linear equality constrained declarative node
"""
        super().__init__(eps=eps, gamma=gamma, chunk_size=chunk_size)
def linear_constraint_parameters(self, y):
"""Defines the linear equality constraint parameters A and d, where the
constraint is given by Ay = d.
Arguments:
y: (b, ...) Torch tensor,
batch of minima of the objective function
Return Values:
(A, d): ((p, m), (p)) tuple of Torch tensors,
linear equality constraint parameters
"""
raise NotImplementedError()
return None, None
def gradient(self, *xs, y=None, v=None, ctx=None):
"""Computes the vector--Jacobian product, that is, the gradient of the
loss function with respect to the problem parameters. The returned
gradient is a tuple of batched Torch tensors. Can be overridden by the
derived class to provide a more efficient implementation.
Arguments:
xs: ((b, ...), ...) tuple of Torch tensors,
tuple of batches of input tensors
y: (b, ...) Torch tensor or None,
batch of minima of the objective function
v: (b, ...) Torch tensor or None,
batch of gradients of the loss function with respect to the
problem output J_Y(x,y)
ctx: dictionary of contextual information used for computing the
gradient
Return Values:
gradients: ((b, ...), ...) tuple of Torch tensors or Nones,
batch of gradients of the loss function with respect to the
problem parameters;
strictly, returns the vector--Jacobian products J_Y(x,y) * y'(x)
"""
xs, xs_split, xs_sizes, y, v, ctx = self._gradient_init(xs, y, v, ctx)
fY, fYY, fXY = self._get_objective_derivatives(xs, y)
# Get constraint parameters and form batch:
A, d = self.linear_constraint_parameters(y)
A = self._expand_as_batch(A)
d = self._expand_as_batch(d)
# Check linear equality constraints are satisfied:
h = torch.einsum('bpm,bm->bp', (A, y)) - d
if not self._check_equality_constraints(h):
warnings.warn("Constraints not satisfied exactly:\n{}".format(
h.detach().squeeze().cpu().numpy()))
# Form H:
H = fYY
H = 0.5 * (H + H.transpose(1, 2)) # Ensure that H is symmetric
if self.gamma is not None:
H += self.gamma * torch.eye(
self.m, dtype=H.dtype, device=H.device).unsqueeze(0)
# Solve u = -H^-1 v (bxm) and t = H^-1 A^T (bxmxp):
v = v.reshape(self.b, -1, 1) # bxmx1
u, t = self._solve_linear_system(H, (-1.0 * v, A.transpose(-2, -1)))
u = u.squeeze(-1) # bxm
# ToDo: check for NaN values in u and t
# Solve s = (A H^-1 A^T)^-1 A H^-1 v = -(A t)^-1 A u:
s = self._solve_linear_system(torch.einsum('bpm,bmq->bpq', (A, t)),
torch.einsum('bpm,bm->bp', (A, -1.0 * u))) # bxpx1
s = s.squeeze(-1) # bxp
# ToDo: check for NaN values in s
# Compute u + ts = -H^-1 v + H^-1 A^T (A H^-1 A^T)^-1 A H^-1 v:
uts = u + torch.einsum('bmp,bp->bm', (t, s)) # bxm
# Compute Bi^T (u + ts) for all i:
gradients = []
for x_split, x_size, n in zip(xs_split, xs_sizes, self.n):
if isinstance(x_split[0], torch.Tensor) and x_split[0].requires_grad:
gradient = []
for i, Bi in enumerate(fXY(x_split)):
                    gradient.append(torch.einsum('bmc,bm->bc', (Bi, uts)))  # Bi^T (u + ts), as per the comment above
gradient = torch.cat(gradient, dim=-1) # bxn
gradients.append(gradient.reshape(x_size))
else:
gradients.append(None)
return tuple(gradients)
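# Illustrative example (added by the editor, not part of the original library):
# a minimal subclass of LinEqConstDeclarativeNode that projects its input onto
# the hyperplane sum(y) = 1.  The objective/solve signatures follow the
# docstrings above; the base-class helpers used by gradient() are assumed to
# behave as documented, so treat this as a sketch rather than reference code.
class ExampleSimplexProjectionNode(LinEqConstDeclarativeNode):
    def objective(self, x, y=None):
        # f(x, y) = 0.5 * ||y - x||^2, batched over the first dimension
        return 0.5 * ((y - x) ** 2).sum(dim=-1)
    def linear_constraint_parameters(self, y):
        # A y = d with A = [1, ..., 1] (shape (1, m)) and d = [1]: entries of y sum to one
        m = y.size(-1)
        A = torch.ones(1, m, dtype=y.dtype, device=y.device)
        d = torch.ones(1, dtype=y.dtype, device=y.device)
        return A, d
    def solve(self, x):
        # Closed-form minimizer: orthogonal projection of x onto sum(y) = 1
        m = x.size(-1)
        y = x + (1.0 - x.sum(dim=-1, keepdim=True)) / m
        return y.detach(), None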
class DeclarativeFunction(torch.autograd.Function):
"""Generic declarative autograd function.
Defines the forward and backward functions. Saves all inputs and outputs,
which may be memory-inefficient for the specific problem.
Assumptions:
* All inputs are PyTorch tensors
* All inputs have a single batch dimension (b, ...)
"""
@staticmethod
def forward(ctx, problem, *inputs):
output, solve_ctx = torch.no_grad()(problem.solve)(*inputs)
ctx.save_for_backward(output, *inputs)
ctx.problem = problem
ctx.solve_ctx = solve_ctx
return output.clone()
@staticmethod
def backward(ctx, grad_output):
output, *inputs = ctx.saved_tensors
problem = ctx.problem
solve_ctx = ctx.solve_ctx
output.requires_grad = True
inputs = tuple(inputs)
grad_inputs = problem.gradient(*inputs, y=output, v=grad_output,
ctx=solve_ctx)
return (None, *grad_inputs)
class DeclarativeLayer(torch.nn.Module):
"""Generic declarative layer.
Assumptions:
* All inputs are PyTorch tensors
* All inputs have a single batch dimension (b, ...)
Usage:
problem = <derived class of *DeclarativeNode>
declarative_layer = DeclarativeLayer(problem)
y = declarative_layer(x1, x2, ...)
"""
def __init__(self, problem):
super(DeclarativeLayer, self).__init__()
self.problem = problem
def forward(self, *inputs):
return DeclarativeFunction.apply(self.problem, *inputs)
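# Hedged usage sketch (editor's illustration, not in the original module): wire
# the example node defined above into an autograd graph via DeclarativeLayer.
# It assumes the abstract base class sets up the attributes used by gradient()
# (self.b, self.m, self.n, ...); if the base class differs, adapt accordingly.
if __name__ == '__main__':
    problem = ExampleSimplexProjectionNode()
    layer = DeclarativeLayer(problem)
    x = torch.randn(4, 8, requires_grad=True)  # batch of 4 eight-dimensional inputs
    y = layer(x)                               # forward pass solves the problem; rows of y sum to one
    y.sum().backward()                         # backward pass calls problem.gradient()
    print(y.sum(dim=-1), x.grad.shape)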
|
mvpa2/cmdline/cmd_searchlight.py | nno/PyMVPA | 227 | 12613718 | <gh_stars>100-1000
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Traveling ROI analysis
"""
# magic line for manpage summary
# man: -*- % traveling ROI analysis
__docformat__ = 'restructuredtext'
import numpy as np
import sys
import os
import argparse
from mvpa2.base import verbose, warning, error
from mvpa2.datasets import vstack
if __debug__:
from mvpa2.base import debug
from mvpa2.cmdline.helpers \
import parser_add_common_opt, ds2hdf5, arg2ds, \
get_crossvalidation_instance, crossvalidation_opts_grp, \
arg2neighbor, script2obj
parser_args = {
'formatter_class': argparse.RawDescriptionHelpFormatter,
}
searchlight_opts_grp = ('options for searchlight setup', [
(('--payload',), dict(required=True,
help="""switch to select a particular analysis type to be run in a
searchlight fashion on a dataset. Depending on the choice the
corresponding analysis setup options are evaluated. 'cv' computes
a cross-validation analysis. Alternatively, the argument to this option
can also be a script filename in which a custom measure is built that
        is then run as a searchlight.""")),
(('--neighbors',), dict(type=arg2neighbor, metavar='SPEC', action='append',
required=True,
help="""define the size and shape of an ROI with respect to a
center/seed location. If a single integer number is given, it is
interpreted as the radius (in number of grid elements) around a seed
location. By default grid coordinates for features are taken from
a 'voxel_indices' feature attribute in the input dataset. If coordinates
shall be taken from a different attribute, the radius value can be
prefixed with the attribute name, i.e. 'altcoords:2'. For ROI shapes
other than spheres (with potentially additional parameters), the shape
name can be specified as well, i.e. 'voxel_indices:HollowSphere:3:2'.
All neighborhood objects from the mvpa2.misc.neighborhood module are
supported. For custom ROI shapes it is also possible to pass a script
filename, or an attribute name plus script filename combination, i.e.
'voxel_indices:myownshape.py' (advanced). It is possible to specify
this option multiple times to define multi-space ROI shapes for, e.g.,
spatio-temporal searchlights.""")),
(('--nproc',), dict(type=int, default=1,
help="""Use the specific number or worker processes for computing.""")),
(('--multiproc-backend',), dict(choices=('native', 'hdf5'),
default='native',
help="""Specifies the way results are provided back from a processing
block in case of --nproc > 1. 'native' is pickling/unpickling of
results, while 'hdf5' uses HDF5 based file storage. 'hdf5' might be more
time and memory efficient in some cases.""")),
(('--aggregate-fx',), dict(type=script2obj,
help="""use a custom result aggregation function for the searchlight
""")),
(('--ds-preproc-fx',), dict(type=script2obj,
help="""custom preprocessing function to be applied immediately after
loading the data""")),
])
searchlight_constraints_opts_grp = ('options for constraining the searchlight', [
(('--scatter-rois',), dict(type=arg2neighbor, metavar='SPEC',
help="""scatter ROI locations across the available space. The arguments
supported by this option are identical to those of --neighbors. ROI
locations are randomly picked from all possible locations with the
        constraint that the center coordinates of any ROI are NOT within
the neighborhood (as defined by this option's argument) of a second
ROI. Increasing the size of the neighborhood therefore increases the
        sparseness of the sampling.""")),
(('--roi-attr',), dict(metavar='ATTR/EXPR', nargs='+',
help="""name of a feature attribute whose non-zero values define
possible ROI seeds/centers. Alternatively, this can also be an
expression like: parcellation_roi eq 16 (see the 'select' command
        for information on which expressions are supported).""")),
])
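# Editor's illustration (not part of the original module): an assumed command
# line exercising the option groups defined above.  The file names, the
# input/output options (added via setup_parser() below) and the exact front-end
# syntax are assumptions; the --payload/--neighbors/--scatter-rois/--nproc
# values follow the help texts above.
#
#   pymvpa2 searchlight -i bold.hdf5 -o slmap.hdf5 \
#       --payload mymeasure.py \
#       --neighbors 3 --neighbors voxel_indices:HollowSphere:3:2 \
#       --scatter-rois 5 --nproc 4
#
# Passing --neighbors twice defines a multi-space ROI shape, and --scatter-rois
# thins the seed locations as described in its help text.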
# XXX this should eventually move into the main code base, once
# sufficiently generalized
def _fill_in_scattered_results(sl, dataset, roi_ids, results):
"""this requires the searchlight conditional attribute 'roi_feature_ids'
to be enabled"""
import numpy as np
from mvpa2.datasets import Dataset
resmap = None
probmap = None
for resblock in results:
for res in resblock:
if resmap is None:
# prepare the result container
resmap = np.zeros((len(res), dataset.nfeatures),
dtype=res.samples.dtype)
if 'null_prob' in res.fa:
# initialize the prob map also with zeroes, as p=0 can never
# happen as an empirical result
probmap = np.zeros((dataset.nfeatures,) + res.fa.null_prob.shape[1:],
dtype=res.samples.dtype)
observ_counter = np.zeros(dataset.nfeatures, dtype=int)
#project the result onto all features -- love broadcasting!
resmap[:, res.a.roi_feature_ids] += res.samples
if probmap is not None:
probmap[res.a.roi_feature_ids] += res.fa.null_prob
# increment observation counter for all relevant features
observ_counter[res.a.roi_feature_ids] += 1
# when all results have been added up average them according to the number
# of observations
observ_mask = observ_counter > 0
resmap[:, observ_mask] /= observ_counter[observ_mask]
result_ds = Dataset(resmap,
fa={'observations': observ_counter})
if probmap is not None:
# transpose to make broadcasting work -- creates a view, so in-place
# modification still does the job
probmap.T[:,observ_mask] /= observ_counter[observ_mask]
result_ds.fa['null_prob'] = probmap.squeeze()
if 'mapper' in dataset.a:
import copy
result_ds.a['mapper'] = copy.copy(dataset.a.mapper)
return result_ds
def setup_parser(parser):
from .helpers import parser_add_optgroup_from_def, \
parser_add_common_attr_opts, single_required_hdf5output, ca_opts_grp
parser_add_common_opt(parser, 'multidata', required=True)
parser_add_optgroup_from_def(parser, searchlight_opts_grp)
parser_add_optgroup_from_def(parser, ca_opts_grp)
parser_add_optgroup_from_def(parser, searchlight_constraints_opts_grp)
parser_add_optgroup_from_def(parser, crossvalidation_opts_grp,
prefix='--cv-')
parser_add_optgroup_from_def(parser, single_required_hdf5output)
def run(args):
if os.path.isfile(args.payload) and args.payload.endswith('.py'):
measure = script2obj(args.payload)
elif args.payload == 'cv':
if args.cv_learner is None or args.cv_partitioner is None:
            raise ValueError('cross-validation payload requires --cv-learner and --cv-partitioner')
# get CV instance
measure = get_crossvalidation_instance(
args.cv_learner, args.cv_partitioner, args.cv_errorfx,
args.cv_sampling_repetitions, args.cv_learner_space,
args.cv_balance_training, args.cv_permutations,
args.cv_avg_datafold_results, args.cv_prob_tail)
else:
raise RuntimeError("this should not happen")
ds = arg2ds(args.data)
if args.ds_preproc_fx is not None:
ds = args.ds_preproc_fx(ds)
# setup neighborhood
# XXX add big switch to allow for setting up surface-based neighborhoods
from mvpa2.misc.neighborhood import IndexQueryEngine
qe = IndexQueryEngine(**dict(args.neighbors))
# determine ROIs
rids = None # all by default
aggregate_fx = args.aggregate_fx
if args.roi_attr is not None:
# first figure out which roi features should be processed
if len(args.roi_attr) == 1 and args.roi_attr[0] in ds.fa.keys():
# name of an attribute -> pull non-zeroes
rids = ds.fa[args.roi_attr[0]].value.nonzero()[0]
else:
# an expression?
from .cmd_select import _eval_attr_expr
rids = _eval_attr_expr(args.roi_attr, ds.fa).nonzero()[0]
seed_ids = None
if args.scatter_rois is not None:
# scatter_neighborhoods among available ids if was requested
from mvpa2.misc.neighborhood import scatter_neighborhoods
attr, nb = args.scatter_rois
coords = ds.fa[attr].value
if rids is not None:
# select only those which were chosen by ROI
coords = coords[rids]
_, seed_ids = scatter_neighborhoods(nb, coords)
if aggregate_fx is None:
# no custom one given -> use default "fill in" function
aggregate_fx = _fill_in_scattered_results
if args.enable_ca is None:
args.enable_ca = ['roi_feature_ids']
elif 'roi_feature_ids' not in args.enable_ca:
args.enable_ca += ['roi_feature_ids']
if seed_ids is None:
roi_ids = rids
else:
if rids is not None:
            # we had to sub-select by scattering among available rids
# so we would need to get original ids
roi_ids = rids[seed_ids]
else:
# scattering happened on entire feature-set
roi_ids = seed_ids
verbose(3, 'Attempting %i ROI analyses'
% ((roi_ids is None) and ds.nfeatures or len(roi_ids)))
from mvpa2.measures.searchlight import Searchlight
sl = Searchlight(measure,
queryengine=qe,
roi_ids=roi_ids,
nproc=args.nproc,
results_backend=args.multiproc_backend,
results_fx=aggregate_fx,
enable_ca=args.enable_ca,
disable_ca=args.disable_ca)
# XXX support me too!
# add_center_fa
# tmp_prefix
# nblocks
# null_dist
# run
res = sl(ds)
if (seed_ids is not None) and ('mapper' in res.a):
# strip the last mapper link in the chain, which would be the seed ID selection
res.a['mapper'] = res.a.mapper[:-1]
# XXX create more output
# and store
ds2hdf5(res, args.output, compression=args.hdf5_compression)
return res
|
tests/test_validate.py | frictionlessdata/goodtables-py | 243 | 12613734 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import six
import json
import pytest
from pprint import pprint
from copy import deepcopy
from importlib import import_module
from goodtables import validate, init_datapackage, check, Error
# Infer preset
def test_validate_infer_table(log):
report = validate('data/invalid.csv')
# will report missing value error for cell that does not have a header
assert report['error-count'] == 7
def test_validate_infer_datapackage_path(log):
report = validate('data/datapackages/invalid/datapackage.json')
assert report['error-count'] == 2
def test_validate_infer_datapackage_dict(log):
with open('data/datapackages/invalid/datapackage.json') as file:
report = validate(json.load(file))
assert report['error-count'] == 2
def test_validate_infer_nested(log):
report = validate([{'source': 'data/invalid.csv'}])
# will report missing value error for cell that does not have a header
assert report['error-count'] == 7
# Report's preset
def test_validate_report_preset():
report = validate('data/valid.csv')
assert report['preset'] == 'table'
# Report's scheme/format/encoding
def test_validate_report_scheme_format_encoding():
report = validate('data/valid.csv')
assert report['tables'][0]['scheme'] == 'file'
assert report['tables'][0]['format'] == 'csv'
assert report['tables'][0]['encoding'] == 'utf-8'
# Report's schema
def test_validate_report_schema():
report = validate('data/valid.csv')
assert report['tables'][0].get('schema') is None
def test_validate_report_schema_infer_schema():
report = validate('data/valid.csv', infer_schema=True)
assert report['tables'][0]['schema'] == 'table-schema'
# Nested source with individual checks
def test_validate_nested_checks(log):
source = [
['field'],
['value', 'value'],
[''],
]
report = validate(
[
{'source': source, 'checks': ['extra-value']},
{'source': source, 'checks': ['blank-row']},
]
)
assert log(report) == [
(1, 2, 2, 'extra-value'),
(2, 3, None, 'blank-row'),
]
# Invalid table schema
# TODO: enable after
# https://github.com/frictionlessdata/goodtables-py/issues/304
@pytest.mark.skip
def test_validate_invalid_table_schema(log):
source = [
['name', 'age'],
['Alex', '33'],
]
schema = {'fields': [{'name': 'name'}, {'name': 'age', 'type': 'bad'},]}
report = validate(source, schema=schema)
assert log(report) == [
(1, None, None, 'schema-error'),
]
# Datapackage with css dialect header false
def test_validate_datapackage_dialect_header_false(log):
descriptor = {
'resources': [
{
'name': 'name',
'data': [['John', '22'], ['Alex', '33'], ['Paul', '44'],],
'schema': {
'fields': [{'name': 'name'}, {'name': 'age', 'type': 'integer'},]
},
'dialect': {'header': False,},
}
]
}
report = validate(descriptor)
assert log(report) == []
# Source as pathlib.Path
@pytest.mark.skipif(sys.version_info < (3, 4), reason='not supported')
def test_source_pathlib_path_table():
pathlib = import_module('pathlib')
report = validate(pathlib.Path('data/valid.csv'))
assert report['table-count'] == 1
assert report['valid']
@pytest.mark.skipif(sys.version_info < (3, 4), reason='not supported')
def test_source_pathlib_path_datapackage():
pathlib = import_module('pathlib')
report = validate(pathlib.Path('data/datapackages/valid/datapackage.json'))
assert report['table-count'] == 2
assert report['valid']
# Catch exceptions
def test_validate_catch_all_open_exceptions(log):
report = validate('data/latin1.csv', encoding='utf-8')
assert log(report) == [
(1, None, None, 'encoding-error'),
]
def test_validate_catch_all_iter_exceptions(log):
# Reducing sample size to get raise on iter, not on open
report = validate([['h'], [1], 'bad'], sample_size=1)
assert log(report) == [
(1, None, None, 'source-error'),
]
# Warnings
def test_validate_warnings_no():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage')
assert len(report['warnings']) == 0
def test_validate_warnings_bad_datapackage_json():
source = 'data/invalid_json.json'
report = validate(source, preset='datapackage')
assert len(report['warnings']) == 1
assert 'Unable to parse JSON' in report['warnings'][0]
def test_validate_warnings_table_limit():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage', table_limit=1)
assert len(report['warnings']) == 1
assert 'table(s) limit' in report['warnings'][0]
def test_validate_warnings_row_limit():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage', row_limit=1)
assert len(report['warnings']) == 2
assert 'row(s) limit' in report['warnings'][0]
assert 'row(s) limit' in report['warnings'][1]
def test_validate_warnings_error_limit():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage', error_limit=1)
assert len(report['warnings']) == 2
assert 'error(s) limit' in report['warnings'][0]
assert 'error(s) limit' in report['warnings'][1]
def test_validate_warnings_table_and_row_limit():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage', table_limit=1, row_limit=1)
assert len(report['warnings']) == 2
assert 'table(s) limit' in report['warnings'][0]
assert 'row(s) limit' in report['warnings'][1]
def test_validate_warnings_table_and_error_limit():
source = 'data/datapackages/invalid/datapackage.json'
report = validate(source, preset='datapackage', table_limit=1, error_limit=1)
assert len(report['warnings']) == 2
assert 'table(s) limit' in report['warnings'][0]
assert 'error(s) limit' in report['warnings'][1]
# Empty source
def test_validate_empty_source():
report = validate('data/empty.csv')
assert report['tables'][0]['row-count'] == 0
assert report['tables'][0]['error-count'] == 0
# No headers source
def test_validate_no_headers():
report = validate('data/invalid_no_headers.csv', headers=None)
assert report['tables'][0]['row-count'] == 3
# will report missing header since headers are none
assert report['tables'][0]['error-count'] == 3
assert report['tables'][0]['errors'][0]['code'] == 'blank-header'
assert report['tables'][0]['errors'][1]['code'] == 'blank-header'
assert report['tables'][0]['errors'][2]['code'] == 'extra-value'
# Init datapackage
def test_init_datapackage_is_correct():
resources_paths = [
'data/valid.csv',
'data/sequential_value.csv',
]
dp = init_datapackage(resources_paths)
assert dp is not None
assert dp.valid, dp.errors
assert len(dp.resources) == 2
actual_resources_paths = [res.descriptor['path'] for res in dp.resources]
assert sorted(resources_paths) == sorted(actual_resources_paths)
# Issues
def test_composite_primary_key_unique_issue_215(log):
descriptor = {
'resources': [
{
'name': 'name',
'data': [['id1', 'id2'], ['a', '1'], ['a', '2'],],
'schema': {
'fields': [{'name': 'id1'}, {'name': 'id2'},],
'primaryKey': ['id1', 'id2'],
},
}
],
}
report = validate(descriptor)
assert log(report) == []
def test_composite_primary_key_not_unique_issue_215(log):
descriptor = {
'resources': [
{
'name': 'name',
'data': [['id1', 'id2'], ['a', '1'], ['a', '1'],],
'schema': {
'fields': [{'name': 'id1'}, {'name': 'id2'},],
'primaryKey': ['id1', 'id2'],
},
}
],
}
report = validate(descriptor, skip_checks=['duplicate-row'])
assert log(report) == [
(1, 3, 1, 'unique-constraint'),
]
def test_validate_infer_fields_issue_223():
source = [
['name1', 'name2'],
['123', 'abc'],
['456', 'def'],
['789', 'ghi'],
]
schema = {'fields': [{'name': 'name1'}]}
report = validate(source, schema=schema, infer_fields=True)
assert report['valid']
def test_validate_infer_fields_issue_225():
source = [
['name1', 'name2'],
['123', None],
['456', None],
['789', None],
]
schema = {'fields': [{'name': 'name1'}]}
report = validate(source, schema=schema, infer_fields=True)
errors = set([error.get("code") for error in report.get("tables")[0].get("errors")])
assert report is not None
    assert len(errors) == 1
assert {"missing-value"} == errors
    assert not report['valid']
def test_fix_issue_312_inspector_should_report_table_as_invalid(log):
report = validate([{'source': 'data/invalid_fix_312.xlsx'}])
assert log(report) == [
(1, None, 3, 'blank-header'),
(1, None, 4, 'duplicate-header'),
(1, None, 5, 'blank-header'),
(1, None, 5, 'duplicate-header'),
(1, 2, 3, 'missing-value'),
(1, 2, 4, 'missing-value'),
(1, 2, 5, 'missing-value'),
(1, 3, None, 'duplicate-row'),
(1, 4, 3, 'missing-value'),
(1, 4, 4, 'missing-value'),
(1, 4, 5, 'missing-value'),
(1, 5, None, 'blank-row'),
]
def test_validate_missing_local_file_raises_source_error_issue_315(log):
report = validate([{'source': 'invalid'}])
assert log(report) == [
(1, None, None, 'scheme-error'),
]
def test_validate_datapackage_with_schema_issue_348(log):
DESCRIPTOR = {
'resources': [
{
'name': 'people',
'data': [
['id', 'name', 'surname'],
['p1', 'Tom', 'Hanks'],
['p2', 'Meryl', 'Streep'],
],
'schema': {
'fields': [
{'name': 'id', 'type': 'string'},
{'name': 'name', 'type': 'string'},
{'name': 'surname', 'type': 'string'},
{'name': 'dob', 'type': 'date'},
]
},
}
]
}
report = validate(DESCRIPTOR, checks=['structure', 'schema'])
assert log(report) == [
(1, None, 4, 'missing-header'),
]
def test_validate_datapackage_with_schema_structure_only_issue_348(log):
DESCRIPTOR = {
'resources': [
{
'name': 'people',
'data': [
['id', 'name', 'surname'],
['p1', 'Tom', 'Hanks'],
['p2', 'Meryl', 'Streep'],
],
'schema': {
'fields': [
{'name': 'id', 'type': 'string'},
{'name': 'name', 'type': 'string'},
{'name': 'surname', 'type': 'string'},
{'name': 'dob', 'type': 'date'},
]
},
}
]
}
report = validate(DESCRIPTOR, checks=['structure'])
assert report['valid']
def test_validate_geopoint_required_constraint_issue_231(log):
report = validate('data/datapackages/geopoint/datapackage.json')
assert report['valid']
def test_validate_fails_with_wrong_encoding_issue_274(log):
    # For now, by default the encoding is detected incorrectly by chardet
report = validate('data/encoding-274.csv', encoding='utf-8')
assert report['valid']
def test_validate_invalid_table_schema_issue_304(log):
source = [
['name', 'age'],
['Alex', '33'],
]
schema = {'fields': [{'name': 'name'}, {'name': 'age', 'type': 'bad'},]}
report = validate(source, schema=schema)
assert not report['valid']
def test_validate_order_fields_issue_313(log):
source = 'data/order_fields_313.xlsx'
schema = {
'fields': [
{'name': 'Column_1', 'type': 'string',},
{'name': 'Column_2', 'type': 'string', 'constraints': {'required': True}},
{'name': 'Column_3', 'type': 'string'},
{'name': 'Column_4', 'type': 'string'},
{'name': 'Column_5', 'type': 'string'},
]
}
# For now, the "non-matching-header" check is required to order the fields
checks = ['non-matching-header', 'required-constraint']
report = validate(source, schema=schema, checks=checks, order_fields=True)
assert report['valid']
def test_validate_number_test_issue_232(log):
# We check here that it doesn't raise exceptions
source = 'data/number_test/datapackage.json'
report = validate(source)
assert not report['valid']
def test_validate_inline_not_a_binary_issue_349(log):
with open('data/valid.csv') as source:
report = validate(source)
error = report['tables'][0]['errors'][0]
assert error['code'] == 'source-error'
assert error['message'] == 'Only byte streams are supported.'
@pytest.mark.skipif(six.PY2, reason='only python3')
def test_validate_inline_no_format_issue_349(log):
with open('data/valid.csv', 'rb') as source:
report = validate(source)
error = report['tables'][0]['errors'][0]
assert error['code'] == 'format-error'
assert error['message'] == 'Format "None" is not supported'
def test_validate_fk_invalid_reference_table_issue_347(log):
descriptor = {
'resources': [
{
'name': 'people',
'data': [
['id', 'name', 'surname'],
['p1', 'Tom', 'Hanks'],
['p2', 'Meryl', 'Streep'],
],
'schema': {
'fields': [
{'name': 'id', 'type': 'string'},
{'name': 'name', 'type': 'string'},
{'name': 'surname', 'type': 'string'},
{'name': 'dob', 'type': 'date'},
]
},
},
{
'name': 'oscars',
'data': [
['person_id', 'category', 'year', 'work'],
['p1', 'Best Actor', 1994, 'Philadelphia'],
['p1', 'Best Actor', 1995, '<NAME>'],
['p2', 'Best Supporting Actress', 1980, 'Kramer vs. Kramer'],
['p2', 'Best Actress', 1982, 'Sophie"s Choice'],
['p2', 'Best Actress', 2012, 'The Iron Lady'],
['p3', 'Best Actor', 2019, 'Joker'],
],
'schema': {
'fields': [
{'name': 'person_id', 'type': 'string'},
{'name': 'category', 'type': 'string'},
{'name': 'year', 'type': 'year'},
{'name': 'work', 'type': 'string'},
],
'foreignKeys': [
{
'fields': 'person_id',
'reference': {'resource': 'people', 'fields': 'id'},
}
],
},
},
]
}
report = validate(descriptor, checks=['structure', 'schema', 'foreign-key'])
assert report['tables'][1]['error-count'] == 6
assert report['tables'][1]['errors'][0]['code'] == 'foreign-key'
assert (
report['tables'][1]['errors'][0]['message']
== 'Foreign key violation caused by invalid reference table: [people] Row length 3 doesn\'t match fields count 4 for row "2"'
)
def test_validate_wide_table_with_order_fields_issue_277(log):
report = validate('data/issue277.csv', schema='data/issue277.json', order_fields=True)
assert log(report) == [
(1, 49, 50, 'required-constraint'),
(1, 68, 50, 'required-constraint'),
(1, 69, 50, 'required-constraint'),
]
def test_validate_wide_table_with_order_fields_issue_368(log):
@check('custom-check', type='custom', context='body')
class CustomCheck(object):
def __init__(self, *args, **kwargs):
pass
def check_headers_hook(self, cells, sample):
errors = []
errors.append(Error('custom-check-head'))
return errors
def check_row(self, cells):
errors = []
errors.append(Error('custom-check-body'))
return errors
report = validate([['header'], ['value']], checks=['custom-check'])
assert log(report) == [
(1, None, None, 'custom-check-head'),
(1, None, None, 'custom-check-body'),
]
|
examples/pytorch/pointcloud/pointnet/pointnet_cls.py | ketyi/dgl | 9,516 | 12613736 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class PointNetCls(nn.Module):
def __init__(self, output_classes, input_dims=3, conv1_dim=64,
                 dropout_prob=0.3, use_transform=True):
super(PointNetCls, self).__init__()
self.input_dims = input_dims
self.conv1 = nn.ModuleList()
self.conv1.append(nn.Conv1d(input_dims, conv1_dim, 1))
self.conv1.append(nn.Conv1d(conv1_dim, conv1_dim, 1))
self.conv1.append(nn.Conv1d(conv1_dim, conv1_dim, 1))
self.bn1 = nn.ModuleList()
self.bn1.append(nn.BatchNorm1d(conv1_dim))
self.bn1.append(nn.BatchNorm1d(conv1_dim))
self.bn1.append(nn.BatchNorm1d(conv1_dim))
self.conv2 = nn.ModuleList()
self.conv2.append(nn.Conv1d(conv1_dim, conv1_dim * 2, 1))
self.conv2.append(nn.Conv1d(conv1_dim * 2, conv1_dim * 16, 1))
self.bn2 = nn.ModuleList()
self.bn2.append(nn.BatchNorm1d(conv1_dim * 2))
self.bn2.append(nn.BatchNorm1d(conv1_dim * 16))
self.maxpool = nn.MaxPool1d(conv1_dim * 16)
self.pool_feat_len = conv1_dim * 16
self.mlp3 = nn.ModuleList()
self.mlp3.append(nn.Linear(conv1_dim * 16, conv1_dim * 8))
self.mlp3.append(nn.Linear(conv1_dim * 8, conv1_dim * 4))
self.bn3 = nn.ModuleList()
self.bn3.append(nn.BatchNorm1d(conv1_dim * 8))
self.bn3.append(nn.BatchNorm1d(conv1_dim * 4))
        self.dropout = nn.Dropout(dropout_prob)  # honor the constructor argument (default kept at the original 0.3)
self.mlp_out = nn.Linear(conv1_dim * 4, output_classes)
self.use_transform = use_transform
if use_transform:
self.transform1 = TransformNet(input_dims)
self.trans_bn1 = nn.BatchNorm1d(input_dims)
self.transform2 = TransformNet(conv1_dim)
self.trans_bn2 = nn.BatchNorm1d(conv1_dim)
def forward(self, x):
batch_size = x.shape[0]
h = x.permute(0, 2, 1)
if self.use_transform:
trans = self.transform1(h)
h = h.transpose(2, 1)
h = torch.bmm(h, trans)
h = h.transpose(2, 1)
h = F.relu(self.trans_bn1(h))
for conv, bn in zip(self.conv1, self.bn1):
h = conv(h)
h = bn(h)
h = F.relu(h)
if self.use_transform:
trans = self.transform2(h)
h = h.transpose(2, 1)
h = torch.bmm(h, trans)
h = h.transpose(2, 1)
h = F.relu(self.trans_bn2(h))
for conv, bn in zip(self.conv2, self.bn2):
h = conv(h)
h = bn(h)
h = F.relu(h)
h = self.maxpool(h).view(-1, self.pool_feat_len)
for mlp, bn in zip(self.mlp3, self.bn3):
h = mlp(h)
h = bn(h)
h = F.relu(h)
h = self.dropout(h)
out = self.mlp_out(h)
return out
class TransformNet(nn.Module):
def __init__(self, input_dims=3, conv1_dim=64):
super(TransformNet, self).__init__()
self.conv = nn.ModuleList()
self.conv.append(nn.Conv1d(input_dims, conv1_dim, 1))
self.conv.append(nn.Conv1d(conv1_dim, conv1_dim * 2, 1))
self.conv.append(nn.Conv1d(conv1_dim * 2, conv1_dim * 16, 1))
self.bn = nn.ModuleList()
self.bn.append(nn.BatchNorm1d(conv1_dim))
self.bn.append(nn.BatchNorm1d(conv1_dim * 2))
self.bn.append(nn.BatchNorm1d(conv1_dim * 16))
self.maxpool = nn.MaxPool1d(conv1_dim * 16)
self.pool_feat_len = conv1_dim * 16
self.mlp2 = nn.ModuleList()
self.mlp2.append(nn.Linear(conv1_dim * 16, conv1_dim * 8))
self.mlp2.append(nn.Linear(conv1_dim * 8, conv1_dim * 4))
self.bn2 = nn.ModuleList()
self.bn2.append(nn.BatchNorm1d(conv1_dim * 8))
self.bn2.append(nn.BatchNorm1d(conv1_dim * 4))
self.input_dims = input_dims
self.mlp_out = nn.Linear(conv1_dim * 4, input_dims * input_dims)
def forward(self, h):
batch_size = h.shape[0]
for conv, bn in zip(self.conv, self.bn):
h = conv(h)
h = bn(h)
h = F.relu(h)
h = self.maxpool(h).view(-1, self.pool_feat_len)
for mlp, bn in zip(self.mlp2, self.bn2):
h = mlp(h)
h = bn(h)
h = F.relu(h)
out = self.mlp_out(h)
iden = Variable(torch.from_numpy(np.eye(self.input_dims).flatten().astype(np.float32)))
iden = iden.view(1, self.input_dims * self.input_dims).repeat(batch_size, 1)
if out.is_cuda:
iden = iden.cuda()
out = out + iden
out = out.view(-1, self.input_dims, self.input_dims)
return out
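# Hedged usage sketch (editor's illustration, not part of the original example):
# run a forward pass on a random batch of point clouds.  The MaxPool1d layers
# above use a kernel of size conv1_dim * 16 (1024 by default), so this demo
# uses exactly 1024 points per cloud to match the pooling kernel.
if __name__ == '__main__':
    model = PointNetCls(output_classes=40)   # e.g. a ModelNet40-style label set
    model.eval()                             # keep BatchNorm in eval mode for this demo pass
    points = torch.rand(8, 1024, 3)          # (batch, num_points, xyz)
    with torch.no_grad():
        logits = model(points)
    print(logits.shape)                      # expected: torch.Size([8, 40])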
|
analysis/iec_cases/batch_sim_test.py | leozz37/makani | 1,178 | 12613738 | <reponame>leozz37/makani
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for IEC cases batch sim client and worker."""
import json
import logging
import os
import re
import unittest
import gflags
import makani
from makani.analysis.iec_cases import batch_sim_client
from makani.analysis.iec_cases import batch_sim_worker
from makani.lib.python import os_util
from makani.lib.python import test_util
from makani.lib.python.batch_sim import testing_fakes
from makani.sim import sim_types
import numpy
FLAGS = gflags.FLAGS
def MakeFakeLogFile(file_name, num_samples):
"""Makes a fake log file for a worker to consume.
Args:
file_name: Name of the log file.
num_samples: Number of data points for the log file.
"""
log_file = test_util.CreateSampleHDF5File(file_name, num_samples)
telem_time = numpy.linspace(0.0, 150.0, num_samples)
simulator = log_file['messages']['kAioNodeSimulator']
with test_util.H5DatasetWriter(simulator['kMessageTypeSimTelemetry']) as sim:
sim['message']['time'] = telem_time
sim['message']['wing']['tether_force_b']['tension'][:] = 2000.0
for i, x in enumerate(('x', 'y', 'z')):
sim['message']['wing']['Xg'][x] = float(i)
controller = log_file['messages']['kAioNodeControllerA']
with test_util.H5DatasetWriter(
controller['kMessageTypeControlDebug']) as c:
c['message']['time'] = telem_time
log_file.close()
class IecCasesTest(unittest.TestCase):
SIM_NAME = 'iec_cases_test'
NUM_WORKERS = 3
BINARY_MAP = {os.path.join(makani.HOME, k): v for k, v in [
('control/sim_controller', 'Controller'),
('lib/pcap_to_hdf5/pcap_to_hdf5', 'PcapToHdf5'),
('sim/sim', 'Simulator'),
('control/sim_ground_estimator', 'GroundEstimator')]}
@test_util.FlagValueSaver()
def testClientAndWorker(self):
class FakeBinaries(testing_fakes.FakeBinaries):
_NUM_SAMPLES = 100
def Controller(self, args):
return 0
def GroundEstimator(self, args):
return 0
def Simulator(self, args):
return 0
def PcapToHdf5(self, args):
log_file_name = self._ParseArg(args, '--output_file')
MakeFakeLogFile(log_file_name, self._NUM_SAMPLES)
return 0
patch = testing_fakes.PatchAllFakes(
binary_map=self.BINARY_MAP,
binaries=FakeBinaries(),
worker_factory=batch_sim_worker.IecCasesSimWorker,
client_class=batch_sim_client.IecCasesSimClient)
with os_util.TempDir() as temp_dir:
with patch:
gflags.FLAGS.output_dir = temp_dir
client = batch_sim_client.IecCasesSimClient(
num_workers=self.NUM_WORKERS, sim_name=self.SIM_NAME)
client.Run()
# Check that all the plots and HTML files were created.
image_files = [f for f in os.listdir(temp_dir)
if re.match(r'.+\.png', f)]
self.assertEqual(len(batch_sim_client.IEC_CASES), len(image_files))
self.assertTrue(os.path.isfile(
os.path.join(temp_dir, 'index.html')))
@test_util.FlagValueSaver()
@test_util.LogDisabler(logging.WARNING)
def testNonzeroSimReturnCode(self):
class FakeBinaries(testing_fakes.FakeBinaries):
_NUM_SAMPLES = 100
def Controller(self, args):
with open(self._ParseArg(args, '--all_params'), 'r') as config_file:
config = json.load(config_file)
self._iec_case = config['sim']['iec_sim']['load_case']
# Simulate being terminated with SIGINT.
return -2
def GroundEstimator(self, args):
with open(self._ParseArg(args, '--all_params'), 'r') as config_file:
config = json.load(config_file)
self._iec_case = config['sim']['iec_sim']['load_case']
# Simulate being terminated with SIGINT.
return -2
def Simulator(self, args):
if (self._iec_case
== sim_types.kIecCaseExtremeCoherentGustWithDirectionChange):
return -1
else:
return 0
def PcapToHdf5(self, args):
log_file_name = self._ParseArg(args, '--output_file')
MakeFakeLogFile(log_file_name, self._NUM_SAMPLES)
return 0
patch = testing_fakes.PatchAllFakes(
binary_map=self.BINARY_MAP,
binaries=FakeBinaries(),
worker_factory=batch_sim_worker.IecCasesSimWorker,
client_class=batch_sim_client.IecCasesSimClient)
with os_util.TempDir() as temp_dir:
with patch:
gflags.FLAGS.cases = ['1.1', '1.3', '1.4b']
gflags.FLAGS.output_dir = temp_dir
gflags.FLAGS.local_output_dir = temp_dir
client = batch_sim_client.IecCasesSimClient(
num_workers=self.NUM_WORKERS, sim_name=self.SIM_NAME)
client.Run()
filenames = [os.path.join(temp_dir, '%d.json' % i) for i in range(3)]
for filename in filenames:
self.assertTrue(os.path.isfile(filename))
sim_successes = []
for filename in filenames:
with open(filename, 'r') as f:
config = json.load(f)
sim_successes.append(config['sim_successful'])
self.assertEqual([True, True, False], sim_successes)
if __name__ == '__main__':
unittest.main()
|
contrib/python/scipy/scipy/integrate/tests/test_quadrature.py | ibr11/catboost | 6,989 | 12613756 | <gh_stars>1000+
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_allclose, assert_
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps)
from scipy.integrate.quadrature import AccuracyWarning
class TestQuadrature(TestCase):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_quadrature_rtol(self):
def myfunc(x, n, z): # Bessel function integrand
return 1e90 * cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_quadrature_miniter(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
table_val = 0.30614353532540296487
for miniter in [5, 52]:
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
assert_almost_equal(val, table_val, decimal=7)
assert_(err < 1.0)
def test_quadrature_single_args(self):
def myfunc(x, n):
return 1e90 * cos(n*x-1.8*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg_rtol(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return 1e19*cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
table_val = 1e19*0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
def test_romb_gh_3731(self):
# Check that romb makes maximal use of data points
x = np.arange(2**4+1)
y = np.cos(0.2*x)
val = romb(y)
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
assert_allclose(val, val2, rtol=1e-8, atol=0)
# should be equal to romb with 2**k+1 samples
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=AccuracyWarning)
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(),
divmax=4)
assert_allclose(val, val3, rtol=1e-12, atol=0)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
def test_simps(self):
y = np.arange(17)
assert_equal(simps(y), 128)
assert_equal(simps(y, dx=0.5), 64)
assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(simps(y, x=x, even='avg'), 13.875)
assert_equal(simps(y, x=x, even='first'), 13.75)
assert_equal(simps(y, x=x, even='last'), 14)
class TestCumtrapz(TestCase):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
def test_y_nd_x_nd(self):
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = np.array([[[0., 0.5, 2., 4.5],
[0., 4.5, 10., 16.5]],
[[0., 8.5, 18., 28.5],
[0., 12.5, 26., 40.5]],
[[0., 16.5, 34., 52.5],
[0., 20.5, 42., 64.5]]])
assert_allclose(y_int, y_expected)
# Try with all axes
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
for axis, shape in zip([0, 1, 2], shapes):
y_int = cumtrapz(y, x, initial=3.45, axis=axis)
assert_equal(y_int.shape, (3, 2, 4))
y_int = cumtrapz(y, x, initial=None, axis=axis)
assert_equal(y_int.shape, shape)
def test_y_nd_x_1d(self):
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
x = np.arange(4)**2
# Try with all axes
ys_expected = (
np.array([[[4., 5., 6., 7.],
[8., 9., 10., 11.]],
[[40., 44., 48., 52.],
[56., 60., 64., 68.]]]),
np.array([[[2., 3., 4., 5.]],
[[10., 11., 12., 13.]],
[[18., 19., 20., 21.]]]),
np.array([[[0.5, 5., 17.5],
[4.5, 21., 53.5]],
[[8.5, 37., 89.5],
[12.5, 53., 125.5]],
[[16.5, 69., 161.5],
[20.5, 85., 197.5]]]))
for axis, y_expected in zip([0, 1, 2], ys_expected):
y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
if __name__ == "__main__":
run_module_suite()
|
src/ray_tune.py | waddupitzme/graph-neural-pde | 125 | 12613768 | <reponame>waddupitzme/graph-neural-pde<gh_stars>100-1000
import argparse
import os
from functools import partial
import numpy as np
import torch
from data import get_dataset, set_train_val_test_split
from GNN_early import GNNEarly
from GNN import GNN
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler
from ray.tune.suggest.ax import AxSearch
from run_GNN import get_optimizer, test, test_OGB, train
from torch import nn
from CGNN import CGNN, get_sym_adj
from CGNN import train as train_cgnn
"""
python3 ray_tune.py --dataset ogbn-arxiv --lr 0.005 --add_source --function transformer --attention_dim 16 --hidden_dim 128 --heads 4 --input_dropout 0 --decay 0 --adjoint --adjoint_method rk4 --method rk4 --time 5.08 --epoch 500 --num_samples 1 --name ogbn-arxiv-test --gpus 1 --grace_period 50
"""
def average_test(models, datas):
if opt['dataset'] == 'ogbn-arxiv':
results = [test_OGB(model, data, opt) for model, data in zip(models, datas)]
else:
results = [test(model, data) for model, data in zip(models, datas)]
train_accs, val_accs, tmp_test_accs = [], [], []
for train_acc, val_acc, test_acc in results:
train_accs.append(train_acc)
val_accs.append(val_acc)
tmp_test_accs.append(test_acc)
return train_accs, val_accs, tmp_test_accs
def train_ray_rand(opt, checkpoint_dir=None, data_dir="../data"):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = get_dataset(opt, data_dir, opt['not_lcc'])
models = []
datas = []
optimizers = []
for split in range(opt["num_splits"]):
dataset.data = set_train_val_test_split(
np.random.randint(0, 1000), dataset.data, num_development=5000 if opt["dataset"] == "CoauthorCS" else 1500)
datas.append(dataset.data)
if opt['baseline']:
opt['num_feature'] = dataset.num_node_features
opt['num_class'] = dataset.num_classes
adj = get_sym_adj(dataset.data, opt, device)
model, data = CGNN(opt, adj, opt['time'], device).to(device), dataset.data.to(device)
train_this = train_cgnn
else:
model = GNN(opt, dataset, device)
train_this = train
models.append(model)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model, data = model.to(device), dataset.data.to(device)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = get_optimizer(opt["optimizer"], parameters, lr=opt["lr"], weight_decay=opt["decay"])
optimizers.append(optimizer)
# The `checkpoint_dir` parameter gets passed by Ray Tune when a checkpoint
# should be restored.
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint)
model.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
for epoch in range(1, opt["epoch"]):
loss = np.mean(
[train_this(model, optimizer, data) for model, optimizer, data in zip(models, optimizers, datas)])
train_accs, val_accs, tmp_test_accs = average_test(models, datas)
with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
best = np.argmax(val_accs)
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((models[best].state_dict(), optimizers[best].state_dict()), path)
tune.report(loss=loss, accuracy=np.mean(val_accs), test_acc=np.mean(tmp_test_accs),
train_acc=np.mean(train_accs),
forward_nfe=model.fm.sum,
backward_nfe=model.bm.sum)
def train_ray(opt, checkpoint_dir=None, data_dir="../data"):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = get_dataset(opt, data_dir, opt['not_lcc'])
models = []
optimizers = []
data = dataset.data.to(device)
datas = [data for i in range(opt["num_init"])]
for split in range(opt["num_init"]):
if opt['baseline']:
opt['num_feature'] = dataset.num_node_features
opt['num_class'] = dataset.num_classes
adj = get_sym_adj(dataset.data, opt, device)
model, data = CGNN(opt, adj, opt['time'], device).to(device), dataset.data.to(device)
train_this = train_cgnn
else:
model = GNN(opt, dataset, device)
train_this = train
models.append(model)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model = model.to(device)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = get_optimizer(opt["optimizer"], parameters, lr=opt["lr"], weight_decay=opt["decay"])
optimizers.append(optimizer)
# The `checkpoint_dir` parameter gets passed by Ray Tune when a checkpoint
# should be restored.
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint)
model.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
for epoch in range(1, opt["epoch"]):
loss = np.mean([train_this(model, optimizer, data) for model, optimizer in zip(models, optimizers)])
train_accs, val_accs, tmp_test_accs = average_test(models, datas)
with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
best = np.argmax(val_accs)
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((models[best].state_dict(), optimizers[best].state_dict()), path)
tune.report(loss=loss, accuracy=np.mean(val_accs), test_acc=np.mean(tmp_test_accs),
train_acc=np.mean(train_accs),
forward_nfe=model.fm.sum,
backward_nfe=model.bm.sum)
def train_ray_int(opt, checkpoint_dir=None, data_dir="../data"):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = get_dataset(opt, data_dir, opt['not_lcc'])
if opt["num_splits"] > 0:
dataset.data = set_train_val_test_split(
23 * np.random.randint(0, opt["num_splits"]),
# random prime 23 to make the splits 'more' random. Could remove
dataset.data,
num_development=5000 if opt["dataset"] == "CoauthorCS" else 1500)
model = GNN(opt, dataset, device) if opt["no_early"] else GNNEarly(opt, dataset, device)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model, data = model.to(device), dataset.data.to(device)
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = get_optimizer(opt["optimizer"], parameters, lr=opt["lr"], weight_decay=opt["decay"])
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint)
model.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
this_test = test_OGB if opt['dataset'] == 'ogbn-arxiv' else test
best_time = best_epoch = train_acc = val_acc = test_acc = 0
for epoch in range(1, opt["epoch"]):
loss = train(model, optimizer, data)
# need next line as it sets the attributes in the solver
if opt["no_early"]:
tmp_train_acc, tmp_val_acc, tmp_test_acc = this_test(model, data, opt)
best_time = opt['time']
else:
tmp_train_acc, tmp_val_acc, tmp_test_acc = this_test(model, data, opt)
if tmp_val_acc > val_acc:
best_epoch = epoch
train_acc = tmp_train_acc
val_acc = tmp_val_acc
test_acc = tmp_test_acc
if model.odeblock.test_integrator.solver.best_val > val_acc:
best_epoch = epoch
val_acc = model.odeblock.test_integrator.solver.best_val
test_acc = model.odeblock.test_integrator.solver.best_test
train_acc = model.odeblock.test_integrator.solver.best_train
best_time = model.odeblock.test_integrator.solver.best_time
with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((model.state_dict(), optimizer.state_dict()), path)
tune.report(loss=loss, accuracy=val_acc, test_acc=test_acc, train_acc=train_acc, best_time=best_time,
best_epoch=best_epoch,
forward_nfe=model.fm.sum, backward_nfe=model.bm.sum)
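# Editor's sketch (not part of the original script): how train_ray_int can be
# handed to Ray Tune with the scheduler/reporter classes imported above.  The
# repository's real entry point (not shown in this excerpt) does this wiring
# from command-line flags, so treat the keyword choices here as assumptions.
def _example_tune_run(opt):
  scheduler = ASHAScheduler(metric='accuracy', mode='max',
                            grace_period=opt.get('grace_period', 10),
                            max_t=opt.get('epoch', 100))
  reporter = CLIReporter(metric_columns=['accuracy', 'test_acc', 'train_acc', 'loss'])
  return tune.run(
    partial(train_ray_int, data_dir=os.path.abspath('../data')),
    name=opt.get('name', 'example_run'),
    config=opt,
    num_samples=opt.get('num_samples', 1),
    scheduler=scheduler,
    progress_reporter=reporter)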
def set_cora_search_space(opt):
opt["decay"] = tune.loguniform(0.001, 0.1) # weight decay l2 reg
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.001, 10.0)
opt["directional_penalty"] = tune.loguniform(0.001, 10.0)
opt["hidden_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(6, 8)) # hidden dim of X in dX/dt
opt["lr"] = tune.uniform(0.01, 0.2)
# opt["input_dropout"] = tune.uniform(0.2, 0.8) # encoder dropout
opt["input_dropout"] = 0.5
opt["optimizer"] = tune.choice(["adam", "adamax"])
opt["dropout"] = tune.uniform(0, 0.15) # output dropout
opt["time"] = tune.uniform(2.0, 30.0) # terminal time of the ODE integrator;
  # when it is large, training can hang (probably due to a large number of ODE function evaluations)
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 4)) #
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8)) # hidden dim for attention
# opt['attention_norm_idx'] = tune.choice([0, 1])
opt['attention_norm_idx'] = 0
# opt["leaky_relu_slope"] = tune.uniform(0, 0.7)
opt["leaky_relu_slope"] = 0.2
opt["self_loop_weight"] = tune.choice([0, 1]) # whether or not to use self-loops
else:
opt["self_loop_weight"] = tune.uniform(0, 3)
opt["tol_scale"] = tune.loguniform(1, 1000) # num you multiply the default rtol and atol by
if opt["adjoint"]:
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun"]) # , "rk4"])
opt["tol_scale_adjoint"] = tune.loguniform(100, 10000)
opt['add_source'] = tune.choice([True, False])
opt['att_samp_pct'] = tune.uniform(0.3, 1)
opt['batch_norm'] = tune.choice([True, False])
# opt['batch_norm'] = True
if opt['rewiring'] == 'gdc':
opt['gdc_k'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 10))
opt['ppr_alpha'] = tune.uniform(0.01, 0.2)
return opt
def set_pubmed_search_space(opt):
opt["decay"] = tune.uniform(0.001, 0.1)
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.01, 1.0)
opt["directional_penalty"] = tune.loguniform(0.01, 1.0)
opt["hidden_dim"] = 128 # tune.sample_from(lambda _: 2 ** np.random.randint(4, 8))
opt["lr"] = tune.loguniform(0.02, 0.1)
opt["input_dropout"] = 0.4 # tune.uniform(0.2, 0.5)
opt["dropout"] = tune.uniform(0, 0.5)
opt["time"] = tune.uniform(5.0, 20.0)
opt["optimizer"] = tune.choice(["rmsprop", "adam", "adamax"])
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 4))
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8))
opt['attention_norm_idx'] = tune.choice([0, 1])
opt["leaky_relu_slope"] = tune.uniform(0, 0.8)
opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
[0, 1]) # whether or not to use self-loops
else:
opt["self_loop_weight"] = tune.uniform(0, 3)
opt["tol_scale"] = tune.loguniform(1, 1e4)
if opt["adjoint"]:
opt["tol_scale_adjoint"] = tune.loguniform(1, 1e4)
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun"])
else:
raise Exception("Can't train on PubMed without the adjoint method.")
return opt
def set_citeseer_search_space(opt):
opt["decay"] = 0.1 # tune.loguniform(2e-3, 1e-2)
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.001, 10.0)
opt["directional_penalty"] = tune.loguniform(0.001, 10.0)
opt["hidden_dim"] = 128 # tune.sample_from(lambda _: 2 ** np.random.randint(6, 8))
opt["lr"] = tune.loguniform(2e-3, 0.01)
opt["input_dropout"] = tune.uniform(0.4, 0.8)
opt["dropout"] = tune.uniform(0, 0.8)
opt["time"] = tune.uniform(0.5, 8.0)
opt["optimizer"] = tune.choice(["rmsprop", "adam", "adamax"])
#
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(1, 4))
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 8))
opt['attention_norm_idx'] = 1 # tune.choice([0, 1])
opt["leaky_relu_slope"] = tune.uniform(0, 0.7)
opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
[0, 1]) # whether or not to use self-loops
else:
opt["self_loop_weight"] = tune.uniform(0, 3) # 1 seems to work pretty well
opt["tol_scale"] = tune.loguniform(1, 2e3)
if opt["adjoint"]:
opt["tol_scale_adjoint"] = tune.loguniform(1, 1e5)
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun"]) # , "rk4"])
if opt['rewiring'] == 'gdc':
# opt['gdc_sparsification'] = tune.choice(['topk', 'threshold'])
opt['gdc_sparsification'] = 'topk'
opt['gdc_method'] = tune.choice(['ppr', 'heat'])
# opt['gdc_method'] = 'heat'
opt['gdc_k'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8))
# opt['gdc_threshold'] = tune.loguniform(0.0001, 0.01)
opt['ppr_alpha'] = tune.uniform(0.01, 0.2)
opt['heat_time'] = tune.uniform(1, 5)
return opt
def set_computers_search_space(opt):
opt["decay"] = tune.loguniform(2e-3, 1e-2)
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.01, 10.0)
opt["directional_penalty"] = tune.loguniform(0.001, 10.0)
opt["hidden_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8))
opt["lr"] = tune.loguniform(5e-5, 5e-3)
opt["input_dropout"] = tune.uniform(0.4, 0.8)
opt["dropout"] = tune.uniform(0, 0.8)
opt["self_loop_weight"] = tune.choice([0, 1])
opt["time"] = tune.uniform(0.5, 10.0)
opt["optimizer"] = tune.choice(["adam", "adamax", "rmsprop"])
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 4))
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 8))
opt['attention_norm_idx'] = 1 # tune.choice([0, 1])
opt["leaky_relu_slope"] = tune.uniform(0, 0.8)
opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
[0, 1]) # whether or not to use self-loops
else:
opt["self_loop_weight"] = tune.uniform(0, 3)
opt["tol_scale"] = tune.loguniform(1e1, 1e4)
if opt["adjoint"]:
opt["tol_scale_adjoint"] = tune.loguniform(1, 1e5)
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun", "rk4"])
if opt['rewiring'] == 'gdc':
# opt['gdc_sparsification'] = tune.choice(['topk', 'threshold'])
opt['gdc_sparsification'] = 'threshold'
opt['exact'] = False
# opt['gdc_method'] = tune.choice(['ppr', 'heat'])
opt['gdc_method'] = 'ppr'
# opt['avg_degree'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8)) # bug currently in pyg
opt['gdc_threshold'] = tune.loguniform(0.00001, 0.01)
# opt['gdc_threshold'] = None
opt['ppr_alpha'] = tune.uniform(0.01, 0.2)
# opt['heat_time'] = tune.uniform(1, 5)
return opt
def set_coauthors_search_space(opt):
opt["decay"] = tune.loguniform(1e-3, 2e-2)
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.01, 10.0)
opt["directional_penalty"] = tune.loguniform(0.01, 10.0)
opt["hidden_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 6))
opt["lr"] = tune.loguniform(1e-5, 0.1)
opt["input_dropout"] = tune.uniform(0.4, 0.8)
opt["dropout"] = tune.uniform(0, 0.8)
opt["self_loop_weight"] = tune.choice([0, 1])
opt["time"] = tune.uniform(0.5, 10.0)
opt["optimizer"] = tune.choice(["adam", "adamax", "rmsprop"])
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 4))
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 8))
opt['attention_norm_idx'] = tune.choice([0, 1])
opt["leaky_relu_slope"] = tune.uniform(0, 0.8)
opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
[0, 1]) # whether or not to use self-loops
else:
opt["self_loop_weight"] = tune.uniform(0, 3)
opt["tol_scale"] = tune.loguniform(1e1, 1e4)
if opt["adjoint"]:
opt["tol_scale_adjoint"] = tune.loguniform(1, 1e5)
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun", "rk4"])
if opt['rewiring'] == 'gdc':
# opt['gdc_sparsification'] = tune.choice(['topk', 'threshold'])
opt['gdc_sparsification'] = 'threshold'
opt['exact'] = False
# opt['gdc_method'] = tune.choice(['ppr', 'heat'])
opt['gdc_method'] = 'ppr'
# opt['avg_degree'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8)) # bug currently in pyg
opt['gdc_threshold'] = tune.loguniform(0.0001, 0.0005)
# opt['gdc_threshold'] = None
opt['ppr_alpha'] = tune.uniform(0.1, 0.25)
# opt['heat_time'] = tune.uniform(1, 5)
return opt
def set_photo_search_space(opt):
opt["decay"] = tune.loguniform(0.001, 1e-2)
if opt['regularise']:
opt["kinetic_energy"] = tune.loguniform(0.01, 5.0)
opt["directional_penalty"] = tune.loguniform(0.001, 10.0)
opt["hidden_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 7))
opt["lr"] = tune.loguniform(1e-3, 0.1)
opt["input_dropout"] = tune.uniform(0.4, 0.8)
opt["dropout"] = tune.uniform(0, 0.8)
opt["time"] = tune.uniform(0.5, 7.0)
opt["optimizer"] = tune.choice(["adam", "adamax", "rmsprop"])
if opt["block"] in {'attention', 'mixed'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 3))
opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 6))
opt['attention_norm_idx'] = tune.choice([0, 1])
opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
[0, 1])
opt["leaky_relu_slope"] = tune.uniform(0, 0.8)
else:
opt["self_loop_weight"] = tune.uniform(0, 3)
opt["tol_scale"] = tune.loguniform(100, 1e5)
if opt["adjoint"]:
opt["tol_scale_adjoint"] = tune.loguniform(100, 1e5)
opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun"])
if opt['rewiring'] == 'gdc':
# opt['gdc_sparsification'] = tune.choice(['topk', 'threshold'])
opt['gdc_sparsification'] = 'threshold'
opt['exact'] = False
# opt['gdc_method'] = tune.choice(['ppr', 'heat'])
opt['gdc_method'] = 'ppr'
# opt['avg_degree'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8)) # bug currently in pyg
opt['gdc_threshold'] = tune.loguniform(0.0001, 0.0005)
# opt['gdc_threshold'] = None
opt['ppr_alpha'] = tune.uniform(0.1, 0.25)
# opt['heat_time'] = tune.uniform(1, 5)
return opt
def set_arxiv_search_space(opt):
opt["decay"] = 0 # tune.loguniform(1e-10, 1e-6)
# # opt["decay"] = 0
# if opt['regularise']:
# opt["kinetic_energy"] = tune.loguniform(0.01, 10.0)
# opt["directional_penalty"] = tune.loguniform(0.001, 10.0)
# # opt["hidden_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(5, 9))
# opt["hidden_dim"] = 128 # best choice with attention
# # opt["hidden_dim"] = 256 # best choice without attention
# opt["lr"] = 0.005 #tune.uniform(0.001, 0.05)
# # opt['lr'] = 0.02
# opt["input_dropout"] = 0 #tune.uniform(0., 0.6)
# # opt["input_dropout"] = 0
# opt["dropout"] = 0 #tune.uniform(0, 0.6)
# # opt["dropout"] = 0
# # opt['step_size'] = tune.choice([0.5, 1])
# opt['step_size'] = 1 #0.5
# # opt['adjoint_step_size'] = tune.choice([0.5, 1])
# opt['adjoint_step_size'] = 1 #0.5
# # opt["time"] = tune.choice([1,2,3,4,5,6,7,8,9,10])
# opt['time'] = 5.08 #tune.uniform(1.5, 6)
# # opt['time'] = 5
# # opt["optimizer"] = tune.choice(["adam", "adamax", "rmsprop"])
# opt['optimizer'] = 'adam'
# if opt["block"] in {'attention', 'mixed', 'hard_attention'} or opt['function'] in {'GAT', 'transformer', 'dorsey'}:
# # opt["heads"] = tune.sample_from(lambda _: 2 ** np.random.randint(0, 3))
# opt["heads"] = 4
# # opt["attention_dim"] = tune.sample_from(lambda _: 2 ** np.random.randint(3, 7))
# opt["attention_dim"] = 16 #32
# # opt['attention_norm_idx'] = tune.choice([0, 1])
# # opt["self_loop_weight"] = tune.choice([0, 0.5, 1, 2]) if opt['block'] == 'mixed' else tune.choice(
# # [0, 1])
# opt["self_loop_weight"] = 1
# # opt["leaky_relu_slope"] = tune.uniform(0, 0.8)
# opt["leaky_relu_slope"] = 0.2
# else:
# # opt["self_loop_weight"] = tune.uniform(0, 3)
# opt["self_loop_weight"] = tune.choice([0, 1])
# # opt['data_norm'] = tune.choice(['rw', 'gcn'])
# # opt['add_source'] = tune.choice([True, False])
# opt['add_source'] = True
# opt['att_samp_pct'] = 1 #tune.uniform(0.6, 1)
# # opt['batch_norm'] = tune.choice([True, False])
# opt['batch_norm'] = False #True
# # opt['label_rate'] = tune.uniform(0.05, 0.5)
# # opt["tol_scale"] = tune.loguniform(10, 1e4)
# if opt["adjoint"]:
# # opt["tol_scale_adjoint"] = tune.loguniform(10, 1e5)
# # opt["adjoint_method"] = tune.choice(["dopri5", "adaptive_heun", "rk4"])
# # opt["adjoint_method"] = tune.choice(["adaptive_heun", "rk4"])
# opt["adjoint_method"] = "rk4"
# # opt["method"] = tune.choice(["dopri5", "rk4"])
# # opt["method"] = tune.choice(["midpoint", "rk4"])
# opt["method"] = "rk4"
# if opt['rewiring'] == 'gdc':
# # opt['gdc_sparsification'] = tune.choice(['topk', 'threshold'])
# opt['gdc_sparsification'] = 'threshold'
# opt['exact'] = False
# # opt['gdc_method'] = tune.choice(['ppr', 'heat'])
# opt['gdc_method'] = 'ppr'
# # opt['avg_degree'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 8)) # bug currently in pyg
# opt['gdc_threshold'] = tune.uniform(0.0005, 0.005)
# # opt['gdc_threshold'] = None
# # opt['ppr_alpha'] = tune.uniform(0.1, 0.25)
# opt['ppr_alpha'] = 0.15
# # opt['heat_time'] = tune.uniform(1, 5)
return opt
def set_search_space(opt):
if opt["dataset"] == "Cora":
return set_cora_search_space(opt)
elif opt["dataset"] == "Pubmed":
return set_pubmed_search_space(opt)
elif opt["dataset"] == "Citeseer":
return set_citeseer_search_space(opt)
elif opt["dataset"] == "Computers":
return set_computers_search_space(opt)
elif opt["dataset"] == "Photo":
return set_photo_search_space(opt)
elif opt["dataset"] == "CoauthorCS":
return set_coauthors_search_space(opt)
elif opt["dataset"] == "ogbn-arxiv":
return set_arxiv_search_space(opt)
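  # Note (descriptive comment, not in the original script): dataset names other
  # than the ones handled above fall through and return None implicitly, so
  # main() below assumes one of the supported datasets was requested.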
def main(opt):
data_dir = os.path.abspath("../data")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
opt = set_search_space(opt)
scheduler = ASHAScheduler(
metric=opt['metric'],
mode="max",
max_t=opt["epoch"],
grace_period=opt["grace_period"],
reduction_factor=opt["reduction_factor"],
)
reporter = CLIReporter(
metric_columns=["accuracy", "test_acc", "train_acc", "loss", "training_iteration", "forward_nfe",
"backward_nfe"]
)
# choose a search algorithm from https://docs.ray.io/en/latest/tune/api_docs/suggestion.html
search_alg = AxSearch(metric=opt['metric'])
search_alg = None
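  # Illustrative sketch (an assumption, not part of the original script): a
  # suggestion-based search such as AxSearch could be used instead of random
  # sampling by wrapping it in a concurrency limiter before handing it to
  # tune.run, e.g.
  #   from ray.tune.suggest import ConcurrencyLimiter
  #   search_alg = ConcurrencyLimiter(AxSearch(metric=opt['metric'], mode='max'), max_concurrent=4)
  # Leaving search_alg = None makes Ray Tune sample the tune.* distributions
  # defined in set_search_space at random, pruned by the ASHA scheduler.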
train_fn = train_ray if opt["num_splits"] == 0 else train_ray_rand
result = tune.run(
partial(train_fn, data_dir=data_dir),
name=opt["name"],
resources_per_trial={"cpu": opt["cpus"], "gpu": opt["gpus"]},
search_alg=search_alg,
keep_checkpoints_num=3,
checkpoint_score_attr=opt['metric'],
config=opt,
num_samples=opt["num_samples"],
scheduler=scheduler,
max_failures=2,
local_dir="../ray_tune",
progress_reporter=reporter,
raise_on_failed_trial=False,
)
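  # Possible follow-up, shown only as a hedged sketch (not in the original
  # script): the best trial can be inspected from the analysis object that
  # tune.run returns, e.g.
  #   best = result.get_best_trial(opt['metric'], 'max', 'last')
  #   print(best.config, best.last_result[opt['metric']])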
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use_cora_defaults",
action="store_true",
help="Whether to run with best params for cora. Overrides the choice of dataset",
)
parser.add_argument(
"--dataset", type=str, default="Cora", help="Cora, Citeseer, Pubmed, Computers, Photo, CoauthorCS"
)
parser.add_argument("--hidden_dim", type=int, default=32, help="Hidden dimension.")
parser.add_argument('--fc_out', dest='fc_out', action='store_true',
help='Add a fully connected layer to the decoder.')
parser.add_argument("--input_dropout", type=float, default=0.5, help="Input dropout rate.")
parser.add_argument("--dropout", type=float, default=0.0, help="Dropout rate.")
parser.add_argument("--batch_norm", dest='batch_norm', action='store_true', help='search over reg params')
parser.add_argument("--optimizer", type=str, default="adam", help="Optimizer.")
parser.add_argument("--lr", type=float, default=0.01, help="Learning rate.")
parser.add_argument("--decay", type=float, default=5e-4, help="Weight decay for optimization")
parser.add_argument("--self_loop_weight", type=float, default=1.0, help="Weight of self-loops.")
parser.add_argument('--use_labels', dest='use_labels', action='store_true', help='Also diffuse labels')
parser.add_argument('--label_rate', type=float, default=0.5,
                      help='fraction of training labels to use when --use_labels is set.')
parser.add_argument("--epoch", type=int, default=10, help="Number of training epochs per iteration.")
parser.add_argument("--alpha", type=float, default=1.0, help="Factor in front matrix A.")
parser.add_argument("--time", type=float, default=1.0, help="End time of ODE function.")
parser.add_argument("--augment", action="store_true",
help="double the length of the feature vector by appending zeros to stabilise ODE learning", )
parser.add_argument("--alpha_dim", type=str, default="sc", help="choose either scalar (sc) or vector (vc) alpha")
parser.add_argument('--no_alpha_sigmoid', dest='no_alpha_sigmoid', action='store_true',
                      help="do not apply sigmoid before multiplying by alpha")
parser.add_argument("--beta_dim", type=str, default="sc", help="choose either scalar (sc) or vector (vc) beta")
parser.add_argument('--use_mlp', dest='use_mlp', action='store_true',
help='Add a fully connected layer to the encoder.')
# ODE args
parser.add_argument(
"--method", type=str, default="dopri5", help="set the numerical solver: dopri5, euler, rk4, midpoint"
)
parser.add_argument('--step_size', type=float, default=1,
help='fixed step size when using fixed step solvers e.g. rk4')
parser.add_argument('--max_iters', type=float, default=100, help='maximum number of integration steps')
parser.add_argument(
"--adjoint_method", type=str, default="adaptive_heun",
help="set the numerical solver for the backward pass: dopri5, euler, rk4, midpoint"
)
parser.add_argument('--adjoint_step_size', type=float, default=1,
help='fixed step size when using fixed step adjoint solvers e.g. rk4')
parser.add_argument("--adjoint", dest='adjoint', action='store_true',
help="use the adjoint ODE method to reduce memory footprint")
parser.add_argument("--tol_scale", type=float, default=1.0, help="multiplier for atol and rtol")
parser.add_argument("--tol_scale_adjoint", type=float, default=1.0,
help="multiplier for adjoint_atol and adjoint_rtol")
parser.add_argument("--ode_blocks", type=int, default=1, help="number of ode blocks to run")
parser.add_argument('--data_norm', type=str, default='rw',
help='rw for random walk, gcn for symmetric gcn norm')
parser.add_argument('--add_source', dest='add_source', action='store_true',
                      help='If true, get rid of the alpha param and add the beta*x0 source term')
# SDE args
parser.add_argument("--dt_min", type=float, default=1e-5, help="minimum timestep for the SDE solver")
parser.add_argument("--dt", type=float, default=1e-3, help="fixed step size")
parser.add_argument('--adaptive', dest='adaptive', action='store_true', help='use adaptive step sizes')
# Attention args
parser.add_argument(
"--leaky_relu_slope",
type=float,
default=0.2,
help="slope of the negative part of the leaky relu used in attention",
)
parser.add_argument('--attention_dim', type=int, default=64,
help='the size to project x to before calculating att scores')
parser.add_argument("--heads", type=int, default=4, help="number of attention heads")
parser.add_argument("--attention_norm_idx", type=int, default=0, help="0 = normalise rows, 1 = normalise cols")
parser.add_argument('--mix_features', dest='mix_features', action='store_true',
help='apply a feature transformation xW to the ODE')
parser.add_argument('--block', type=str, default='constant', help='constant, mixed, attention, SDE')
parser.add_argument('--function', type=str, default='laplacian', help='laplacian, transformer, dorsey, GAT, SDE')
parser.add_argument('--reweight_attention', dest='reweight_attention', action='store_true',
help="multiply attention scores by edge weights before softmax")
# ray args
parser.add_argument("--num_samples", type=int, default=20, help="number of ray trials")
parser.add_argument("--gpus", type=float, default=0, help="number of gpus per trial. Can be fractional")
parser.add_argument("--cpus", type=float, default=1, help="number of cpus per trial. Can be fractional")
parser.add_argument(
"--grace_period", type=int, default=5, help="number of epochs to wait before terminating trials"
)
parser.add_argument(
"--reduction_factor", type=int, default=4, help="number of trials is halved after this many epochs"
)
parser.add_argument("--name", type=str, default="ray_exp")
parser.add_argument("--num_splits", type=int, default=0, help="Number of random splits >= 0. 0 for planetoid split")
parser.add_argument("--num_init", type=int, default=1, help="Number of random initializations >= 0")
parser.add_argument("--max_nfe", type=int, default=300, help="Maximum number of function evaluations allowed in an epoch.")
parser.add_argument('--metric', type=str, default='accuracy',
help='metric to sort the hyperparameter tuning runs on')
# regularisation args
parser.add_argument('--jacobian_norm2', type=float, default=None, help="int_t ||df/dx||_F^2")
parser.add_argument('--total_deriv', type=float, default=None, help="int_t ||df/dt||^2")
parser.add_argument('--kinetic_energy', type=float, default=None, help="int_t ||f||_2^2")
parser.add_argument('--directional_penalty', type=float, default=None, help="int_t ||(df/dx)^T f||^2")
parser.add_argument("--baseline", action="store_true", help="Wheather to run the ICML baseline or not.")
parser.add_argument("--regularise", dest='regularise', action='store_true', help='search over reg params')
# rewiring args
parser.add_argument('--rewiring', type=str, default=None, help="two_hop, gdc")
parser.add_argument('--gdc_method', type=str, default='ppr', help="ppr, heat, coeff")
parser.add_argument('--gdc_sparsification', type=str, default='topk', help="threshold, topk")
parser.add_argument('--gdc_k', type=int, default=64, help="number of neighbours to sparsify to when using topk")
parser.add_argument('--gdc_threshold', type=float, default=0.0001,
help="above this edge weight, keep edges when using threshold")
parser.add_argument('--gdc_avg_degree', type=int, default=64,
help="if gdc_threshold is not given can be calculated by specifying avg degree")
parser.add_argument('--ppr_alpha', type=float, default=0.05, help="teleport probability")
  parser.add_argument('--heat_time', type=float, default=3., help="time to run gdc heat kernel diffusion for")
parser.add_argument("--not_lcc", action="store_false", help="don't use the largest connected component")
parser.add_argument('--use_flux', dest='use_flux', action='store_true',
help='incorporate the feature grad in attention based edge dropout')
parser.add_argument("--exact", action="store_true",
help="for small datasets can do exact diffusion. If dataset is too big for matrix inversion then you can't")
parser.add_argument('--att_samp_pct', type=float, default=1,
help="float in [0,1). The percentage of edges to retain based on attention scores")
args = parser.parse_args()
opt = vars(args)
main(opt)
|
tests/anomaly/forecast_based/test_lstm.py | mbignotti/Merlion | 2,215 | 12613775 | <gh_stars>1000+
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import datetime
import logging
import math
from os.path import abspath, dirname, join
import sys
import unittest
import numpy as np
from merlion.transform.resample import TemporalResample
from merlion.models.anomaly.forecast_based.lstm import LSTMDetector, LSTMTrainConfig, LSTMDetectorConfig
from merlion.models.forecast.lstm import auto_stride
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.time_series import TimeSeries
from merlion.utils.data_io import csv_to_time_series
logger = logging.getLogger(__name__)
rootdir = dirname(dirname(dirname(dirname(abspath(__file__)))))
class TestLSTM(unittest.TestCase):
def test_full(self):
file_name = join(rootdir, "data", "example.csv")
sequence = TemporalResample("15min")(csv_to_time_series(file_name, timestamp_unit="ms", data_cols=["kpi"]))
logger.info(f"Data looks like:\n{sequence[:5]}")
time_stamps = sequence.univariates[sequence.names[0]].time_stamps
stride = auto_stride(time_stamps, resolution=12)
logger.info("stride = " + str(stride))
# 2 days of data for testing
test_delta = datetime.timedelta(days=2).total_seconds()
ts_train, ts_test = sequence.bisect(time_stamps[-1] - test_delta)
forecast_steps = math.ceil(len(ts_test) / stride)
self.assertGreater(forecast_steps, 1, "sequence is not long enough")
model = LSTMDetector(
LSTMDetectorConfig(max_forecast_steps=forecast_steps, nhid=256, threshold=AggregateAlarms(2, 1, 60, 300))
)
train_config = LSTMTrainConfig(
data_stride=stride,
epochs=1,
seq_len=forecast_steps * 2,
checkpoint_file=join(rootdir, "tmp", "lstm", "checkpoint.pt"),
)
train_scores = model.train(train_data=ts_train, train_config=train_config)
self.assertIsInstance(
train_scores,
TimeSeries,
msg="Expected output of train() to be a TimeSeries of anomaly "
"scores, but this seems to be a forecast. Check inheritance "
"order of this forecasting detector.",
)
train_scores = train_scores.univariates[train_scores.names[0]]
train_vals = ts_train.univariates[ts_train.names[0]]
self.assertNotAlmostEqual(
train_scores.values[-1],
train_vals.values[-1],
delta=100,
msg="Expected output of train() to be a TimeSeries of anomaly "
"scores, but this seems to be a forecast. Check inheritance "
"order of this forecasting detector.",
)
##############
scores = model.get_anomaly_score(ts_test)
logger.info("Scores look like:\n" + str(scores[:5]))
alarms = model.get_anomaly_label(ts_test)
logger.info("Alarms look like:\n" + str(alarms[:5]))
n_alarms = np.sum(alarms.to_pd().values != 0)
logger.info("# of alarms = " + str(n_alarms))
self.assertLess(n_alarms, 20)
##############
# Note: we compare scores vs scoresv2[1:] because scoresv2 has one
# extra time step included. This is because when `time_series_prev` is
# given, we compute `self.model.transform(ts_train + ts_test)` and take
# the first time step in the transformed FULL time series which matches
# with `ts_test`. This is different from the first time step of
# `self.model.transform(ts_test)` due to the difference transform.
scoresv2 = model.get_anomaly_score(ts_test, ts_train)[1:]
self.assertLess(np.max((scores.to_pd() - scoresv2.to_pd()).abs().values), 0.1)
##############
model.save(join(rootdir, "tmp", "lstm"))
model = LSTMDetector.load(join(rootdir, "tmp", "lstm"))
loaded_scores = model.get_anomaly_score(ts_test)
self.assertSequenceEqual(list(scores), list(loaded_scores))
loaded_alarms = model.get_anomaly_label(ts_test)
self.assertSequenceEqual(list(alarms), list(loaded_alarms))
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.DEBUG
)
unittest.main()
|
electrum_axe/axe_peer.py | AXErunners/electrum-axe | 336 | 12613789 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Axe-Electrum - lightweight Axe client
# Copyright (C) 2019 Axe Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import ipaddress
import logging
import random
import time
from aiohttp_socks import open_connection
from struct import pack, unpack
from typing import Optional, Tuple
from .bitcoin import public_key_to_p2pkh
from .crypto import sha256d
from .axe_msg import (SporkID, AxeType, AxeCmd, AxeVersionMsg,
AxePingMsg, AxePongMsg, AxeGetDataMsg,
AxeGetMNListDMsg, AxeSendDsqMsg)
from .ecc import ECPubkey
from .interface import GracefulDisconnect
from .logging import Logger
from .util import log_exceptions, ignore_exceptions, SilentTaskGroup
from .version import ELECTRUM_VERSION
EMPTY_PAYLOAD_CHECKSUM = b'\x5D\xF6\xE0\xE2'
AXE_PROTO_VERSION = 70216
LOCAL_IP_ADDR = ipaddress.ip_address('127.0.0.1')
PAYLOAD_LIMIT = 32*2**20 # 32MiB
READ_LIMIT = 64*2**10 # 64KiB
def deserialize_peer(peer_str: str) -> Tuple[str, int]:
# host might be IPv6 address, hence do rsplit:
host, port = str(peer_str).rsplit(':', 1)
if not host:
raise ValueError('host must not be empty')
int_port = int(port) # Throw if cannot be converted to int
if not (0 < int_port < 2**16):
raise ValueError(f'port {port} is out of valid range')
return host, int_port
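# Minimal usage sketch for deserialize_peer (illustrative values, not taken
# from the codebase):
#   deserialize_peer('192.168.1.1:9937')  -> ('192.168.1.1', 9937)
#   deserialize_peer('[::1]:9937')        -> ('[::1]', 9937)
# rsplit(':', 1) keeps the colons of an IPv6 host part intact while still
# splitting off the trailing port.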
class AxePeer(Logger):
LOGGING_SHORTCUT = 'P'
def __init__(self, axe_net, peer: str, proxy: Optional[dict],
debug=False, sml_entry=None, mix_session=None):
self.default_port = axe_net.default_port
self.start_str = axe_net.start_str
self._is_open = False
self.ready = asyncio.Future()
self.peer = peer
self.host, self.port = deserialize_peer(self.peer)
Logger.__init__(self)
assert axe_net.network.config.path
self.axe_net = axe_net
self.loop = axe_net.loop
self._set_proxy(proxy)
self.sml_entry = sml_entry
self.mix_session = mix_session
self.sw = None # StreamWriter
self.sr = None # StreamReader
# Dump net msgs (only for this peer). Set at runtime from the console.
self.debug = debug
# Ping data
self.ping_start = None
self.ping_time = None
self.ping_nonce = None
# Sporks data
self.sporks_done = False
# getaddr flag
self.getaddr_done = False
# mnlistdiff data
self.mnlistdiffs = asyncio.Queue(1)
# Activity data
self.read_bytes = 0
self.read_time = 0
self.write_bytes = 0
self.write_time = 0
self.ban_msg = None
self.ban_till = None
main_group_coro = self.axe_net.main_taskgroup.spawn(self.run())
asyncio.run_coroutine_threadsafe(main_group_coro, self.loop)
self.group = SilentTaskGroup()
def diagnostic_name(self):
return f'{self.host}:{self.port}'
def _set_proxy(self, proxy: dict):
if proxy:
mode = proxy.get('mode')
user, password = proxy.get('user'), proxy.get('password')
host, port = proxy.get('host'), proxy.get('port')
self.socks_url = f'{mode}://{user}:{password}@{host}:{port}'
else:
self.socks_url = None
def handle_disconnect(func):
async def wrapper_func(self: 'AxePeer', *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except GracefulDisconnect as e:
self.logger.log(e.log_level, f'disconnecting due to {repr(e)}')
finally:
await self.axe_net.connection_down(self)
# if was not 'ready' yet, schedule waiting coroutines:
self.ready.cancel()
return wrapper_func
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
@handle_disconnect
async def run(self):
try:
self.ip_addr = ipaddress.ip_address(self.host)
except Exception:
addr = await self.axe_net.resolve_dns_over_https(self.host)
if addr:
self.ip_addr = ipaddress.ip_address(addr[0])
else:
self.ip_addr = ipaddress.ip_address('::')
try:
await self.open()
except (asyncio.CancelledError, OSError) as e:
self.logger.info(f'disconnecting due to: {repr(e)}')
return
def mark_ready(self):
if self.ready.cancelled():
raise GracefulDisconnect('conn establishment was too slow; '
'*ready* future was cancelled')
if self.ready.done():
return
self.ready.set_result(1)
async def open(self):
self.logger.info('open connection')
if self.socks_url is None:
self.sr, self.sw = await asyncio.open_connection(host=self.host,
port=self.port,
limit=READ_LIMIT)
else:
self.sr, self.sw = await open_connection(socks_url=self.socks_url,
host=self.host,
port=self.port,
limit=READ_LIMIT)
self._is_open = True
verack_received = False
version_received = False
await self.send_version()
for res in [await self.read_next_msg() for i in range(2)]:
if not res:
continue
if res.cmd == 'version':
self.version = res.payload
version_received = True
await self.send_msg('verack')
elif res.cmd == 'verack':
verack_received = True
if not version_received or not verack_received:
raise GracefulDisconnect('Peer version handshake failed')
await self.send_msg('senddsq', AxeSendDsqMsg(True).serialize())
self.mark_ready()
self.logger.info(f'connection established')
try:
async with self.group as group:
await group.spawn(self.process_msgs)
await group.spawn(self.process_ping)
await group.spawn(self.monitor_connection)
except GracefulDisconnect:
raise
except (asyncio.CancelledError, OSError) as e:
raise GracefulDisconnect(e) from e
except Exception as e:
raise GracefulDisconnect(e, log_level=logging.ERROR) from e
async def process_msgs(self):
while True:
res = await self.read_next_msg()
if res:
axe_net = self.axe_net
cmd = res.cmd
payload = res.payload
if cmd == 'ping':
msg = AxePongMsg(payload.nonce)
await self.send_msg('pong', msg.serialize())
elif cmd == 'pong':
now = time.time()
if payload.nonce == self.ping_nonce:
self.ping_time = round((now - self.ping_start) * 1000)
self.ping_nonce = None
self.ping_start = None
else:
                        self.logger.info(f'pong with unknown nonce')
elif cmd == 'spork':
spork_msg = payload
spork_id = spork_msg.nSporkID
if not SporkID.has_value(spork_id):
self.logger.info(f'unknown spork id: {spork_id}')
continue
def verify_spork():
return self.verify_spork(spork_msg)
verify_ok = await self.loop.run_in_executor(None,
verify_spork)
if not verify_ok:
raise GracefulDisconnect('verify_spork failed')
sporks = axe_net.sporks
sporks.set_spork(spork_id, spork_msg.nValue, self.peer)
axe_net.set_spork_time = time.time()
elif cmd == 'inv':
out_inventory = []
for di in payload.inventory:
inv_hash = di.hash
if self.mix_session:
if di.type == AxeType.MSG_DSTX:
out_inventory.append(di)
elif di.type == AxeType.MSG_ISLOCK:
recent_invs = axe_net.recent_islock_invs
if inv_hash not in recent_invs:
recent_invs.append(inv_hash)
out_inventory.append(di)
if out_inventory:
msg = AxeGetDataMsg(out_inventory)
await self.send_msg('getdata', msg.serialize())
elif cmd == 'addr':
addresses = [f'{a.ip}:{a.port}'
for a in payload.addresses]
found_peers = self.axe_net.found_peers
                    self.axe_net.found_peers = found_peers.union(addresses)
elif cmd == 'mnlistdiff':
try:
self.mnlistdiffs.put_nowait(payload)
except asyncio.QueueFull:
self.logger.info('excess mnlistdiff msg')
elif cmd == 'islock':
axe_net.append_to_recent_islocks(payload)
elif cmd == 'dsq':
if self.mix_session:
if payload.fReady: # session must ignore other dsq
if self.mix_session.verify_ds_msg_sig(payload):
await self.mix_session.msg_queue.put(res)
else:
exc = Exception(f'dsq vchSig verification'
f' failed {res}')
await self.mix_session.msg_queue.put(exc)
else:
axe_net.add_recent_dsq(payload)
elif cmd == 'dssu' and self.mix_session:
await self.mix_session.msg_queue.put(res)
elif cmd == 'dsf' and self.mix_session:
await self.mix_session.msg_queue.put(res)
elif cmd == 'dsc' and self.mix_session:
await self.mix_session.msg_queue.put(res)
await asyncio.sleep(0.1)
async def monitor_connection(self):
net_timeout = self.axe_net.network.get_network_timeout_seconds()
while True:
await asyncio.sleep(1)
if not self._is_open:
raise GracefulDisconnect('peer session was closed')
read_timeout = self.write_time - self.read_time
if read_timeout > net_timeout:
raise GracefulDisconnect('read timeout')
def is_active(self, num_seconds=1):
        '''Peer has sent or received data within the last num_seconds'''
now = time.time()
return (now - self.read_time < num_seconds
or now - self.write_time < num_seconds)
async def process_ping(self):
while True:
while self.is_active():
await asyncio.sleep(0.5)
self.ping_nonce = random.getrandbits(64)
msg = AxePingMsg(self.ping_nonce)
msg_serialized = msg.serialize()
self.ping_start = time.time()
await self.send_msg('ping', msg_serialized)
await asyncio.sleep(300)
def close(self):
if self._is_open:
if self.sw:
self.sw.close()
if self.mix_session:
self.mix_session.msg_queue.put_nowait(None)
self._is_open = False
# monitor_connection will cancel tasks
def ban(self, ban_msg, ban_seconds=None):
self.ban_msg = ban_msg
ban_till = time.time() + ban_seconds if ban_seconds else None
self.ban_till = ban_till
till = '' if ban_till is None else ' (till %s)' % time.ctime(ban_till)
self.logger.info(f'banned{till}: {ban_msg}')
async def send_msg(self, cmd: str, payload: bytes=b''):
axe_net = self.axe_net
if self.debug or axe_net.debug:
axe_cmd = AxeCmd(cmd, payload)
if payload:
self.logger.info(f'--> {axe_cmd}')
else:
self.logger.info(f'--> {axe_cmd} (no payload)')
cmd_len = len(cmd)
if cmd_len > 12:
            raise Exception('command str too long')
cmd_padding = b'\x00' * (12 - cmd_len)
cmd = cmd.encode('ascii') + cmd_padding
len_payload = len(payload)
payload_size = pack('<I', len_payload)
if len_payload > 0:
checksum = sha256d(payload)[:4]
msg = self.start_str + cmd + payload_size + checksum + payload
else:
msg = self.start_str + cmd + payload_size + EMPTY_PAYLOAD_CHECKSUM
self.sw.write(msg)
self.write_bytes += len(msg)
axe_net.write_bytes += len(msg)
self.write_time = axe_net.write_time = time.time()
await self.sw.drain()
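    # For reference, send_msg above writes the standard Bitcoin/Dash-style P2P
    # frame:
    #   4 bytes   start string (network magic)
    #   12 bytes  command name, ASCII, NUL-padded
    #   4 bytes   payload length, little-endian uint32
    #   4 bytes   checksum = sha256d(payload)[:4] (a fixed constant for empty payloads)
    #   N bytes   payload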
async def send_version(self):
version = AXE_PROTO_VERSION
services = 0
timestamp = int(time.time())
recv_services = 1
recv_ip = self.ip_addr
recv_port = self.port
trans_services = services
trans_ip = LOCAL_IP_ADDR
trans_port = self.default_port
nonce = random.getrandbits(64)
user_agent = '/Axe Electrum:%s/' % ELECTRUM_VERSION
start_height = self.axe_net.network.get_local_height()
relay = 0
msg = AxeVersionMsg(version, services, timestamp,
recv_services, recv_ip, recv_port,
trans_services, trans_ip, trans_port,
nonce, user_agent, start_height, relay,
None, None)
await self.send_msg('version', msg.serialize())
async def read_next_msg(self):
start_str = None
start_bytes_read = 0
axe_net = self.axe_net
while not start_str:
try:
start_str = await self.sr.readuntil(self.start_str)
self.read_time = axe_net.read_time = time.time()
len_start_str = len(start_str)
self.read_bytes += len_start_str
axe_net.read_bytes += len_start_str
if len_start_str > 4:
self.logger.info(f'extra data before start'
f' str: {len_start_str}')
except asyncio.LimitOverrunError:
self.logger.info('start str not found, read ahead')
await self.sr.readexactly(READ_LIMIT)
self.read_time = axe_net.read_time = time.time()
self.read_bytes += READ_LIMIT
axe_net.read_bytes += READ_LIMIT
start_bytes_read += READ_LIMIT
if start_bytes_read > PAYLOAD_LIMIT:
raise GracefulDisconnect(f'start str not found in '
f'{start_bytes_read} bytes read')
except asyncio.IncompleteReadError:
if not self._is_open:
return
raise GracefulDisconnect('start str not found '
'in buffer, EOF found')
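        # At this point the 4-byte network magic has been consumed; the fixed
        # 20-byte header follows (12-byte command, 4-byte little-endian payload
        # length, 4-byte checksum), after which the payload is read and its
        # checksum verified.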
try:
res = None
cmd = await self.sr.readexactly(12)
cmd = cmd.strip(b'\x00').decode('ascii')
payload_size = await self.sr.readexactly(4)
payload_size = unpack('<I', payload_size)[0]
if payload_size > PAYLOAD_LIMIT:
                raise GracefulDisconnect('incoming msg payload too large')
checksum = await self.sr.readexactly(4)
self.read_time = axe_net.read_time = time.time()
self.read_bytes += 20
axe_net.read_bytes += 20
if payload_size == 0:
if checksum != EMPTY_PAYLOAD_CHECKSUM:
self.logger.info(f'error reading msg {cmd}, '
f'checksum mismatch')
return
res = AxeCmd(cmd)
if self.debug or axe_net.debug:
self.logger.info(f'<-- {res} (no payload)')
return res
payload = await self.sr.readexactly(payload_size)
self.read_time = axe_net.read_time = time.time()
self.read_bytes += payload_size
axe_net.read_bytes += payload_size
calc_checksum = sha256d(payload)[:4]
if checksum != calc_checksum:
self.logger.info(f'error reading msg {cmd}, '
f'checksum mismatch')
return
res = AxeCmd(cmd, payload)
except asyncio.IncompleteReadError:
if not self._is_open:
return
raise GracefulDisconnect('error reading msg, EOF reached')
except Exception as e:
raise GracefulDisconnect(e) from e
if self.debug or axe_net.debug:
self.logger.info(f'<-- {res}')
return res
def verify_spork(self, spork_msg):
if spork_msg.nTimeSigned > time.time() + 2 * 3600:
            self.logger.info('Spork signed too far in the future')
return False
new_sigs = self.axe_net.sporks.is_new_sigs()
try:
if self.verify_spork_sig(spork_msg, new_sigs):
return True
except Exception as e:
self.logger.info(f'Spork verification error: {repr(e)}')
try: # Try another sig type
if self.verify_spork_sig(spork_msg, not new_sigs):
return True
except Exception as e:
self.logger.info(f'Spork verification error: {repr(e)}')
self.logger.info('Spork address differs from hardcoded')
return False
def verify_spork_sig(self, spork_msg, new_sigs):
sig = spork_msg.vchSig
msg_hash = spork_msg.msg_hash(new_sigs)
public_key, compressed = ECPubkey.from_signature65(sig, msg_hash)
public_key.verify_message_hash(sig[1:], msg_hash)
pubkey_bytes = public_key.get_public_key_bytes(compressed)
spork_address = public_key_to_p2pkh(pubkey_bytes)
        return self.axe_net.spork_address == spork_address
async def getmnlistd(self, base_height, height):
base_block_hash = await self.axe_net.get_hash(base_height)
block_hash = await self.axe_net.get_hash(height)
msg = AxeGetMNListDMsg(base_block_hash, block_hash)
if not self.mnlistdiffs.empty():
self.logger.info('unasked mnlistdiff msg')
self.mnlistdiffs.get_nowait()
await self.send_msg('getmnlistd', msg.serialize())
return await asyncio.wait_for(self.mnlistdiffs.get(), timeout=30)
|
lib/python/parallels_send_string.py | ibizaman/veewee | 1,295 | 12613815 | import sys
import prlsdkapi
import string
if len(sys.argv) != 3:
print "Usage: parallels_send_string '<VM_NAME>' '<string>'"
exit()
# Parse arguments
vm_name=sys.argv[1]
# String to use
keynames = sys.argv[2].split(' ');
prlsdk = prlsdkapi.prlsdk
consts = prlsdkapi.prlsdk.consts
# Initialize the Parallels API Library
prlsdk.InitializeSDK(consts.PAM_DESKTOP_MAC)
# Obtain a server object identifying the Parallels Service.
server = prlsdkapi.Server()
# Log in (locally, as we use Parallels Desktop)
login_job=server.login_local()
login_job.wait()
# Get a list of virtual machines.
# Find the specified virtual machine and
# obtain an object identifying it.
vm_list = server.get_vm_list()
result= vm_list.wait()
print prlsdkapi.prlsdk.consts.ScanCodesList
# Look for the VM with the name specified on the CLI
found = False
for i in range(result.get_params_count()):
VM = result.get_param_by_index(i)
print VM.get_name()
if VM.get_name() == vm_name:
found = True
break
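# Guard added for robustness (the original sets 'found' but never checks it):
# bail out if no VM matched the requested name.
if not found:
    print "Error: VM '%s' not found" % vm_name
    exit()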
press = consts.PKE_PRESS
release = consts.PKE_RELEASE
# Access the Remote Desktop Access session
vm_io = prlsdkapi.VmIO()
try:
vm_io.connect_to_vm(VM).wait()
except prlsdkapi.PrlSDKError, e:
print "Error: %s" % e
exit()
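# Illustrative sketch of the expected key-string format (the exact key names
# depend on prlsdkapi's ScanCodesList and are only assumed here): keys are
# separated by spaces, and '#' joins keys that must be held together, e.g.
#   "LEFT-SHIFT#H E L L O ENTER"
# would type 'Hello' followed by Enter.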
for keyname in keynames:
if(keyname != ''):
        # Keys can also contain special keys like shift, which have to be pressed before and released after
# eg. SHIFT-C (Press shift, then press C)
keys = keyname.split('#');
for keypress in keys:
scan_code = consts.ScanCodesList[keypress]
vm_io.send_key_event(VM,scan_code,press,50)
# And now the reversed order
# eg. Now release C then SHIFT
for keypress in reversed(keys):
scan_code = consts.ScanCodesList[keypress]
vm_io.send_key_event(VM,scan_code,release,50)
# End the Remote Deskop Access session
vm_io.disconnect_from_vm(VM)
# Logoff and deinitialize the library
server.logoff()
prlsdkapi.deinit_sdk()
|
tests/pytests/unit/modules/test_rabbitmq.py | tomdoherty/salt | 9,425 | 12613849 | <reponame>tomdoherty/salt<filename>tests/pytests/unit/modules/test_rabbitmq.py
"""
:codeauthor: <NAME> <<EMAIL>>
"""
import logging
import pytest
import salt.modules.rabbitmq as rabbitmq
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {rabbitmq: {"__context__": {"rabbitmqctl": None, "rabbitmq-plugins": None}}}
# 'list_users_rabbitmq2' function tests: 1
def test_list_users_rabbitmq2():
"""
    Test if it returns a list of users based off of rabbitmqctl user_list.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
"Listing users ...\nguest\t[administrator,"
" user]\njustAnAdmin\t[administrator]\n"
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_users() == {
"guest": ["administrator", "user"],
"justAnAdmin": ["administrator"],
}
# 'list_users_rabbitmq3' function tests: 1
def test_list_users_rabbitmq3():
"""
    Test if it returns a list of users based off of rabbitmqctl user_list.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "guest\t[administrator user]\r\nother\t[a b]\r\n",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_users() == {
"guest": ["administrator", "user"],
"other": ["a", "b"],
}
# 'list_users_with_warning_rabbitmq2' function tests: 1
def test_list_users_with_warning_rabbitmq2():
"""
Test if having a leading WARNING returns the user_list anyway.
"""
rtn_stdout = "\n".join(
[
"WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- location has moved to"
" /etc/rabbitmq/rabbitmq-env.conf",
"Listing users ...",
"guest\t[administrator, user]\n",
]
)
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": rtn_stdout, "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_users() == {"guest": ["administrator", "user"]}
# 'list_users_with_warning_rabbitmq3' function tests: 1
def test_list_users_with_warning_rabbitmq3():
"""
Test if having a leading WARNING returns the user_list anyway.
"""
rtn_stdout = "\n".join(
[
"WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- location has moved to"
" /etc/rabbitmq/rabbitmq-env.conf",
"Listing users ...",
"guest\t[administrator user]\n",
]
)
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": rtn_stdout, "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_users() == {"guest": ["administrator", "user"]}
# 'list_vhosts' function tests: 2
def test_list_vhosts():
"""
    Test if it returns a list of vhosts based on rabbitmqctl list_vhosts.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "/\nsaltstack\n...", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_vhosts() == ["/", "saltstack", "..."]
def test_list_vhosts_with_warning():
"""
    Test if it returns a list of vhosts based on rabbitmqctl list_vhosts even with a leading WARNING.
"""
rtn_stdout = "\n".join(
[
"WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- location has moved to"
" /etc/rabbitmq/rabbitmq-env.conf",
"Listing users ...",
"/",
"saltstack",
"...\n",
]
)
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": rtn_stdout, "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_vhosts() == ["/", "saltstack", "..."]
# 'user_exists' function tests: 2
def test_user_exists():
"""
Test whether a given rabbitmq-internal user exists based
on rabbitmqctl list_users.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "Listing users ...\nsaltstack\t[administrator]\n...done",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.user_exists("saltstack")
def test_user_exists_negative():
"""
Negative test of whether rabbitmq-internal user exists based
on rabbitmqctl list_users.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "Listing users ...\nsaltstack\t[administrator]\n...done",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert not rabbitmq.user_exists("salt")
# 'vhost_exists' function tests: 2
def test_vhost_exists():
"""
    Test if it returns whether the vhost exists based
on rabbitmqctl list_vhosts.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "Listing vhosts ...\nsaltstack",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.vhost_exists("saltstack")
def test_vhost_exists_negative():
"""
    Test if it returns whether the vhost exists based
on rabbitmqctl list_vhosts.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "Listing vhosts ...\nsaltstack",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert not rabbitmq.vhost_exists("salt")
# 'add_user' function tests: 1
def test_add_user():
"""
    Test if it adds a RabbitMQ user via rabbitmqctl
user_add <user> <password>
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.add_user("saltstack") == {"Added": "saltstack"}
mock_run = MagicMock(return_value="Error")
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
with patch.object(
rabbitmq,
"clear_password",
return_value={"Error": "Error", "retcode": 1},
):
pytest.raises(CommandExecutionError, rabbitmq.add_user, "saltstack")
# 'delete_user' function tests: 1
def test_delete_user():
"""
Test if it deletes a user via rabbitmqctl delete_user.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.delete_user("saltstack") == {"Deleted": "saltstack"}
# 'check_password' function tests: 2
def test_check_password_lt_38():
"""
Test if it checks a user's password for RabbitMQ less than v3.8.
"""
mock_run = MagicMock(return_value='{rabbit,"RabbitMQ","3.5.7"}')
mock_run2 = MagicMock(
return_value={
"retcode": 0,
"stdout": 'Authenticating user "saltstack" ...\nSuccess',
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run": mock_run, "cmd.run_all": mock_run2}):
assert rabbitmq.check_password("<PASSWORD>", "<PASSWORD>")
def test_check_password_gt_38():
"""
Test if it checks a user's password for RabbitMQ greater than v3.8.
"""
mock_run = MagicMock(return_value="RabbitMQ version: 3.8.3")
mock_run2 = MagicMock(
return_value={
"retcode": 0,
"stdout": 'Authenticating user "saltstack" ...\nSuccess',
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run": mock_run, "cmd.run_all": mock_run2}):
assert rabbitmq.check_password("<PASSWORD>", "<PASSWORD>")
# 'change_password' function tests: 1
def test_change_password():
"""
Test if it changes a user's password.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.change_password("saltstack", "<PASSWORD>") == {
"Password Changed": "<PASSWORD>"
}
# 'clear_password' function tests: 1
def test_clear_password():
"""
Test if it removes a user's password.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.clear_password("<PASSWORD>") == {"Password Cleared": "<PASSWORD>"}
# 'add_vhost' function tests: 1
def test_add_vhost():
"""
Test if it adds a vhost via rabbitmqctl add_vhost.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.add_vhost("saltstack") == {"Added": "saltstack"}
# 'delete_vhost' function tests: 1
def test_delete_vhost():
"""
    Test if it deletes a vhost via rabbitmqctl delete_vhost.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.delete_vhost("saltstack") == {"Deleted": "saltstack"}
# 'set_permissions' function tests: 1
def test_set_permissions():
"""
    Test if it sets permissions for a vhost via rabbitmqctl set_permissions.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.set_permissions("myvhost", "myuser") == {
"Permissions Set": "saltstack"
}
# 'list_permissions' function tests: 1
def test_list_permissions():
"""
Test if it lists permissions for a vhost
via rabbitmqctl list_permissions.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'[{"user":"myuser","configure":"saltstack","write":".*","read":"1"}]'
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_permissions("saltstack") == {
"myuser": {"configure": "saltstack", "write": ".*", "read": "1"},
}
# 'list_user_permissions' function tests: 1
def test_list_user_permissions():
"""
    Test if it lists permissions for a user
via rabbitmqctl list_user_permissions.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": '[{"vhost":"saltstack","configure":"saltstack","write":"0","read":"1"},{"vhost":"guest","configure":"0","write":"one","read":""}]',
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_user_permissions("myuser") == {
"saltstack": {"configure": "saltstack", "write": "0", "read": "1"},
"guest": {"configure": "0", "write": "one", "read": ""},
}
# 'set_user_tags' function tests: 1
def test_set_user_tags():
"""
    Test if it adds user tags via rabbitmqctl set_user_tags.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.set_user_tags("myadmin", "admin") == {"Tag(s) set": "saltstack"}
# 'status' function tests: 1
def test_status():
"""
    Test if it returns the rabbitmq status.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.status() == "saltstack"
# 'cluster_status' function tests: 1
def test_cluster_status():
"""
    Test if it returns the rabbitmq cluster_status.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.cluster_status() == "saltstack"
# 'join_cluster' function tests: 1
def test_join_cluster():
"""
    Test if it joins a rabbit cluster.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.join_cluster("rabbit.example.com") == {"Join": "saltstack"}
# 'stop_app' function tests: 1
def test_stop_app():
"""
Test if it stops the RabbitMQ application,
leaving the Erlang node running.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.stop_app() == "saltstack"
# 'start_app' function tests: 1
def test_start_app():
"""
    Test if it starts the RabbitMQ application.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.start_app() == "saltstack"
# 'reset' function tests: 1
def test_reset():
"""
    Test if it returns a RabbitMQ node to its virgin state
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.reset() == "saltstack"
# 'force_reset' function tests: 1
def test_force_reset():
"""
    Test if it forcefully returns a RabbitMQ node to its virgin state
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.force_reset() == "saltstack"
# 'list_queues' function tests: 1
def test_list_queues():
"""
Test if it returns queue details of the / virtual host
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "saltstack\t0\nceleryev.234-234\t10",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_queues() == {
"saltstack": ["0"],
"celeryev.234-234": ["10"],
}
# 'list_queues_vhost' function tests: 1
def test_list_queues_vhost():
"""
Test if it returns queue details of specified virtual host.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "saltstack\t0\nceleryev.234-234\t10",
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.list_queues_vhost("consumers") == {
"saltstack": ["0"],
"celeryev.234-234": ["10"],
}
# 'list_policies' function tests: 3
def test_list_policies():
"""
    Test if it returns a dictionary of policies nested by vhost
and name based on the data returned from rabbitmqctl list_policies.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="3.7")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
), patch.dict(rabbitmq.__grains__, {"os_family": ""}):
assert rabbitmq.list_policies() == {}
def test_list_policies_freebsd():
"""
    Test if it returns a dictionary of policies nested by vhost
and name based on the data returned from rabbitmqctl list_policies.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="3.7")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
), patch.dict(rabbitmq.__grains__, {"os_family": "FreeBSD"}):
assert rabbitmq.list_policies() == {}
def test_list_policies_old_version():
"""
    Test if it returns a dictionary of policies nested by vhost
and name based on the data returned from rabbitmqctl list_policies.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="3.0")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
), patch.dict(rabbitmq.__grains__, {"os_family": ""}):
assert rabbitmq.list_policies() == {}
# 'set_policy' function tests: 1
def test_set_policy():
"""
    Test if it sets a policy based on rabbitmqctl set_policy.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.set_policy("/", "HA", ".*", '{"ha-mode": "all"}') == {
"Set": "saltstack"
}
# 'delete_policy' function tests: 1
def test_delete_policy():
"""
    Test if it deletes a policy based on rabbitmqctl clear_policy.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.delete_policy("/", "HA") == {"Deleted": "saltstack"}
# 'policy_exists' function tests: 1
def test_policy_exists():
"""
    Test if it returns whether the policy exists
based on rabbitmqctl list_policies.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="3.0")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
), patch.dict(rabbitmq.__grains__, {"os_family": ""}):
assert not rabbitmq.policy_exists("/", "HA")
# 'list_available_plugins' function tests: 2
def test_list_available_plugins():
"""
Test if it returns a list of plugins.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack\nsalt\nother", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.list_available_plugins() == ["saltstack", "salt", "other"]
def test_list_available_plugins_space_delimited():
"""
Test if it returns a list of plugins.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack salt other", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.list_available_plugins() == ["saltstack", "salt", "other"]
# 'list_enabled_plugins' function tests: 2
def test_list_enabled_plugins():
"""
Test if it returns a list of plugins.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack\nsalt\nother", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.list_enabled_plugins() == ["saltstack", "salt", "other"]
def test_list_enabled_plugins_space_delimited():
"""
Test if it returns a list of plugins.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack salt other", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.list_enabled_plugins() == ["saltstack", "salt", "other"]
# 'plugin_is_enabled' function tests: 2
def test_plugin_is_enabled():
"""
Test if it returns true for an enabled plugin.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack\nsalt\nother", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.plugin_is_enabled("saltstack")
assert rabbitmq.plugin_is_enabled("salt")
assert rabbitmq.plugin_is_enabled("other")
def test_plugin_is_enabled_negative():
"""
Test if it returns false for a disabled plugin.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack\nother", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert not rabbitmq.plugin_is_enabled("salt")
assert not rabbitmq.plugin_is_enabled("stack")
assert not rabbitmq.plugin_is_enabled("random")
# 'enable_plugin' function tests: 1
def test_enable_plugin():
"""
    Test if it enables a RabbitMQ plugin via the rabbitmq-plugins command.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.enable_plugin("salt") == {"Enabled": "saltstack"}
# 'disable_plugin' function tests: 1
def test_disable_plugin():
"""
    Test if it disables a RabbitMQ plugin via the rabbitmq-plugins command.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.disable_plugin("salt") == {"Disabled": "saltstack"}
# 'list_upstreams' function tests: 1
def test_list_upstreams():
"""
Test if it returns a list of upstreams.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'federation-upstream\tremote-name\t{"ack-mode":"on-confirm"'
',"max-hops":1,"trust-user-id":true,"uri":"amqp://username:'
'[email protected]"}'
),
"stderr": "",
}
)
mock_pkg = MagicMock(return_value="")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
):
assert rabbitmq.list_upstreams() == {
"remote-name": (
'{"ack-mode":"on-confirm","max-hops":1,'
'"trust-user-id":true,"uri":"amqp://username:'
'<EMAIL>"}'
)
}
# 'upstream_exists' function tests: 2
def test_upstream_exists():
"""
Test whether a given rabbitmq-internal upstream exists based
on rabbitmqctl list_upstream.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'federation-upstream\tremote-name\t{"ack-mode":"on-confirm"'
',"max-hops":1,"trust-user-id":true,"uri":"amqp://username:'
'<EMAIL>"}'
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.upstream_exists("remote-name")
def test_upstream_exists_negative():
"""
Negative test of whether rabbitmq-internal upstream exists based
on rabbitmqctl list_upstream.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'federation-upstream\tremote-name\t{"ack-mode":"on-confirm"'
',"max-hops":1,"trust-user-id":true,"uri":"amqp://username:'
'<EMAIL>"}'
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert not rabbitmq.upstream_exists("does-not-exist")
# 'set_upstream' function tests: 1
def test_set_upstream():
"""
    Test if a RabbitMQ upstream gets configured properly.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'Setting runtime parameter "federation-upstream" for component '
'"remote-name" to "{"trust-user-id": true, "uri": '
'"amqp://username:<EMAIL>@remote.fqdn", "ack-mode": "on-confirm", '
'"max-hops": 1}" in vhost "/" ...'
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.set_upstream(
"remote-name",
"amqp://username:[email protected]",
ack_mode="on-confirm",
max_hops=1,
trust_user_id=True,
)
# 'delete_upstream' function tests: 2
def test_delete_upstream():
"""
Test if an upstream gets deleted properly using rabbitmqctl delete_upstream.
"""
mock_run = MagicMock(
return_value={
"retcode": 0,
"stdout": (
'Clearing runtime parameter "remote-name" for component '
'"federation-upstream" on vhost "/" ...'
),
"stderr": "",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
assert rabbitmq.delete_upstream("remote-name")
def test_delete_upstream_negative():
"""
    Negative test trying to delete a non-existent upstream.
"""
mock_run = MagicMock(
return_value={
"retcode": 70,
"stdout": (
'Clearing runtime parameter "remote-name" for component '
'"federation-upstream" on vhost "/" ...'
),
"stderr": "Error:\nParameter does not exist",
}
)
with patch.dict(rabbitmq.__salt__, {"cmd.run_all": mock_run}):
pytest.raises(CommandExecutionError, rabbitmq.delete_upstream, "remote-name")
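# Possible refactor, sketched here only (not part of the original tests): the
# repeated __salt__ patching above could be centralised in a fixture such as
# this; the fixture name and signature are assumptions.
@pytest.fixture
def patched_rabbitmq_cmd():
    def _patch(stdout, retcode=0, stderr=""):
        mock_run = MagicMock(
            return_value={"retcode": retcode, "stdout": stdout, "stderr": stderr}
        )
        mock_pkg = MagicMock(return_value="")
        return patch.dict(
            rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
        )
    return _patch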
|
PhysicsTools/PatAlgos/python/slimming/offlineSlimmedPrimaryVerticesWithBS_cfi.py | Purva-Chaudhari/cmssw | 852 | 12613850 | import FWCore.ParameterSet.Config as cms
offlineSlimmedPrimaryVerticesWithBS = cms.EDProducer("PATVertexSlimmer",
src = cms.InputTag("offlinePrimaryVerticesWithBS"),
score = cms.InputTag("primaryVertexWithBSAssociation","original"),
)
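# Illustrative usage sketch (an assumption, not part of the original cfi
# fragment): a cfi like this is normally pulled into a top-level configuration
# via process.load(); the process label below is an example only.
if __name__ == "__main__":
    process = cms.Process("EXAMPLE")
    process.load("PhysicsTools.PatAlgos.slimming.offlineSlimmedPrimaryVerticesWithBS_cfi")
    print(process.offlineSlimmedPrimaryVerticesWithBS.dumpPython())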
|
airbyte-integrations/connectors/source-hubspot/source_hubspot/__init__.py | rajatariya21/airbyte | 6,215 | 12613864 | from .source import SourceHubspot
__all__ = ["SourceHubspot"]
|
caliban/platform/gke/types.py | Anon-Artist/caliban | 425 | 12613901 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""types relevant to gke"""
from enum import Enum
from typing import NamedTuple, Optional
from google.auth.credentials import Credentials
from kubernetes.client import V1Job
# ----------------------------------------------------------------------------
# Node image types
# see https://cloud.google.com/kubernetes-engine/docs/concepts/node-images
NodeImage = Enum(
'NODE_IMAGE', {
'COS': 'cos',
'UBUNTU': 'ubuntu',
'COS_CONTAINERD': 'cos_containerd',
'UBUNTU_CONTAINERD': 'ubuntu_containerd'
})
# ----------------------------------------------------------------------------
# GKE operation status, see:
# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.operations
OpStatus = Enum(
'OP_STATUS', {
'STATUS_UNSPECIFIED': 'STATUS_UNSPECIFIED',
'PENDING': 'PENDING',
'RUNNING': 'RUNNING',
'DONE': 'DONE',
'ABORTING': 'ABORTING'
})
# ----------------------------------------------------------------------------
# Credentials data (credentials, project id)
CredentialsData = NamedTuple("CredentialsData",
[("credentials", Optional[Credentials]),
("project_id", Optional[str])])
# ----------------------------------------------------------------------------
# GKE release channel, see:
# https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels
# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.ReleaseChannel
# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#channel
ReleaseChannel = Enum(
'RELEASE_CHANNEL', {
'UNSPECIFIED': 'UNSPECIFIED',
'RAPID': 'RAPID',
'REGULAR': 'REGULAR',
'STABLE': 'STABLE'
})
# ----------------------------------------------------------------------------
class JobStatus(Enum):
'''gke job status'''
STATE_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
FAILED = 3
SUCCEEDED = 4
UNAVAILABLE = 5
def is_terminal(self) -> bool:
return self.name in ['FAILED', 'SUCCEEDED', 'UNAVAILABLE']
@classmethod
def from_job_info(cls, job_info: V1Job) -> "JobStatus":
if job_info is None:
return JobStatus.STATE_UNSPECIFIED
if job_info.status is None:
return JobStatus.STATE_UNSPECIFIED
# completed
if job_info.status.completion_time is not None:
if job_info.status.succeeded is not None:
if job_info.status.succeeded > 0:
return JobStatus.SUCCEEDED
else:
return JobStatus.FAILED
# active/pending
if job_info.status.active is not None:
if job_info.status.active > 0:
return JobStatus.RUNNING
else:
return JobStatus.PENDING
# unknown
return JobStatus.STATE_UNSPECIFIED
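# Illustrative sketch (not part of the original module): how from_job_info maps
# kubernetes job status objects onto the enum above; the concrete field values
# below are examples only.
def _job_status_example():
  import datetime

  from kubernetes.client import V1JobStatus

  running = V1Job(status=V1JobStatus(active=1))
  done = V1Job(
      status=V1JobStatus(succeeded=1,
                         completion_time=datetime.datetime.utcnow()))

  assert JobStatus.from_job_info(running) == JobStatus.RUNNING
  assert JobStatus.from_job_info(done) == JobStatus.SUCCEEDED
  assert JobStatus.from_job_info(None) == JobStatus.STATE_UNSPECIFIED
  assert JobStatus.FAILED.is_terminal() and not JobStatus.RUNNING.is_terminal()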
|
lib/smpl/vertex_joint_selector.py | xuchen-ethz/snarf | 150 | 12613910 | # -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems and the Max Planck Institute for Biological
# Cybernetics. All rights reserved.
#
# Contact: <EMAIL>
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from .utils import to_tensor
class VertexJointSelector(nn.Module):
def __init__(self, vertex_ids=None,
use_hands=True,
use_feet_keypoints=True, **kwargs):
super(VertexJointSelector, self).__init__()
extra_joints_idxs = []
face_keyp_idxs = np.array([
vertex_ids['nose'],
vertex_ids['reye'],
vertex_ids['leye'],
vertex_ids['rear'],
vertex_ids['lear']], dtype=np.int64)
extra_joints_idxs = np.concatenate([extra_joints_idxs,
face_keyp_idxs])
if use_feet_keypoints:
feet_keyp_idxs = np.array([vertex_ids['LBigToe'],
vertex_ids['LSmallToe'],
vertex_ids['LHeel'],
vertex_ids['RBigToe'],
vertex_ids['RSmallToe'],
vertex_ids['RHeel']], dtype=np.int32)
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, feet_keyp_idxs])
if use_hands:
self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
tips_idxs = []
for hand_id in ['l', 'r']:
for tip_name in self.tip_names:
tips_idxs.append(vertex_ids[hand_id + tip_name])
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, tips_idxs])
self.register_buffer('extra_joints_idxs',
to_tensor(extra_joints_idxs, dtype=torch.long))
def forward(self, vertices, joints):
extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)
joints = torch.cat([joints, extra_joints], dim=1)
return joints
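# Illustrative usage sketch (not part of the original module): the vertex ids
# below are placeholders, not real SMPL vertex indices.
def _vertex_joint_selector_example():
    names = ['nose', 'reye', 'leye', 'rear', 'lear',
             'LBigToe', 'LSmallToe', 'LHeel', 'RBigToe', 'RSmallToe', 'RHeel']
    names += [hand + tip for hand in ('l', 'r')
              for tip in ('thumb', 'index', 'middle', 'ring', 'pinky')]
    vertex_ids = {name: i for i, name in enumerate(names)}

    selector = VertexJointSelector(vertex_ids=vertex_ids)
    vertices = torch.rand(2, 6890, 3)  # batch of SMPL-sized meshes
    joints = torch.rand(2, 24, 3)      # batch of skeleton joints
    out = selector(vertices, joints)
    # 24 skeleton joints + 5 face + 6 feet + 10 finger-tip keypoints = 45
    assert out.shape == (2, 45, 3)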
|