max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/providers/google/cloud/hooks/test_secret_manager.py | ChaseKnowlden/airflow | 15,947 | 12642692 | <reponame>ChaseKnowlden/airflow
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import MagicMock, patch
from google.api_core.exceptions import NotFound
from google.cloud.secretmanager_v1.proto.service_pb2 import AccessSecretVersionResponse
from airflow.providers.google.cloud.hooks.secret_manager import SecretsManagerHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
)
BASE_PACKAGE = 'airflow.providers.google.common.hooks.base_google.'
SECRETS_HOOK_PACKAGE = 'airflow.providers.google.cloud.hooks.secret_manager.'
INTERNAL_CLIENT_PACKAGE = 'airflow.providers.google.cloud._internal_client.secret_manager_client'
class TestSecretsManagerHook(unittest.TestCase):
@patch(INTERNAL_CLIENT_PACKAGE + "._SecretManagerClient.client", return_value=MagicMock())
@patch(
SECRETS_HOOK_PACKAGE + 'SecretsManagerHook._get_credentials_and_project_id',
return_value=(MagicMock(), GCP_PROJECT_ID_HOOK_UNIT_TEST),
)
@patch(BASE_PACKAGE + 'GoogleBaseHook.__init__', new=mock_base_gcp_hook_default_project_id)
def test_get_missing_key(self, mock_get_credentials, mock_client):
mock_client.secret_version_path.return_value = "full-path"
mock_client.access_secret_version.side_effect = NotFound('test-msg')
secrets_manager_hook = SecretsManagerHook(gcp_conn_id='test')
mock_get_credentials.assert_called_once_with()
secret = secrets_manager_hook.get_secret(secret_id="secret")
mock_client.secret_version_path.assert_called_once_with('example-project', 'secret', 'latest')
mock_client.access_secret_version.assert_called_once_with("full-path")
assert secret is None
@patch(INTERNAL_CLIENT_PACKAGE + "._SecretManagerClient.client", return_value=MagicMock())
@patch(
SECRETS_HOOK_PACKAGE + 'SecretsManagerHook._get_credentials_and_project_id',
return_value=(MagicMock(), GCP_PROJECT_ID_HOOK_UNIT_TEST),
)
@patch(BASE_PACKAGE + 'GoogleBaseHook.__init__', new=mock_base_gcp_hook_default_project_id)
def test_get_existing_key(self, mock_get_credentials, mock_client):
mock_client.secret_version_path.return_value = "full-path"
test_response = AccessSecretVersionResponse()
test_response.payload.data = b"result"
mock_client.access_secret_version.return_value = test_response
secrets_manager_hook = SecretsManagerHook(gcp_conn_id='test')
mock_get_credentials.assert_called_once_with()
secret = secrets_manager_hook.get_secret(secret_id="secret")
mock_client.secret_version_path.assert_called_once_with('example-project', 'secret', 'latest')
mock_client.access_secret_version.assert_called_once_with("full-path")
assert "result" == secret
|
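The tests above pin down the hook's public surface: `SecretsManagerHook` is constructed with a `gcp_conn_id`, and `get_secret(secret_id=...)` returns the secret payload or `None` when the version is missing. A minimal usage sketch; the connection id and secret id below are placeholders, not values from the tests:

```python
# Sketch only: assumes Airflow with the Google provider installed and a GCP
# connection (here assumed to be named "google_cloud_default") with valid credentials.
from airflow.providers.google.cloud.hooks.secret_manager import SecretsManagerHook

hook = SecretsManagerHook(gcp_conn_id="google_cloud_default")
value = hook.get_secret(secret_id="my-secret")  # latest version, as exercised in the tests
if value is None:
    print("secret or version not found")
else:
    print("secret resolved")
```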
core/layout/strategy/VerticalFlowStrategy.py | gregbugaj/TextGenerator | 166 | 12642705 | from core.layout.strategy import Strategy
class VerticalFlowStrategy(Strategy):
"""
Generate a vertically flowing layout of text patch blocks (stacked top to bottom, starting a new column when the group box is exceeded).
"""
def logic(self, block_group, next_block) -> bool:
init_x = block_group.group_box[0]
init_y = block_group.group_box[1]
next_x = init_x
next_y = init_y
max_r = 0
last_block = None
for block in block_group.block_list:
r = block.outer_box[2]
if max_r < r:
max_r = r
last_block = block
if last_block:
next_x = last_block.outer_box[0]
next_y = lasb_block.outer_box[3] + 1
next_block.locate_by_outter(next_x, next_y)
if self.check_is_out(block_group=block_group, block=next_block):
next_x = max_r + 1
next_y = init_y
next_block.locate_by_outter(next_x, next_y)
if self.check_is_out(block_group=block_group, block=next_block):
return False
return True
|
model_compiler/tests/model_compiler/compilers/test_tf_model_to_saved_model.py | yuanliya/Adlik | 548 | 12642708 | <filename>model_compiler/tests/model_compiler/compilers/test_tf_model_to_saved_model.py<gh_stars>100-1000
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
import tensorflow as tf
import model_compiler.compilers.tf_model_to_saved_model as compiler
from model_compiler.compilers.tf_model_to_saved_model import Config
from model_compiler.models.irs.tf_model import Input, TensorFlowModel
class ConfigTestCase(TestCase):
def test_from_json_minimal(self):
config = Config.from_json({})
self.assertEqual(config, Config(input_signature=None, output_signature=None))
def test_from_json_input_names_only(self):
config = Config.from_json({'input_signatures': ['abc', 'def']})
self.assertEqual(config, Config(input_signature=['abc', 'def'], output_signature=None))
def test_from_json_output_names_only(self):
config = Config.from_json({'output_signatures': ['abc', 'def']})
self.assertEqual(config, Config(input_signature=None, output_signature=['abc', 'def']))
def test_from_json_full(self):
config = Config.from_json({'input_signatures': ['abc', 'def'],
'output_signatures': ['ghi', 'jkl', 'mno']})
self.assertEqual(config, Config(input_signature=['abc', 'def'], output_signature=['ghi', 'jkl', 'mno']))
class CompileSourceTestCase(TestCase):
def test_compile_simple(self):
with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='y')
output_z = tf.add(input_x, input_y, name='z')
compiled = compiler.compile_source(source=TensorFlowModel(inputs=[Input(tensor=input_x), Input(tensor=input_y)],
outputs=[output_z],
session=session),
config=Config())
self.assertEqual(len(compiled.inputs), 2)
self.assertEqual(compiled.inputs[0].name, 'x:0')
self.assertEqual(compiled.inputs[0].tensor, input_x)
self.assertIsNone(compiled.inputs[0].data_format)
self.assertEqual(compiled.inputs[1].name, 'y:0')
self.assertEqual(compiled.inputs[1].tensor, input_y)
self.assertIsNone(compiled.inputs[1].data_format)
self.assertEqual(len(compiled.outputs), 1)
self.assertEqual(compiled.outputs[0].name, 'z:0')
self.assertIs(compiled.outputs[0].tensor, output_z)
self.assertIs(compiled.session, session)
def test_compile_with_explicit_signature(self):
with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='y')
input_z = tf.add(input_x, input_y, name='z')
compiled = compiler.compile_source(source=TensorFlowModel(inputs=[Input(tensor=input_x), Input(tensor=input_y)],
outputs=[input_z],
session=session),
config=Config(input_signature=['foo', 'bar'], output_signature=['baz']))
self.assertEqual(len(compiled.inputs), 2)
self.assertEqual(compiled.inputs[0].name, 'foo')
self.assertEqual(compiled.inputs[0].tensor, input_x)
self.assertIsNone(compiled.inputs[0].data_format)
self.assertEqual(compiled.inputs[1].name, 'bar')
self.assertEqual(compiled.inputs[1].tensor, input_y)
self.assertIsNone(compiled.inputs[1].data_format)
self.assertEqual(len(compiled.outputs), 1)
self.assertEqual(compiled.outputs[0].name, 'baz')
self.assertIs(compiled.outputs[0].tensor, input_z)
self.assertIs(compiled.session, session)
|
newm/overlay/overlay.py | sadrach-cl/newm | 265 | 12642737 | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import logging
from pywm.touchpad.gestures import Gesture
if TYPE_CHECKING:
from ..state import LayoutState
from ..layout import Layout
logger = logging.getLogger(__name__)
class Overlay:
def __init__(self, layout: Layout) -> None:
self.layout = layout
self._ready = False
def ready(self) -> bool:
return self._ready
def init(self) -> None:
wm_state, dt = self._enter_transition()
if wm_state is not None and dt is not None:
logger.debug("Overlay: Enter animation")
self.layout.animate_to(lambda _: (None, wm_state), dt, self._enter_finished, overlay_safe=True)
else:
self._ready = True
self.post_init()
def _enter_finished(self) -> None:
logger.debug("Overlay: Enter animation completed")
self._ready = True
def destroy(self) -> None:
self.pre_destroy()
self._ready = False
wm_state, dt = self._exit_transition()
if wm_state is not None and dt is not None:
logger.debug("Overlay: Exit animation")
self.layout.animate_to(lambda _: (None, wm_state), dt, self._exit_finished, overlay_safe=True)
else:
self.layout.on_overlay_destroyed()
def _exit_finished(self) -> None:
logger.debug("Overlay: Exit animation completed")
self.layout.on_overlay_destroyed()
"""
Virtual methods
"""
def post_init(self) -> None:
pass
def pre_destroy(self) -> None:
pass
def _enter_transition(self) -> tuple[Optional[LayoutState], Optional[float]]:
return None, 0
def _exit_transition(self) -> tuple[Optional[LayoutState], Optional[float]]:
return None, 0
def on_key(self, time_msec: int, keycode: int, state: int, keysyms: str) -> bool:
return True
def on_modifiers(self, modifiers: int) -> bool:
return False
def on_motion(self, time_msec: int, delta_x: float, delta_y: float) -> bool:
return False
def on_button(self, time_msec: int, button: int, state: int) -> bool:
return False
def on_axis(self, time_msec: int, source: int, orientation: int, delta: float, delta_discrete: int) -> bool:
return False
def on_gesture(self, gesture: Gesture) -> bool:
return False
|
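Concrete overlays only fill in the virtual methods above: `_enter_transition`/`_exit_transition` return a target `LayoutState` plus a duration (or `(None, 0)` for no animation), and the `on_*` handlers return `True` to consume an event. A minimal subclass sketch, assumed to live alongside the `Overlay` class above (so `Overlay`, `logger` and the type imports are in scope); the behaviour is illustrative, not taken from newm:

```python
# Hypothetical overlay: no enter/exit animation, swallows all key events.
class PassthroughOverlay(Overlay):
    def post_init(self) -> None:
        logger.debug("PassthroughOverlay ready")

    def _enter_transition(self) -> tuple[Optional[LayoutState], Optional[float]]:
        return None, 0          # (state, duration): a None state means no enter animation

    def _exit_transition(self) -> tuple[Optional[LayoutState], Optional[float]]:
        return None, 0

    def on_key(self, time_msec: int, keycode: int, state: int, keysyms: str) -> bool:
        return True             # consume every key event while the overlay is open
```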
ple/games/monsterkong/coin.py | jsalvatier/PyGame-Learning-Environment | 959 | 12642759 | <filename>ple/games/monsterkong/coin.py
__author__ = '<NAME>'
import pygame
import os
from .onBoard import OnBoard
class Coin(OnBoard):
"""
This class defines all our coins.
Each coin will increase our score by an amount of 'value'.
We animate each coin with 5 images.
A coin inherits from the OnBoard class since we will use it as an inanimate object on our board.
"""
def __init__(self, raw_image, position, _dir):
OnBoard.__init__(self, raw_image, position)
self.__coinAnimState = 0 # Initialize animation state to 0
self.IMAGES = {
"coin1": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin1.png')), (15, 15)).convert_alpha(),
"coin2": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin2.png')), (15, 15)).convert_alpha(),
"coin3": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin3.png')), (15, 15)).convert_alpha(),
"coin4": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin4.png')), (15, 15)).convert_alpha(),
"coin5": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin5.png')), (15, 15)).convert_alpha()
}
# Update the image of the coin
def updateImage(self, raw_image):
self.image = raw_image
# Animate the coin
def animateCoin(self):
self.__coinAnimState = (self.__coinAnimState + 1) % 25
if self.__coinAnimState / 5 == 0:
self.updateImage(self.IMAGES["coin1"])
if self.__coinAnimState / 5 == 1:
self.updateImage(self.IMAGES["coin2"])
if self.__coinAnimState / 5 == 2:
self.updateImage(self.IMAGES["coin3"])
if self.__coinAnimState / 5 == 3:
self.updateImage(self.IMAGES["coin4"])
if self.__coinAnimState / 5 == 4:
self.updateImage(self.IMAGES["coin5"])
|
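The animation in `animateCoin` is driven purely by the counter: it cycles from 0 to 24 and each block of five frames maps to one of the five sprites. A standalone sketch of that schedule (note the integer division; under Python 3 the `/` comparisons above only evaluate true at exact multiples of five, which still advances the sprite once every five calls):

```python
# Standalone illustration of the coin animation schedule; no pygame required.
for state in range(25):
    sprite = f"coin{state // 5 + 1}"   # states 0-4 -> coin1, 5-9 -> coin2, ..., 20-24 -> coin5
    if state % 5 == 0:                 # the frames at which the sprite actually changes
        print(state, sprite)
```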
examples/ner/utils.py | nlpaueb/GreekBERT | 117 | 12642770 | <reponame>nlpaueb/GreekBERT
def parse_ner_dataset_file(f):
tokens = []
for i, l in enumerate(f):
l_split = l.split()
if len(l_split) == 0:
yield tokens
tokens = []  # rebind rather than clear() so the list already yielded is not emptied
continue
if len(l_split) < 2:
continue # todo: fix this
else:
tokens.append({'text': l_split[0], 'label': l_split[-1]})
if tokens:
yield tokens
|
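The generator above yields one sentence per blank-line-separated block, each sentence being a list of `{'text', 'label'}` dicts taken from the first and last whitespace-separated columns. A usage sketch with an in-memory CoNLL-style file:

```python
# Usage sketch: any iterable of lines works, e.g. an in-memory file.
import io

sample = io.StringIO(
    "Η B-ORG\n"
    "Βουλή I-ORG\n"
    "\n"
    "Αθήνα B-LOC\n"
)
for sentence in parse_ner_dataset_file(sample):
    print([(tok["text"], tok["label"]) for tok in sentence])
# -> [('Η', 'B-ORG'), ('Βουλή', 'I-ORG')]
# -> [('Αθήνα', 'B-LOC')]
```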
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial2_Solution_30701e58.py | raxosiris/course-content | 2,294 | 12642824 | <reponame>raxosiris/course-content
# hint: see np.diff()
inter_switch_intervals = np.diff(switch_times)
# plot inter-switch intervals
with plt.xkcd():
plot_interswitch_interval_histogram(inter_switch_intervals) |
tests/unit/test_plot_elements.py | olavolav/textplot | 156 | 12642885 | <filename>tests/unit/test_plot_elements.py
import numpy as np # type: ignore
from uniplot.plot_elements import character_for_2by2_pixels
######################################
# Testing: character_for_2by2_pixels #
######################################
def test_empty_square():
square = np.zeros([2, 2])
assert character_for_2by2_pixels(square) == ""
def test_full_square():
square = np.ones([2, 2])
assert character_for_2by2_pixels(square) == "█"
def test_top_left_quarter_square():
square = np.array([[1, 0], [0, 0]])
assert character_for_2by2_pixels(square) == "▘"
def test_left_half_square():
square = np.array([[1, 0], [1, 0]])
assert character_for_2by2_pixels(square) == "▌"
|
python2/rosmln/src/rosmln/srv/__init__.py | seba90/pracmln | 123 | 12642889 | <filename>python2/rosmln/src/rosmln/srv/__init__.py
from ._MLNInterface import *
|
examples/beamforming_time_domain.py | HemaZ/pyroomacoustics | 915 | 12642912 | """
This is a longer example that applies time domain beamforming towards a source
of interest in the presence of a strong interfering source.
"""
from __future__ import division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
import pyroomacoustics as pra
from pyroomacoustics.transform import stft
# Spectrogram figure properties
figsize = (15, 7) # figure size
fft_size = 512 # fft size for analysis
fft_hop = 8 # hop between analysis frame
fft_zp = 512 # zero padding
analysis_window = pra.hann(fft_size)
t_cut = 0.83 # length in [s] to remove at end of signal (no sound)
# Some simulation parameters
Fs = 8000
absorption = 0.1
max_order_sim = 2
sigma2_n = 5e-7
# Microphone array design parameters
mic1 = np.array([2, 1.5]) # position
M = 8 # number of microphones
d = 0.08 # distance between microphones
phi = 0.0 # angle from horizontal
max_order_design = 1 # maximum image generation used in design
shape = "Linear" # array shape
Lg_t = 0.100 # Filter size in seconds
Lg = np.ceil(Lg_t * Fs) # Filter size in samples
delay = 0.050 # Beamformer delay in seconds
# Define the FFT length
N = 1024
# Create a microphone array
if shape == "Circular":
R = pra.circular_2D_array(mic1, M, phi, d * M / (2 * np.pi))
else:
R = pra.linear_2D_array(mic1, M, phi, d)
# path to samples
path = os.path.dirname(__file__)
# The first signal (of interest) is singing
rate1, signal1 = wavfile.read(path + "/input_samples/singing_" + str(Fs) + ".wav")
signal1 = np.array(signal1, dtype=float)
signal1 = pra.normalize(signal1)
signal1 = pra.highpass(signal1, Fs)
delay1 = 0.0
# The second signal (interferer) is some german speech
rate2, signal2 = wavfile.read(path + "/input_samples/german_speech_" + str(Fs) + ".wav")
signal2 = np.array(signal2, dtype=float)
signal2 = pra.normalize(signal2)
signal2 = pra.highpass(signal2, Fs)
delay2 = 1.0
# Create the room
room_dim = [4, 6]
room1 = pra.ShoeBox(
room_dim,
absorption=absorption,
fs=Fs,
max_order=max_order_sim,
sigma2_awgn=sigma2_n,
)
# Add sources to room
good_source = np.array([1, 4.5]) # good source
normal_interferer = np.array([2.8, 4.3]) # interferer
room1.add_source(good_source, signal=signal1, delay=delay1)
room1.add_source(normal_interferer, signal=signal2, delay=delay2)
"""
MVDR direct path only simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N=N, Lg=Lg)
room1.add_microphone_array(mics)
room1.compute_rir()
room1.simulate()
mics.rake_mvdr_filters(
room1.sources[0][0:1],
room1.sources[1][0:1],
sigma2_n * np.eye(mics.Lg * mics.M),
delay=delay,
)
# process the signal
output = mics.process()
# save to output file
input_mic = pra.normalize(pra.highpass(mics.signals[mics.M // 2], Fs))
wavfile.write(path + "/output_samples/input.wav", Fs, input_mic)
out_DirectMVDR = pra.normalize(pra.highpass(output, Fs))
wavfile.write(path + "/output_samples/output_DirectMVDR.wav", Fs, out_DirectMVDR)
"""
Rake MVDR simulation
"""
# Add the microphone array and compute RIR
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
# Design the beamforming filters using some of the images sources
good_sources = room1.sources[0][: max_order_design + 1]
bad_sources = room1.sources[1][: max_order_design + 1]
mics.rake_mvdr_filters(
good_sources, bad_sources, sigma2_n * np.eye(mics.Lg * mics.M), delay=delay
)
# process the signal
output = mics.process()
# save to output file
out_RakeMVDR = pra.normalize(pra.highpass(output, Fs))
wavfile.write(path + "/output_samples/output_RakeMVDR.wav", Fs, out_RakeMVDR)
"""
Perceptual direct path only simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
mics.rake_perceptual_filters(
room1.sources[0][0:1],
room1.sources[1][0:1],
sigma2_n * np.eye(mics.Lg * mics.M),
delay=delay,
)
# process the signal
output = mics.process()
# save to output file
out_DirectPerceptual = pra.normalize(pra.highpass(output, Fs))
wavfile.write(
path + "/output_samples/output_DirectPerceptual.wav", Fs, out_DirectPerceptual
)
"""
Rake Perceptual simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
mics.rake_perceptual_filters(
good_sources, bad_sources, sigma2_n * np.eye(mics.Lg * mics.M), delay=delay
)
# process the signal
output = mics.process()
# save to output file
out_RakePerceptual = pra.normalize(pra.highpass(output, Fs))
wavfile.write(
path + "/output_samples/output_RakePerceptual.wav", Fs, out_RakePerceptual
)
"""
Plot all the spectrogram
"""
dSNR = pra.dB(room1.direct_snr(mics.center[:, 0], source=0), power=True)
print("The direct SNR for good source is " + str(dSNR))
# remove a bit of signal at the end
n_lim = int(np.ceil(len(input_mic) - t_cut * Fs))
input_clean = signal1[:n_lim]
input_mic = input_mic[:n_lim]
out_DirectMVDR = out_DirectMVDR[:n_lim]
out_RakeMVDR = out_RakeMVDR[:n_lim]
out_DirectPerceptual = out_DirectPerceptual[:n_lim]
out_RakePerceptual = out_RakePerceptual[:n_lim]
# compute time-frequency planes
F0 = stft.analysis(input_clean, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F1 = stft.analysis(input_mic, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F2 = stft.analysis(
out_DirectMVDR, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
F3 = stft.analysis(out_RakeMVDR, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F4 = stft.analysis(
out_DirectPerceptual, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
F5 = stft.analysis(
out_RakePerceptual, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
# (not so) fancy way to set the scale to avoid having the spectrum
# dominated by a few outliers
p_min = 7
p_max = 100
all_vals = np.concatenate(
(
pra.dB(F1 + pra.eps),
pra.dB(F2 + pra.eps),
pra.dB(F3 + pra.eps),
pra.dB(F0 + pra.eps),
pra.dB(F4 + pra.eps),
pra.dB(F5 + pra.eps),
)
).flatten()
vmin, vmax = np.percentile(all_vals, [p_min, p_max])
cmap = "afmhot"
interpolation = "none"
fig, ax = plt.subplots(figsize=figsize, nrows=2, ncols=3)
def plot_spectrogram(F, title):
plt.imshow(
pra.dB(F.T),
extent=[0, 1, 0, Fs / 2],
vmin=vmin,
vmax=vmax,
origin="lower",
cmap=plt.get_cmap(cmap),
interpolation=interpolation,
)
ax.set_title(title)
ax.set_ylabel("")
ax.set_xlabel("")
ax.set_aspect("auto")
ax.axis("off")
ax = plt.subplot(2, 3, 1)
plot_spectrogram(F0, "Desired Signal")
ax = plt.subplot(2, 3, 4)
plot_spectrogram(F1, "Microphone Input")
ax = plt.subplot(2, 3, 2)
plot_spectrogram(F2, "Direct MVDR")
ax = plt.subplot(2, 3, 5)
plot_spectrogram(F3, "Rake MVDR")
ax = plt.subplot(2, 3, 3)
plot_spectrogram(F4, "Direct Perceptual")
ax = plt.subplot(2, 3, 6)
plot_spectrogram(F5, "Rake Perceptual")
fig.savefig(path + "/figures/spectrograms.png", dpi=150)
plt.show()
|
tests/tissue_masks/test_luminosity_threshold_tissue_locator.py | BostonMeditechGroup/StainTools | 197 | 12642926 | <gh_stars>100-1000
import sys
import unittest
from unittest.mock import Mock
import numpy as np
sys.modules['spams'] = Mock()
from staintools.tissue_masks.luminosity_threshold_tissue_locator import LuminosityThresholdTissueLocator
from staintools.utils.exceptions import TissueMaskException
class TestLuminosityThresholdTissueLocator(unittest.TestCase):
def test_will_locate_tissue(self):
image = np.zeros(shape=(2, 2, 3), dtype=np.uint8)
image[:, :, 0] = [
[21, 247],
[32, 250]
]
image[:, :, 1] = [
[11, 240],
[21, 239]
]
image[:, :, 2] = [
[27, 250],
[29, 255]
]
get = LuminosityThresholdTissueLocator.get_tissue_mask(image)
expect = np.array([
[True, False],
[True, False]
])
self.assertTrue(np.allclose(expect, get))
def test_throws_exception_for_white_image(self):
image = np.ones(shape=(5, 7, 3), dtype=np.uint8) * 255
raises = False
try:
LuminosityThresholdTissueLocator.get_tissue_mask(image)
except TissueMaskException:
raises = True
self.assertTrue(raises)
|
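Outside the unit tests (which stub out `spams` before importing staintools), the locator is a plain static call on an RGB uint8 image: it returns a boolean tissue mask and raises `TissueMaskException` when nothing passes the luminosity threshold. A usage sketch, assuming the real staintools dependencies are installed:

```python
# Sketch only: requires staintools' runtime dependencies (including spams).
import numpy as np
from staintools.tissue_masks.luminosity_threshold_tissue_locator import LuminosityThresholdTissueLocator

image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, :, :] = 30     # dark pixels: expected to be flagged as tissue
image[1, :, :] = 245    # bright pixels: expected to be treated as background
mask = LuminosityThresholdTissueLocator.get_tissue_mask(image)
print(mask)             # boolean array, True where tissue was detected
```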
cort/test/core/test_mention_extractor.py | leonardoboliveira/cort | 141 | 12642946 | import unittest
import nltk
from cort.core import documents
from cort.core import mention_extractor
from cort.core import mentions
from cort.core import spans
__author__ = 'smartschat'
class TestMentionExtractor(unittest.TestCase):
def setUp(self):
self.real_example = """#begin document (bn/voa/02/voa_0220); part 000
bn/voa/02/voa_0220 0 0 Unidentified JJ (TOP(S(NP(NP* - - - - * -
bn/voa/02/voa_0220 0 1 gunmen NNS *) - - - - * -
bn/voa/02/voa_0220 0 2 in IN (PP* - - - - * -
bn/voa/02/voa_0220 0 3 north JJ (NP(ADJP* - - - - * -
bn/voa/02/voa_0220 0 4 western JJ *) - - - - * -
bn/voa/02/voa_0220 0 5 Colombia NNP *))) - - - - (GPE) -
bn/voa/02/voa_0220 0 6 have VBP (VP* have - - - * -
bn/voa/02/voa_0220 0 7 massacred VBN (VP* massacre - - - * -
bn/voa/02/voa_0220 0 8 at IN (NP(QP(ADVP* - - - - (CARDINAL* -
bn/voa/02/voa_0220 0 9 least JJS *) - - - - * -
bn/voa/02/voa_0220 0 10 twelve CD *) - - - - *) -
bn/voa/02/voa_0220 0 11 peasants NNS *) - - - - * -
bn/voa/02/voa_0220 0 12 in IN (PP* - - - - * -
bn/voa/02/voa_0220 0 13 the DT (NP(NP* - - - - * (0
bn/voa/02/voa_0220 0 14 second JJ * - - - - (ORDINAL) -
bn/voa/02/voa_0220 0 15 such JJ * - - - - * -
bn/voa/02/voa_0220 0 16 incident NN *) incident - 2 - * -
bn/voa/02/voa_0220 0 17 in IN (PP* - - - - * -
bn/voa/02/voa_0220 0 18 as RB (NP(QP* - - - - (DATE* -
bn/voa/02/voa_0220 0 19 many JJ *) - - - - * -
bn/voa/02/voa_0220 0 20 days NNS *)))))) day - 4 - *) 0)
bn/voa/02/voa_0220 0 21 . . *)) - - - - * -
bn/voa/02/voa_0220 0 0 Local JJ (TOP(S(NP* - - - - * (ARG0* * -
bn/voa/02/voa_0220 0 1 police NNS *) police - - - * *) * -
bn/voa/02/voa_0220 0 2 say VBP (VP* say 01 1 - * (V*) * -
bn/voa/02/voa_0220 0 3 it PRP (SBAR(S(NP*) - - - - * (ARG1* (ARG1*) -
bn/voa/02/voa_0220 0 4 's VBZ (VP* be 01 1 - * * (V*) -
bn/voa/02/voa_0220 0 5 not RB * - - - - * * (ARGM-NEG*) -
bn/voa/02/voa_0220 0 6 clear JJ (ADJP*) - - - - * * (ARG2*) -
bn/voa/02/voa_0220 0 7 who WP (SBAR(WHNP*) - - - - * * * -
bn/voa/02/voa_0220 0 8 was VBD (S(VP* be - 1 - * * * -
bn/voa/02/voa_0220 0 9 responsible JJ (ADJP* - - - - * * * -
bn/voa/02/voa_0220 0 10 for IN (PP* - - - - * * * -
bn/voa/02/voa_0220 0 11 the DT (NP* - - - - * * * (0
bn/voa/02/voa_0220 0 12 massacre NN *)))))))))) massacre - - - * *) * 0)
bn/voa/02/voa_0220 0 13 . . *)) - - - - * * * -
#end document
"""
self.another_real_example = """#begin document (mz/sinorama/10/ectb_1050); part 006
mz/sinorama/10/ectb_1050 6 0 What WP (TOP(SBARQ(WHNP*) - - - - * (R-ARG1*) -
mz/sinorama/10/ectb_1050 6 1 does VBZ (SQ* do - 7 - * * -
mz/sinorama/10/ectb_1050 6 2 this DT (NP*) - - - - * (ARG0*) -
mz/sinorama/10/ectb_1050 6 3 tell VB (VP* tell 01 1 - * (V*) -
mz/sinorama/10/ectb_1050 6 4 us PRP (NP*) - - - - * (ARG2*) -
mz/sinorama/10/ectb_1050 6 5 about IN (PP* - - - - * (ARG1* -
mz/sinorama/10/ectb_1050 6 6 the DT (NP(NP* - - - - * * -
mz/sinorama/10/ectb_1050 6 7 transformation NN *) transformation - 1 - * * -
mz/sinorama/10/ectb_1050 6 8 of IN (PP* - - - - * * -
mz/sinorama/10/ectb_1050 6 9 Taiwan NNP (NP(NP* - - - - (GPE) * -
mz/sinorama/10/ectb_1050 6 10 's POS *) - - - - * * -
mz/sinorama/10/ectb_1050 6 11 townships NNS *)))))) township - 1 - * *) -
mz/sinorama/10/ectb_1050 6 12 ? . *)) - - - - * * -
#end document"""
self.real_document = documents.CoNLLDocument(self.real_example)
self.another_real_document = documents.CoNLLDocument(
self.another_real_example)
self.tree = nltk.ParentedTree.fromstring(
"(NP (NP (NP (PRP$ his) (NN brother) (POS 's)) (NN wedding)) "
"(PP (IN in) (NP (NNP Khan) (NNPS Younes))))")
self.proper_name_mention_tree = nltk.ParentedTree.fromstring(
"(NP (NNP Taiwan) (POS 's))")
self.proper_name_mention_ner = ["GPE", "NONE"]
self.apposition_tree = nltk.ParentedTree.fromstring(
"(NP (NP (NP (NNP Secretary)) (PP (IN of) (NP (NNP State)))) "
"(NP (NNP Madeleine) (NNP Albright)))")
self.apposition_ner = ["NONE", "NONE", "NONE", "PERSON", "PERSON"]
self.more_proper_name_tree = nltk.ParentedTree.fromstring(
"(NP (NP (DT the) (NNP General) (NNP Secretary)) (PP (IN of) "
"(NP (DT the) (NNP CCP))))")
self.more_proper_name_ner = ["NONE", "NONE", "NONE", "NONE", "NONE",
"ORG"]
def test_extract_system_mentions(self):
expected_spans = sorted([
spans.Span(0, 1),
spans.Span(0, 5),
spans.Span(3, 5),
spans.Span(5, 5),
spans.Span(8, 10),
spans.Span(8, 11),
spans.Span(13, 16),
spans.Span(13, 20),
spans.Span(14, 14),
spans.Span(18, 20),
spans.Span(22, 23),
spans.Span(25, 25),
spans.Span(33, 34)
])
self.assertEqual(expected_spans,
[mention.span for
mention in mention_extractor.extract_system_mentions(
self.real_document, filter_mentions=False)[1:]])
expected_spans = sorted([
spans.Span(2, 2),
spans.Span(4, 4),
spans.Span(6, 7),
spans.Span(6, 11),
spans.Span(9, 10),
spans.Span(9, 11)
])
self.assertEqual(expected_spans,
[mention.span for
mention in mention_extractor.extract_system_mentions(
self.another_real_document,
filter_mentions=False)[1:]])
expected_spans = sorted([
spans.Span(2, 2),
spans.Span(4, 4),
spans.Span(6, 11),
spans.Span(9, 10),
spans.Span(9, 11)
])
self.assertEqual(expected_spans,
[mention.span for
mention in mention_extractor.extract_system_mentions(
self.another_real_document,
filter_mentions=True)[1:]])
def test_post_process_same_head_largest_span(self):
all_mentions = {
mentions.Mention(
None,
spans.Span(0, 3),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(3, 3)}),
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(3, 3)}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(1, 1)}),
mentions.Mention(
None,
spans.Span(5, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(5, 6)}),
mentions.Mention(
None,
spans.Span(0, 0),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(0, 0)})}
expected_mentions = sorted([
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(3, 3)}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(1, 1)}),
mentions.Mention(
None,
spans.Span(5, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(5, 6)}),
mentions.Mention(
None,
spans.Span(0, 0),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(0, 0)})
])
self.assertEqual(
expected_mentions,
mention_extractor.post_process_same_head_largest_span(all_mentions))
all_mentions_2 = {
mentions.Mention(
None,
spans.Span(0, 1),
{"tokens": ["Taiwan", "'s"], "type": "NAM", "head_index": 0,
"head_span": spans.Span(0, 0)}),
mentions.Mention(
None,
spans.Span(0, 0),
{"tokens": ["Taiwan"], "type": "NAM", "head_index": 0,
"head_span": spans.Span(0, 0)}),
mentions.Mention(
None,
spans.Span(2, 3),
{"tokens": ["the", "CCP"], "type": "NAM", "head_index": 1,
"head_span": spans.Span(3, 3)}),
mentions.Mention(
None,
spans.Span(3, 3),
{"tokens": ["CCP"], "type": "NAM", "head_index": 0,
"head_span": spans.Span(3, 3)})}
expected_mentions_2 = sorted([
mentions.Mention(
None,
spans.Span(0, 1),
{"tokens": ["Taiwan", "'s"], "type": "NAM", "head_index": 0,
"head_span": spans.Span(0, 0)}),
mentions.Mention(
None,
spans.Span(2, 3),
{"tokens": ["the", "CCP"], "type": "NAM", "head_index": 1,
"head_span": spans.Span(3, 3)}),
])
self.assertEqual(
expected_mentions_2,
mention_extractor.post_process_same_head_largest_span(
all_mentions_2))
def test_post_process_embedded_head_largest_span(self):
all_mentions_1 = {
mentions.Mention(
None,
spans.Span(0, 3),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(3, 3)}),
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(2, 3)}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(1, 1)}),
mentions.Mention(
None,
spans.Span(5, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(5, 6)})
}
expected_mentions_1 = sorted([
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(2, 3)}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(1, 1)}),
mentions.Mention(
None,
spans.Span(5, 6),
{"tokens": [], "type": "NOM", "head_index": 0,
"head_span": spans.Span(5, 6)})
])
self.assertEqual(
expected_mentions_1,
mention_extractor.post_process_embedded_head_largest_span(
all_mentions_1))
def test_post_process_appositions(self):
three_children_tree = nltk.ParentedTree.fromstring(
"(NP (NP (NP (NP (DT The) (NNP ROC) (POS 's)) (NN ambassador)) "
"(PP (IN to) (NP (NNP Nicaragua)))) (, ,) (NP (NNP Antonio) "
"(NNP Tsai)) (, ,))")
three_children_all_mentions = {
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": ["The", "ROC", "'s", "ambassador", "to",
"Nicaragua", ",", "Antonio", "Tsai"],
"is_apposition": True, "type": "NAM",
"parse_tree": three_children_tree}),
mentions.Mention(
None,
spans.Span(0, 4),
{"tokens": ["The", "ROC", "'s", "ambassador", "to",
"Nicaragua"],
"is_apposition": False, "type": "NOM",
"parse_tree": three_children_tree[0]}),
mentions.Mention(
None,
spans.Span(0, 3),
{"tokens": ["The", "ROC", "'s", "ambassador"],
"is_apposition": False, "type": "NOM",
"parse_tree": three_children_tree[0][0]}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": ["The", "ROC", "'s"], "is_apposition": False,
"type": "NAM", "parse_tree": three_children_tree[0][0][0]}),
mentions.Mention(
None,
spans.Span(4, 4),
{"tokens": ["Nicaragua"], "is_apposition": False,
"type": "NAM", "parse_tree": three_children_tree[0][1][1]}),
mentions.Mention(
None,
spans.Span(5, 6),
{"tokens": ["Antonio", "Tsai"], "is_apposition": False,
"type": "NAM", "parse_tree": three_children_tree[2]})}
three_children_expected = sorted([
mentions.Mention(
None,
spans.Span(0, 6),
{"tokens": ["The", "ROC", "'s", "ambassador", "to",
"Nicaragua", ",", "Antonio", "Tsai"],
"is_apposition": True, "type": "NAM",
"parse_tree": three_children_tree}),
mentions.Mention(
None,
spans.Span(0, 3),
{"tokens": ["The", "ROC", "'s", "ambassador"],
"is_apposition": False, "type": "NOM",
"parse_tree": three_children_tree[0][0]}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": ["The", "ROC", "'s"], "is_apposition": False,
"type": "NAM", "parse_tree": three_children_tree[0][0][0]}),
mentions.Mention(
None,
spans.Span(4, 4),
{"tokens": ["Nicaragua"], "is_apposition": False,
"type": "NAM", "parse_tree": three_children_tree[0][1][1]}),
])
self.assertEqual(
three_children_expected,
mention_extractor.post_process_appositions(
three_children_all_mentions))
two_children_tree = nltk.ParentedTree.fromstring(
"(NP (NP (NP (NNP Secretary)) (PP (IN of) (NP (NNP State)))) "
"(NP (NNP Madeleine) (NNP Albright)))")
two_children_all_mentions = {
mentions.Mention(
None,
spans.Span(0, 4),
{"tokens": ["Secretary", "of", "Sate", "Madeleine",
"Albright"],
"is_apposition": True, "type": "NAM",
"parse_tree": two_children_tree}),
mentions.Mention(
None,
spans.Span(0, 0),
{"tokens": ["Secretary"], "is_apposition": False,
"type": "NAM", "parse_tree": two_children_tree[0][0]}),
mentions.Mention(
None,
spans.Span(0, 2),
{"tokens": ["Secretary", "of", "State"],
"is_apposition": False, "type": "NAM",
"parse_tree": two_children_tree[0]}),
mentions.Mention(
None,
spans.Span(2, 2),
{"tokens": ["State"], "is_apposition": False,
"type": "NAM", "parse_tree": two_children_tree[0][1][1]}),
mentions.Mention(
None,
spans.Span(2, 2),
{"tokens": ["Madeleine", "Albright"], "is_apposition": False,
"type": "NAM", "parse_tree": two_children_tree[1]})}
two_children_expected = sorted([
mentions.Mention(
None,
spans.Span(0, 4),
{"tokens": ["Secretary", "of", "Sate", "Madeleine",
"Albright"],
"is_apposition": True, "type": "NAM",
"parse_tree": two_children_tree})
])
self.assertEqual(
two_children_expected,
mention_extractor.post_process_appositions(
two_children_all_mentions))
if __name__ == '__main__':
unittest.main()
|
tools/xrdb2dynamic_color.py | yous/iTerm2-Color-Schemes | 21,573 | 12642974 | <filename>tools/xrdb2dynamic_color.py
#!/usr/bin/env python3
# This script converts xrdb (X11) color scheme format to xterm style
# dynamic color OSC escape sequence scripts.
# The generated scripts allow changing the theme of the terminal
# on the fly. xterm, urxvt and wezterm are known to support these
# sequences.
#
# Usage:
# xrdb2dynamic_color.py path/to/xrdb/files -d /dynamiccolor
import os
import re
import argparse
from xrdbparser import Xrdb
def main(xrdb_path, output_path=None):
for data in Xrdb.parse_all(xrdb_path):
output = "#!/bin/sh\n# " + data.name
output += '\nprintf "\\033]4'
for i in range(0, 16):
output += ";%d;%s" % (i, data.colors[i])
output += '\\007"'
output += '\nprintf "\\033]10;%s;%s;%s\\007"' % (data.Foreground_Color, data.Background_Color, data.Cursor_Color)
if hasattr(data, "Selection_Color"):
output += '\nprintf "\\033]17;%s\\007"' % (data.Selection_Color)
if hasattr(data, "Selected_Text_Color"):
output += '\nprintf "\\033]19;%s\\007"' % (data.Selected_Text_Color)
if hasattr(data, "Bold_Color"):
output += '\nprintf "\\033]5;0;%s\\007"' % (data.Bold_Color)
output += "\n"
if not output_path:
print(output)
else:
dest = '{0}.sh'.format(os.path.join(output_path, data.name))
with open(dest, 'w+') as f:
f.write(output)
# Make sure these scripts are executable
os.chmod(dest, 0o755)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Translate X color schemes to wezterm format')
parser.add_argument('xrdb_path', type=str, help='path to xrdb files')
parser.add_argument('-d', '--destiny', type=str, dest='output_path',
help='path where wezterm config files will be' +
' created, if not provided then will be printed')
args = parser.parse_args()
main(args.xrdb_path, args.output_path)
|
backend/data/jimm/models/layers/norm.py | MikeOwino/JittorVis | 139 | 12643013 | """
Copyright VIP Group
Licensed under the Apache License, Version 2.0.
Modify from https://github.com/rwightman/pytorch-image-models
Original copyright of <NAME> below, modifications by VIP Group
Hacked together by / copyright <NAME>
"""
import jittor as jt
import jittor.nn as nn
import jittor.nn as F
class GroupNorm(nn.GroupNorm):
def __init__(self, num_channels, num_groups, eps=1e-5, affine=True):
# NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN
super().__init__(num_groups, num_channels, eps=eps, affine=affine)
def execute(self, x):
return super().execute(x)
class LayerNorm2d(nn.LayerNorm):
""" Layernorm for channels of '2d' spatial BCHW tensors """
def __init__(self, num_channels):
super().__init__([num_channels, 1, 1])
def execute(self, x: jt.Var) -> jt.Var:
return super().execute(x)
|
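Both layers are meant as drop-in replacements when swapping norm layers on BCHW tensors; `GroupNorm` just reorders the constructor arguments so `num_channels` comes first. A usage sketch for the group-norm variant (assumes a working Jittor install; `jt.randn` is used here as the random-tensor helper):

```python
# Sketch: normalize an 8-channel BCHW feature map with 4 groups.
import jittor as jt

x = jt.randn(2, 8, 4, 4)                      # (batch, channels, height, width)
gn = GroupNorm(num_channels=8, num_groups=4)  # note: num_channels comes first here
y = gn(x)
print(y.shape)                                # stays (2, 8, 4, 4)
```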
applications/nightly_build/test_segnet.py | TomWildenhain-Microsoft/keras-onnx | 362 | 12643065 | <gh_stars>100-1000
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
class TestSegNet(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_segnet(self):
# From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
model = keras_segmentation.models.segnet.segnet(101)
res = run_image(model, self.model_files, img_path, target_size=(416, 608))
self.assertTrue(*res)
def test_vgg_segnet(self):
# From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
model = keras_segmentation.models.segnet.vgg_segnet(101)
res = run_image(model, self.model_files, img_path, rtol=3.e-3, target_size=(416, 608))
self.assertTrue(*res)
def test_mobilenet_segnet(self):
# From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
model = keras_segmentation.models.segnet.mobilenet_segnet(101)
res = run_image(model, self.model_files, img_path, target_size=(224, 224))
self.assertTrue(*res)
if __name__ == "__main__":
unittest.main()
|
exercises/ja/exc_03_06.py | Jette16/spacy-course | 2,085 | 12643113 | import spacy
# Define a custom component
def length_component(doc):
# Get the length of the doc
doc_length = ____
print(f"この文章は {doc_length} トークンの長さです。")
# Return the doc
____
# Load the small Japanese model
nlp = spacy.load("ja_core_news_sm")
# Add the component first in the pipeline and print the pipeline names
____.____(____)
print(nlp.pipe_names)
# Process a text
doc = ____
|
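The blanks above are part of the exercise. For reference, one possible completion under the assumption of the spaCy v3 API, where a custom component must be registered with `@Language.component` before `nlp.add_pipe` can reference it by name (the printed message is translated to English here):

```python
# Reference sketch (assumes spaCy v3 and the ja_core_news_sm model installed).
import spacy
from spacy.language import Language

@Language.component("length_component")
def length_component(doc):
    doc_length = len(doc)
    print(f"This document is {doc_length} tokens long.")
    return doc

nlp = spacy.load("ja_core_news_sm")
nlp.add_pipe("length_component", first=True)
print(nlp.pipe_names)
doc = nlp("これは文章です。")
```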
bertModel.py | mangoeyes/FinBERT | 194 | 12643173 | <gh_stars>100-1000
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, BertConfig
class BertClassification(nn.Module):
def __init__(self, weight_path, num_labels=2, vocab="base-cased"):
super(BertClassification, self).__init__()
self.num_labels = num_labels
self.vocab = vocab
if self.vocab == "base-cased":
self.bert = BertModel.from_pretrained(weight_path)
self.config = BertConfig(vocab_size_or_config_json_file=28996, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
elif self.vocab == "base-uncased":
self.bert = BertModel.from_pretrained(weight_path)
self.config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
elif self.vocab == "finance-cased":
self.bert = BertModel.from_pretrained(weight_path)
self.config = BertConfig(vocab_size_or_config_json_file=28573, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
elif self.vocab =="finance-uncased":
self.bert = BertModel.from_pretrained(weight_path)
self.config = BertConfig(vocab_size_or_config_json_file=30873, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
self.classifier = nn.Linear(self.config.hidden_size, num_labels)
nn.init.xavier_normal(self.classifier.weight)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, graphEmbeddings=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits
class dense_opt():
def __init__(self, model):
super(dense_opt, self).__init__()
self.lrlast = .001
self.lrmain = .00001
self.optim = optim.Adam(
[ {"params":model.bert.parameters(),"lr": self.lrmain},
{"params":model.classifier.parameters(), "lr": self.lrlast},
])
def get_optim(self):
return self.optim |
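`BertClassification` wires a `pytorch_pretrained_bert` encoder to a linear head, and `dense_opt` gives the head a larger learning rate than the encoder. A training-step sketch; the weight path is a placeholder and `from_pretrained` needs a real checkpoint for this to actually run:

```python
# Sketch only: weight_path must point to BERT weights compatible with
# pytorch_pretrained_bert; 30873 matches the "finance-uncased" vocab size above.
import torch

model = BertClassification(weight_path="path/to/bert-weights", num_labels=2, vocab="finance-uncased")
optimizer = dense_opt(model).get_optim()

input_ids = torch.randint(0, 30873, (4, 64))   # batch of 4 sequences, 64 tokens each
labels = torch.tensor([0, 1, 0, 1])

logits = model(input_ids)                      # shape (4, num_labels)
loss = torch.nn.CrossEntropyLoss()(logits, labels)
loss.backward()
optimizer.step()
```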
gtsfm/frontend/detector_descriptor/sift.py | swershrimpy/gtsfm | 122 | 12643174 | """SIFT Detector-Descriptor implementation.
The detector was proposed in 'Distinctive Image Features from Scale-Invariant Keypoints' and is implemented by wrapping
over OpenCV's API.
References:
- https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf
- https://docs.opencv.org/3.4.2/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
Authors: <NAME>
"""
from typing import Tuple
import cv2 as cv
import numpy as np
import gtsfm.utils.features as feature_utils
import gtsfm.utils.images as image_utils
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
from gtsfm.frontend.detector_descriptor.detector_descriptor_base import (
DetectorDescriptorBase,
)
class SIFTDetectorDescriptor(DetectorDescriptorBase):
"""SIFT detector-descriptor using OpenCV's implementation."""
def detect_and_describe(self, image: Image) -> Tuple[Keypoints, np.ndarray]:
"""Perform feature detection as well as their description.
Refer to detect() in DetectorBase and describe() in DescriptorBase for details about the output format.
Args:
image: the input image.
Returns:
Detected keypoints, with length N <= max_keypoints.
Corr. descriptors, of shape (N, D) where D is the dimension of each descriptor.
"""
# convert to grayscale
gray_image = image_utils.rgb_to_gray_cv(image)
# Creating OpenCV object
opencv_obj = cv.SIFT_create()
# Run the opencv code
cv_keypoints, descriptors = opencv_obj.detectAndCompute(gray_image.value_array, None)
# convert to GTSFM's keypoints
keypoints = feature_utils.cast_to_gtsfm_keypoints(cv_keypoints)
# sort the features and descriptors by the score
# (need to sort here as we need the sorting order for descriptors)
sort_idx = np.argsort(-keypoints.responses)[: self.max_keypoints]
keypoints = Keypoints(
coordinates=keypoints.coordinates[sort_idx],
scales=keypoints.scales[sort_idx],
responses=keypoints.responses[sort_idx],
)
descriptors = descriptors[sort_idx]
return keypoints, descriptors
|
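`detect_and_describe` is a thin wrapper around OpenCV's SIFT plus a response-based truncation to `max_keypoints`. The underlying OpenCV calls look like this (standalone sketch; it does not construct gtsfm's `Image`/`Keypoints` types):

```python
# Standalone OpenCV sketch of the wrapped calls; a real image would come from
# cv.imread followed by a grayscale conversion.
import cv2 as cv
import numpy as np

gray = (np.random.rand(240, 320) * 255).astype(np.uint8)   # stand-in grayscale image
sift = cv.SIFT_create()
cv_keypoints, descriptors = sift.detectAndCompute(gray, None)
print(len(cv_keypoints), None if descriptors is None else descriptors.shape)  # descriptors are (N, 128)
```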
graphinvent/gnn/summation_mpnn.py | gooaah/GraphINVENT | 211 | 12643205 | <reponame>gooaah/GraphINVENT
"""
Defines the `SummationMPNN` class.
"""
# load general packages and functions
from collections import namedtuple
import torch
class SummationMPNN(torch.nn.Module):
"""
Abstract `SummationMPNN` class. Specific models using this class are
defined in `mpnn.py`; these are MNN, S2V, and GGNN.
"""
def __init__(self, constants : namedtuple):
super().__init__()
self.hidden_node_features = constants.hidden_node_features
self.edge_features = constants.n_edge_features
self.message_size = constants.message_size
self.message_passes = constants.message_passes
self.constants = constants
def message_terms(self, nodes : torch.Tensor, node_neighbours : torch.Tensor,
edges : torch.Tensor) -> None:
"""
Message passing function, to be implemented in all `SummationMPNN` subclasses.
Args:
----
nodes (torch.Tensor) : Batch of node feature vectors.
node_neighbours (torch.Tensor) : Batch of node feature vectors for neighbors.
edges (torch.Tensor) : Batch of edge feature vectors.
Shapes:
------
nodes : (total N nodes in batch, N node features)
node_neighbours : (total N nodes in batch, max node degree, N node features)
edges : (total N nodes in batch, max node degree, N edge features)
"""
raise NotImplementedError
def update(self, nodes : torch.Tensor, messages : torch.Tensor) -> None:
"""
Message update function, to be implemented in all `SummationMPNN` subclasses.
Args:
----
nodes (torch.Tensor) : Batch of node feature vectors.
messages (torch.Tensor) : Batch of incoming messages.
Shapes:
------
nodes : (total N nodes in batch, N node features)
messages : (total N nodes in batch, N node features)
"""
raise NotImplementedError
def readout(self, hidden_nodes : torch.Tensor, input_nodes : torch.Tensor,
node_mask : torch.Tensor) -> None:
"""
Local readout function, to be implemented in all `SummationMPNN` subclasses.
Args:
----
hidden_nodes (torch.Tensor) : Batch of node feature vectors.
input_nodes (torch.Tensor) : Batch of node feature vectors.
node_mask (torch.Tensor) : Mask for non-existing neighbors, where elements
are 1 if corresponding element exists and 0
otherwise.
Shapes:
------
hidden_nodes : (total N nodes in batch, N node features)
input_nodes : (total N nodes in batch, N node features)
node_mask : (total N nodes in batch, N features)
"""
raise NotImplementedError
def forward(self, nodes : torch.Tensor, edges : torch.Tensor) -> None:
"""
Defines forward pass.
Args:
----
nodes (torch.Tensor) : Batch of node feature matrices.
edges (torch.Tensor) : Batch of edge feature tensors.
Shapes:
------
nodes : (batch size, N nodes, N node features)
edges : (batch size, N nodes, N nodes, N edge features)
Returns:
-------
output (torch.Tensor) : This would normally be the learned graph representation,
but in all MPNN readout functions in this work,
the last layer is used to predict the action
probability distribution for a batch of graphs
from the learned graph representation.
"""
adjacency = torch.sum(edges, dim=3)
# **note: "idc" == "indices", "nghb{s}" == "neighbour(s)"
(edge_batch_batch_idc,
edge_batch_node_idc,
edge_batch_nghb_idc) = adjacency.nonzero(as_tuple=True)
(node_batch_batch_idc, node_batch_node_idc) = adjacency.sum(-1).nonzero(as_tuple=True)
same_batch = node_batch_batch_idc.view(-1, 1) == edge_batch_batch_idc
same_node = node_batch_node_idc.view(-1, 1) == edge_batch_node_idc
# element ij of `message_summation_matrix` is 1 if `edge_batch_edges[j]`
# is connected with `node_batch_nodes[i]`, else 0
message_summation_matrix = (same_batch * same_node).float()
edge_batch_edges = edges[edge_batch_batch_idc, edge_batch_node_idc, edge_batch_nghb_idc, :]
# pad up the hidden nodes
hidden_nodes = torch.zeros(nodes.shape[0],
nodes.shape[1],
self.hidden_node_features,
device=self.constants.device)
hidden_nodes[:nodes.shape[0], :nodes.shape[1], :nodes.shape[2]] = nodes.clone()
node_batch_nodes = hidden_nodes[node_batch_batch_idc, node_batch_node_idc, :]
for _ in range(self.message_passes):
edge_batch_nodes = hidden_nodes[edge_batch_batch_idc, edge_batch_node_idc, :]
edge_batch_nghbs = hidden_nodes[edge_batch_batch_idc, edge_batch_nghb_idc, :]
message_terms = self.message_terms(edge_batch_nodes,
edge_batch_nghbs,
edge_batch_edges)
if len(message_terms.size()) == 1: # if a single graph in batch
message_terms = message_terms.unsqueeze(0)
# the summation in eq. 1 of the NMPQC paper happens here
messages = torch.matmul(message_summation_matrix, message_terms)
node_batch_nodes = self.update(node_batch_nodes, messages)
hidden_nodes[node_batch_batch_idc, node_batch_node_idc, :] = node_batch_nodes.clone()
node_mask = adjacency.sum(-1) != 0
output = self.readout(hidden_nodes, nodes, node_mask)
return output
|
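The abstract class does all the bookkeeping (edge gathering, the per-node summation of eq. 1, node masking); a subclass only supplies `message_terms`, `update`, and `readout`. A minimal concrete subclass sketch with toy tensors; this is not one of the MNN/S2V/GGNN models from `mpnn.py`, just an illustration of the required shapes:

```python
# Minimal sketch: linear message function, additive update, masked-sum readout.
from collections import namedtuple
import torch

Constants = namedtuple(
    "Constants",
    ["hidden_node_features", "n_edge_features", "message_size", "message_passes", "device"],
)

class TinyMPNN(SummationMPNN):
    def __init__(self, constants):
        super().__init__(constants)
        in_features = 2 * constants.hidden_node_features + constants.n_edge_features
        self.msg_nn = torch.nn.Linear(in_features, constants.hidden_node_features)
        self.out_nn = torch.nn.Linear(constants.hidden_node_features, 1)

    def message_terms(self, nodes, node_neighbours, edges):
        # as called from forward() above, these are flat per-edge batches
        return self.msg_nn(torch.cat([nodes, node_neighbours, edges], dim=-1))

    def update(self, nodes, messages):
        return torch.relu(nodes + messages)

    def readout(self, hidden_nodes, input_nodes, node_mask):
        graph_sum = (hidden_nodes * node_mask.unsqueeze(-1).float()).sum(dim=1)
        return self.out_nn(graph_sum)

constants = Constants(hidden_node_features=8, n_edge_features=3, message_size=8,
                      message_passes=2, device="cpu")
model = TinyMPNN(constants)
nodes = torch.rand(2, 5, 8)                    # (batch, max nodes, node features)
edges = torch.zeros(2, 5, 5, 3)                # (batch, nodes, nodes, edge features)
edges[0, 0, 1, 0] = edges[0, 1, 0, 0] = 1.0    # one undirected edge in graph 0
edges[1, 2, 3, 1] = edges[1, 3, 2, 1] = 1.0    # one undirected edge in graph 1
print(model(nodes, edges).shape)               # torch.Size([2, 1])
```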
tests/python/twitter/common/decorators/test_lru_cache.py | zhouyijiaren/commons | 1,143 | 12643217 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.decorators import lru_cache
def test_basic():
# just test the extra functionality that we added
eviction_queue = []
def on_eviction(element):
eviction_queue.append(element)
@lru_cache(10, on_eviction=on_eviction)
def double(value):
return value * 2
for k in range(15):
double(k)
assert eviction_queue == [0, 2, 4, 6, 8]
|
Game8/modules/sprites/turret.py | ttkaixin1998/pikachupythongames | 4,013 | 12643218 | <gh_stars>1000+
'''
Function:
Turret class
Author:
Charles
WeChat official account:
Charles的皮卡丘
'''
import pygame
from .arrow import Arrow
'''Turret class'''
class Turret(pygame.sprite.Sprite):
def __init__(self, turret_type, cfg):
assert turret_type in range(3)
pygame.sprite.Sprite.__init__(self)
self.cfg = cfg
self.turret_type = turret_type
self.imagepaths = [cfg.IMAGEPATHS['game']['basic_tower'], cfg.IMAGEPATHS['game']['med_tower'], cfg.IMAGEPATHS['game']['heavy_tower']]
self.image = pygame.image.load(self.imagepaths[turret_type])
self.rect = self.image.get_rect()
# Arrow
self.arrow = Arrow(turret_type, cfg)
# Current position
self.coord = 0, 0
self.position = 0, 0
self.rect.left, self.rect.top = self.position
self.reset()
'''Shoot an arrow'''
def shot(self, position, angle=None):
arrow = None
if not self.is_cooling:
arrow = Arrow(self.turret_type, self.cfg)
arrow.reset(position, angle)
self.is_cooling = True
if self.is_cooling:
self.cool_time -= 1
if self.cool_time == 0:
self.reset()
return arrow
'''Reset'''
def reset(self):
if self.turret_type == 0:
# Price
self.price = 500
# Cooldown time between shots
self.cool_time = 30
# Whether the turret is currently cooling down
self.is_cooling = False
elif self.turret_type == 1:
self.price = 1000
self.cool_time = 50
self.is_cooling = False
elif self.turret_type == 2:
self.price = 1500
self.cool_time = 100
self.is_cooling = False |
paas-ce/paas/esb/esb/component/base.py | renmcc/bk-PaaS | 767 | 12643299 | <reponame>renmcc/bk-PaaS
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import os
import copy
import json
from importlib import import_module
from common.bkerrors import bk_error_codes
from common.errors import APIError, error_codes
from common.base_utils import smart_lower, FancyDict, str_bool
from common.log import logger
from esb.outgoing import HttpClient
from esb.utils import is_py_file, fpath_to_module, config
from esb.utils.base import PathVars
from esb.response import CompResponse
from esb.bkauth.models import User, AnonymousUser
class BaseComponent(object):
"""
Base class for component
"""
sys_name = 'UNKNOWN'
api_type = 'unknown'
name_prefix = ''
# If a Form is defined, the request will use it to validate the input parameters
Form = None
def __init__(self, request=None, current_user=None):
self.request = request
self.response = CompResponse()
self.form_data = {}
self._current_user = current_user
self._init()
def _init(self):
# Init outgoings client for later using
self.outgoing = FancyDict()
self.outgoing.http_client = HttpClient(self)
def set_request(self, request):
assert isinstance(request, CompRequest)
self.request = request
def get_current_user(self):
"""
获取当前用户
"""
if not self.request.wsgi_request:
return AnonymousUser()
username = self.request.wsgi_request.g.get('current_user_username')
if username:
return User(username)
else:
return AnonymousUser()
@property
def current_user(self):
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def invoke(self, kwargs={}, use_test_env=False, request_id=None, is_dummy=False, app_code='', timeout=None):
"""
Invoke a component. Note that when this component instance has been initialized
with a wsgi_request, the extra parameters below do not need to be passed in.
:param dict kwargs: key/value pairs of request parameters
:param bool use_test_env: whether to access the test environment, defaults to False
:param str request_id: request_id of this request, defaults to None
:param bool is_dummy: whether to fake the request result, defaults to False
:param str app_code: identity of the calling APP
:returns: the result as a dict
:raises: may raise a `common.errors.APIError` instance depending on the situation
"""
if not self.request:
# Convert the type of kwargs
if isinstance(kwargs, dict):
kwargs = FancyDict(kwargs)
self.set_request(
CompRequest(
input=kwargs,
use_test_env=use_test_env,
request_id=request_id,
is_dummy=is_dummy,
app_code=app_code,
timeout=timeout
)
)
if not self._current_user:
self._current_user = self.get_current_user()
self.validate_input()
self.before_handle()
try:
self.handle()
except APIError, e:
self.response.payload = e.code.as_dict()
self.after_handle()
return self.response.get_payload()
def invoke_other(self, *args, **kwargs):
"""
Use given kwargs to invoke some other component
"""
return self._invoke_other(*args, **kwargs)['result']
def _invoke_other(self, component_name, kwargs={}, use_test_env=None, timeout=None):
"""
Use given kwargs to invoke some other component
"""
comp_obj = self.prepare_other(component_name, kwargs=kwargs,
use_test_env=use_test_env, timeout=timeout)
result = comp_obj.invoke()
return {
'result': result,
'comp': comp_obj
}
def prepare_other(self, component_name, kwargs={}, use_test_env=None, timeout=None):
"""
Based on the current component, build an invokable component instance from the given parameters and configuration.
:param str component_name: name of the component to build
:param dict kwargs: parameters used to invoke the component
:param bool use_test_env: whether to access the test environment, defaults to the current component's setting
"""
components_manager = get_components_manager()
comp_class = components_manager.get_comp_by_name(component_name)
if not comp_class:
raise error_codes.ARGUMENT_ERROR.format_prompt('No component can be found via name=%s' % component_name)
# use_test_env is self.request.use_test_env by default,
# but this behaviour can be overridden.
if use_test_env is None:
use_test_env = self.request.use_test_env
# Convert the type of kwargs
if isinstance(kwargs, dict):
kwargs = FancyDict(kwargs)
comp_obj = comp_class()
comp_obj.current_user = self.current_user
comp_obj.set_request(
CompRequest(
input=kwargs,
use_test_env=use_test_env,
request_id=self.request.request_id,
is_dummy=self.request.is_dummy,
app_code=self.request.app_code,
timeout=timeout
)
)
return comp_obj
def validate_input(self):
"""
Validate the given input
"""
if self.Form:
self.form_data = self.Form.from_request(self.request)\
.get_cleaned_data_or_error()
self.request.kwargs.update(self.form_data)
def before_handle(self):
"""
Do things before handle starts
"""
if getattr(self, 'need_check_operate_perm', False):
self.check_operate_perm()
def handle(self):
"""
All components should override this method
"""
pass
def after_handle(self):
"""
Do things after handle ended
"""
pass
def get_host_by_env(self, hosts):
"""
Get outgoing host by use_test_env flag
:param dict hosts: hosts, such as {'test': 'testhost', 'prod': 'prodhost'}
"""
env_name = 'test' if self.request.use_test_env else 'prod'
return hosts[env_name]
@classmethod
def set_name_prefix(cls, name_prefix):
"""
Set the prefix of the component name, which affects the result of get_name
:param str name_prefix: the name prefix to set
"""
cls.name_prefix = name_prefix
@classmethod
def get_name(cls):
"""
Get name of this component, which should be unique
"""
return '%s%s.%s' % (cls.name_prefix, cls.sys_name.lower(), cls.get_component_name())
@classmethod
def get_component_name(cls):
return smart_lower(cls.__name__)
def get_alias_name(self):
return getattr(self, 'name', self.get_component_name())
class CompRequest(object):
"""
Request class for Component
"""
SENSITIVE_PARAMS_KEY = [
'app_secret', 'signature', 'bk_nonce', 'bk_timestamp',
'bk_app_secret', 'bk_signature',
]
NORMAL_PARAMS_KEY = [
'app_code', 'username', 'bk_token',
'bk_app_code', 'bk_username',
'__esb_skip_signature__',
'__esb_skip_comp_perm__',
]
def __init__(self, wsgi_request=None, input=None, use_test_env=False, request_id=None,
channel_type='api', is_dummy=False, app_code='', path_vars=None,
timeout=None, headers={}):
self.wsgi_request = wsgi_request
# Load data from wsgi_request if given
if self.wsgi_request:
self.kwargs = copy.copy(self.wsgi_request.g.kwargs)
self.kwargs = self._clean_sensitive_params(self.kwargs)
self.use_test_env = self.wsgi_request.g.use_test_env
self.request_id = self.wsgi_request.g.request_id
self.channel_type = self.wsgi_request.g.channel_type
self.is_dummy = str_bool(self.wsgi_request.g.kwargs.get('dummy'))
self.app_code = self.wsgi_request.g.get('app_code', '')
# Variables captured from path matching
self.path_vars = self.wsgi_request.g.path_vars
# Timeout duration
self.timeout = self.wsgi_request.g.timeout
self.headers = self.wsgi_request.g.headers
self.bk_language = self.headers.get('Blueking-Language', 'en')
else:
self.kwargs = copy.copy(input) or FancyDict()
self.use_test_env = use_test_env
self.request_id = request_id
self.channel_type = channel_type
self.is_dummy = is_dummy
self.app_code = app_code
            # Variables captured from the matched path
self.path_vars = path_vars or PathVars()
            # Request timeout
self.timeout = timeout
self.headers = copy.copy(headers)
self.bk_language = self.headers.get('Blueking-Language', 'en')
def get_strict_clean_params(self):
params = copy.deepcopy(self.kwargs)
params = self._clean_normal_params(params)
return params
def get_clean_params(self, ctype='form'):
if not self.wsgi_request:
return ''
if self.wsgi_request.method == 'GET':
return self._get_clean_raw_query(ctype)
else:
return self._get_clean_raw_body(ctype)
def _get_clean_raw_query(self, ctype):
query = self.wsgi_request.GET.copy()
query = self._clean_sensitive_params(query)
return query.urlencode() if ctype == 'form' else json.dumps(dict(query.items()))
def _get_clean_raw_body(self, ctype):
if self.wsgi_request.body and self.wsgi_request.body.strip().startswith('{'):
body = json.loads(self.wsgi_request.body)
body = self._clean_sensitive_params(body)
return body if ctype == 'form' else json.dumps(body)
else:
body = self.wsgi_request.POST.copy()
body = self._clean_sensitive_params(body)
return body.urlencode() if ctype == 'form' else json.dumps(dict(body.items()))
def _clean_sensitive_params(self, params):
for key in self.SENSITIVE_PARAMS_KEY:
params.pop(key, None)
return params
def _clean_normal_params(self, params):
for key in self.NORMAL_PARAMS_KEY:
params.pop(key, None)
return params
class ComponentsManager(object):
"""
Manager for Components
"""
blist_comp_fnames = [
'component.py',
'component.pyc',
]
def __init__(self, ):
self.name_component_map = {}
self.path_configs = {}
def __str__(self):
return '<ComponentsManager: path_configs=%s>' % self.path_configs
def register(self, comp_class, config={}):
"""
Register a component class by channel_config
        :param dict config: configuration used when registering the component, e.g. the component name prefix
"""
comp_class.set_name_prefix(config.get('name_prefix', ''))
self.name_component_map[comp_class.get_name().lower()] = comp_class
def get_comp_by_name(self, name):
ret = self.name_component_map.get(name)
return ret
def register_by_module(self, module, config={}):
"""
Register Component class
"""
cls_comp = self.find_component_class(module)
if cls_comp:
self.register(cls_comp, config=config)
def register_by_config(self, config_list):
"""
        Register components according to the component configuration from the config file
"""
if not config_list:
return
if not isinstance(config_list, (list, tuple)):
config_list = [config_list]
        # Save each config into self.path_configs, then walk and load its path
for comp_config in config_list:
if not comp_config:
continue
self.path_configs[comp_config['path']] = comp_config.copy()
self.register_path(comp_config['path'])
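    # Example config entry (a guess at the expected shape, based on the keys read above):
    #
    #   [{'path': 'components/generic', 'name_prefix': 'generic.'}]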
def register_path(self, path):
"""
Walk down components path to find all valid Component object
"""
config = self.path_configs[path]
for current_folder, folders, files in os.walk(path):
for filename in files:
filename = os.path.join(current_folder, filename)
if self.should_register(filename):
try:
module = import_module(fpath_to_module(filename))
self.register_by_module(module, config=config)
except Exception:
logger.exception('%s Error when register file %s, skip',
bk_error_codes.COMPONENT_REGISTER_ERROR.code, filename)
def should_register(self, filename):
"""
Determine if `filename` should be registered
:param str filename: filename with directory, like "esb/generic/test.py"
"""
fpath, base_fname = os.path.split(filename)
# Components are not in toolkit folder
if fpath.endswith('/toolkit') or fpath.endswith('/apidoc'):
return False
return is_py_file(base_fname) and not base_fname.startswith('_') and \
base_fname not in self.blist_comp_fnames
@staticmethod
def find_component_class(module):
"""
Find the component class from the given module
"""
for attr_name in dir(module):
obj = getattr(module, attr_name)
try:
# Only if this Component class is **defined** in this module
if hasattr(obj, 'handle') and issubclass(obj, BaseComponent) and \
obj.__module__ == module.__name__:
cls_comp = obj
return cls_comp
except Exception:
pass
return
def get_registed_components(self):
return self.name_component_map
_components_manager = None
def get_components_manager():
"""
    Get the currently registered components_manager
"""
global _components_manager
if _components_manager is None:
manager = ComponentsManager()
manager.register_by_config(config.ESB_CONFIG['config'].get('component_groups', []))
_components_manager = manager
return _components_manager
|
web/search/migrations/0001_initial.py | ChiChou/wiggle | 110 | 12643323 | # Generated by Django 2.2 on 2019-04-24 10:04
from django.db import migrations, models
import django.db.models.deletion
import libs.field
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Executable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('raw_path', models.TextField()),
('path', models.TextField(unique=True)),
('strings', models.TextField()),
('meta', libs.field.JSONField(default=dict)),
('libraries', libs.field.JSONField(default=dict)),
('imports', libs.field.JSONField(default=dict)),
('exports', libs.field.JSONField(default=dict)),
('segments', libs.field.JSONField(default=dict)),
('entries', libs.field.JSONField(default=dict)),
('created', models.DateField()),
('modified', models.DateField()),
('added', models.DateField()),
],
),
migrations.CreateModel(
name='MachO',
fields=[
('executable_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='search.Executable')),
('classdump', models.TextField()),
('classes', libs.field.JSONField(default=dict)),
('rpaths', libs.field.JSONField(default=dict)),
('ent', libs.field.JSONField(default={})),
('ent_str', models.TextField()),
('ent_keys', libs.field.JSONField(default=[])),
('cs_flags', models.IntegerField(default=0)),
('cs_flags_str', models.TextField()),
('lv', models.BooleanField(default=False)),
('signed', models.BooleanField(default=False)),
('apple', models.BooleanField(default=False)),
('codesign', models.TextField()),
('info_plist', libs.field.JSONField(default=dict)),
('info_plist_str', models.TextField()),
],
bases=('search.executable',),
),
migrations.CreateModel(
name='PE',
fields=[
('executable_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='search.Executable')),
('version', libs.field.JSONField(default=dict)),
],
bases=('search.executable',),
),
]
|
cloudml-template/examples/classification/census/trainer/metadata.py | tmatsuo/cloudml-samples | 1552 | 12643324 |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# YOU NEED TO MODIFY THE FOLLOWING METADATA TO ADAPT THE TEMPLATE TO YOUR DATA
# ******************************************************************************
# Task type can be either 'classification', 'regression', or 'custom'.
# This is based on the target feature in the dataset.
TASK_TYPE = 'classification'
# List of all the columns (header) present in the input data file(s).
# Used for parsing the input data.
COLUMN_NAMES = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week',
'native_country', 'income_bracket']
# List of the columns expected during serving (which is probably different from
# the header of the training data).
SERVING_COLUMN_NAMES = [
'age', 'workclass', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country']
# List of the default values of all the columns present in the input data.
# This helps decoding the data types of the columns.
DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
# Dictionary of the feature names of type int or float. In the dictionary,
# the key is the feature name, and the value is another dictionary that includes
# the mean and the variance of the numeric feature.
# E.g. {feature_1: {mean: 0, variance:1}, feature_2: {mean: 10, variance:3}}
# The value can be set to None if you don't want to normalize.
NUMERIC_FEATURE_NAMES_WITH_STATS = {
'age': None, 'education_num': None, 'capital_gain': None,
'capital_loss': None, 'hours_per_week': None
}
# Dictionary of feature names with int values, but to be treated as
# categorical features. In the dictionary, the key is the feature name,
# and the value is the num_buckets (count of distinct values).
CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {}
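# E.g. (hypothetical, not a column of the census dataset):
# CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {'weekday': 7}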
# Dictionary of categorical features with few nominal values. In the dictionary,
# the key is the feature name, and the value is the list of feature vocabulary.
CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {
'gender': ['Female', 'Male'],
'race': [
'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White'
],
'education': [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'
],
'marital_status': [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent', 'Never-married',
'Separated', 'Married-AF-spouse', 'Widowed'
],
'relationship': [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'
],
'workclass': [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov', 'Local-gov', '?',
'Self-emp-inc', 'Without-pay', 'Never-worked'
]
}
# Dictionary of categorical features with many values. In the dictionary,
# the key is the feature name, and the value is the number of buckets.
CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET = {
'occupation': 50,
'native_country': 100
}
# Column includes the relative weight of each record.
WEIGHT_COLUMN_NAME = 'fnlwgt'
# Target feature name (response or class variable).
TARGET_NAME = 'income_bracket'
# List of the class values (labels) in a classification dataset.
TARGET_LABELS = ['<=50K', '>50K'] |
applications/cli/commands/predict/tests/test_stream.py | nparkstar/nauta | 390 | 12643330 |
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from click.testing import CliRunner
import pytest
from commands.predict import stream
from commands.predict.common import InferenceVerb
from platform_resources.run import RunStatus
from cli_text_consts import PredictStreamCmdTexts as Texts
TEST_DATA = '{"instances": [1.0, 2.0, 5.0]}'
TEST_RESPONSE = '{"predictions": [3.5, 4.0, 5.5]}'
TEST_URL = 'https://nauta.com:8443/api/v1/namespaces/test/services/inf/proxy/v1/models/saved_model_half_plus_three'
TEST_API_KEY = 'Bearer blablebla'
class StreamPredictMocks:
def __init__(self, mocker):
self.get_namespace_mock = mocker.patch('commands.predict.stream.get_kubectl_current_context_namespace')
self.get_run_mock = mocker.patch('commands.predict.stream.Run.get')
self.get_run_mock.return_value.state = RunStatus.RUNNING
self.get_inference_instance_url_mock = mocker.patch('commands.predict.stream.get_inference_instance_url')
self.get_inference_instance_url_mock.return_value = TEST_URL
self.get_api_key_mock = mocker.patch('commands.predict.stream.get_api_key')
self.get_api_key_mock.return_value = TEST_API_KEY
self.inference_post_mock = mocker.patch('commands.predict.stream.requests.post')
self.inference_post_mock.return_value.text = TEST_RESPONSE
@pytest.fixture
def stream_mocks(mocker):
mocks = StreamPredictMocks(mocker=mocker)
return mocks
def test_stream(stream_mocks: StreamPredictMocks):
data_location = 'data.json'
name = 'fake-inference-instance'
verb = InferenceVerb.CLASSIFY.value
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write(TEST_DATA)
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name, '--method-verb', verb],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 1
assert stream_mocks.get_api_key_mock.call_count == 1
stream_mocks.inference_post_mock.assert_called_with(f'{TEST_URL}:{verb}', data=TEST_DATA, verify=False,
headers={'Authorization': TEST_API_KEY,
'Accept': 'application/json',
'Content-Type': 'application/json'})
assert TEST_RESPONSE in result.output
assert result.exit_code == 0
def test_stream_get_run_fail(stream_mocks: StreamPredictMocks):
stream_mocks.get_run_mock.return_value = None
data_location = 'data.json'
name = 'fake-inference-instance'
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write(TEST_DATA)
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 0
assert stream_mocks.get_api_key_mock.call_count == 0
assert Texts.INSTANCE_NOT_EXISTS_ERROR_MSG.format(name=name) in result.output
assert result.exit_code == 1
def test_stream_instance_not_running_fail(stream_mocks: StreamPredictMocks):
stream_mocks.get_run_mock.return_value.state = RunStatus.QUEUED
data_location = 'data.json'
name = 'fake-inference-instance'
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write(TEST_DATA)
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 0
assert stream_mocks.get_api_key_mock.call_count == 0
assert Texts.INSTANCE_NOT_RUNNING_ERROR_MSG.format(name=name, running_code=RunStatus.RUNNING.value) \
in result.output
assert result.exit_code == 1
def test_stream_get_run_url_fail(stream_mocks: StreamPredictMocks):
stream_mocks.get_inference_instance_url_mock.side_effect = RuntimeError
data_location = 'data.json'
name = 'fake-inference-instance'
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write(TEST_DATA)
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 1
assert stream_mocks.get_api_key_mock.call_count == 0
assert Texts.INSTANCE_GET_FAIL_ERROR_MSG.format(name=name) in result.output
assert result.exit_code == 1
def test_stream_data_load_fail(stream_mocks: StreamPredictMocks):
data_location = 'data.json'
name = 'fake-inference-instance'
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write('')
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 1
assert stream_mocks.get_api_key_mock.call_count == 0
assert Texts.JSON_LOAD_ERROR_MSG.format(data=data_location) in result.output
assert result.exit_code == 1
def test_stream_inference_fail(stream_mocks: StreamPredictMocks):
request_error = '403'
stream_mocks.inference_post_mock.return_value.raise_for_status.side_effect = RuntimeError(request_error)
data_location = 'data.json'
name = 'fake-inference-instance'
runner = CliRunner()
with runner.isolated_filesystem():
with open(data_location, 'w') as data_file:
data_file.write(TEST_DATA)
result = runner.invoke(stream.stream, ['--data', data_location, '--name', name],
catch_exceptions=False)
assert stream_mocks.get_namespace_mock.call_count == 1
assert stream_mocks.get_run_mock.call_count == 1
assert stream_mocks.get_inference_instance_url_mock.call_count == 1
assert stream_mocks.get_api_key_mock.call_count == 1
assert Texts.INFERENCE_OTHER_ERROR_MSG.format(exception=request_error) in result.output
assert result.exit_code == 1
|
moz_sp/extractors/foreign_key_extractor.py | sythello/TabularSemanticParsing | 141 | 12643347 | # encoding: utf-8
"""
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Traverse a SQL AST and extract table fields that have co-occurred in a conditional clause.
We assume such field pairs are likely to form foreign keys.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_future import string_types
from moz_sp.debugs import debug_wrapper
from moz_sp.keywords import join_keywords
from moz_sp.sql_parser import DEBUG
from moz_sp.traverser import SchemaGroundedTraverser
class ForeignKeyCandidateExtractor(SchemaGroundedTraverser):
def Operator(op):
def func(self, json):
if op in ['<>', '>', '<', '>=', '<=', '=', '!='] and \
isinstance(json[0], string_types) and \
isinstance(json[1], string_types):
assert (len(json) == 2 and isinstance(json, list))
v1, v2 = json
if v1 != v2:
v1_id = self.schema.get_field_id(v1)
v2_id = self.schema.get_field_id(v2)
if self.is_field(v1) and self.is_field(v2):
_v1_id, _v2_id = sorted([v1_id, v2_id], reverse=True)
v1 = self.schema.get_field_signature(_v1_id)
v2 = self.schema.get_field_signature(_v2_id)
if not v1 in self.foreign_keys_readable:
self.foreign_keys_readable[v1] = []
self.foreign_keys_readable[v1].append(v2)
self.foreign_keys.add((_v1_id, _v2_id))
else:
for v in json:
self.dispatch(v)
return func
# simple operators
_concat = Operator('||')
_mul = Operator('*')
_div = Operator('/')
_add = Operator('+')
_sub = Operator('-')
_neq = Operator('!=')
_gt = Operator('>')
_lt = Operator('<')
_gte = Operator('>=')
_lte = Operator('<=')
_eq = Operator('=')
_or = Operator('OR')
_and = Operator('AND')
def __init__(self, schema, verbose=False):
super().__init__(schema, verbose)
self.foreign_keys_readable = dict()
self.foreign_keys = set()
@debug_wrapper
def extract(self, json):
self.root(json)
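    # Usage sketch (variable names are assumptions, not taken from this file):
    #
    #   extractor = ForeignKeyCandidateExtractor(schema)
    #   extractor.extract(parsed_sql_json)             # AST produced by the moz_sp parser
    #   candidates = extractor.foreign_keys_readable   # e.g. {'t1.a': ['t2.b']}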
@debug_wrapper
def delimited_list(self, json):
for element in json:
self.dispatch(element)
@debug_wrapper
def dispatch(self, json):
if isinstance(json, list):
self.delimited_list(json)
if isinstance(json, dict):
if len(json) == 0:
return
elif 'value' in json:
self.value(json)
elif 'from' in json:
# Nested query 'from'
self.extract(json)
elif 'query' in json:
# Nested queries 'query'
self.extract(json['query'])
elif 'union' in json:
# Nested queries 'union'
self.union(json['union'])
elif 'intersect' in json:
return self.intersect(json['intersect'])
elif 'except' in json:
return self.except_(json['except'])
else:
self.op(json)
@debug_wrapper
def from_(self, json):
if 'from' in json:
from_ = json['from']
if isinstance(from_, dict):
return self.dispatch(from_)
if not isinstance(from_, list):
from_ = [from_]
for token in from_:
self.dispatch(token)
@debug_wrapper
def groupby(self, json):
if 'groupby' in json:
self.dispatch(json['groupby'])
@debug_wrapper
def having(self, json):
if 'having' in json:
self.dispatch(json['having'])
@debug_wrapper
def limit(self, json):
if 'limit' in json:
self.dispatch(json['limit'])
@debug_wrapper
def offset(self, json):
if 'offset' in json:
self.dispatch(json['offset'])
@debug_wrapper
def op(self, json):
if 'on' in json:
self._on(json)
return
if len(json) > 1:
raise Exception('Operators should have only one key!')
key, value = list(json.items())[0]
if DEBUG:
print(key)
# check if the attribute exists, and call the corresponding method;
# note that we disallow keys that start with `_` to avoid giving access
# to magic methods
attr = '_{0}'.format(key)
if hasattr(self, attr) and not key.startswith('_'):
getattr(self, attr)(value)
return
# treat as regular function call
if isinstance(value, dict) and len(value) == 0:
return
else:
self.dispatch(value)
return
@debug_wrapper
def orderby(self, json):
if 'orderby' in json:
self.dispatch(json['orderby'])
@debug_wrapper
def select(self, json):
if 'select' in json:
self.dispatch(json['select'])
@debug_wrapper
def union(self, json):
for query in json:
self.extract(query)
@debug_wrapper
def intersect(self, json):
for i, query in enumerate(json):
self.extract(query)
@debug_wrapper
def except_(self, json):
for i, query in enumerate(json):
self.extract(query)
@debug_wrapper
def query(self, json):
self.get_alias_table_map(json)
for clause in self.clauses:
getattr(self, clause)(json)
self.pop_table_alias_stack()
@debug_wrapper
def root(self, json):
if 'union' in json:
self.union(json['union'])
elif 'intersect' in json:
self.intersect(json['intersect'])
elif 'except' in json:
self.except_(json['except'])
else:
self.query(json)
@debug_wrapper
def value(self, json):
self.dispatch(json['value'])
@debug_wrapper
def where(self, json):
if 'where' in json:
self.dispatch(json['where'])
@debug_wrapper
def _case(self, checks):
for check in checks:
if isinstance(check, dict):
self.dispatch(check['when'])
self.dispatch(check['then'])
else:
self.dispatch(check)
@debug_wrapper
def _exists(self, value):
self.dispatch(value)
@debug_wrapper
def _in(self, json):
self.dispatch(json[1])
@debug_wrapper
def _nin(self, json):
self.dispatch(json[1])
@debug_wrapper
def _is(self, pair):
self.dispatch(pair[0])
self.dispatch(pair[1])
@debug_wrapper
def _like(self, pair):
self.dispatch(pair[0])
self.dispatch(pair[1])
@debug_wrapper
def _nlike(self, pair):
self.dispatch(pair[0])
self.dispatch(pair[1])
@debug_wrapper
def _literal(self, json):
if isinstance(json, list):
for v in json:
self._literal(v)
@debug_wrapper
def _missing(self, value):
self.dispatch(value)
@debug_wrapper
def _on(self, json):
for key in join_keywords:
if key in json:
self.dispatch(json[key])
self.dispatch(json['on']) |
examples/pybullet/gym/pybullet_envs/examples/dominoes.py | felipeek/bullet3 | 9,136 | 12643357 | import pybullet_data as pd
import pybullet_utils as pu
import pybullet
from pybullet_utils import bullet_client as bc
import time
p = bc.BulletClient(connection_mode=pybullet.GUI)
p.setAdditionalSearchPath(pd.getDataPath())
p.loadURDF("plane_transparent.urdf", useMaximalCoordinates=True)
#p.setPhysicsEngineParameter(numSolverIterations=10, fixedTimeStep=0.01)
p.configureDebugVisualizer(p.COV_ENABLE_PLANAR_REFLECTION, 1)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
y2z = p.getQuaternionFromEuler([0, 0, 1.57])
meshScale = [1, 1, 1]
visualShapeId = p.createVisualShape(shapeType=p.GEOM_MESH,
fileName="domino/domino.obj",
rgbaColor=[1, 1, 1, 1],
specularColor=[0.4, .4, 0],
visualFrameOrientation=y2z,
meshScale=meshScale)
boxDimensions = [0.5 * 0.00635, 0.5 * 0.0254, 0.5 * 0.0508]
collisionShapeId = p.createCollisionShape(p.GEOM_BOX, halfExtents=boxDimensions)
for j in range(12):
print("j=", j)
for i in range(35):
#p.loadURDF("domino/domino.urdf",[i*0.04,0, 0.06])
p.createMultiBody(baseMass=0.025,
baseCollisionShapeIndex=collisionShapeId,
baseVisualShapeIndex=visualShapeId,
basePosition=[i * 0.04, j * 0.05, 0.06],
useMaximalCoordinates=True)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
p.setGravity(0, 0, -9.8)
p.setRealTimeSimulation(1)
while (1):
p.setGravity(0, 0, -9.8)
#p.stepSimulation(1./100.)
time.sleep(1. / 240.)
|
test_autolens/analysis/test_subhalo.py | Jammy2211/AutoLens | 114 | 12643364 | import numpy as np
import autofit as af
import autolens as al
from autolens.lens.subhalo import SubhaloResult
class TestSubhaloResult:
def test__result_derived_properties(self):
lower_limit_lists = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
grid_search_result = af.GridSearchResult(
results=None,
grid_priors=[
af.UniformPrior(lower_limit=-2.0, upper_limit=2.0),
af.UniformPrior(lower_limit=-3.0, upper_limit=3.0),
],
lower_limits_lists=lower_limit_lists,
)
subhalo_result = SubhaloResult(
grid_search_result=grid_search_result, result_no_subhalo=1
)
subhalo_array = subhalo_result._subhalo_array_from(
values_native=np.array([[1.0, 2.0], [3.0, 4.0]])
)
assert isinstance(subhalo_array, al.Array2D)
assert (subhalo_array.native == np.array([[3.0, 4.0], [1.0, 2.0]])).all()
|
src/gamesbyexample/affinecipher.py | spp2/PythonStdioGames | 736 | 12643365 |
"""Affine Cipher, by <NAME> <EMAIL>
The affine cipher is a simple substitution cipher that uses addition and
multiplication to encrypt and decrypt symbols.
More info at: https://en.wikipedia.org/wiki/Affine_cipher
This and other games are available at https://nostarch.com/XX
Tags: large, cryptography, math, pyperclip"""
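# Worked sketch of the affine mapping implemented below (illustration only, using a
# hypothetical 26-letter alphabet instead of this module's SYMBOLS string):
#   encrypt: new_index = (index * keyA + keyB) % alphabet_size
#   decrypt: index = (new_index - keyB) * modInverse(keyA, alphabet_size) % alphabet_size
#   e.g. keyA=5, keyB=8: 'h' (index 7) -> (7*5 + 8) % 26 = 17 -> 'r'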
__version__ = 0
try:
import pyperclip # pyperclip copies text to the clipboard.
except ImportError:
pass # If pyperclip is not installed, do nothing. It's no big deal.
import random
# Note the space at the front of the SYMBOLS string:
SYMBOLS = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEF""" + \
"""GHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"""
def main():
print('''Affine Cipher, by <NAME> <EMAIL>
The affine cipher is a simple substitution cipher that uses addition and
multiplication to encrypt and decrypt symbols.''')
# Let the user specify if they are encrypting or decrypting:
while True: # Keep asking until the user enters e or d.
print('Do you want to (e)ncrypt or (d)ecrypt?')
response = input('> ').lower()
if response.startswith('e'):
myMode = 'encrypt'
break
elif response.startswith('d'):
myMode = 'decrypt'
break
print('Please enter the letter e or d.')
# Let the user specify the key to use:
while True: # Keep asking until the user enters a valid key.
print('Please specify the key to use,')
print('or RANDOM to have one generated for you:')
response = input('> ').upper()
if response == 'RANDOM':
myKey = generateRandomKey()
print('The key is {}. KEEP THIS SECRET!'.format(myKey))
break
else:
if not response.isdecimal():
print('This key is not a number.')
continue
if checkKey(int(response), myMode):
myKey = int(response)
break
# Let the user specify the message to encrypt/decrypt:
print('Enter the message to {}.'.format(myMode))
myMessage = input('> ')
if myMode == 'encrypt':
translated = encryptMessage(myKey, myMessage)
elif myMode == 'decrypt':
translated = decryptMessage(myKey, myMessage)
print('%sed text:' % (myMode.title()))
print(translated)
try:
pyperclip.copy(translated)
print('Full %sed text copied to clipboard.' % (myMode))
except:
pass # Do nothing if pyperclip wasn't installed.
def getKeyPartsFromKey(key):
"""Get the two key A and key B parts from the key."""
keyA = key // len(SYMBOLS)
keyB = key % len(SYMBOLS)
return (keyA, keyB)
def checkKey(key, mode):
"""Return True if key is a valid encryption key for this mode.
Otherwise return False."""
keyA, keyB = getKeyPartsFromKey(key)
if mode == 'encrypt' and keyA == 1 and keyB == 0:
print('This key effectively does not do any encryption on the')
print('message. Choose a different key.')
return False
elif keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
print('Key A must be greater than 0 and Key B must be between')
print('0 and {}.'.format(len(SYMBOLS) - 1))
return False
elif gcd(keyA, len(SYMBOLS)) != 1:
print('Key A ({}) and the symbol set'.format(keyA))
print('size ({}) are not relatively prime.'.format(len(SYMBOLS)))
print('Choose a different key.')
return False
return True
def encryptMessage(key, message):
"""Encrypt the message using the key."""
checkKey(key, 'encrypt')
keyA, keyB = getKeyPartsFromKey(key)
ciphertext = ''
for symbol in message:
if symbol in SYMBOLS:
# encrypt this symbol
symIndex = SYMBOLS.find(symbol)
newIndex = (symIndex * keyA + keyB) % len(SYMBOLS)
ciphertext += SYMBOLS[newIndex]
else:
ciphertext += symbol # just append this symbol unencrypted
return ciphertext
def decryptMessage(key, message):
"""Decrypt the message using the key."""
checkKey(key, 'decrypt')
keyA, keyB = getKeyPartsFromKey(key)
plaintext = ''
modInvOfKeyA = findModInverse(keyA, len(SYMBOLS))
for symbol in message:
if symbol in SYMBOLS:
# decrypt this symbol
symIndex = SYMBOLS.find(symbol)
newIndex = (symIndex - keyB) * modInvOfKeyA % len(SYMBOLS)
plaintext += SYMBOLS[newIndex]
else:
plaintext += symbol # just append this symbol undecrypted
return plaintext
def generateRandomKey():
"""Generate and return a random encryption key."""
while True:
keyA = random.randint(2, len(SYMBOLS))
keyB = random.randint(2, len(SYMBOLS))
if gcd(keyA, len(SYMBOLS)) == 1:
return keyA * len(SYMBOLS) + keyB
def gcd(a, b):
"""Return the Greatest Common Divisor of a and b using
Euclid's Algorithm."""
while a != 0:
a, b = b % a, a
return b
def findModInverse(a, m):
"""Return the modular inverse of a % m, which is the number x such
that a*x % m = 1"""
if gcd(a, m) != 1:
# No mod inverse exists if a & m aren't relatively prime:
return None
# Calculate using the Extended Euclidean Algorithm:
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3 # Note that // is the integer division operator
v1, v2, v3, u1, u2, u3 = ((u1 - q * v1),
(u2 - q * v2),
(u3 - q * v3),
v1, v2, v3)
return u1 % m
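# Quick illustration: findModInverse(5, 26) == 21, since 5*21 % 26 == 1.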
# If this program was run (instead of imported), run the program:
if __name__ == '__main__':
main()
|
sandbox/linux/bpf_dsl/golden/generate.py | zealoussnow/chromium | 14,668 | 12643367 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
arches = ['i386', 'x86-64']
goldens = {}
for fn in sys.argv[2:]:
dir, name = fn.split('/')[-2:]
  name = name[:-len('.txt')] if name.endswith('.txt') else name  # rstrip('.txt') strips characters, not the suffix
golden = goldens.setdefault(name, [None] * len(arches))
idx = arches.index(dir)
golden[idx] = open(fn).read()
with open(sys.argv[1], 'w') as f:
f.write("""// Generated by sandbox/linux/bpf_dsl/golden/generate.py
#ifndef SANDBOX_LINUX_BPF_DSL_GOLDEN_GOLDEN_FILES_H_
#define SANDBOX_LINUX_BPF_DSL_GOLDEN_GOLDEN_FILES_H_
namespace sandbox {
namespace bpf_dsl {
namespace golden {
struct Golden {
const char* i386_dump;
const char* x86_64_dump;
};
""")
for name, datas in sorted(goldens.items()):
f.write("const Golden k%s = {\n" % name)
for data in datas:
if data is None:
f.write(" nullptr,\n")
else:
f.write(" \"%s\",\n" % data.replace("\n", "\\n\\\n"))
f.write("};\n\n")
f.write("""\
} // namespace golden
} // namespace bpf_dsl
} // namespace sandbox
#endif // SANDBOX_LINUX_BPF_DSL_GOLDEN_GOLDEN_FILES_H_
""")
|
tods/sk_interface/detection_algorithm/COF_skinterface.py | ZhuangweiKang/tods | 544 | 12643405 | import numpy as np
from ..base import BaseSKI
from tods.detection_algorithm.PyodCOF import COFPrimitive
class COFSKI(BaseSKI):
def __init__(self, **hyperparams):
super().__init__(primitive=COFPrimitive, **hyperparams)
self.fit_available = True
self.predict_available = True
self.produce_available = False
|
python_utils/__init__.py | xzabg/fast-adversarial | 520 | 12643467 | # --------------------------------------------------------
# Python Utils
# Copyright (c) 2015 UC Berkeley
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
|
rl_games/envs/test_network.py | Zhehui-Huang/rl_games | 193 | 12643489 | import torch
from torch import nn
import torch.nn.functional as F
class TestNet(nn.Module):
def __init__(self, params, **kwargs):
nn.Module.__init__(self)
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
num_inputs = 0
assert(type(input_shape) is dict)
for k,v in input_shape.items():
num_inputs +=v[0]
self.central_value = params.get('central_value', False)
self.value_size = kwargs.pop('value_size', 1)
self.linear1 = nn.Linear(num_inputs, 256)
self.linear2 = nn.Linear(256, 128)
self.linear3 = nn.Linear(128, 64)
self.mean_linear = nn.Linear(64, actions_num)
self.value_linear = nn.Linear(64, 1)
def is_rnn(self):
return False
def forward(self, obs):
obs = obs['obs']
obs = torch.cat([obs['pos'], obs['info']], axis=-1)
x = F.relu(self.linear1(obs))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
action = self.mean_linear(x)
value = self.value_linear(x)
if self.central_value:
return value, None
return action, value, None
from rl_games.algos_torch.network_builder import NetworkBuilder
class TestNetBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
def build(self, name, **kwargs):
return TestNet(self.params, **kwargs)
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
|
halogen/mfbot.py | 0xkyle/halogen | 174 | 12643517 | import argparse
import glob
import os
from lib.parser import get_file
from lib.generator import yara_image_rule_maker
from lib.render import yara_print_rule
class MFBot:
""" Malicious File Bot Class """
def __init__(self) -> None:
args = MFBot.parse_args()
self.yara_base_file = args.file
self.image_name = None
self.idat = args.idat
self.jpgsos = args.jpgsos
self.sof2sos = args.sof2sos
self.dir = args.dir
self.jump = args.jump
self.dirhash = []
self.name = args.name
@staticmethod
def parse_args()-> iter:
""" Parse any options passed to the the script """
parser_args = argparse.ArgumentParser(description="Halogen: Automatically create yara \
rules based on images embedded in office documents.")
parser_args.add_argument("-f", "--file", help="File to parse")
parser_args.add_argument("-d", "--directory", dest="dir", help="directory to scan \
for image files.")
parser_args.add_argument("-n", "--rule-name", dest="name", help="specify a custom \
name for the rule file")
parser_args.add_argument("--png-idat", dest="idat", help="For PNG matches, instead \
of starting with the PNG file header, start with the IDAT chunk.", action='store_true')
parser_args.add_argument("--jpg-sos", dest="jpgsos", help="For JPG matches, skip \
over the header and look for the Start of Scan marker, \
and begin the match there.", action='store_true')
parser_args.add_argument("--jpg-sof2sos", dest="sof2sos", help="for JPG matches, \
skip over the header and match the SOF all the way to the SOS + 45 bytes of the \
data within the SOS.", action='store_true')
parser_args.add_argument("--jpg-jump", dest="jump", help="for JPG matches, \
skip over the header and identify the sof, the sos and then read the actual image data \
take that data and look for repeated bytes. Skip those bytes and then create 45 bytes of\
raw image data.", action='store_true')
args = parser_args.parse_args()
if (args.file is None) and (args.dir is None):
parser_args.print_help()
exit(1)
return args
def run(self):
"""mfbot.run() is the core function to call that will return all information
generated by mfbot.
returns: rule_dict - dictionary of rules. """
self.get_file = get_file(self)
rule_dict = yara_image_rule_maker(self)
if rule_dict is not None:
return rule_dict
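    # Usage sketch (assumes the script was started with -f or -d so parse_args succeeded):
    #
    #   mfbot = MFBot()
    #   rules = mfbot.dir_run() if mfbot.dir else mfbot.run()
    #   if rules:
    #       mfbot.print_yara_rule(rules)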
def print_yara_rule(self, rule_list):
""" prints the yara rule by reading in a list of dicts, and iterating over that.
parameter: rule_list - list of rules to print. """
yara_print_rule(self, rule_list)
def dir_run(self):
""" runs through the process with a directory instead of a single file.
returns: combo list. """
filelist = glob.glob(self.dir + "/*")
combo = []
for f in filelist:
if os.path.isfile(f):
self.image_name = None
self.yara_base_file = f
self.get_file = get_file(self)
self.dirhash.append(self.get_file[0])
rule_dict = yara_image_rule_maker(self)
if rule_dict is not None:
for i in rule_dict:
if i not in combo:
combo.append(i)
else:
pass
return combo
|
tests/correlation_test.py | sethvargo/vaex | 337 | 12643523 | import numpy as np
import vaex
def test_correlation():
df = vaex.example()
# A single column pair
xy = yx = df.correlation('x', 'y')
xy_expected = np.corrcoef(df.x.values, df.y.values)[0,1]
np.testing.assert_array_almost_equal(xy, xy_expected, decimal=5)
np.testing.assert_array_almost_equal(df.correlation('x', 'y'), df.correlation('y', 'x'))
xx = df.correlation('x', 'x')
yy = df.correlation('y', 'y')
zz = df.correlation('z', 'z')
zx = xz = df.correlation('x', 'z')
zy = yz = df.correlation('y', 'z')
# A list of columns
result = df.correlation(x=['x', 'y', 'z'])
expected3 = expected = np.array(([xx, xy, xz],
[yx, yy, yz],
[zx, zy, zz]))
np.testing.assert_array_almost_equal(result, expected)
# A list of columns and a single target
desired = df.correlation(x=['x', 'y', 'z'], y='z')
expected = np.array([xz, yz, zz])
np.testing.assert_array_almost_equal(desired, expected)
result = df.correlation(x=['x', 'y', 'z'], y=['y', 'z'])
assert result.shape == (3, 2)
expected = np.array(([xy, xz],
[yy, yz],
[zy, zz]
))
np.testing.assert_array_almost_equal(result, expected)
result = df.correlation(x=['x', 'y', 'z'], y=['y', 'z'])
result = df.correlation(['x', 'y'], binby='x', shape=4, limits=[-2, 2])
result0 = df.correlation(['x', 'y'], selection=(df.x >= -2) & (df.x < -1))
np.testing.assert_array_almost_equal(result[0], result0)
xar = df.correlation(['x', 'y', 'z'], array_type='xarray')
np.testing.assert_array_almost_equal(xar.data, expected3)
assert xar.dims == ("x", "y")
assert xar.coords['x'].data.tolist() == ['x', 'y', 'z']
assert xar.coords['y'].data.tolist() == ['x', 'y', 'z']
dfc = df.correlation([('x', 'y'), ('x', 'z'), ('y', 'z')])
assert len(dfc) == 3
assert dfc['x'].tolist() == ['x', 'x', 'y']
assert dfc['y'].tolist() == ['y', 'z', 'z']
np.testing.assert_array_almost_equal(dfc['correlation'].tolist(), [xy, xz, yz])
|
python/fate_client/flow_sdk/client/api/job.py | QuantumA/FATE | 715 | 12643524 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from contextlib import closing
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config, download_from_request
class Job(BaseFlowAPI):
def list(self, limit=10):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/list/job', json=config_data)
def view(self, job_id=None, role=None, party_id=None, status=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/data/view/query', json=config_data)
def submit(self, config_data, dsl_data=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/submit', json={
'job_runtime_conf': config_data,
'job_dsl': dsl_data,
})
def stop(self, job_id):
job_id = str(job_id)
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id'])
return self._post(url='job/stop', json=config_data)
def query(self, job_id=None, role=None, party_id=None, component_name=None, status=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/query', json=config_data)
def config(self, job_id, role, party_id, output_path):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id', 'role', 'party_id', 'output_path'])
response = self._post(url='job/config', json=config_data)
if response['retcode'] == 0:
job_id = response['data']['job_id']
download_directory = os.path.join(config_data['output_path'], 'job_{}_config'.format(job_id))
os.makedirs(download_directory, exist_ok=True)
for k, v in response['data'].items():
if k == 'job_id':
continue
with open('{}/{}.json'.format(download_directory, k), 'w') as fw:
json.dump(v, fw, indent=4)
del response['data']['dsl']
del response['data']['runtime_conf']
response['directory'] = download_directory
response['retmsg'] = 'download successfully, please check {} directory'.format(download_directory)
return response
def log(self, job_id, output_path):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id', 'output_path'])
job_id = config_data['job_id']
tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
with closing(self._post(url='job/log/download', handle_result=False, json=config_data, stream=True)) as response:
if response.status_code == 200:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
response = {'retcode': 0,
'directory': extract_dir,
'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
else:
response = response.json()
return response
def generate_dsl(self, train_dsl, cpn):
"""
@param train_dsl: dict or str
@param cpn: list or str
"""
if isinstance(train_dsl, dict):
train_dsl = json.dumps(train_dsl)
config_data = {
"cpn_str": cpn,
"train_dsl": train_dsl,
"version": "2"
}
res = self._post(url="job/dsl/generate", handle_result=True, json=config_data)
if not res.get("data"):
res["data"] = {}
return res
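    # Usage sketch (hypothetical DSL and component names, shown only for the argument shapes):
    #
    #   job_api.generate_dsl(train_dsl={'components': {...}}, cpn=['reader_0', 'dataio_0'])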
# TODO complete it in next version
# def clean(self, job_id=None, role=None, party_id=None, component_name=None):
# kwargs = locals()
# config_data, dsl_data = preprocess(**kwargs)
# check_config(config=config_data, required_arguments=['job_id'])
# return self._post(url='job/clean', json=config_data)
|
python/src/nnabla/experimental/trainers/updater.py | daniel-falk/nnabla | 2,792 | 12643527 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Updater(object):
'''Updater
Args:
solver (:obj:`nnabla.solvers.Solver`): Solver object. E.g., Momentum or Adam.
loss (:obj:`nnabla.Variable`): Loss variable from which the forward and the backward is called.
data_feeder (callable :obj:`object`, function, or lambda): Data feeder.
forward_callback_on_start (callable :obj:`object`, function, lambda, or list of these, optional): Callback called before forward function.
forward_callback_on_finish (callable :obj:`object`, function, lambda, or list of these, optional): Callback called after forward function.
backward_callback_on_start (callable :obj:`object`, function, lambda, or list of these, optional): Callback called before backward function.
backward_callback_on_finish (callable :obj:`object`, function, lambda, or list of these, optional): Callback called after backward function.
comm_callback_on_start (callable :obj:`object`, function, lambda, or list of these, optional): Callback called before comm.all_reduce.
comm_callback_on_finish (callable :obj:`object`, function, lambda, or list of these, optional): Callback called after comm.all_reduce.
update_callback_on_start (callable :obj:`object`, function, lambda, or list of these, optional): Callback called before update function.
update_callback_on_finish (callable :obj:`object`, function, lambda, or list of these, optional): Callback called after update function.
clear_buffer (:obj:`bool`, optional): Clears the no longer referenced variables during backpropagation to save memory.
accum_grad (:obj:`int`, optional): Number of accumulation of gradients. Update method of the `solver` is called after the `accum_grad` number of the forward and backward is called. Default is 1.
comm (:obj:`nnabla.communicators.Communicator`, optional): Communicator when to do distributed training. Default is :obj:`None`.
grads (:obj:`list` of :obj:`nnabla.NdArray`, optional): The list of gradients to be exchanged when to do distributed training. Default is the empty :obj:`list`.
Example:
.. code-block:: python
from nnabla.experimental.trainers import Updater
solver = <Solver>
loss = <Loss Variable of Network>
def tdata_feeder():
...
def update_callback_on_finish(i):
...
            updater = Updater(solver, loss, tdata_feeder, update_callback_on_finish=update_callback_on_finish)
# Training iteration
for itr in range(<max_iter>):
            updater.update(itr)
'''
def _force_to_list(self, x):
if type(x) is list:
return x
else:
return [x]
def __init__(self, solver=None, loss=None,
data_feeder=lambda: True,
forward_callback_on_start=lambda i: True,
forward_callback_on_finish=lambda i: True,
backward_callback_on_start=lambda i: True,
backward_callback_on_finish=lambda i: True,
comm_callback_on_start=lambda i: True,
comm_callback_on_finish=lambda i: True,
update_callback_on_start=lambda i: True,
update_callback_on_finish=lambda i: True,
clear_buffer=True,
accum_grad=1,
comm=None,
grads=[]):
self.solver = solver
self.loss = loss
self.data_feeder = data_feeder
self.forward_callback_on_start = self._force_to_list(
forward_callback_on_start)
self.forward_callback_on_finish = self._force_to_list(
forward_callback_on_finish)
self.backward_callback_on_start = self._force_to_list(
backward_callback_on_start)
self.backward_callback_on_finish = self._force_to_list(
backward_callback_on_finish)
self.comm_callback_on_start = self._force_to_list(
comm_callback_on_start)
self.comm_callback_on_finish = self._force_to_list(
comm_callback_on_finish)
self.update_callback_on_start = self._force_to_list(
update_callback_on_start)
self.update_callback_on_finish = self._force_to_list(
update_callback_on_finish)
self.clear_buffer = clear_buffer
self.accum_grad = accum_grad
self.comm = comm
self.grads = grads
def update(self, i):
"""Monolithic update method.
This method calls the following methods with the dynamic loss scaling.
1. solver.zerograd
2. feed data
3. loss.forward
4. loss.backward
5. comm.all_reduce (if it is specified)
6. solver.update
"""
# Initialize gradients
self.solver.zero_grad()
# Forward and backward
for _ in range(self.accum_grad):
# feed data
self.data_feeder()
# forward
            for callback in self.forward_callback_on_start:
callback(i)
self.loss.forward(clear_no_need_grad=self.clear_buffer)
for callback in self.forward_callback_on_finish:
callback(i)
# backward
for callback in self.backward_callback_on_start:
callback(i)
self.loss.backward(clear_buffer=self.clear_buffer)
for callback in self.backward_callback_on_finish:
callback(i)
# AllReduce
        if self.comm and len(self.grads) != 0:
for callback in self.comm_callback_on_start:
callback(i)
self.comm.all_reduce(self.grads, division=False, inplace=False)
for callback in self.comm_callback_on_finish:
callback(i)
# Update
for callback in self.update_callback_on_start:
callback(i)
self.solver.update()
for callback in self.update_callback_on_finish:
callback(i)
|
software/vna_controller/adc_bbone_init.py | loxodes/Vector-Network-Analyzer | 173 | 12643572 | # script to bit-bang ad9864 initialiation over SPI for fixed 45 MHz IF
# (use bit-banged SPI, initialization adc initialization only happens once
# save the hardware SPI for more exciting things like controlling the synth)
# currently hardcoded for assuming 26 MHz ref and adc clk
import time
import pdb
from bbone_spi_bitbang import bitbang_spi
from vna_pins_r1 import PINS
ADC_SPI_CS1 = PINS.AD_PE_A # adc1
ADC_SPI_CS2 = PINS.AD_PE_B # adc2
ADC_SPI_CS3 = PINS.AD_PE_C # adc3
ADC_SPI_CS4 = PINS.AD_PE_D # adc4
ADC_SPI_MOSI = PINS.AD_PD
ADC_SPI_MISO = PINS.AD_DOUTB
ADC_SPI_CLK = PINS.AD_PC
ADC_CLK_EN = PINS.ADC_CLK_EN
def ad9864_write_reg(spi, addr, val):
payload = addr << 9 | val
spi.transfer(payload, bits = 16)
print('readback: ' + str(ad9864_read_reg(spi, addr)) + ' for val: ' + str(val))
def ad9864_read_reg(spi, addr):
AD9864_READ_MASK = 1 << 15
    payload = addr << 9 | AD9864_READ_MASK
response = spi.transfer(payload, bits = 16)
return response & 0xFF
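# 16-bit frame layout implied by the two helpers above (a sketch, not taken from the datasheet):
#   bit 15     : read flag (set by ad9864_read_reg)
#   bits 14..9 : register address (addr << 9)
#   bits 7..0  : register value on writes / returned data on reads
# e.g. writing 0x87 to register 0x19 shifts out 0x19 << 9 | 0x87 == 0x3287.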
def ad9864_tristate_miso(spi):
ad9864_write_reg(spi, 0x3B, 0x08)
def ad9864_set_attenuation(spi, fixed = False):
if fixed:
# set fixed attenuation
ad9864_write_reg(spi, 0x03, 0x80)
else:
ad9864_write_reg(spi, 0x03, 0x00)
def ad9864_init(spi):
ad9864_write_reg(spi, 0x3F, 0x99) # software reset
time.sleep(.001)
ad9864_write_reg(spi, 0x19, 0x87) # 4-wire SPI, 16 bit I/Q
ad9864_write_reg(spi, 0x3B, 0x00) # enable mosi on doutb
ad9864_write_reg(spi, 0x00, 0x77) # take ref out of standby
# lc and rc resonator calibration
ad9864_write_reg(spi, 0x3E, 0x47)
ad9864_write_reg(spi, 0x38, 0x01)
ad9864_write_reg(spi, 0x39, 0x0F)
time.sleep(.001)
for i in range(5):
ad9864_write_reg(spi, 0x1C, 0x03)
ad9864_write_reg(spi, 0x00, 0x74)
time.sleep(.006)
r = ad9864_read_reg(spi, 0x1C)
if r == 0:
print('LC/RC calibration worked!')
break
ad9864_write_reg(spi, 0x1C, 0x00)
print("LC/RC calibration failed, retrying..")
ad9864_write_reg(spi, 0x38, 0x00)
ad9864_write_reg(spi, 0x3E, 0x00)
# readback tuning values
print('CAPL1 (coarse): {}'.format(ad9864_read_reg(spi, 0x1D)))
print('CAPL0 (fine): {}'.format(ad9864_read_reg(spi, 0x1E)))
print('CAPR : {}'.format(ad9864_read_reg(spi, 0x1F)))
# lo synth configuration, set LO to 48.25 MHz
ad9864_write_reg(spi, 0x00, 0x30) # enable everything but ck..
ad9864_write_reg(spi, 0x08, 0x00)
ad9864_write_reg(spi, 0x09, 0x68) # LOR = 104 (so, fif = 250 kHz * (8 LOB + LOA)
ad9864_write_reg(spi, 0x0A, 0x20) # LOA = 1
ad9864_write_reg(spi, 0x0B, 0x18) # LOB = 24 (0x18)
ad9864_write_reg(spi, 0x0C, 0x07) # normal LO charge pump current control
# configure decimation
ad9864_write_reg(spi, 0x07, 0x0e) # set decimation rate to 900, 60 * (M + 1) if K = 0, M = 14
# configure SSI
ad9864_write_reg(spi, 0x1A, 0x07) # (clkout freq = adc clk / 7)
ad9864_write_reg(spi, 0x18, 0x00) # take fs and clkout out of tristate
# set doutb to tristate
ad9864_write_reg(spi, 0x3B, 0x08) # disable mosi on doutb
if __name__ == '__main__':
from mmap_gpio import GPIO
import time
gpio = GPIO()
SYNCB = PINS.AD_SYNCB
V3_EN = PINS.PLL_3V3_EN
gpio.set_output(SYNCB)
gpio.set_output(V3_EN)
gpio.set_output(ADC_CLK_EN)
gpio.set_value(V3_EN, gpio.HIGH)
gpio.set_value(SYNCB, gpio.HIGH)
gpio.set_value(ADC_CLK_EN, gpio.HIGH)
print("SYNCB HIGH")
time.sleep(.5)
spi1 = bitbang_spi(ADC_SPI_CS1, ADC_SPI_MOSI, ADC_SPI_MISO, ADC_SPI_CLK)
spi2 = bitbang_spi(ADC_SPI_CS2, ADC_SPI_MOSI, ADC_SPI_MISO, ADC_SPI_CLK)
spi3 = bitbang_spi(ADC_SPI_CS3, ADC_SPI_MOSI, ADC_SPI_MISO, ADC_SPI_CLK)
spi4 = bitbang_spi(ADC_SPI_CS4, ADC_SPI_MOSI, ADC_SPI_MISO, ADC_SPI_CLK)
ad9864_tristate_miso(spi1)
ad9864_tristate_miso(spi2)
ad9864_tristate_miso(spi3)
ad9864_tristate_miso(spi4)
print("init adc1")
ad9864_init(spi1)
print("init adc2")
ad9864_init(spi2)
print("init adc3")
ad9864_init(spi3)
print("init adc4")
ad9864_init(spi4)
time.sleep(.5)
gpio.set_value(SYNCB, gpio.HIGH)
gpio.set_value(SYNCB, gpio.LOW)
time.sleep(.05)
gpio.set_value(SYNCB, gpio.HIGH)
|
libnd4j/include/graph/generated/nd4j/graph/UIHistogram.py | rghwer/testdocs | 13,006 | 12643581 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
import flatbuffers
class UIHistogram(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsUIHistogram(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = UIHistogram()
x.Init(buf, n + offset)
return x
# UIHistogram
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# UIHistogram
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# UIHistogram
def Numbins(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# UIHistogram
def Binranges(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .FlatArray import FlatArray
obj = FlatArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# UIHistogram
def Y(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .FlatArray import FlatArray
obj = FlatArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# UIHistogram
def Binlabels(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# UIHistogram
def BinlabelsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
def UIHistogramStart(builder): builder.StartObject(5)
def UIHistogramAddType(builder, type): builder.PrependInt8Slot(0, type, 0)
def UIHistogramAddNumbins(builder, numbins): builder.PrependUint32Slot(1, numbins, 0)
def UIHistogramAddBinranges(builder, binranges): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(binranges), 0)
def UIHistogramAddY(builder, y): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(y), 0)
def UIHistogramAddBinlabels(builder, binlabels): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(binlabels), 0)
def UIHistogramStartBinlabelsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def UIHistogramEnd(builder): return builder.EndObject()
|
networks/graph_cmr/utils/__init__.py | solomon-ma/PaMIR | 374 | 12643584 | from .saver import CheckpointSaver
from .data_loader import CheckpointDataLoader
from .base_trainer import BaseTrainer
from .train_options import TrainOptions
from .mesh import Mesh
|
tests/test_compile.py | strint/myia | 222 | 12643629 | from myia.abstract import from_value
from myia.operations import (
array_getitem,
array_setitem,
bool_and,
partial,
primitives as P,
scalar_add,
tagged,
)
from myia.pipeline import standard_pipeline, steps
from myia.testing.common import MA, MB, Point
from myia.testing.multitest import mt, run
run_no_opt = run.configure(
pipeline=standard_pipeline.with_steps(
steps.step_parse,
steps.step_infer,
steps.step_specialize,
steps.step_simplify_types,
steps.step_opt2,
steps.step_llift,
steps.step_validate,
steps.step_compile,
steps.step_wrap,
)
)
@run(2, 3)
def test_simple(x, y):
return x + y
@run(42)
def test_constant(x):
return x == 42
@mt(run(False, True), run(True, True), run(True, False), run(False, False))
def test_bool_and(x, y):
return bool_and(x, y)
@mt(run(22), run(3.0))
def test_dict(v):
return {"x": v}
@run({"x": 22, "y": 3.0})
def test_dict_getitem(d):
return d["x"]
@mt(run(33, 42), run(42, 33))
def test_if(x, y):
if x > y:
return x - y
else:
return y - x
@mt(run(33, 42), run(44, 42))
def test_if_nottail(x, y):
def cap(x):
if x > 42:
x = 42
return x
return y - cap(x)
@run(42, 33)
def test_call(x, y):
def f(x):
return x * x
return f(x) + f(y)
@run(42)
def test_tailcall(x):
def fsum(x, a):
if x == 1:
return a
else:
return fsum(x - 1, a + x)
return fsum(x, 1)
@mt(run(-1), run(1))
def test_callp(x):
def fn(f, x):
return f(x)
def f(x):
return -x
return fn(f, -42)
@run(True, 42, 33)
def test_call_hof(c, x, y):
def f1(x, y):
return x + y
def f2(x, y):
return x * y
def choose(c):
if c:
return f1
else:
return f2
return choose(c)(x, y) + choose(not c)(x, y)
@run_no_opt(15, 17)
def test_partial_prim(x, y):
return partial(scalar_add, x)(y)
def test_switch_nontail():
def fn(x, y):
def f1():
return x
def f2():
return y
a = P.switch(x > y, f1, f2)()
return a * a
i64 = from_value(1, broaden=True)
argspec = (i64, i64)
myia_fn = standard_pipeline(input=fn, argspec=argspec)["output"]
for test in [(6, 23, 23 ** 2), (67, 23, 67 ** 2)]:
*args, expected = test
assert myia_fn(*args) == expected
@mt(run(None), run(42))
def test_is_(x):
return x is None
@mt(run(None), run(42))
def test_is_not(x):
return x is not None
@mt(
run(1, 1.7, Point(3, 4), (8, 9)),
run(0, 1.7, Point(3, 4), (8, 9)),
run(-1, 1.7, Point(3, 4), (8, 9)),
)
def test_tagged(c, x, y, z):
if c > 0:
return tagged(x)
elif c == 0:
return tagged(y)
else:
return tagged(z)
@mt(run("hey", 2), run("idk", 5))
def test_string_eq(s, x):
if s == "idk":
x = x + 1
return x
@mt(run("hey", 2), run("idk", 5))
def test_string_ne(s, x):
if s != "idk":
x = x + 1
return x
@run("hey")
def test_string_return(s):
return s
@run(MA(4, 5))
def test_array_getitem(x):
return array_getitem(x, (0, 1), (3, 5), (2, 3))
@run(MA(4, 5), MB(2, 2))
def test_array_setitem(x, v):
return array_setitem(x, (0, 1), (3, 5), (2, 3), v)
|
py/tests/unit/with_runtime_sparkling/test_w2v.py | krmartin/sparkling-water | 990 | 12643637 | <gh_stars>100-1000
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import RegexTokenizer, StopWordsRemover
from pyspark.mllib.linalg import *
from pyspark.sql.types import *
from pysparkling.ml import H2OGBM, H2OWord2Vec
from tests import unit_test_utils
def testPipelineSerialization(craiglistDataset):
    [trainingDataset, testingDataset] = craiglistDataset.randomSplit([0.9, 0.1], 42)
tokenizer = RegexTokenizer(inputCol="jobtitle", minTokenLength=2, outputCol="tokenized")
stopWordsRemover = StopWordsRemover(inputCol=tokenizer.getOutputCol(), outputCol="stopWordsRemoved")
w2v = H2OWord2Vec(sentSampleRate=0, epochs=10, inputCol=stopWordsRemover.getOutputCol(), outputCol="w2v")
gbm = H2OGBM(labelCol="category", featuresCols=[w2v.getOutputCol()])
pipeline = Pipeline(stages=[tokenizer, stopWordsRemover, w2v, gbm])
pipeline.write().overwrite().save("file://" + os.path.abspath("build/w2v_pipeline"))
loadedPipeline = Pipeline.load("file://" + os.path.abspath("build/w2v_pipeline"))
    model = loadedPipeline.fit(trainingDataset)
expected = model.transform(testingDataset)
model.write().overwrite().save("file://" + os.path.abspath("build/w2v_pipeline_model"))
loadedModel = PipelineModel.load("file://" + os.path.abspath("build/w2v_pipeline_model"))
result = loadedModel.transform(testingDataset)
unit_test_utils.assert_data_frames_are_identical(expected, result)
|
env/Lib/site-packages/argon2/_legacy.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 193 | 12643649 | <reponame>andresgreen-byte/Laboratorio-1--Inversion-de-Capital
# SPDX-License-Identifier: MIT
"""
Legacy mid-level functions.
"""
import os
from typing import Optional
from ._password_hasher import (
DEFAULT_HASH_LENGTH,
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_RANDOM_SALT_LENGTH,
DEFAULT_TIME_COST,
)
from ._typing import Literal
from .low_level import Type, hash_secret, hash_secret_raw, verify_secret
def hash_password(
password: bytes,
salt: Optional[bytes] = None,
time_cost: int = DEFAULT_TIME_COST,
memory_cost: int = DEFAULT_MEMORY_COST,
parallelism: int = DEFAULT_PARALLELISM,
hash_len: int = DEFAULT_HASH_LENGTH,
type: Type = Type.I,
) -> bytes:
"""
Legacy alias for :func:`hash_secret` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
if salt is None:
salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
return hash_secret(
password, salt, time_cost, memory_cost, parallelism, hash_len, type
)
def hash_password_raw(
password: bytes,
salt: Optional[bytes] = None,
time_cost: int = DEFAULT_TIME_COST,
memory_cost: int = DEFAULT_MEMORY_COST,
parallelism: int = DEFAULT_PARALLELISM,
hash_len: int = DEFAULT_HASH_LENGTH,
type: Type = Type.I,
) -> bytes:
"""
Legacy alias for :func:`hash_secret_raw` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
if salt is None:
salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
return hash_secret_raw(
password, salt, time_cost, memory_cost, parallelism, hash_len, type
)
def verify_password(
hash: bytes, password: bytes, type: Type = Type.I
) -> Literal[True]:
"""
Legacy alias for :func:`verify_secret` with default parameters.
.. deprecated:: 16.0.0
Use :class:`argon2.PasswordHasher` for passwords.
"""
return verify_secret(hash, password, type)
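# Illustrative usage sketch of the legacy helpers above (hypothetical secret);
# new code should prefer argon2.PasswordHasher, as the docstrings note:
#
#     encoded = hash_password(b"s3kr3t")
#     assert verify_password(encoded, b"s3kr3t")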
|
chips/compiler/utils.py | dillonhuff/Chips-2.0 | 221 | 12643660 | from numpy import uint32
from numpy import int32
from numpy import uint64
import struct
def calculate_jumps(instructions, extract_constants=False):
"""change symbolic labels into numeric addresses"""
# calculate the values of jump locations
location = 0
labels = {}
initial_contents = {}
new_instructions = []
for instruction in instructions:
if instruction["op"] == "label":
labels[instruction["label"]] = location
elif instruction["op"] == "constant" and extract_constants:
initial_contents[instruction["offset"]] = instruction["value"]
else:
new_instructions.append(instruction)
location += 1
instructions = new_instructions
# substitute real values for labeled jump locations
for instruction in instructions:
if "label" in instruction:
instruction["label"] = labels[instruction["label"]]
if extract_constants:
return instructions, initial_contents
else:
return instructions
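# Minimal usage sketch (hypothetical instruction dictionaries) showing how a
# symbolic jump target is replaced by the numeric address of the instruction
# emitted after its label:
if __name__ == "__main__":
    example = [
        {"op": "nop"},
        {"op": "label", "label": "end"},
        {"op": "goto", "label": "end"},
    ]
    resolved = calculate_jumps(example)
    # the label line is dropped and "end" resolves to address 1
    assert resolved == [{"op": "nop"}, {"op": "goto", "label": 1}]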
|
home-assistant/custom_components/meteo-swiss/weather.py | twhite96/smart-home-setup | 190 | 12643672 | <filename>home-assistant/custom_components/meteo-swiss/weather.py<gh_stars>100-1000
from hamsclient import meteoSwissClient
import datetime
import logging
import voluptuous as vol
import re
import sys
import homeassistant.core as hass
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
WeatherEntity,
)
from homeassistant.const import (
TEMP_CELSIUS,
CONF_LATITUDE,
CONF_LONGITUDE,
)
import homeassistant.util.dt as dt_util
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import async_timeout
_LOGGER = logging.getLogger(__name__)
from .const import CONDITION_CLASSES, DOMAIN
async def async_setup_entry(hass, config, async_add_entities):
_LOGGER.info("Starting asnyc setup platform")
client =hass.data[DOMAIN]['client']
async_add_entities([MeteoSwissWeather(client)], True)
class MeteoSwissWeather(WeatherEntity):
#Using openstreetmap to get post code from HA configuration
def __init__(self,client:meteoSwissClient):
self._client = client
if client is None:
_LOGGER.error("Error empty client")
def update(self):
"""Update Condition and Forecast."""
self._client.update()
data = self._client.get_data()
self._displayName = data["name"]
self._forecastData = data["forecast"]
self._condition = data["condition"]
@property
def name(self):
return self._displayName
@property
def temperature(self):
try:
return float(self._condition[0]['tre200s0'])
        except Exception:
_LOGGER.debug("Error converting temp %s"%self._condition[0]['tre200s0'])
return None
@property
def pressure(self):
try:
return float(self._condition[0]['prestas0'])
        except Exception:
_LOGGER.debug("Error converting pressure (qfe) %s"%self._condition[0]['prestas0'])
return None
@property
def pressure_qff(self):
try:
            return float(self._condition[0]['pp0qffs0'])
        except Exception:
_LOGGER.debug("Error converting pressure (qff) %s"%self._condition[0]['pp0qffs0'])
return None
@property
def pressure_qnh(self):
try:
            return float(self._condition[0]['pp0qnhs0'])
        except Exception:
_LOGGER.debug("Error converting pressure (qnh) %s"%self._condition[0]['pp0qnhs0'])
return None
@property
def state(self):
symbolId = self._forecastData["data"]["current"]['weather_symbol_id']
cond = next(
(
k
for k, v in CONDITION_CLASSES.items()
if int(symbolId) in v
),
None,
)
_LOGGER.debug("Current symbol is %s condition is : %s"%(symbolId,cond))
return cond
def msSymboldId(self):
return self._forecastData["data"]["current"]['weather_symbol_id']
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self):
try:
return float(self._condition[0]['ure200s0'])
        except Exception:
_LOGGER.debug("Unable to convert humidity value : %s"%(self._condition[0]['ure200s0']))
@property
def wind_speed(self):
try:
return float(self._condition[0]['fu3010z0'])
        except Exception:
_LOGGER.debug("Unable to convert windSpeed value : %s"%(self._condition[0]['fu3010z0']))
return None
@property
def attribution(self):
return "Weather forecast from MeteoSwiss (https://www.meteoswiss.admin.ch/)"
@property
def wind_bearing(self):
try:
client = self._client.get_client()
return client.get_wind_bearing(self._condition[0]['dkl010z0'])
        except Exception:
_LOGGER.debug("Unable to get wind_bearing from data : %s"%(self._condition[0]['dkl010z0']))
return None
@property
def forecast(self):
currentDate = datetime.datetime.now()
one_day = datetime.timedelta(days=1)
fcdata_out = []
for forecast in self._forecastData["data"]["forecasts"]:
#calculating date of the forecast
currentDate = currentDate + one_day
data_out = {}
data_out[ATTR_FORECAST_TIME] = currentDate.strftime("%Y-%m-%d")
data_out[ATTR_FORECAST_TEMP_LOW]=float(forecast["temp_low"])
data_out[ATTR_FORECAST_TEMP]=float(forecast["temp_high"])
data_out[ATTR_FORECAST_CONDITION] = next(
(
k
for k, v in CONDITION_CLASSES.items()
if int(forecast["weather_symbol_id"]) in v
),
None,
)
fcdata_out.append(data_out)
return fcdata_out
|
SAN/lib/cluster/cluster.py | yerang823/landmark-detection | 612 | 12643676 | ##############################################################
### Copyright (c) 2018-present, <NAME> ###
### Style Aggregated Network for Facial Landmark Detection ###
### Computer Vision and Pattern Recognition, 2018 ###
##############################################################
import numpy as np
from sklearn.preprocessing import normalize
import pdb
def cos_dis(x, y):
x = normalize(x[:,np.newaxis], axis=0).ravel()
y = normalize(y[:,np.newaxis], axis=0).ravel()
return np.linalg.norm(x-y)
def filter_cluster(indexes, cluster_features, ratio):
num_feature = cluster_features.shape[0]
mean_feature = np.mean(cluster_features, axis=0)
all_L1, all_L2, all_LC = [], [], []
for i in range(num_feature):
x = cluster_features[i]
L1 = np.sum(np.abs((x-mean_feature)))
L2 = np.linalg.norm(x-mean_feature)
LC = cos_dis(x, mean_feature)
all_L1.append( L1 )
all_L2.append( L2 )
all_LC.append( LC )
all_L1 = np.array(all_L1)
all_L2 = np.array(all_L2)
all_LC = np.array(all_LC)
threshold = (all_L2.max()-all_L2.min())*ratio+all_L2.min()
selected = indexes[ all_L2 < threshold ]
return selected.copy()
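# Minimal usage sketch with made-up feature vectors: keep the indexes whose
# L2 distance to the cluster mean lies below min + ratio * (max - min):
if __name__ == "__main__":
    indexes = np.arange(5)
    features = np.random.rand(5, 128)
    kept = filter_cluster(indexes, features, ratio=0.8)
    print("kept %d of %d samples" % (len(kept), len(indexes)))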
|
run.py | mwielgoszewski/jython-burp-api | 134 | 12643715 | # -*- coding: utf-8 -*-
from java.lang import System
from org.python.util import JLineConsole, PythonInterpreter
import logging
import os.path
import sys
import time
def start_burp(options, *args):
sys.path.extend([os.path.join('java', 'src'), options.burp])
from burp_extender import BurpExtender as MyBurpExtender, ConsoleThread
from burp import StartBurp
import BurpExtender
from gds.burp.config import Configuration
if options.debug:
logging.basicConfig(
filename='jython-burp.log',
format='%(asctime)-15s - %(levelname)s - %(message)s',
level=logging.DEBUG)
elif options.verbose:
logging.basicConfig(
filename='jython-burp.log',
format='%(asctime)-15s - %(levelname)s - %(message)s',
level=logging.INFO)
else:
logging.basicConfig(
filename='jython-burp.log',
format='%(asctime)-15s - %(levelname)s - %(message)s',
level=logging.WARN)
# Set the BurpExtender handler to the Pythonic BurpExtender
Burp = MyBurpExtender()
    Burp.config = Configuration(os.path.abspath(options.config))
Burp.opt = options
Burp.args = args
BurpExtender.setHandler(Burp)
StartBurp.main(args)
# In latest Burp, callbacks might not get registered immediately
while not Burp.cb:
time.sleep(0.1)
# Disable Burp Proxy Interception on startup
Burp.setProxyInterceptionEnabled(False)
if options.interactive:
from java.util import Properties
pre_properties = System.getProperties()
pre_properties['python.console'] = 'org.python.util.ReadlineConsole'
post_properties = Properties()
PythonInterpreter.initialize(pre_properties, post_properties, [])
# Attach threaded console to BurpExtender
Burp.console = console = JLineConsole()
console.set('Burp', Burp)
try:
Burp.stdout.write('Launching interactive session...\n')
except Exception:
sys.stdout.write('Launching interactive session...\n')
ConsoleThread(console).start()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-B', '--load-burp', dest='burp',
help='Load Burp Jar from PATH', metavar='PATH')
parser.add_option('-i', '--interactive',
action='store_true',
help='Run Burp in interactive mode (Jython Console)')
parser.add_option('-f', '--file', metavar='FILE',
help='Restore Burp state from FILE on startup')
parser.add_option('-d', '--debug',
action='store_true',
help='Set log level to DEBUG')
parser.add_option('-v', '--verbose',
action='store_true',
help='Set log level to INFO')
parser.add_option('-P', '--python-path',
default='',
help='Set PYTHONPATH used by Jython')
parser.add_option('-C', '--config',
default='burp.ini',
help='Specify alternate jython-burp config file')
parser.add_option('--disable-reloading',
action='store_true',
help='Disable hot-reloading when a file is changed')
opt, args = parser.parse_args()
if not opt.burp:
print('Load Burp Error: Specify a path to your burp.jar with -B')
parser.print_help()
sys.exit(1)
start_burp(opt, *args)
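# Example invocation (hypothetical paths; the script must run under Jython
# since it imports Java classes such as org.python.util.JLineConsole):
#
#     jython run.py -B /path/to/burp.jar --interactive --debug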
|
unittests/tools/test_blackduck_parser.py | M-Rod101/django-DefectDojo | 249 | 12643724 | <reponame>M-Rod101/django-DefectDojo<filename>unittests/tools/test_blackduck_parser.py
from ..dojo_test_case import DojoTestCase, get_unit_tests_path
from dojo.tools.blackduck.parser import BlackduckParser
from dojo.models import Test
from pathlib import Path
class TestBlackduckHubParser(DojoTestCase):
def test_blackduck_csv_parser_has_no_finding(self):
testfile = Path(get_unit_tests_path() + "/scans/blackduck/no_vuln.csv")
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_blackduck_csv_parser_has_one_finding(self):
testfile = Path(get_unit_tests_path() + "/scans/blackduck/one_vuln.csv")
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_blackduck_csv_parser_has_many_findings(self):
testfile = Path(get_unit_tests_path() + "/scans/blackduck/many_vulns.csv")
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(24, len(findings))
findings = list(findings)
self.assertEqual(1, len(findings[10].unsaved_vulnerability_ids))
self.assertEqual("CVE-2007-3386", findings[10].unsaved_vulnerability_ids[0])
def test_blackduck_csv_parser_new_format_has_many_findings(self):
testfile = Path(get_unit_tests_path() + "/scans/blackduck/many_vulns_new_format.csv")
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
def test_blackduck_enhanced_has_many_findings(self):
testfile = Path(
get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip"
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(11, len(findings))
def test_blackduck_enhanced_zip_upload(self):
testfile = Path(
get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip"
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(11, len(findings))
|
utils/lit/lit/ShCommands.py | clayne/DirectXShaderCompiler | 1,192 | 12643756 | <filename>utils/lit/lit/ShCommands.py<gh_stars>1000+
class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
                (other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
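# Illustrative usage sketch (hypothetical command list): a parsed pipeline such
# as ``echo hello | wc`` could be re-emitted as quoted shell text with:
#
#     import sys
#     cmds = [Command(['echo', 'hello'], []), Command(['wc'], [])]
#     Pipeline(cmds).toShell(sys.stdout)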
|
plyer/platforms/android/gravity.py | EdwardCoventry/plyer | 1,184 | 12643766 | <filename>plyer/platforms/android/gravity.py
'''
Android gravity
---------------------
'''
from jnius import autoclass
from jnius import cast
from jnius import java_method
from jnius import PythonJavaClass
from plyer.facades import Gravity
from plyer.platforms.android import activity
Context = autoclass('android.content.Context')
Sensor = autoclass('android.hardware.Sensor')
SensorManager = autoclass('android.hardware.SensorManager')
class GravitySensorListener(PythonJavaClass):
__javainterfaces__ = ['android/hardware/SensorEventListener']
def __init__(self):
super().__init__()
service = activity.getSystemService(Context.SENSOR_SERVICE)
self.SensorManager = cast('android.hardware.SensorManager', service)
self.sensor = self.SensorManager.getDefaultSensor(
Sensor.TYPE_GRAVITY
)
self.values = [None, None, None]
def enable(self):
self.SensorManager.registerListener(
self,
self.sensor,
SensorManager.SENSOR_DELAY_NORMAL
)
def disable(self):
self.SensorManager.unregisterListener(self, self.sensor)
@java_method('(Landroid/hardware/SensorEvent;)V')
def onSensorChanged(self, event):
self.values = event.values[:3]
@java_method('(Landroid/hardware/Sensor;I)V')
def onAccuracyChanged(self, sensor, accuracy):
pass
class AndroidGravity(Gravity):
def __init__(self):
super().__init__()
self.state = False
def _enable(self):
if not self.state:
self.listener = GravitySensorListener()
self.listener.enable()
self.state = True
def _disable(self):
if self.state:
self.state = False
self.listener.disable()
del self.listener
def _get_gravity(self):
if self.state:
return tuple(self.listener.values)
else:
return (None, None, None)
def __del__(self):
if self.state:
self._disable()
super().__del__()
def instance():
return AndroidGravity()
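# Typical use goes through the plyer facade rather than this module directly;
# a rough sketch (assuming the facade exposes enable/disable and a gravity
# property, as other plyer sensor facades do):
#
#     from plyer import gravity
#     gravity.enable()
#     x, y, z = gravity.gravity
#     gravity.disable()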
|
plugins/modules/oci_log_analytics_scheduled_task.py | slmjy/oci-ansible-collection | 108 | 12643786 | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_log_analytics_scheduled_task
short_description: Manage a ScheduledTask resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a ScheduledTask resource in Oracle Cloud Infrastructure
- For I(state=present), schedule a task as specified and return task info.
- "This resource has the following action operations in the M(oracle.oci.oci_log_analytics_scheduled_task_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
namespace_name:
description:
- The Logging Analytics namespace used for the request.
type: str
required: true
kind:
description:
- Discriminator.
- Required for create using I(state=present), update using I(state=present) with scheduled_task_id present.
type: str
choices:
- "STANDARD"
- "ACCELERATION"
compartment_id:
description:
- Compartment Identifier L(OCID],https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
display_name:
description:
- "A user-friendly name that is changeable and that does not have to be unique.
Format: a leading alphanumeric, followed by zero or more
alphanumerics, underscores, spaces, backslashes, or hyphens in any order).
No trailing spaces allowed."
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
task_type:
description:
- Task type.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required when kind is 'STANDARD'
type: str
choices:
- "SAVED_SEARCH"
- "ACCELERATION"
- "PURGE"
- "ACCELERATION_MAINTENANCE"
schedules:
description:
- Schedules, typically a single schedule.
Note there may only be a single schedule for SAVED_SEARCH and PURGE scheduled tasks.
- This parameter is updatable.
- Required when kind is 'STANDARD'
type: list
elements: dict
suboptions:
type:
description:
- Schedule type discriminator.
type: str
choices:
- "CRON"
- "FIXED_FREQUENCY"
required: true
misfire_policy:
description:
- Schedule misfire retry policy.
type: str
choices:
- "RETRY_ONCE"
- "RETRY_INDEFINITELY"
- "SKIP"
time_of_first_execution:
description:
- The date and time the scheduled task should execute first time after create or update;
thereafter the task will execute as specified in the schedule.
type: str
expression:
description:
- Value in cron format.
- Required when type is 'CRON'
type: str
time_zone:
description:
- Time zone, by default UTC.
- Required when type is 'CRON'
type: str
recurring_interval:
description:
- Recurring interval in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The largest supported unit is D, e.g. P14D (not P2W).
The value must be at least 5 minutes (PT5M) and at most 3 weeks (P21D or PT30240M).
- Required when type is 'FIXED_FREQUENCY'
type: str
repeat_count:
description:
- Number of times (0-based) to execute until auto-stop.
Default value -1 will execute indefinitely.
Value 0 will execute once.
- Applicable when type is 'FIXED_FREQUENCY'
type: int
action:
description:
- ""
- This parameter is updatable.
- Required when kind is 'STANDARD'
type: dict
suboptions:
type:
description:
- Action type discriminator.
type: str
choices:
- "PURGE"
- "STREAM"
required: true
query_string:
description:
- Purge query string.
- Required when type is 'PURGE'
type: str
data_type:
description:
- the type of the log data to be purged
- Required when type is 'PURGE'
type: str
choices:
- "LOG"
- "LOOKUP"
purge_duration:
description:
- The duration of data to be retained, which is used to
calculate the timeDataEnded when the task fires.
The value should be negative.
Purge duration in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The largest supported unit is D, e.g. -P365D (not -P1Y) or -P14D (not -P2W).
- Required when type is 'PURGE'
type: str
purge_compartment_id:
description:
- the compartment OCID under which the data will be purged
- Required when type is 'PURGE'
type: str
compartment_id_in_subtree:
description:
- if true, purge child compartments data
- Applicable when type is 'PURGE'
type: bool
saved_search_id:
description:
- The ManagementSavedSearch id [OCID] utilized in the action.
- Applicable when type is 'STREAM'
type: str
metric_extraction:
description:
- ""
- Applicable when type is 'STREAM'
type: dict
suboptions:
compartment_id:
description:
- The compartment OCID (/iaas/Content/General/Concepts/identifiers.htm) of the extracted metric.
- Required when type is 'STREAM'
type: str
required: true
namespace:
description:
- The namespace of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters and underscores (_).
- Required when type is 'STREAM'
type: str
required: true
metric_name:
description:
- The metric name of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters, periods (.), underscores (_), hyphens (-), and dollar signs ($).
- Required when type is 'STREAM'
type: str
required: true
resource_group:
description:
- The resourceGroup of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters, periods (.), underscores (_), hyphens (-), and dollar signs ($).
- Applicable when type is 'STREAM'
type: str
saved_search_duration:
description:
- The duration of data to be searched for SAVED_SEARCH tasks,
used when the task fires to calculate the query time range.
- Duration in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The value should be positive.
The largest supported unit (as opposed to value) is D, e.g. P14D (not P2W).
- "There are restrictions on the maximum duration value relative to the task schedule
value as specified in the following table.
Schedule Interval Range | Maximum Duration
----------------------------------- | -----------------
5 Minutes to 30 Minutes | 1 hour \\"PT60M\\"
31 Minutes to 1 Hour | 12 hours \\"PT720M\\"
1 Hour+1Minute to 1 Day | 1 day \\"P1D\\"
1 Day+1Minute to 1 Week-1Minute | 7 days \\"P7D\\"
1 Week to 2 Weeks | 14 days \\"P14D\\"
greater than 2 Weeks | 30 days \\"P30D\\""
- "If not specified, the duration will be based on the schedule. For example,
if the schedule is every 5 minutes then the savedSearchDuration will be \\"PT5M\\";
if the schedule is every 3 weeks then the savedSearchDuration will be \\"P21D\\"."
- Applicable when type is 'STREAM'
type: str
saved_search_id:
description:
- The ManagementSavedSearch id [OCID] to be accelerated.
- Required when kind is 'ACCELERATION'
type: str
scheduled_task_id:
description:
- Unique scheduledTask id returned from task create.
If invalid will lead to a 404 not found.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the ScheduledTask.
- Use I(state=present) to create or update a ScheduledTask.
- Use I(state=absent) to delete a ScheduledTask.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create scheduled_task with kind = STANDARD
oci_log_analytics_scheduled_task:
# required
kind: STANDARD
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
task_type: SAVED_SEARCH
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
schedules:
- # required
type: CRON
expression: expression_example
time_zone: time_zone_example
# optional
misfire_policy: RETRY_ONCE
time_of_first_execution: time_of_first_execution_example
action:
# required
type: PURGE
query_string: query_string_example
data_type: LOG
purge_duration: purge_duration_example
purge_compartment_id: "ocid1.purgecompartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id_in_subtree: true
- name: Create scheduled_task with kind = ACCELERATION
oci_log_analytics_scheduled_task:
# required
kind: ACCELERATION
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
saved_search_id: "ocid1.savedsearch.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update scheduled_task with kind = STANDARD
oci_log_analytics_scheduled_task:
# required
kind: STANDARD
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
schedules:
- # required
type: CRON
expression: expression_example
time_zone: time_zone_example
# optional
misfire_policy: RETRY_ONCE
time_of_first_execution: time_of_first_execution_example
action:
# required
type: PURGE
query_string: query_string_example
data_type: LOG
purge_duration: purge_duration_example
purge_compartment_id: "ocid1.purgecompartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id_in_subtree: true
- name: Update scheduled_task with kind = ACCELERATION
oci_log_analytics_scheduled_task:
# required
kind: ACCELERATION
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update scheduled_task using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) with kind = STANDARD
oci_log_analytics_scheduled_task:
# required
kind: STANDARD
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
task_type: SAVED_SEARCH
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
schedules:
- # required
type: CRON
expression: expression_example
time_zone: time_zone_example
# optional
misfire_policy: RETRY_ONCE
time_of_first_execution: time_of_first_execution_example
action:
# required
type: PURGE
query_string: query_string_example
data_type: LOG
purge_duration: purge_duration_example
purge_compartment_id: "ocid1.purgecompartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id_in_subtree: true
- name: Update scheduled_task using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set) with kind = ACCELERATION
oci_log_analytics_scheduled_task:
# required
kind: ACCELERATION
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete scheduled_task
oci_log_analytics_scheduled_task:
# required
namespace_name: namespace_name_example
scheduled_task_id: "ocid1.scheduledtask.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete scheduled_task using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_log_analytics_scheduled_task:
# required
namespace_name: namespace_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
task_type: SAVED_SEARCH
state: absent
"""
RETURN = """
scheduled_task:
description:
- Details of the ScheduledTask resource acted upon by the current operation
returned: on success
type: complex
contains:
kind:
description:
- Discriminator.
returned: on success
type: str
sample: ACCELERATION
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the data plane resource.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- "A user-friendly name that is changeable and that does not have to be unique.
Format: a leading alphanumeric, followed by zero or more
alphanumerics, underscores, spaces, backslashes, or hyphens in any order).
No trailing spaces allowed."
returned: on success
type: str
sample: display_name_example
task_type:
description:
- Task type.
returned: on success
type: str
sample: SAVED_SEARCH
schedules:
description:
- Schedules.
returned: on success
type: complex
contains:
type:
description:
- Schedule type discriminator.
returned: on success
type: str
sample: FIXED_FREQUENCY
misfire_policy:
description:
- Schedule misfire retry policy.
returned: on success
type: str
sample: RETRY_ONCE
time_of_first_execution:
description:
- The date and time the scheduled task should execute first time after create or update;
thereafter the task will execute as specified in the schedule.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
expression:
description:
- Value in cron format.
returned: on success
type: str
sample: expression_example
time_zone:
description:
- Time zone, by default UTC.
returned: on success
type: str
sample: time_zone_example
recurring_interval:
description:
- Recurring interval in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The largest supported unit is D, e.g. P14D (not P2W).
The value must be at least 5 minutes (PT5M) and at most 3 weeks (P21D or PT30240M).
returned: on success
type: str
sample: recurring_interval_example
repeat_count:
description:
- Number of times (0-based) to execute until auto-stop.
Default value -1 will execute indefinitely.
Value 0 will execute once.
returned: on success
type: int
sample: 56
action:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- Action type discriminator.
returned: on success
type: str
sample: STREAM
query_string:
description:
- Purge query string.
returned: on success
type: str
sample: query_string_example
data_type:
description:
- the type of the log data to be purged
returned: on success
type: str
sample: LOG
purge_duration:
description:
- The duration of data to be retained, which is used to
calculate the timeDataEnded when the task fires.
The value should be negative.
Purge duration in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The largest supported unit is D, e.g. -P365D (not -P1Y) or -P14D (not -P2W).
returned: on success
type: str
sample: purge_duration_example
purge_compartment_id:
description:
- the compartment OCID under which the data will be purged
returned: on success
type: str
sample: "ocid1.purgecompartment.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id_in_subtree:
description:
- if true, purge child compartments data
returned: on success
type: bool
sample: true
saved_search_id:
description:
- The ManagementSavedSearch id [OCID] utilized in the action.
returned: on success
type: str
sample: "ocid1.savedsearch.oc1..xxxxxxEXAMPLExxxxxx"
metric_extraction:
description:
- ""
returned: on success
type: complex
contains:
compartment_id:
description:
- The compartment OCID (/iaas/Content/General/Concepts/identifiers.htm) of the extracted metric.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
namespace:
description:
- The namespace of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters and underscores (_).
returned: on success
type: str
sample: namespace_example
metric_name:
description:
- The metric name of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters, periods (.), underscores (_), hyphens (-), and dollar signs ($).
returned: on success
type: str
sample: metric_name_example
resource_group:
description:
- The resourceGroup of the extracted metric.
A valid value starts with an alphabetical character and includes only
alphanumeric characters, periods (.), underscores (_), hyphens (-), and dollar signs ($).
returned: on success
type: str
sample: resource_group_example
saved_search_duration:
description:
- The duration of data to be searched for SAVED_SEARCH tasks,
used when the task fires to calculate the query time range.
- Duration in ISO 8601 extended format as described in
https://en.wikipedia.org/wiki/ISO_8601#Durations.
The value should be positive.
The largest supported unit (as opposed to value) is D, e.g. P14D (not P2W).
- "There are restrictions on the maximum duration value relative to the task schedule
value as specified in the following table.
Schedule Interval Range | Maximum Duration
----------------------------------- | -----------------
5 Minutes to 30 Minutes | 1 hour \\"PT60M\\"
31 Minutes to 1 Hour | 12 hours \\"PT720M\\"
1 Hour+1Minute to 1 Day | 1 day \\"P1D\\"
1 Day+1Minute to 1 Week-1Minute | 7 days \\"P7D\\"
1 Week to 2 Weeks | 14 days \\"P14D\\"
greater than 2 Weeks | 30 days \\"P30D\\""
- "If not specified, the duration will be based on the schedule. For example,
if the schedule is every 5 minutes then the savedSearchDuration will be \\"PT5M\\";
if the schedule is every 3 weeks then the savedSearchDuration will be \\"P21D\\"."
returned: on success
type: str
sample: saved_search_duration_example
task_status:
description:
- Status of the scheduled task.
returned: on success
type: str
sample: READY
pause_reason:
description:
- reason for taskStatus PAUSED.
returned: on success
type: str
sample: METRIC_EXTRACTION_NOT_VALID
work_request_id:
description:
- most recent Work Request Identifier L(OCID],https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) for the asynchronous
request.
returned: on success
type: str
sample: "ocid1.workrequest.oc1..xxxxxxEXAMPLExxxxxx"
num_occurrences:
description:
- Number of execution occurrences.
returned: on success
type: int
sample: 56
compartment_id:
description:
- Compartment Identifier L(OCID],https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The date and time the scheduled task was created, in the format defined by RFC3339.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the scheduled task was last updated, in the format defined by RFC3339.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_of_next_execution:
description:
- The date and time the scheduled task will execute next,
in the format defined by RFC3339.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the scheduled task.
returned: on success
type: str
sample: ACTIVE
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
last_execution_status:
description:
- The most recent task execution status.
returned: on success
type: str
sample: FAILED
time_last_executed:
description:
- The date and time the scheduled task last executed, in the format defined by RFC3339.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: {
"kind": "ACCELERATION",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"task_type": "SAVED_SEARCH",
"schedules": [{
"type": "FIXED_FREQUENCY",
"misfire_policy": "RETRY_ONCE",
"time_of_first_execution": "2013-10-20T19:20:30+01:00",
"expression": "expression_example",
"time_zone": "time_zone_example",
"recurring_interval": "recurring_interval_example",
"repeat_count": 56
}],
"action": {
"type": "STREAM",
"query_string": "query_string_example",
"data_type": "LOG",
"purge_duration": "purge_duration_example",
"purge_compartment_id": "ocid1.purgecompartment.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id_in_subtree": true,
"saved_search_id": "ocid1.savedsearch.oc1..xxxxxxEXAMPLExxxxxx",
"metric_extraction": {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"namespace": "namespace_example",
"metric_name": "metric_name_example",
"resource_group": "resource_group_example"
},
"saved_search_duration": "saved_search_duration_example"
},
"task_status": "READY",
"pause_reason": "METRIC_EXTRACTION_NOT_VALID",
"work_request_id": "ocid1.workrequest.oc1..xxxxxxEXAMPLExxxxxx",
"num_occurrences": 56,
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"time_of_next_execution": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "ACTIVE",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"last_execution_status": "FAILED",
"time_last_executed": "2013-10-20T19:20:30+01:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.log_analytics import LogAnalyticsClient
from oci.log_analytics.models import CreateScheduledTaskDetails
from oci.log_analytics.models import UpdateScheduledTaskDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ScheduledTaskHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "scheduled_task_id"
def get_module_resource_id(self):
return self.module.params.get("scheduled_task_id")
def get_get_fn(self):
return self.client.get_scheduled_task
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_scheduled_task,
namespace_name=self.module.params.get("namespace_name"),
scheduled_task_id=self.module.params.get("scheduled_task_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"namespace_name",
"task_type",
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["display_name", "saved_search_id"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_scheduled_tasks, **kwargs
)
def get_create_model_class(self):
return CreateScheduledTaskDetails
def get_exclude_attributes(self):
return ["saved_search_id"]
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_scheduled_task,
call_fn_args=(),
call_fn_kwargs=dict(
namespace_name=self.module.params.get("namespace_name"),
create_scheduled_task_details=create_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateScheduledTaskDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_scheduled_task,
call_fn_args=(),
call_fn_kwargs=dict(
namespace_name=self.module.params.get("namespace_name"),
scheduled_task_id=self.module.params.get("scheduled_task_id"),
update_scheduled_task_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_scheduled_task,
call_fn_args=(),
call_fn_kwargs=dict(
namespace_name=self.module.params.get("namespace_name"),
scheduled_task_id=self.module.params.get("scheduled_task_id"),
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
ScheduledTaskHelperCustom = get_custom_class("ScheduledTaskHelperCustom")
class ResourceHelper(ScheduledTaskHelperCustom, ScheduledTaskHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
namespace_name=dict(type="str", required=True),
kind=dict(type="str", choices=["STANDARD", "ACCELERATION"]),
compartment_id=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
task_type=dict(
type="str",
choices=[
"SAVED_SEARCH",
"ACCELERATION",
"PURGE",
"ACCELERATION_MAINTENANCE",
],
),
schedules=dict(
type="list",
elements="dict",
options=dict(
type=dict(
type="str", required=True, choices=["CRON", "FIXED_FREQUENCY"]
),
misfire_policy=dict(
type="str", choices=["RETRY_ONCE", "RETRY_INDEFINITELY", "SKIP"]
),
time_of_first_execution=dict(type="str"),
expression=dict(type="str"),
time_zone=dict(type="str"),
recurring_interval=dict(type="str"),
repeat_count=dict(type="int"),
),
),
action=dict(
type="dict",
options=dict(
type=dict(type="str", required=True, choices=["PURGE", "STREAM"]),
query_string=dict(type="str"),
data_type=dict(type="str", choices=["LOG", "LOOKUP"]),
purge_duration=dict(type="str"),
purge_compartment_id=dict(type="str"),
compartment_id_in_subtree=dict(type="bool"),
saved_search_id=dict(type="str"),
metric_extraction=dict(
type="dict",
options=dict(
compartment_id=dict(type="str", required=True),
namespace=dict(type="str", required=True),
metric_name=dict(type="str", required=True),
resource_group=dict(type="str"),
),
),
saved_search_duration=dict(type="str"),
),
),
saved_search_id=dict(type="str"),
scheduled_task_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="scheduled_task",
service_client_class=LogAnalyticsClient,
namespace="log_analytics",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
peering_manager/views/generics.py | jasjukaitis/peering-manager | 127 | 12643791 | import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import (
PermissionRequiredMixin as _PermissionRequiredMixin,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import transaction
from django.db.models import ManyToManyField, ProtectedError
from django.forms import ModelMultipleChoiceField, MultipleHiddenInput
from django.forms.formsets import formset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.html import escape
from django.utils.http import is_safe_url
from django.utils.safestring import mark_safe
from django.views.generic import View
from utils.forms import ConfirmationForm, TableConfigurationForm
from utils.functions import (
get_permission_for_model,
handle_protectederror,
normalize_querydict,
)
from utils.signals import clear_webhooks
from utils.tables import paginate_table
class PermissionRequiredMixin(_PermissionRequiredMixin):
"""
Overrides the original `PermissionRequiredMixin` class to handle the
`LOGIN_REQUIRED` with `*.view_*` permission.
"""
def has_permission(self):
if (
not settings.LOGIN_REQUIRED
and isinstance(self.permission_required, str)
and ".view_" in self.permission_required
):
return True
else:
return super().has_permission()
class GetReturnURLMixin(object):
"""
Provides logic for determining where a user should be redirected
after processing a form.
"""
default_return_url = None
def get_return_url(self, request, instance=None):
# Check if `return_url` was specified as a query parameter or form
# data, use this URL only if it's safe
query_param = request.GET.get("return_url") or request.POST.get("return_url")
if query_param and is_safe_url(
url=query_param, allowed_hosts=request.get_host()
):
return query_param
# Check if the object being modified (if any) has an absolute URL
if (
instance is not None
and instance.pk
and hasattr(instance, "get_absolute_url")
):
return instance.get_absolute_url()
# Fall back to the default URL (if specified) for the view
if self.default_return_url is not None:
return reverse(self.default_return_url)
# Try to resolve the list view for the object
if hasattr(self, "queryset"):
model_opts = self.queryset.model._meta
try:
return reverse(f"{model_opts.app_label}:{model_opts.model_name}_list")
except NoReverseMatch:
pass
# If all fails, send the user to the homepage
return reverse("home")
class TableConfigurationMixin(object):
"""
Provides default functions implementation to handle table configuration
form.
"""
def table_configuration_form(self, table):
return TableConfigurationForm(table=table)
def post(self, request, **kwargs):
table = self.table(self.queryset)
form = TableConfigurationForm(table=table, data=request.POST)
if form.is_valid():
preference = f"tables.{self.table.__name__}.columns".lower()
if "save" in request.POST:
request.user.preferences.set(
preference, form.cleaned_data["columns"], commit=True
)
elif "reset" in request.POST:
request.user.preferences.delete(preference, commit=True)
messages.success(request, "Your preferences have been updated.")
return redirect(request.get_full_path())
class ObjectView(PermissionRequiredMixin, View):
"""
Retrieves a single object for display.
"""
queryset = None
template_name = None
def get_template_name(self):
"""
Returns self.template_name if set. Otherwise, resolves the template path by
model app_label and name.
"""
if self.template_name:
return self.template_name
model_opts = self.queryset.model._meta
return f"{model_opts.app_label}/{model_opts.model_name}/view.html"
def get_extra_context(self, request, instance):
"""
Returns any additional context data for the template.
"""
return {}
def get(self, request, *args, **kwargs):
"""
Generic GET handler for accessing an object.
"""
instance = get_object_or_404(self.queryset, **kwargs)
return render(
request,
self.get_template_name(),
{
"instance": instance,
**self.get_extra_context(request, instance),
},
)
class ObjectChildrenView(TableConfigurationMixin, ObjectView):
"""
Displays a table of child objects associated with the parent object.
"""
queryset = None
child_model = None
table = None
filterset = None
filterset_form = None
template_name = None
def get_children(self, request, parent):
"""
Returns a `QuerySet` of child objects.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement get_children()"
)
def prepare_table_data(self, request, queryset, parent):
"""
Provides a hook for subclassed views to modify data before initializing the
table.
"""
return queryset
def get(self, request, *args, **kwargs):
"""
GET handler for rendering child objects.
"""
instance = get_object_or_404(self.queryset, **kwargs)
child_objects = self.get_children(request, instance)
if self.filterset:
child_objects = self.filterset(request.GET, child_objects).qs
permissions = {}
for action in ("add", "change", "delete"):
perm_name = get_permission_for_model(self.child_model, action)
permissions[action] = request.user.has_perm(perm_name)
table = self.table(
self.prepare_table_data(request, child_objects, instance), user=request.user
)
# Determine whether to display bulk action checkboxes
if "pk" in table.base_columns and (
permissions["change"] or permissions["delete"]
):
table.columns.show("pk")
paginate_table(table, request)
return render(
request,
self.get_template_name(),
{
"instance": instance,
"table": table,
"table_configuration_form": self.table_configuration_form(table),
"permissions": permissions,
"filter_form": self.filterset_form(request.GET, label_suffix="")
if self.filterset_form
else None,
**self.get_extra_context(request, instance),
},
)
class ObjectListView(PermissionRequiredMixin, TableConfigurationMixin, View):
"""
Lists a series of objects.
"""
queryset = None
filterset = None
filterset_form = None
table = None
template_name = "generic/object_list.html"
def extra_context(self):
return {}
def alter_queryset(self):
"""
Provides a hook to change the queryset before building the table.
"""
pass
def get_table(self, request, permissions):
table = self.table(self.queryset, user=request.user)
if "pk" in table.base_columns and (
permissions["change"] or permissions["delete"]
):
table.columns.show("pk")
return table
def get(self, request):
model = self.queryset.model
content_type = ContentType.objects.get_for_model(model)
self.alter_queryset()
if self.filterset:
self.queryset = self.filterset(request.GET, self.queryset).qs
# Compile a dictionary indicating which permissions are available to the current user for this model
permissions = {}
for action in ("add", "change", "delete", "view"):
perm_name = get_permission_for_model(model, action)
permissions[action] = request.user.has_perm(perm_name)
# Render the objects table
table = self.get_table(request, permissions)
paginate_table(table, request)
context = {
"content_type": content_type,
"table": table,
"table_configuration_form": self.table_configuration_form(table),
"permissions": permissions,
"filter_form": self.filterset_form(request.GET, label_suffix="")
if self.filterset_form
else None,
}
context.update(self.extra_context())
return render(request, self.template_name, context)
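# Illustrative sketch (editor's addition): a hypothetical ObjectListView
# subclass. `Router`, `RouterFilterSet`, `RouterFilterForm` and `RouterTable`
# are assumed names, not defined in this module.
#
#     class RouterListView(ObjectListView):
#         queryset = Router.objects.all()
#         filterset = RouterFilterSet
#         filterset_form = RouterFilterForm
#         table = RouterTable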
class ObjectEditView(GetReturnURLMixin, PermissionRequiredMixin, View):
"""
    Creates or edits a single object.
"""
queryset = None
model_form = None
template_name = "generic/object_edit.html"
def get_object(self, kwargs):
if "pk" in kwargs:
o = get_object_or_404(self.queryset, pk=kwargs["pk"])
else:
return self.queryset.model()
# Take a snapshot of change-logged models
if hasattr(o, "snapshot"):
o.snapshot()
return o
def alter_object(self, instance, request, url_args, url_kwargs):
"""
Allows views to add extra info to an object before it is processed.
For example, a parent object can be defined given some parameter from
the request URL.
"""
return instance
def dispatch(self, request, *args, **kwargs):
# Determine required permission based on whether we are editing an existing object
self._permission_action = "change" if kwargs else "add"
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
o = self.alter_object(self.get_object(kwargs), request, args, kwargs)
initial_data = normalize_querydict(request.GET)
form = self.model_form(instance=o, initial=initial_data)
return render(
request,
self.template_name,
{
"object": o,
"object_type": self.queryset.model._meta.verbose_name,
"form": form,
"return_url": self.get_return_url(request, instance=o),
},
)
def post(self, request, *args, **kwargs):
logger = logging.getLogger("peering.manager.views.ObjectEditView")
o = self.alter_object(self.get_object(kwargs), request, args, kwargs)
form = self.model_form(data=request.POST, files=request.FILES, instance=o)
if form.is_valid():
logger.debug("form validation was successful")
with transaction.atomic():
object_created = form.instance.pk is None
o = form.save()
msg = f"{'Created' if object_created else 'Modified'} {self.queryset.model._meta.verbose_name}"
logger.info(f"{msg} {o} (pk: {o.pk})")
if hasattr(o, "get_absolute_url"):
msg = f'{msg} <a href="{o.get_absolute_url()}">{escape(o)}</a>'
else:
msg = f"{msg} {escape(o)}"
messages.success(request, mark_safe(msg))
if "_addanother" in request.POST:
redirect_url = request.path
if "return_url" in request.GET:
redirect_url += f"?return_url={request.GET.get('return_url')}"
return redirect(redirect_url)
return redirect(self.get_return_url(request, instance=o))
else:
logger.debug("form validation failed")
return render(
request,
self.template_name,
{
"object": o,
"object_type": self.queryset.model._meta.verbose_name,
"form": form,
"return_url": self.get_return_url(request, instance=o),
},
)
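# Illustrative sketch (editor's addition): a hypothetical ObjectEditView
# subclass using the alter_object() hook to attach a parent taken from the URL.
# `Connection`, `ConnectionForm` and `Router` are assumed names.
#
#     class ConnectionEditView(ObjectEditView):
#         queryset = Connection.objects.all()
#         model_form = ConnectionForm
#
#         def alter_object(self, instance, request, url_args, url_kwargs):
#             if "router" in url_kwargs:
#                 instance.router = get_object_or_404(Router, pk=url_kwargs["router"])
#             return instance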
class ObjectDeleteView(GetReturnURLMixin, PermissionRequiredMixin, View):
"""
Deletes a single object.
"""
queryset = None
template_name = "generic/object_delete.html"
def get_object(self, kwargs):
o = get_object_or_404(self.queryset, pk=kwargs["pk"])
# Take a snapshot of change-logged models
if hasattr(o, "snapshot"):
o.snapshot()
return o
def get(self, request, **kwargs):
o = self.get_object(kwargs)
form = ConfirmationForm(initial=request.GET)
return render(
request,
self.template_name,
{
"object": o,
"object_type": self.queryset.model._meta.verbose_name,
"form": form,
"return_url": self.get_return_url(request, instance=o),
},
)
def post(self, request, **kwargs):
logger = logging.getLogger("peering.manager.views.ObjectDeleteView")
o = self.get_object(kwargs)
form = ConfirmationForm(request.POST)
if form.is_valid():
logger.debug("form validation was successful")
o.delete()
msg = f"Deleted {self.queryset.model._meta.verbose_name} {o}"
logger.info(msg)
messages.success(request, msg)
return_url = form.cleaned_data.get("return_url")
if return_url is not None and is_safe_url(
url=return_url, allowed_hosts=request.get_host()
):
return redirect(return_url)
else:
return redirect(self.get_return_url(request, instance=o))
else:
logger.debug("form validation failed")
return render(
request,
self.template_name,
{
"object": o,
"object_type": self.queryset.model._meta.verbose_name,
"form": form,
"return_url": self.get_return_url(request, instance=o),
},
)
class BulkEditView(GetReturnURLMixin, PermissionRequiredMixin, View):
"""
Edits objects in bulk.
"""
queryset = None
filterset = None
table = None
form = None
template_name = "generic/object_bulk_edit.html"
def get(self, request):
return redirect(self.get_return_url(request))
def post(self, request, **kwargs):
logger = logging.getLogger("peering.manager.views.BulkEditView")
model = self.queryset.model
# If we are editing *all* objects in the queryset, replace the PK list with
# all matched objects
if request.POST.get("_all") and self.filterset is not None:
pk_list = self.filterset(
request.GET, self.queryset.values_list("pk", flat=True)
).qs
else:
pk_list = request.POST.getlist("pk")
# Include the PK list as initial data for the form
initial_data = {"pk": pk_list}
if "_apply" in request.POST:
form = self.form(model, request.POST, initial=initial_data)
if form.is_valid():
logger.debug("form validation was successful")
fields = [field for field in form.fields if field != "pk"]
nullified_fields = request.POST.getlist("_nullify")
try:
with transaction.atomic():
updated_objects = []
for o in self.queryset.filter(pk__in=form.cleaned_data["pk"]):
# Take a snapshot of change-logged models
if hasattr(o, "snapshot"):
o.snapshot()
# Update standard fields. If a field is listed in _nullify
# delete its value
for name in fields:
try:
model_field = model._meta.get_field(name)
except FieldDoesNotExist:
# This form field is used to modify a field rather
# than set its value directly
model_field = None
# Handle nullification
if (
name in form.nullable_fields
and name in nullified_fields
):
if isinstance(model_field, ManyToManyField):
getattr(o, name).set([])
else:
setattr(
o, name, None if model_field.null else ""
)
# ManyToManyFields
elif isinstance(model_field, ManyToManyField):
if form.cleaned_data[name]:
getattr(o, name).set(form.cleaned_data[name])
# Normal fields
elif name in form.changed_data:
setattr(o, name, form.cleaned_data[name])
o.full_clean()
o.save()
updated_objects.append(o)
logger.debug(f"saved {o} (pk: {o.pk})")
# Add/remove tags
if form.cleaned_data.get("add_tags", None):
o.tags.add(*form.cleaned_data["add_tags"])
if form.cleaned_data.get("remove_tags", None):
o.tags.remove(*form.cleaned_data["remove_tags"])
if updated_objects:
count = len(updated_objects)
msg = f"Updated {count} {model._meta.verbose_name if count == 1 else model._meta.verbose_name_plural}"
logger.info(msg)
messages.success(self.request, msg)
return redirect(self.get_return_url(request))
except ValidationError as e:
messages.error(
self.request, f"{o} failed validation: {', '.join(e.messages)}"
)
clear_webhooks.send(sender=self)
else:
logger.debug("form validation failed")
else:
form = self.form(model, initial=initial_data)
# Retrieve objects being edited
table = self.table(self.queryset.filter(pk__in=pk_list), orderable=False)
if "actions" in table.base_columns:
table.columns.hide("actions")
if not table.rows:
messages.warning(
request, f"No {model._meta.verbose_name_plural} were selected."
)
return redirect(self.get_return_url(request))
return render(
request,
self.template_name,
{
"form": form,
"table": table,
"object_type_plural": model._meta.verbose_name_plural,
"return_url": self.get_return_url(request),
},
)
class BulkDeleteView(GetReturnURLMixin, PermissionRequiredMixin, View):
"""
Deletes objects in bulk.
"""
queryset = None
filterset = None
table = None
form = None
template_name = "generic/object_bulk_delete.html"
def get(self, request):
return redirect(self.get_return_url(request))
def post(self, request, **kwargs):
logger = logging.getLogger("peering.manager.views.BulkDeleteView")
model = self.queryset.model
# Are we deleting *all* objects in the queryset or just a selected subset?
if request.POST.get("_all"):
qs = model.objects.all()
if self.filterset is not None:
qs = self.filterset(request.GET, qs).qs
pk_list = qs.only("pk").values_list("pk", flat=True)
else:
pk_list = [int(pk) for pk in request.POST.getlist("pk")]
form_cls = self.get_form()
if "_confirm" in request.POST:
form = form_cls(request.POST)
if form.is_valid():
logger.debug("form validation was successful")
# Delete objects
queryset = self.queryset.filter(pk__in=pk_list)
deleted_count = queryset.count()
try:
for obj in queryset:
# Take a snapshot of change-logged models
if hasattr(obj, "snapshot"):
obj.snapshot()
obj.delete()
except ProtectedError as e:
logger.info(
"caught ProtectedError while attempting to delete objects"
)
handle_protectederror(queryset, request, e)
return redirect(self.get_return_url(request))
msg = f"Deleted {deleted_count} {model._meta.verbose_name if deleted_count == 1 else model._meta.verbose_name_plural}"
logger.info(msg)
messages.success(request, msg)
return redirect(self.get_return_url(request))
else:
logger.debug("form validation failed")
else:
form = form_cls(
initial={"pk": pk_list, "return_url": self.get_return_url(request)}
)
# Retrieve objects being deleted
table = self.table(self.queryset.filter(pk__in=pk_list), orderable=False)
if "actions" in table.base_columns:
table.columns.hide("actions")
if not table.rows:
messages.warning(
request,
f"No {model._meta.verbose_name_plural} were selected for deletion.",
)
return redirect(self.get_return_url(request))
return render(
request,
self.template_name,
{
"form": form,
"object_type_plural": model._meta.verbose_name_plural,
"table": table,
"return_url": self.get_return_url(request),
},
)
def get_form(self):
"""
        Provides a standard bulk delete form if none has been specified for the view.
"""
class BulkDeleteForm(ConfirmationForm):
pk = ModelMultipleChoiceField(
queryset=self.queryset, widget=MultipleHiddenInput
)
if self.form:
return self.form
return BulkDeleteForm
class ImportFromObjectView(GetReturnURLMixin, PermissionRequiredMixin, View):
queryset = None
custom_formset = None
form_model = None
template_name = "generic/object_import_from_base.html"
def get_base_objects(self, pk_list):
"""
Returns the list of objects to be used as dependencies.
"""
if not self.queryset:
return []
else:
return list(self.queryset.filter(pk__in=pk_list))
def process_base_object(self, request, base):
return None
def sort_objects(self, object_list):
return []
def get(self, request):
# Don't allow direct GET requests
return redirect(self.get_return_url(request))
def post(self, request):
"""
        The form has been submitted; process it.
"""
logger = logging.getLogger("peering.manager.views.ImportFromObjectView")
if "_add" in request.POST and not request.POST.getlist("pk"):
messages.error(request, "No objects selected.")
return redirect(self.get_return_url(request))
# Prepare the form
if not self.custom_formset:
ObjectFormSet = formset_factory(self.form_model, extra=0)
else:
ObjectFormSet = formset_factory(
self.form_model, formset=self.custom_formset, extra=0
)
# Get dependencies
base_objects = self.get_base_objects(request.POST.getlist("pk"))
if not base_objects:
# We don't have base objects to handle, proceed as if we were in the next
# step of the form (object creation)
formset = ObjectFormSet(data=request.POST)
else:
            # Process each base object and use the results to pre-fill the formset
processed_base_objects = [
self.process_base_object(request, o) for o in base_objects
]
formset = ObjectFormSet(initial=self.sort_objects(processed_base_objects))
created_objects = []
if formset.is_valid():
logger.debug("formset validation was successful")
with transaction.atomic():
for form in formset:
if form.is_valid():
instance = form.save()
created_objects.append(instance)
if created_objects:
count = len(created_objects)
msg = f"Imported {count} {created_objects[0]._meta.verbose_name if count == 1 else created_objects[0]._meta.verbose_name_plural}"
logger.info(msg)
messages.success(request, msg)
return redirect(self.get_return_url(request))
else:
logger.debug("formset validation failed")
return render(
request,
self.template_name,
{
"formset": formset,
"object_type": self.form_model._meta.model._meta.verbose_name,
"return_url": self.get_return_url(request),
},
)
|
atlas/foundations_core_cli/src/test/job_submission/test_config.py | DeepLearnI/atlas | 296 | 12643797 | <gh_stars>100-1000
from foundations_spec import *
from foundations_core_cli.job_submission.config import load
class TestJobSubmissionConfig(Spec):
mock_config_listing_klass = let_patch_mock_with_conditional_return('foundations_core_cli.typed_config_listing.TypedConfigListing')
exit_mock = let_patch_mock('sys.exit')
print_mock = let_patch_mock('builtins.print')
@let
def mock_config_listing(self):
mock = ConditionalReturn()
mock.config_path.return_when(None, self.config_name)
mock.update_config_manager_with_config = Mock()
return mock
@let
def config_name(self):
return self.faker.name()
@set_up
def set_up(self):
self.mock_config_listing_klass.return_when(self.mock_config_listing, 'submission')
def test_exits_when_config_missing(self):
load(self.config_name)
self.exit_mock.assert_called_with(1)
def test_prints_warning_message_when_config_missing(self):
load(self.config_name)
self.print_mock.assert_called_with(f"Could not find submission configuration with name: `{self.config_name}`")
def test_does_not_exit_when_config_present(self):
self._set_up_config()
load(self.config_name)
self.exit_mock.assert_not_called()
def test_does_not_print_error_when_config_present(self):
self._set_up_config()
load(self.config_name)
self.print_mock.assert_not_called()
def test_loads_config_into_config_manager_when_config_present(self):
from foundations_local_docker_scheduler_plugin.config.scheduler import translate
self._set_up_config()
load(self.config_name)
self.mock_config_listing.update_config_manager_with_config.assert_called_with(self.config_name, translate)
def _set_up_config(self):
self.mock_config_listing.config_path.clear()
self.mock_config_listing.config_path.return_when(self.faker.uri_path(), self.config_name)
|
benchmarks/media-streaming/dataset/files/filegen/video_gen.py | jonasbn/cloudsuite | 103 | 12643867 | import re
from subprocess import call
import os
from sys import argv
from random import randint
video_io_filenames ={}
config_param_path = None
video_file_info_path = None
textpaths_dir = None
output_videos_dir = None
file_resolution_info = None
videos_path = None
videos_js_path = None
def bytes_to_MB(number_of_bytes):
factor = 1024*1024
number_of_bytes = float(number_of_bytes)
number_of_bytes /= factor
precision = 1
number_of_bytes = round(number_of_bytes, precision)
return number_of_bytes
def generate_video_file_with_requested_size(requested_video_size_in_bytes,input_file_path,resolution,output_video_name):
file_stats = os.stat(input_file_path)
actual_file_size_bytes = int(file_stats.st_size)
size_diff = int(bytes_to_MB(actual_file_size_bytes)) - int(bytes_to_MB(requested_video_size_in_bytes))
if size_diff < 0:
num_concatenations = int(requested_video_size_in_bytes / actual_file_size_bytes) + 1
else:
num_concatenations = 1
output_file_name = os.path.splitext(output_video_name)[0]
in_txt_file_path = textpaths_dir + output_file_name + ".txt"
out_mp4_file_path = output_videos_dir + str(output_video_name)
video_io_filenames[in_txt_file_path] = out_mp4_file_path
input_file_path = input_file_path
in_file = open(in_txt_file_path, "a+")
for num in range(int(num_concatenations)):
in_file.write("file " + "'" + input_file_path + "'\n")
def get_resolution():
f = open(file_resolution_info, 'r')
resolution = None
for line in f:
if "video_quality" in line:
resolution = line.split("=")[1]
f.close()
resolution = resolution.strip()
return resolution
def getopts(argv):
opts = {}
while argv:
if argv[0][0] == '-':
opts[argv[0]] = argv[1]
argv = argv[1:]
return opts
def get_video_info():
video_request_dict = {}
f = open(video_file_info_path, 'r')
video_names_list = []
for line in f:
line = re.sub('[ \t]+', ' ', line)
if line[0] != "#":
video_info = line.split(" ")
size = int(video_info[1])
video_request_dict[video_info[0]] = size
video_names_list.append(video_info[0])
f.close()
return video_request_dict,video_names_list
def parse_videos_info(resolution,videos_path):
input_video_collection = []
complete_path = videos_path+"/"+resolution+"/"
for file in os.listdir(complete_path):
if file.endswith(".mp4"):
input_video_collection.append(os.path.join(complete_path, file))
return input_video_collection
if __name__ == '__main__':
myargs = getopts(argv)
if '-p' not in myargs or '-v' not in myargs or '-s' not in myargs or '-o' not in myargs:
raise ValueError('Please provide a valid config files.')
exit(1)
else:
file_resolution_info = myargs["-p"]
video_file_info_path = myargs["-v"]
videos_path = myargs["-s"]
output_videos_dir = myargs["-o"]
textpaths_dir = "/tmp/textpaths/"
videos_js_path = output_videos_dir+"/"+"test_videos.js"
resolution = get_resolution()
if resolution is None:
raise ValueError('Please provide a valid config param file.')
exit(1)
input_video_collection = parse_videos_info(resolution,videos_path)
video_request_dict,video_names_list = get_video_info()
videos_js_file = open(videos_js_path,"a+")
videos_list_in_js = ""
for key in video_names_list:
output_video_name = "full-"+resolution+"-"+key+".mp4"
requested_video_size_in_bytes = video_request_dict[key]
local_file_path = input_video_collection[0]
del input_video_collection[0]
input_video_collection.append(local_file_path)
generate_video_file_with_requested_size(requested_video_size_in_bytes,local_file_path,resolution,output_video_name)
videos_list_in_js = videos_list_in_js+'"'+output_video_name+'",'
videos_js_file.write("var videos"+resolution+" = [" +videos_list_in_js[:-1]+"]\n")
# Execute ffmpeg to concatenate these input videos to get output videos of required sizes
for in_txt_filename in video_io_filenames.keys():
ffmpeg_cmd = "ffmpeg -y -loglevel error -f concat -safe 0 -i " + in_txt_filename + " -c copy " + video_io_filenames.get(in_txt_filename);
call(ffmpeg_cmd, shell=True)
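# Illustrative invocation sketch (editor's addition); the file and directory
# names below are assumptions, not files shipped with the benchmark:
#
#     python video_gen.py -p config_params.txt -v video_file_info.txt \
#                         -s /datasets/source_videos -o /datasets/output/
#
# -p points at the file holding video_quality (resolution), -v lists the
# requested output names and sizes, -s is the source video tree, and -o is the
# directory that receives the concatenated MP4 files and test_videos.js.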
|
habitat_baselines/motion_planning/motion_plan.py | srama2512/habitat-api | 355 | 12643884 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import os.path as osp
import sys
import uuid
from typing import Callable, List, Optional
import numpy as np
from gym import spaces
from PIL import Image
from habitat_sim.utils.viz_utils import save_video
matched_dir = glob.glob(
osp.join(osp.expanduser("~"), "ompl-1.5.*/py-bindings")
)
if len(matched_dir) > 0:
sys.path.insert(0, matched_dir[0])
try:
from ompl import base as ob
from ompl import util as ou
except ImportError:
ou = None
from copy import copy
from yacs.config import CfgNode
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import CollisionDetails, make_border_red
from habitat_baselines.motion_planning.grasp_generator import GraspGenerator
from habitat_baselines.motion_planning.mp_sim import HabMpSim, MpSim
from habitat_baselines.motion_planning.mp_spaces import JsMpSpace, MpSpace
from habitat_baselines.motion_planning.robot_target import RobotTarget
def is_ompl_installed() -> bool:
return ou is not None
class MotionPlanner:
def __init__(self, sim: RearrangeSim, config: CfgNode):
        if not is_ompl_installed():
raise ImportError("Need to install OMPL to use motion planning")
self._config = config
self._reach_for_obj = None
self._should_render = False
self._coll_check_count: int = 0
self._num_calls = 0
self._sphere_id: Optional[int] = None
self._ignore_names: List[str] = []
self.traj_viz_id: Optional[int] = None
self._sim = sim
os.makedirs(self._config.DEBUG_DIR, exist_ok=True)
self._use_sim = self._get_sim()
self.grasp_gen: Optional[GraspGenerator] = None
def set_should_render(self, should_render: bool):
self._should_render = should_render
if self._should_render:
for f in glob.glob(f"{self._config.DEBUG_DIR}/*"):
os.remove(f)
def _log(self, txt: str):
"""
Logs text to console only if logging is enabled.
"""
if self._config.VERBOSE:
print("MP:", txt)
@property
def action_space(self):
return spaces.Box(shape=(3,), low=0, high=1, dtype=np.float32)
def _render_debug_image(
self, add_txt: str, before_txt="", should_save=True
):
"""
Render debug utility helper. Renders an image of the current scene to
the debug directory.
"""
pic = self._use_sim.render()
if pic.shape[-1] > 3:
pic = pic[:, :, :3]
im = Image.fromarray(pic)
save_name = "%s/%s%s_%s.jpeg" % (
self._config.DEBUG_DIR,
before_txt,
str(uuid.uuid4())[:4],
add_txt,
)
if should_save:
im.save(save_name)
return pic
def get_mp_space(self) -> MpSpace:
return JsMpSpace(
self._use_sim,
self._sim.ik_helper,
self._num_calls,
self._should_render,
)
def _is_state_valid(self, x: np.ndarray, take_image: bool = False) -> bool:
"""Returns if a state is collision free.
:param take_image: If true, will render a debug image.
"""
self._mp_space.set_arm(x)
if self._ee_margin is not None and self._sphere_id is not None:
self._use_sim.set_position(
self._use_sim.get_ee_pos(), self._sphere_id
)
self._use_sim.micro_step()
did_collide, coll_details = self._use_sim.get_collisions(
self._config.COUNT_OBJ_COLLISIONS, self._ignore_names, True
)
if (
self._ignore_first
or self._use_sim.should_ignore_first_collisions()
) and self._coll_check_count == 0:
self._ignore_names.extend(coll_details.robot_coll_ids)
self._log(
"First run, ignoring collisions from "
+ str(self._ignore_names)
)
self._coll_check_count += 1
if take_image:
self._render_debug_image(f"{did_collide}")
if not self._use_sim.should_ignore_first_collisions():
# We only want to continue to ignore collisions from this if we are
# using a point cloud approach.
self._ignore_names = []
if did_collide and self._should_render:
return False
# Check we satisfy the EE margin, if there is one.
if not self._check_ee_coll(
self._ee_margin, self._sphere_id, coll_details
):
return False
return True
def set_config(
self,
ee_margin: float,
count_obj_collisions: bool,
grasp_thresh: float,
n_gen_grasps: int,
run_cfg: CfgNode,
ignore_first: bool = False,
use_prev: bool = False,
):
"""
Sets up the parameters of this motion planning call.
"""
self._ee_margin = ee_margin
self._count_obj_collisions = count_obj_collisions
self._sphere_id = None
self._run_cfg = run_cfg
self._mp_space = self.get_mp_space()
self._ignore_names = []
self._ignore_first = ignore_first
self._hold_id = self._sim.grasp_mgr.snap_idx
self._use_sim.setup(use_prev)
if self.traj_viz_id is not None:
self._sim.remove_traj_obj(self.traj_viz_id)
self.traj_viz_id = None
self.grasp_gen = GraspGenerator(
self._use_sim,
self._mp_space,
self._sim.ik_helper,
self,
self._should_render,
grasp_thresh,
n_gen_grasps,
self._config.MP_SIM_TYPE == "Priv",
self._config.DEBUG_DIR,
self._config.GRASP_GEN_IS_VERBOSE,
)
def setup_ee_margin(self, obj_id_target: int):
"""
Adds a collision margin sphere around the end-effector if it was
specified in the run config. This sphere intersects with everything but
the robot and currently held object.
"""
use_sim = self._use_sim
if self._ee_margin is not None:
self._sphere_id = use_sim.add_sphere(self._ee_margin)
use_sim.set_targ_obj_idx(obj_id_target)
def remove_ee_margin(self, obj_id_target: int):
"""
Removes the collision margin sphere around the end-effector. If not
called this object is never removed and will cause problems!
:param obj_id_target: ID of the object we are planning towards.
"""
use_sim = self._use_sim
if self._ee_margin is not None:
use_sim.remove_object(self._sphere_id)
use_sim.unset_targ_obj_idx(obj_id_target)
self._sphere_id = None
def get_recent_plan_stats(
self, plan: np.ndarray, robo_targ: RobotTarget, name: str = ""
):
"""
Return logging information about the most recent plan
"""
is_start_bad = False
is_goal_bad = False
if not robo_targ.is_guess and plan is None:
# Planning failed, but was it the planner's fault?
js_start, js_goal = self._mp_space.get_start_goal()
is_start_bad = self._is_state_valid(js_start)
is_goal_bad = self._is_state_valid(js_goal)
return {
f"plan_{name}bad_coll": int(self.was_bad_coll),
f"plan_{name}failure": int(plan is None),
f"plan_{name}guess": robo_targ.is_guess,
f"plan_{name}goal_bad": is_start_bad,
f"plan_{name}start_bad": is_goal_bad,
f"plan_{name}approx": self._is_approx_sol,
}
def motion_plan(
self,
start_js: np.ndarray,
robot_target: RobotTarget,
timeout: int = 30,
ignore_names: Optional[List[str]] = None,
):
"""
Runs the motion planning.
:param timeout: Time in seconds to run the motion planner for. If no
plan is found in the time, returns failure.
:param ignore_names: A list of IDs for objects to ignore collisions
with.
"""
if ignore_names is None:
ignore_names = []
use_sim = self._use_sim
self.was_bad_coll = False
self._is_approx_sol = False
if robot_target.is_guess:
return None
self.hold_id = self._sim.grasp_mgr.snap_idx
use_sim.start_mp()
self._log("Starting plan from %s" % str(start_js))
self._log("Target info %s" % str(robot_target))
self._log(
"Agent position" + str(use_sim.get_robot_transform().translation)
)
env_state = copy(use_sim.capture_state())
self._mp_space.set_env_state(env_state)
self._ignore_names = ["ball_new", *ignore_names]
self._coll_check_count = 0
self.setup_ee_margin(robot_target.obj_id_target)
joint_plan = self._get_path(
self._is_state_valid,
start_js,
robot_target,
use_sim,
self._mp_space,
timeout,
)
if joint_plan is None:
self._mp_space.render_start_targ(
self._run_cfg.VIDEO_DIR,
"mp_fail",
robot_target.ee_target_pos,
f"ep{self._sim.ep_info['episode_id']}",
)
if joint_plan is not None:
self._render_verify_motion_plan(use_sim, robot_target, joint_plan)
self._log("MP: Got plan of length %i" % len(joint_plan))
self.remove_ee_margin(robot_target.obj_id_target)
self._num_calls += 1
# Settle back to the regular environment
use_sim.set_state(env_state)
use_sim.set_arm_pos(start_js)
use_sim.end_mp()
for _ in range(100):
use_sim.micro_step()
use_sim.set_state(env_state)
for _ in range(100):
use_sim.micro_step()
return joint_plan
def _render_verify_motion_plan(
self,
use_sim: MpSim,
robot_target: RobotTarget,
joint_plan: np.ndarray,
) -> None:
"""
Renders the motion plan to a video by teleporting the arm to the
planned joint states. Does not reset the environment state after
finishing. Also sanity checks the motion plan to ensure each joint
        state is truly collision-free.
"""
all_frames = []
# Visualize the target position.
if robot_target.ee_target_pos is not None:
robo_trans = use_sim.get_robot_transform()
use_targ_state = robo_trans.transform_point(
robot_target.ee_target_pos
)
targ_viz_id = use_sim.add_sphere(0.03, color=[0, 0, 1, 1])
use_sim.set_position(use_targ_state, targ_viz_id)
else:
targ_viz_id = None
all_ee_pos = []
for i, joints in enumerate(joint_plan):
use_sim.set_arm_pos(joints)
all_ee_pos.append(use_sim.get_ee_pos())
if self._ee_margin is not None:
use_sim.set_position(
self._use_sim.get_ee_pos(), self._sphere_id
)
did_collide = not self._is_state_valid(joints, True)
if did_collide and self._should_render:
self.was_bad_coll = True
pic = self._render_debug_image(
"", f"{i}_{self._num_calls}", should_save=False
)
if did_collide:
pic = make_border_red(pic)
all_frames.append(pic)
if targ_viz_id is not None:
use_sim.remove_object(targ_viz_id)
dist_to_goal = np.linalg.norm(
use_targ_state - use_sim.get_ee_pos()
)
else:
dist_to_goal = -1.0
save_dir = osp.join(self._run_cfg.VIDEO_DIR, "mp_plans")
os.makedirs(save_dir, exist_ok=True)
mp_name = "ep%s_%i_%.3f" % (
self._sim.ep_info["episode_id"],
self._num_calls,
dist_to_goal,
)
save_video(osp.join(save_dir, mp_name + ".mp4"), all_frames, fps=5.0)
def set_plan_ignore_obj(self, obj_id):
self._reach_for_obj = obj_id
def _get_sim(self) -> MpSim:
"""
The two different simulators used for planning.
"""
if self._config.MP_SIM_TYPE == "Priv":
return HabMpSim(self._sim)
else:
raise ValueError("Unrecognized simulator type")
def _check_ee_coll(
self, ee_margin: float, sphere_id: int, coll_details: CollisionDetails
) -> bool:
if ee_margin is not None:
obj_id = self.hold_id
if obj_id is None:
obj_id = self._reach_for_obj
any_match = any([sphere_id in x for x in coll_details.all_colls])
if any_match:
return False
return True
def _get_path(
self,
is_state_valid: Callable[[np.ndarray], bool],
start_js: np.ndarray,
robot_targ: RobotTarget,
use_sim: MpSim,
mp_space: MpSpace,
timeout: int,
):
"""
Does the low-level path planning with OMPL.
"""
if not self._should_render:
ou.setLogLevel(ou.LOG_ERROR)
dim = mp_space.get_state_dim()
space = ob.RealVectorStateSpace(dim)
bounds = ob.RealVectorBounds(dim)
lims = mp_space.get_state_lims()
for i, lim in enumerate(lims):
bounds.setLow(i, lim[0])
bounds.setHigh(i, lim[1])
space.setBounds(bounds)
si = ob.SpaceInformation(space)
si.setStateValidityChecker(ob.StateValidityCheckerFn(is_state_valid))
si.setup()
pdef = ob.ProblemDefinition(si)
mp_space.set_problem(pdef, space, si, start_js, robot_targ)
planner = mp_space.get_planner(si)
planner.setProblemDefinition(pdef)
planner.setup()
if mp_space.get_range() is not None:
planner.setRange(mp_space.get_range())
solved = planner.solve(timeout)
if not solved:
self._log("Could not find plan")
return None
objective = pdef.getOptimizationObjective()
if objective is not None:
cost = (
pdef.getSolutionPath()
.cost(pdef.getOptimizationObjective())
.value()
)
else:
cost = np.inf
self._log(
"Got a path of length %.2f and cost %.2f"
% (pdef.getSolutionPath().length(), cost)
)
path = pdef.getSolutionPath()
joint_plan = mp_space.convert_sol(path)
self._is_approx_sol = pdef.hasApproximateSolution()
return joint_plan
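# Illustrative usage sketch (editor's addition); the config values and the
# origin of `start_js` / `robot_target` are assumptions, not prescribed here:
#
#     planner = MotionPlanner(sim, mp_config)
#     planner.set_config(ee_margin=0.05, count_obj_collisions=True,
#                        grasp_thresh=0.05, n_gen_grasps=10, run_cfg=run_cfg)
#     joint_plan = planner.motion_plan(start_js, robot_target, timeout=30)
#     stats = planner.get_recent_plan_stats(joint_plan, robot_target)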
|
mmf/datasets/builders/vinvl/dataset.py | sisilmehta2000/mmf | 3,252 | 12643887 | <reponame>sisilmehta2000/mmf<gh_stars>1000+
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import logging
import random
from mmf.datasets.mmf_dataset import MMFDataset
logger = logging.getLogger(__name__)
class VinVLDataset(MMFDataset):
"""The VinVL dataset is a dataset that augments an existing
dataset within MMF. VinVL requires unique inputs for
finetuning and pretraining unsupported by general datasets.
To enable this functionality on arbitrary datasets,
the VinVL dataset contains a base dataset,
and returns an augmented version of samples from the
base dataset.
For example, the VQA2 dataset may return a sample {image, text}
The VinVL dataset when asked for a sample, will return
{image, text', rand_caption, rand_label}
text' = text + labels
rand_caption = text from a random example
rand_label = obj detection labels text for a random example
Why does this exist?
VinVL samples contain rand_caption, and rand_label which require
random choice from the annotations db, and features_db.
Currently general text_processors do not have access to these
databases, instead randomness like mismatched_captions in
masked coco are implemented on the dataset level.
To support VinVL finetuning and pretraining on general datasets,
without a major refactor, the VinVL builder and dataset introduce
a new design pattern to enable processor access to databases.
Interface and Assumptions:
The VinVL dataset assumes:
The sample returned by the base dataset contains a key "text"
with string text.
There exists a label_map json file path in the dataset config
for a json obj containing idx_to_attribute and idx_to_label
maps. VinVL OD uses VG labels, and this map can be downloaded
from https://penzhanwu2.blob.core.windows.net/sgg/
sgg_benchmark/vinvl_model_zoo/VG-SGG-dicts-vgoi6-clipped.json
The features_db points to features generated from the VinVL
feature extraction script, consult the VinVL feature
extraction tutorial for more details.
"""
def __init__(self, config, dataset_type, *args, **kwargs):
if "name" in kwargs:
name = kwargs["name"]
elif "dataset_name" in kwargs:
name = kwargs["dataset_name"]
else:
name = "vinvl"
super().__init__(name, config, dataset_type, *args, **kwargs)
self.add_tags = not "test" == self._dataset_type
self.label_map = self.load_label_map(config.get("label_map"))
def set_base_dataset(self, base_dataset):
self.base_dataset = base_dataset
def init_processors(self):
super().init_processors()
def __len__(self):
return len(self.annotation_db)
def __getitem__(self, idx):
return self.load_item(idx)
def load_item(self, idx):
base_sample = self.base_dataset.load_item(idx)
# assumes sample contains key "text" that is the string text
# when using on vqa2 which returns tokens under key "text"
# change the vqa2 dataset class to return "text"
text_processor_argument = {"text": base_sample["text"]}
if self.add_tags:
text_processor_argument["text_b"] = self.get_label_str(base_sample)
random_caption_idx = random.randint(0, len(self.annotation_db) - 1)
random_caption_sample = self.base_dataset.load_item(random_caption_idx)
random_caption = random_caption_sample["text"]
text_processor_argument["random_captions"] = [random_caption]
random_labels_idx = random.randint(0, len(self.annotation_db) - 1)
random_labels_sample = self.base_dataset.load_item(random_labels_idx)
random_image_tags_str = self.get_label_str(random_labels_sample)
text_processor_argument["random_labels"] = [random_image_tags_str]
processed_caption = self.text_processor(text_processor_argument)
base_sample.update(processed_caption)
return base_sample
def load_label_map(self, map_path):
with open(map_path) as f:
return json.loads(f.read())
def get_label_str(self, sample):
image_labels = sample["image_info_0"].get("labels", [])
label_map = self.label_map.get("idx_to_label", {})
label_str = " ".join([label_map.get(str(id), "") for id in image_labels])
image_attr_labels = sample["image_info_0"].get("attr_labels", [])
attr_map = self.label_map.get("idx_to_attribute", {})
attr_str = " ".join([attr_map.get(str(id), "") for id in image_attr_labels])
accum_str = label_str + " " + attr_str
return accum_str
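# Illustrative usage sketch (editor's addition); the builder normally performs
# this wiring, and `config`, `dataset_type` and `base_dataset` are assumed to
# come from the surrounding MMF setup:
#
#     vinvl = VinVLDataset(config, dataset_type)
#     vinvl.set_base_dataset(base_dataset)   # e.g. a VQA2 dataset instance
#     sample = vinvl[0]  # base sample plus text', random_captions, random_labels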
|
tools/android/modularization/convenience/lookup_dep.py | iridium-browser/iridium-browser | 575 | 12643912 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r'''Finds which build target(s) contain a particular Java class.
This is a utility script for finding out which build target dependency needs to
be added to import a given Java class.
It is a best-effort script.
Example:
Find build target with class FooUtil:
tools/android/modularization/convenience/lookup_dep.py FooUtil
'''
import argparse
import collections
import dataclasses
import json
import logging
import os
import pathlib
import subprocess
import sys
from typing import Dict, List
_SRC_DIR = pathlib.Path(__file__).parents[4].resolve()
sys.path.append(str(_SRC_DIR / 'build' / 'android'))
from pylib import constants
def main():
arg_parser = argparse.ArgumentParser(
description='Finds which build target contains a particular Java class.')
arg_parser.add_argument('-C',
'--output-directory',
help='Build output directory.')
arg_parser.add_argument('classes',
nargs='+',
help=f'Java classes to search for')
arg_parser.add_argument('-v',
'--verbose',
action='store_true',
help=f'Verbose logging.')
arguments = arg_parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if arguments.verbose else logging.WARNING,
format='%(asctime)s.%(msecs)03d %(levelname).1s %(message)s',
datefmt='%H:%M:%S')
if arguments.output_directory:
constants.SetOutputDirectory(arguments.output_directory)
constants.CheckOutputDirectory()
out_dir: str = constants.GetOutDirectory()
index = ClassLookupIndex(pathlib.Path(out_dir))
for class_name in arguments.classes:
class_entries = index.match(class_name)
if not class_entries:
print(f'Could not find build target for class "{class_name}"')
elif len(class_entries) == 1:
class_entry = class_entries[0]
print(f'Class {class_entry.full_class_name} found:')
print(f' "{class_entry.target}"')
else:
print(f'Multiple targets with classes that match "{class_name}":')
print()
for class_entry in class_entries:
print(f' "{class_entry.target}"')
print(f' contains {class_entry.full_class_name}')
print()
@dataclasses.dataclass(frozen=True)
class ClassEntry:
"""An assignment of a Java class to a build target."""
full_class_name: str
target: str
class ClassLookupIndex:
"""A map from full Java class to its build targets.
A class might be in multiple targets if it's bytecode rewritten."""
def __init__(self, build_output_dir: pathlib.Path):
self._build_output_dir = build_output_dir
self._class_index = self._index_root()
def match(self, search_string: str) -> List[ClassEntry]:
"""Get class/target entries where the class matches search_string"""
# Priority 1: Exact full matches
if search_string in self._class_index:
return self._entries_for(search_string)
# Priority 2: Match full class name (any case), if it's a class name
matches = []
lower_search_string = search_string.lower()
if '.' not in lower_search_string:
for full_class_name in self._class_index:
package_and_class = full_class_name.rsplit('.', 1)
if len(package_and_class) < 2:
continue
class_name = package_and_class[1]
class_lower = class_name.lower()
if class_lower == lower_search_string:
matches.extend(self._entries_for(full_class_name))
if matches:
return matches
# Priority 3: Match anything
for full_class_name in self._class_index:
if lower_search_string in full_class_name.lower():
matches.extend(self._entries_for(full_class_name))
return matches
def _entries_for(self, class_name) -> List[ClassEntry]:
return [
ClassEntry(class_name, target)
for target in self._class_index.get(class_name)
]
def _index_root(self) -> Dict[str, List[str]]:
"""Create the class to target index."""
logging.debug('Running list_java_targets.py...')
list_java_targets_command = [
'build/android/list_java_targets.py', '--type=java_library',
'--gn-labels', '--build', '--print-build-config-paths',
f'--output-directory={self._build_output_dir}'
]
list_java_targets_run = subprocess.run(list_java_targets_command,
cwd=_SRC_DIR,
capture_output=True,
text=True,
check=True)
logging.debug('... done.')
# Parse output of list_java_targets.py with mapping of build_target to
# build_config
root_build_targets = list_java_targets_run.stdout.split('\n')
class_index = collections.defaultdict(list)
for target_line in root_build_targets:
# Skip empty lines
if not target_line:
continue
target_line_parts = target_line.split(': ')
assert len(target_line_parts) == 2, target_line_parts
target, build_config_path = target_line_parts
# Read the location of the java_sources_file from the build_config
with open(build_config_path) as build_config_contents:
build_config: Dict = json.load(build_config_contents)
deps_info = build_config['deps_info']
sources_path = deps_info.get('java_sources_file')
if not sources_path:
# TODO(crbug.com/1108362): Handle targets that have no
# deps_info.sources_path but contain srcjars.
continue
# Read the java_sources_file, indexing the classes found
with open(self._build_output_dir / sources_path) as sources_contents:
sources_lines = sources_contents
for source_line in sources_lines:
source_path = pathlib.Path(source_line.strip())
java_class = self._parse_full_java_class(source_path)
if java_class:
class_index[java_class].append(target)
continue
return class_index
def _parse_full_java_class(self, source_path: pathlib.Path) -> str:
"""Guess the fully qualified class name from the path to the source file."""
if source_path.suffix != '.java':
logging.warning(f'"{source_path}" does not have the .java suffix')
return None
directory_path: pathlib.Path = source_path.parent
package_list_reversed = []
for part in reversed(directory_path.parts):
package_list_reversed.append(part)
if part in ('com', 'org'):
break
else:
logging.debug(f'File {source_path} not in a subdir of "org" or "com", '
'cannot detect package heuristically.')
return None
package = '.'.join(reversed(package_list_reversed))
class_name = source_path.stem
return f'{package}.{class_name}'
if __name__ == '__main__':
main()
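# Illustrative programmatic usage sketch (editor's addition); the output
# directory path is an assumption:
#
#     index = ClassLookupIndex(pathlib.Path('out/Default'))
#     for entry in index.match('FooUtil'):
#         print(entry.target, entry.full_class_name)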
|
auto_editor/__init__.py | brighteyed/auto-editor | 835 | 12643926 | '''__init__.py'''
__version__ = '21.40.2dev'
version = '21w40b-dev'
|
att_classification/tflib/data/__init__.py | sageprogrammer/STGAN | 405 | 12643983 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tflib.data.dataset import *
from tflib.data.disk_image import *
from tflib.data.memory_data import *
from tflib.data.tfrecord import *
from tflib.data.tfrecord_creator import *
|
merge_two_sorted_lists/solution.py | mahimadubey/leetcode-python | 528 | 12643991 | """
Merge two sorted linked lists and return it as a new list. The new list should
be made by splicing together the nodes of the first two lists.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
        Iterative merge without using a dummy head node.
"""
res = None
res_end = None
while l1 is not None and l2 is not None:
if l1.val < l2.val:
if res is None:
res = l1
res_end = res
else:
res_end.next = l1
res_end = res_end.next
l1 = l1.next
else:
if res is None:
res = l2
res_end = res
else:
res_end.next = l2
res_end = res_end.next
l2 = l2.next
if l1 is not None:
if res is not None:
res_end.next = l1
else:
res = l1
if l2 is not None:
if res is not None:
res_end.next = l2
else:
res = l2
return res
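# Minimal runnable sketch (editor's addition). ListNode is only given as a
# comment above, so a local definition is included here for the example.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    def build(values):
        # Build a singly-linked list from a Python list, back to front
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head

    merged = Solution().mergeTwoLists(build([1, 3, 5]), build([2, 4, 6]))
    values = []
    while merged is not None:
        values.append(merged.val)
        merged = merged.next
    print(values)  # [1, 2, 3, 4, 5, 6]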
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GLES2/NV/framebuffer_blit.py | ShujaKhalid/deep-rl | 210 | 12644010 | <reponame>ShujaKhalid/deep-rl
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_framebuffer_blit'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_framebuffer_blit',error_checker=_errors._error_checker)
GL_DRAW_FRAMEBUFFER_BINDING_NV=_C('GL_DRAW_FRAMEBUFFER_BINDING_NV',0x8CA6)
GL_DRAW_FRAMEBUFFER_NV=_C('GL_DRAW_FRAMEBUFFER_NV',0x8CA9)
GL_READ_FRAMEBUFFER_BINDING_NV=_C('GL_READ_FRAMEBUFFER_BINDING_NV',0x8CAA)
GL_READ_FRAMEBUFFER_NV=_C('GL_READ_FRAMEBUFFER_NV',0x8CA8)
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLbitfield,_cs.GLenum)
def glBlitFramebufferNV(srcX0,srcY0,srcX1,srcY1,dstX0,dstY0,dstX1,dstY1,mask,filter):pass
|
ethtx/decoders/semantic/decoder.py | 0xbhoori/ethtx | 238 | 12644015 | <reponame>0xbhoori/ethtx
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from ethtx.decoders.semantic.abc import ISemanticDecoder
from ethtx.decoders.semantic.balances import SemanticBalancesDecoder
from ethtx.decoders.semantic.calls import SemanticCallsDecoder
from ethtx.decoders.semantic.events import SemanticEventsDecoder
from ethtx.decoders.semantic.metadata import SemanticMetadataDecoder
from ethtx.decoders.semantic.transfers import SemanticTransfersDecoder
from ethtx.models.decoded_model import (
DecodedTransactionMetadata,
DecodedTransaction,
DecodedEvent,
DecodedTransfer,
DecodedBalance,
DecodedCall,
Proxy,
)
from ethtx.models.objects_model import BlockMetadata, TransactionMetadata
class SemanticDecoder(ISemanticDecoder):
def decode_transaction(
self,
block: BlockMetadata,
transaction: DecodedTransaction,
proxies: Dict[str, Proxy],
chain_id: str,
) -> DecodedTransaction:
transaction.metadata = self.decode_metadata(
block, transaction.metadata, chain_id
)
transaction.events = self.decode_events(
transaction.events, transaction.metadata, proxies
)
transaction.calls = self.decode_calls(
transaction.calls, transaction.metadata, proxies
)
transaction.transfers = self.decode_transfers(
transaction.transfers, transaction.metadata
)
transaction.balances = self.decode_balances(
transaction.balances, transaction.metadata
)
return transaction
def decode_metadata(
self,
block_metadata: BlockMetadata,
tx_metadata: TransactionMetadata,
chain_id: str,
) -> DecodedTransactionMetadata:
return SemanticMetadataDecoder(repository=self.repository).decode(
block_metadata=block_metadata, tx_metadata=tx_metadata, chain_id=chain_id
)
def decode_event(
self,
event: DecodedEvent,
tx_metadata: DecodedTransactionMetadata,
proxies: Optional[Dict[str, Proxy]] = None,
) -> DecodedEvent:
return SemanticEventsDecoder(repository=self.repository).decode(
events=event, tx_metadata=tx_metadata, proxies=proxies or {}
)
def decode_events(
self,
events: List[DecodedEvent],
tx_metadata: DecodedTransactionMetadata,
proxies: Optional[Dict[str, Proxy]] = None,
) -> List[DecodedEvent]:
return SemanticEventsDecoder(repository=self.repository).decode(
events=events, tx_metadata=tx_metadata, proxies=proxies or {}
)
def decode_calls(
self,
call: DecodedCall,
tx_metadata: DecodedTransactionMetadata,
proxies: Optional[Dict[str, Proxy]] = None,
) -> DecodedCall:
return SemanticCallsDecoder(repository=self.repository).decode(
call=call, tx_metadata=tx_metadata, proxies=proxies or {}
)
def decode_call(
self,
call: DecodedCall,
tx_metadata: DecodedTransactionMetadata,
proxies: Optional[Dict[str, Proxy]] = None,
) -> DecodedCall:
return SemanticCallsDecoder(repository=self.repository).decode(
call=call, tx_metadata=tx_metadata, proxies=proxies or {}
)
def decode_transfers(
self, transfers: List[DecodedTransfer], tx_metadata: DecodedTransactionMetadata
) -> List[DecodedTransfer]:
return SemanticTransfersDecoder(repository=self.repository).decode(
transfers=transfers, tx_metadata=tx_metadata
)
def decode_balances(
self, balances: List[DecodedBalance], tx_metadata: DecodedTransactionMetadata
) -> List[DecodedBalance]:
return SemanticBalancesDecoder(repository=self.repository).decode(
balances=balances, tx_metadata=tx_metadata
)
|
netutils_linux_hardware/memory.py | strizhechenko/netutils-linux | 749 | 12644018 | <filename>netutils_linux_hardware/memory.py
# coding=utf-8
import yaml
from six import iteritems
from netutils_linux_hardware.grade import Grade
from netutils_linux_hardware.parser import YAMLLike, Parser
from netutils_linux_hardware.subsystem import Subsystem
class Memory(Subsystem):
""" Everything about Memory: type, speed, size, swap """
def parse(self):
return {
'size': self.read(MemInfo, 'meminfo'),
'devices': self.read(MemInfoDMI, 'dmidecode'),
}
def rate(self):
meminfo = self.data.get('memory')
if meminfo:
return self.folding.fold({
'devices': self.__devices(meminfo.get('devices')),
'size': self.__size(meminfo.get('size')),
}, self.folding.SUBSYSTEM)
def __devices(self, devices):
if not devices:
return 1
return self.folding.fold(dict((handle, self.__device(device))
for handle, device in devices.items()),
self.folding.SUBSYSTEM)
def __device(self, device):
return self.folding.fold({
'size': Grade.int(device.get('size', 0), 512, 8196),
'type': Grade.known_values(device.get('type', 'RAM'), {
'DDR1': 2,
'DDR2': 3,
'DDR3': 6,
'DDR4': 10,
}),
'speed': Grade.int(device.get('speed', 0), 200, 4000),
}, self.folding.DEVICE)
def __size(self, size):
return self.folding.fold({
'MemTotal': Grade.int(size.get('MemTotal'), 2 * (1024 ** 2), 16 * (1024 ** 2)),
'SwapTotal': Grade.int(size.get('SwapTotal'), 512 * 1024, 4 * (1024 ** 2)),
}, self.folding.DEVICE) if size else 1
class MemInfo(YAMLLike):
keys_required = (
'MemTotal',
'MemFree',
'SwapTotal',
'SwapFree',
)
def parse(self, text):
data = yaml.load(text, yaml.loader.SafeLoader)
return dict((k, int(v.replace(' kB', ''))) for k, v in iteritems(data) if k in self.keys_required)
class MemInfoDMIDevice(object):
def __init__(self, text):
self.data = {
'speed': 0,
'type': 'RAM',
'size': 0,
}
self.handle = None
self.parse_text(text)
def parse_text(self, text):
""" Разбор описания плашки памяти от dmidecode """
for line in map(str.strip, text.split('\n')):
self.parse_line(line)
def parse_line(self, line):
for key in ('Speed', 'Type', 'Size'):
if line.startswith(key + ':'):
self.data[key.lower()] = line.split()[1]
break
if line.startswith('Handle'):
self.handle = line.split(' ')[1].strip(',')
class MemInfoDMI(Parser):
@staticmethod
def parse(text):
""" Разбор всего вывода dmidecode --type memory """
return MemInfoDMI.__parse(text.split('\n\n')) if text else None
@staticmethod
def __parse(devices):
output = dict()
for device in devices:
if 'Memory Device' not in device:
continue
mem_dev = MemInfoDMIDevice(device)
if mem_dev.data.get('size') == 'No':
continue
output[mem_dev.handle] = mem_dev.data
return output
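# Illustrative sketch (editor's addition) of the parsed shapes; the sample
# values are made up and the Parser/YAMLLike constructor arguments are not
# shown here.
#
#     MemInfo.parse() on a /proc/meminfo-style text
#         -> {'MemTotal': 16384256, 'MemFree': 8192128,
#             'SwapTotal': 2097148, 'SwapFree': 2097148}
#
#     MemInfoDMI.parse() on `dmidecode --type memory` output
#         -> {'0x0042': {'size': '8192', 'type': 'DDR4', 'speed': '2400'}, ...}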
|
bf3s/algorithms/selfsupervision/fewshot_selfsupervision_rotation.py | alisure-fork/BF3S | 130 | 12644027 | <gh_stars>100-1000
import torch
import bf3s.algorithms.algorithm as algorithm
import bf3s.algorithms.fewshot.utils as fs_utils
import bf3s.algorithms.selfsupervision.rotation_utils as rot_utils
import bf3s.utils as utils
class FewShotRotationSelfSupervision(algorithm.Algorithm):
"""Trains a few-shot model with the auxiliary rotation prediction task."""
def __init__(self, opt, _run=None, _log=None):
super().__init__(opt, _run, _log)
self.keep_best_model_metric_name = "AccuracyNovel"
self.auxiliary_rotation_task_coef = opt["auxiliary_rotation_task_coef"]
self.rotation_invariant_classifier = opt["rotation_invariant_classifier"]
self.random_rotation = opt["random_rotation"]
self.semi_supervised = opt["semi_supervised"] if ("semi_supervised" in opt) else False
feature_name = opt["feature_name"] if ("feature_name" in opt) else None
if feature_name:
assert isinstance(feature_name, (list, tuple))
assert len(feature_name) == 1
self.feature_name = feature_name
self.accuracies = {}
def allocate_tensors(self):
self.tensors = {
"images_train": torch.FloatTensor(),
"labels_train": torch.LongTensor(),
"labels_train_1hot": torch.FloatTensor(),
"images_test": torch.FloatTensor(),
"labels_test": torch.LongTensor(),
"Kids": torch.LongTensor(),
"images_unlabeled": torch.FloatTensor(),
}
def set_tensors(self, batch):
two_datasets = (
isinstance(batch, (list, tuple))
and len(batch) == 2
and isinstance(batch[0], (list, tuple))
and isinstance(batch[1], (list, tuple))
)
if two_datasets:
train_test_stage = "classification"
assert len(batch[0]) == 4
assert len(batch[1]) == 1
assert self.semi_supervised is True
images_test, labels_test, K, num_base_per_episode = batch[0]
(images_unlabeled,) = batch[1]
self.num_base = num_base_per_episode[0].item()
self.tensors["images_test"].resize_(images_test.size()).copy_(images_test)
self.tensors["labels_test"].resize_(labels_test.size()).copy_(labels_test)
self.tensors["Kids"].resize_(K.size()).copy_(K)
self.tensors["images_unlabeled"].resize_(images_unlabeled.size()).copy_(
images_unlabeled
)
elif len(batch) == 6:
train_test_stage = "fewshot"
(
images_train,
labels_train,
images_test,
labels_test,
K,
num_base_per_episode,
) = batch
self.num_base = num_base_per_episode[0].item()
self.tensors["images_train"].resize_(images_train.size()).copy_(images_train)
self.tensors["labels_train"].resize_(labels_train.size()).copy_(labels_train)
labels_train = self.tensors["labels_train"]
nKnovel = 1 + labels_train.max().item() - self.num_base
labels_train_1hot_size = list(labels_train.size()) + [
nKnovel,
]
labels_train_unsqueeze = labels_train.unsqueeze(dim=labels_train.dim())
self.tensors["labels_train_1hot"].resize_(labels_train_1hot_size).fill_(
0
).scatter_(
len(labels_train_1hot_size) - 1, labels_train_unsqueeze - self.num_base, 1
)
self.tensors["images_test"].resize_(images_test.size()).copy_(images_test)
self.tensors["labels_test"].resize_(labels_test.size()).copy_(labels_test)
self.tensors["Kids"].resize_(K.size()).copy_(K)
elif len(batch) == 4:
train_test_stage = "classification"
images_test, labels_test, K, num_base_per_episode = batch
self.num_base = num_base_per_episode[0].item()
self.tensors["images_test"].resize_(images_test.size()).copy_(images_test)
self.tensors["labels_test"].resize_(labels_test.size()).copy_(labels_test)
self.tensors["Kids"].resize_(K.size()).copy_(K)
return train_test_stage
def train_step(self, batch):
return self.process_batch(batch, is_train=True)
def evaluation_step(self, batch):
return self.process_batch(batch, is_train=False)
def process_batch(self, batch, is_train):
process_type = self.set_tensors(batch)
auxiliary_rotation_task = is_train and (self.auxiliary_rotation_task_coef > 0.0)
if process_type == "fewshot":
record = self.process_batch_fewshot_classification_task(
auxiliary_rotation_task, is_train
)
elif process_type == "classification":
record = self.process_batch_base_class_classification_task(
auxiliary_rotation_task, is_train
)
else:
raise ValueError(f"Unexpected process type {process_type}")
return record
def process_batch_base_class_classification_task(self, auxiliary_rotation_task, is_train):
images = self.tensors["images_test"]
labels = self.tensors["labels_test"]
Kids = self.tensors["Kids"]
assert images.dim() == 5 and labels.dim() == 2
images = utils.convert_from_5d_to_4d(images)
labels = labels.view(-1)
if self.semi_supervised and is_train:
images_unlabeled = self.tensors["images_unlabeled"]
assert images_unlabeled.dim() == 4
assert auxiliary_rotation_task is True
else:
images_unlabeled = None
if auxiliary_rotation_task:
record = rot_utils.object_classification_with_rotation_selfsupervision(
feature_extractor=self.networks["feature_extractor"],
feature_extractor_optimizer=self.optimizers["feature_extractor"],
classifier=self.networks["classifier"],
classifier_optimizer=self.optimizers["classifier"],
classifier_rot=self.networks["classifier_aux"],
classifier_rot_optimizer=self.optimizers["classifier_aux"],
images=images,
labels=labels,
is_train=is_train,
alpha=self.auxiliary_rotation_task_coef,
random_rotation=self.random_rotation,
rotation_invariant_classifier=self.rotation_invariant_classifier,
base_ids=Kids[:, : self.num_base].contiguous(),
feature_name=self.feature_name,
images_unlabeled=images_unlabeled,
)
else:
record = rot_utils.object_classification_rotation_invariant(
feature_extractor=self.networks["feature_extractor"],
feature_extractor_optimizer=self.optimizers["feature_extractor"],
classifier=self.networks["classifier"],
classifier_optimizer=self.optimizers["classifier"],
images=images,
labels=labels,
is_train=is_train,
rotation_invariant_classifier=self.rotation_invariant_classifier,
random_rotation=self.random_rotation,
base_ids=Kids[:, : self.num_base].contiguous(),
feature_name=self.feature_name,
)
return record
def process_batch_fewshot_classification_task(self, auxiliary_rotation_task, is_train):
Kids = self.tensors["Kids"]
base_ids = None if (self.num_base == 0) else Kids[:, : self.num_base].contiguous()
if auxiliary_rotation_task:
if self.rotation_invariant_classifier:
raise ValueError("Not supported option.")
if self.random_rotation:
raise ValueError("Not supported option.")
if self.semi_supervised:
raise ValueError("Not supported option.")
record = rot_utils.fewshot_classification_with_rotation_selfsupervision(
feature_extractor=self.networks["feature_extractor"],
feature_extractor_optimizer=self.optimizers.get("feature_extractor"),
classifier=self.networks["classifier"],
classifier_optimizer=self.optimizers.get("classifier"),
classifier_rot=self.networks["classifier_aux"],
classifier_rot_optimizer=self.optimizers.get("classifier_aux"),
images_train=self.tensors["images_train"],
labels_train=self.tensors["labels_train"],
labels_train_1hot=self.tensors["labels_train_1hot"],
images_test=self.tensors["images_test"],
labels_test=self.tensors["labels_test"],
is_train=is_train,
alpha=self.auxiliary_rotation_task_coef,
base_ids=base_ids,
feature_name=self.feature_name,
)
else:
record = fs_utils.fewshot_classification(
feature_extractor=self.networks["feature_extractor"],
feature_extractor_optimizer=self.optimizers.get("feature_extractor"),
classifier=self.networks["classifier"],
classifier_optimizer=self.optimizers.get("classifier"),
images_train=self.tensors["images_train"],
labels_train=self.tensors["labels_train"],
labels_train_1hot=self.tensors["labels_train_1hot"],
images_test=self.tensors["images_test"],
labels_test=self.tensors["labels_test"],
is_train=is_train,
base_ids=base_ids,
feature_name=self.feature_name,
)
if not is_train:
record, self.accuracies = fs_utils.compute_95confidence_intervals(
record,
episode=self.biter,
num_episodes=self.bnumber,
store_accuracies=self.accuracies,
metrics=["AccuracyNovel",],
)
return record
|
ptpython/filters.py | facingBackwards/ptpython | 3,022 | 12644034 |
from typing import TYPE_CHECKING
from prompt_toolkit.filters import Filter
if TYPE_CHECKING:
from .python_input import PythonInput
__all__ = ["HasSignature", "ShowSidebar", "ShowSignature", "ShowDocstring"]
class PythonInputFilter(Filter):
def __init__(self, python_input: "PythonInput") -> None:
self.python_input = python_input
def __call__(self) -> bool:
raise NotImplementedError
class HasSignature(PythonInputFilter):
def __call__(self) -> bool:
return bool(self.python_input.signatures)
class ShowSidebar(PythonInputFilter):
def __call__(self) -> bool:
return self.python_input.show_sidebar
class ShowSignature(PythonInputFilter):
def __call__(self) -> bool:
return self.python_input.show_signature
class ShowDocstring(PythonInputFilter):
def __call__(self) -> bool:
return self.python_input.show_docstring
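# Usage sketch (not part of ptpython): these filters gate prompt_toolkit behavior at
# runtime. Assuming ``python_input`` is an existing PythonInput instance, a
# hypothetical key binding could be made active only while the sidebar is visible:
#
#     from prompt_toolkit.key_binding import KeyBindings
#
#     bindings = KeyBindings()
#
#     @bindings.add("f4", filter=ShowSidebar(python_input))
#     def _(event):
#         python_input.show_sidebar = False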
|
pokemongo_bot/walkers/polyline_walker.py | timgates42/PokemonGo-Bot | 5,362 | 12644052 |
from __future__ import absolute_import
from geographiclib.geodesic import Geodesic
from pokemongo_bot.walkers.step_walker import StepWalker
from .polyline_generator import PolylineObjectHandler
from pokemongo_bot.human_behaviour import random_alt_delta
class PolylineWalker(StepWalker):
def get_next_position(self, origin_lat, origin_lng, origin_alt, dest_lat, dest_lng, dest_alt, distance):
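        # Step along the Google-Maps polyline cached for this route: advance to the
        # next polyline vertex whenever the remaining distance to it falls within the
        # walker's precision, carrying any unused distance over to the next segment.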
polyline = PolylineObjectHandler.cached_polyline((self.bot.position[0], self.bot.position[1]), (dest_lat, dest_lng), google_map_api_key=self.bot.config.gmapkey)
while True:
_, (dest_lat, dest_lng) = polyline._step_dict[polyline._step_keys[polyline._last_step]]
next_lat, next_lng, _ = super(PolylineWalker, self).get_next_position(origin_lat, origin_lng, origin_alt, dest_lat, dest_lng, dest_alt, distance)
if polyline._last_step == len(polyline._step_keys) - 1:
break
else:
travelled = Geodesic.WGS84.Inverse(origin_lat, origin_lng, next_lat, next_lng)["s12"]
remaining = Geodesic.WGS84.Inverse(next_lat, next_lng, dest_lat, dest_lng)["s12"]
step_distance = Geodesic.WGS84.Inverse(origin_lat, origin_lng, dest_lat, dest_lng)["s12"]
if remaining < (self.precision + self.epsilon):
polyline._last_step += 1
distance = abs(distance - step_distance)
else:
distance = abs(distance - travelled)
if distance > (self.precision + self.epsilon):
origin_lat, origin_lng, origin_alt = dest_lat, dest_lng, dest_alt
else:
break
polyline._last_pos = (next_lat, next_lng)
next_alt = polyline.get_alt() or origin_alt
return next_lat, next_lng, next_alt + random_alt_delta()
|
eod/plugins/yolox/models/neck/__init__.py | Helicopt/EOD | 196 | 12644056 | from .pafpn import YoloxPAFPN # noqa |
tests/test_sentry.py | Smlep/fastapi-jsonrpc | 155 | 12644061 |
"""Test fixtures copied from https://github.com/getsentry/sentry-python/
TODO: move integration to sentry_sdk
"""
import pytest
import sentry_sdk
from sentry_sdk import Transport
from sentry_sdk.utils import capture_internal_exceptions
@pytest.fixture
def probe(ep):
@ep.method()
def probe() -> str:
raise ZeroDivisionError
@ep.method()
def probe2() -> str:
raise RuntimeError
return ep
def test_transaction_is_jsonrpc_method(probe, json_request, sentry_init, capture_exceptions, capture_events):
sentry_init(send_default_pii=True)
exceptions = capture_exceptions()
events = capture_events()
# Test in batch to ensure we correctly handle multiple requests
json_request([
{
'id': 1,
'jsonrpc': '2.0',
'method': 'probe',
'params': {},
},
{
'id': 2,
'jsonrpc': '2.0',
'method': 'probe2',
'params': {},
},
])
assert {type(e) for e in exceptions} == {RuntimeError, ZeroDivisionError}
assert set([
e.get('transaction') for e in events
]) == {'test_sentry.probe.<locals>.probe', 'test_sentry.probe.<locals>.probe2'}
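# Minimal stand-ins for sentry-python's own test helpers: a transport that hands
# events/envelopes to callbacks instead of sending them, plus fixtures that patch it in.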
class TestTransport(Transport):
def __init__(self, capture_event_callback, capture_envelope_callback):
Transport.__init__(self)
self.capture_event = capture_event_callback
self.capture_envelope = capture_envelope_callback
self._queue = None
@pytest.fixture
def monkeypatch_test_transport(monkeypatch):
def check_event(event):
def check_string_keys(map):
            for key, value in map.items():
assert isinstance(key, str)
if isinstance(value, dict):
check_string_keys(value)
with capture_internal_exceptions():
check_string_keys(event)
def check_envelope(envelope):
with capture_internal_exceptions():
# Assert error events are sent without envelope to server, for compat.
# This does not apply if any item in the envelope is an attachment.
if not any(x.type == "attachment" for x in envelope.items):
assert not any(item.data_category == "error" for item in envelope.items)
assert not any(item.get_event() is not None for item in envelope.items)
def inner(client):
monkeypatch.setattr(
client, "transport", TestTransport(check_event, check_envelope)
)
return inner
@pytest.fixture
def sentry_init(monkeypatch_test_transport, request):
def inner(*a, **kw):
hub = sentry_sdk.Hub.current
client = sentry_sdk.Client(*a, **kw)
hub.bind_client(client)
if "transport" not in kw:
monkeypatch_test_transport(sentry_sdk.Hub.current.client)
if request.node.get_closest_marker("forked"):
# Do not run isolation if the test is already running in
# ultimate isolation (seems to be required for celery tests that
# fork)
yield inner
else:
with sentry_sdk.Hub(None):
yield inner
@pytest.fixture
def capture_events(monkeypatch):
def inner():
events = []
test_client = sentry_sdk.Hub.current.client
old_capture_event = test_client.transport.capture_event
old_capture_envelope = test_client.transport.capture_envelope
def append_event(event):
events.append(event)
return old_capture_event(event)
def append_envelope(envelope):
for item in envelope:
if item.headers.get("type") in ("event", "transaction"):
test_client.transport.capture_event(item.payload.json)
return old_capture_envelope(envelope)
monkeypatch.setattr(test_client.transport, "capture_event", append_event)
monkeypatch.setattr(test_client.transport, "capture_envelope", append_envelope)
return events
return inner
@pytest.fixture
def capture_exceptions(monkeypatch):
def inner():
errors = set()
old_capture_event = sentry_sdk.Hub.capture_event
def capture_event(self, event, hint=None):
if hint:
if "exc_info" in hint:
error = hint["exc_info"][1]
errors.add(error)
return old_capture_event(self, event, hint=hint)
monkeypatch.setattr(sentry_sdk.Hub, "capture_event", capture_event)
return errors
return inner
|
elex/__init__.py | jameswilkerson/elex | 183 | 12644064 | import os
import requests
import tempfile
from cachecontrol import CacheControl
from cachecontrol.caches import FileCache
from elex.cachecontrol_heuristics import EtagOnlyCache
__version__ = '2.4.3'
_DEFAULT_CACHE_DIRECTORY = os.path.join(tempfile.gettempdir(), 'elex-cache')
API_KEY = os.environ.get('AP_API_KEY', None)
API_VERSION = os.environ.get('AP_API_VERSION', 'v2')
BASE_URL = os.environ.get('AP_API_BASE_URL', 'http://api.ap.org/{0}'.format(API_VERSION))
CACHE_DIRECTORY = os.environ.get('ELEX_CACHE_DIRECTORY', _DEFAULT_CACHE_DIRECTORY)
session = requests.session()
session.headers.update({'Accept-Encoding': 'gzip'})
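# Cache AP API responses on disk and revalidate them purely via ETags, so repeated
# requests avoid re-downloading unchanged election data.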
cache = CacheControl(session,
cache=FileCache(CACHE_DIRECTORY),
heuristic=EtagOnlyCache())
|
dojo/db_migrations/0134_sonarque_cobaltio_removal.py | mtcolman/django-DefectDojo | 249 | 12644086 | # Generated by Django 3.1.13 on 2021-11-04 06:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dojo', '0133_finding_service'),
]
operations = [
migrations.RemoveField(
model_name='sonarqube_product',
name='product',
),
migrations.RemoveField(
model_name='sonarqube_product',
name='sonarqube_tool_config',
),
migrations.DeleteModel(
name='Cobaltio_Product',
),
migrations.DeleteModel(
name='Sonarqube_Product',
),
]
|
docs/examples/robot_pots_2.py | codecademy-engineering/gpiozero | 743 | 12644093 |
from gpiozero import Robot, Motor, MCP3008
from gpiozero.tools import scaled
from signal import pause
robot = Robot(left=Motor(4, 14), right=Motor(17, 18))
left_pot = MCP3008(0)
right_pot = MCP3008(1)
robot.source = zip(scaled(left_pot, -1, 1), scaled(right_pot, -1, 1))
pause()
|
authlib/oidc/core/__init__.py | minddistrict/authlib | 3,172 | 12644117 |
"""
authlib.oidc.core
~~~~~~~~~~~~~~~~~
OpenID Connect Core 1.0 Implementation.
http://openid.net/specs/openid-connect-core-1_0.html
"""
from .models import AuthorizationCodeMixin
from .claims import (
IDToken, CodeIDToken, ImplicitIDToken, HybridIDToken,
UserInfo, get_claim_cls_by_response_type,
)
from .grants import OpenIDToken, OpenIDCode, OpenIDHybridGrant, OpenIDImplicitGrant
__all__ = [
'AuthorizationCodeMixin',
'IDToken', 'CodeIDToken', 'ImplicitIDToken', 'HybridIDToken',
'UserInfo', 'get_claim_cls_by_response_type',
'OpenIDToken', 'OpenIDCode', 'OpenIDHybridGrant', 'OpenIDImplicitGrant',
]
|
flow/python/S3DET.py | magical-eda/MAGICAL | 119 | 12644119 | #
# @file S3DET.py
# @author <NAME>
# @date 10/02/2019
# @brief The class for generating system symmetry constraints using graph similarity
#
import magicalFlow
import networkx as nx
from itertools import combinations
import GraphSim
import matplotlib.pyplot as plt
clk_set = {"clk", "clksb", "clks_boost", "clkb", "clkbo"}
vss_set = {"gnd", "vss", "vss_sub", "vrefn", "vrefnd", "avss", "dvss", "vss_d"}
vdd_set = {"vdd", "vdd_and", "vdd_c", "vdd_comp", "vdd_gm", "vddd", "vdda", "veld", "avdd", "vrefp", "vrefnp", "avdd_sar", "vdd_ac", "dvdd", "vdd_int", "vddac", "vdd_d"}
ignore_set = vss_set.union(vdd_set)#, clk_set)
class S3DET(object):
def __init__(self, magicalDB, symTol=0.0):
self.mDB = magicalDB
self.dDB = magicalDB.designDB.db
self.tDB = magicalDB.techDB
self.symTol = symTol
self.addPins = True
# Modified for fix for local graph generation
#self.graph = nx.Graph()
#self.circuitNodes = dict()
#self.constructGraph()
#self.graphSim = GraphSim.GraphSim(self.graph)
#self.plotGraph()
def systemSym(self, cktIdx, dirName):
# Adding fix for local graph generation
self.graph = nx.Graph()
self.circuitNodes = dict()
self.constructGraph(cktIdx)
self.graphSim = GraphSim.GraphSim(self.graph)
#
ckt = self.dDB.subCkt(cktIdx)
cktNodes = range(ckt.numNodes())
symVal = dict()
symPair = dict()
for nodeIdxA, nodeIdxB in combinations(cktNodes, 2):
nodeA = ckt.node(nodeIdxA)
nodeB = ckt.node(nodeIdxB)
cktA = self.dDB.subCkt(nodeA.graphIdx)
cktB = self.dDB.subCkt(nodeB.graphIdx)
#boxA = (cktA.gdsData().bbox().xLen(), cktA.gdsData().bbox().yLen())
#boxB = (cktB.gdsData().bbox().xLen(), cktB.gdsData().bbox().yLen())
boxA = (cktA.layout().boundary().xLen(), cktA.layout().boundary().yLen())
boxB = (cktB.layout().boundary().xLen(), cktB.layout().boundary().yLen())
subgraphA = self.subgraph(cktIdx, nodeIdxA)
subgraphB = self.subgraph(cktIdx, nodeIdxB)
# Boundary box size check and circuit graph isomorphic check
if boxA == boxB and nx.could_be_isomorphic(subgraphA, subgraphB):
if nodeIdxA not in symVal:
symVal[nodeIdxA] = dict()
symVal[nodeIdxA][nodeIdxB] = self.graphSim.specSimScore(subgraphA, subgraphB)
if nodeIdxB not in symVal:
symVal[nodeIdxB] = dict()
symVal[nodeIdxB][nodeIdxA] = symVal[nodeIdxA][nodeIdxB]
"""
print "Recognized symmetry pair:"
print nodeA.name, nodeB.name, symVal[nodeIdxA][nodeIdxB]
"""
symValKeys = list(symVal.keys())
for idxA in symValKeys:
if idxA not in symVal:
continue
tempDict = symVal[idxA]
tempList = list(tempDict.values())
idxB = list(tempDict.keys())[tempList.index(max(tempList))]
#symPair[idxA] = idxB
#symVal.pop(idxB, None)
# Adding fix, need to recursively remove. Dirty fix for now.
tempDict_p = symVal[idxB]
tempList_p = list(tempDict_p.values())
idxA_p = list(tempDict_p.keys())[tempList_p.index(max(tempList_p))]
if idxA == idxA_p:
symPair[idxA] = idxB
symVal.pop(idxB, None)
else:
val1 = tempDict[idxB]
val2 = tempDict_p[idxA_p]
if val1 > val2:
symPair[idxA] = idxB
symVal.pop(idxB, None)
else:
continue
filename = dirName + ckt.name + ".sym"
symFile = open(filename, "w")
for idxA in symPair:
idxB = symPair[idxA]
nameA = ckt.node(idxA).name
nameB = ckt.node(idxB).name
if symVal[idxA][idxB] >= self.symTol:
symFile.write("%s %s\n" % (nameA, nameB))
"""
else:
print "waived constraint", nameA, nameB, symVal[idxA][idxB]
"""
hierGraph = self.hierGraph(cktIdx)
selfSym = self.selfSym(symPair, hierGraph)
for idx in selfSym:
name = ckt.node(idx).name
symFile.write("%s\n" % name)
symNet = self.symNet(cktIdx, symPair, selfSym)
filename = dirName + ckt.name + ".symnet"
netFile = open(filename, "w")
for idxA in symNet:
idxB = symNet[idxA]
if idxA == idxB:
name = ckt.net(idxA).name
netFile.write("%s\n" % name)
else:
nameA = ckt.net(idxA).name
nameB = ckt.net(idxB).name
netFile.write("%s %s\n" % (nameA, nameB))
symFile.close()
netFile.close()
def selfSym(self, symPair, hierGraph):
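        # Nodes that are common neighbors of a symmetry pair in the hierarchy graph and
        # are not yet part of any pair are treated as self-symmetric.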
selfSym = set()
symVerified = set(symPair.keys()).union(symPair.values())
symPairKeys = list(symPair.keys())
for idxA in symPairKeys:
idxB = symPair[idxA]
if idxB:
for comNei in set(nx.common_neighbors(hierGraph, idxA, idxB)).difference(symVerified):
selfSym.add(comNei)
symPair[comNei] = comNei
symPair[idxB] = idxA
return selfSym
def symNet(self, cktIdx, symPair, selfSym):
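        # Two nets are symmetric when every device on one net maps onto a device on the
        # other net through the detected device symmetry pairs.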
ckt = self.dDB.subCkt(cktIdx)
symNet = dict()
netId = range(ckt.numNets())
for netIdxA, netIdxB in combinations(netId, 2):
devListA = self.devList(cktIdx, netIdxA)
devListB = self.devList(cktIdx, netIdxB)
sym = True
if len(devListA) == 0 or len(devListB) == 0:
sym = False
if len(devListA) != len(devListB):
sym = False
for devA in devListA:
if devA in symPair and symPair[devA] in devListB:
continue
else:
sym = False
break
if sym:
symNet[netIdxA] = netIdxB
return symNet
def devList(self, cktIdx, netIdx):
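        # Indices of the devices attached to this net; power/ground nets are skipped.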
ckt = self.dDB.subCkt(cktIdx)
devList = set()
if ckt.net(netIdx).name in ignore_set:
return list(devList)
for pinId in range(ckt.net(netIdx).numPins()):
pinIdx = ckt.net(netIdx).pinIdx(pinId)
pin = ckt.pin(pinIdx)
devList.add(pin.nodeIdx)
return list(devList)
def plotGraph(self, cktIdx=None, recursive=True):
        if cktIdx is None:
labels = dict((n,d['name']) for n,d in self.graph.nodes(data=True))
pos = nx.spring_layout(self.graph)
nx.draw(self.graph, labels=labels, pos=pos)
plt.show()
if recursive:
self.plotGraph(self.mDB.topCktIdx())
else:
ckt = self.dDB.subCkt(cktIdx)
if magicalFlow.isImplTypeDevice(ckt.implType):
return
for nodes in range(ckt.numNodes()):
subgraph = self.subgraph(cktIdx, nodes)
labels = dict((n,d['name']) for n,d in subgraph.nodes(data=True))
pos = nx.spring_layout(subgraph)
if len(subgraph.nodes) > 4:
nx.draw(subgraph, labels=labels, pos=pos)
plt.show()
if recursive:
self.plotGraph(ckt.node(nodes).graphIdx)
def hierGraph(self, cktIdx):
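        # Device-level connectivity graph: devices sharing a signal net (power/ground
        # excluded) become adjacent, with the net index stored on the edge.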
ckt = self.dDB.subCkt(cktIdx)
hierGraph = nx.Graph()
hierGraph.add_nodes_from(range(ckt.numNodes()))
for netIdx in range(ckt.numNets()):
net = ckt.net(netIdx)
nodeList = set()
if net.name in ignore_set:
continue
for pinId in range(net.numPins()):
pinIdx = net.pinIdx(pinId)
pin = ckt.pin(pinIdx)
nodeList.add(pin.nodeIdx)
for nodeA, nodeB in combinations(nodeList, 2):
hierGraph.add_edge(nodeA, nodeB, index=netIdx)
return hierGraph
def subgraph(self, topIdx, nodeIdx):
nodes = self.circuitNodes[topIdx][nodeIdx]
subgraph = self.graph.subgraph(nodes)
return subgraph
def addNet(self, name):
if name in ignore_set:
self.graph.add_node(self.graph.number_of_nodes(), name=name, nodetype="pow")
else:
self.graph.add_node(self.graph.number_of_nodes(), name=name, nodetype="net")
return self.graph.number_of_nodes() - 1
def addInst(self, ckt, pinNum, ioNodeIdx):
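        # Add a device node and, when pin nodes are enabled, one node per pin; pins are
        # linked to their device and to the net nodes they connect to.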
devNode = self.graph.number_of_nodes()
nodeList = [devNode]
self.graph.add_node(devNode, name=ckt.name, nodetype="dev")
if self.addPins:
assert pinNum <= ckt.numPins(), "Device type pin count not matched"
for pinIdx in range(pinNum):
self.graph.add_node(devNode+pinIdx+1, name=ckt.name+'_'+str(pinIdx), nodetype="pin")
nodeList.append(devNode+pinIdx+1)
self.graph.add_edge(devNode+pinIdx+1, devNode, edgetype="dev_pin")
netNode = ioNodeIdx[pinIdx]
self.graph.add_edge(devNode+pinIdx+1, netNode, edgetype="pin_net")
else:
for pinIdx in range(pinNum):
netNode = ioNodeIdx[pinIdx]
self.graph.add_edge(devNode, netNode, edgetype="dev_net")
return nodeList
def constructGraph(self, topCktIdx=None):
# Added option for local graph generation
if not topCktIdx:
topCktIdx = self.mDB.topCktIdx()
self.circuitNodes[topCktIdx] = dict()
ckt = self.dDB.subCkt(topCktIdx)
netNodeIdx = dict() # dict of net name to graph node idx
for net in range(ckt.numNets()):
netName = ckt.net(net).name
nodeIdx = self.addNet(netName)
netNodeIdx[net] = nodeIdx
for nodeIdx in range(ckt.numNodes()):
cktNode = ckt.node(nodeIdx)
ioNodeIdx = dict()
subCkt = self.dDB.subCkt(cktNode.graphIdx)
cktType = subCkt.implType
for pin in range(cktNode.numPins()):
pinIdx = cktNode.pinIdx(pin)
netIdx = ckt.pin(pinIdx).netIdx
ioNodeIdx[pin] = netNodeIdx[netIdx]
if not magicalFlow.isImplTypeDevice(cktType):
subNodes = self.constructSubgraph(cktNode.graphIdx, ioNodeIdx)
elif cktType in [magicalFlow.ImplTypePCELL_Nch, magicalFlow.ImplTypePCELL_Pch]:
subNodes = self.addInst(subCkt, 3, ioNodeIdx)
elif cktType in [magicalFlow.ImplTypePCELL_Res, magicalFlow.ImplTypePCELL_Cap]:
subNodes = self.addInst(subCkt, 2, ioNodeIdx)
else:
raise Exception('Device type of %s not supported' % subCkt.name)
self.circuitNodes[topCktIdx][nodeIdx] = subNodes
self.removeNetNodes()
def constructSubgraph(self, cktIdx, topIoNodeIdx):
ckt = self.dDB.subCkt(cktIdx)
self.circuitNodes[cktIdx] = dict()
netNodeIdx = dict()
nodeList = []
for net in range(ckt.numNets()):
if ckt.net(net).isIo():
netNodeIdx[net] = topIoNodeIdx[ckt.net(net).ioPos]
else:
netName = ckt.net(net).name
netIdx = self.addNet(netName)
netNodeIdx[net] = netIdx
for nodeIdx in range(ckt.numNodes()):
cktNode = ckt.node(nodeIdx)
subCkt = self.dDB.subCkt(cktNode.graphIdx)
cktType = subCkt.implType
ioNodeIdx = dict()
for pin in range(cktNode.numPins()):
pinIdx = cktNode.pinIdx(pin)
netIdx = ckt.pin(pinIdx).netIdx
ioNodeIdx[pin] = netNodeIdx[netIdx]
if not magicalFlow.isImplTypeDevice(cktType):
subNodes = self.constructSubgraph(cktNode.graphIdx, ioNodeIdx)
elif cktType in [magicalFlow.ImplTypePCELL_Nch, magicalFlow.ImplTypePCELL_Pch]:
subNodes = self.addInst(subCkt, 3, ioNodeIdx)
elif cktType in [magicalFlow.ImplTypePCELL_Res, magicalFlow.ImplTypePCELL_Cap]:
subNodes = self.addInst(subCkt, 2, ioNodeIdx)
else:
raise Exception('Device type of %s not supported' % subCkt.name)
self.circuitNodes[cktIdx][nodeIdx] = subNodes
nodeList.extend(subNodes)
return nodeList
def removeNetNodes(self):
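        # Drop power nodes and replace every remaining net node with direct pin-to-pin
        # edges between the pins that shared it.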
removeNode = []
for node in self.graph:
            if self.graph.nodes[node]['nodetype'] == 'pow':
removeNode.append(node)
            elif self.graph.nodes[node]['nodetype'] == 'net':
for pinA, pinB in combinations(self.graph[node], 2):
self.graph.add_edge(pinA, pinB, edgetype="pin_pin")
removeNode.append(node)
self.graph.remove_nodes_from(removeNode)
|
tools/ipc_fuzzer/play_testcase.py | kjthegod/chromium | 231 | 12644130 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around chrome.
Replaces all the child processes (renderer, GPU, plugins and utility) with the
IPC fuzzer. The fuzzer will then play back a specified testcase.
Depends on ipc_fuzzer being available on the same directory as chrome.
"""
import argparse
import os
import platform
import subprocess
import sys
def main():
desc = 'Wrapper to run chrome with child processes replaced by IPC fuzzers'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--out-dir', dest='out_dir', default='out',
help='output directory under src/ directory')
parser.add_argument('--build-type', dest='build_type', default='Release',
help='Debug vs. Release build')
parser.add_argument('--gdb-browser', dest='gdb_browser', default=False,
action='store_true',
help='run browser process inside gdb')
parser.add_argument('testcase',
help='IPC file to be replayed')
parser.add_argument('chrome_args',
nargs=argparse.REMAINDER,
help='any additional arguments are passed to chrome')
args = parser.parse_args()
chrome_binary = 'chrome'
fuzzer_binary = 'ipc_fuzzer_replay'
script_path = os.path.realpath(__file__)
ipc_fuzzer_dir = os.path.dirname(script_path)
src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
out_dir = os.path.join(src_dir, args.out_dir)
build_dir = os.path.join(out_dir, args.build_type)
chrome_path = os.path.join(build_dir, chrome_binary)
if not os.path.exists(chrome_path):
print 'chrome executable not found at ', chrome_path
return 1
fuzzer_path = os.path.join(build_dir, fuzzer_binary)
if not os.path.exists(fuzzer_path):
print 'fuzzer executable not found at ', fuzzer_path
print ('ensure GYP_DEFINES="enable_ipc_fuzzer=1" and build target ' +
fuzzer_binary + '.')
return 1
prefixes = {
'--renderer-cmd-prefix',
'--gpu-launcher',
'--plugin-launcher',
'--ppapi-plugin-launcher',
'--utility-cmd-prefix',
}
chrome_command = [
chrome_path,
'--ipc-fuzzer-testcase=' + args.testcase,
'--no-sandbox',
'--disable-kill-after-bad-ipc',
]
if args.gdb_browser:
chrome_command = ['gdb', '--args'] + chrome_command
launchers = {}
for prefix in prefixes:
launchers[prefix] = fuzzer_path
for arg in args.chrome_args:
if arg.find('=') != -1:
switch, value = arg.split('=', 1)
if switch in prefixes:
launchers[switch] = value + ' ' + launchers[switch]
continue
chrome_command.append(arg)
for switch, value in launchers.items():
chrome_command.append(switch + '=' + value)
command_line = ' '.join(['\'' + arg + '\'' for arg in chrome_command])
print 'Executing: ' + command_line
return subprocess.call(chrome_command)
if __name__ == "__main__":
sys.exit(main())
|
setup.py | alpxp/co2meter | 232 | 12644145 |
from setuptools import setup
GITHUB_URL = 'http://github.com/vfilimonov/co2meter'
exec(open('co2meter/_version.py').read())
# Long description to be published in PyPi
LONG_DESCRIPTION = """
**CO2meter** is a Python interface to the USB CO2 monitor with monitoring and
logging tools, flask web-server for visualization and Apple HomeKit compatibility.
"""
setup(name='CO2meter',
version=__version__,
description='Python interface to the USB CO2 monitor',
long_description=LONG_DESCRIPTION,
url=GITHUB_URL,
download_url=GITHUB_URL + '/archive/v%s.zip' % (__version__),
author='<NAME>',
author_email='<EMAIL>',
license='MIT License',
packages=['co2meter'],
install_requires=['hidapi', 'future'],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['co2meter_server = co2meter:start_server',
'co2meter_homekit = co2meter:start_homekit',
'co2meter_server_homekit = co2meter:start_server_homekit',
],
},
classifiers=['Programming Language :: Python :: 3', ]
)
|
src/main/python/tensorframes/core_test.py | ai2008/- | 789 | 12644167 |
from __future__ import print_function
from pyspark import SparkContext
from pyspark.sql import DataFrame, SQLContext
from pyspark.sql import Row
from pyspark.sql.functions import col
import tensorflow as tf
import pandas as pd
import tensorframes as tfs
from tensorframes.core import _java_api
class TestCore(object):
@classmethod
def setup_class(cls):
print("setup ", cls)
cls.sc = SparkContext('local[1]', cls.__name__)
@classmethod
def teardown_class(cls):
print("teardown ", cls)
cls.sc.stop()
def setUp(self):
self.sql = SQLContext(TestCore.sc)
self.api = _java_api()
self.api.initialize_logging()
print("setup")
def teardown(self):
print("teardown")
def test_schema(self):
data = [Row(x=float(x)) for x in range(100)]
df = self.sql.createDataFrame(data)
tfs.print_schema(df)
def test_map_blocks_0(self):
data = [Row(x=float(x)) for x in range(10)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
y = tf.Variable(3.0, dtype=tf.double, name='y')
z = tf.add(x, y, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df)
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_blocks_1(self):
data = [Row(x=float(x)) for x in range(10)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df)
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_blocks_2(self):
data = [dict(x=float(x)) for x in range(10)]
df = pd.DataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df)
data2 = df2
assert data2.z[0] == 3.0, data2
def test_map_blocks_3(self):
data = [dict(x=float(x)) for x in range(10)]
df = pd.DataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
y = tf.Variable(3.0, dtype=tf.double, name='y')
z = tf.add(x, y, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df)
data2 = df2
assert data2.z[0] == 3.0, data2
def test_map_blocks_feed_dict(self):
data = [dict(x_spark=float(x)) for x in range(10)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x_tf")
# The output that adds 3 to x
y = tf.Variable(3.0, dtype=tf.double, name='y')
z = tf.add(x, y, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df, feed_dict={'x_tf': 'x_spark'})
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_rows_1(self):
data = [Row(x=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_rows(z, df)
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_rows_2(self):
data = [Row(y=float(y)) for y in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_rows(z, df, feed_dict={'x':'y'})
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_rows_3(self):
data = [dict(x=float(x)) for x in range(5)]
df = pd.DataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_rows(z, df)
data2 = df2
assert data2.z[0] == 3.0, data2
def test_map_rows_feed_dict(self):
data = [dict(x_spark=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[], name="x_tf")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_rows(z, df, feed_dict={'x_tf': 'x_spark'})
data2 = df2.collect()
assert data2[0].z == 3.0, data2
def test_map_rows_4(self):
data = [dict(y=float(x)) for x in range(5)]
df = pd.DataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output that adds 3 to x
z = tf.add(x, 3, name='z')
# The resulting dataframe
df2 = tfs.map_rows(z, df, feed_dict={'x':'y'})
data2 = df2
assert data2.z[0] == 3.0, data2
def test_reduce_rows_0(self):
data = [Row(x=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x_1 = tf.placeholder(tf.double, shape=[], name="x_1")
x_2 = tf.placeholder(tf.double, shape=[], name="x_2")
y = tf.Variable(0.0, dtype=tf.double, name='y')
x_0 = tf.add(y, x_1, name='x_0')
# The output that adds 3 to x
x = tf.add(x_0, x_2, name='x')
# The resulting number
res = tfs.reduce_rows(x, df)
assert res == sum([r.x for r in data])
def test_reduce_rows_1(self):
data = [Row(x=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x_1 = tf.placeholder(tf.double, shape=[], name="x_1")
x_2 = tf.placeholder(tf.double, shape=[], name="x_2")
# The output that adds 3 to x
x = tf.add(x_1, x_2, name='x')
# The resulting number
res = tfs.reduce_rows(x, df)
assert res == sum([r.x for r in data])
def test_append_shape(self):
data = [Row(x=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
ddf = tfs.append_shape(df, col('x'), [-1])
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x_1 = tf.placeholder(tf.double, shape=[], name="x_1")
x_2 = tf.placeholder(tf.double, shape=[], name="x_2")
# The output that adds 3 to x
x = tf.add(x_1, x_2, name='x')
# The resulting number
res = tfs.reduce_rows(x, ddf)
assert res == sum([r.x for r in data])
# This test fails
def test_reduce_blocks_1(self):
data = [Row(x=float(x)) for x in range(5)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x_input = tf.placeholder(tf.double, shape=[None], name="x_input")
# The output that adds 3 to x
x = tf.reduce_sum(x_input, name='x')
# The resulting dataframe
res = tfs.reduce_blocks(x, df)
assert res == sum([r.x for r in data])
def test_map_blocks_trimmed_0(self):
data = [Row(x=float(x)) for x in range(3)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output discards the input and return a single row of data
z = tf.Variable([2], dtype=tf.double, name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df, trim=True)
data2 = df2.collect()
assert data2[0].z == 2, data2
def test_map_blocks_trimmed_1(self):
data = [Row(x=float(x)) for x in range(3)]
df = self.sql.createDataFrame(data)
with tf.Graph().as_default():
# The placeholder that corresponds to column 'x'
x = tf.placeholder(tf.double, shape=[None], name="x")
# The output discards the input and return a single row of data
z = tf.constant([2], name='z')
# The resulting dataframe
df2 = tfs.map_blocks(z, df, trim=True)
data2 = df2.collect()
assert data2[0].z == 2, data2
def test_groupby_1(self):
data = [Row(x=float(x), key=str(x % 2)) for x in range(4)]
df = self.sql.createDataFrame(data)
gb = df.groupBy("key")
with tf.Graph().as_default():
x_input = tfs.block(df, "x", tf_name="x_input")
x = tf.reduce_sum(x_input, [0], name='x')
df2 = tfs.aggregate(x, gb)
data2 = df2.collect()
assert data2 == [Row(key='0', x=2.0), Row(key='1', x=4.0)], data2
if __name__ == "__main__":
# Some testing stuff that should not be executed
    with tf.Graph().as_default() as g:
x_input = tf.placeholder(tf.double, shape=[2, 3], name="x_input")
x = tf.reduce_sum(x_input, [0], name='x')
print(g.as_graph_def())
    with tf.Graph().as_default() as g:
x = tf.constant([1, 1], name="x")
y = tf.reduce_sum(x, [0], name='y')
print(g.as_graph_def())
    with tf.Graph().as_default() as g:
tf.constant(1, name="x1")
tf.constant(1.0, name="x2")
tf.constant([1.0], name="x3")
tf.constant([1.0, 2.0], name="x4")
print(g.as_graph_def())
|
tests/test_agent.py | cdowney/python-nomad | 109 | 12644197 | import pytest
import os
from nomad.api import exceptions as nomad_exceptions
# integration tests requires nomad Vagrant VM or Binary running
def test_get_agent(nomad_setup):
assert isinstance(nomad_setup.agent.get_agent(), dict) == True
def test_get_members(nomad_setup):
m = nomad_setup.agent.get_members()
if isinstance(m, list):
assert True
elif isinstance(m, dict):
assert True
else:
assert False
def test_get_servers(nomad_setup):
s = nomad_setup.agent.get_servers()
assert isinstance(s, list) == True
def test_join_agent(nomad_setup):
r = nomad_setup.agent.join_agent("nope")
assert r["num_joined"] == 0
def test_force_leave(nomad_setup):
r = nomad_setup.agent.force_leave("nope")
assert r == 200
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.agent), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.agent), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
d = nomad_setup.agent.does_not_exist
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/localpaths.py | wenfeifei/miniblink49 | 5,964 | 12644202 |
import os
import sys
here = os.path.abspath(os.path.split(__file__)[0])
repo_root = os.path.abspath(os.path.join(here, os.pardir))
sys.path.insert(0, os.path.join(repo_root, "tools"))
sys.path.insert(0, os.path.join(repo_root, "tools", "six"))
sys.path.insert(0, os.path.join(repo_root, "tools", "html5lib"))
sys.path.insert(0, os.path.join(repo_root, "tools", "wptserve"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pywebsocket", "src"))
|
tensorflow_graphics/projects/points_to_3Dobjects/utils/plot.py | Liang813/graphics | 2,759 | 12644213 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for plotting."""
import os
import pickle
import re
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow_graphics.projects.points_to_3Dobjects.utils import tf_utils
from google3.pyglib import gfile
def plot_to_image(figure):
"""Converts a matplotlib figure into a TF image e.g. for TensorBoard."""
figure.canvas.draw()
width, height = figure.canvas.get_width_height()
data_np = np.frombuffer(figure.canvas.tostring_rgb(), dtype='uint8')
  data_np = data_np.reshape([height, width, 3])
image = tf.expand_dims(data_np, 0)
return image
def resize_heatmap(centers, color=(1, 0, 0), stride=4):
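  # Upsample the stride-reduced heatmap to image resolution and return it as an RGBA
  # overlay in the given color, with alpha proportional to the normalized heat values.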
assert len(centers.shape) == 2
centers = np.repeat(np.repeat(centers, stride, axis=0), stride, axis=1)
centers = np.expand_dims(centers, axis=-1)
cmin, cmax = np.min(centers), np.max(centers)
centers = np.concatenate([np.ones(centers.shape) * color[0],
np.ones(centers.shape) * color[1],
np.ones(centers.shape) * color[2],
(centers-cmin)/cmax], axis=-1)
return centers
def plot_heatmaps(image, detections, figsize=5):
"""Plot."""
figure = plt.figure(figsize=(figsize, figsize))
plt.clf()
width, height = image.shape[1], image.shape[0]
if width != height:
image = tf.image.pad_to_bounding_box(image, 0, 0, 640, 640)
image = image.numpy()
width, height = image.shape[1], image.shape[0]
plt.imshow(np.concatenate([image.astype(float)/255.0,
1.0 * np.ones([height, width, 1])], axis=-1))
num_predicted_objects = detections['detection_classes'].numpy().shape[0]
for object_id in range(num_predicted_objects):
for k, color in [['centers', [0, 1, 0]]
]:
class_id = int(detections['detection_classes'][object_id].numpy())
centers = detections[k][:, :, class_id].numpy()
color = [[1, 0, 0], [0, 1, 0], [0, 0, 1]][object_id]
plt.imshow(resize_heatmap(centers, color=color))
plt.axis('off')
plt.tight_layout()
return figure
def draw_coordinate_frame(camera_intrinsic, pose_world2camera, dot):
"""Draw coordinate system frame."""
print(dot)
width = camera_intrinsic[0, 2] * 2.0
height = camera_intrinsic[1, 2] * 2.0
plt.plot([0, width], [height / 4.0 * 3.0, height / 4.0 * 3.0], 'g--')
plt.plot([width / 2.0, width / 2.0], [0.0, height], 'g--')
camera_intrinsic = np.reshape(camera_intrinsic, [3, 3])
pose_world2camera = np.reshape(pose_world2camera, [3, 4])
frame = np.array([[0, 0, 0, 1],
[0.1, 0, 0, 1],
[0, 0.1, 0, 1],
[0, 0, 0.1, 1]], dtype=np.float32).T # Shape: (4, 4)
projected_frame = camera_intrinsic @ pose_world2camera @ frame
projected_frame = projected_frame[0:2, :] / projected_frame[2, :]
plt.plot(projected_frame[0, [0, 1]], projected_frame[1, [0, 1]], 'r-')
plt.plot(projected_frame[0, [0, 2]], projected_frame[1, [0, 2]], 'g-')
plt.plot(projected_frame[0, [0, 3]], projected_frame[1, [0, 3]], 'b-')
dot_proj = camera_intrinsic @ \
pose_world2camera @ [dot[0, 0], dot[0, 1], dot[0, 2], 1.0]
dot_proj /= dot_proj[2]
print(dot_proj)
plt.plot(dot_proj[0], dot_proj[1], 'y*')
def plot_gt_boxes_2d(sample, shape_pointclouds, figsize=5):
"""Plot."""
_ = plt.figure(figsize=(figsize, figsize))
plt.clf()
plt.imshow(sample['image'])
# Plot ground truth boxes
sample['detection_boxes'] = sample['groundtruth_boxes'].numpy()
colors = ['r.', 'g.', 'b.']
for i in range(sample['num_boxes'].numpy()):
shape_id = sample['shapes'][i]
pointcloud = tf.transpose(shape_pointclouds[shape_id])
translation = sample['translations_3d'][i]
rotation = tf.reshape(sample['rotations_3d'][i], [3, 3])
size = np.diag(sample['sizes_3d'][i])
trafo_pc = \
rotation @ size @ (pointcloud / 2.0) + tf.expand_dims(translation, 1)
trafo_pc = tf.concat([trafo_pc, tf.ones([1, 512])], axis=0)
projected_pointcloud = \
tf.reshape(sample['k'], [3, 3]) @ sample['rt'] @ trafo_pc
projected_pointcloud /= projected_pointcloud[2, :]
plt.plot(projected_pointcloud[0, :],
projected_pointcloud[1, :], colors[i % 3])
y_min, x_min, y_max, x_max = sample['detection_boxes'][i]
y_min *= sample['original_image_spatial_shape'][1].numpy()
y_max *= sample['original_image_spatial_shape'][1].numpy()
x_min *= sample['original_image_spatial_shape'][0].numpy()
x_max *= sample['original_image_spatial_shape'][0].numpy()
plt.plot([x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
linestyle='dashed')
def show_sdf(sdf, figsize=5, resolution=32):
_, axis = plt.subplots(1, 3, figsize=(3*figsize, figsize))
sdf = tf.reshape(sdf, [resolution, resolution, resolution])
for a in range(3):
proj_sdf = tf.transpose(tf.reduce_min(sdf, axis=a))
c = axis[a].matshow(proj_sdf.numpy())
plt.colorbar(c, ax=axis[a])
def plot_gt_boxes_3d(sample, shape_pointclouds, figsize=5):
"""Plot."""
intrinsics = sample['k'].numpy()
pose_world2camera = sample['rt'].numpy()
_ = plt.figure(figsize=(figsize, figsize))
plt.clf()
intrinsics = np.reshape(intrinsics, [3, 3])
pose_world2camera = np.reshape(pose_world2camera, [3, 4])
# Plot ground truth boxes
# num_boxes = sample['groundtruth_boxes'].shape[0]
colors = ['r', 'g', 'b', 'c', 'm', 'y']
colors2 = ['r.', 'g.', 'b.']
for i in [2, 1, 0]:
shape_id = sample['shapes'][i]
pointcloud = tf.transpose(shape_pointclouds[shape_id])
translation = sample['translations_3d'][i]
rotation = tf.reshape(sample['rotations_3d'][i], [3, 3])
size = np.diag(sample['sizes_3d'][i])
trafo_pc = \
rotation @ size @ (pointcloud / 2.0) + tf.expand_dims(translation, 1)
trafo_pc = tf.concat([trafo_pc, tf.ones([1, 512])], axis=0)
projected_pointcloud = \
tf.reshape(sample['k'], [3, 3]) @ sample['rt'] @ trafo_pc
projected_pointcloud /= projected_pointcloud[2, :]
plt.plot(projected_pointcloud[0, :],
projected_pointcloud[1, :], 'w.', markersize=5)
plt.plot(projected_pointcloud[0, :],
projected_pointcloud[1, :], colors2[i], markersize=3)
predicted_pose_obj2world = np.eye(4)
predicted_pose_obj2world[0:3, 0:3] = \
tf.reshape(sample['rotations_3d'][i], [3, 3]).numpy()
predicted_pose_obj2world[0:3, 3] = sample['translations_3d'][i].numpy()
draw_bounding_box_3d(sample['sizes_3d'][i].numpy(),
predicted_pose_obj2world,
intrinsics, pose_world2camera,
linestyle='solid', color='w', linewidth=3)
draw_bounding_box_3d(sample['sizes_3d'][i].numpy(),
predicted_pose_obj2world,
intrinsics, pose_world2camera,
linestyle='solid', color=colors[i], linewidth=1)
# draw_coordinate_frame(intrinsics, pose_world2camera, sample['dot'])
CLASSES = ('chair', 'sofa', 'table', 'bottle', 'bowl', 'mug', 'bowl', 'mug')
def plot_boxes_2d(image, sample, predictions, projection=True, groundtruth=True,
figsize=5,
class_id_to_name=CLASSES):
"""Plot."""
batch_id = 0
figure = plt.figure(figsize=(figsize, figsize))
plt.clf()
plt.imshow(image)
if projection:
points = predictions['projected_pointclouds'].numpy()
colors = ['r.', 'g.', 'b.', 'c.', 'm.', 'y.']
# print('HERE:', points.shape)
for i in range(points.shape[0]):
# print(i, points.shape)
plt.plot(points[i, :, 0], points[i, :, 1],
colors[int(predictions['detection_classes'][i])])
# Plot ground truth boxes
if groundtruth:
sample['detection_boxes'] = sample['groundtruth_boxes'][batch_id].numpy()
for i in range(sample['detection_boxes'].shape[0]):
y_min, x_min, y_max, x_max = sample['detection_boxes'][i]
y_min *= sample['original_image_spatial_shape'][batch_id][1].numpy()
y_max *= sample['original_image_spatial_shape'][batch_id][1].numpy()
x_min *= sample['original_image_spatial_shape'][batch_id][0].numpy()
x_max *= sample['original_image_spatial_shape'][batch_id][0].numpy()
plt.plot([x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
linestyle='dashed')
# Plot predicted boxes
colors = ['r', 'g', 'b', 'c', 'm', 'y']
for i in range(predictions['detection_boxes'].shape[0]):
x_min, y_min, x_max, y_max = predictions['detection_boxes'][i]
plt.plot([x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
linestyle='solid',
color=colors[int(predictions['detection_classes'][i])])
plt.text(x_min, y_min, str(i) + '_' +
class_id_to_name[int(predictions['detection_classes'][i])] +
str(int(predictions['detection_scores'][i]*1000) / 1000.0))
plt.axis('off')
plt.tight_layout()
return figure
def plot_boxes_3d(image, sample, predictions, figsize=5, groundtruth=True,
class_id_to_name=CLASSES):
"""Plot."""
batch_id = 0
intrinsics = sample['k'][batch_id].numpy()
pose_world2camera = sample['rt'][batch_id].numpy()
figure = plt.figure(figsize=(figsize, figsize))
plt.clf()
plt.imshow(image)
intrinsics = np.reshape(intrinsics, [3, 3])
pose_world2camera = np.reshape(pose_world2camera, [3, 4])
# Plot ground truth boxes
if groundtruth:
num_boxes = sample['groundtruth_boxes'][batch_id].shape[0]
sample['detection_boxes'] = sample['groundtruth_boxes'][batch_id].numpy()
colors = ['c', 'm', 'y']
for i in range(num_boxes):
predicted_pose_obj2world = np.eye(4)
predicted_pose_obj2world[0:3, 0:3] = \
tf.reshape(sample['rotations_3d'][batch_id][i], [3, 3]).numpy()
predicted_pose_obj2world[0:3, 3] = \
sample['translations_3d'][batch_id][i].numpy()
draw_bounding_box_3d(sample['sizes_3d'][batch_id][i].numpy(),
predicted_pose_obj2world,
intrinsics, pose_world2camera,
linestyle='dashed', color=colors[i % 3])
y_min, x_min, y_max, x_max = sample['detection_boxes'][i]
y_min *= sample['original_image_spatial_shape'][batch_id][1].numpy()
y_max *= sample['original_image_spatial_shape'][batch_id][1].numpy()
x_min *= sample['original_image_spatial_shape'][batch_id][0].numpy()
x_max *= sample['original_image_spatial_shape'][batch_id][0].numpy()
plt.text(x_max, y_min,
str(i) + '_gt_' + \
class_id_to_name[int(sample['groundtruth_valid_classes'][batch_id][i])])
# Plot predicted boxes
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'c', 'm', 'y', 'c', 'm', 'y']
num_boxes = predictions['rotations_3d'].shape[0]
for i in range(num_boxes):
predicted_pose_obj2world = np.eye(4)
predicted_pose_obj2world[0:3, 0:3] = predictions['rotations_3d'][i].numpy()
predicted_pose_obj2world[0:3, 3] = predictions['translations_3d'][i].numpy()
draw_bounding_box_3d(predictions['sizes_3d'].numpy()[i],
predicted_pose_obj2world,
intrinsics, pose_world2camera,
linestyle='solid',
color=colors[int(predictions['detection_classes'][i])])
x_min, y_min, x_max, y_max = predictions['detection_boxes'][i]
plt.text(x_min, y_min, str(i) + '_' +
class_id_to_name[int(predictions['detection_classes'][i])] +
str(int(predictions['detection_scores'][i] * 1000) / 1000.0))
plt.axis('off')
plt.tight_layout()
return figure
def plot_detections(
image,
intrinsics,
pose_world2camera,
detections,
labels,
figsize=0.1):
"""Plot."""
figure = plt.figure(figsize=(figsize, figsize))
plt.clf()
plt.imshow(np.concatenate([image.astype(float)/255.0,
0.2 * np.ones([256, 256, 1])], axis=-1))
# Plot heatmaps
num_predicted_objects = detections['detection_classes'].numpy().shape[0]
for object_id in range(num_predicted_objects):
for k, color in [['centers_sigmoid', [0, 1, 0]],
['centers_nms', [1, 0, 0]]]:
class_id = int(detections['detection_classes'][object_id].numpy())
centers = detections[k][:, :, class_id].numpy()
plt.imshow(resize_heatmap(centers, color=color))
intrinsics = np.reshape(intrinsics, [3, 3])
pose_world2camera = np.reshape(pose_world2camera, [3, 4])
for j, [boxes, style] in enumerate([[labels, 'dashed'],
[detections, 'solid']]):
number_of_boxes = boxes['detection_boxes'].shape[0]
for i in range(number_of_boxes):
predicted_pose_obj2world = np.eye(4)
predicted_pose_obj2world[0:3, 0:3] = boxes['rotations_3d'][i].numpy()
predicted_pose_obj2world[0:3, 3] = boxes['center3d'][i].numpy()
draw_bounding_box_3d(boxes['size3d'].numpy()[i],
predicted_pose_obj2world,
intrinsics, pose_world2camera,
linestyle=style)
if j == 0:
if isinstance(boxes['detection_boxes'], tf.Tensor):
boxes['detection_boxes'] = boxes['detection_boxes'].numpy()
# if isinstance(boxes['detection_classes'], tf.Tensor):
# boxes['detection_classes'] = boxes['detection_classes'].numpy()
x_min, y_min, x_max, y_max = boxes['detection_boxes'][i]
# plt.text(x_min, y_min,
# class_id_to_name[int(boxes['detection_classes'][i])])
plt.plot([x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
linestyle=style)
plt.axis('off')
plt.tight_layout()
return figure
def plot_all_heatmaps(image, detections, figsize=0.1, num_classes=6):
"""Plot."""
if figsize:
print(figsize)
figure, axis = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5))
for class_id in range(num_classes):
for k, color in [['centers_sigmoid', [0, 1, 0]],
['centers_nms', [1, 0, 0]]]:
axis[class_id].imshow(np.concatenate(
[image.astype(float)/255.0, 0.5 * np.ones([256, 256, 1])], axis=-1))
centers = detections[k][:, :, class_id].numpy()
axis[class_id].imshow(resize_heatmap(centers, color=color))
return figure
def plot_gt_heatmaps(image, heatmaps, num_classes=6):
figure, axis = plt.subplots(1, num_classes, figsize=(num_classes * 4, 4))
for class_id in range(num_classes):
axis[class_id].imshow(np.concatenate(
[image, 0.5 * np.ones([image.shape[0], image.shape[1], 1])], axis=-1))
centers = heatmaps[:, :, class_id].numpy()
axis[class_id].imshow(resize_heatmap(centers, color=[255, 0, 0]))
return figure
def draw_bounding_box_3d(size, pose, camera_intrinsic, world2camera,
linestyle='solid', color=None, linewidth=1):
"""Draw bounding box."""
size = size * 0.5
origin = np.zeros([4, 1])
origin[3, 0] = 1.0
bbox3d = np.tile(origin, [1, 10]) # shape: (4, 10)
bbox3d[0:3, 0] += np.array([-size[0], -size[1], -size[2]])
bbox3d[0:3, 1] += np.array([size[0], -size[1], -size[2]])
bbox3d[0:3, 2] += np.array([size[0], -size[1], size[2]])
bbox3d[0:3, 3] += np.array([-size[0], -size[1], size[2]])
bbox3d[0:3, 4] += np.array([-size[0], size[1], -size[2]])
bbox3d[0:3, 5] += np.array([size[0], size[1], -size[2]])
bbox3d[0:3, 6] += np.array([size[0], size[1], size[2]])
bbox3d[0:3, 7] += np.array([-size[0], size[1], size[2]])
bbox3d[0:3, 8] += np.array([0.0, -size[1], 0.0])
bbox3d[0:3, 9] += np.array([0.0, -size[1], -size[2]])
projected_bbox3d = camera_intrinsic @ world2camera @ pose @ bbox3d
projected_bbox3d = projected_bbox3d[0:2, :] / projected_bbox3d[2, :]
lw = linewidth
plt.plot(projected_bbox3d[0, [0, 4, 7, 3]],
projected_bbox3d[1, [0, 4, 7, 3]],
linewidth=lw, linestyle=linestyle, color=color)
plt.plot(projected_bbox3d[0, [1, 5, 6, 2]],
projected_bbox3d[1, [1, 5, 6, 2]],
linewidth=lw, linestyle=linestyle, color=color)
plt.plot(projected_bbox3d[0, [0, 1, 2, 3, 0]],
projected_bbox3d[1, [0, 1, 2, 3, 0]],
linewidth=lw, linestyle=linestyle, color=color)
plt.plot(projected_bbox3d[0, [4, 5, 6, 7, 4]],
projected_bbox3d[1, [4, 5, 6, 7, 4]],
linewidth=lw, linestyle=linestyle, color=color)
plt.plot(projected_bbox3d[0, [8, 9]],
projected_bbox3d[1, [8, 9]],
linewidth=lw, linestyle=linestyle, color=color)
def plot_prediction(inputs, outputs, figsize=0.1, batch_id=0, plot_2d=False):
"""Plot bounding box predictions along ground truth labels.
Args:
inputs: Dict of batched inputs to the network.
outputs: Dict of batched outputs of the network.
figsize: The size of the figure.
batch_id: The batch entry to plot.
plot_2d: Whether 2D bounding boxes should be shown or not.
Returns:
A matplotlib figure.
"""
image = inputs['image'][batch_id].numpy()
size2d = inputs['box_dim2d'][batch_id].numpy()
size3d = inputs['box_dim3d'][batch_id].numpy()[[0, 2, 1]]
center2d = inputs['center2d'][batch_id].numpy()
center3d = inputs['center3d'][batch_id].numpy()
predicted_center2d = outputs['center2d'][batch_id].numpy()
predicted_size2d = outputs['size2d'][batch_id].numpy()
predicted_rotation = outputs['rotation'][batch_id].numpy()
predicted_center3d = outputs['center3d'][batch_id].numpy().T
predicted_size3d = outputs['size3d'][batch_id].numpy()[[0, 2, 1]]
# dot = outputs['dot'][batch_id].numpy()
intrinsics = inputs['k'][batch_id].numpy()
pose_world2camera = inputs['rt'][batch_id].numpy()
object_translation = np.squeeze(center3d[0:3])
object_rotation = inputs['rotation'][batch_id].numpy()
pose_obj2world = np.eye(4)
rad = np.deg2rad(object_rotation*-1)
cos = np.cos(rad)
sin = np.sin(rad)
pose_obj2world[0, 0] = cos
pose_obj2world[0, 1] = sin
pose_obj2world[1, 1] = cos
pose_obj2world[1, 0] = -sin
pose_obj2world[0:3, 3] = object_translation
predicted_pose_obj2world = np.eye(4)
predicted_pose_obj2world[0:2, 0:2] = predicted_rotation
predicted_pose_obj2world[0:3, 3] = predicted_center3d
figure = plt.figure(figsize=(figsize, figsize))
plt.clf()
plt.imshow(image / 255.)
plt.plot(center2d[0], center2d[1], 'g*')
def draw_ground_plane(camera_intrinsic, pose_world2camera):
"""Draw ground plane as grid.
Args:
camera_intrinsic: Camera intrinsic.
pose_world2camera: Camera extrinsic.
"""
line = np.array([[-3, 3, 0, 1], [3, 3, 0, 1]]).T
projected_line = camera_intrinsic @ pose_world2camera @ line
projected_line = projected_line[0:2, :] / projected_line[2, :]
plt.plot(projected_line[0, [0, 1]], projected_line[1, [0, 1]],
'black',
linewidth=1)
def draw_bounding_box_2d(center, size, style='b+-'):
bbox2d = np.tile(np.reshape(center, [1, 2]), [4, 1]) # shape: (4, 2)
bbox2d[0, :] += np.array([-size[0], -size[1]])
bbox2d[1, :] += np.array([size[0], -size[1]])
bbox2d[2, :] += np.array([size[0], size[1]])
bbox2d[3, :] += np.array([-size[0], size[1]])
plt.plot(bbox2d[[0, 1, 2, 3, 0], 0], bbox2d[[0, 1, 2, 3, 0], 1], style)
draw_bounding_box_3d(size3d, pose_obj2world, intrinsics,
pose_world2camera, 'dashed')
draw_ground_plane(intrinsics, pose_world2camera)
# draw_coordinate_frame(intrinsics, pose_world2camera)
draw_bounding_box_3d(predicted_size3d, predicted_pose_obj2world,
intrinsics, pose_world2camera)
if plot_2d:
draw_bounding_box_2d(center2d, size2d / 2, 'g-')
draw_bounding_box_2d(predicted_center2d, predicted_size2d / 2, 'b-')
return figure
def matrix_from_angle(angle: float, axis: int):
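  # Rotation matrix about the given axis; only axis == 1 (the y-axis) is filled in,
  # any other axis yields the identity.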
matrix = np.eye(3)
if axis == 1:
matrix[0, 0] = np.cos(angle)
matrix[2, 2] = np.cos(angle)
matrix[2, 0] = -np.sin(angle)
matrix[0, 2] = np.sin(angle)
return matrix
def save_for_blender(detections,
sample,
log_dir, dict_clusters, shape_pointclouds,
class_id_to_name=CLASSES):
"""Save for blender."""
# VisualDebugging uses the OpenCV coordinate representation
# while the dataset uses OpenGL (left-hand) so make sure to convert y and z.
batch_id = 0
prefix = '/cns/lu-d/home/giotto3d/datasets/shapenet/raw/'
sufix = 'models/model_normalized.obj'
blender_dict = {}
blender_dict['image'] = \
tf.io.decode_image(sample['image_data'][batch_id]).numpy()
blender_dict['world_to_cam'] = sample['rt'].numpy()
num_predicted_shapes = int(detections['sizes_3d'].shape[0])
blender_dict['num_predicted_shapes'] = num_predicted_shapes
blender_dict['predicted_rotations_3d'] = \
tf.reshape(detections['rotations_3d'], [-1, 3, 3]).numpy()
blender_dict['predicted_rotations_y'] = [
tf_utils.euler_from_rotation_matrix(
tf.reshape(detections['rotations_3d'][i], [3, 3]), 1).numpy()
for i in range(num_predicted_shapes)]
blender_dict['predicted_translations_3d'] = \
detections['translations_3d'].numpy()
blender_dict['predicted_sizes_3d'] = detections['sizes_3d'].numpy()
predicted_shapes_path = []
for i in range(num_predicted_shapes):
shape = detections['shapes'][i].numpy()
_, class_str, model_str = dict_clusters[shape]
filename = os.path.join(prefix, class_str, model_str, sufix)
predicted_shapes_path.append(filename)
blender_dict['predicted_shapes_path'] = predicted_shapes_path
blender_dict['predicted_class'] = [
class_id_to_name[int(detections['detection_classes'][i].numpy())]
for i in range(num_predicted_shapes)]
blender_dict['predicted_pointcloud'] = [
shape_pointclouds[int(detections['shapes'][i].numpy())]
for i in range(num_predicted_shapes)]
num_groundtruth_shapes = int(sample['sizes_3d'][batch_id].shape[0])
blender_dict['num_groundtruth_shapes'] = num_groundtruth_shapes
blender_dict['groundtruth_rotations_3d'] = \
tf.reshape(sample['rotations_3d'][batch_id], [-1, 3, 3]).numpy()
blender_dict['groundtruth_rotations_y'] = [
tf_utils.euler_from_rotation_matrix(
tf.reshape(sample['rotations_3d'][batch_id][i], [3, 3]), 1).numpy()
for i in range(sample['num_boxes'][batch_id].numpy())]
blender_dict['groundtruth_translations_3d'] = \
sample['translations_3d'][batch_id].numpy()
blender_dict['groundtruth_sizes_3d'] = sample['sizes_3d'][batch_id].numpy()
groundtruth_shapes_path = []
for i in range(num_groundtruth_shapes):
class_str = str(sample['classes'][batch_id, i].numpy()).zfill(8)
model_str = str(sample['mesh_names'][batch_id, i].numpy())[2:-1]
filename = os.path.join(prefix, class_str, model_str, sufix)
groundtruth_shapes_path.append(filename)
blender_dict['groundtruth_shapes_path'] = groundtruth_shapes_path
blender_dict['groundtruth_classes'] = \
sample['groundtruth_valid_classes'].numpy()
path = log_dir + '.pkl'
with gfile.Open(path, 'wb') as file:
pickle.dump(blender_dict, file)
def obj_read_for_gl(filename, texture_size=(32, 32)):
"""Read vertex and part information from OBJ file."""
if texture_size:
print(texture_size)
with gfile.Open(filename, 'r') as f:
content = f.readlines()
vertices = []
texture_coords = []
vertex_normals = []
group_name = None
material_name = None
faces = []
faces_tex = []
faces_normals = []
face_groups = []
material_ids = []
for i in range(len(content)):
line = content[i]
parts = re.split(r'\s+', line)
# if parts[0] == 'mtllib':
# material_file = parts[1]
# Vertex information -----------------------------------------------------
if parts[0] == 'v':
vertices.append([float(v) for v in parts[1:4]])
if parts[0] == 'vt':
texture_coords.append([float(v) for v in parts[1:4]])
if parts[0] == 'vn':
vertex_normals.append([float(v) for v in parts[1:4]])
if parts[0] == 'g':
group_name = parts[1]
if parts[0] == 'usemtl':
material_name = parts[1]
# Face information ------------------------------------------------------
if parts[0] == 'f':
vertex_index, tex_index, normal_index = 0, 0, 0
current_face, current_face_tex, current_face_norm = [], [], []
for j in range(1, 4):
face_info = parts[j]
if face_info.count('/') == 2:
vertex_index, tex_index, normal_index = face_info.split('/')
if not tex_index:
tex_index = 0
elif face_info.count('/') == 1:
vertex_index, tex_index = face_info.split('/')
elif face_info.count('/') == 0:
vertex_index = face_info
current_face.append(int(vertex_index)-1)
current_face_tex.append(int(tex_index)-1)
current_face_norm.append(int(normal_index)-1)
faces.append(current_face)
faces_tex.append(current_face_tex)
faces_normals.append(current_face_norm)
face_groups.append(group_name)
material_ids.append(material_name)
vertices = np.array(vertices)
texture_coords = np.array(texture_coords)
vertex_normals = np.array(vertex_normals)
has_tex_coord, has_normals = True, True
if texture_coords.shape[0] == 0:
has_tex_coord = False
if vertex_normals.shape[0] == 0:
has_normals = False
faces = np.array(faces)
faces_tex = np.array(faces_tex)
faces_normals = np.array(faces_normals)
n_faces = faces.shape[0]
vertex_positions = np.zeros((n_faces, 3, 3), dtype=np.float32)
tex_coords = np.zeros((n_faces, 3, 2), dtype=np.float32)
normals = np.zeros((n_faces, 3, 3), dtype=np.float32)
for i in range(n_faces):
for j in range(3):
vertex_positions[i, j, :] = vertices[faces[i, j], :]
if has_tex_coord:
tex_coords[i, j, :] = texture_coords[faces_tex[i, j], :2]
if has_normals:
normals[i, j, :] = vertex_normals[faces_normals[i, j], :]
# Material info --------------------------------------------------------------
return vertex_positions, \
tex_coords, \
normals, \
material_ids, \
vertices, \
faces
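# Usage sketch (added for illustration); 'model.obj' is a placeholder path, not a
# file shipped with this repository.
def example_obj_read(filename='model.obj'):
  """Print the array shapes produced by obj_read_for_gl for a triangulated OBJ."""
  vertex_positions, tex_coords, normals, material_ids, vertices, faces = (
      obj_read_for_gl(filename))
  # vertex_positions is [n_faces, 3, 3]; tex_coords [n_faces, 3, 2] and
  # normals [n_faces, 3, 3] stay zero-filled when the OBJ has no 'vt'/'vn' records.
  print(vertex_positions.shape, tex_coords.shape, normals.shape,
        len(material_ids), vertices.shape, faces.shape)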
def plot_labeled_2d_boxes(sample, batch_id=0):
"""Plot."""
image = tf.io.decode_image(sample['image_data'][batch_id]).numpy() / 255.
image = sample['image'][batch_id].numpy()[..., ::-1]
image2 = np.reshape(image, [-1, 3])
image2 -= np.min(image2, axis=0)
image2 /= np.max(image2, axis=0)
image = np.reshape(image2, [256, 256, 3])
sample['detection_boxes'] = sample['groundtruth_boxes'][batch_id].numpy()
figure = plt.figure(figsize=(5, 5))
plt.clf()
plt.imshow(image)
for i in range(sample['groundtruth_boxes'][batch_id].shape[0]):
y_min, x_min, y_max, x_max = sample['detection_boxes'][i] * 256.0
plt.plot([x_min, x_max, x_max, x_min, x_min],
[y_min, y_min, y_max, y_max, y_min],
linestyle='dashed')
return figure
|
couchbase/tests_v3/cases/analyticsmgmt_t.py | couchbase/couchbase-python-client | 189 | 12644233 | # -*- coding:utf-8 -*-
#
# Copyright 2020, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import os
from couchbase_core import ulp
from couchbase_tests.base import CollectionTestCase, SkipTest
from couchbase.management.analytics import (CreateDataverseOptions, DropDataverseOptions, CreateDatasetOptions,
CreateAnalyticsIndexOptions, DropAnalyticsIndexOptions,
DropDatasetOptions, ConnectLinkOptions, DisconnectLinkOptions,
GetLinksAnalyticsOptions)
from couchbase.exceptions import (AnalyticsLinkExistsException, DataverseAlreadyExistsException,
DataverseNotFoundException, DatasetAlreadyExistsException, DatasetNotFoundException,
InvalidArgumentException, NotSupportedException, CompilationFailedException,
ParsingFailedException, AnalyticsLinkNotFoundException)
from couchbase.analytics import (AnalyticsDataType, AnalyticsEncryptionLevel,
AnalyticsLink, AnalyticsLinkType, AzureBlobExternalAnalyticsLink,
CouchbaseAnalyticsEncryptionSettings, CouchbaseRemoteAnalyticsLink, S3ExternalAnalyticsLink)
class AnalyticsIndexManagerTests(CollectionTestCase):
def setUp(self):
super(AnalyticsIndexManagerTests, self).setUp()
self._enable_print_statements = False
if self.is_mock:
raise SkipTest("mock doesn't mock management apis")
if int(self.get_cluster_version().split('.')[0]) < 6:
raise SkipTest("no analytics in {}".format(
self.get_cluster_version()))
self.mgr = self.cluster.analytics_indexes()
self.dataverse_name = "test/dataverse" if int(
self.get_cluster_version().split('.')[0]) == 7 else "test_dataverse"
self.dataset_name = "test_breweries"
# be sure the dataverse exists
self.mgr.create_dataverse(
self.dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
# now ensure our dataset in there
self.mgr.create_dataset(self.dataset_name,
"beer-sample",
CreateDatasetOptions(dataverse_name=self.dataverse_name,
condition='`type` = "brewery"',
ignore_if_exists=True)
)
try:
self.mgr.disconnect_link(DisconnectLinkOptions(
dataverse_name=self.dataverse_name))
except BaseException:
pass
def tearDown(self):
super(AnalyticsIndexManagerTests, self).tearDown()
# be sure the dataverse doesn't exist
try:
dataverse_name = self.mgr._scrub_dataverse_name(
self.dataverse_name)
self.cluster.analytics_query(
"USE {}; DISCONNECT LINK Local;".format(dataverse_name)).metadata()
except DataverseNotFoundException:
pass
try:
self.mgr.disconnect_link(DisconnectLinkOptions(
dataverse_name=self.dataverse_name))
except BaseException:
pass
self.try_n_times(10, 3,
self.mgr.drop_dataverse, self.dataverse_name,
DropDatasetOptions(ignore_if_not_exists=True))
def assertRows(self, query, iterations=10, pause_time=3):
for _ in range(iterations):
resp = self.cluster.analytics_query(query)
for r in resp.rows():
return
time.sleep(pause_time)
self.fail("query '{}' yielded no rows after {} attempts pausing {} sec between attempts"
.format(query, iterations, pause_time))
def test_create_dataverse(self):
# lets query for the existence of test-dataverse
statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="{}";'.format(
self.dataverse_name)
result = self.cluster.analytics_query(statement)
self.assertEqual(1, len(result.rows()))
def test_create_dataverse_ignore_exists(self):
self.assertRaises(DataverseAlreadyExistsException,
self.mgr.create_dataverse, self.dataverse_name)
self.mgr.create_dataverse(
self.dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
def test_drop_dataverse(self):
self.mgr.drop_dataverse(self.dataverse_name)
self.mgr.connect_link()
statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="{}";'.format(
self.dataverse_name)
result = self.cluster.analytics_query(statement)
self.assertEqual(0, len(result.rows()))
def test_drop_dataverse_ignore_not_exists(self):
self.mgr.drop_dataverse(self.dataverse_name)
self.assertRaises(DataverseNotFoundException,
self.mgr.drop_dataverse, self.dataverse_name)
self.mgr.drop_dataverse(
self.dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_create_dataset(self):
# we put a dataset in during the setUp, so...
datasets = self.mgr.get_all_datasets()
for dataset in datasets:
if self._enable_print_statements:
print(dataset)
if dataset.dataset_name == self.dataset_name:
return
self.fail("didn't find {} in listing of all datasets".format(
self.dataset_name))
def test_create_dataset_ignore_exists(self):
self.assertRaises(DatasetAlreadyExistsException, self.mgr.create_dataset, self.dataset_name, 'beer-sample',
CreateDatasetOptions(dataverse_name=self.dataverse_name))
self.mgr.create_dataset(self.dataset_name, 'beer-sample',
CreateDatasetOptions(dataverse_name=self.dataverse_name), ignore_if_exists=True)
def test_drop_dataset(self):
self.mgr.drop_dataset(self.dataset_name, DropDatasetOptions(
dataverse_name=self.dataverse_name))
self.assertRaises(DatasetNotFoundException, self.mgr.drop_dataset, self.dataset_name,
DropDatasetOptions(dataverse_name=self.dataverse_name))
self.mgr.drop_dataset(self.dataset_name, DropDatasetOptions(dataverse_name=self.dataverse_name,
ignore_if_not_exists=True))
def test_create_index(self):
self.mgr.create_index("test_brewery_idx", self.dataset_name,
{'name': AnalyticsDataType.STRING,
'description': AnalyticsDataType.STRING},
CreateAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
def check_for_idx(idx):
indexes = self.mgr.get_all_indexes()
for index in indexes:
if self._enable_print_statements:
print(index)
if index.name == idx:
return
raise Exception(
"unable to find 'test_brewery_idx' in list of all indexes")
self.try_n_times(10, 3, check_for_idx, 'test_brewery_idx')
def test_drop_index(self):
# create one first, if not already there
self.mgr.create_index("test_brewery_idx", self.dataset_name,
{'name': AnalyticsDataType.STRING,
'description': AnalyticsDataType.STRING},
CreateAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
def check_for_idx(idx):
indexes = self.mgr.get_all_indexes()
for index in indexes:
if self._enable_print_statements:
print(index)
if index.name == idx:
return
raise Exception(
"unable to find 'test_brewery_idx' in list of all indexes")
self.try_n_times(10, 3, check_for_idx, 'test_brewery_idx')
self.mgr.drop_index("test_brewery_idx", self.dataset_name,
DropAnalyticsIndexOptions(dataverse_name=self.dataverse_name))
self.try_n_times_till_exception(
10, 3, check_for_idx, 'test_brewery_idx')
def test_connect_link(self):
self.mgr.connect_link(ConnectLinkOptions(
dataverse_name=self.dataverse_name))
# connect link should result in documents in the dataset, so...
dataverse_name = self.mgr._scrub_dataverse_name(self.dataverse_name)
self.assertRows(
'USE {}; SELECT * FROM `{}` LIMIT 1'.format(dataverse_name, self.dataset_name))
# manually stop it for now
self.cluster.analytics_query(
            'USE {}; DISCONNECT LINK Local'.format(dataverse_name)).metadata()
def test_get_pending_mutations(self):
try:
result = self.mgr.get_pending_mutations()
if self._enable_print_statements:
# we expect no test_dataverse key yet
print(result)
self.assertFalse("test_dataverse" in result.keys())
self.mgr.connect_link(ConnectLinkOptions(
dataverse_name=self.dataverse_name))
time.sleep(5)
result = self.mgr.get_pending_mutations()
if self._enable_print_statements:
print(result)
dataverse_name = self.mgr._scrub_dataverse_name(
self.dataverse_name).replace("`", "")
self.assertTrue(dataverse_name in result.keys())
except NotSupportedException:
raise SkipTest(
"get pending mutations not supported on this cluster")
def test_v6_dataverse_name_parsing(self):
if int(self.cluster_version.split('.')[0]) != 6:
raise SkipTest("Test only for 6.x versions")
# wish the analytics service was consistent here :/
if float(self.cluster_version[:3]) >= 6.6:
with self.assertRaises(CompilationFailedException):
self.mgr.create_dataverse(
"test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
else:
self.mgr.create_dataverse(
"test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
# wish the analytics service was consistent here also :/
with self.assertRaises(ParsingFailedException):
# test/beer_sample => `test`.`beer_sample` which is not valid prior
# to 7.0
self.mgr.create_dataverse(
"test/beer_sample", CreateDataverseOptions(ignore_if_exists=True))
def test_v7_dataverse_name_parsing(self):
if int(self.cluster_version.split('.')[0]) != 7:
raise SkipTest("Test only for 7.x versions")
# test.beer_sample => `test.beer_sample` which is valid >= 7.0
self.mgr.create_dataverse(
"test.beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="test.beer_sample";'
result = self.cluster.analytics_query(statement)
self.assertEqual(1, len(result.rows()))
self.mgr.drop_dataverse("test.beer_sample")
# test/beer_sample => `test`.`beer_sample` which is valid >= 7.0
self.mgr.create_dataverse(
"test/beer_sample", CreateDataverseOptions(ignore_if_exists=True))
        statement = 'SELECT * FROM Metadata.`Dataverse` WHERE DataverseName="test/beer_sample";'
result = self.cluster.analytics_query(statement)
self.assertEqual(1, len(result.rows()))
self.mgr.drop_dataverse("test/beer_sample")
class AnalyticsIndexManagerLinkTests(CollectionTestCase):
def setUp(self):
super(AnalyticsIndexManagerLinkTests, self).setUp()
if self.is_mock:
raise SkipTest("mock doesn't mock management apis")
if int(self.cluster_version.split('.')[0]) < 6:
raise SkipTest("no analytics in {}".format(
self.cluster_version))
if int(self.cluster_version.split('.')[0]) < 7:
raise SkipTest("No analytics link management API in {}".format(
self.cluster_version))
self.mgr = self.cluster.analytics_indexes()
def test_couchbase_remote_link_encode(self):
link = CouchbaseRemoteAnalyticsLink("test_dataverse",
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
encoded = link.form_encode().decode()
query_str = ulp.parse_qs(encoded)
self.assertEqual("localhost", query_str.get("hostname")[0])
self.assertEqual(AnalyticsLinkType.CouchbaseRemote.value,
query_str.get("type")[0])
self.assertEqual(AnalyticsEncryptionLevel.NONE.value,
query_str.get("encryption")[0])
self.assertEqual("Administrator", query_str.get("username")[0])
self.assertEqual("password", query_str.get("password")[0])
link = CouchbaseRemoteAnalyticsLink("test_dataverse",
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.FULL,
certificate=bytes(
'certificate', 'utf-8'),
client_certificate=bytes(
'clientcertificate', 'utf-8'),
client_key=bytes('clientkey', 'utf-8')),
)
encoded = link.form_encode().decode()
query_str = ulp.parse_qs(encoded)
self.assertEqual("localhost", query_str.get("hostname")[0])
self.assertEqual(AnalyticsLinkType.CouchbaseRemote.value,
query_str.get("type")[0])
self.assertEqual(AnalyticsEncryptionLevel.FULL.value,
query_str.get("encryption")[0])
self.assertEqual("certificate", query_str.get("certificate")[0])
self.assertEqual("clientcertificate",
query_str.get("clientCertificate")[0])
self.assertEqual("clientkey", query_str.get("clientKey")[0])
def test_s3_external_link(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
link1 = S3ExternalAnalyticsLink(dataverse_name,
"s3link1",
"accesskey1",
"us-east-2",
secret_access_key="mysupersecretkey1",
)
self.mgr.create_link(link)
self.mgr.create_link(link1)
links = self.mgr.get_links()
self.assertEqual(2, len(links))
for l in links:
link_match = (l.dataverse_name() == link.dataverse_name()
and l.name() == link.name()
and l.link_type() == AnalyticsLinkType.S3External
and l._region == link._region
and l._access_key_id == link._access_key_id)
link1_match = (l.dataverse_name() == link1.dataverse_name()
and l.name() == link1.name()
and l.link_type() == AnalyticsLinkType.S3External
and l._region == link1._region
and l._access_key_id == link1._access_key_id)
self.assertTrue(link_match or link1_match)
links = self.mgr.get_links(GetLinksAnalyticsOptions(
dataverse_name=dataverse_name, name=link.name()))
self.assertEqual(1, len(links))
self.assertTrue(links[0].dataverse_name() == link.dataverse_name()
and links[0].name() == link.name()
and links[0].link_type() == AnalyticsLinkType.S3External
and links[0]._region == link._region
and links[0]._access_key_id == link._access_key_id)
new_link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"eu-west-2",
secret_access_key="mysupersecretkey1",
)
self.mgr.replace_link(new_link)
links = self.mgr.get_links()
self.assertEqual(2, len(links))
links = self.mgr.get_links(GetLinksAnalyticsOptions(
dataverse_name=dataverse_name, name=new_link.name()))
self.assertEqual(1, len(links))
self.assertTrue(links[0].dataverse_name() == new_link.dataverse_name()
and links[0].name() == new_link.name()
and links[0].link_type() == AnalyticsLinkType.S3External
and links[0]._region == new_link._region
and links[0]._access_key_id == new_link._access_key_id)
self.mgr.drop_link("s3link", dataverse_name)
self.mgr.drop_link("s3link1", dataverse_name)
links = self.mgr.get_links()
self.assertEqual(0, len(links))
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_s3_external_link_compound_dataverse(self):
dataverse_name = "test/dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
link1 = S3ExternalAnalyticsLink(dataverse_name,
"s3link1",
"accesskey1",
"us-east-2",
secret_access_key="mysupersecretkey1",
)
self.mgr.create_link(link)
self.mgr.create_link(link1)
links = self.mgr.get_links()
self.assertEqual(2, len(links))
for l in links:
link_match = (l.dataverse_name() == link.dataverse_name()
and l.name() == link.name()
and l.link_type() == AnalyticsLinkType.S3External
and l._region == link._region
and l._access_key_id == link._access_key_id)
link1_match = (l.dataverse_name() == link1.dataverse_name()
and l.name() == link1.name()
and l.link_type() == AnalyticsLinkType.S3External
and l._region == link1._region
and l._access_key_id == link1._access_key_id)
self.assertTrue(link_match or link1_match)
links = self.mgr.get_links(GetLinksAnalyticsOptions(
dataverse_name=dataverse_name, name=link.name()))
self.assertEqual(1, len(links))
self.assertTrue(links[0].dataverse_name() == link.dataverse_name()
and links[0].name() == link.name()
and links[0].link_type() == AnalyticsLinkType.S3External
and links[0]._region == link._region
and links[0]._access_key_id == link._access_key_id)
new_link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"eu-west-2",
secret_access_key="mysupersecretkey1",
)
self.mgr.replace_link(new_link)
links = self.mgr.get_links()
self.assertEqual(2, len(links))
links = self.mgr.get_links(GetLinksAnalyticsOptions(
dataverse_name=dataverse_name, name=new_link.name()))
self.assertEqual(1, len(links))
self.assertTrue(links[0].dataverse_name() == new_link.dataverse_name()
and links[0].name() == new_link.name()
and links[0].link_type() == AnalyticsLinkType.S3External
and links[0]._region == new_link._region
and links[0]._access_key_id == new_link._access_key_id)
self.mgr.drop_link("s3link", dataverse_name)
self.mgr.drop_link("s3link1", dataverse_name)
links = self.mgr.get_links()
self.assertEqual(0, len(links))
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_create_link_fail_link_exists(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
self.mgr.create_link(link)
with self.assertRaises(AnalyticsLinkExistsException):
self.mgr.create_link(link)
self.mgr.drop_link("s3link", dataverse_name)
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_link_fail_dataverse_not_found(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink("notadataverse",
"s3link",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(DataverseNotFoundException):
self.mgr.create_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
link = CouchbaseRemoteAnalyticsLink("notadataverse",
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
with self.assertRaises(DataverseNotFoundException):
self.mgr.create_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
link = AzureBlobExternalAnalyticsLink("notadataverse",
"azurebloblink",
account_name="myaccount",
account_key="myaccountkey")
with self.assertRaises(DataverseNotFoundException):
self.mgr.create_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(DataverseNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_create_couchbase_link_fail_invalid_argument(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = CouchbaseRemoteAnalyticsLink("",
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
password="password")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.HALF),
password="password")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.HALF),
username="Administrator")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.FULL)
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.FULL,
certificate=bytes('certificate', 'utf-8'))
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.FULL,
certificate=bytes(
'certificate', 'utf-8'),
client_certificate=bytes('clientcert', 'utf-8'))
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.FULL,
certificate=bytes(
'certificate', 'utf-8'),
client_key=bytes('clientkey', 'utf-8'))
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_create_s3_link_fail_invalid_argument(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink("",
"s3link",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = S3ExternalAnalyticsLink(dataverse_name,
"",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"",
"us-west-2",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = S3ExternalAnalyticsLink(dataverse_name,
"s3link",
"accesskey",
"",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = S3ExternalAnalyticsLink("",
"s3link",
"accesskey",
"us-west-2",
)
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_create_azureblob_link_fail_invalid_argument(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = AzureBlobExternalAnalyticsLink("",
"azurebloblink",
account_name="myaccount",
account_key="myaccountkey")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"",
account_name="myaccount",
account_key="myaccountkey")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"azurebloblink")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"azurebloblink",
account_name="myaccount")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"azurebloblink",
account_key="myaccountkey")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"azurebloblink",
shared_access_signature="sharedaccesssignature")
with self.assertRaises(InvalidArgumentException):
self.mgr.create_link(link)
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_link_fail_link_not_found(self):
dataverse_name = "test_dataverse"
self.mgr.create_dataverse(
dataverse_name, CreateDataverseOptions(ignore_if_exists=True))
link = S3ExternalAnalyticsLink(dataverse_name,
"notalink",
"accesskey",
"us-west-2",
secret_access_key="mysupersecretkey",
)
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
link = CouchbaseRemoteAnalyticsLink(dataverse_name,
"cbremote",
"localhost",
CouchbaseAnalyticsEncryptionSettings(
AnalyticsEncryptionLevel.NONE),
username="Administrator",
password="password")
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
link = AzureBlobExternalAnalyticsLink(dataverse_name,
"azurebloblink",
account_name="myaccount",
account_key="myaccountkey")
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.replace_link(link)
with self.assertRaises(AnalyticsLinkNotFoundException):
self.mgr.drop_link(link.name(), link.dataverse_name())
self.mgr.drop_dataverse(
dataverse_name, DropDataverseOptions(ignore_if_not_exists=True))
def test_get_links_fail(self):
with self.assertRaises(DataverseNotFoundException):
self.mgr.get_links(GetLinksAnalyticsOptions(
dataverse_name="notadataverse"))
with self.assertRaises(InvalidArgumentException):
self.mgr.get_links(GetLinksAnalyticsOptions(name="mylink"))
|
tests/test/coverage/test_ternery_branches.py | x3devships/brownie | 1,595 | 12644234 | #!/usr/bin/python3
def test_ternery1(evmtester, branch_results):
evmtester.terneryBranches(1, True, False, False, False)
results = branch_results()
assert [2582, 2583] in results[True]
assert [2610, 2611] in results[False]
evmtester.terneryBranches(1, False, False, False, False)
results = branch_results()
assert [2582, 2583] in results[False]
assert [2610, 2611] in results[True]
def test_ternery2(evmtester, branch_results):
evmtester.terneryBranches(2, False, False, False, False)
results = branch_results()
for i in [2670, 2704, 2709]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(2, True, False, False, False)
results = branch_results()
assert [2675, 2676] in results[False]
for i in [2670, 2704]:
assert [i, i + 1] in results[True]
evmtester.terneryBranches(2, False, True, False, False)
results = branch_results()
assert [2709, 2710] in results[True]
for i in [2670, 2704]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(2, True, True, False, False)
results = branch_results()
for i in [2670, 2675, 2704]:
assert [i, i + 1] in results[True]
def test_ternery3(evmtester, branch_results):
evmtester.terneryBranches(3, False, False, False, False)
results = branch_results()
for i in [2771, 2777, 2807]:
assert [i, i + 1] in results[True]
evmtester.terneryBranches(3, True, False, False, False)
results = branch_results()
assert [2813, 2814] in results[True]
for i in [2771, 2807]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(3, False, True, False, False)
results = branch_results()
assert [2777, 2778] in results[False]
for i in [2771, 2807]:
assert [i, i + 1] in results[True]
evmtester.terneryBranches(3, True, True, False, False)
results = branch_results()
for i in [2771, 2807, 2813]:
assert [i, i + 1] in results[False]
def test_ternery4(evmtester, branch_results):
evmtester.terneryBranches(4, False, False, False, False)
results = branch_results()
for i in [2874, 2913, 2918, 2923]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(4, True, False, False, False)
results = branch_results()
assert [2879, 2880] in results[False]
for i in [2874, 2913]:
assert [i, i + 1] in results[True]
evmtester.terneryBranches(4, False, True, False, False)
results = branch_results()
assert [2918, 2919] in results[True]
for i in [2874, 2913]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(4, False, False, True, False)
results = branch_results()
assert [2923, 2924] in results[True]
for i in [2874, 2913, 2918]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(4, True, True, True, False)
results = branch_results()
for i in [2874, 2879, 2884, 2913]:
assert [i, i + 1] in results[True]
def test_ternery5(evmtester, branch_results):
evmtester.terneryBranches(5, True, True, True, True)
results = branch_results()
for i in [2985, 3027, 3033, 3039]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(5, False, True, True, True)
results = branch_results()
assert [2991, 2992] in results[False]
for i in [2985, 3027]:
assert [i, i + 1] in results[True]
evmtester.terneryBranches(5, True, False, True, True)
results = branch_results()
assert [3033, 3034] in results[True]
for i in [2985, 3027]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(5, True, True, False, True)
results = branch_results()
assert [3039, 3040] in results[True]
for i in [2985, 3027, 3033]:
assert [i, i + 1] in results[False]
evmtester.terneryBranches(5, False, False, False, False)
results = branch_results()
for i in [2985, 2991, 2997, 3027]:
assert [i, i + 1] in results[True]
|
tests/utils/test_capture_stdout.py | ai-fast-track/mantisshrimp | 580 | 12644235 | <reponame>ai-fast-track/mantisshrimp<gh_stars>100-1000
from icevision.all import *
def test_capture_stdout_simple():
with CaptureStdout() as out:
print("mantis")
print("shrimp")
assert out == ["mantis", "shrimp"]
def test_capture_stdout_propagate():
with CaptureStdout() as out1:
print("mantis")
with CaptureStdout(propagate_stdout=True) as out2:
print("shrimp")
assert out1 == ["mantis", "shrimp"]
def test_capture_stdout_block_propagate():
with CaptureStdout() as out1:
print("mantis")
with CaptureStdout() as out2:
print("shrimp")
assert out1 == ["mantis"]
assert out2 == ["shrimp"]
|
pylayers/mobility/ban/test/test_body.py | usmanwardag/pylayers | 143 | 12644247 | from pylayers.mobility.ban.body import *
from pylayers.mobility.trajectory import *
t = Trajectory()
bc = Body()
|
src/ethereum/tangerine_whistle/utils/__init__.py | petertdavies/execution-specs | 102 | 12644275 | <reponame>petertdavies/execution-specs
"""
Tangerine Whistle Utility Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Utility functions used in this tangerine whistle version of the specification.
"""
|
tools/rust_analyzer/deps.bzl | yesudeep/rules_rust | 349 | 12644283 | <filename>tools/rust_analyzer/deps.bzl<gh_stars>100-1000
"""
The dependencies for running the gen_rust_project binary.
"""
load("//tools/rust_analyzer/raze:crates.bzl", "rules_rust_tools_rust_analyzer_fetch_remote_crates")
def rust_analyzer_deps():
rules_rust_tools_rust_analyzer_fetch_remote_crates()
# For legacy support
gen_rust_project_dependencies = rust_analyzer_deps
|
tools/vis_utils.py | PeterouZh/improved-nerfmm | 153 | 12644284 | from models.volume_rendering import volume_render
import torch
import numpy as np
from tqdm import tqdm
def get_rays_opencv_np(intrinsics: np.ndarray, c2w: np.ndarray, H: int, W: int):
'''
ray batch sampling
< opencv / colmap convention, standard pinhole camera >
the camera is facing [+z] direction, x right, y downwards
                z
               ↗
              /
             /
            o------> x
            |
            |
            |
            ↓
            y
:param H: image height
:param W: image width
:param intrinsics: [3, 3] or [4,4] intrinsic matrix
:param c2w: [...,4,4] or [...,3,4] camera to world extrinsic matrix
:return:
'''
prefix = c2w.shape[:-2] # [...]
# [H, W]
u, v = np.meshgrid(np.arange(W), np.arange(H))
# [H*W]
u = u.reshape(-1).astype(dtype=np.float32) + 0.5 # add half pixel
v = v.reshape(-1).astype(dtype=np.float32) + 0.5
# [3, H*W]
pixels = np.stack((u, v, np.ones_like(u)), axis=0)
# [3, H*W]
rays_d = np.matmul(np.linalg.inv(intrinsics[:3, :3]), pixels)
# [..., 3, H*W] = [..., 3, 3] @ [1,1,..., 3, H*W], with broadcasting
rays_d = np.matmul(c2w[..., :3, :3], rays_d.reshape([*len(prefix)*[1], 3, H*W]))
# [..., H*W, 3]
rays_d = np.moveaxis(rays_d, -1, -2)
# [..., 1, 3] -> [..., H*W, 3]
rays_o = np.tile(c2w[..., None, :3, 3], [*len(prefix)*[1], H*W, 1])
return rays_o, rays_d
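# Illustrative usage sketch (added for illustration, not part of the original module):
# builds a toy pinhole intrinsic and an identity camera-to-world pose to show the
# returned shapes. The focal length and image size below are placeholders.
def _example_get_rays():
    H, W = 4, 6
    intrinsics = np.array([[100.0, 0.0, W / 2.0],
                           [0.0, 100.0, H / 2.0],
                           [0.0, 0.0, 1.0]], dtype=np.float32)
    c2w = np.eye(4, dtype=np.float32)  # camera at the origin, looking along +z
    rays_o, rays_d = get_rays_opencv_np(intrinsics, c2w, H, W)
    # an un-batched [4, 4] pose yields rays of shape [H*W, 3];
    # a batched [B, 4, 4] pose yields [B, H*W, 3]
    assert rays_o.shape == (H * W, 3) and rays_d.shape == (H * W, 3)
    return rays_o, rays_d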
def render_full(intr: np.ndarray, c2w: np.ndarray, H, W, near, far, render_kwargs, scene_model, device="cuda", batch_size=1, imgscale=True):
rgbs = []
depths = []
scene_model.to(device)
if len(c2w.shape) == 2:
c2w = c2w[None, ...]
render_kwargs['batched'] = True
def to_img(tensor):
tensor = tensor.reshape(tensor.shape[0], H, W, -1).data.cpu().numpy()
if imgscale:
return (255*np.clip(tensor, 0, 1)).astype(np.uint8)
else:
return tensor
def render_chunk(c2w):
rays_o, rays_d = get_rays_opencv_np(intr, c2w, H, W)
rays_o = torch.from_numpy(rays_o).float().to(device)
rays_d = torch.from_numpy(rays_d).float().to(device)
with torch.no_grad():
rgb, depth, _ = volume_render(
rays_o=rays_o,
rays_d=rays_d,
                detailed_output=False,  # set to True to also return acc map and disp map
show_progress=True,
**render_kwargs)
if imgscale:
depth = (depth-near)/(far-near)
return to_img(rgb), to_img(depth)
for i in tqdm(range(0, c2w.shape[0], batch_size), desc="=> Rendering..."):
rgb_i, depth_i = render_chunk(c2w[i:i+batch_size])
rgbs += [*rgb_i]
depths += [*depth_i]
return rgbs, depths |
homeassistant/components/uvc/__init__.py | domwillcode/home-assistant | 30,023 | 12644287 | <reponame>domwillcode/home-assistant
"""The uvc component."""
|
Chapter02/indices.py | shoshan/Clean-Code-in-Python | 402 | 12644295 | <gh_stars>100-1000
"""Indexes and slices
Getting elements by an index or range
"""
import doctest
def index_last():
"""
>>> my_numbers = (4, 5, 3, 9)
>>> my_numbers[-1]
9
>>> my_numbers[-3]
5
"""
def get_slices():
"""
>>> my_numbers = (1, 1, 2, 3, 5, 8, 13, 21)
>>> my_numbers[2:5]
(2, 3, 5)
>>> my_numbers[:3]
(1, 1, 2)
>>> my_numbers[3:]
(3, 5, 8, 13, 21)
>>> my_numbers[::]
(1, 1, 2, 3, 5, 8, 13, 21)
>>> my_numbers[1:7:2]
(1, 3, 8)
>>> interval = slice(1, 7, 2)
>>> my_numbers[interval]
(1, 3, 8)
>>> interval = slice(None, 3)
>>> my_numbers[interval] == my_numbers[:3]
True
"""
def main():
index_last()
get_slices()
fail_count, _ = doctest.testmod(verbose=True)
raise SystemExit(fail_count)
if __name__ == "__main__":
main()
|
Lib/test/test_compiler/testcorpus/40_import.py | diogommartins/cinder | 1,886 | 12644316 | <filename>Lib/test/test_compiler/testcorpus/40_import.py<gh_stars>1000+
import foo
import foo2, bar
import foo3 as baz
import foo.bar.baz
|
isserviceup/helpers/exceptions.py | EvgeshaGars/is-service-up | 182 | 12644355 | from flask import jsonify
from werkzeug.exceptions import HTTPException
class ApiException(Exception):
def __init__(self, message, status_code=400, **kwargs):
super(ApiException, self).__init__()
self.message = message
self.status_code = status_code
self.extra = kwargs
def format_exception(message, code=None, extra=None):
res = {
'status': 'error',
'error': message or 'server error',
'code': code,
}
if extra is not None:
res.update(extra)
return res
def handle_exception(error):
code = 500
message = None
    if hasattr(error, 'status_code'):
        code = error.status_code
    if hasattr(error, 'message'):
message = str(error.message)
if isinstance(error, HTTPException):
code = error.code
message = str(error)
extra = error.extra if hasattr(error, 'extra') else None
response = jsonify(format_exception(message, code=code, extra=extra))
response.status_code = code
return response
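# Illustrative wiring sketch (added for illustration, not part of the original module):
# shows how the handler above is typically attached to a Flask application object
# supplied by the caller.
def register_error_handlers(app):
    app.register_error_handler(ApiException, handle_exception)
    app.register_error_handler(HTTPException, handle_exception)
    return app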
|
tests/runtime/test_unbound_local_error.py | matan-h/friendly | 287 | 12644374 | <reponame>matan-h/friendly
# More complex example than needed - used for documentation
import friendly
spam_missing_global = 1
spam_missing_both = 1
def outer_missing_global():
def inner():
spam_missing_global += 1
inner()
def outer_missing_nonlocal():
spam_missing_nonlocal = 1
def inner():
spam_missing_nonlocal += 1
inner()
def outer_missing_both():
spam_missing_both = 2
def inner():
spam_missing_both += 1
inner()
def test_Missing_global():
try:
outer_missing_global()
except UnboundLocalError as e:
message = str(e)
friendly.explain_traceback(redirect="capture")
result = friendly.get_output()
assert "local variable 'spam_missing_global' referenced" in result
if friendly.get_lang() == "en":
assert (
"Did you forget to add `global spam_missing_global`?\n"
in result
)
return result, message
def test_Missing_nonlocal():
try:
outer_missing_nonlocal()
except UnboundLocalError as e:
message = str(e)
friendly.explain_traceback(redirect="capture")
result = friendly.get_output()
assert "local variable 'spam_missing_nonlocal' referenced" in result
if friendly.get_lang() == "en":
assert (
"Did you forget to add `nonlocal spam_missing_nonlocal`?\n"
in result
)
return result, message
def test_Missing_both():
try:
outer_missing_both()
except UnboundLocalError as e:
message = str(e)
friendly.explain_traceback(redirect="capture")
result = friendly.get_output()
assert "local variable 'spam_missing_both' referenced" in result
if friendly.get_lang() == "en":
assert "either `global spam_missing_both`" in result
assert "`nonlocal spam_missing_both`" in result
return result, message
def test_Typo_in_local():
def test1():
alpha1 = 1
alpha2 += 1
try:
test1()
except UnboundLocalError:
friendly.explain_traceback(redirect="capture")
result = friendly.get_output()
assert "local variable 'alpha2' referenced before assignment" in result
if friendly.get_lang() == "en":
assert "similar name `alpha1` was found" in result
def test2():
alpha1 = 1
alpha2 = 1
alpha3 += 1
try:
test2()
except UnboundLocalError as e:
message = str(e)
friendly.explain_traceback(redirect="capture")
result = friendly.get_output()
assert "local variable 'alpha3' referenced before assignment" in result
if friendly.get_lang() == "en":
assert "perhaps you meant one of the following" in result
return result, message
if __name__ == "__main__":
print(test_Missing_global()[0])
|
mayan/apps/locales/migrations/0002_auto_20210130_0324.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 12644407 | from django.db import migrations
def code_copy_locales(apps, schema_editor):
UserLocaleProfile = apps.get_model(
app_label='common', model_name='UserLocaleProfile'
)
UserLocaleProfileNew = apps.get_model(
app_label='locales', model_name='UserLocaleProfileNew'
)
for locale_profile in UserLocaleProfile.objects.using(alias=schema_editor.connection.alias):
UserLocaleProfileNew.objects.create(
user=locale_profile.user,
timezone=locale_profile.timezone,
language=locale_profile.language
)
def code_copy_locales_reverse(apps, schema_editor):
UserLocaleProfile = apps.get_model(
app_label='common', model_name='UserLocaleProfile'
)
UserLocaleProfileNew = apps.get_model(
app_label='locales', model_name='UserLocaleProfileNew'
)
for locale_profile in UserLocaleProfileNew.objects.using(alias=schema_editor.connection.alias):
UserLocaleProfile.objects.create(
user=locale_profile.user,
timezone=locale_profile.timezone,
language=locale_profile.language
)
class Migration(migrations.Migration):
dependencies = [
('common', '0015_auto_20200501_0631'),
('locales', '0001_initial')
]
operations = [
migrations.RunPython(
code=code_copy_locales,
reverse_code=code_copy_locales_reverse,
),
]
run_before = [
('common', '0018_delete_userlocaleprofile'),
]
|
airsenal/scripts/parse_fixtures.py | Tdarnell/AIrsenal | 144 | 12644470 | #!/usr/bin/env python
"""
quick'n'dirty script to parse text cut'n'pasted off the FPL site,
and put into a csv file. Needs 'dateparser' package.
"""
import re
import dateparser
infile = open("../data/gameweeks.txt")
with open("../data/fixtures.csv", "w") as outfile:
outfile.write("gameweek,date,home_team,away_team\n")
fixture_regex = re.compile(r"([\w\s]+[\w])[\s]+([\d]{2}\:[\d]{2})([\w\s]+[\w])")
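    # Added note: each match splits one pasted fixture line into
    # (home team, "HH:MM" kick-off time, away team).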
gameweek = ""
date = ""
home_team = ""
away_team = ""
date_str = ""
for line in infile.readlines():
if re.search(r"Gameweek ([\d]+)", line):
gameweek = re.search(r"Gameweek ([\d]+)", line).groups()[0]
print("gameweek {}".format(gameweek))
elif re.search(r"day [\d]+ [A-Z]", line):
date_str = line.strip()
date_str += " 2018 "
print("date {}".format(date_str))
elif fixture_regex.search(line):
home_team, ko_time, away_team = fixture_regex.search(line).groups()
match_time = date_str + ko_time
date = dateparser.parse(match_time)
print("{} vs {} {}".format(home_team, away_team, match_time))
outfile.write(
"{},{},{},{}\n".format(gameweek, str(date), home_team, away_team)
)
|
itest/test_http_api.py | kooiot/siridb-server | 349 | 12644478 | <filename>itest/test_http_api.py<gh_stars>100-1000
import requests
import json
from testing import gen_points
import asyncio
import functools
import random
import time
import math
import re
import qpack
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
TIME_PRECISION = 's'
class TestHTTPAPI(TestBase):
title = 'Test HTTP API requests'
@default_test_setup(3, time_precision=TIME_PRECISION)
async def run(self):
await self.client0.connect()
x = requests.get(
f'http://localhost:9020/get-version', auth=('sa', 'siri'))
self.assertEqual(x.status_code, 200)
v = x.json()
self.assertTrue(isinstance(v, list))
self.assertTrue(isinstance(v[0], str))
x = requests.post(
f'http://localhost:9020/insert/dbtest',
auth=('iris', 'siri'),
headers={'Content-Type': 'application/json'})
self.assertEqual(x.status_code, 400)
series_float = gen_points(
tp=float, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
series_int = gen_points(
tp=int, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
data = {
'my_float': series_float,
'my_int': series_int
}
x = requests.post(
f'http://localhost:9020/insert/dbtest',
data=json.dumps(data),
auth=('iris', 'siri'),
headers={'Content-Type': 'application/json'}
)
self.assertEqual(x.status_code, 200)
self.assertDictEqual(x.json(), {
'success_msg': 'Successfully inserted 20000 point(s).'})
data = {
'dbname': 'dbtest',
'host': 'localhost',
'port': 9000,
'username': 'iris',
'password': '<PASSWORD>'
}
x = requests.post(
f'http://localhost:9021/new-pool',
data=json.dumps(data),
auth=('sa', 'siri'),
headers={'Content-Type': 'application/json'})
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), 'OK')
self.db.servers.append(self.server1)
await self.assertIsRunning(self.db, self.client0, timeout=30)
data = {'data': [[1579521271, 10], [1579521573, 20]]}
x = requests.post(
f'http://localhost:9020/insert/dbtest',
json=data,
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertDictEqual(x.json(), {
'success_msg': 'Successfully inserted 2 point(s).'})
x = requests.post(
f'http://localhost:9020/query/dbtest',
json={'q': 'select * from "data"'},
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), data)
x = requests.post(
f'http://localhost:9020/query/dbtest',
json={'q': 'select * from "data"', 't': 'ms'},
auth=('iris', 'siri'))
data = {
'data': [[p[0] * 1000, p[1]] for p in data['data']]
}
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), data)
x = requests.post(
f'http://localhost:9020/query/dbtest',
data=qpack.packb({
'q': 'select sum(1579600000) from "data"',
't': 'ms'}),
headers={'Content-Type': 'application/qpack'},
auth=('iris', 'siri'))
self.assertEqual(x.status_code, 200)
self.assertEqual(
qpack.unpackb(x.content, decode='utf8'),
{'data': [[1579600000000, 30]]})
x = requests.post(
f'http://localhost:9021/new-account',
json={'account': 't', 'password': ''},
auth=('sa', 'siri'))
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg':
'service account name should have at least 2 characters'})
x = requests.post(
f'http://localhost:9021/new-account',
json={'account': 'tt', 'password': '<PASSWORD>'},
auth=('sa', 'siri'))
self.assertEqual(x.status_code, 200)
data = {
'dbname': 'dbtest',
'host': 'localhost',
'port': 1234,
'pool': 0,
'username': 'iris',
'password': '<PASSWORD>'
}
auth = ('tt', 'pass')
x = requests.post(
f'http://localhost:9021/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg': "database name already exists: 'dbtest'"})
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 401)
auth = ('sa', 'siri')
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 400)
self.assertEqual(x.json(), {
'error_msg':
"connecting to server 'localhost:1234' failed with error: "
"connection refused"})
data['port'] = 9000
x = requests.post(
f'http://localhost:9022/new-replica', json=data, auth=auth)
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), 'OK')
self.db.servers.append(self.server2)
await self.assertIsRunning(self.db, self.client0, timeout=50)
x = requests.get(
f'http://localhost:9022/get-databases', auth=auth)
self.assertEqual(x.status_code, 200)
self.assertEqual(x.json(), ['dbtest'])
self.client0.close()
if __name__ == '__main__':
parse_args()
run_test(TestHTTPAPI())
|
probtorch/objectives/__init__.py | alicanb/probtorch | 876 | 12644492 | <filename>probtorch/objectives/__init__.py<gh_stars>100-1000
from . import montecarlo
from . import importance
from . import marginal
|
test/runtime/test_vqeprogram.py | jschuhmac/qiskit-nature | 132 | 12644501 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the VQE program."""
from test import QiskitNatureTestCase
import unittest
import warnings
from ddt import ddt, data
import numpy as np
from qiskit.providers.basicaer import QasmSimulatorPy
from qiskit.algorithms import VQEResult
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit.library import RealAmplitudes
from qiskit.opflow import I, Z
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.runtime import VQEClient, VQEProgram, VQERuntimeResult, VQEProgramResult
from .fake_vqeruntime import FakeRuntimeProvider
@ddt
class TestVQEClient(QiskitNatureTestCase):
"""Test the VQE program."""
def get_standard_program(self, use_deprecated=False):
"""Get a standard VQEClient and operator to find the ground state of."""
circuit = RealAmplitudes(3)
operator = Z ^ I ^ Z
initial_point = np.random.random(circuit.num_parameters)
backend = QasmSimulatorPy()
if use_deprecated:
vqe_cls = VQEProgram
provider = FakeRuntimeProvider(use_deprecated=True)
warnings.filterwarnings("ignore", category=DeprecationWarning)
else:
provider = FakeRuntimeProvider(use_deprecated=False)
vqe_cls = VQEClient
vqe = vqe_cls(
ansatz=circuit,
optimizer=SPSA(),
initial_point=initial_point,
backend=backend,
provider=provider,
)
if use_deprecated:
warnings.filterwarnings("always", category=DeprecationWarning)
return vqe, operator
@data({"name": "SPSA", "maxiter": 100}, SPSA(maxiter=100))
def test_standard_case(self, optimizer):
"""Test a standard use case."""
for use_deprecated in [False, True]:
vqe, operator = self.get_standard_program(use_deprecated=use_deprecated)
vqe.optimizer = optimizer
result = vqe.compute_minimum_eigenvalue(operator)
self.assertIsInstance(result, VQEResult)
self.assertIsInstance(result, VQEProgramResult if use_deprecated else VQERuntimeResult)
def test_supports_aux_ops(self):
"""Test the VQEClient says it supports aux operators."""
for use_deprecated in [False, True]:
vqe, _ = self.get_standard_program(use_deprecated=use_deprecated)
self.assertTrue(vqe.supports_aux_operators)
def test_return_groundstate(self):
"""Test the VQEClient yields a ground state solver that returns the ground state."""
for use_deprecated in [False, True]:
vqe, _ = self.get_standard_program(use_deprecated=use_deprecated)
qubit_converter = QubitConverter(JordanWignerMapper())
gss = GroundStateEigensolver(qubit_converter, vqe)
self.assertTrue(gss.returns_groundstate)
if __name__ == "__main__":
unittest.main()
|
tests/scripts/thread-cert/network_diag.py | AdityaHPatwardhan/openthread | 2,962 | 12644526 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import struct
from enum import IntEnum
from typing import List
import common
import ipaddress
import mle
class TlvType(IntEnum):
EXT_ADDRESS = 0
ADDRESS16 = 1
MODE = 2
POLLING_PERIOD = 3
CONNECTIVITY = 4
ROUTE64 = 5
LEADER_DATA = 6
NETWORK_DATA = 7
IPV6_ADDRESS_LIST = 8
MAC_COUNTERS = 9
BATTERY_LEVEL = 14
SUPPLY_VOLTAGE = 15
CHILD_TABLE = 16
CHANNEL_PAGES = 17
TYPE_LIST = 18
MAX_CHILD_TIMEOUT = 19
class Ipv6AddressList:
def __init__(self, addresses: List[ipaddress.IPv6Address]):
self._addresses = addresses
@property
def addresses(self):
return self._addresses
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.addresses == other.addresses
def __repr__(self):
return f'Ipv6AddressList({self.addresses})'
class Ipv6AddressListFactory:
def parse(self, data, message_info):
addresses = []
while data.tell() < message_info.length:
addresses.append(ipaddress.IPv6Address(data.read(16)))
return Ipv6AddressList(addresses)
class MacCounters:
def __init__(self, counters: List[int]):
self._counters = counters
@property
def if_in_unknown_protos(self):
return self._counters[0]
@property
def if_in_errors(self):
return self._counters[1]
@property
def if_out_errors(self):
return self._counters[2]
@property
def if_in_ucast_pkts(self):
return self._counters[3]
@property
def if_in_broadcast_pkts(self):
return self._counters[4]
@property
def if_in_discards(self):
return self._counters[5]
@property
def if_out_ucast_pkts(self):
return self._counters[6]
@property
def if_out_broadcast_pkts(self):
return self._counters[7]
@property
def if_out_discards(self):
return self._counters[8]
@property
def counters(self):
return self._counters
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.counters == other.counters
def __repr__(self):
return ('MacCounters(' + f'if_in_unknown_protos={self.if_in_unknown_protos}, ' +
f'if_in_errors={self.if_in_errors}, ' + f'if_out_errors={self.if_out_errors}, ' +
f'if_in_ucast_pkts={self.if_in_ucast_pkts}, ' + f'if_in_broadcast_pkts={self.if_in_broadcast_pkts}, ' +
f'if_in_discards={self.if_in_discards}, ' + f'if_out_ucast_pkts={self.if_out_ucast_pkts}, ' +
f'if_out_broadcast_pkts={self.if_out_broadcast_pkts}, ' + f'if_out_discards={self.if_out_discards})')
class MacCountersFactory:
def parse(self, data, message_info):
return MacCounters(struct.unpack('>9I', data.read(4 * 9)))
class BatteryLevel:
def __init__(self, battery_level: int):
self._battery_level = battery_level
@property
def battery_level(self):
return self._battery_level
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.battery_level == other.battery_level
def __repr__(self):
return f'BatteryLevel(battery_level={self.battery_level})'
class BatteryLevelFactory:
def parse(self, data, message_info):
return BatteryLevel(struct.unpack('>B', data.read(1))[0])
class SupplyVoltage:
def __init__(self, supply_voltage: int):
self._supply_voltage = supply_voltage
@property
def supply_voltage(self):
return self._supply_voltage
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.supply_voltage == other.supply_voltage
def __repr__(self):
return f'SupplyVoltage(supply_voltage={self.supply_voltage})'
class SupplyVoltageFactory:
def parse(self, data, message_info):
return SupplyVoltage(struct.unpack('>H', data.read(2))[0])
class ChildTableEntry:
def __init__(self, timeout: int, child_id: int, mode: mle.Mode):
self._timeout = timeout
self._child_id = child_id
self._mode = mode
@property
def timeout(self):
return self._timeout
@property
def child_id(self):
return self._child_id
@property
def mode(self):
return self._mode
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.timeout == other.timeout and self.child_id == other.child_id and self.mode == other.mode)
def __repr__(self):
return f'ChildTableEntry(timeout={self.timeout}, child_id={self.child_id}, mode={self.mode})'
class ChildTable:
def __init__(self, children: List[ChildTableEntry]):
self._children = sorted(children, key=lambda child: child.child_id)
@property
def children(self):
return self._children
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.children == other.children
def __repr__(self):
return f'ChildTable({self.children})'
class ChildTableFactory:
def parse(self, data, message_info):
children = []
while message_info.length > 0:
timeout_and_id = struct.unpack('>H', data.read(2))[0]
message_info.length -= 2
timeout = (timeout_and_id & 0xf800) >> 11
child_id = timeout_and_id & 0x1fff
mode = mle.ModeFactory().parse(data, message_info)
message_info.length -= 1
children.append(ChildTableEntry(timeout, child_id, mode))
return ChildTable(children)
class ChannelPages:
def __init__(self, channel_pages: bytes):
self._channel_pages = channel_pages
@property
def channel_pages(self):
return self._channel_pages
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.channel_pages == other.channel_pages
def __repr__(self):
return f'ChannelPages(channel_pages={self.channel_pages})'
class ChannelPagesFactory:
def parse(self, data, message_info):
return ChannelPages(data.getvalue())
class TypeList:
def __init__(self, tlv_types: List[int]):
self._tlv_types = tlv_types
@property
def tlv_types(self):
return self._tlv_types
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.tlv_types == other.tlv_types
def __repr__(self):
return f'TypeList(tlv_types={self.tlv_types})'
class TypeListFactory:
def parse(self, data, message_info):
return TypeList(list(bytearray(data.getvalue())))  # byte values as ints; ord() would fail on Python 3 byte iteration
class MaxChildTimeout:
def __init__(self, max_child_timeout: int):
self._max_child_timeout = max_child_timeout
@property
def max_child_timeout(self):
return self._max_child_timeout
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.max_child_timeout == other.max_child_timeout
def __repr__(self):
return f'MaxChildTimeout(max_child_timeout={self.max_child_timeout})'
class MaxChildTimeoutFactory:
def parse(self, data, message_info):
return MaxChildTimeout(struct.unpack('>I', data.read(4))[0])
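# Illustrative only: a minimal, hedged sketch of exercising the fixed-size
# factories above in isolation. It assumes these simple parsers do not consult
# `message_info`, which holds for BatteryLevelFactory and SupplyVoltageFactory.
if __name__ == '__main__':
    import io
    battery = BatteryLevelFactory().parse(io.BytesIO(b'\x5a'), message_info=None)
    assert battery.battery_level == 0x5A
    voltage = SupplyVoltageFactory().parse(io.BytesIO(b'\x0c\xe4'), message_info=None)
    assert voltage.supply_voltage == 0x0CE4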
|
bcs-ui/backend/tests/resources/namespace/test_namespace_quota.py | laodiu/bk-bcs | 599 | 12644537 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from unittest import mock
import pytest
from backend.resources.namespace.namespace_quota import NamespaceQuota
from backend.tests.conftest import TEST_NAMESPACE
from ..conftest import FakeBcsKubeConfigurationService
class TestNamespaceQuota:
@pytest.fixture(autouse=True)
def use_faked_configuration(self):
"""Replace ConfigurationService with fake object"""
with mock.patch(
'backend.resources.utils.kube_client.BcsKubeConfigurationService',
new=FakeBcsKubeConfigurationService,
):
yield
@pytest.fixture
def client_obj(self, project_id, cluster_id):
return NamespaceQuota('token', project_id, cluster_id)
def test_create_namespace_quota(self, client_obj):
assert not client_obj.get_namespace_quota(TEST_NAMESPACE)
client_obj.create_namespace_quota(TEST_NAMESPACE, {'cpu': '1000m'})
def test_get_namespace_quota(self, client_obj):
""" 测试获取 单个 NamespaceQuota """
quota = client_obj.get_namespace_quota(TEST_NAMESPACE)
assert isinstance(quota, dict)
assert 'hard' in quota
def test_list_namespace_quota(self, client_obj):
""" 测试获取 NamespaceQuota 列表 """
results = client_obj.list_namespace_quota(TEST_NAMESPACE)
assert len(results) > 0
def test_update_or_create_namespace_quota(self, client_obj):
"""
测试 NamespaceQuota 的 更新或创建
TODO create_namespace_quota 与 update_or_create_namespace_quota 逻辑相同,后续考虑废弃一个
"""
client_obj.update_or_create_namespace_quota(TEST_NAMESPACE, {'cpu': '2000m'})
quota = client_obj.get_namespace_quota(TEST_NAMESPACE)
assert isinstance(quota, dict)
def test_delete_namespace_quota(self, client_obj):
client_obj.delete_namespace_quota(TEST_NAMESPACE)
assert not client_obj.get_namespace_quota(TEST_NAMESPACE)
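# To run only this module (a sketch; the exact pytest invocation and settings
# bootstrap depend on the repository's tooling and the fixtures in conftest.py):
#   pytest backend/tests/resources/namespace/test_namespace_quota.py -v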
|
exp.voc/voc8.res50v3+.GCT/train.py | Yongjin-colin-choi/TorchSemiSeg | 268 | 12644539 | from __future__ import division
import os.path as osp
import os
import sys
import time
import argparse
import math
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from config import config
from dataloader import get_train_loader
from network import Network
from dataloader import VOC
from utils.init_func import init_weight, group_weight
from engine.lr_policy import WarmUpPolyLR
from engine.engine import Engine
from seg_opr.loss_opr import SigmoidFocalLoss, ProbOhemCrossEntropy2d
# from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
from tensorboardX import SummaryWriter
from gct_util import sigmoid_rampup, FlawDetectorCriterion
try:
from apex.parallel import DistributedDataParallel, SyncBatchNorm
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
try:
from azureml.core import Run
azure = True
run = Run.get_context()
except:
azure = False
parser = argparse.ArgumentParser()
os.environ['MASTER_PORT'] = '169711'
if os.getenv('debug') is not None:
is_debug = os.environ['debug']
else:
is_debug = False
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
cudnn.benchmark = True
seed = config.seed
if engine.distributed:
seed = engine.local_rank
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
# data loader + unsupervised data loader
train_loader, train_sampler = get_train_loader(engine, VOC, train_source=config.train_source, \
unsupervised=False)
unsupervised_train_loader, unsupervised_train_sampler = get_train_loader(engine, VOC, \
train_source=config.unsup_source, unsupervised=True)
if engine.distributed and (engine.local_rank == 0):
tb_dir = config.tb_dir + '/{}'.format(time.strftime("%b%d_%d-%H-%M", time.localtime()))
generate_tb_dir = config.tb_dir + '/tb'
logger = SummaryWriter(log_dir=tb_dir)
engine.link_tb(tb_dir, generate_tb_dir)
# config network and criterion
criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=255)
criterion_csst = nn.MSELoss(reduction='mean')
if engine.distributed:
BatchNorm2d = SyncBatchNorm
# define and init the model
model = Network(config.num_classes, criterion=criterion,
pretrained_model=config.pretrained_model,
norm_layer=BatchNorm2d)
init_weight(model.l_model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_in', nonlinearity='relu')
init_weight(model.r_model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_in', nonlinearity='relu')
# define the learning rate
base_lr = config.lr
fd_lr = config.fd_lr
if engine.distributed:
base_lr = config.lr * engine.world_size
fd_lr = config.fd_lr * engine.world_size
# define the optimizers
params_list_l = []
params_list_l = group_weight(params_list_l, model.l_model.backbone,
BatchNorm2d, base_lr)
for module in model.l_model.business_layer:
params_list_l = group_weight(params_list_l, module, BatchNorm2d,
base_lr) # head lr * 10
optimizer_l = torch.optim.SGD(params_list_l,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
params_list_r = []
params_list_r = group_weight(params_list_r, model.r_model.backbone,
BatchNorm2d, base_lr)
for module in model.r_model.business_layer:
params_list_r = group_weight(params_list_r, module, BatchNorm2d,
base_lr) # head lr * 10
optimizer_r = torch.optim.SGD(params_list_r,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
fd_optimizer = torch.optim.Adam(model.fd_model.parameters(),
lr=fd_lr, betas=(0.9, 0.99))
# config lr policy
total_iteration = config.nepochs * config.niters_per_epoch
lr_policy = WarmUpPolyLR(base_lr, config.lr_power, total_iteration, config.niters_per_epoch * config.warm_up_epoch)
lr_policy_fd = WarmUpPolyLR(fd_lr, config.lr_power, total_iteration, config.niters_per_epoch * config.warm_up_epoch)
if engine.distributed:
print('distributed !!')
if torch.cuda.is_available():
model.cuda()
model = DistributedDataParallel(model)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
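# Note: DataParallelModel is provided by the `seg_opr.sync_bn` import that is
# commented out near the top of this file, so this non-distributed branch only
# works if that import is restored (or the model is wrapped differently).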
model = DataParallelModel(model, device_ids=engine.devices)
model.to(device)
engine.register_state(dataloader=train_loader, model=model,
optimizer_l=optimizer_l, optimizer_r=optimizer_r)
if engine.continue_state_object:
engine.restore_checkpoint()
model.train()
print('begin train')
for epoch in range(engine.state.epoch, config.nepochs):
model.train()
if engine.distributed:
train_sampler.set_epoch(epoch)
bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
if is_debug:
pbar = tqdm(range(10), file=sys.stdout, bar_format=bar_format)
else:
pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout, bar_format=bar_format)
dataloader = iter(train_loader)
unsupervised_dataloader = iter(unsupervised_train_loader)
sum_loss_l = 0
sum_loss_r = 0
sum_loss_fd = 0
''' supervised part '''
for idx in pbar:
optimizer_l.zero_grad()
optimizer_r.zero_grad()
fd_optimizer.zero_grad()
engine.update_iteration(epoch, idx)
minibatch = next(dataloader)  # next() builtin; the .next() method is gone from newer DataLoader iterators
unsupervised_minibatch = next(unsupervised_dataloader)
imgs = minibatch['data']
gts = minibatch['label']
imgs_unlabeled = unsupervised_minibatch['data']
gts_unlabeled = (gts * 0 + 255).long() # set the gt of unlabeled data to be 255
imgs = imgs.cuda(non_blocking=True)
imgs_unlabeled = imgs_unlabeled.cuda(non_blocking=True)
gts = gts.cuda(non_blocking=True)
gts_unlabeled = gts_unlabeled.cuda(non_blocking=True)
l_inp = torch.cat([imgs, imgs_unlabeled], dim=0)
l_gt = torch.cat([gts, gts_unlabeled], dim=0)
r_inp = torch.cat([imgs, imgs_unlabeled], dim=0)
r_gt = torch.cat([gts, gts_unlabeled], dim=0)
current_idx = epoch * config.niters_per_epoch + idx
total_steps = config.dc_rampup_epochs * config.niters_per_epoch
dc_rampup_scale = sigmoid_rampup(current_idx, total_steps)
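# sigmoid_rampup comes from gct_util; consistency-training codebases typically
# use the mean-teacher style schedule exp(-5 * (1 - current/total)^2), ramping
# smoothly from ~0 to 1 over total_steps. The exact formula here is an
# assumption; gct_util defines the actual implementation.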
# -----------------------------------------------------------------------------
# step-1: train the task models
# -----------------------------------------------------------------------------
for param in model.module.fd_model.parameters():
param.requires_grad = False
pred_l, flawmap_l, l_fc_mask, l_dc_gt, pred_r, flawmap_r, r_fc_mask, r_dc_gt = model(l_inp, r_inp, l_gt,
r_gt, current_idx,
total_steps, step=1)
# task loss
fd_criterion = FlawDetectorCriterion()
dc_criterion = torch.nn.MSELoss()
b, c, h, w = pred_l.shape
task_loss_l = criterion(pred_l[:b // 2], l_gt[:b // 2])
dist.all_reduce(task_loss_l, dist.ReduceOp.SUM)
task_loss_l = task_loss_l / engine.world_size
fc_ssl_loss_l = fd_criterion(flawmap_l, torch.zeros(flawmap_l.shape).cuda(), is_ssl=True,
reduction=False)
fc_ssl_loss_l = l_fc_mask * fc_ssl_loss_l
fc_ssl_loss_l = config.fc_ssl_scale * torch.mean(fc_ssl_loss_l)
dist.all_reduce(fc_ssl_loss_l, dist.ReduceOp.SUM)
fc_ssl_loss_l = fc_ssl_loss_l / engine.world_size
dc_ssl_loss_l = dc_criterion(F.softmax(pred_l, dim=1), l_dc_gt)
dc_ssl_loss_l = dc_rampup_scale * config.dc_ssl_scale * torch.mean(dc_ssl_loss_l)
dist.all_reduce(dc_ssl_loss_l, dist.ReduceOp.SUM)
dc_ssl_loss_l = dc_ssl_loss_l / engine.world_size
loss_l = task_loss_l + fc_ssl_loss_l + dc_ssl_loss_l
''' train the 'r' task model '''
b, c, h, w = pred_r.shape
task_loss_r = criterion(pred_r[:b // 2], r_gt[:b // 2])
dist.all_reduce(task_loss_r, dist.ReduceOp.SUM)
task_loss_r = task_loss_r / engine.world_size
fc_ssl_loss_r = fd_criterion(flawmap_r, torch.zeros(flawmap_r.shape).cuda(), is_ssl=True,
reduction=False)
fc_ssl_loss_r = r_fc_mask * fc_ssl_loss_r
fc_ssl_loss_r = config.fc_ssl_scale * torch.mean(fc_ssl_loss_r)
dist.all_reduce(fc_ssl_loss_r, dist.ReduceOp.SUM)
fc_ssl_loss_r = fc_ssl_loss_r / engine.world_size
dc_ssl_loss_r = dc_criterion(F.softmax(pred_r, dim=1), r_dc_gt)
dc_ssl_loss_r = dc_rampup_scale * config.dc_ssl_scale * torch.mean(dc_ssl_loss_r)
dist.all_reduce(dc_ssl_loss_r, dist.ReduceOp.SUM)
dc_ssl_loss_r = dc_ssl_loss_r / engine.world_size
loss_r = task_loss_r + fc_ssl_loss_r + dc_ssl_loss_r
loss_task = loss_l + loss_r
loss_task.backward()
optimizer_l.step()
optimizer_r.step()
# -----------------------------------------------------------------------------
# step-2: train the flaw detector
# -----------------------------------------------------------------------------
for param in model.module.fd_model.parameters():
param.requires_grad = True
l_flawmap, r_flawmap, l_flawmap_gt, r_flawmap_gt = model(l_inp, r_inp, l_gt, r_gt, current_idx, total_steps,
step=2)
# generate the ground truth for the flaw detector (on labeled data only)
lbs = b // 2
l_fd_loss = fd_criterion(l_flawmap[:lbs, ...], l_flawmap_gt)
l_fd_loss = config.fd_scale * torch.mean(l_fd_loss)
r_fd_loss = fd_criterion(r_flawmap[:lbs, ...], r_flawmap_gt)
r_fd_loss = config.fd_scale * torch.mean(r_fd_loss)
fd_loss = (l_fd_loss + r_fd_loss) / 2
dist.all_reduce(fd_loss, dist.ReduceOp.SUM)
fd_loss = fd_loss / engine.world_size
fd_loss.backward()
fd_optimizer.step()
lr = lr_policy.get_lr(current_idx)
fd_lr = lr_policy_fd.get_lr(current_idx)
optimizer_l.param_groups[0]['lr'] = lr
optimizer_l.param_groups[1]['lr'] = lr
for i in range(2, len(optimizer_l.param_groups)):
optimizer_l.param_groups[i]['lr'] = lr
optimizer_r.param_groups[0]['lr'] = lr
optimizer_r.param_groups[1]['lr'] = lr
for i in range(2, len(optimizer_r.param_groups)):
optimizer_r.param_groups[i]['lr'] = lr
sum_loss_l += loss_l.item()
sum_loss_r += loss_r.item()
sum_loss_fd += fd_loss.item()
print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
+ ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
+ ' lr=%.2e' % lr \
+ ' loss_l=%.2f' % loss_l.item() \
+ ' loss_r=%.2f' % loss_r.item() \
+ ' loss_fd=%.2f' % fd_loss.item()
pbar.set_description(print_str, refresh=False)
if engine.distributed and (engine.local_rank == 0):
logger.add_scalar('train_loss_l', sum_loss_l / len(pbar), epoch)
logger.add_scalar('train_loss_r', sum_loss_r / len(pbar), epoch)
logger.add_scalar('train_loss_fd', sum_loss_fd / len(pbar), epoch)
if azure and engine.local_rank == 0:
run.log(name='train_loss_l', value=sum_loss_l / len(pbar))
run.log(name='train_loss_r', value=sum_loss_r / len(pbar))
run.log(name='train_loss_fd', value=sum_loss_fd / len(pbar))
if (epoch > config.nepochs // 2) and (epoch % config.snapshot_iter == 0) or (epoch == config.nepochs - 1):
if engine.distributed and (engine.local_rank == 0):
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
elif not engine.distributed:
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link) |
pfrl/nn/branched.py | g-votte/pfrl | 824 | 12644550 | <filename>pfrl/nn/branched.py
import torch
class Branched(torch.nn.Module):
"""Module that calls forward functions of child modules in parallel.
When the `forward` method of this module is called, all the
arguments are forwarded to each child module's `forward` method.
The returned values from the child modules are returned as a tuple.
Args:
*modules: Child modules. Each module should be callable.
"""
def __init__(self, *modules):
super().__init__()
self.child_modules = torch.nn.ModuleList(modules)
def forward(self, *args, **kwargs):
"""Forward the arguments to the child modules.
Args:
*args, **kwargs: Any arguments forwarded to child modules. Each
child module should be able to accept the arguments.
Returns:
tuple: Tuple of the returned values from the child modules.
"""
return tuple(mod(*args, **kwargs) for mod in self.child_modules)
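# Illustrative usage (a sketch, not part of the original module): two heads
# driven by one forward call, with their outputs returned as a tuple.
if __name__ == "__main__":
    branched = Branched(torch.nn.Linear(4, 2), torch.nn.Linear(4, 3))
    out_a, out_b = branched(torch.zeros(1, 4))
    assert out_a.shape == (1, 2) and out_b.shape == (1, 3)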
|