max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
manim/utils/unit.py | PhotonSpheres/manim | 9,497 | 34325 | """Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
class _PixelUnits:
def __mul__(self, val):
return val * config.frame_width / config.pixel_width
def __rmul__(self, val):
return val * config.frame_width / config.pixel_width
class Percent:
def __init__(self, axis):
if np.array_equal(axis, constants.X_AXIS):
self.length = config.frame_width
if np.array_equal(axis, constants.Y_AXIS):
self.length = config.frame_height
if np.array_equal(axis, constants.Z_AXIS):
raise NotImplementedError("length of Z axis is undefined")
def __mul__(self, val):
return val / 100 * self.length
def __rmul__(self, val):
return val / 100 * self.length
Pixels = _PixelUnits()
Degrees = constants.PI / 180
Munits = 1
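# Example (added for illustration, not part of the original module): a minimal
# sketch of how these units convert to scene units (munits), assuming manim is
# installed and uses its default frame (about 14.22 munits and 1920 pixels wide).
#
#     from manim import constants
#     from manim.utils.unit import Pixels, Degrees, Percent
#
#     print(50 * Pixels)                      # 50 pixels expressed in munits
#     print(90 * Degrees)                     # 90 degrees expressed in radians
#     print(10 * Percent(constants.X_AXIS))   # 10% of the frame width in munits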
|
package/awesome_panel/express/bootstrap/modal.py | slamer59/awesome-panel | 179 | 34341 |
"""In this module we provide the functionality of a Modal.
The Modal can be used to focus some kind of information like text, images, chart or an interactive
dashboard.
The implementation is inspired by
- https://css-tricks.com/considerations-styling-modal/
- https://codepen.io/henchmen/embed/preview/PzQpvk
- https://getbootstrap.com/docs/4.3/components/modal/
"""
import panel as pn
import param
_CSS = """
.bk.modal {
/* This way it could be display flex or grid or whatever also. */
display: block;
max-width: 100%;
max-height: 100%;
position: fixed!important;
z-index: 100;
left: 0!important;
top: 0!important;
bottom: 0!important;
right: 0!important;
margin: auto!important;
box-shadow: 5px 5px 20px grey;
box-shadow: 0 0 60px 10px rgba(0, 0, 0, 0.9);
border: 1px solid rgba(0,0,0,.125);
border-radius: 0.25rem;
}
.closed {
display: none!important;
}
.modal-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 50;
background: rgba(0, 0, 0, 0.6);
}
.modal-body {
overflow: auto;
}
"""
class Modal(param.Parameterized):
"""The Modal can be used to focus some kind of information like text, images, chart or an
interactive dashboard.
In order to use this modal you need to
- Instantiate the Modal
- Add the CSS from the get_css function to the app
- using `pn.config.raw_css.append` or
- directly in your template
The implementation is inspired by
- https://css-tricks.com/considerations-styling-modal/
- https://codepen.io/henchmen/embed/preview/PzQpvk
- https://getbootstrap.com/docs/4.3/components/modal/
"""
title = param.String(default="Modal")
body = param.List()
def __init__(self, **params):
super().__init__(**params)
self.modal_overlay = pn.pane.HTML('<div class="modal-overlay" id="modal-overlay"></div>')
self.close_button = pn.widgets.Button(
name="X",
css_classes=["close-modal-button"],
width=50,
)
self.close_button.js_on_click(
code="""
var modal = document.querySelector(".bk.modal");
var modalOverlay = document.querySelector("#modal-overlay");
modal.classList.toggle("closed");
modalOverlay.classList.toggle("closed");
"""
)
self._modal_title = pn.pane.Markdown("# " + self.title)
self._modal_body = pn.Column(*self.body) # pylint: disable=not-an-iterable
self.modal = pn.Column(
pn.Column(
pn.Row(
self._modal_title,
pn.layout.HSpacer(),
self.close_button,
),
pn.layout.Divider(),
self._modal_body,
sizing_mode="stretch_width",
margin=10,
),
background="white",
width=400,
height=400,
css_classes=["modal"],
)
@staticmethod
def get_open_modal_button(name: str = "Open Modal", **kwargs) -> pn.widgets.Button:
"""A Button to open the modal with"""
open_modal_button = pn.widgets.Button(
name=name, css_classes=["open-modal-button"], **kwargs
)
open_modal_button.js_on_click(
code="""
var modal = document.querySelector(".modal");
var modalOverlay = document.querySelector("#modal-overlay");
modal.classList.toggle("closed");
modalOverlay.classList.toggle("closed");
"""
)
return open_modal_button
@staticmethod
def get_css() -> str:
"""Add the CSS from this function to the app
- using `pn.config.raw_css.append` or
- directly in your template
Returns:
str: The css string
"""
return _CSS
@param.depends(
"title",
watch=True,
)
def set_modal_title(
self,
):
"""Updates the title of the modal"""
self._modal_title.object = "# " + self.title
@param.depends(
"body",
watch=True,
)
def set_modal_body(
self,
):
"""Updates the body of the modal"""
self._modal_body[:] = self.body
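# Example (added for illustration, not part of the original module): a minimal
# sketch of serving the Modal in a Panel app, following the docstring above;
# the layout below is an assumption, not an official awesome-panel recipe.
if __name__ == "__main__":
    pn.config.raw_css.append(Modal.get_css())
    modal = Modal(title="Hello", body=[pn.pane.Markdown("Some **content**")])
    app = pn.Column(
        Modal.get_open_modal_button(),
        modal.modal_overlay,
        modal.modal,
    )
    app.servable()  # or app.show() to open it in a browser from a script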
|
python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py | zmxdream/Paddle | 17,085 | 34342 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core, framework, unique_name
from .meta_optimizer_base import MetaOptimizerBase
__all__ = []
class FP16AllReduceOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super(FP16AllReduceOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
# we do not allow meta optimizer to be inner optimizer currently
self.meta_optimizers_white_list = [
"LarsOptimizer",
"LambOptimizer",
"RecomputeOptimizer",
"LocalSGDOptimizer",
"GradientMergeOptimizer",
"GraphExecutionOptimizer",
"AdaptiveLocalSGDOptimizer",
]
self.meta_optimizers_black_list = ["DGCOptimizer"]
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
super(FP16AllReduceOptimizer, self)._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy)
def _can_apply(self):
if not self.role_maker._is_collective:
return False
if self.user_defined_strategy.fp16_allreduce:
return True
return False
def _disable_strategy(self, dist_strategy):
dist_strategy.fp16_allreduce = False
def _enable_strategy(self, dist_strategy, context=None):
dist_strategy.fp16_allreduce = True
@staticmethod
def fp16_compression(param_and_grads):
"""
Compress fp32 gradients to fp16 during allreduce.
"""
op_maker = core.op_proto_and_checker_maker
new_param_and_grads = [] # param, grad, is_cast
# cast grad from fp32->fp16 before allreduce,
for param, grad in param_and_grads:
if grad is None or grad.dtype != core.VarDesc.VarType.FP32:
new_param_and_grads.append((param, grad, False))
continue
op = grad.op
block = grad.block
var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
if param.name not in var_attr:
new_param_and_grads.append((param, grad, False))
continue
# remove (param, grad) from op_role_var
var_attr.remove(param.name)
var_attr.remove(grad.name)
if len(var_attr) > 1:
op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
else:
op._remove_attr(op_maker.kOpRoleVarAttrName())
new_grad = block.create_var(
name=unique_name.generate(grad.name + ".cast_fp16"),
dtype=core.VarDesc.VarType.FP16,
persistable=False,
stop_gradient=True)
with block.program._backward_role_guard():
cast_op = block.append_op(
type="cast",
inputs={"X": grad},
outputs={"Out": new_grad},
attrs={
"in_dtype": core.VarDesc.VarType.FP32,
"out_dtype": core.VarDesc.VarType.FP16
},
stop_gradient=True)
backward = op_maker.OpRole.Backward
cast_op._set_attr(op_maker.kOpRoleAttrName(), backward)
cast_op._set_attr(op_maker.kOpRoleVarAttrName(),
[param.name, new_grad.name])
new_grad.op = cast_op
new_param_and_grads.append((param, new_grad, True))
ret_param_and_grads = []
# cast grad from fp16->fp32 after allreduce.
        # NOTE: fp16 compression is split into two for-loops; if they are not
        # separated, fused allreduce produces wrong results. This is likely a
        # problem in the fuse-allreduce pass and needs to be fixed in the future.
for param, grad, cast in new_param_and_grads:
if not cast:
ret_param_and_grads.append((param, grad))
continue
block = grad.block
new_grad = block.create_var(
name=unique_name.generate(grad.name + ".cast_fp32"),
dtype=core.VarDesc.VarType.FP32,
persistable=False,
stop_gradient=True)
with block.program._optimized_guard(
[param, grad]), framework.name_scope('fp16_allreduce'):
cast_op = block.append_op(
type="cast",
inputs={"X": grad},
outputs={"Out": new_grad},
attrs={
"in_dtype": core.VarDesc.VarType.FP16,
"out_dtype": core.VarDesc.VarType.FP32
},
stop_gradient=True)
ret_param_and_grads.append((param, new_grad))
return ret_param_and_grads
def apply_optimize(self, loss, startup_program, params_grads):
new_params_grads = self.fp16_compression(params_grads)
return self.inner_opt.apply_optimize(
loss,
startup_program=startup_program,
params_grads=new_params_grads)
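# Conceptual sketch (added for illustration, not part of Paddle's API): the same
# cast-down / communicate / cast-up idea expressed with NumPy. `fake_allreduce`
# is a hypothetical stand-in for the real collective; only the dtype handling
# mirrors what fp16_compression arranges around allreduce.
if __name__ == "__main__":
    import numpy as np
    def fake_allreduce(tensors):
        # sum the per-worker gradients, as a real allreduce would
        return sum(tensors)
    grads_fp32 = [np.ones(4, dtype=np.float32) * i for i in range(1, 3)]
    grads_fp16 = [g.astype(np.float16) for g in grads_fp32]  # cast before allreduce
    reduced = fake_allreduce(grads_fp16)                     # communicate in fp16
    result = reduced.astype(np.float32)                      # cast back after allreduce
    print(result.dtype, result)                              # float32 [3. 3. 3. 3.]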
|
CondTools/Geometry/python/HGCalParametersWriter_cff.py | ckamtsikis/cmssw | 852 | 34369 |
import FWCore.ParameterSet.Config as cms
from CondTools.Geometry.HGCalEEParametersWriter_cfi import *
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toModify(HGCalEEParametersWriter,
fromDD4Hep = cms.bool(True)
)
HGCalHESiParametersWriter = HGCalEEParametersWriter.clone(
name = cms.string("HGCalHESiliconSensitive"),
nameW = cms.string("HGCalHEWafer"),
nameC = cms.string("HGCalHECell"),
)
HGCalHEScParametersWriter = HGCalEEParametersWriter.clone(
name = cms.string("HGCalHEScintillatorSensitive"),
nameW = cms.string("HGCalWafer"),
nameC = cms.string("HGCalCell"),
)
|
examples/python/src/authors/models.py | ShivamSarodia/sqlc | 5,153 | 34443 | # Code generated by sqlc. DO NOT EDIT.
import dataclasses
from typing import Optional
@dataclasses.dataclass()
class Author:
id: int
name: str
bio: Optional[str]
|
RecoMuon/MuonIsolationProducers/test/isoTest_cfg.py | ckamtsikis/cmssw | 852 | 34469 |
# The following comments couldn't be translated into the new config version:
#
# keep only muon-related info here
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("MISO")
process.load("Configuration.EventContent.EventContent_cff")
# service = MessageLogger {
# untracked vstring destinations = { "cout" }
# untracked vstring debugModules = { "muIsoDepositTk",
# "muIsoDepositCalByAssociatorHits",
# "muIsoDepositCalByAssociatorTowers",
# "muIsoDepositCal" }
# untracked vstring categories = { "RecoMuon" , "MuonIsolation" }
#
# untracked PSet cout = {
# untracked string threshold = "DEBUG"
# untracked int32 lineLength = 132
# untracked bool noLineBreaks = true
# untracked PSet DEBUG = {untracked int32 limit = 0 }
# untracked PSet RecoMuon = { untracked int32 limit = 10000000}
# untracked PSet MuonIsolation = { untracked int32 limit = 10000000}
# }
# }
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.load("RecoLocalMuon.Configuration.RecoLocalMuon_cff")
#process.load("RecoMuon.Configuration.RecoMuon_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.FakeConditions_cff")
#process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
#has everything(?) one needs
# pick muIsolation sequence for "standard" iso reco for tracker and global muons
process.load("RecoMuon.MuonIsolationProducers.muIsolation_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/mc/2007/12/7/RelVal-RelValBJets_Pt_50_120-1197045102/0002/0A21A5F4-02A5-DC11-89F5-000423DD2F34.root')
)
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/10438122-2A5F-DD11-A77F-000423D985E4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/12F34420-2A5F-DD11-AB6E-000423D6CA6E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/244E7C0B-315F-DD11-ACFC-001617E30F58.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/2ADD8A12-315F-DD11-8AB8-000423D6C8E6.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/34A291FB-305F-DD11-833E-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/383E09CA-2C5F-DD11-9A28-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/40F0F8A4-2A5F-DD11-BC72-001617C3B64C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4AD39C8C-2A5F-DD11-B935-001617C3B710.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C0D4911-315F-DD11-A20D-001617DBD332.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C32E425-2A5F-DD11-B819-000423D6C8EE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/50881CBB-2A5F-DD11-92C6-001617C3B6E8.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/52B83F75-2A5F-DD11-AD56-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/544DC99A-2A5F-DD11-9160-001617C3B6E2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/62F7698D-2A5F-DD11-907A-001617C3B6DC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7C8A2791-2A5F-DD11-814D-001617DBCF6A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7EDA5005-315F-DD11-8019-001617C3B706.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8A91E518-2A5F-DD11-B49A-000423D6B42C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8CC497AE-2A5F-DD11-AE43-000423DD2F34.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A469FA8-2A5F-DD11-9909-001617C3B6FE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A5BE3A4-2A5F-DD11-A61B-001617DF785A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9AC2141C-2A5F-DD11-ADF5-000423D6A6F4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9CCFA319-2A5F-DD11-B0AA-000423D94700.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/A0F6C41D-2A5F-DD11-8685-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B0159DAC-2A5F-DD11-98A8-001617E30D00.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B05C32FC-305F-DD11-A957-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C6ADD999-2A5F-DD11-AF9F-0016177CA7A0.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C8AEE585-2A5F-DD11-BB37-001617C3B77C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CC5178C4-2A5F-DD11-BCE6-001617E30F4C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CE9FE020-2A5F-DD11-9846-000423D6CA72.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D24BFA7E-2A5F-DD11-8F79-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D62761FA-305F-DD11-A108-0016177CA778.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/DA0DDFB6-2A5F-DD11-987A-001617DBD5B2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E64386FE-305F-DD11-BA68-0019DB29C614.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E6BC0D37-2A5F-DD11-9ACB-000423D6B444.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/F251D794-2A5F-DD11-BA5D-00161757BF42.root'
),
secondaryFileNames = cms.untracked.vstring (
)
)
process.RECO = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('file:isoTest.root')
)
process.p1 = cms.Path(process.muIsolation)
process.outpath = cms.EndPath(process.RECO)
process.RECO.outputCommands.append('drop *_*_*_*')
process.RECO.outputCommands.extend(process.RecoMuonRECO.outputCommands)
|
tests/bench/test_yahoo_nyse_VRS.py | jmabry/pyaf | 377 | 34475 | import pyaf.Bench.TS_datasets as tsds
import pyaf.Bench.YahooStocks as ys
import warnings
symbol_lists = tsds.get_yahoo_symbol_lists();
y_keys = sorted(symbol_lists.keys())
print(y_keys)
k = "nysecomp"
tester = ys.cYahoo_Tester(tsds.load_yahoo_stock_prices(k) , "YAHOO_STOCKS_" + k);
with warnings.catch_warnings():
warnings.simplefilter("error")
tester.testSignals('VRS')
|
events/migrations/0040_event_team_size.py | horacexd/clist | 166 | 34482 |
# Generated by Django 2.2.10 on 2020-04-03 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0039_event_limits'),
]
operations = [
migrations.AddField(
model_name='event',
name='team_size',
field=models.IntegerField(default=3),
),
]
|
rllib/contrib/bandits/envs/__init__.py | firebolt55439/ray | 21,382 | 34492 | from ray.rllib.contrib.bandits.envs.discrete import LinearDiscreteEnv, \
WheelBanditEnv
from ray.rllib.contrib.bandits.envs.parametric import ParametricItemRecoEnv
__all__ = ["LinearDiscreteEnv", "WheelBanditEnv", "ParametricItemRecoEnv"]
|
tests/k8s/k8s_test_utilities.py | shubav/sonic-mgmt | 132 | 34500 | import logging
import time
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
def join_master(duthost, master_vip):
"""
Joins DUT to Kubernetes master
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
If join fails, test will fail at the assertion to check_connected
"""
logger.info("Joining DUT to Kubernetes master")
dut_join_cmds = ['sudo config kube server disable on',
'sudo config kube server ip {}'.format(master_vip),
'sudo config kube server disable off']
duthost.shell_cmds(cmds=dut_join_cmds)
    pytest_assert(poll_for_status_change(duthost, True), "DUT failed to successfully join Kubernetes master")
def make_vip_unreachable(duthost, master_vip):
"""
Makes Kubernetes master VIP unreachable from SONiC DUT by configuring iptables rules. Cleans preexisting iptables rules for VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
logger.info("Making Kubernetes master VIP unreachable from DUT")
clean_vip_iptables_rules(duthost, master_vip)
duthost.shell('sudo iptables -A INPUT -s {} -j DROP'.format(master_vip))
duthost.shell('sudo iptables -A OUTPUT -d {} -j DROP'.format(master_vip))
def make_vip_reachable(duthost, master_vip):
"""
Makes Kubernetes master VIP reachable from SONiC DUT by removing any iptables rules associated with the VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
logger.info("Making Kubernetes master VIP reachable from DUT")
clean_vip_iptables_rules(duthost, master_vip)
def clean_vip_iptables_rules(duthost, master_vip):
"""
Removes all iptables rules associated with the VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
iptables_rules = duthost.shell('sudo iptables -S | grep {} || true'.format(master_vip))["stdout_lines"]
logger.info('iptables rules: {}'.format(iptables_rules))
for line in iptables_rules:
if line:
duthost.shell('sudo iptables -D {}'.format(line[2:]))
def check_connected(duthost):
"""
Checks if the DUT already shows status 'connected' to Kubernetes master
Args:
duthost: DUT host object
Returns:
True if connected, False if not connected
"""
kube_server_status = duthost.shell('show kube server')["stdout_lines"]
logger.info("Kube server status: {}".format(kube_server_status))
for line in kube_server_status:
if line.startswith("KUBERNETES_MASTER SERVER connected"):
return line.endswith("true")
logger.info("Kubernetes server check_connected failed to check server status")
def poll_for_status_change(duthost, exp_status, poll_wait_secs=5, min_wait_time=20, max_wait_time=120):
"""
Polls to see if kube server connected status updates as expected
Args:
duthost: DUT host object
exp_status: expected server connected status once processes are synced
poll_wait_secs: seconds between each server connected status poll. Default: 5 seconds
min_wait_time: seconds before starting poll of server connected status. Default: 20 seconds
max_wait_time: maximum amount of time to spend polling for status change. Default: 120 seconds
Returns:
True if server connected status updates as expected by max_wait_time
False if server connected status fails to update as expected by max_wait_time
"""
time.sleep(min_wait_time)
timeout_wait_secs = max_wait_time - min_wait_time
while (timeout_wait_secs > 0):
if (check_connected(duthost) == exp_status):
logging.info("Time taken to update Kube server status: {} seconds".format(timeout_wait_secs))
return True
time.sleep(poll_wait_secs)
timeout_wait_secs -= poll_wait_secs
return False
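# Example (added for illustration, not part of the original module): a minimal
# sketch of how these helpers compose in a test; `duthost` and `master_vip`
# would normally come from pytest fixtures and are assumptions here.
def example_master_failover_flow(duthost, master_vip):
    join_master(duthost, master_vip)             # DUT joins the Kubernetes master
    make_vip_unreachable(duthost, master_vip)    # simulate a master outage
    pytest_assert(poll_for_status_change(duthost, False),
                  "DUT still reports the master as connected")
    make_vip_reachable(duthost, master_vip)      # restore connectivity
    pytest_assert(poll_for_status_change(duthost, True),
                  "DUT did not reconnect to the master")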
|
common/py_vulcanize/third_party/rjsmin/_setup/py2/data.py | tingshao/catapult | 2,151 | 34508 | # -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# <NAME> or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===================
Data distribution
===================
This module provides tools to simplify data distribution.
"""
__author__ = u"<NAME>"
__docformat__ = "restructuredtext en"
from distutils import filelist as _filelist
import os as _os
import posixpath as _posixpath
import sys as _sys
from _setup import commands as _commands
def splitpath(path):
""" Split a path """
drive, path = '', _os.path.normpath(path)
try:
splitunc = _os.path.splitunc
except AttributeError:
pass
else:
drive, path = splitunc(path)
if not drive:
drive, path = _os.path.splitdrive(path)
elems = []
try:
sep = _os.path.sep
except AttributeError:
sep = _os.path.join('1', '2')[1:-1]
while 1:
prefix, path = _os.path.split(path)
elems.append(path)
if prefix in ('', sep):
drive = _os.path.join(drive, prefix)
break
path = prefix
elems.reverse()
return drive, elems
def finalizer(installer):
""" Finalize install_data """
data_files = []
for item in installer.data_files:
if not isinstance(item, Data):
data_files.append(item)
continue
data_files.extend(item.flatten(installer))
installer.data_files = data_files
class Data(object):
""" File list container """
def __init__(self, files, target=None, preserve=0, strip=0,
prefix=None):
""" Initialization """
self._files = files
self._target = target
self._preserve = preserve
self._strip = strip
self._prefix = prefix
self.fixup_commands()
def fixup_commands(self):
pass
def from_templates(cls, *templates, **kwargs):
""" Initialize from template """
files = _filelist.FileList()
for tpl in templates:
for line in tpl.split(';'):
files.process_template_line(line.strip())
files.sort()
files.remove_duplicates()
result = []
for filename in files.files:
_, elems = splitpath(filename)
if '.svn' in elems or '.git' in elems:
continue
result.append(filename)
return cls(result, **kwargs)
from_templates = classmethod(from_templates)
def flatten(self, installer):
""" Flatten the file list to (target, file) tuples """
# pylint: disable = W0613
if self._prefix:
_, prefix = splitpath(self._prefix)
telems = prefix
else:
telems = []
tmap = {}
for fname in self._files:
(_, name), target = splitpath(fname), telems
if self._preserve:
if self._strip:
name = name[max(0, min(self._strip, len(name) - 1)):]
if len(name) > 1:
target = telems + name[:-1]
tmap.setdefault(_posixpath.join(*target), []).append(fname)
return tmap.items()
class Documentation(Data):
""" Documentation container """
def fixup_commands(self):
_commands.add_option('install_data', 'without-docs',
help_text='Do not install documentation files',
inherit='install',
)
_commands.add_finalizer('install_data', 'documentation', finalizer)
def flatten(self, installer):
""" Check if docs should be installed at all """
if installer.without_docs:
return []
return Data.flatten(self, installer)
class Manpages(Documentation):
""" Manpages container """
def dispatch(cls, files):
""" Automatically dispatch manpages to their target directories """
mpmap = {}
for manpage in files:
normalized = _os.path.normpath(manpage)
_, ext = _os.path.splitext(normalized)
if ext.startswith(_os.path.extsep):
ext = ext[len(_os.path.extsep):]
mpmap.setdefault(ext, []).append(manpage)
return [cls(manpages, prefix=_posixpath.join(
'share', 'man', 'man%s' % section,
)) for section, manpages in mpmap.items()]
dispatch = classmethod(dispatch)
def flatten(self, installer):
""" Check if manpages are suitable """
if _sys.platform == 'win32':
return []
return Documentation.flatten(self, installer)
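# Example (added for illustration, not part of the original module): a minimal
# sketch of declaring data files for setup.py; the template line, doc directory
# and manpage path are assumptions about a hypothetical project layout.
if __name__ == "__main__":
    docs = Documentation.from_templates(
        'recursive-include docs *.txt', prefix='share/doc/project'
    )
    pages = Manpages.dispatch(['docs/project.1'])
    print(docs._files)                       # files collected from the template
    print([page._prefix for page in pages])  # e.g. ['share/man/man1']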
|
keyboard/mechanical-button/single-button/keyboard.py | ELE-Clouds/mpy-lib | 116 | 34516 |
# -*- coding:UTF-8 -*-
'''
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
******************************************************************************
* File:     keyboard.py
* Overview: Detects single-click, multi-click (click count not yet limited),
*           long-press and short-press actions of a single mechanical button
*           and reports them as events.
* Version:  V0.10
* Author:   <NAME>
* Date:     2018-07-26
* History:  Date          Editor   Version   Notes
*           2018-07-26    <NAME>   V0.10     File created
******************************************************************************'''
class KEYBOARD:
cont = 0
def __init__(self, _btnKey, _tmBtn, _btnDef = 1, even_djlong = None, even_lj = None, _pull = None):
self.btn = _btnKey
if _pull == "UP":
self.btn.init(_btnKey.IN, _btnKey.PULL_UP)
elif _pull == "DOWN":
self.btn.init(_btnKey.IN, _btnKey.PULL_DOWN)
else:
self.btn.init(_btnKey.IN)
self.btnDef = _btnDef
self.eve_btnLon = even_djlong
self.evn_Continuous_Clicks = even_lj
        self.btnLabDown = 0  # scan count while the button is held down
        self.btnLabUp = 0  # scan count while the button is released
        self.Continuous_Clicks = 0  # number of consecutive clicks
        self.clock = 10  # timer tick period, in milliseconds
_tmBtn.init(freq = (1000 / self.clock))
_tmBtn.callback(self.doBtnScan)
        self.staLon = 1  # long-press mode flag, 1: timed long press, 0: counted long press
        self.tLon = 3000  # delay for long-press timing/counting, in milliseconds
        self.TIME_CONT_CLICKS = 50  # multi-click interval: how long the pressed/released state must persist, in scan ticks
    '''*************************************************************************
    * Function:    Button scan
    * Description: Timer callback that detects whether the button has acted and
    *              classifies the kind of action.
    * Input:
    *     t : required by the timer callback signature; the call fails without it.
    * Output:      None
    * Returns:     True
    **************************************************************************'''
    # Scan the button; called from the timer interrupt
    def doBtnScan(self, t):
self.btnLabUp = (self.btnLabUp * int(not(self.btn.value() ^ int(not(self.btnDef))))) + int(not(self.btn.value() ^ int(not(self.btnDef))))
btdown = self.btnLabDown
self.btnLabDown = (self.btnLabDown * int(not(self.btn.value() ^ self.btnDef))) + int(not(self.btn.value() ^ self.btnDef))
        # Long-press timing / counting
        # t1: how long the button has been held down
        if (self.btnLabDown * self.clock) == self.tLon:
            if self.staLon == 1:
                if self.eve_btnLon != None:
                    self.eve_btnLon()  # long-press event; do not run long code here, or the timer will raise an error
            elif self.staLon == 0:
                if self.eve_btnLon != None:
                    self.cont += 1
                    self.eve_btnLon(self.cont)  # long-press event; do not run long code here, or the timer will raise an error
            self.btnLabDown = 0
        if self.btnLabUp > 5:
            self.cont = 0
        # Consecutive clicks
if (btdown > 5 and btdown < self.TIME_CONT_CLICKS) and self.btnLabUp > 0:
self.Continuous_Clicks += 1
if (self.btnLabUp > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0) or (self.btnLabDown > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0):
if self.evn_Continuous_Clicks != None:
                self.evn_Continuous_Clicks(self.Continuous_Clicks)  # multi-click event (count 1 = single click); do not run long code here, or the timer will raise an error
self.Continuous_Clicks = 0
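# Example (added for illustration, not part of the original module): a minimal
# MicroPython sketch assuming a pyboard-style Timer (init(freq=...) plus
# callback(...)) and an active-low button; the pin number, timer id and the
# two callbacks are assumptions for the target board.
if __name__ == "__main__":
    from machine import Pin, Timer
    def on_long_press():
        print("long press")
    def on_clicks(count):
        print("clicked", count, "time(s)")
    kb = KEYBOARD(Pin(0), Timer(1), _btnDef=0,
                  even_djlong=on_long_press, even_lj=on_clicks, _pull="UP")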
|
sionna/channel/tr38901/__init__.py | NVlabs/sionna | 163 | 34527 | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""
Channel sub-package of the Sionna library implementing 3GPP TR 38.901 models.
"""
# pylint: disable=line-too-long
from .antenna import AntennaElement, AntennaPanel, PanelArray, Antenna, AntennaArray
from .lsp import LSP, LSPGenerator
from .rays import Rays, RaysGenerator
from .system_level_scenario import SystemLevelScenario
from .rma_scenario import RMaScenario
from .umi_scenario import UMiScenario
from .uma_scenario import UMaScenario
from .channel_coefficients import Topology, ChannelCoefficientsGenerator
from .system_level_channel import SystemLevelChannel
from .rma import RMa
from .uma import UMa
from .umi import UMi
from .tdl import TDL
from .cdl import CDL
|
skin_detector/scripts.py | version0chiro/xilinx_Code | 154 | 34537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import os
import cv2
import numpy
def find_images(path, recursive=False, ignore=True):
if os.path.isfile(path):
yield path
elif os.path.isdir(path):
assert os.path.isdir(path), 'FileIO - get_images: Directory does not exist'
assert isinstance(recursive, bool), 'FileIO - get_images: recursive must be a boolean variable'
ext, result = ['png', 'jpg', 'jpeg'], []
for path_a in os.listdir(path):
path_a = path + '/' + path_a
if os.path.isdir(path_a) and recursive:
for path_b in find_images(path_a):
yield path_b
check_a = path_a.split('.')[-1] in ext
check_b = ignore or ('-' not in path_a.split('/')[-1])
if check_a and check_b:
yield path_a
else:
raise ValueError('error! path is not a valid path or directory')
def display(title, img, max_size=200000):
assert isinstance(img, numpy.ndarray), 'img must be a numpy array'
assert isinstance(title, str), 'title must be a string'
scale = numpy.sqrt(min(1.0, float(max_size) / (img.shape[0] * img.shape[1])))
shape = (int(scale * img.shape[1]), int(scale * img.shape[0]))
img = cv2.resize(img, shape)
cv2.imshow(title, img)
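# Example (added for illustration, not part of the original module): a minimal
# sketch that walks a directory and shows every image found; './images' is an
# assumed path, and cv2.waitKey is needed so the windows actually render.
if __name__ == "__main__":
    for image_path in find_images('./images', recursive=True):
        image = cv2.imread(image_path)
        if image is not None:
            display(image_path, image)
    cv2.waitKey(0)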
|
oslo_config/_list_opts.py | CyrilRoelandteNovance/oslo.config | 110 | 34542 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
from oslo_config import cfg
import stevedore
def list_opts():
default_config_files = [
'~/.project/project.conf',
'~/project.conf',
'/etc/project/project.conf',
'/etc/project.conf',
]
default_config_dirs = [
'~/.project/project.conf.d/',
'~/project.conf.d/',
'/etc/project/project.conf.d/',
'/etc/project.conf.d/',
]
options = [(None, cfg.ConfigOpts._list_options_for_discovery(
default_config_files,
default_config_dirs,
))]
ext_mgr = stevedore.ExtensionManager(
"oslo.config.driver",
invoke_on_load=True)
source_names = ext_mgr.names()
for source_name in source_names:
source = ext_mgr[source_name].obj
source_options = copy.deepcopy(source.list_options_for_discovery())
source_description = inspect.getdoc(source)
source_options.insert(
0,
cfg.StrOpt(
name='driver',
sample_default=source_name,
help=cfg._SOURCE_DRIVER_OPTION_HELP,
)
)
group_name = 'sample_{}_source'.format(source_name)
group_help = 'Example of using a {} source'.format(source_name)
if source_description:
group_help = '{}\n\n{}: {}'.format(
group_help,
source_name,
source_description,
)
group = cfg.OptGroup(
name=group_name,
help=group_help,
driver_option='driver',
dynamic_group_owner='config_source',
)
options.append((group, source_options))
return options
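# Example (added for illustration, not part of the original module): a minimal
# sketch of consuming list_opts(), e.g. the way an oslo.config.opts entry-point
# consumer would, printing each discovered group and its option names.
if __name__ == "__main__":
    for group, opts in list_opts():
        group_name = group.name if group else 'DEFAULT'
        print(group_name, [opt.name for opt in opts])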
|
src/gam/gapi/reports.py | GAM-team/GAM | 102 | 34599 | import calendar
import datetime
import re
import sys
from dateutil.relativedelta import relativedelta
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam import utils
from gam.gapi.directory import orgunits as gapi_directory_orgunits
def build():
return gam.buildGAPIObject('reports')
REPORT_CHOICE_MAP = {
'access': 'access_transparency',
'accesstransparency': 'access_transparency',
'calendars': 'calendar',
'customers': 'customer',
'doc': 'drive',
'docs': 'drive',
'domain': 'customer',
'enterprisegroups': 'groups_enterprise',
'google+': 'gplus',
'group': 'groups',
'groupsenterprise': 'groups_enterprise',
'hangoutsmeet': 'meet',
'logins': 'login',
'oauthtoken': 'token',
'tokens': 'token',
'usage': 'usage',
'usageparameters': 'usageparameters',
'users': 'user',
'useraccounts': 'user_accounts',
}
def showUsageParameters():
rep = build()
throw_reasons = [
gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
]
todrive = False
if len(sys.argv) == 3:
controlflow.missing_argument_exit('user or customer',
'report usageparameters')
report = sys.argv[3].lower()
titles = ['parameter']
if report == 'customer':
endpoint = rep.customerUsageReports()
kwargs = {}
elif report == 'user':
endpoint = rep.userUsageReport()
kwargs = {'userKey': gam._get_admin_email()}
else:
controlflow.expected_argument_exit('usageparameters',
['user', 'customer'], report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
all_parameters = set()
i = 4
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'todrive':
todrive = True
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i],
'gam report usageparameters')
fullDataRequired = ['all']
while True:
try:
result = gapi.call(endpoint,
'get',
throw_reasons=throw_reasons,
date=tryDate,
customerId=customerId,
fields='warnings,usageReports(parameters(name))',
**kwargs)
warnings = result.get('warnings', [])
usage = result.get('usageReports')
has_reports = bool(usage)
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No usage parameters available.')
sys.exit(1)
if has_reports:
for parameter in usage[0]['parameters']:
name = parameter.get('name')
if name:
all_parameters.add(name)
if fullData == 1:
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
csvRows = []
for parameter in sorted(all_parameters):
csvRows.append({'parameter': parameter})
display.write_csv_file(csvRows, titles,
f'{report.capitalize()} Report Usage Parameters',
todrive)
REPORTS_PARAMETERS_SIMPLE_TYPES = [
'intValue', 'boolValue', 'datetimeValue', 'stringValue'
]
def showUsage():
rep = build()
throw_reasons = [
gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
]
todrive = False
if len(sys.argv) == 3:
controlflow.missing_argument_exit('user or customer', 'report usage')
report = sys.argv[3].lower()
titles = ['date']
if report == 'customer':
endpoint = rep.customerUsageReports()
kwargs = [{}]
elif report == 'user':
endpoint = rep.userUsageReport()
kwargs = [{'userKey': 'all'}]
titles.append('user')
else:
controlflow.expected_argument_exit('usage', ['user', 'customer'],
report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
parameters = []
start_date = end_date = orgUnitId = None
skip_day_numbers = []
skip_dates = set()
one_day = datetime.timedelta(days=1)
i = 4
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'startdate':
start_date = utils.get_yyyymmdd(sys.argv[i + 1],
returnDateTime=True)
i += 2
elif myarg == 'enddate':
end_date = utils.get_yyyymmdd(sys.argv[i + 1], returnDateTime=True)
i += 2
elif myarg == 'todrive':
todrive = True
i += 1
elif myarg in ['fields', 'parameters']:
parameters = sys.argv[i + 1].split(',')
i += 2
elif myarg == 'skipdates':
for skip in sys.argv[i + 1].split(','):
if skip.find(':') == -1:
skip_dates.add(utils.get_yyyymmdd(skip,
returnDateTime=True))
else:
skip_start, skip_end = skip.split(':', 1)
skip_start = utils.get_yyyymmdd(skip_start,
returnDateTime=True)
skip_end = utils.get_yyyymmdd(skip_end, returnDateTime=True)
while skip_start <= skip_end:
skip_dates.add(skip_start)
skip_start += one_day
i += 2
elif myarg == 'skipdaysofweek':
skipdaynames = sys.argv[i + 1].split(',')
dow = [d.lower() for d in calendar.day_abbr]
skip_day_numbers = [dow.index(d) for d in skipdaynames if d in dow]
i += 2
elif report == 'user' and myarg in ['orgunit', 'org', 'ou']:
_, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
i += 2
elif report == 'user' and myarg in usergroup_types:
users = gam.getUsersToModify(myarg, sys.argv[i + 1])
kwargs = [{'userKey': user} for user in users]
i += 2
else:
controlflow.invalid_argument_exit(sys.argv[i],
f'gam report usage {report}')
if parameters:
titles.extend(parameters)
parameters = ','.join(parameters)
else:
parameters = None
if not end_date:
end_date = datetime.datetime.now()
if not start_date:
start_date = end_date + relativedelta(months=-1)
if orgUnitId:
for kw in kwargs:
kw['orgUnitID'] = orgUnitId
usage_on_date = start_date
start_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
usage_end_date = end_date
end_date = end_date.strftime(YYYYMMDD_FORMAT)
start_use_date = end_use_date = None
csvRows = []
while usage_on_date <= usage_end_date:
if usage_on_date.weekday() in skip_day_numbers or \
usage_on_date in skip_dates:
usage_on_date += one_day
continue
use_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
usage_on_date += one_day
try:
for kwarg in kwargs:
try:
usage = gapi.get_all_pages(endpoint,
'get',
'usageReports',
throw_reasons=throw_reasons,
customerId=customerId,
date=use_date,
parameters=parameters,
**kwarg)
except gapi.errors.GapiBadRequestError:
continue
for entity in usage:
row = {'date': use_date}
if 'userEmail' in entity['entity']:
row['user'] = entity['entity']['userEmail']
for item in entity.get('parameters', []):
if 'name' not in item:
continue
name = item['name']
if name == 'cros:device_version_distribution':
for cros_ver in item['msgValue']:
v = cros_ver['version_number']
column_name = f'cros:num_devices_chrome_{v}'
if column_name not in titles:
titles.append(column_name)
row[column_name] = cros_ver['num_devices']
else:
if not name in titles:
titles.append(name)
for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
if ptype in item:
row[name] = item[ptype]
break
else:
row[name] = ''
if not start_use_date:
start_use_date = use_date
end_use_date = use_date
csvRows.append(row)
except gapi.errors.GapiInvalidError as e:
display.print_warning(str(e))
break
if start_use_date:
report_name = f'{report.capitalize()} Usage Report - {start_use_date}:{end_use_date}'
else:
report_name = f'{report.capitalize()} Usage Report - {start_date}:{end_date} - No Data'
display.write_csv_file(csvRows, titles, report_name, todrive)
def showReport():
rep = build()
throw_reasons = [gapi.errors.ErrorReason.INVALID]
report = sys.argv[2].lower()
report = REPORT_CHOICE_MAP.get(report.replace('_', ''), report)
if report == 'usage':
showUsage()
return
if report == 'usageparameters':
showUsageParameters()
return
valid_apps = gapi.get_enum_values_minus_unspecified(
rep._rootDesc['resources']['activities']['methods']['list']
['parameters']['applicationName']['enum']) + ['customer', 'user']
if report not in valid_apps:
controlflow.expected_argument_exit('report',
', '.join(sorted(valid_apps)),
report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
filters = parameters = actorIpAddress = groupIdFilter = startTime = endTime = eventName = orgUnitId = None
tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
to_drive = False
userKey = 'all'
fullDataRequired = None
i = 3
while i < len(sys.argv):
myarg = sys.argv[i].lower()
if myarg == 'date':
tryDate = utils.get_yyyymmdd(sys.argv[i + 1])
i += 2
elif myarg in ['orgunit', 'org', 'ou']:
_, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
i += 2
elif myarg == 'fulldatarequired':
fullDataRequired = []
fdr = sys.argv[i + 1].lower()
if fdr and fdr == 'all':
fullDataRequired = 'all'
else:
fullDataRequired = fdr.replace(',', ' ').split()
i += 2
elif myarg == 'start':
startTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
i += 2
elif myarg == 'end':
endTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
i += 2
elif myarg == 'event':
eventName = sys.argv[i + 1]
i += 2
elif myarg == 'user':
userKey = sys.argv[i + 1].lower()
if userKey != 'all':
userKey = gam.normalizeEmailAddressOrUID(sys.argv[i + 1])
i += 2
elif myarg in ['filter', 'filters']:
filters = sys.argv[i + 1]
i += 2
elif myarg in ['fields', 'parameters']:
parameters = sys.argv[i + 1]
i += 2
elif myarg == 'ip':
actorIpAddress = sys.argv[i + 1]
i += 2
elif myarg == 'groupidfilter':
groupIdFilter = sys.argv[i + 1]
i += 2
elif myarg == 'todrive':
to_drive = True
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i], 'gam report')
if report == 'user':
while True:
try:
one_page = gapi.call(rep.userUsageReport(),
'get',
throw_reasons=throw_reasons,
date=tryDate,
userKey=userKey,
customerId=customerId,
orgUnitID=orgUnitId,
fields='warnings,usageReports',
maxResults=1)
warnings = one_page.get('warnings', [])
has_reports = bool(one_page.get('usageReports'))
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No user report available.')
sys.exit(1)
if fullData == 0:
continue
page_message = gapi.got_total_items_msg('Users', '...\n')
usage = gapi.get_all_pages(rep.userUsageReport(),
'get',
'usageReports',
page_message=page_message,
throw_reasons=throw_reasons,
date=tryDate,
userKey=userKey,
customerId=customerId,
orgUnitID=orgUnitId,
filters=filters,
parameters=parameters)
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
if not usage:
print('No user report available.')
sys.exit(1)
titles = ['email', 'date']
csvRows = []
for user_report in usage:
if 'entity' not in user_report:
continue
row = {'email': user_report['entity']['userEmail'], 'date': tryDate}
for item in user_report.get('parameters', []):
if 'name' not in item:
continue
name = item['name']
if not name in titles:
titles.append(name)
for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
if ptype in item:
row[name] = item[ptype]
break
else:
row[name] = ''
csvRows.append(row)
display.write_csv_file(csvRows, titles, f'User Reports - {tryDate}',
to_drive)
elif report == 'customer':
while True:
try:
first_page = gapi.call(rep.customerUsageReports(),
'get',
throw_reasons=throw_reasons,
customerId=customerId,
date=tryDate,
fields='warnings,usageReports')
warnings = first_page.get('warnings', [])
has_reports = bool(first_page.get('usageReports'))
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No customer report available.')
sys.exit(1)
if fullData == 0:
continue
usage = gapi.get_all_pages(rep.customerUsageReports(),
'get',
'usageReports',
throw_reasons=throw_reasons,
customerId=customerId,
date=tryDate,
parameters=parameters)
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
if not usage:
print('No customer report available.')
sys.exit(1)
titles = ['name', 'value', 'client_id']
csvRows = []
auth_apps = list()
for item in usage[0]['parameters']:
if 'name' not in item:
continue
name = item['name']
if 'intValue' in item:
value = item['intValue']
elif 'msgValue' in item:
if name == 'accounts:authorized_apps':
for subitem in item['msgValue']:
app = {}
for an_item in subitem:
if an_item == 'client_name':
app['name'] = 'App: ' + \
subitem[an_item].replace('\n', '\\n')
elif an_item == 'num_users':
app['value'] = f'{subitem[an_item]} users'
elif an_item == 'client_id':
app['client_id'] = subitem[an_item]
auth_apps.append(app)
continue
values = []
for subitem in item['msgValue']:
if 'count' in subitem:
mycount = myvalue = None
for key, value in list(subitem.items()):
if key == 'count':
mycount = value
else:
myvalue = value
if mycount and myvalue:
values.append(f'{myvalue}:{mycount}')
value = ' '.join(values)
elif 'version_number' in subitem \
and 'num_devices' in subitem:
values.append(f'{subitem["version_number"]}:'
f'{subitem["num_devices"]}')
else:
continue
value = ' '.join(sorted(values, reverse=True))
csvRows.append({'name': name, 'value': value})
for app in auth_apps: # put apps at bottom
csvRows.append(app)
display.write_csv_file(csvRows,
titles,
f'Customer Report - {tryDate}',
todrive=to_drive)
else:
page_message = gapi.got_total_items_msg('Activities', '...\n')
activities = gapi.get_all_pages(rep.activities(),
'list',
'items',
page_message=page_message,
applicationName=report,
userKey=userKey,
customerId=customerId,
actorIpAddress=actorIpAddress,
startTime=startTime,
endTime=endTime,
eventName=eventName,
filters=filters,
orgUnitID=orgUnitId,
groupIdFilter=groupIdFilter)
if activities:
titles = ['name']
csvRows = []
for activity in activities:
events = activity['events']
del activity['events']
activity_row = utils.flatten_json(activity)
purge_parameters = True
for event in events:
for item in event.get('parameters', []):
if set(item) == {'value', 'name'}:
event[item['name']] = item['value']
elif set(item) == {'intValue', 'name'}:
if item['name'] in ['start_time', 'end_time']:
val = item.get('intValue')
if val is not None:
val = int(val)
if val >= 62135683200:
event[item['name']] = \
datetime.datetime.fromtimestamp(
val-62135683200).isoformat()
else:
event[item['name']] = item['intValue']
elif set(item) == {'boolValue', 'name'}:
event[item['name']] = item['boolValue']
elif set(item) == {'multiValue', 'name'}:
event[item['name']] = ' '.join(item['multiValue'])
elif item['name'] == 'scope_data':
parts = {}
for message in item['multiMessageValue']:
for mess in message['parameter']:
value = mess.get(
'value',
' '.join(mess.get('multiValue', [])))
parts[mess['name']] = parts.get(
mess['name'], []) + [value]
for part, v in parts.items():
if part == 'scope_name':
part = 'scope'
event[part] = ' '.join(v)
else:
purge_parameters = False
if purge_parameters:
event.pop('parameters', None)
row = utils.flatten_json(event)
row.update(activity_row)
for item in row:
if item not in titles:
titles.append(item)
csvRows.append(row)
display.sort_csv_titles([
'name',
], titles)
display.write_csv_file(csvRows, titles,
f'{report.capitalize()} Activity Report',
to_drive)
def _adjust_date(errMsg):
match_date = re.match(
'Data for dates later than (.*) is not yet '
'available. Please check back later', errMsg)
if not match_date:
match_date = re.match('Start date can not be later than (.*)', errMsg)
if not match_date:
controlflow.system_error_exit(4, errMsg)
return str(match_date.group(1))
def _check_full_data_available(warnings, tryDate, fullDataRequired,
has_reports):
one_day = datetime.timedelta(days=1)
tryDateTime = datetime.datetime.strptime(tryDate, YYYYMMDD_FORMAT)
# move to day before if we don't have at least one usageReport
if not has_reports:
tryDateTime -= one_day
return (0, tryDateTime.strftime(YYYYMMDD_FORMAT))
for warning in warnings:
if warning['code'] == 'PARTIAL_DATA_AVAILABLE':
for app in warning['data']:
if app['key'] == 'application' and \
app['value'] != 'docs' and \
fullDataRequired is not None and \
(fullDataRequired == 'all' or app['value'] in fullDataRequired):
tryDateTime -= one_day
return (0, tryDateTime.strftime(YYYYMMDD_FORMAT))
elif warning['code'] == 'DATA_NOT_AVAILABLE':
for app in warning['data']:
if app['key'] == 'application' and \
app['value'] != 'docs' and \
(not fullDataRequired or app['value'] in fullDataRequired):
return (-1, tryDate)
return (1, tryDate)
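# Example (added for illustration, not part of the original module): a minimal
# sketch of the report-name normalization performed at the top of showReport();
# underscores are stripped, then aliases are mapped through REPORT_CHOICE_MAP.
if __name__ == "__main__":
    for alias in ('oauth_token', 'logins', 'drive'):
        report = REPORT_CHOICE_MAP.get(alias.replace('_', ''), alias)
        print(alias, '->', report)  # oauth_token -> token, logins -> login, drive -> drive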
|
src/mcedit2/widgets/infopanel.py | elcarrion06/mcedit2 | 673 | 34617 | """
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import weakref
from PySide import QtGui
from mcedit2.widgets.layout import Column
log = logging.getLogger(__name__)
class InfoPanel(QtGui.QWidget):
def __init__(self, attrs, signals, **kwargs):
"""
Create a widget that displays a list of an object's selected attributes, named in `attrs`.
The widget updates itself whenever one of the object's signals named in `signals` is emitted.
If an attribute named in `attrs` is not found on `object`, the InfoPanel instance is checked for
an attribute of the same name and it is used instead if found.
:type attrs: list of attribute names to display
:type signals: list of signals to monitor
:param kwargs: args for QWidget
:type kwargs:
"""
QtGui.QWidget.__init__(self, **kwargs)
self.attrs = attrs
self.signals = signals
self.lastUpdate = time.time()
self.labels = [QtGui.QLabel() for _ in attrs]
self.setLayout(Column(*self.labels))
def updateLabels(self):
now = time.time()
if now < self.lastUpdate + 0.25:
return
self.lastUpdate = now
if self.object:
for attr, label in zip(self.attrs, self.labels):
try:
value = getattr(self.object, attr)
except AttributeError: # catches unrelated AttributeErrors in property getters...
try:
value = getattr(self, attr)
except AttributeError:
log.exception("Error updating info panel.")
value = getattr(self, attr, "Attribute not found")
label.setText("%s: %s" % (attr, value))
_object = None
@property
def object(self):
return self._object()
@object.setter
def object(self, value):
self._object = weakref.ref(value)
self.updateLabels()
for signal in self.signals:
signal = getattr(self.object, signal, None)
if signal:
signal.connect(self.updateLabels)
setObject = object.setter
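# Example (added for illustration, not part of the original module): a minimal
# sketch of attaching an InfoPanel to an object; the demo model, its attribute
# names and the empty signal list are assumptions made for this illustration.
if __name__ == "__main__":
    import sys
    class _DemoModel(object):
        blockWidth = 16
        blockHeight = 256
    app = QtGui.QApplication(sys.argv)
    model = _DemoModel()
    panel = InfoPanel(attrs=["blockWidth", "blockHeight"], signals=[])
    time.sleep(0.3)        # updateLabels() refreshes at most once per 0.25 s
    panel.object = model   # the property setter stores a weakref and refreshes the labels
    panel.show()
    app.exec_()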
|
tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py | bryanchriswhite/agents-aea | 126 | 34625 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the helpers module of the tac negotiation."""
from pathlib import Path
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
)
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.tac_negotiation.helpers import (
DEMAND_DATAMODEL_NAME,
SUPPLY_DATAMODEL_NAME,
_build_goods_datamodel,
build_goods_description,
build_goods_query,
)
from tests.conftest import ROOT_DIR
class TestHelpers(BaseSkillTestCase):
"""Test Helper module methods of tac control."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
def test_build_goods_datamodel_supply(self):
"""Test the _build_goods_datamodel of Helpers module for a supply."""
good_ids = ["1", "2"]
is_supply = True
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_datamodel_demand(self):
"""Test the _build_goods_datamodel of Helpers module for a demand."""
good_ids = ["1", "2"]
is_supply = False
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_description_supply(self):
"""Test the build_goods_description of Helpers module for supply."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_description_demand(self):
"""Test the build_goods_description of Helpers module for demand (same as above)."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = False
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_query(self):
"""Test the build_goods_query of Helpers module."""
good_ids = ["2", "3"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("3", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
constraints = [
(c.constraint_type.type, c.constraint_type.value)
for c in actual_query.constraints[0].constraints
]
for constraint in expected_constraints:
assert (
constraint.constraint_type.type,
constraint.constraint_type.value,
) in constraints
assert actual_query.model == expected_data_model
def test_build_goods_query_1_good(self):
"""Test the build_goods_query of Helpers module where there is 1 good."""
good_ids = ["2"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
for constraint in expected_constraints:
assert constraint in actual_query.constraints
assert actual_query.model == expected_data_model
|
examples/xlnet/utils/processor.py | qinzzz/texar-pytorch | 746 | 34680 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data processors. Adapted from
https://github.com/zihangdai/xlnet/blob/master/run_classifier.py
"""
import csv
import logging
from abc import ABC
from pathlib import Path
from typing import NamedTuple, Optional, Union, List, Dict, Type
class InputExample(NamedTuple):
r"""A single training/test example for simple sequence classification."""
guid: str
r"""Unique id for the example."""
text_a: str
r"""string. The untokenized text of the first sequence. For single sequence
tasks, only this sequence must be specified."""
text_b: Optional[str]
r"""(Optional) string. The untokenized text of the second sequence. Only
needs to be specified for sequence pair tasks."""
label: Optional[Union[str, float]]
r"""(Optional) string. The label of the example. This should be specified
for train and dev examples, but not for test examples."""
class DataProcessor:
r"""Base class for data converters for sequence classification data sets."""
labels: List[str]
is_regression: bool = False
task_name: str
__task_dict__: Dict[str, Type['DataProcessor']] = {}
def __init__(self, data_dir: str):
self.data_dir = Path(data_dir)
@classmethod
def register(cls, *names):
def decorator(klass):
for name in names:
prev_processor = DataProcessor.__task_dict__.get(
name.lower(), None)
if prev_processor is not None:
raise ValueError(
f"Cannot register {klass} as {name}. "
f"The name is already taken by {prev_processor}")
DataProcessor.__task_dict__[name.lower()] = klass
klass.task_name = names[0]
return klass
return decorator
def get_train_examples(self) -> List[InputExample]:
r"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError
def get_dev_examples(self) -> List[InputExample]:
r"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError
def get_test_examples(self) -> List[InputExample]:
r"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError
@classmethod
def _read_tsv(cls, input_file: Path,
quotechar: Optional[str] = None) -> List[List[str]]:
"""Reads a tab separated value file."""
with input_file.open('r') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if len(line) == 0:
continue
lines.append(line)
return lines
def get_processor_class(task: str) -> Type[DataProcessor]:
task = task.lower()
klass = DataProcessor.__task_dict__.get(task, None)
if klass is None:
raise ValueError(f"Unsupported task {task}")
return klass
class GLUEProcessor(DataProcessor, ABC):
train_file = "train.tsv"
dev_file = "dev.tsv"
test_file = "test.tsv"
label_column: int
text_a_column: int
text_b_column: int
contains_header = True
test_text_a_column: int
test_text_b_column: int
test_contains_header = True
def __init__(self, data_dir: str):
super().__init__(data_dir)
if not hasattr(self, 'test_text_a_column'):
self.test_text_a_column = self.text_a_column
if not hasattr(self, 'test_text_b_column'):
self.test_text_b_column = self.text_b_column
def get_train_examples(self) -> List[InputExample]:
return self._create_examples(
self._read_tsv(self.data_dir / self.train_file), "train")
def get_dev_examples(self) -> List[InputExample]:
return self._create_examples(
self._read_tsv(self.data_dir / self.dev_file), "dev")
def get_test_examples(self) -> List[InputExample]:
return self._create_examples(
self._read_tsv(self.data_dir / self.test_file), "test")
def _create_examples(self, lines: List[List[str]],
set_type: str) -> List[InputExample]:
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = f"{set_type}-{i}"
a_column = (self.text_a_column if set_type != "test" else
self.test_text_a_column)
b_column = (self.text_b_column if set_type != "test" else
self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
logging.warning('Incomplete line, ignored.')
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
logging.warning('Incomplete line, ignored.')
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = self.labels[0]
else:
if len(line) <= self.label_column:
logging.warning('Incomplete line, ignored.')
continue
label = line[self.label_column]
examples.append(InputExample(guid, text_a, text_b, label))
return examples
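# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The concrete processors below follow this pattern: subclass ``GLUEProcessor``,
# fill in the column layout, and register the task name so that
# ``get_processor_class`` can find it.  The task name and columns here are made up.
@DataProcessor.register("MyTask")
class MyTaskProcessor(GLUEProcessor):
    labels = ["0", "1"]
    label_column = 2
    text_a_column = 0
    text_b_column = 1
# get_processor_class("mytask") now returns MyTaskProcessor, and
# MyTaskProcessor("/path/to/data").get_train_examples() would read train.tsv.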
@DataProcessor.register("MNLI", "MNLI_matched")
class MnliMatchedProcessor(GLUEProcessor):
labels = ["contradiction", "entailment", "neutral"]
dev_file = "dev_matched.tsv"
test_file = "test_matched.tsv"
label_column = -1
text_a_column = 8
text_b_column = 9
@DataProcessor.register("MNLI_mismatched")
class MnliMismatchedProcessor(MnliMatchedProcessor):
dev_file = "dev_mismatched.tsv"
test_file = "test_mismatched.tsv"
@DataProcessor.register("STS-B", "stsb")
class StsbProcessor(GLUEProcessor):
labels: List[str] = []
is_regression = True
label_column = 9
text_a_column = 7
text_b_column = 8
def _create_examples(self, lines: List[List[str]],
set_type: str) -> List[InputExample]:
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = f"{set_type}-{i}"
a_column = (self.text_a_column if set_type != "test" else
self.test_text_a_column)
b_column = (self.text_b_column if set_type != "test" else
self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
logging.warning('Incomplete line, ignored.')
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
logging.warning('Incomplete line, ignored.')
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = 0.0
else:
if len(line) <= self.label_column:
logging.warning('Incomplete line, ignored.')
continue
label = float(line[self.label_column])
examples.append(InputExample(guid, text_a, text_b, label))
return examples
@DataProcessor.register("Yelp5")
class Yelp5Processor(DataProcessor):
labels = ["1", "2", "3", "4", "5"]
def get_train_examples(self) -> List[InputExample]:
return self._create_examples(self.data_dir / "train.csv")
def get_dev_examples(self) -> List[InputExample]:
return self._create_examples(self.data_dir / "test.csv")
def get_test_examples(self):
raise TypeError("The Yelp 5 dataset does not have a test set.")
@staticmethod
def _create_examples(input_file: Path) -> List[InputExample]:
"""Creates examples for the training and dev sets."""
examples = []
with input_file.open() as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
label = line[0]
text_a = line[1].replace('""', '"').replace('\\"', '"')
examples.append(InputExample(
guid=str(i), text_a=text_a, text_b=None, label=label))
return examples
@DataProcessor.register("IMDB")
class ImdbProcessor(DataProcessor):
labels = ["neg", "pos"]
def get_train_examples(self) -> List[InputExample]:
return self._create_examples(self.data_dir / "train")
def get_dev_examples(self) -> List[InputExample]:
return self._create_examples(self.data_dir / "test")
def get_test_examples(self):
raise TypeError("The IMDB dataset does not have a test set.")
@staticmethod
def _create_examples(data_dir: Path) -> List[InputExample]:
examples = []
for label in ["neg", "pos"]:
cur_dir = data_dir / label
for filename in cur_dir.iterdir():
if filename.suffix != ".txt":
continue
with filename.open() as f:
text = f.read().strip().replace("<br />", " ")
examples.append(InputExample(
guid=str(filename), text_a=text, text_b=None, label=label))
return examples
|
modules/text/text_generation/plato2_en_base/module.py | AK391/PaddleHub | 8,360 | 34703 | # coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import json
import sys
import argparse
import contextlib
from collections import namedtuple
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import plato2_en_base.models as plato_models
from plato2_en_base.tasks.dialog_generation import DialogGeneration
from plato2_en_base.utils import check_cuda, Timer
from plato2_en_base.utils.args import parse_args
@moduleinfo(
name="plato2_en_base",
version="1.0.0",
summary=
"A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling.",
author="baidu-nlp",
author_email="",
type="nlp/text_generation",
)
class Plato(hub.NLPPredictionModule):
def _initialize(self):
"""
initialize with the necessary elements
"""
if "CUDA_VISIBLE_DEVICES" not in os.environ:
raise RuntimeError("The module only support GPU. Please set the environment variable CUDA_VISIBLE_DEVICES.")
args = self.setup_args()
self.task = DialogGeneration(args)
self.model = plato_models.create_model(args, fluid.CUDAPlace(0))
self.Example = namedtuple("Example", ["src", "data_id"])
self._interactive_mode = False
def setup_args(self):
"""
Setup arguments.
"""
assets_path = os.path.join(self.directory, "assets")
vocab_path = os.path.join(assets_path, "vocab.txt")
init_pretraining_params = os.path.join(assets_path, "24L", "Plato")
spm_model_file = os.path.join(assets_path, "spm.model")
nsp_inference_model_path = os.path.join(assets_path, "24L", "NSP")
config_path = os.path.join(assets_path, "24L.json")
        # ArgumentParser.parse_args uses argv[1:], so it drops the first argument; the first element of sys.argv should therefore be ""
sys.argv = [
"", "--model", "Plato", "--vocab_path",
"%s" % vocab_path, "--do_lower_case", "False", "--init_pretraining_params",
"%s" % init_pretraining_params, "--spm_model_file",
"%s" % spm_model_file, "--nsp_inference_model_path",
"%s" % nsp_inference_model_path, "--ranking_score", "nsp_score", "--do_generation", "True", "--batch_size",
"1", "--config_path",
"%s" % config_path
]
parser = argparse.ArgumentParser()
plato_models.add_cmdline_args(parser)
DialogGeneration.add_cmdline_args(parser)
args = parse_args(parser)
args.load(args.config_path, "Model")
args.run_infer = True # only build infer program
return args
@serving
def generate(self, texts):
"""
Get the robot responses of the input texts.
Args:
texts(list or str): If not in the interactive mode, texts should be a list in which every element is the chat context separated with '\t'.
                Otherwise, texts should be one sentence. The module can get the context automatically.
Returns:
results(list): the robot responses.
"""
if not texts:
return []
if self._interactive_mode:
if isinstance(texts, str):
self.context.append(texts.strip())
texts = [" [SEP] ".join(self.context[-self.max_turn:])]
else:
raise ValueError("In the interactive mode, the input data should be a string.")
elif not isinstance(texts, list):
raise ValueError("If not in the interactive mode, the input data should be a list.")
bot_responses = []
for i, text in enumerate(texts):
example = self.Example(src=text.replace("\t", " [SEP] "), data_id=i)
record = self.task.reader._convert_example_to_record(example, is_infer=True)
data = self.task.reader._pad_batch_records([record], is_infer=True)
pred = self.task.infer_step(self.model, data)[0] # batch_size is 1
bot_response = pred["response"] # ignore data_id and score
bot_responses.append(bot_response)
if self._interactive_mode:
self.context.append(bot_responses[0].strip())
return bot_responses
@contextlib.contextmanager
def interactive_mode(self, max_turn=6):
"""
Enter the interactive mode.
Args:
            max_turn(int): the max dialogue turns. max_turn = 1 means the robot can only remember the last utterance you said.
"""
self._interactive_mode = True
self.max_turn = max_turn
self.context = []
yield
self.context = []
self._interactive_mode = False
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description='Run the %s module.' % self.name,
prog='hub run %s' % self.name,
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, optional.")
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
        except (DataFormatError, RuntimeError):
self.parser.print_help()
return None
results = self.generate(texts=input_data)
return results
if __name__ == "__main__":
module = Plato()
for result in module.generate(["Hello", "Hello\thi, nice to meet you, my name is tom\tso your name is tom?"]):
print(result)
with module.interactive_mode(max_turn=3):
while True:
human_utterance = input()
robot_utterance = module.generate(human_utterance)
print("Robot: %s" % robot_utterance[0])
|
labs-code/python/standard-product-track/get_followers.py | aod2004/getting-started-with-the-twitter-api-v2-for-academic-research | 282 | 34749 | from twarc import Twarc2, expansions
import json
# Replace your bearer token below
client = Twarc2(bearer_token="<PASSWORD>")
def main():
    # The followers function gets followers for the specified user
followers = client.followers(user="twitterdev")
for page in followers:
result = expansions.flatten(page)
for user in result:
            # Here we are printing the full user object JSON to the console
print(json.dumps(user))
if __name__ == "__main__":
main()
|
saleor/product/migrations/0168_fulfil_digitalcontenturl_orderline_token.py | eanknd/saleor | 1,392 | 34773 | # Generated by Django 3.2.12 on 2022-04-12 14:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("product", "0167_digitalcontenturl_order_line_token"),
("order", "0140_alter_orderline_old_id_and_created_at"),
]
operations = [
migrations.RunSQL(
"""
UPDATE product_digitalcontenturl
SET order_line_token = (
SELECT token
FROM order_orderline
WHERE product_digitalcontenturl.line_id = order_orderline.id
)
WHERE line_id IS NOT NULL;
""",
reverse_sql=migrations.RunSQL.noop,
),
]
|
tests/greedy/test_oblate_system.py | rodluger/starry | 116 | 34794 | import starry
import numpy as np
import matplotlib.pyplot as plt
import pytest
@pytest.mark.parametrize("ydeg,nw", [[0, None], [0, 10], [1, None], [1, 10]])
def test_system(ydeg, nw):
# Oblate map
map = starry.Map(udeg=2, ydeg=ydeg, oblate=True, nw=nw)
map[1] = 0.5
map[2] = 0.25
map.omega = 0.5
map.beta = 1.23
map.tpole = 8000
map.f = 1 - 2 / (map.omega ** 2 + 2)
map.obl = 30
# Compute system flux
star = starry.Primary(map, r=1.5)
planet = starry.Secondary(starry.Map(amp=0, nw=nw), porb=1.0, r=0.1, m=0)
sys = starry.System(star, planet)
t = np.linspace(-0.1, 0.1, 1000)
flux_sys = sys.flux(t, integrated=True)
# Compute map flux manually
x, y, z = sys.position(t)
xo = x[1] / star._r
yo = y[1] / star._r
flux_map = map.flux(xo=xo, yo=yo, ro=planet._r / star._r, integrated=True)
# Check that they agree
assert np.allclose(flux_map, flux_sys)
|
torchpq/transform/PCA.py | mhamilton723/TorchPQ | 103 | 34800 | import torch
import numpy as np
from ..CustomModule import CustomModule
class PCA(CustomModule):
def __init__(self, n_components):
"""
        Principal Component Analysis (PCA)
n_components: int
          number of principal components
"""
super(PCA, self).__init__()
assert n_components > 0
self.n_components = n_components
self.register_buffer("_mean", None)
self.register_buffer("_components", None)
@staticmethod
def covar(x, meaned=True, rowvar=True, inplace=False):
"""
compute covariance matrix of 'x'
x: torch.Tensor, shape : [m, n]
meaned: bool, default : True
if True, assume 'x' has zero mean
rowvar: bool, default : True
if True, assume 'm' represents n_features and 'n' represents n_samples
if False, assume 'm' represents n_samples and 'n' represents n_features
inplace: bool, default : False
            if meaned is False and inplace is True, the mean of 'x' will be subtracted from 'x' in place
            and added back to 'x' at the end; this avoids creating a new tensor of shape [m, n]
            at the cost of extra computation.
"""
if x.dim() > 2:
raise ValueError('x has more than 2 dimensions')
if x.dim() < 2:
x = x.view(1, -1)
if not rowvar and x.shape[0] != 1:
x = x.T
fact = 1.0 / (x.shape[1] - 1)
if not meaned:
mean = x.mean(dim=1, keepdim=True)
if inplace:
x.sub_(mean)
else:
x = x - mean
result = fact * (x @ x.T).squeeze()
if inplace and not meaned:
x.add_(mean)
return result
def train(self, x, inplace=False):
"""
train PCA with 'x'
x: torch.Tensor, shape : [d_vec, n_sample]
inplace: bool, default : False
          if True, reduce the memory consumption at the cost of extra computation
"""
assert x.shape[0] >= self.n_components
mean = x.mean(dim=1, keepdim=True) #[d_vec, 1]
if inplace:
x.sub_(mean)
else:
x = x - mean
x_cov = self.covar(x, rowvar=True, meaned=True)
if inplace:
x.add_(mean)
eig_val, eig_vec = torch.symeig(x_cov, eigenvectors=True, upper=False)
sorted_eig_val, sorted_index = eig_val.sort(descending=True)
sorted_eig_vec = eig_vec[:, sorted_index]
components = sorted_eig_vec[:, :self.n_components].T
self.register_buffer("_components", components)
self.register_buffer("_mean", mean)
def encode(self, x):
"""
        reduce the dimensionality of 'x' from 'd_vec' to 'n_components'
x: torch.Tensor, shape : [d_vec, n_samples], dtype : float32
return: torch.Tensor, shape : [n_components, n_samples], dtype : float32
"""
assert self._components is not None
assert x.shape[0] == self._components.shape[1]
x = x - self._mean
y = self._components @ x
return y
def decode(self, x):
"""
        reconstruct 'x' from 'n_components'-dimensional space to 'd_vec'-dimensional space
x: torch.Tensor, shape : [n_components, n_samples], dtype : float32
return: torch.Tensor, shape : [d_vec, n_samples], dtype : float32
"""
assert self._components is not None
assert x.shape[0] == self._components.shape[0]
y = self._components.T @ x
y = y + self._mean
return y |
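# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Round-trips random data through the PCA class above; shapes follow the docstrings
# (rows are features, columns are samples).
if __name__ == "__main__":
    x = torch.randn(64, 1000)   # d_vec=64 features, n_samples=1000
    pca = PCA(n_components=16)
    pca.train(x)                # fit the mean and principal components
    codes = pca.encode(x)       # shape: [16, 1000]
    recon = pca.decode(codes)   # shape: [64, 1000]
    print(codes.shape, recon.shape)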
tests/batch/base_parse_replication_stream_test.py | ywlianghang/mysql_streamer | 419 | 34853 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import signal
import sys
import mock
import pytest
from data_pipeline.producer import Producer
from data_pipeline.schematizer_clientlib.schematizer import SchematizerClient
from pymysqlreplication.event import QueryEvent
import replication_handler.batch.base_parse_replication_stream
from replication_handler.batch.base_parse_replication_stream import BaseParseReplicationStream
from replication_handler.components.change_log_data_event_handler import ChangeLogDataEventHandler
from replication_handler.components.data_event_handler import DataEventHandler
from replication_handler.components.schema_event_handler import SchemaEventHandler
from replication_handler.models.global_event_state import EventType
from replication_handler.util.misc import DataEvent
from replication_handler.util.misc import ReplicationHandlerEvent
from replication_handler.util.position import GtidPosition
class BaseParseReplicationStreamTest(object):
@pytest.yield_fixture(autouse=True)
def patch_zk(self):
with mock.patch.object(
replication_handler.batch.base_parse_replication_stream,
'ZKLock'
) as mock_zk:
yield mock_zk
@pytest.fixture
def schema_event(self):
return mock.Mock(spec=QueryEvent)
@pytest.fixture
def data_event(self):
return mock.Mock(spec=DataEvent)
@pytest.yield_fixture
def patch_restarter(self):
with mock.patch.object(
replication_handler.batch.base_parse_replication_stream,
'ReplicationStreamRestarter'
) as mock_restarter:
yield mock_restarter
@pytest.yield_fixture
def patch_save_position(self):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.save_position'
) as mock_save_position:
yield mock_save_position
@pytest.yield_fixture
def patch_data_handle_event(self):
with mock.patch.object(
DataEventHandler,
'handle_event',
) as mock_handle_event:
yield mock_handle_event
@pytest.yield_fixture
def patch_schema_handle_event(self):
with mock.patch.object(
SchemaEventHandler,
'handle_event',
) as mock_handle_event:
yield mock_handle_event
@pytest.fixture
def producer(self):
return mock.Mock(autospec=Producer)
@pytest.fixture
def schematizer(self):
return mock.Mock(autospec=SchematizerClient)
@pytest.yield_fixture
def patch_producer(self, producer):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.Producer'
) as mock_producer:
mock_producer.return_value.__enter__.return_value = producer
yield mock_producer
@pytest.yield_fixture
def patch_running(self):
with mock.patch.object(
BaseParseReplicationStream,
'running',
new_callable=mock.PropertyMock
) as mock_running:
mock_running.return_value = True
yield mock_running
@pytest.yield_fixture
def patch_process_event(self):
with mock.patch.object(
BaseParseReplicationStream,
'process_event',
) as mock_process_event:
yield mock_process_event
@pytest.yield_fixture(autouse=True)
def patch_schematizer(self, schematizer):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.get_schematizer'
) as mock_schematizer:
mock_schematizer.return_value = schematizer
yield mock_schematizer
@pytest.yield_fixture
def patch_db_connections(self, mock_db_connections):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.get_connection'
) as mock_get_db_conn:
mock_get_db_conn.return_value = mock_db_connections
yield mock_get_db_conn
@pytest.yield_fixture
def patch_exit(self):
with mock.patch.object(
os,
'_exit'
) as mock_exit:
yield mock_exit
@pytest.yield_fixture
def patch_sys_exit(self):
with mock.patch.object(
sys,
'exit'
) as mock_exit:
yield mock_exit
@pytest.yield_fixture
def patch_signal(self):
with mock.patch.object(
signal,
'signal'
) as mock_signal:
yield mock_signal
@pytest.fixture
def position_gtid_1(self):
return GtidPosition(gtid="fake_gtid_1")
@pytest.fixture
def position_gtid_2(self):
return GtidPosition(gtid="fake_gtid_2")
@pytest.yield_fixture
def patch_config(self):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.config.env_config'
) as mock_config:
mock_config.register_dry_run = False
mock_config.publish_dry_run = False
mock_config.namespace = "test_namespace"
mock_config.disable_meteorite = False
mock_config.changelog_mode = False
mock_config.topology_path = 'topology.yaml'
yield mock_config
@pytest.yield_fixture
def patch_config_with_small_recovery_queue_size(self):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.config.env_config'
) as mock_config:
mock_config.register_dry_run = False
mock_config.publish_dry_run = False
mock_config.namespace = "test_namespace"
mock_config.recovery_queue_size = 1
yield mock_config
@pytest.yield_fixture
def patch_config_changelog_on(self, patch_config):
patch_config.changelog_mode = True
yield patch_config
def _different_events_builder(
self,
schema_event,
data_event,
patch_config,
position_gtid_1,
position_gtid_2,
patch_restarter,
patch_db_connections,
patch_data_handle_event,
patch_schema_handle_event,
patch_producer,
patch_save_position,
patch_exit
):
schema_event_with_gtid = ReplicationHandlerEvent(
position=position_gtid_1,
event=schema_event
)
data_event_with_gtid = ReplicationHandlerEvent(
position=position_gtid_2,
event=data_event
)
patch_restarter.return_value.get_stream.return_value.next.side_effect = [
schema_event_with_gtid,
data_event_with_gtid,
]
def test_replication_stream_different_events(
self,
schema_event,
data_event,
patch_config,
position_gtid_1,
position_gtid_2,
patch_restarter,
patch_db_connections,
patch_data_handle_event,
patch_schema_handle_event,
patch_producer,
patch_save_position,
patch_exit
):
self._different_events_builder(
schema_event,
data_event,
patch_config,
position_gtid_1,
position_gtid_2,
patch_restarter,
patch_db_connections,
patch_data_handle_event,
patch_schema_handle_event,
patch_producer,
patch_save_position,
patch_exit
)
stream = self._init_and_run_batch()
assert patch_schema_handle_event.call_args_list == \
[mock.call(schema_event, position_gtid_1)]
assert patch_data_handle_event.call_args_list == \
[mock.call(data_event, position_gtid_2)]
assert patch_schema_handle_event.call_count == 1
assert patch_data_handle_event.call_count == 1
assert stream.register_dry_run is False
assert stream.publish_dry_run is False
def test_replication_stream_same_events(
self,
data_event,
patch_config,
position_gtid_1,
position_gtid_2,
patch_restarter,
patch_db_connections,
patch_data_handle_event,
patch_producer,
patch_exit,
patch_save_position,
):
data_event_with_gtid_1 = ReplicationHandlerEvent(
position=position_gtid_1,
event=data_event
)
data_event_with_gtid_2 = ReplicationHandlerEvent(
position=position_gtid_2,
event=data_event
)
patch_restarter.return_value.get_stream.return_value.next.side_effect = [
data_event_with_gtid_1,
data_event_with_gtid_2
]
self._init_and_run_batch()
assert patch_data_handle_event.call_args_list == [
mock.call(data_event, position_gtid_1),
mock.call(data_event, position_gtid_2)
]
assert patch_data_handle_event.call_count == 2
assert patch_save_position.call_count == 1
def test_register_signal_handler(
self,
patch_config,
patch_db_connections,
patch_restarter,
patch_signal,
patch_running,
patch_producer,
patch_exit,
):
patch_running.return_value = False
replication_stream = self._init_and_run_batch()
# ZKLock also calls patch_signal, so we have to work around it
assert [
mock.call(signal.SIGINT, replication_stream._handle_shutdown_signal),
mock.call(signal.SIGTERM, replication_stream._handle_shutdown_signal),
] in patch_signal.call_args_list
def test_graceful_exit_if_buffer_size_mismatch(
self,
producer,
patch_config_with_small_recovery_queue_size,
patch_restarter,
patch_data_handle_event,
patch_db_connections,
patch_save_position,
):
with pytest.raises(SystemExit):
self._init_and_run_batch()
def test_changelog_ON_chooses_changelog_dataevent_handler(
self,
patch_config,
patch_config_changelog_on,
producer,
patch_db_connections
):
replication_stream = self._get_parse_replication_stream()
replication_stream.producer = producer
replication_stream.counters = mock.MagicMock()
handler_info = replication_stream._build_handler_map()[DataEvent]
assert isinstance(handler_info.handler, ChangeLogDataEventHandler)
def test_without_changelog_mode_dataevent_handler_is_default(
self,
patch_config,
producer,
patch_db_connections
):
replication_stream = self._get_parse_replication_stream()
replication_stream.producer = producer
replication_stream.counters = mock.MagicMock()
handler_info = replication_stream._build_handler_map()[DataEvent]
assert isinstance(handler_info.handler, DataEventHandler)
def test_handle_graceful_termination_data_event(
self,
producer,
patch_producer,
patch_config,
patch_restarter,
patch_data_handle_event,
patch_save_position,
patch_exit,
patch_running,
patch_db_connections
):
patch_running.return_value = False
replication_stream = self._get_parse_replication_stream()
replication_stream.current_event_type = EventType.DATA_EVENT
replication_stream.run()
assert producer.get_checkpoint_position_data.call_count == 1
assert producer.flush.call_count == 1
assert patch_exit.call_count == 1
def test_handle_graceful_termination_schema_event(
self,
producer,
patch_config,
patch_producer,
patch_restarter,
patch_data_handle_event,
patch_exit,
patch_running,
patch_db_connections
):
patch_running.return_value = False
replication_stream = self._get_parse_replication_stream()
replication_stream.current_event_type = EventType.SCHEMA_EVENT
replication_stream.run()
assert producer.get_checkpoint_position_data.call_count == 0
assert producer.flush.call_count == 0
assert patch_exit.call_count == 1
def test_with_dry_run_options(self, patch_db_connections, patch_restarter):
with mock.patch(
'replication_handler.batch.base_parse_replication_stream.config.env_config'
) as mock_config:
mock_config.register_dry_run = True
mock_config.publish_dry_run = False
replication_stream = self._get_parse_replication_stream()
assert replication_stream.register_dry_run is True
assert replication_stream.publish_dry_run is False
def test_zk_lock_acquired(
self,
patch_config,
patch_exit,
patch_restarter,
patch_db_connections,
patch_zk,
patch_process_event,
):
# ZK will exit the proc if it can't acquire a lock using sys.exit
patch_zk.side_effect = SystemExit
with pytest.raises(SystemExit):
self._init_and_run_batch()
assert patch_zk.assert_called_once_with(
"replication_handler",
"test_namespace"
)
assert patch_process_event.call_count == 0
def test_zk_exit_on_exception(
self,
patch_config,
patch_restarter,
patch_db_connections,
patch_zk
):
patch_restarter.return_value.get_stream.return_value.__iter__.side_effect = Exception
with pytest.raises(Exception):
self._init_and_run_batch()
assert patch_zk.__exit__.call_count == 1
def _init_and_run_batch(self):
replication_stream = self._get_parse_replication_stream()
replication_stream.run()
return replication_stream
def _get_parse_replication_stream(self):
raise NotImplementedError()
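# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Concrete test modules are expected to subclass the fixture collection above and
# return the replication-stream batch object under test.  The stand-in below only
# shows the shape of such a subclass; a real suite returns the actual batch class
# instead of a mock.
class ExampleParseReplicationStreamTest(BaseParseReplicationStreamTest):
    def _get_parse_replication_stream(self):
        return mock.Mock(spec=BaseParseReplicationStream)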
|
tests/pup/sensors/ultrasonic_lights.py | cschlack/pybricks-micropython | 115 | 34862 | # SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
Hardware Module: 1
Description: This tests the lights on the Ultrasonic Sensor. No external
sensors are used to verify that it works.
"""
from pybricks.pupdevices import UltrasonicSensor
from pybricks.parameters import Port
from pybricks.tools import wait
from urandom import randint
# Initialize devices.
lights = UltrasonicSensor(Port.C).lights
# Turn on all lights at full brightness.
lights.on()
wait(500)
# Turn on all lights.
for i in range(-50, 150, 2):
lights.on(i)
wait(20)
# Turn off all lights.
lights.off()
wait(500)
# Set each light to a random brightness.
for i in range(50):
lights.on([randint(0, 100) for j in range(4)])
wait(50)
|
test/test_nn/test_distribution/test_gaussian.py | brunomaga/PRML | 11,017 | 34896 | import unittest
import numpy as np
import prml.nn as nn
class TestGaussian(unittest.TestCase):
def test_gaussian_draw_forward(self):
mu = nn.array(0)
sigma = nn.softplus(nn.array(-1))
gaussian = nn.Gaussian(mu, sigma)
sample = []
for _ in range(1000):
sample.append(gaussian.draw().value)
self.assertTrue(np.allclose(np.mean(sample), 0, rtol=0.1, atol=0.1), np.mean(sample))
self.assertTrue(np.allclose(np.std(sample), gaussian.std.value, 0.1, 0.1))
def test_gaussian_draw_backward(self):
mu = nn.array(0)
s = nn.array(2)
optimizer = nn.optimizer.Gradient({0: mu, 1: s}, 0.01)
prior = nn.Gaussian(1, 1)
for _ in range(1000):
mu.cleargrad()
s.cleargrad()
gaussian = nn.Gaussian(mu, nn.softplus(s))
gaussian.draw()
loss = nn.loss.kl_divergence(gaussian, prior).sum()
optimizer.minimize(loss)
self.assertTrue(np.allclose(gaussian.mean.value, 1, 0.1, 0.1))
self.assertTrue(np.allclose(gaussian.std.value, 1, 0.1, 0.1))
if __name__ == "__main__":
unittest.main()
|
examples/qp2q/preprocessing/session_data_processing.py | xeisberg/pecos | 288 | 34931 | """This module contains functions to preprocess input
datasets into a usable format."""
import gc
import gzip
import json
import logging
import multiprocessing as mp
import pathlib
import sys
from itertools import repeat
import numpy as np
import scipy.sparse as smat
logging.basicConfig(
stream=sys.stdout,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
_FUNC = None  # placeholder for the worker function used by Pool processes.
def _worker_init(func):
"init method to invoke Pool."
global _FUNC
_FUNC = func
def _worker(x):
"init function to invoke pool"
return _FUNC(x)
def open_file_helper(filename, compressed, mode="rt"):
"""
Supports reading of gzip compressed or uncompressed file.
Parameters:
----------
filename : str
Name of the file to open.
compressed : bool
If true, treat filename as gzip compressed.
mode : str
Reading mode.
Returns:
--------
file handle to the opened file.
"""
return gzip.open(filename, mode=mode) if compressed else open(filename, mode)
def _get_unique_rows_cols(filename, compressed, delim="<@@>"):
"""Function to load a json file in the format of processed session-data
for qp2q. Then it returns dictionary of query<delim>prefix as r2i and next_query
as c2i.
"""
r2i = {}
c2i = {}
logger.info("Processing file for rows and columns: {}".format(filename))
with open_file_helper(filename, compressed) as fp:
for line in fp:
try:
pline = json.loads(line)
except json.decoder.JSONDecodeError:
logger.warn(f"Failed to parse: {line}")
continue
query_prefix = delim.join([pline["prev_query"], pline["prefix"]])
kw = pline["next_query"]
if query_prefix not in r2i:
r2i[query_prefix] = 1
if kw not in c2i:
c2i[kw] = 1
return r2i, c2i
def _transform_file_to_matrix_qp2q(filename, compressed, delim, g_r2i, g_c2i):
"""
    Helper function to extract the qp2q matrix from an input file which was generated
    as an output of the function parallel_process_session_data_qp2p.
Parameters:
----------
    filename: str
        full filepath of the input file
compressed: bool
compressed or not
delim: str
delim separating query and prefix
g_r2i: dictionary
mapping for input items
g_c2i: dictionary
mapping of output item
Returns:
-------
qp2q count matrix
"""
rows = []
cols = []
data = []
logger.info("Processing file for matrix: {}".format(filename))
with open_file_helper(filename, compressed) as fp:
for line in fp:
try:
pline = json.loads(line)
except json.decoder.JSONDecodeError:
logger.warn(f"Failed to parse: {line}")
continue
query_prefix = delim.join([pline["prev_query"], pline["prefix"]])
kw = pline["next_query"]
freq = 1
data.append(freq)
rows.append(g_r2i[query_prefix])
cols.append(g_c2i[kw])
matrix = smat.coo_matrix((data, (rows, cols)), shape=(len(g_r2i), len(g_c2i)), dtype=np.float32)
return matrix
def parallel_get_qp2q_sparse_data(fdir, compressed, delim="<@@>", n_jobs=4):
"""Process session data to sparse matrix and dictionaries mapping rows and columns.
Parameters:
----------
fdir: str
path to directory having all the files in json format
compressed: bool
files being compressed or not
delim: str
delimiter between query and prefix
n_jobs: int
number of threads to be used
Returns:
-------
dictionary mapping row index to row names
dictionary mapping col index to col names
    qp2q sparse csr matrix containing freq. of occurrences.
"""
if compressed:
extension = "*.gz"
else:
extension = "*.json"
if pathlib.Path(fdir).is_dir():
files = pathlib.Path(fdir).glob(extension)
else:
raise ValueError(f"{fdir} is not a valid directory")
files = [str(f) for f in files]
logger.info("Getting qp2q unique rows and columns from files in {}".format(fdir))
if n_jobs > 1:
with mp.Pool(processes=n_jobs) as pool:
dicts = pool.starmap(
_get_unique_rows_cols,
zip(files, repeat(compressed), repeat(delim)),
)
else:
dicts = [_get_unique_rows_cols(file, compressed, delim) for file in files]
g_r2i = {}
g_c2i = {}
for dic in dicts:
g_r2i.update(dic[0])
g_c2i.update(dic[1])
g_i2r = {}
g_i2c = {}
for i, k in enumerate(g_r2i.keys()):
g_r2i[k] = i
g_i2r[i] = k
for i, k in enumerate(g_c2i.keys()):
g_c2i[k] = i
g_i2c[i] = k
del dicts
gc.collect()
logger.info("Number of unique rows: {}".format(len(g_r2i)))
logger.info("Number of unique cols: {}".format(len(g_c2i)))
if n_jobs > 1:
with mp.Pool(
processes=n_jobs,
initializer=_worker_init,
initargs=(
lambda x: _transform_file_to_matrix_qp2q(x, compressed, delim, g_r2i, g_c2i),
),
) as pool:
matrices = pool.map(_worker, files)
else:
matrices = [
_transform_file_to_matrix_qp2q(x, compressed, delim, g_r2i, g_c2i) for x in files
]
matrices = [m.tocsr() for m in matrices]
qp2q_matrix = matrices[0]
for i in range(1, len(matrices)):
qp2q_matrix += matrices[i]
del matrices
gc.collect()
return g_i2r, g_i2c, qp2q_matrix
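# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Each input file is expected to hold one JSON object per line with the keys read
# above: "prev_query", "prefix" and "next_query".  The directory below is hypothetical.
if __name__ == "__main__":
    i2r, i2c, qp2q = parallel_get_qp2q_sparse_data(
        fdir="/tmp/qp2q_sessions", compressed=False, delim="<@@>", n_jobs=2
    )
    logger.info("matrix shape: %s, nnz: %d", qp2q.shape, qp2q.nnz)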
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/expression_command/options/TestExprOptions.py | Polidea/SiriusObfuscator | 427 | 34939 | """
Test expression command options.
Test cases:
o test_expr_options:
Test expression command options.
"""
from __future__ import print_function
import os
import time
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class ExprOptionsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
self.line = line_number('main.cpp', '// breakpoint_in_main')
self.exe = os.path.join(os.getcwd(), "a.out")
def test_expr_options(self):
"""These expression command options should work as expected."""
self.build()
# Set debugger into synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.exe)
self.assertTrue(target, VALID_TARGET)
# Set breakpoints inside main.
breakpoint = target.BreakpointCreateBySourceRegex(
'// breakpoint_in_main', self.main_source_spec)
self.assertTrue(breakpoint)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, breakpoint)
self.assertEqual(len(threads), 1)
frame = threads[0].GetFrameAtIndex(0)
options = lldb.SBExpressionOptions()
# test --language on C++ expression using the SB API's
# Make sure we can evaluate a C++11 expression.
val = frame.EvaluateExpression('foo != nullptr')
self.assertTrue(val.IsValid())
self.assertTrue(val.GetError().Success())
self.DebugSBValue(val)
# Make sure it still works if language is set to C++11:
options.SetLanguage(lldb.eLanguageTypeC_plus_plus_11)
val = frame.EvaluateExpression('foo != nullptr', options)
self.assertTrue(val.IsValid())
self.assertTrue(val.GetError().Success())
self.DebugSBValue(val)
# Make sure it fails if language is set to C:
options.SetLanguage(lldb.eLanguageTypeC)
val = frame.EvaluateExpression('foo != nullptr', options)
self.assertTrue(val.IsValid())
self.assertFalse(val.GetError().Success())
|
src/permission/backends.py | dkopitsa/django-permission | 234 | 34941 | # coding=utf-8
"""
Logical permission backends module
"""
from permission.conf import settings
from permission.utils.handlers import registry
from permission.utils.permissions import perm_to_permission
__all__ = ('PermissionBackend',)
class PermissionBackend(object):
"""
A handler based permission backend
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
# pylint:disable=unused-argument
def authenticate(self, username, password):
"""
Always return ``None`` to prevent authentication within this backend.
"""
return None
def has_perm(self, user_obj, perm, obj=None):
"""
        Check if the user has the given permission (optionally on an object), based on registered handlers.
        It will raise an ``ObjectDoesNotExist`` exception when the specified
        string permission does not exist and
        ``PERMISSION_CHECK_PERMISSION_PRESENCE`` is ``True`` in the ``settings``
        module.
Parameters
----------
user_obj : django user model instance
            A django user model instance which is checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
            Whether the specified user has the specified permission (on the
            specified object).
Raises
------
django.core.exceptions.ObjectDoesNotExist
If the specified string permission does not exist and
``PERMISSION_CHECK_PERMISSION_PRESENCE`` is ``True`` in ``settings``
module.
"""
if settings.PERMISSION_CHECK_PERMISSION_PRESENCE:
# get permission instance from string permission (perm)
            # it raises ObjectDoesNotExist when the permission does not exist
try:
perm_to_permission(perm)
except AttributeError:
                # Django 1.2 internally uses a wrong permission string, thus ignore
pass
        # get permission handlers for this perm
cache_name = '_%s_cache' % perm
if hasattr(self, cache_name):
handlers = getattr(self, cache_name)
else:
handlers = [h for h in registry.get_handlers()
if perm in h.get_supported_permissions()]
setattr(self, cache_name, handlers)
for handler in handlers:
if handler.has_perm(user_obj, perm, obj=obj):
return True
return False
def has_module_perms(self, user_obj, app_label):
"""
        Check if the user has permissions for the specified app based on registered
handlers.
It will raise ``ObjectDoesNotExist`` exception when the specified
string permission does not exist and
``PERMISSION_CHECK_PERMISSION_PRESENCE`` is ``True`` in ``settings``
module.
Parameters
----------
user_obj : django user model instance
A django user model instance which is checked
app_label : string
`app_label.codename` formatted permission string
Returns
-------
boolean
            Whether the specified user has the specified permission.
Raises
------
django.core.exceptions.ObjectDoesNotExist
If the specified string permission does not exist and
``PERMISSION_CHECK_PERMISSION_PRESENCE`` is ``True`` in ``settings``
module.
"""
        # get permission handlers for this app label
cache_name = '_%s_cache' % app_label
if hasattr(self, cache_name):
handlers = getattr(self, cache_name)
else:
handlers = [h for h in registry.get_handlers()
if app_label in h.get_supported_app_labels()]
setattr(self, cache_name, handlers)
for handler in handlers:
if handler.has_module_perms(user_obj, app_label):
return True
return False
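# --- Illustrative sketch (editor's addition, not part of django-permission itself) ---
# The backend above only relies on the four hooks it calls on each registered handler.
# The duck-typed handler below is hypothetical; real projects subclass the library's
# permission handler classes and register them with ``registry``.
class ExampleArticleHandler(object):
    def get_supported_permissions(self):
        return {"articles.change_article"}
    def get_supported_app_labels(self):
        return {"articles"}
    def has_perm(self, user_obj, perm, obj=None):
        return user_obj.is_active
    def has_module_perms(self, user_obj, app_label):
        return user_obj.is_active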
|
algorithms/Python/sorting/selection_sort.py | Tanmoy07tech/DSA | 247 | 34975 | '''
Find the largest element and place it at the end of the unsorted portion
of the list. Repeat for each remaining sub-array.
O(n^2) time complexity.
'''
from string import ascii_letters
arrays = (
[12, 3, 7, 22, -12, 100, 1],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[4, 1, 3, 9, 7],
[0, -1.5, 1.5, 1.3, -1.3, -1.01, 1.01],
list(reversed(ascii_letters)),
)
def selection_sort(arr):
"""
>>> all(selection_sort(arr) or arr == sorted(arr) for arr in arrays)
True
"""
for i in range(len(arr) - 1, 0, -1):
k = 0
for j in range(1, i + 1):
if arr[j] > arr[k]:
k = j
arr[i], arr[k] = arr[k], arr[i] # swap
if __name__ == "__main__":
for arr in arrays:
selection_sort(arr)
print("Sorted array: ")
for ele in arr: # type: ignore
print(f"\t{ele}")
|
infra/utils.py | BrandoZhang/alis | 176 | 35030 | import os
import shutil
import subprocess
from distutils.dir_util import copy_tree
from shutil import copyfile
from typing import List, Optional
import click
import git
from omegaconf import DictConfig
def copy_objects(target_dir: os.PathLike, objects_to_copy: List[os.PathLike]):
for src_path in objects_to_copy:
trg_path = os.path.join(target_dir, os.path.basename(src_path))
if os.path.islink(src_path):
os.symlink(os.readlink(src_path), trg_path)
elif os.path.isfile(src_path):
copyfile(src_path, trg_path)
elif os.path.isdir(src_path):
copy_tree(src_path, trg_path)
else:
raise NotImplementedError(f"Unknown object type: {src_path}")
def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):
"""
Creates symlinks to the given paths
"""
for src_path in symlinks_to_create:
trg_path = os.path.join(target_dir, os.path.basename(src_path))
if os.path.islink(src_path):
# Let's not create symlinks to symlinks
# Since dropping the current symlink will break the experiment
os.symlink(os.readlink(src_path), trg_path)
else:
            print(f'Creating a symlink to {src_path}, so try not to delete it accidentally!')
os.symlink(src_path, trg_path)
def is_git_repo(path: os.PathLike):
try:
_ = git.Repo(path).git_dir
return True
except git.exc.InvalidGitRepositoryError:
return False
def create_project_dir(project_dir: os.PathLike, objects_to_copy: List[os.PathLike], symlinks_to_create: List[os.PathLike]):
if is_git_repo(os.getcwd()) and are_there_uncommitted_changes():
if click.confirm("There are uncommited changes. Continue?", default=False):
print('Ok...')
else:
raise PermissionError("Cannot created a dir when there are uncommited changes")
if os.path.exists(project_dir):
if click.confirm(f'Dir {project_dir} already exists. Remove it?', default=False):
shutil.rmtree(project_dir)
else:
print('User refused to delete an existing project dir.')
raise PermissionError("There is an existing dir and I cannot delete it.")
os.makedirs(project_dir)
copy_objects(project_dir, objects_to_copy)
create_symlinks(project_dir, symlinks_to_create)
print(f'Created a project dir: {project_dir}')
def get_git_hash() -> Optional[str]:
if not is_git_repo(os.getcwd()):
return None
try:
return subprocess \
.check_output(['git', 'rev-parse', '--short', 'HEAD']) \
.decode("utf-8") \
.strip()
except:
return None
def get_experiment_path(master_dir: os.PathLike, experiment_name: str) -> os.PathLike:
return os.path.join(master_dir, f"{experiment_name}-{get_git_hash()}")
def get_git_hash_suffix() -> str:
git_hash: Optional[str] = get_git_hash()
git_hash_suffix = "" if git_hash is None else f"-{git_hash}"
return git_hash_suffix
def are_there_uncommitted_changes() -> bool:
return len(subprocess.check_output('git status -s'.split()).decode("utf-8")) > 0
def cfg_to_args_str(cfg: DictConfig, use_dashes=True) -> str:
dashes = '--' if use_dashes else ''
return ' '.join([f'{dashes}{p}={cfg[p]}' for p in cfg])
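# --- Illustrative usage (editor's addition, not part of the original module) ---
# Demonstrates cfg_to_args_str on a tiny OmegaConf config.
if __name__ == "__main__":
    from omegaconf import OmegaConf
    demo_cfg = OmegaConf.create({"batch_size": 32, "lr": 0.002})
    print(cfg_to_args_str(demo_cfg))                     # --batch_size=32 --lr=0.002
    print(cfg_to_args_str(demo_cfg, use_dashes=False))   # batch_size=32 lr=0.002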
|
library/source1/mdl/structs/__init__.py | anderlli0053/SourceIO | 199 | 35047 | from ....utils.byte_io_mdl import ByteIO
from ....shared.base import Base |
Calibration/EcalAlCaRecoProducers/python/alcastreamEcalPhiSym_cff.py | ckamtsikis/cmssw | 852 | 35067 | # The following comments couldn't be translated into the new config version:
#------------------------------------------------
#AlCaReco filtering for phi symmetry calibration:
#------------------------------------------------
#
# Passes events that are coming from the online phi-symmetry stream
#
#
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ecalphiSymHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
HLTPaths = ['AlCa_EcalPhiSym*'],
andOr = True,
throw = False
)
|
frameworks/Python/spyne/gen_benchmark_config.py | xsoheilalizadeh/FrameworkBenchmarks | 5,300 | 35076 | #!/usr/bin/env python
from __future__ import print_function
import json
from spyne import AnyUri, Unicode, ComplexModel, M, UnsignedInteger16, Array
from spyne.protocol.json import JsonDocument
from spyne.util.dictdoc import get_object_as_dict
class BenchmarkConfigElement(ComplexModel):
# exclude this from the output document
key = Unicode(pa={JsonDocument: dict(exc=True)})
display_name = M(Unicode)
notes = Unicode
versus = Unicode
db_url = AnyUri
json_url = AnyUri
query_url = AnyUri
fortune_url = AnyUri
update_url = AnyUri
plaintext_url = AnyUri
port = M(UnsignedInteger16(default=8080))
approach = M(Unicode(values=['Realistic', 'Stripped'], default='Realistic'))
classification = M(Unicode(values=['Micro', 'Fullstack', 'Platform'], default='Micro'))
database = M(Unicode(values=['none', 'mongodb', 'postgres', 'mysql'], default='none'))
orm = M(Unicode(values=['Full', 'Micro', 'None', 'Raw']))
framework = M(Unicode)
language = M(Unicode)
flavor = M(Unicode)
platform = M(Unicode)
webserver = M(Unicode)
os = M(Unicode(default='Linux'))
database_os = M(Unicode(default='Linux'))
class BenchmarkConfig(ComplexModel):
framework = M(Unicode)
tests = Array(BenchmarkConfigElement, wrapped=False)
gen_raw_test = lambda: BenchmarkConfigElement(
display_name="Spyne RAW",
db_url="/dbsraw",
query_url="/dbraw?queries=",
fortune_url="/fortunesraw",
update_url="/raw-updates?queries=",
orm='Raw',
)
gen_normal_test = lambda: BenchmarkConfigElement(
display_name="Spyne ORM",
db_url="/dbs",
query_url="/db?queries=",
fortune_url="/fortunes",
update_url="/updatesraw?queries=",
orm='Full',
)
def add_common(bc):
bc.port = 8080
bc.approach = "Realistic"
bc.classification = "Micro"
bc.database = "postgres"
bc.framework = "spyne"
bc.language = "Python"
bc.platform = "Spyne"
bc.webserver = "None"
bc.os = "Linux"
bc.database_os = "Linux"
bc.versus = "wsgi"
bc.plaintext_url = "/plaintext"
return bc
config = BenchmarkConfig(framework='spyne', tests=[])
keys = iter(['default', 'raw', 'py3orm', 'py3raw'])
for flav in ['CPython', 'Python3']:
bc = add_common(gen_normal_test())
bc.flavor = flav
bc.key = next(keys)
config.tests.append(bc)
bc = add_common(gen_raw_test())
bc.flavor = flav
bc.key = next(keys)
config.tests.append(bc)
data = get_object_as_dict(config, complex_as=dict)
data['tests'] = [{d['key']: d} for d in data['tests']]
data = json.dumps(data, indent=2, sort_keys=True, separators=(',', ': '))
open('benchmark_config.json', 'wb').write(data)
print(data)
|
slack_bolt/response/__init__.py | korymath/bolt-python | 160 | 35078 | from .response import BoltResponse
|
simtbx/diffBragg/attr_list.py | dperl-sol/cctbx_project | 155 | 35094 | from __future__ import division
"""
Critical properties of diffBragg objects that should be logged for reproducibility.
"""
# TODO : implement a savestate and getstate for these objects
# attrs of diffBragg() instances
DIFFBRAGG_ATTRS = [
'Amatrix',
'Bmatrix',
'Ncells_abc',
'Ncells_abc_aniso',
'Ncells_def',
'Npix_to_allocate',
'Omatrix',
'Umatrix',
'beamsize_mm',
'compute_curvatures',
'default_F',
'detector_thick_mm',
'detector_thickstep_mm',
'detector_thicksteps',
'detector_twotheta_deg',
'device_Id',
'diffuse_gamma',
'diffuse_sigma',
'exposure_s',
'fluence',
'flux',
'has_anisotropic_mosaic_spread',
'interpolate',
'isotropic_ncells',
'lambda_coefficients',
'mosaic_domains',
'mosaic_spread_deg',
'no_Nabc_scale',
'nopolar',
'only_diffuse',
'only_save_omega_kahn',
'oversample',
'oversample_omega',
'phi_deg',
'phistep_deg',
'phisteps',
'point_pixel',
'polar_vector',
'polarization',
'spindle_axis',
'spot_scale',
'twotheta_axis',
'unit_cell_Adeg',
'unit_cell_tuple',
'use_diffuse',
'use_lambda_coefficients']
# properties of nanoBragg_crystal.NBcryst instances
NB_CRYST_ATTRS = [
'anisotropic_mos_spread_deg',
'isotropic_ncells',
'miller_is_complex',
'mos_spread_deg',
'n_mos_domains',
'symbol',
'xtal_shape']
# properties of nanoBragg_beam.NBbeam instances
NB_BEAM_ATTRS = [
'divergence',
'polarization_fraction',
'size_mm',
'number_of_sources',
'unit_s0']
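# --- Illustrative sketch (editor's addition): snapshotting the listed properties ---
# A minimal helper in the spirit of the TODO above.  ``obj`` is any diffBragg,
# NBcryst or NBbeam instance; attributes missing on the instance are skipped.
def get_state(obj, attr_names):
    """Return a dict of the listed attributes that exist on ``obj``."""
    return {name: getattr(obj, name) for name in attr_names if hasattr(obj, name)}
# usage (hypothetical): state = get_state(diffbragg_instance, DIFFBRAGG_ATTRS)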
|
tests/data/fields/field_test.py | MSLars/allennlp | 11,433 | 35095 | from allennlp.data.fields import Field
def test_eq_with_inheritance():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
__slots__ = ["b"]
def __init__(self, a, b):
super().__init__(a)
self.b = b
class SubSubSubField(SubSubField):
__slots__ = ["c"]
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3)
assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3)
def test_eq_with_inheritance_for_non_slots_field():
class SubField(Field):
def __init__(self, a):
self.a = a
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
def test_eq_with_inheritance_for_mixed_field():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
def __init__(self, a, b):
super().__init__(a)
self.b = b
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
|
leo/modes/kivy.py | ATikhonov2/leo-editor | 1,550 | 35098 | # Leo colorizer control file for kivy mode.
# This file is in the public domain.
# Properties for kivy mode.
properties = {
"ignoreWhitespace": "false",
"lineComment": "#",
}
# Attributes dict for kivy_main ruleset.
kivy_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for kivy mode.
attributesDictDict = {
"kivy_main": kivy_main_attributes_dict,
}
# Keywords dict for kivy_main ruleset.
kivy_main_keywords_dict = {
"app": "keyword2",
"args": "keyword2",
"canvas": "keyword1",
"id": "keyword1",
"root": "keyword2",
"self": "keyword2",
"size": "keyword1",
"text": "keyword1",
"x": "keyword1",
"y": "keyword1",
}
# Dictionary of keywords dictionaries for kivy mode.
keywordsDictDict = {
"kivy_main": kivy_main_keywords_dict,
}
# Rules for kivy_main ruleset.
def kivy_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="#",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def kivy_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="kivy::literal_one",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def kivy_rule2(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for kivy_main ruleset.
rulesDict1 = {
"\"": [kivy_rule1,],
"#": [kivy_rule0,],
"0": [kivy_rule2,],
"1": [kivy_rule2,],
"2": [kivy_rule2,],
"3": [kivy_rule2,],
"4": [kivy_rule2,],
"5": [kivy_rule2,],
"6": [kivy_rule2,],
"7": [kivy_rule2,],
"8": [kivy_rule2,],
"9": [kivy_rule2,],
"@": [kivy_rule2,],
"A": [kivy_rule2,],
"B": [kivy_rule2,],
"C": [kivy_rule2,],
"D": [kivy_rule2,],
"E": [kivy_rule2,],
"F": [kivy_rule2,],
"G": [kivy_rule2,],
"H": [kivy_rule2,],
"I": [kivy_rule2,],
"J": [kivy_rule2,],
"K": [kivy_rule2,],
"L": [kivy_rule2,],
"M": [kivy_rule2,],
"N": [kivy_rule2,],
"O": [kivy_rule2,],
"P": [kivy_rule2,],
"Q": [kivy_rule2,],
"R": [kivy_rule2,],
"S": [kivy_rule2,],
"T": [kivy_rule2,],
"U": [kivy_rule2,],
"V": [kivy_rule2,],
"W": [kivy_rule2,],
"X": [kivy_rule2,],
"Y": [kivy_rule2,],
"Z": [kivy_rule2,],
"a": [kivy_rule2,],
"b": [kivy_rule2,],
"c": [kivy_rule2,],
"d": [kivy_rule2,],
"e": [kivy_rule2,],
"f": [kivy_rule2,],
"g": [kivy_rule2,],
"h": [kivy_rule2,],
"i": [kivy_rule2,],
"j": [kivy_rule2,],
"k": [kivy_rule2,],
"l": [kivy_rule2,],
"m": [kivy_rule2,],
"n": [kivy_rule2,],
"o": [kivy_rule2,],
"p": [kivy_rule2,],
"q": [kivy_rule2,],
"r": [kivy_rule2,],
"s": [kivy_rule2,],
"t": [kivy_rule2,],
"u": [kivy_rule2,],
"v": [kivy_rule2,],
"w": [kivy_rule2,],
"x": [kivy_rule2,],
"y": [kivy_rule2,],
"z": [kivy_rule2,],
}
# x.rulesDictDict for kivy mode.
rulesDictDict = {
"kivy_main": rulesDict1,
}
# Import dict for kivy mode.
importDict = {}
|
tools/cr/cr/commands/info.py | zealoussnow/chromium | 14,668 | 35127 | <filename>tools/cr/cr/commands/info.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the info implementation of Command."""
from __future__ import print_function
import cr
class InfoCommand(cr.Command):
"""The cr info command implementation."""
def __init__(self):
super(InfoCommand, self).__init__()
self.help = 'Print information about the cr environment'
def AddArguments(self, subparsers):
parser = super(InfoCommand, self).AddArguments(subparsers)
parser.add_argument(
'-s', '--short', dest='_short',
action='store_true', default=False,
help='Short form results, useful for scripting.'
)
self.ConsumeArgs(parser, 'the environment')
return parser
def EarlyArgProcessing(self):
if getattr(cr.context.args, '_short', False):
self.requires_build_dir = False
cr.Command.EarlyArgProcessing(self)
def Run(self):
if cr.context.remains:
for var in cr.context.remains:
if getattr(cr.context.args, '_short', False):
val = cr.context.Find(var)
if val is None:
val = ''
print(val)
else:
print(var, '=', cr.context.Find(var))
else:
cr.base.client.PrintInfo()
|
gsheetsdb/types.py | tim-werner/gsheets-db-api | 176 | 35160 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from enum import Enum
class Type(Enum):
STRING = 'string'
NUMBER = 'number'
BOOLEAN = 'boolean'
DATE = 'date'
DATETIME = 'datetime'
TIMEOFDAY = 'timeofday'
|
env/Lib/site-packages/OpenGL/GL/ARB/shader_clock.py | 5gconnectedbike/Navio2 | 210 | 35218 | '''OpenGL extension ARB.shader_clock
This module customises the behaviour of the
OpenGL.raw.GL.ARB.shader_clock to provide a more
Python-friendly API
Overview (from the spec)
This extension exposes a 64-bit monotonically incrementing shader
counter which may be used to derive local timing information within
a single shader invocation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/shader_clock.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_clock import *
from OpenGL.raw.GL.ARB.shader_clock import _EXTENSION_NAME
def glInitShaderClockARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
learning/pytorch/models/rnn_models.py | thomasehuang/Ithemal-Extension | 105 | 35232 | #this file contains models that I have tried out for different tasks, which are reusable
#plus it has the training framework for those models given data - each model has its own data requirements
import numpy as np
import common_libs.utilities as ut
import random
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch
import math
class ModelAbs(nn.Module):
"""
Abstract model without the forward method.
lstm for processing tokens in sequence and linear layer for output generation
lstm is a uni-directional single layer lstm
num_classes = 1 - for regression
num_classes = n - for classifying into n classes
"""
def __init__(self, hidden_size, embedding_size, num_classes):
super(ModelAbs, self).__init__()
self.hidden_size = hidden_size
self.name = 'should be overridden'
#numpy array with batchsize, embedding_size
self.embedding_size = embedding_size
self.num_classes = num_classes
#lstm - input size, hidden size, num layers
self.lstm_token = nn.LSTM(self.embedding_size, self.hidden_size)
#hidden state for the rnn
self.hidden_token = self.init_hidden()
#linear layer for regression - in_features, out_features
self.linear = nn.Linear(self.hidden_size, self.num_classes)
def init_hidden(self):
return (autograd.Variable(torch.zeros(1, 1, self.hidden_size)),
autograd.Variable(torch.zeros(1, 1, self.hidden_size)))
#this is to set learnable embeddings
def set_learnable_embedding(self, mode, dictsize, seed = None):
self.mode = mode
if mode != 'learnt':
embedding = nn.Embedding(dictsize, self.embedding_size)
if mode == 'none':
                print 'learn embeddings from scratch...'
initrange = 0.5 / self.embedding_size
embedding.weight.data.uniform_(-initrange, initrange)
self.final_embeddings = embedding
elif mode == 'seed':
print 'seed by word2vec vectors....'
embedding.weight.data = torch.FloatTensor(seed)
self.final_embeddings = embedding
else:
print 'using learnt word2vec embeddings...'
self.final_embeddings = seed
#remove any references you may have that inhibits garbage collection
def remove_refs(self, item):
return
class ModelSequentialRNN(ModelAbs):
"""
Prediction at every hidden state of the unrolled rnn.
Input - sequence of tokens processed in sequence by the lstm
Output - predictions at the every hidden state
uses lstm and linear setup of ModelAbs
each hidden state is given as a seperate batch to the linear layer
"""
def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
super(ModelSequentialRNN, self).__init__(hidden_size, embedding_size, num_classes)
if intermediate:
self.name = 'sequential RNN intermediate'
else:
self.name = 'sequential RNN'
self.intermediate = intermediate
def forward(self, item):
self.hidden_token = self.init_hidden()
#convert to tensor
if self.mode == 'learnt':
acc_embeds = []
for token in item.x:
acc_embeds.append(self.final_embeddings[token])
embeds = torch.FloatTensor(acc_embeds)
else:
embeds = self.final_embeddings(torch.LongTensor(item.x))
#prepare for lstm - seq len, batch size, embedding size
seq_len = embeds.shape[0]
embeds_for_lstm = embeds.unsqueeze(1)
#lstm outputs
#output, (h_n,c_n)
#output - (seq_len, batch = 1, hidden_size * directions) - h_t for each t final layer only
#h_n - (layers * directions, batch = 1, hidden_size) - h_t for t = seq_len
#c_n - (layers * directions, batch = 1, hidden_size) - c_t for t = seq_len
#lstm inputs
#input, (h_0, c_0)
#input - (seq_len, batch, input_size)
lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)
if self.intermediate:
#input to linear - seq_len, hidden_size (seq_len is the batch size for the linear layer)
#output - seq_len, num_classes
values = self.linear(lstm_out[:,0,:].squeeze()).squeeze()
else:
#input to linear - hidden_size
#output - num_classes
values = self.linear(self.hidden_token[0].squeeze()).squeeze()
return values
class ModelHierarchicalRNN(ModelAbs):
"""
Prediction at every hidden state of the unrolled rnn for instructions.
Input - sequence of tokens processed in sequence by the lstm but seperated into instructions
Output - predictions at the every hidden state
lstm predicting instruction embedding for sequence of tokens
lstm_ins processes sequence of instruction embeddings
linear layer process hidden states to produce output
"""
def __init__(self, hidden_size, embedding_size, num_classes, intermediate):
super(ModelHierarchicalRNN, self).__init__(hidden_size, embedding_size, num_classes)
self.hidden_ins = self.init_hidden()
self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)
if intermediate:
self.name = 'hierarchical RNN intermediate'
else:
self.name = 'hierarchical RNN'
self.intermediate = intermediate
def copy(self, model):
self.linear = model.linear
self.lstm_token = model.lstm_token
self.lstm_ins = model.lstm_ins
def forward(self, item):
self.hidden_token = self.init_hidden()
self.hidden_ins = self.init_hidden()
ins_embeds = autograd.Variable(torch.zeros(len(item.x),self.embedding_size))
for i, ins in enumerate(item.x):
if self.mode == 'learnt':
acc_embeds = []
for token in ins:
acc_embeds.append(self.final_embeddings[token])
token_embeds = torch.FloatTensor(acc_embeds)
else:
token_embeds = self.final_embeddings(torch.LongTensor(ins))
#token_embeds = torch.FloatTensor(ins)
token_embeds_lstm = token_embeds.unsqueeze(1)
out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)
ins_embeds[i] = hidden_token[0].squeeze()
ins_embeds_lstm = ins_embeds.unsqueeze(1)
out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)
if self.intermediate:
values = self.linear(out_ins[:,0,:]).squeeze()
else:
values = self.linear(hidden_ins[0].squeeze()).squeeze()
return values
class ModelHierarchicalRNNRelational(ModelAbs):
    def __init__(self, hidden_size, embedding_size, num_classes):
        # ModelAbs expects (hidden_size, embedding_size, num_classes); pass all three.
        super(ModelHierarchicalRNNRelational, self).__init__(hidden_size, embedding_size, num_classes)
self.hidden_ins = self.init_hidden()
self.lstm_ins = nn.LSTM(self.hidden_size, self.hidden_size)
self.linearg1 = nn.Linear(2 * self.hidden_size, self.hidden_size)
self.linearg2 = nn.Linear(self.hidden_size, self.hidden_size)
def forward(self, item):
self.hidden_token = self.init_hidden()
self.hidden_ins = self.init_hidden()
ins_embeds = autograd.Variable(torch.zeros(len(item.x),self.hidden_size))
for i, ins in enumerate(item.x):
if self.mode == 'learnt':
acc_embeds = []
for token in ins:
acc_embeds.append(self.final_embeddings[token])
token_embeds = torch.FloatTensor(acc_embeds)
else:
token_embeds = self.final_embeddings(torch.LongTensor(ins))
#token_embeds = torch.FloatTensor(ins)
token_embeds_lstm = token_embeds.unsqueeze(1)
out_token, hidden_token = self.lstm_token(token_embeds_lstm,self.hidden_token)
ins_embeds[i] = hidden_token[0].squeeze()
ins_embeds_lstm = ins_embeds.unsqueeze(1)
out_ins, hidden_ins = self.lstm_ins(ins_embeds_lstm, self.hidden_ins)
seq_len = len(item.x)
g_variable = autograd.Variable(torch.zeros(self.hidden_size))
for i in range(seq_len):
for j in range(i,seq_len):
concat = torch.cat((out_ins[i].squeeze(),out_ins[j].squeeze()),0)
g1 = nn.functional.relu(self.linearg1(concat))
g2 = nn.functional.relu(self.linearg2(g1))
g_variable += g2
output = self.linear(g_variable)
return output
class ModelSequentialRNNComplex(nn.Module):
"""
Prediction using the final hidden state of the unrolled rnn.
Input - sequence of tokens processed in sequence by the lstm
Output - the final value to be predicted
we do not derive from ModelAbs, but instead use a bidirectional, multi layer
lstm and a deep MLP with non-linear activation functions to predict the final output
"""
def __init__(self, embedding_size):
        super(ModelSequentialRNNComplex, self).__init__()
self.name = 'sequential RNN'
self.hidden_size = 256
self.embedding_size = embedding_size
self.layers = 2
self.directions = 1
self.is_bidirectional = (self.directions == 2)
self.lstm_token = torch.nn.LSTM(input_size = self.embedding_size,
hidden_size = self.hidden_size,
num_layers = self.layers,
bidirectional = self.is_bidirectional)
self.linear1 = nn.Linear(self.layers * self. directions * self.hidden_size, self.hidden_size)
self.linear2 = nn.Linear(self.hidden_size,1)
self.hidden_token = self.init_hidden()
def init_hidden(self):
return (autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)),
autograd.Variable(torch.zeros(self.layers * self.directions, 1, self.hidden_size)))
def forward(self, item):
self.hidden_token = self.init_hidden()
#convert to tensor
if self.mode == 'learnt':
acc_embeds = []
for token in item.x:
acc_embeds.append(self.final_embeddings[token])
embeds = torch.FloatTensor(acc_embeds)
else:
embeds = self.final_embeddings(torch.LongTensor(item.x))
#prepare for lstm - seq len, batch size, embedding size
seq_len = embeds.shape[0]
embeds_for_lstm = embeds.unsqueeze(1)
lstm_out, self.hidden_token = self.lstm_token(embeds_for_lstm, self.hidden_token)
f1 = nn.functional.relu(self.linear1(self.hidden_token[0].squeeze().view(-1)))
f2 = self.linear2(f1)
return f2
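# The sketch below is illustrative only and was not part of the original file:
# it shows one way a model defined above could be instantiated and queried on a
# dummy token sequence.  `_DummyItem`, the dimensions and the vocabulary size
# are assumptions standing in for the real data pipeline.
if __name__ == '__main__':
    class _DummyItem(object):
        def __init__(self, x):
            self.x = x
    demo = ModelSequentialRNN(hidden_size=64, embedding_size=32,
                              num_classes=1, intermediate=False)
    demo.set_learnable_embedding(mode='none', dictsize=100)
    print demo(_DummyItem([3, 17, 42, 8]))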
|
fewshot/models/refine_model.py | ashok-arjun/few-shot-ssl-public | 497 | 35259 | <reponame>ashok-arjun/few-shot-ssl-public
# Copyright (c) 2018 <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""
A few-shot classification model implementation that refines on unlabeled
refinement images.
Author: <NAME> (<EMAIL>)
A single episode is divided into three parts:
1) Labeled reference images (self.x_ref).
2) Unlabeled refinement images (self.x_unlabel).
3) Labeled query images (from validation) (self.x_candidate).
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.nnlib import cnn, weight_variable, concat
from fewshot.models.basic_model import BasicModel
from fewshot.utils import logger
log = logger.get()
# Load up the LSTM cell implementation.
if tf.__version__.startswith("0"):
BasicLSTMCell = tf.nn.rnn_cell.BasicLSTMCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
else:
BasicLSTMCell = tf.contrib.rnn.BasicLSTMCell
LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple
class RefineModel(BasicModel):
"""A retrieval model with an additional refinement stage."""
def __init__(self,
config,
nway=1,
nshot=1,
num_unlabel=10,
candidate_size=10,
is_training=True,
dtype=tf.float32):
"""Initiliazer.
Args:
config: Model configuration object.
nway: Int. Number of classes in the reference images.
nshot: Int. Number of labeled reference images.
num_unlabel: Int. Number of unlabeled refinement images.
candidate_size: Int. Number of candidates in the query stage.
is_training: Bool. Whether is in training mode.
dtype: TensorFlow data type.
"""
self._num_unlabel = num_unlabel
self._x_unlabel = tf.placeholder(
dtype, [None, None, config.height, config.width, config.num_channel],
name="x_unlabel")
self._y_unlabel = tf.placeholder(dtype, [None, None], name="y_unlabel")
super(RefineModel, self).__init__(
config,
nway=nway,
nshot=nshot,
num_test=candidate_size,
is_training=is_training,
dtype=dtype)
@property
def x_unlabel(self):
return self._x_unlabel
@property
def y_unlabel(self):
return self._y_unlabel
if __name__ == "__main__":
from fewshot.configs.omniglot_config import OmniglotRefineConfig
model = RefineModel(OmniglotRefineConfig())
|
migrations/versions/6245d75fa12_exceptions_table.py | boladmin/security_monkey | 4,258 | 35281 | """Exceptions Table
Revision ID: 6245d75fa12
Revises: <PASSWORD>
Create Date: 2016-08-16 11:35:38.575026
"""
# revision identifiers, used by Alembic.
revision = '6245d75fa12'
down_revision = 'e0a6af364a3f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('exceptions',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('source', sa.String(length=256), nullable=False),
sa.Column('occurred', sa.DateTime(), nullable=False),
sa.Column('ttl', sa.DateTime(), nullable=False),
sa.Column('type', sa.String(length=256), nullable=False),
sa.Column('message', sa.String(length=512), nullable=True),
sa.Column('stacktrace', sa.Text(), nullable=True),
sa.Column('region', sa.String(length=32), nullable=True),
sa.Column('tech_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.Column('account_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['account.id'], ),
sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),
sa.ForeignKeyConstraint(['tech_id'], ['technology.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_exceptions_account_id', 'exceptions', ['account_id'], unique=False)
op.create_index('ix_exceptions_item_id', 'exceptions', ['item_id'], unique=False)
op.create_index('ix_exceptions_region', 'exceptions', ['region'], unique=False)
op.create_index('ix_exceptions_source', 'exceptions', ['source'], unique=False)
op.create_index('ix_exceptions_tech_id', 'exceptions', ['tech_id'], unique=False)
op.create_index('ix_exceptions_type', 'exceptions', ['type'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_exceptions_type', table_name='exceptions')
op.drop_index('ix_exceptions_tech_id', table_name='exceptions')
op.drop_index('ix_exceptions_source', table_name='exceptions')
op.drop_index('ix_exceptions_region', table_name='exceptions')
op.drop_index('ix_exceptions_item_id', table_name='exceptions')
op.drop_index('ix_exceptions_account_id', table_name='exceptions')
op.drop_table('exceptions')
### end Alembic commands ###
|
ml/gan_test.py | Ryoich/python_zero | 203 | 35336 | <reponame>Ryoich/python_zero
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
def generator_fn(noise, weight_decay=2.5e-5, is_training=True):
layers = tf.contrib.layers
framework = tf.contrib.framework
f1 = framework.arg_scope(
[layers.fully_connected, layers.conv2d_transpose],
activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm,
weights_regularizer=layers.l2_regularizer(weight_decay))
f2 = framework.arg_scope(
[layers.batch_norm],
is_training=is_training,
zero_debias_moving_mean=True)
with f1, f2:
net = layers.fully_connected(noise, 1024)
net = layers.fully_connected(net, 7 * 7 * 256)
net = tf.reshape(net, [-1, 7, 7, 256])
net = layers.conv2d_transpose(net, 64, [4, 4], stride=2)
net = layers.conv2d_transpose(net, 32, [4, 4], stride=2)
net = layers.conv2d(net, 1, 4, activation_fn=tf.tanh)
return net
def discriminator_fn(img, _, weight_decay=2.5e-5, is_training=True):
layers = tf.contrib.layers
framework = tf.contrib.framework
with framework.arg_scope(
[layers.conv2d, layers.fully_connected],
activation_fn=(lambda n: tf.nn.leaky_relu(n, alpha=0.01)),
weights_regularizer=layers.l2_regularizer(weight_decay),
biases_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.conv2d(img, 64, [4, 4], stride=2)
net = layers.conv2d(net, 128, [4, 4], stride=2)
net = layers.flatten(net)
with framework.arg_scope([layers.batch_norm], is_training=is_training):
net = layers.fully_connected(
net, 1024, normalizer_fn=layers.batch_norm)
return layers.linear(net, 1)
def provide_data(source, batch_size):
slim = tf.contrib.slim
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
}
datanum = sum(1 for _ in tf.python_io.tf_record_iterator(source))
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
dataprovider = slim.dataset_data_provider.DatasetDataProvider
reader = tf.TFRecordReader
dataset = slim.dataset.Dataset(source, reader, decoder, datanum, None)
provider = dataprovider(dataset, shuffle=True)
image, = provider.get(['image'])
image = (tf.cast(image, tf.float32) - 128.0) / 128.0
images = tf.train.batch([image], batch_size=batch_size)
return images
def run_gan(TRAIN_DATA, TOTAL_STEPS=400):
BATCH_SIZE = 32
TOTAL_STEPS += 1
tfgan = tf.contrib.gan
tf.reset_default_graph()
with tf.device('/cpu:0'):
real_images = provide_data(TRAIN_DATA, BATCH_SIZE)
gan_model = tfgan.gan_model(
generator_fn,
discriminator_fn,
real_data=real_images,
generator_inputs=tf.random_normal([BATCH_SIZE, 64]))
improved_wgan_loss = tfgan.gan_loss(
gan_model,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
gradient_penalty_weight=1.0)
generator_optimizer = tf.train.AdamOptimizer(0.001, beta1=0.5)
discriminator_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5)
gan_train_ops = tfgan.gan_train_ops(
gan_model,
improved_wgan_loss,
generator_optimizer,
discriminator_optimizer)
with tf.variable_scope('Generator', reuse=True):
eval_images = gan_model.generator_fn(
tf.random_normal([500, 64]),
is_training=False)
visualizer = tfgan.eval.image_reshaper(eval_images[:20, ...], num_cols=10)
train_step_fn = tfgan.get_sequential_train_steps()
global_step = tf.train.get_or_create_global_step()
INTERVAL = 25
with tf.train.SingularMonitoredSession() as sess:
for i in range(TOTAL_STEPS):
train_step_fn(sess, gan_train_ops, global_step,
train_step_kwargs={})
if i % INTERVAL == 0:
digits_np = sess.run([visualizer])
plt.axis('off')
plt.imshow(np.squeeze(digits_np), cmap='gray')
plt.show()
#filename = "mnist.tfrecord"
#filename = "hiragana.tfrecord"
# run_gan(filename)
|
code/src/main.py | ChaofWang/AWSRN | 162 | 35359 | <filename>code/src/main.py<gh_stars>100-1000
import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
def print_setting(net, args):
print('init this train:')
print_network(net)
print('training model:', args.model)
print('scale:', args.scale)
print('resume from ', args.resume)
print('output patch size', args.patch_size)
print('model setting: n_resblocks:', args.n_resblocks,
'n_feats:', args.n_feats, 'block_feats:', args.block_feats)
print('optimization setting: ', args.optimizer)
print('total epochs:', args.epochs)
print('lr:', args.lr, 'lr_decay at:', args.decay_type, 'decay gamma:', args.gamma)
print('train loss:', args.loss)
print('save_name:', args.save)
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
if checkpoint.ok:
loader = data.Data(args)
model = model.Model(args, checkpoint)
print_setting(model, args)
loss = loss.Loss(args, checkpoint) if not args.test_only else None
t = Trainer(args, loader, model, loss, checkpoint)
while not t.terminate():
t.train()
t.test()
checkpoint.done()
|
maskrcnn_benchmark/utils/big_model_loading.py | microsoft/GLIP | 295 | 35377 | import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*block{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
                if old_key.find(param) == -1:
continue
if 'unit01' in old_key:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
print("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
# Calculate SD conv weight
w = state_dict[old_key]
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
state_dict[new_key] = w
del state_dict[old_key]
return state_dict
def load_big_format(cfg, f):
model = OrderedDict()
weights = np.load(f)
cmap = {'a':1, 'b':2, 'c':3}
for key, val in weights.items():
old_key = key.replace('resnet/', '')
if 'root_block' in old_key:
new_key = 'root.conv.weight'
elif '/proj/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/proj/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.downsample.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/gamma' in old_key:
key_pattern = old_key.replace('/group_norm/gamma', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/beta' in old_key:
key_pattern = old_key.replace('/group_norm/beta', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.bias'.format(bname,uname,cmap[cidx])
else:
print('Unknown key {}'.format(old_key))
continue
print('Map {} -> {}'.format(key, new_key))
model[new_key] = tf2th(val)
model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
return dict(model=model)
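# Minimal illustration (added; not part of the original loader): tf2th converts
# TensorFlow's HWIO convolution kernels into PyTorch's OIHW layout.  The shape
# used here is an arbitrary example.
if __name__ == "__main__":
    hwio = np.zeros((3, 3, 64, 128), dtype=np.float32)  # (H, W, In, Out)
    oihw = tf2th(hwio)
    print("converted shape: {}".format(tuple(oihw.shape)))  # -> (128, 64, 3, 3)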
|
tests/simple_test.py | leoauri/auraloss | 272 | 35390 | <gh_stars>100-1000
import torch
import auraloss
input = torch.rand(8, 2, 44100)
target = torch.rand(8, 2, 44100)
loss = auraloss.freq.SumAndDifferenceSTFTLoss()
print(loss(input, target))
|
challenges/Sorter/poller/for-release/machine.py | pingjuiliao/cb-multios | 473 | 35421 | from generator.actions import Actions
import random
import string
import struct
import numpy as np
import math
import datetime as dt
import ctypes
def kaprica_mixin(self):
if hasattr(self, 'xlat_seed'):
return
def xlat_seed(seed):
def hash_string(seed):
H = 0x314abc86
for c in seed:
H = (H * 37) & 0xffffffff
H ^= ord(c)
H = ((H << 13) ^ (H >> 19)) & 0xffffffff
return H
def hash_iterate(H):
H = (H * 3) & 0xffffffff
H = ((H << 13) ^ (H >> 19) ^ (H >> 21)) & 0xffffffff
return H
xmap = list(xrange(256))
xmap_inv = list(xrange(256))
state = hash_string(seed)
for i in xrange(255, 0, -1):
j = state % i
state = hash_iterate(state)
xmap[i], xmap[j] = xmap[j], xmap[i]
for i in xrange(256):
xmap_inv[xmap[i]] = i
self.xlat_map = xmap
self.xlat_map_inv = xmap_inv
self.xlat_seed = xlat_seed
self.xlat_map = None
self.xlat_map_inv = None
def xlat_string(s, inverse=False):
if inverse:
return ''.join([chr(self.xlat_map_inv[ord(c)]) for c in s])
return ''.join([chr(self.xlat_map[ord(c)]) for c in s])
self.xlat_string = xlat_string
def read(delim=None, length=None, expect=None):
if self.xlat_map:
if delim:
delim = self.xlat_string(delim)
if expect:
expect = self.xlat_string(expect)
return self._original_read(delim=delim, length=length, expect=expect)
self._original_read = self.read
self.read = read
def write(s):
if self.xlat_map:
if s:
s = self.xlat_string(s)
return self._original_write(s)
self._original_write = self.write
self.write = write
def random_word(max_size=10, min_size=2):
if random.randint(0,4):
characters = string.letters + string.digits
else:
characters = string.letters
max_size = max_size if max_size >= min_size else min_size
max_size = random.randint(min_size, max_size)
return ("".join(random.choice(characters) for c in range(max_size))).lower()
def random_text(max_words=10, min_words=3):
max_words = max_words if max_words >= min_words else min_words
text = ''
for x in xrange(random.randint(min_words, max_words)):
text += random_word() + ' '
return text
class Cs10Fun(Actions):
def _read(self, data):
self.read(length=len(data), expect=data)
def _read_array(self, array=None):
if not array:
array = self._cur_array
if not len(array):
self._read("Empty Array\n")
else:
self._read("Array data\n")
temp_list = []
for val in array:
temp_list.append("%d, " % val)
temp_list[-1] = temp_list[-1][:-2]
self._read("[" + ''.join(temp_list) + ']\n')
def start(self):
#self.delay(100)
kaprica_mixin(self)
self.xlat_seed('2e0715f1709c')
self.write(self.magic_page[:4])
self.read(length=0x1000, expect=self.magic_page)
self._cur_array = []
self._read("Welcome to CS10Fun!\n")
self._read("Today's lesson is on sorting! We're going to cover 4 types of sorts.\n")
self._read("Before we begin, create an array of ints\n")
self.menu()
self.enter_array()
def menu(self):
self._read("Main Menu\n")
self._read("1. Enter Array\n")
self._read("2. Multiply Array\n")
self._read("3. Try Insertion Sort\n")
self._read("4. Try Selection Sort\n")
self._read("5. Try Heap Sort\n")
self._read("6. Try Merge Sort\n")
self._read("7. Reprint Array\n")
self._read("0. Exit\n")
self._read(" -----\n")
self._read("Current Number of items in array = %d\n" % len(self._cur_array))
#self._read_array()
self._read(" -----\n")
self._read("Make a selection\n")
def enter_array(self):
self.write('1\n')
self._read("Enter a list of numbers to sort. End the list with ';;'\n")
self._cur_array = []
for x in xrange(random.randint(1,100)):
self._cur_array.append(random.randint(-1000000000, 1000000000))
self.write(','.join([str(x) for x in self._cur_array]) + ',;;\n')
self._read("New Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def multiply_array(self):
self.write('2\n')
if len(self._cur_array) > 10000:
self._read("Array is too long. Can't multiply any more\n")
elif len(self._cur_array):
self._read("Quick Grow! Enter a list multiplier. End number with ';'\n")
multiplier = random.randint(1,3)
while multiplier * len(self._cur_array) > 1024 and multiplier * len(self._cur_array) <= 1048:
multiplier = random.randint(1,3)
self.write("%d;\n" % multiplier)
self._cur_array *= multiplier
self._read("Multiplied Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def insert_sort(self):
self.write('3\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Insertion sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def selection_sort(self):
self.write('4\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Selection sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def heap_sort(self):
self.write('5\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Heap sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def merge_sort(self):
self.write('6\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Merge sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def reprint_array(self):
self.write('7\n')
self._read("Current Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def exit(self):
self.write('0\n')
self._read("Thanks for joining us\n")
self._read("See you next time\n")
|
data_util.py | imalikshake/StyleNet | 202 | 35426 | <filename>data_util.py<gh_stars>100-1000
import numpy as np
class BatchGenerator(object):
'''Generator for returning shuffled batches.
data_x -- list of input matrices
data_y -- list of output matrices
batch_size -- size of batch
input_size -- input width
output_size -- output width
mini -- create subsequences for truncating backprop
mini_len -- truncated backprop window'''
def __init__(self, data_x, data_y, batch_size, input_size, output_size, mini=True, mini_len=200):
self.input_size = input_size
self.output_size = output_size
self.data_x = data_x
self.data_y = data_y
self.batch_size = batch_size
self.batch_count = len(range(0, len(self.data_x), self.batch_size))
self.batch_length = None
self.mini = mini
self.mini_len = mini_len
def batch(self):
while True:
idxs = np.arange(0, len(self.data_x))
np.random.shuffle(idxs)
# np.random.shuffle(idxs)
shuff_x = []
shuff_y = []
for i in idxs:
shuff_x.append(self.data_x[i])
shuff_y.append(self.data_y[i])
for batch_idx in range(0, len(self.data_x), self.batch_size):
input_batch = []
output_batch = []
for j in xrange(batch_idx, min(batch_idx+self.batch_size,len(self.data_x)), 1):
input_batch.append(shuff_x[j])
output_batch.append(shuff_y[j])
input_batch, output_batch, seq_len = self.pad(input_batch, output_batch)
yield input_batch, output_batch, seq_len
def pad(self, sequence_X, sequence_Y):
current_batch = len(sequence_X)
padding_X = [0]*self.input_size
padding_Y = [0]*self.output_size
lens = [sequence_X[i].shape[0] for i in range(len(sequence_X))]
# lens2 = [sequence_Y[i].shape[0] for i in range(len(sequence_Y))]
#
max_lens = max(lens)
# max_lens2 = max(lens2)
#
# assert max_lens == max_lens2
# print(max_lens)
for i, x in enumerate(lens):
length = x
a = list(sequence_X[i])
b = list(sequence_Y[i])
while length < max_lens:
a.append(padding_X)
b.append(padding_Y)
length+=1
if self.mini:
while length % self.mini_len != 0:
a.append(padding_X)
b.append(padding_Y)
length+=1
sequence_X[i] = np.array(a)
sequence_Y[i] = np.array(b)
# for x in minis:
# mini_X.append(np.array(a[x:min(x+self.mini,x)]))
# mini_Y.append(np.array(b[x:min(x+self.mini,x)]))
# print sequence_X[i].shape
# print sequence_Y[i].shape
# assert all(x.shape == (max_lens, self.input_size) for x in sequence_X)
# assert all(y.shape == (max_lens, self.output_size) for y in sequence_Y)
sequence_X = np.vstack([np.expand_dims(x, 1) for x in sequence_X])
sequence_Y = np.vstack([np.expand_dims(y, 1) for y in sequence_Y])
if not self.mini:
mini_batches = 1
max_lens = max(lens)
else:
mini_batches = length/self.mini_len
max_lens = self.mini_len
sequence_X = np.reshape(sequence_X, [current_batch*mini_batches, max_lens, self.input_size])
sequence_Y = np.reshape(sequence_Y, [current_batch*mini_batches, max_lens, self.output_size])
return sequence_X, sequence_Y, max_lens
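# Usage sketch (added for illustration; the toy matrices below are assumptions,
# not the real feature data).  Each element of data_x / data_y is a matrix of
# shape (sequence_length, width).
if __name__ == '__main__':
    toy_x = [np.random.rand(50, 4) for _ in range(6)]
    toy_y = [np.random.rand(50, 2) for _ in range(6)]
    gen = BatchGenerator(toy_x, toy_y, batch_size=2, input_size=4,
                         output_size=2, mini=True, mini_len=10)
    batch_x, batch_y, seq_len = next(gen.batch())
    print batch_x.shape, batch_y.shape, seq_len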
|
python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/wintypes.py | truthiswill/intellij-community | 695 | 35439 | <gh_stars>100-1000
#@PydevCodeAnalysisIgnore
# XXX This module needs cleanup.
from ctypes import *
DWORD = c_ulong
WORD = c_ushort
BYTE = c_byte
ULONG = c_ulong
LONG = c_long
LARGE_INTEGER = c_longlong
ULARGE_INTEGER = c_ulonglong
HANDLE = c_ulong # in the header files: void *
HWND = HANDLE
HDC = HANDLE
HMODULE = HANDLE
HINSTANCE = HANDLE
HRGN = HANDLE
HTASK = HANDLE
HKEY = HANDLE
HPEN = HANDLE
HGDIOBJ = HANDLE
HMENU = HANDLE
LCID = DWORD
WPARAM = c_uint
LPARAM = c_long
BOOL = c_long
VARIANT_BOOL = c_short
LPCOLESTR = LPOLESTR = OLESTR = c_wchar_p
LPCWSTR = LPWSTR = c_wchar_p
LPCSTR = LPSTR = c_char_p
class RECT(Structure):
_fields_ = [("left", c_long),
("top", c_long),
("right", c_long),
("bottom", c_long)]
RECTL = RECT
class POINT(Structure):
_fields_ = [("x", c_long),
("y", c_long)]
POINTL = POINT
class SIZE(Structure):
_fields_ = [("cx", c_long),
("cy", c_long)]
SIZEL = SIZE
def RGB(red, green, blue):
return red + (green << 8) + (blue << 16)
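# Example (illustrative): RGB(255, 0, 0) == 0x0000FF -- red occupies the low byte
# of the packed COLORREF value.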
class FILETIME(Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
class MSG(Structure):
_fields_ = [("hWnd", HWND),
("message", c_uint),
("wParam", WPARAM),
("lParam", LPARAM),
("time", DWORD),
("pt", POINT)]
MAX_PATH = 260
class WIN32_FIND_DATAA(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_char * MAX_PATH),
("cAlternameFileName", c_char * 14)]
class WIN32_FIND_DATAW(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_wchar * MAX_PATH),
("cAlternameFileName", c_wchar * 14)]
|
test/visualization/test_visualize.py | wukathryn/axondeepseg | 115 | 35473 | <filename>test/visualization/test_visualize.py<gh_stars>100-1000
# coding: utf-8
from pathlib import Path
import pytest
from AxonDeepSeg.visualization.visualize import visualize_training
class TestCore(object):
def setup(self):
# Get the directory where this current file is saved
self.fullPath = Path(__file__).resolve().parent
# Move up to the test directory, "test/"
self.testPath = self.fullPath.parent
self.pathModel = (
self.testPath /
'__test_files__' /
'__test_model__' /
'Model'
)
def teardown(self):
pass
# --------------visualize_training tests-------------- #
@pytest.mark.unit
def test_visualize_training_runs_successfully(self):
assert visualize_training(str(self.pathModel))
|
src/trusted/validator_ragel/trie_test.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 35497 | #!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import trie
class TrieTest(unittest.TestCase):
def MakeUncompressedTrie(self):
uncompressed = trie.Node()
accept = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '2'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '2', '3'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '3'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '4'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '5'], accept)
return uncompressed
def CheckTrieAccepts(self, accept_sequences):
accept = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
self.assertEquals([(accept, ['0', '1', '2']),
(accept, ['0', '1', '2', '3']),
(accept, ['0', '1', '3']),
(accept, ['0', '1', '4']),
(accept, ['0', '1', '5'])],
accept_sequences)
def testTrieAddAndMerge(self):
uncompressed = self.MakeUncompressedTrie()
self.CheckTrieAccepts(trie.GetAllAcceptSequences(uncompressed))
# n0 -0-> n1 -1-> n2 -2-> n3 -3-> n4
# | -3-> n5
# | -4-> n6
# | -5-> n7
self.assertEquals(8, len(trie.GetAllUniqueNodes(uncompressed)))
node_cache = trie.NodeCache()
compressed_trie = node_cache.Merge(node_cache.empty_node, uncompressed)
self.CheckTrieAccepts(trie.GetAllAcceptSequences(compressed_trie))
# (n4, n5. n6, n7) can be grouped together from above
self.assertEquals(5, len(trie.GetAllUniqueNodes(compressed_trie)))
def testTrieSerializationAndDeserialization(self):
uncompressed = self.MakeUncompressedTrie()
node_cache = trie.NodeCache()
compressed_trie = node_cache.Merge(node_cache.empty_node, uncompressed)
reconstructed_trie = trie.TrieFromDict(trie.TrieToDict(compressed_trie),
node_cache)
self.CheckTrieAccepts(trie.GetAllAcceptSequences(reconstructed_trie))
self.assertEquals(5, len(trie.GetAllUniqueNodes(reconstructed_trie)))
def testTrieDiff(self):
trie1 = trie.Node()
trie2 = trie.Node()
accept1 = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
accept2 = trie.AcceptInfo(input_rr='%eax', output_rr='%ecx')
trie.AddToUncompressedTrie(trie1, ['0', '1', '2'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '3'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '4'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '5'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '2'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '3'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '4'], accept2)
node_cache = trie.NodeCache()
compressed_trie1 = node_cache.Merge(node_cache.empty_node, trie1)
compressed_trie2 = node_cache.Merge(node_cache.empty_node, trie2)
diffs = set()
compressed_diffs = set()
for diff in trie.DiffTries(trie1, trie2, node_cache.empty_node, ()):
diffs.add(diff)
for diff in trie.DiffTries(compressed_trie1, compressed_trie2,
node_cache.empty_node, ()):
compressed_diffs.add(diff)
self.assertEquals(
diffs,
set([(('0', '1', '4'), accept1, accept2),
(('0', '1', '5'), accept1, None)]))
self.assertEquals(diffs, compressed_diffs)
if __name__ == '__main__':
unittest.main()
|
analysis/log_analysis/check_util.py | leozz37/makani | 1,178 | 35554 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for checks."""
from makani.analysis.checks import base_check
from makani.lib.python import import_util
# TODO: Move this to //analysis/checks/base_check.py
def LoadListOfChecks(path_to_checks):
"""Load the ListOfChecks object given the path to its file and class.
Args:
path_to_checks: A string specifying the location of the checks.
E.g. makani.analysis.my_checks.MyCheck.
Returns:
The ListOfChecks object.
"""
cls = import_util.ImportClass(path_to_checks)
return cls(for_log=True)
def LoadJsonCheck(path_to_check, parameters_json):
r"""Load the Check object given the path to its classpath and parameters.
Args:
path_to_check: A string specifying the location of the checks.
E.g. makani.analysis.my_checks.MyCheck
parameters_json: A JSON serialized string of the parameters needed to
instantiate the class.
E.g. "{\"for_log\": true, \"warning_ranges\": {\"ranges\": [0, 180]},
\"normal_ranges\": {\"ranges\": [80, 150]}}"
Returns:
The Check object.
"""
cls = import_util.ImportClass(path_to_check)
parameters = base_check.ParseCheckSpecs(parameters_json)
return cls(**parameters)
def LoadCheck(path_to_check, params):
"""Load the ListOfChecks object given the path to its file and class.
Args:
path_to_check: A string specifying the location of the checks.
E.g. makani.analysis.my_checks.MyCheck.
params: A string specifying parameters to be passed into the check.
Returns:
The CheckItem object.
"""
cls = import_util.ImportClass(path_to_check)
return cls(**params)
|
bcbio/picard/metrics.py | a113n/bcbio-nextgen | 418 | 35583 | <reponame>a113n/bcbio-nextgen
# Back compatibility -- use broad subdirectory for new code
from bcbio.broad.metrics import *
|
examples/nlp/entity_linking/query_index.py | hamjam/NeMo | 4,145 | 35633 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Dict
import numpy as np
import torch
from build_index import load_model
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss is required for building the index. Please install faiss-gpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_query_embedding(query, model):
"""Use entity linking encoder to get embedding for index query"""
model_input = model.tokenizer(
query,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=512,
return_token_type_ids=True,
return_attention_mask=True,
)
query_emb = model.forward(
input_ids=torch.LongTensor([model_input["input_ids"]]).to(device),
token_type_ids=torch.LongTensor([model_input["token_type_ids"]]).to(device),
attention_mask=torch.LongTensor([model_input["attention_mask"]]).to(device),
)
return query_emb
def query_index(
query: str, cfg: DictConfig, model: object, index: object, pca: object, idx2id: dict, id2string: dict,
) -> Dict:
"""
Query the nearest neighbor index of entities to find the
concepts in the index dataset that are most similar to the
query.
Args:
query (str): entity to look up in the index
        cfg (DictConfig): config object to specify query parameters
model (EntityLinkingModel): entity linking encoder model
index (object): faiss index
pca (object): sklearn pca transformation to be applied to queries
idx2id (dict): dictionary mapping unique concept dataset index to
its CUI
        id2string (dict): dictionary mapping each unique CUI to a
representative english description of
the concept
Returns:
A dictionary with the concept ids of the index's most similar
entities as the keys and a tuple containing the string
representation of that concept and its cosine similarity to
the query as the values.
"""
query_emb = get_query_embedding(query, model).detach().cpu().numpy()
if cfg.apply_pca:
query_emb = pca.transform(query_emb)
dist, neighbors = index.search(query_emb.astype(np.float32), cfg.query_num_factor * cfg.top_n)
dist, neighbors = dist[0], neighbors[0]
unique_ids = OrderedDict()
neighbor_idx = 0
# Many of nearest neighbors could map to the same concept id, their idx is their unique identifier
while len(unique_ids) < cfg.top_n and neighbor_idx < len(neighbors):
concept_id_idx = neighbors[neighbor_idx]
concept_id = idx2id[concept_id_idx]
# Only want one instance of each unique concept
if concept_id not in unique_ids:
concept = id2string[concept_id]
unique_ids[concept_id] = (concept, 1 - dist[neighbor_idx])
neighbor_idx += 1
unique_ids = dict(unique_ids)
return unique_ids
def main(cfg: DictConfig, restore: bool):
"""
Loads faiss index and allows commandline queries
to the index. Builds new index if one hasn't been built yet.
Args:
cfg: Config file specifying index parameters
restore: Whether to restore model weights trained
by the user. Otherwise will load weights
used before self alignment pretraining.
"""
if not os.path.isfile(cfg.index.index_save_name) or (
cfg.apply_pca and not os.path.isfile(cfg.index.pca.pca_save_name) or not os.path.isfile(cfg.index.idx_to_id)
):
logging.warning("Either no index and/or no mapping from entity idx to ids exists. Please run `build_index.py`")
return
logging.info("Loading entity linking encoder model")
model = load_model(cfg.model, restore)
logging.info("Loading index and associated files")
index = faiss.read_index(cfg.index.index_save_name)
idx2id = pkl.load(open(cfg.index.idx_to_id, "rb"))
id2string = pkl.load(open(cfg.index.id_to_string, "rb")) # Should be created during dataset prep
    pca = None
    if cfg.index.apply_pca:
        pca = pkl.load(open(cfg.index.pca.pca_save_name, "rb"))
while True:
query = input("enter index query: ")
        output = query_index(query, cfg.index, model, index, pca, idx2id, id2string)
if query == "exit":
break
for concept_id in output:
concept_details = output[concept_id]
concept_id = "C" + str(concept_id).zfill(7)
print(concept_id, concept_details)
print("----------------\n")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--restore", action="store_true", help="Whether to restore encoder model weights from nemo path"
)
parser.add_argument("--project_dir", required=False, type=str, default=".")
parser.add_argument("--cfg", required=False, type=str, default="./conf/umls_medical_entity_linking_config.yaml")
args = parser.parse_args()
cfg = OmegaConf.load(args.cfg)
cfg.project_dir = args.project_dir
main(cfg, args.restore)
|
cvat/apps/iam/schema.py | ACHultman/cvat | 4,197 | 35635 | <reponame>ACHultman/cvat
# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from drf_spectacular.extensions import OpenApiFilterExtension, OpenApiAuthenticationExtension
from drf_spectacular.plumbing import build_parameter_type
from drf_spectacular.utils import OpenApiParameter
# https://drf-spectacular.readthedocs.io/en/latest/customization.html?highlight=OpenApiFilterExtension#step-5-extensions
class OrganizationFilterExtension(OpenApiFilterExtension):
"""Describe OrganizationFilterBackend filter"""
target_class = 'cvat.apps.iam.filters.OrganizationFilterBackend'
priority = 1
def get_schema_operation_parameters(self, auto_schema, *args, **kwargs):
"""Describe query parameters"""
return [
build_parameter_type(
name=self.target.organization_slug,
required=False,
location=OpenApiParameter.QUERY,
description=self.target.organization_slug_description,
schema={'type': 'string'},
),
build_parameter_type(
name=self.target.organization_id,
required=False,
location=OpenApiParameter.QUERY,
description=self.target.organization_id_description,
schema={'type': 'string'},
)
]
class SignatureAuthenticationScheme(OpenApiAuthenticationExtension):
target_class = 'cvat.apps.iam.authentication.SignatureAuthentication'
name = 'SignatureAuthentication' # name used in the schema
def get_security_definition(self, auto_schema):
return {
'type': 'apiKey',
'in': 'query',
'name': 'sign',
} |
forex_python/bitcoin.py | Otisey/forex-python | 505 | 35647 | from decimal import Decimal
import simplejson as json
import requests
from .converter import RatesNotAvailableError, DecimalFloatMismatchError
class BtcConverter(object):
"""
Get bit coin rates and convertion
"""
def __init__(self, force_decimal=False):
self._force_decimal = force_decimal
def _decode_rates(self, response, use_decimal=False):
if self._force_decimal or use_decimal:
decoded_data = json.loads(response.text, use_decimal=True)
else:
decoded_data = response.json()
return decoded_data
def get_latest_price(self, currency):
"""
        Get the latest price of one bitcoin in the given currency, 1 BTC => X USD
"""
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if self._force_decimal:
return Decimal(price)
return price
return None
def get_previous_price(self, currency, date_obj):
"""
        Get the price of one bitcoin on the given date
"""
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if self._force_decimal:
return Decimal(price)
return price
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def get_previous_price_list(self, currency, start_date, end_date):
"""
Get List of prices between two dates
"""
start = start_date.strftime('%Y-%m-%d')
end = end_date.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
price_dict = data.get('bpi', {})
return price_dict
return {}
def convert_to_btc(self, amount, currency):
"""
Convert X amount to Bit Coins
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_btc_to_cur(self, coins, currency):
"""
Convert X bit coins to valid currency amount
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_amount = coins * price
return converted_amount
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur requires coins parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_to_btc_on(self, amount, currency, date_obj):
"""
Convert X amount to BTC based on given date rate
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def convert_btc_to_cur_on(self, coins, currency, date_obj):
"""
Convert X BTC to valid currency amount based on given date
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = coins*price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def get_symbol(self):
"""
Here is Unicode symbol for bitcoin
"""
return "\u0E3F"
_Btc_Converter = BtcConverter()
get_btc_symbol = _Btc_Converter.get_symbol
convert_btc_to_cur_on = _Btc_Converter.convert_btc_to_cur_on
convert_to_btc_on = _Btc_Converter.convert_to_btc_on
convert_btc_to_cur = _Btc_Converter.convert_btc_to_cur
convert_to_btc = _Btc_Converter.convert_to_btc
get_latest_price = _Btc_Converter.get_latest_price
get_previous_price = _Btc_Converter.get_previous_price
get_previous_price_list = _Btc_Converter.get_previous_price_list
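# Illustrative usage of the module-level helpers (a sketch only; it assumes this module is
# importable as forex_python.bitcoin and it needs network access to the CoinDesk API):
#   from decimal import Decimal
#   from forex_python.bitcoin import convert_to_btc, convert_btc_to_cur
#   convert_to_btc(Decimal('100'), 'USD')    # BTC equivalent of 100 USD at the latest rate
#   convert_btc_to_cur(Decimal('2'), 'EUR')  # EUR value of 2 BTC at the latest rate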
|
mayan/apps/user_management/links.py | bonitobonita24/Mayan-EDMS | 343 | 35670 | from django.utils.translation import ugettext_lazy as _
from mayan.apps.authentication.link_conditions import condition_user_is_authenticated
from mayan.apps.navigation.classes import Link, Separator, Text
from mayan.apps.navigation.utils import factory_condition_queryset_access
from .icons import (
icon_current_user_details, icon_group_create, icon_group_delete_single,
icon_group_delete_multiple, icon_group_edit, icon_group_list,
icon_group_setup, icon_group_user_list, icon_user_create,
icon_user_edit, icon_user_group_list, icon_user_list,
icon_user_delete_single, icon_user_delete_multiple,
icon_user_set_options, icon_user_setup
)
from .link_conditions import condition_user_is_not_superuser
from .permissions import (
permission_group_create, permission_group_delete, permission_group_edit,
permission_group_view, permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
from .utils import get_user_label_text
# Current user
link_current_user_details = Link(
args='request.user.id',
condition=condition_user_is_authenticated,
icon=icon_current_user_details, text=_('User details'),
view='user_management:user_details'
)
# Group
link_group_create = Link(
icon=icon_group_create, permissions=(permission_group_create,),
text=_('Create new group'), view='user_management:group_create'
)
link_group_delete_single = Link(
args='object.id', icon=icon_group_delete_single,
permissions=(permission_group_delete,), tags='dangerous',
text=_('Delete'), view='user_management:group_delete_single'
)
link_group_delete_multiple = Link(
icon=icon_group_delete_multiple, tags='dangerous', text=_('Delete'),
view='user_management:group_delete_multiple'
)
link_group_edit = Link(
args='object.id', icon=icon_group_edit,
permissions=(permission_group_edit,), text=_('Edit'),
view='user_management:group_edit'
)
link_group_list = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='Group',
object_permission=permission_group_view,
), icon=icon_group_list, text=_('Groups'),
view='user_management:group_list'
)
link_group_user_list = Link(
args='object.id', icon=icon_group_user_list,
permissions=(permission_group_edit,), text=_('Users'),
view='user_management:group_members'
)
link_group_setup = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='Group',
callback=condition_user_is_not_superuser,
object_permission=permission_group_view,
view_permission=permission_group_create
), icon=icon_group_setup, text=_('Groups'),
view='user_management:group_list'
)
# User
link_user_create = Link(
condition=condition_user_is_authenticated, icon=icon_user_create,
permissions=(permission_user_create,), text=_('Create new user'),
view='user_management:user_create'
)
link_user_delete_single = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_delete_single, permissions=(permission_user_delete,),
tags='dangerous', text=_('Delete'),
view='user_management:user_delete_single'
)
link_user_delete_multiple = Link(
icon=icon_user_delete_multiple, tags='dangerous', text=_('Delete'),
view='user_management:user_delete_multiple'
)
link_user_edit = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_edit, permissions=(permission_user_edit,), text=_('Edit'),
view='user_management:user_edit'
)
link_user_group_list = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_group_list, permissions=(permission_user_edit,),
text=_('Groups'), view='user_management:user_groups'
)
link_user_list = Link(
icon=icon_user_list, text=_('Users'),
condition=factory_condition_queryset_access(
app_label='auth', model_name='User',
callback=condition_user_is_authenticated,
object_permission=permission_user_view,
view_permission=permission_user_create
), view='user_management:user_list'
)
link_user_set_options = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_set_options, permissions=(permission_user_edit,),
text=_('User options'), view='user_management:user_options'
)
link_user_setup = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='User',
object_permission=permission_user_view,
view_permission=permission_user_create,
), icon=icon_user_setup, text=_('Users'),
view='user_management:user_list'
)
separator_user_label = Separator()
text_user_label = Text(
html_extra_classes='menu-user-name', text=get_user_label_text
)
|
utils/asyncTable.py | jiafangjun/DD_KaoRou | 108 | 35673 | <reponame>jiafangjun/DD_KaoRou
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
from PySide2.QtWidgets import QWidget, QMainWindow, QGridLayout, QFileDialog, QToolBar,\
QAction, QDialog, QStyle, QSlider, QLabel, QPushButton, QStackedWidget, QHBoxLayout,\
QLineEdit, QTableWidget, QAbstractItemView, QTableWidgetItem, QGraphicsTextItem, QMenu,\
QGraphicsScene, QGraphicsView, QGraphicsDropShadowEffect, QComboBox, QMessageBox, QColorDialog
from PySide2.QtMultimedia import QMediaPlayer
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem
from PySide2.QtGui import QIcon, QKeySequence, QFont, QBrush, QColor
from PySide2.QtCore import Qt, QTimer, QEvent, QPoint, Signal, QSizeF, QUrl, QThread
def cnt2Time(cnt, interval, value=0):
'''
    receives ints, returns a list of str
    converts a count of `interval`-millisecond steps, starting at step `value`,
    into time labels of the form m:ss.xx (hundredths of a second)
'''
labels = []
for i in range(value, cnt + value):
m, s = divmod(i * interval, 60000)
s, ms = divmod(s, 1000)
labels.append(('%s:%02d.%03d' % (m, s, ms))[:-1])
return labels
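# A couple of hand-checked examples of cnt2Time (illustrative only):
#   cnt2Time(2, 100)            -> ['0:00.00', '0:00.10']   # two 100 ms intervals from step 0
#   cnt2Time(2, 100, value=600) -> ['1:00.00', '1:00.10']   # same, starting at step 600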
class refillVerticalLabel(QThread):
def __init__(self, value, globalInterval, subtitle, parent=None):
super(refillVerticalLabel, self).__init__(parent)
self.value = value - 1
self.globalInterval = globalInterval
self.oldInterval = self.globalInterval
self.subtitle = subtitle
def setGlobalInterval(self, globalInterval):
self.globalInterval = globalInterval
def run(self):
while 1:
scrollValue = self.subtitle.verticalScrollBar().value()
if scrollValue != self.oldInterval:
print(scrollValue)
self.oldInterval = scrollValue
refillToken = False
for y in range(scrollValue - 1, scrollValue + 60):
if not self.subtitle.verticalHeaderItem(y):
refillToken = True
break
if refillToken:
for cnt, label in enumerate(cnt2Time(60, self.globalInterval, self.value)):
self.subtitle.setVerticalHeaderItem(self.value + cnt, QTableWidgetItem(label))
time.sleep(0.000001)
time.sleep(20)
class asyncTable(QThread):
reconnect = Signal()
def __init__(self, subtitleDict, oldInterval, globalInterval, duration, subtitle, autoSub, tablePreset, position, parent=None):
super(asyncTable, self).__init__(parent)
self.subtitleDict = subtitleDict
self.oldInterval = oldInterval
self.globalInterval = globalInterval
self.duration = duration
self.subtitle = subtitle
self.autoSub = autoSub
self.tablePreset = tablePreset
self.position = position
def initSubtitle(self):
# for index, subData in self.subtitleDict.items():
# for start, rowData in subData.items():
# if start >= 0:
# startRow = start // self.oldInterval
# deltaRow = rowData[0] // self.oldInterval
# for y in range(startRow, startRow + deltaRow + 1):
# self.subtitle.setItem(y, index, QTableWidgetItem(''))
# self.subtitle.item(y, index).setBackground(QBrush(QColor('#232629'))) # 全部填黑
# if self.subtitle.rowspan(y, index) > 1:
# self.subtitle.setSpan(y, index, 1, 1)
self.subtitle.clear()
        self.subtitle.setRowCount(self.duration // self.globalInterval + 1)  # reset the table row count
        for t in self.autoSub:  # re-mark the positions detected by AI speech recognition
start, end = t
startRow = start // self.globalInterval
endRow = end // self.globalInterval
if self.tablePreset[1]:
self.subtitle.setItem(startRow, 0, QTableWidgetItem(self.tablePreset[0]))
try:
self.subtitle.item(startRow, 0).setBackground(QBrush(QColor('#35545d')))
except:
pass
self.subtitle.setSpan(startRow, 0, endRow - startRow, 1)
if self.tablePreset[0]:
self.subtitleDict[0][start] = [end - start, self.tablePreset[0]]
else:
for y in range(startRow, endRow):
self.subtitle.setItem(y, 0, QTableWidgetItem(self.tablePreset[0]))
try:
self.subtitle.item(y, 0).setBackground(QBrush(QColor('#35545d')))
except:
pass
if self.tablePreset[0]:
self.subtitleDict[0][y * self.globalInterval] = [self.globalInterval, self.tablePreset[0]]
scrollValue = self.subtitle.verticalScrollBar().value() - 1
for cnt, label in enumerate(cnt2Time(60, self.globalInterval, scrollValue)):
self.subtitle.setVerticalHeaderItem(scrollValue + cnt, QTableWidgetItem(label))
time.sleep(0.000001)
        # for cnt, label in enumerate(cnt2Time(200, self.globalInterval)):  # only draw the first 200; remaining row labels are generated dynamically as the user drags the scrollbar
# self.subtitle.setVerticalHeaderItem(cnt, QTableWidgetItem(label))
# time.sleep(0.000000001)
def run(self):
self.initSubtitle()
for index, subData in self.subtitleDict.items():
for start, rowData in subData.items():
startRow = start // self.globalInterval
deltaRow = rowData[0] // self.globalInterval
if deltaRow:
endRow = startRow + deltaRow
for row in range(startRow, endRow):
self.subtitle.setItem(row, index, QTableWidgetItem(rowData[1]))
if row >= 0:
self.subtitle.item(row, index).setBackground(QBrush(QColor('#35545d')))
if endRow - startRow > 1:
self.subtitle.setSpan(startRow, index, endRow - startRow, 1)
row = self.position // self.globalInterval
self.subtitle.selectRow(row)
self.subtitle.verticalScrollBar().setValue(row - 10)
self.reconnect.emit()
|
maigret/__main__.py | noi4eg/maigret | 1,156 | 35680 | <gh_stars>1000+
#! /usr/bin/env python3
"""
Maigret entrypoint
"""
import asyncio
from .maigret import main
if __name__ == "__main__":
asyncio.run(main())
|
feapder/network/user_pool/__init__.py | ibryang/feapder | 876 | 35711 | __all__ = [
"GuestUserPool",
"GuestUser",
"NormalUserPool",
"NormalUser",
"GoldUserPool",
"GoldUser",
"GoldUserStatus",
]
from .gold_user_pool import GoldUserPool, GoldUser, GoldUserStatus
from .guest_user_pool import GuestUserPool, GuestUser
from .normal_user_pool import NormalUserPool, NormalUser
|
scripts/run_burner_bench.py | joye1503/cocrawler | 166 | 35730 | <reponame>joye1503/cocrawler<filename>scripts/run_burner_bench.py
import sys
import logging
import functools
import asyncio
import cocrawler.burner as burner
import cocrawler.parse as parse
import cocrawler.stats as stats
test_threadcount = 2
loop = asyncio.get_event_loop()
b = burner.Burner(test_threadcount, loop, 'parser')
queue = asyncio.Queue()
def parse_all(name, string):
links1, _ = parse.find_html_links(string, url=name)
links2, embeds2 = parse.find_html_links_and_embeds(string, url=name)
all2 = links2.union(embeds2)
if len(links1) != len(all2):
print('{} had different link counts of {} and {}'.format(name, len(links1), len(all2)))
extra1 = links1.difference(all2)
extra2 = all2.difference(links1)
print(' extra in links: {!r}'.format(extra1))
print(' extra in links and embeds: {!r}'.format(extra2))
return 1,
async def work():
while True:
w = await queue.get()
string = ' ' * 10000
partial = functools.partial(parse_all, w, string)
await b.burn(partial)
queue.task_done()
async def crawl():
workers = [asyncio.Task(work(), loop=loop) for _ in range(test_threadcount)]
print('queue count is {}'.format(queue.qsize()))
await queue.join()
print('join is done')
for w in workers:
if not w.done():
w.cancel()
# Main program:
for i in range(10000):
queue.put_nowait('foo')
print('Queue size is {}, beginning work.'.format(queue.qsize()))
try:
loop.run_until_complete(crawl())
print('exit run until complete')
except KeyboardInterrupt:
sys.stderr.flush()
print('\nInterrupt. Exiting cleanly.\n')
finally:
loop.stop()
loop.run_forever()
loop.close()
levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[3])
stats.report()
parse.report()
|
examples/crankd/sample-of-events/generate-event-plist.py | timgates42/pymacadmin | 112 | 35731 | #!/usr/bin/env python
"""
Generates a list of OS X system events into a plist for crankd.
This is designed to create a large (but probably not comprehensive) sample
of the events generated by Mac OS X that crankd can tap into. The generated
file will call the 'tunnel.sh' as the command for each event; said fail can
be easily edited to redirect the output to wherever you would like it to go.
"""
OUTPUT_FILE = "crankd-config.plist"
from SystemConfiguration import SCDynamicStoreCopyKeyList, SCDynamicStoreCreate
# Each event has a general event type, and a specific event
# The category is the key, and the value is a list of specific events
event_dict = {}
def AddEvent(event_category, specific_event):
"""Adds an event to the event dictionary"""
if event_category not in event_dict:
event_dict[event_category] = []
event_dict[event_category].append(specific_event)
def AddCategoryOfEvents(event_category, events):
"""Adds a list of events that all belong to the same category"""
for specific_event in events:
AddEvent(event_category, specific_event)
def AddKnownEvents():
"""Here we add all the events that we know of to the dictionary"""
# Add a bunch of dynamic events
store = SCDynamicStoreCreate(None, "generate_event_plist", None, None)
AddCategoryOfEvents(u"SystemConfiguration",
SCDynamicStoreCopyKeyList(store, ".*"))
# Add some standard NSWorkspace events
AddCategoryOfEvents(u"NSWorkspace",
u'''
NSWorkspaceDidLaunchApplicationNotification
NSWorkspaceDidMountNotification
NSWorkspaceDidPerformFileOperationNotification
NSWorkspaceDidTerminateApplicationNotification
NSWorkspaceDidUnmountNotification
NSWorkspaceDidWakeNotification
NSWorkspaceSessionDidBecomeActiveNotification
NSWorkspaceSessionDidResignActiveNotification
NSWorkspaceWillLaunchApplicationNotification
NSWorkspaceWillPowerOffNotification
NSWorkspaceWillSleepNotification
NSWorkspaceWillUnmountNotification
'''.split())
def PrintEvents():
"""Prints all the events, for debugging purposes"""
for category in sorted(event_dict):
print category
for event in sorted(event_dict[category]):
print "\t" + event
def OutputEvents():
"""Outputs all the events to a file"""
# print the header for the file
plist = open(OUTPUT_FILE, 'w')
print >>plist, '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>'''
for category in sorted(event_dict):
# print out the category
print >>plist, " <key>%s</key>\n <dict>" % category
for event in sorted(event_dict[category]):
print >>plist, """
<key>%s</key>
<dict>
<key>command</key>
<string>%s '%s' '%s'</string>
</dict>""" % ( event, 'tunnel.sh', category, event )
# end the category
print >>plist, " </dict>"
# end the plist file
print >>plist, '</dict>'
print >>plist, '</plist>'
plist.close()
def main():
"""Runs the program"""
AddKnownEvents()
#PrintEvents()
OutputEvents()
main()
|
sightpy/geometry/sphere.py | ulises1229/Python-Raytracer | 326 | 35737 | <reponame>ulises1229/Python-Raytracer
import numpy as np
from ..utils.constants import *
from ..utils.vector3 import vec3
from ..geometry import Primitive, Collider
class Sphere(Primitive):
def __init__(self,center, material, radius, max_ray_depth = 5, shadow = True):
super().__init__(center, material, max_ray_depth, shadow = shadow)
self.collider_list += [Sphere_Collider(assigned_primitive = self, center = center, radius = radius)]
self.bounded_sphere_radius = radius
def get_uv(self, hit):
return hit.collider.get_uv(hit)
class Sphere_Collider(Collider):
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
self.radius = radius
def intersect(self, O, D):
b = 2 * D.dot(O - self.center)
c = self.center.square_length() + O.square_length() - 2 * self.center.dot(O) - (self.radius * self.radius)
disc = (b ** 2) - (4 * c)
sq = np.sqrt(np.maximum(0, disc))
h0 = (-b - sq) / 2
h1 = (-b + sq) / 2
h = np.where((h0 > 0) & (h0 < h1), h0, h1)
pred = (disc > 0) & (h > 0)
M = (O + D * h)
NdotD = ((M - self.center) * (1. / self.radius) ).dot(D)
pred1 = (disc > 0) & (h > 0) & (NdotD > 0)
pred2 = (disc > 0) & (h > 0) & (NdotD < 0)
pred3 = True
#return an array with hit distance and the hit orientation
return np.select([pred1,pred2,pred3] , [[h, np.tile(UPDOWN, h.shape)], [h,np.tile(UPWARDS, h.shape)], FARAWAY])
def get_Normal(self, hit):
# M = intersection point
return (hit.point - self.center) * (1. / self.radius)
def get_uv(self, hit):
M_C = (hit.point - self.center) / self.radius
phi = np.arctan2(M_C.z, M_C.x)
theta = np.arcsin(M_C.y)
u = (phi + np.pi) / (2*np.pi)
v = (theta + np.pi/2) / np.pi
return u,v |
tests/test_download.py | HeavyTony2/downloader-cli | 301 | 35743 | <gh_stars>100-1000
"""Tests various methods of the Download
class.
All the methods that start with test are used
to test a certain function. The test method
will have the name of the method being tested
separated by an underscore.
If the method to be tested is extract_content,
the test method name will be test_extract_content
"""
from hashlib import md5
from os import remove
from downloader_cli.download import Download
TEST_URL = "http://172.16.17.32/5MB.zip"
def test__extract_border_icon():
"""Test the _extract_border_icon method"""
download = Download(TEST_URL)
icon_one = download._extract_border_icon("#")
icon_two = download._extract_border_icon("[]")
icon_none = download._extract_border_icon("")
icon_more = download._extract_border_icon("sdafasdfasdf")
assert icon_one == ('#', '#'), "Should be ('#', '#')"
assert icon_two == ('[', ']'), "Should be ('[', '])"
assert icon_none == ('|', '|'), "Should be ('|', '|')"
assert icon_more == ('|', '|'), "Should be ('|', '|')"
def test__build_headers():
"""Test the _build_headers method"""
download = Download(TEST_URL)
download._build_headers(1024)
header_built = download.headers
assert header_built == {"Range": "bytes={}-".format(1024)}, \
"Should be 1024"
def test__preprocess_conn():
"""Test the _preprocess_conn method"""
download = Download(TEST_URL)
download._preprocess_conn()
assert download.f_size == 5242880, "Should be 5242880"
def test__format_size():
"""
Test the function that formats the size
"""
download = Download(TEST_URL)
size, unit = download._format_size(255678999)
# Size should be 243.83449459075928
# and unit should be `MB`
size = int(size)
assert size == 243, "Should be 243"
assert unit == "MB", "Should be MB"
def test__format_time():
"""
Test the format time function that formats the
passed time into a readable value
"""
download = Download(TEST_URL)
time, unit = download._format_time(2134991)
# Time should be 9 days
assert int(time) == 9, "Should be 9"
assert unit == "d", "Should be d"
time, unit = download._format_time(245)
# Time should be 4 minutes
assert int(time) == 4, "Should be 4"
assert unit == "m", "Should be m"
def test_file_integrity():
"""
Test the integrity of the downloaded file.
We will test the 5MB.zip file which has a hash
of `eb08885e3082037a12a42308c521fa3c`.
"""
HASH = "eb08885e3082037a12a42308c521fa3c"
download = Download(TEST_URL)
download.download()
# Once download is done, check the integrity
_hash = md5(open("5MB.zip", "rb").read()).hexdigest()
assert _hash == HASH, "Integrity check failed for 5MB.zip"
# Remove the file now
remove(download.basename)
|
terraform/stacks/analytics/cloud_sniper_beaconing_detection/cloud_sniper_beaconing_detection.py | houey/cloud-sniper | 160 | 35771 | import os
import ipaddress
import numpy as np
import pandas as pd
import datetime
import boto3
import gzip
import json
from signal_processing import signalProcess
BUCKET_NAME = os.environ.get("BUCKET_NAME", None)
VPC_FLOW_LOGS_PATH = os.environ.get("VPC_FLOW_LOGS_PATH", None)
FINDINGS_PATH = os.environ.get("FINDINGS_PATH", None)
TMP_DOWNLOAD_DIR = "/tmp/s3_download"
FLOW_COLUMNS = [
"date",
"version",
"account-id",
"interface-id",
"srcaddr",
"dstaddr",
"srcport",
"dstport",
"protocol",
"packets",
"bytes",
"start",
"end",
"action",
"log-status",
]
def cloud_sniper_beaconing_detection(event, context):
bucket_name = BUCKET_NAME
vpc_flow_logs_path = VPC_FLOW_LOGS_PATH
findings_path = FINDINGS_PATH
df = load_data(bucket_name, vpc_flow_logs_path)
print(f"Number of raw records: {len(df.index)}")
version = df.version.iloc[0] # constant
account_id = df["account-id"].iloc[0] # constant
df = filter_format_data(df)
print(f"Number of records after filtering missing data: {len(df.index)}")
df = sort_data(df)
print(f"Number of records after filtering by time: {len(df.index)}")
df = filter_useless_data(df)
print(f"Number of records after filtering by port: {len(df.index)}")
df = filter_unfrequent_data(df)
print(f"Number of records after filtering unfrequent: {len(df.index)}")
res = find_beacons(df)
new_fields = {
"hits": "",
"cloud.provider": "aws",
"event.type": "beaconing",
"cloud.account.name": "",
"interface.vpc.id": "",
"protocol": "",
"version": version,
"cloud.account.id": account_id,
}
list(map(lambda x: x.update(new_fields), res))
print(f"Result: {res}")
save_results(bucket_name, findings_path, res)
return res
def load_data(s3_bucket, s3_vpc_flow_logs_path):
s3 = boto3.resource('s3')
bucket = s3.Bucket(name=s3_bucket)
prefix = s3_vpc_flow_logs_path
if prefix.startswith("/"):
prefix = prefix[1:]
if not prefix.endswith("/"):
prefix += "/"
if not os.path.exists(TMP_DOWNLOAD_DIR):
os.mkdir(TMP_DOWNLOAD_DIR)
for i, s3_file_obj in enumerate(bucket.objects.filter(Prefix=prefix)):
if s3_file_obj.key.endswith(".log.gz"):
extension = "log.gz"
elif s3_file_obj.key.endswith(".log"):
extension = "log"
else:
continue
bucket.download_file(s3_file_obj.key,
TMP_DOWNLOAD_DIR + "/%06d" % i + "." + extension)
data = []
for fname in sorted(os.listdir(TMP_DOWNLOAD_DIR)):
if fname.endswith(".log.gz"):
open_ = gzip.open
decode = True
elif fname.endswith(".log"):
open_ = open
decode = False
else:
continue
with open_(os.path.join(TMP_DOWNLOAD_DIR, fname), 'r') as fd:
first_line = True
for line in fd:
if first_line:
first_line = False
continue
if decode:
line = line.decode("utf-8").strip().split(" ")
else:
line = line.strip().split(" ")
data.append(line)
if data and (len(data[0]) == len(FLOW_COLUMNS)):
df = pd.DataFrame(data, columns=FLOW_COLUMNS)
df.drop(['date'], axis=1, inplace=True)
else:
df = pd.DataFrame(data, columns=FLOW_COLUMNS[1:])
return df
def filter_format_data(df):
df = df[df.srcaddr != "-"]
df = df[df.dstaddr != "-"]
df.drop(["version", "srcport"], axis=1, inplace=True)
df = df.replace("-", np.nan)
df = df.replace("-", np.nan)
df[["dstport", "protocol", "packets", "bytes", "start", "end"]] = \
df[["dstport", "protocol", "packets", "bytes", "start", "end"]] \
.apply(pd.to_numeric)
return df
def sort_data(df):
df['datetime'] = pd.to_datetime(df.start, unit='s')
# TODO: should we process just the last hours?
df = df.set_index('datetime')
df.sort_index(inplace=True)
return df.reset_index(level=0)
def filter_useless_data(df):
# Requirements
# * srcIP should be private
    # * dstport <= 1024 and != 123
if df.empty:
return df
df = df[df.srcaddr.map(lambda x: ipaddress.ip_address(x).is_private)]
df = df[df.dstport <= 1024]
df = df[df.dstport != 123]
return df
def filter_unfrequent_data(df):
# remove communications if there were less than 6 snippets
selection = df.groupby(["srcaddr", "dstaddr", "dstport"])
df = selection.filter(lambda x: len(x) >= 6)
df = df.reset_index(level=0)
return df
def find_beacons(df):
res = []
time_fmt = "%Y-%m-%dT%H:%M:%S.%f"
groups = df.groupby(["srcaddr", "dstaddr", "dstport"])
data_in = {
"data": {},
"time": {}
}
for (srcaddr, dstaddr, port), traffic in groups:
k = (srcaddr, dstaddr, port)
data_in["data"][k] = traffic.bytes
data_in["time"][k] = traffic.datetime
lrner = signalProcess(data_in, options_in=None)
output = lrner.getPrimaryPeriods()
for (srcaddr, dstaddr, port) in output["powers"]:
if output["powers"][(srcaddr, dstaddr, port)][0] is not None:
print(data_in["time"][k])
k = (srcaddr, dstaddr, port)
start_time = data_in["time"][k].iloc[0].strftime(time_fmt)[:-3] + 'Z'
end_time = data_in["time"][k].iloc[-1].strftime(time_fmt)[:-3] + 'Z'
res.append({
"source.ip": srcaddr,
"destination.ip": dstaddr,
"destination.port": int(port),
"timestamp": start_time,
"event.end": end_time,
"event.start": start_time
})
return res
def save_results(bucket_name, findings_path, res):
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket(name=bucket_name)
if findings_path.startswith("/"):
findings_path = findings_path[1:]
if findings_path.endswith("/"):
findings_path = findings_path[:-1]
(bucket.Object(key=f"{findings_path}/beaconing_detection_{now}.json")
.put(Body=bytes(json.dumps(res).encode('UTF-8'))))
if __name__ == "__main__":
print(json.dumps(cloud_sniper_beaconing_detection(None, None), indent=4))
|
web/cache.py | therealplato/vim-awesome | 1,379 | 35790 | <reponame>therealplato/vim-awesome
"""Module to hold the instance of the cache"""
from flask.ext.cache import Cache
cache = Cache()
|
stores/apps/preferences/forms.py | diassor/CollectorCity-Market-Place | 135 | 35806 | import re, logging
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from django.contrib.localflavor.us.forms import USStateSelect,\
USPhoneNumberField
from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification
from preferences.models import ShopPolicies
from auth.models import User
from users.models import Profile
class GeneralPreferenceForm(ModelForm):
email = forms.EmailField(required=False)
phone = USPhoneNumberField(required=False)
class Meta:
model = Preference
fields = ['name_store', 'email', 'phone']
class ProfileForm(ModelForm):
state = forms.CharField(widget=USStateSelect)
class Meta:
model = Profile
fields = ['street_address', 'zip', 'city', 'state', 'country', ]
def clean_zip(self):
zip = self.cleaned_data.get("zip", "")
if zip.strip() == "": raise forms.ValidationError("Zip is a required field.")
if not (re.match("[0-9]{5}(-[0-9]{4})?$", zip)): raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
return zip
def clean_country(self):
country = self.cleaned_data.get("country", "")
if country.strip() == "": raise forms.ValidationError("Country is a required field.")
return country
def clean_street_address(self):
street = self.cleaned_data.get("street_address", "")
if street.strip() == "": raise forms.ValidationError("Street is a required field.")
return street
def clean_city(self):
city = self.cleaned_data.get("city", "")
if city.strip() == "": raise forms.ValidationError("City is a required field.")
return city
class TaxesPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['taxes_same_state_store', 'taxes_to_shipping_fees']
class TaxStateForm(ModelForm):
#state = forms.CharField(widget=USStateSelect)
tax = forms.DecimalField(help_text=_("Enter a state tax rate number (between 1 and 100)"))
class Meta:
model = TaxState
exclude = ['shop']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateForm, self).__init__(*args, ** kwargs)
def clean_state(self):
state = self.cleaned_data['state']
try:
TaxState.objects.get(shop=self.shop, state=state)
except TaxState.DoesNotExist:
return state
raise forms.ValidationError(_("A tax for state %s already exists." % state))
def clean_tax(self):
tax = self.cleaned_data['tax']
if tax < 0:
raise forms.ValidationError(_("A tax has to be more or equal 0%"))
elif tax > 100:
raise forms.ValidationError(_("A tax has to be less than 100%"))
return tax
class TaxStateEditForm(ModelForm):
class Meta:
model = TaxState
exclude = ['shop', 'state']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateEditForm, self).__init__(*args, ** kwargs)
def clean_tax(self):
tax = self.cleaned_data['tax']
if tax < 0:
raise forms.ValidationError(_("A tax has to be more or equal 0%"))
elif tax > 100:
raise forms.ValidationError(_("A tax has to be less than 100%"))
return tax
class AuctionsPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['allow_sessions', 'allow_open_auctions', 'default_days', 'open_auto_extend', 'session_auto_extend']
class DnsShopForm(ModelForm):
class Meta:
model = DnsShop
exclude = ['shop']
def clean_dns(self):
dns = self.cleaned_data['dns']
try:
DnsShop.objects.get(dns=dns)
except DnsShop.DoesNotExist:
return dns
raise forms.ValidationError(_("A shop with that dns already exists."))
class ShippingWeightForm(ModelForm):
class Meta:
model = ShippingWeight
exclude = ['shop']
class ShippingPriceForm(ModelForm):
class Meta:
model = ShippingPrice
exclude = ['shop']
class ShippingItemForm(ModelForm):
class Meta:
model = ShippingItem
exclude = ['shop']
class EmailNotificationForm(ModelForm):
class Meta:
model = EmailNotification
fields = ['subject', 'body']
class ShopPoliciesForm(ModelForm):
class Meta:
model = ShopPolicies
fields = ['refund_policy', 'privacy_policy', 'terms_of_service']
class MarketingForm(ModelForm):
class Meta:
model = Preference
fields = ['google_analytics_account_number']
def clean_google_analytics_account_number(self):
google_analytics_account_number = self.cleaned_data['google_analytics_account_number']
if re.match(r"^\w{2}\-\d{4,8}\-\d$", google_analytics_account_number) is None:
raise forms.ValidationError('Invalid analitycs account number')
return google_analytics_account_number
class UsernameChangeForm(forms.ModelForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
help_text = _("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
error_message = _("This value must contain only letters, numbers and underscores."))
class Meta:
model = User
fields = ['username'] |
web/datasets/tasks.py | andressadotpy/maria-quiteria | 151 | 35835 | <filename>web/datasets/tasks.py
from datetime import datetime
from logging import info
from pathlib import Path
from typing import List
import requests
from celery import shared_task
from django.conf import settings
from django.contrib.admin.options import get_content_type_for_model
from requests import HTTPError
from tika import parser
from web.datasets.adapters import (
to_citycouncil_bid,
to_citycouncil_contract,
to_citycouncil_expense,
to_citycouncil_revenue,
)
from web.datasets.models import (
CityCouncilBid,
CityCouncilContract,
CityCouncilExpense,
CityCouncilRevenue,
File,
SyncInformation,
)
from web.datasets.services import get_s3_client
client = get_s3_client(settings)
class WebserviceException(Exception):
pass
@shared_task
def content_from_file(file_pk=None, path=None, keep_file=True):
if not any([file_pk, path]):
raise Exception("Ou `file_pk` ou `path` devem ser informados.")
a_file = None
if file_pk:
a_file = File.objects.get(pk=file_pk)
if a_file.content is not None:
return a_file.content
path = client.download_file(a_file.s3_file_path)
keep_file = False
if not Path(path).exists():
info(f"Arquivo {path} não encontrado.")
return
raw = parser.from_file(path)
if not keep_file:
Path(path).unlink()
if a_file:
a_file.content = raw["content"] or ""
a_file.save()
return a_file.content
return raw["content"]
@shared_task
def backup_file(file_id):
try:
file_obj = File.objects.get(pk=file_id, s3_url__isnull=True)
except File.DoesNotExist:
info(f"O arquivo ({file_id}) não existe ou já possui backup.")
return
if not file_obj.url and not file_obj.local_path:
info(f"O arquivo ({file_id}) não tem URL ou não existe localmente.")
return
model_name = file_obj.content_object._meta.model_name
relative_file_path = (
f"{model_name}/{file_obj.created_at.year}/"
f"{file_obj.created_at.month}/{file_obj.created_at.day}/"
)
location = file_obj.local_path or file_obj.url
s3_url, s3_file_path = client.upload_file(
location, relative_file_path, prefix=file_obj.checksum
)
file_obj.s3_file_path = s3_file_path
file_obj.s3_url = s3_url
file_obj.save()
return s3_url
@shared_task
def get_city_council_updates(formatted_date):
"""Solicita atualizações ao webservice da Câmara."""
target_date = datetime.strptime(formatted_date, "%Y-%m-%d").date()
sync_info, _ = SyncInformation.objects.get_or_create(
date=target_date, source="camara", defaults={"succeed": False}
)
response = requests.get(
settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT,
params={
"data": formatted_date, # formato aaaa-mm-dd
"token": settings.CITY_COUNCIL_WEBSERVICE_TOKEN,
},
headers={"User-Agent": "Maria Quitéria"},
)
try:
response.raise_for_status()
sync_info.succeed = True
except HTTPError:
sync_info.succeed = False
sync_info.save()
raise HTTPError
response = response.json()
sync_info.response = response
if response.get("erro"):
sync_info.succeed = False
sync_info.save()
raise WebserviceException(response["erro"])
sync_info.save()
return response
@shared_task(ignore_result=True)
def distribute_city_council_objects_to_sync(payload):
"""Recebe o payload e dispara uma task para cada registro.
O webservice da Câmara retorna uma lista de ações (inserção,
atualização e deleção) e os registros que sofreram cada uma
delas. Essa task executa cada uma de maneira separada para que,
caso tenham algum erro, possam ser tratados de maneira separada.
"""
action_methods = {
"inclusoesContrato": add_citycouncil_contract,
"alteracoesContrato": update_citycouncil_contract,
"exclusoesContrato": remove_citycouncil_contract,
"inclusoesLicitacao": add_citycouncil_bid,
"alteracoesLicitacao": update_citycouncil_bid,
"exclusoesLicitacao": remove_citycouncil_bid,
"inclusoesReceita": add_citycouncil_revenue,
"alteracoesReceita": update_citycouncil_revenue,
"exclusoesReceita": remove_citycouncil_revenue,
"inclusoesDespesa": add_citycouncil_expense,
"alteracoesDespesa": update_citycouncil_expense,
"exclusoesDespesa": remove_citycouncil_expense,
}
for action_name, records in payload.items():
info(f"{action_name}: {len(records)} registros")
task = action_methods.get(action_name)
if action_name.startswith("exclusoes"):
task.delay(records)
else:
for record in records:
task.delay(record)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def save_citycouncil_files(files, object, url_key):
if not files:
return
content_type = get_content_type_for_model(object)
from web.datasets.management.commands._file import save_file
if files:
for file_ in files:
save_file(file_[url_key], content_type, object.pk)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_bid(record):
new_item = to_citycouncil_bid(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
bid, _ = CityCouncilBid.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_bid(record):
bid = CityCouncilBid.objects.get(external_code=record["codLic"])
updated_item = to_citycouncil_bid(record)
for key, value in updated_item.items():
setattr(bid, key, value)
bid.save()
save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_bid(records: List[dict]):
to_be_removed = [record["codLic"] for record in records]
CityCouncilBid.objects.filter(external_code__in=to_be_removed).update(excluded=True)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_contract(record):
new_item = to_citycouncil_contract(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
contract, _ = CityCouncilContract.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
save_citycouncil_files(record.get("arquivos"), contract, "caminho")
return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_contract(record):
contract = CityCouncilContract.objects.get(external_code=record["codCon"])
updated_item = to_citycouncil_contract(record)
for key, value in updated_item.items():
setattr(contract, key, value)
contract.save()
save_citycouncil_files(record.get("arquivos"), contract, "caminho")
return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_contract(records: List[dict]):
to_be_removed = [record["codCon"] for record in records]
CityCouncilContract.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_revenue(record):
new_item = to_citycouncil_revenue(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
revenue, _ = CityCouncilRevenue.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_revenue(record):
revenue = CityCouncilRevenue.objects.get(external_code=record["codLinha"])
updated_item = to_citycouncil_revenue(record)
for key, value in updated_item.items():
setattr(revenue, key, value)
revenue.save()
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_revenue(records: List[dict]):
to_be_removed = [record["codLinha"] for record in records]
CityCouncilRevenue.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_expense(record):
new_item = to_citycouncil_expense(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
expense, _ = CityCouncilExpense.objects.get_or_create(
external_file_code=new_item["external_file_code"],
external_file_line=new_item["external_file_line"],
number=new_item["number"],
phase=new_item["phase"],
defaults=new_item,
)
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_expense(record):
expense = CityCouncilExpense.objects.get(
external_file_code=record["codArquivo"],
external_file_line=record["codLinha"],
)
updated_item = to_citycouncil_expense(record)
for key, value in updated_item.items():
setattr(expense, key, value)
expense.save()
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_expense(records: List[dict]):
for record in records:
CityCouncilExpense.objects.filter(
external_file_code=record["codigo"], external_file_line=record["linha"]
).update(excluded=True)
|
src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | ChaseKnowlden/vscode-jupyter | 2,461 | 35841 | <reponame>ChaseKnowlden/vscode-jupyter<filename>src/testMultiRootWkspc/workspace5/remoteDebugger-start.py
import sys
import time
def main():
sys.stdout.write('this is stdout')
sys.stdout.flush()
sys.stderr.write('this is stderr')
sys.stderr.flush()
# Give the debugger some time to add a breakpoint.
time.sleep(5)
for i in range(1):
time.sleep(0.5)
pass
print('this is print')
main()
|
archived-stock-trading-bot-v1/yf_extender.py | Allcallofduty10/stock-trading-bot | 101 | 35857 | <reponame>Allcallofduty10/stock-trading-bot
import sys
from datetime import datetime
import yfinance as yf
def get_ticker_symbol(ticker: yf.Ticker) -> str:
try:
return ticker.get_info()['symbol']
except ImportError:
return ""
def get_stock_state(ticker: yf.Ticker) -> {}:
stock_info = ticker.history("1d").iloc[0].to_dict()
stock_info['Time'] = datetime.now().strftime("%H:%M:%S")
del stock_info['Dividends']
del stock_info['Stock Splits']
return stock_info
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def previous_high(ticker: yf.Ticker, time_period: str) -> float:
high = 0
stock_history = ticker.history(time_period)
for i in range(0, len(stock_history) - 2):
temp_high = stock_history.iloc[i].to_dict()['High']
if temp_high > high:
high = temp_high
return high
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_sma(ticker: yf.Ticker, time_period="1mo", interval="1d") -> float:
stock_history = ticker.history(period=time_period, interval=interval)
summation = 0
time_period_days = 0
for i in range(0, len(stock_history) - 1):
summation += stock_history.iloc[i].to_dict()['Close']
time_period_days += 1
if time_period_days > 0:
return summation / time_period_days
return sys.maxsize
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_ema(ticker: yf.Ticker, time_period="1mo") -> float:
stock_history = ticker.history(period=time_period)
return stock_history.iloc[len(stock_history) - 1].to_dict()['Close'] * (
2.5 / (1 + len(stock_history))) + calculate_sma(ticker, time_period) * (
1 - (2.5 / (1 + len(stock_history))))
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_previous_ema(ticker: yf.Ticker, time_period="1mo", days_previous=1) -> float:
time_period_days = len(ticker.history(period=time_period))
stock_history = ticker.history(period=time_period)
return stock_history.iloc[time_period_days - days_previous - 1].to_dict()['Close'] * (
2.5 / (1 + time_period_days)) + calculate_sma(ticker, time_period) * (
1 - (2.5 / (1 + time_period_days)))
def get_high2current_price_change_percent(ticker: yf.Ticker) -> float:
stock_info = ticker.history("1d").iloc[0].to_dict()
return (stock_info['Close'] - stock_info['High']) / stock_info['High']
def get_direction(ticker: yf.Ticker) -> float:
stock_history = ticker.history(period="1d", interval="1m")
return (stock_history.iloc[len(stock_history) - 1].to_dict()['Close'] - stock_history.iloc[len(stock_history) - 2].to_dict()['Close'])/stock_history.iloc[len(stock_history) - 2].to_dict()['Close']
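# Illustrative usage (a sketch only; requires network access via yfinance, and "AAPL" is just
# an example symbol):
#   ticker = yf.Ticker("AAPL")
#   calculate_sma(ticker, time_period="3mo")        # simple moving average of daily closes
#   calculate_ema(ticker, time_period="1mo")        # exponential moving average
#   get_high2current_price_change_percent(ticker)   # % change from today's high to the last close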
|
tests/test_debianpkg.py | trathborne/nvchecker | 320 | 35873 | # MIT licensed
# Copyright (c) 2020 lilydjwg <<EMAIL>>, et al.
# Copyright (c) 2017 <NAME> <<EMAIL>>, et al.
from flaky import flaky
import pytest
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
@flaky(max_runs=10)
async def test_debianpkg(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
}) == "0.1.7-1"
@flaky(max_runs=10)
async def test_debianpkg_strip_release(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
"strip_release": 1,
}) == "0.1.7"
@flaky(max_runs=10)
async def test_debianpkg_suite(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
"suite": "buster",
}) == "0.1.6-1"
|
data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 454 | 35895 | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScChapecoSpider(FecamGazetteSpider):
name = "sc_chapeco"
FECAM_QUERY = "cod_entidade:71"
TERRITORY_ID = "4204202"
|
saleor/graphql/account/dataloaders.py | fairhopeweb/saleor | 15,337 | 35926 | from collections import defaultdict
from ...account.models import Address, CustomerEvent, User
from ..core.dataloaders import DataLoader
class AddressByIdLoader(DataLoader):
context_key = "address_by_id"
def batch_load(self, keys):
address_map = Address.objects.in_bulk(keys)
return [address_map.get(address_id) for address_id in keys]
class UserByUserIdLoader(DataLoader):
context_key = "user_by_id"
def batch_load(self, keys):
user_map = User.objects.in_bulk(keys)
return [user_map.get(user_id) for user_id in keys]
class CustomerEventsByUserLoader(DataLoader):
context_key = "customer_events_by_user"
def batch_load(self, keys):
events = CustomerEvent.objects.filter(user_id__in=keys)
events_by_user_map = defaultdict(list)
for event in events:
events_by_user_map[event.user_id].append(event)
return [events_by_user_map.get(user_id, []) for user_id in keys]
|
Algo and DSA/LeetCode-Solutions-master/Python/maximize-the-confusion-of-an-exam.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 35930 | <reponame>Sourav692/FAANG-Interview-Preparation
# Time: O(n)
# Space: O(1)
import collections
class Solution(object):
def maxConsecutiveAnswers(self, answerKey, k):
"""
:type answerKey: str
:type k: int
:rtype: int
"""
result = max_count = 0
count = collections.Counter()
for i in xrange(len(answerKey)):
count[answerKey[i]] += 1
max_count = max(max_count, count[answerKey[i]])
if result-max_count >= k:
count[answerKey[i-result]] -= 1
else:
result += 1
return result
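# Example from the problem statement: Solution().maxConsecutiveAnswers("TTFF", 2) == 4,
# since both 'F' answers can be flipped within k=2 changes, giving 4 consecutive equal answers.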
|
homeassistant/components/eliqonline/__init__.py | domwillcode/home-assistant | 30,023 | 35931 | """The eliqonline component."""
|
search_for_similar_images__perceptual_hash__phash/ui/SearchForSimilarSettingsWidget.py | DazEB2/SimplePyScripts | 117 | 35960 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QWidget, QFormLayout, QComboBox, QSpinBox, QCheckBox
from PyQt5.QtCore import QSettings, pyqtSignal
from common import IMAGE_HASH_ALGO, DEFAULT_IMAGE_HASH_ALGO, DEFAULT_IMAGE_HASH_MAX_SCORE
class SearchForSimilarSettingsWidget(QWidget):
about_mark_matching = pyqtSignal(bool)
def __init__(self):
super().__init__()
self.setWindowTitle("Search for similar")
self.cb_algo = QComboBox()
self.cb_algo.addItems(IMAGE_HASH_ALGO)
self.sb_max_score = QSpinBox()
self.cb_mark_matching = QCheckBox()
self.cb_mark_matching.clicked.connect(self.about_mark_matching)
layout = QFormLayout()
layout.addRow("Hash algo:", self.cb_algo)
layout.addRow("Max score:", self.sb_max_score)
layout.addRow("Mark matching:", self.cb_mark_matching)
self.setLayout(layout)
def read_settings(self, ini: QSettings):
ini.beginGroup(self.__class__.__name__)
self.cb_algo.setCurrentText(
ini.value('algo', DEFAULT_IMAGE_HASH_ALGO)
)
self.sb_max_score.setValue(
int(ini.value('max_score', DEFAULT_IMAGE_HASH_MAX_SCORE))
)
self.cb_mark_matching.setChecked(
ini.value('mark_matching', 'true') == 'true'
)
ini.endGroup()
def write_settings(self, ini: QSettings):
ini.beginGroup(self.__class__.__name__)
ini.setValue('algo', self.cb_algo.currentText())
ini.setValue('max_score', self.sb_max_score.value())
ini.setValue('mark_matching', self.cb_mark_matching.isChecked())
ini.endGroup()
|
backpack/extensions/secondorder/diag_ggn/conv1d.py | jabader97/backpack | 395 | 35971 | <filename>backpack/extensions/secondorder/diag_ggn/conv1d.py
from backpack.core.derivatives.conv1d import Conv1DDerivatives
from backpack.extensions.secondorder.diag_ggn.convnd import (
BatchDiagGGNConvND,
DiagGGNConvND,
)
class DiagGGNConv1d(DiagGGNConvND):
def __init__(self):
super().__init__(derivatives=Conv1DDerivatives(), params=["bias", "weight"])
class BatchDiagGGNConv1d(BatchDiagGGNConvND):
def __init__(self):
super().__init__(derivatives=Conv1DDerivatives(), params=["bias", "weight"])
|
scripts/rpc/cmd_parser.py | 5cs/spdk | 2,107 | 35991 | <filename>scripts/rpc/cmd_parser.py<gh_stars>1000+
args_global = ['server_addr', 'port', 'timeout', 'verbose', 'dry_run', 'conn_retries',
'is_server', 'rpc_plugin', 'called_rpc_name', 'func', 'client']
def strip_globals(kwargs):
for arg in args_global:
kwargs.pop(arg, None)
def remove_null(kwargs):
keys = []
for key, value in kwargs.items():
if value is None:
keys.append(key)
for key in keys:
kwargs.pop(key, None)
def apply_defaults(kwargs, **defaults):
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
def group_as(kwargs, name, values):
group = {}
for arg in values:
if arg in kwargs and kwargs[arg] is not None:
group[arg] = kwargs.pop(arg, None)
kwargs[name] = group
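# Illustrative walk-through with a hypothetical kwargs dict (the names are made up):
#   kwargs = {'name': 'bdev0', 'size': None, 'verbose': True, 'client': object()}
#   strip_globals(kwargs)                    # drops 'verbose' and 'client' (listed in args_global)
#   remove_null(kwargs)                      # drops 'size' because it is None
#   apply_defaults(kwargs, block_size=512)   # adds block_size only if not already present
#   group_as(kwargs, 'params', ['name'])     # moves 'name' under kwargs['params']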
|
djangular/tests/utils.py | jianglb-alibaba/djangular-0.2.7 | 145 | 36002 | import os
from djangular import utils
from django.test import SimpleTestCase
class SiteAndPathUtilsTest(SimpleTestCase):
site_utils = utils.SiteAndPathUtils()
def test_djangular_root(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
djangular_dir = os.path.dirname(current_dir)
self.assertEqual(djangular_dir, self.site_utils.get_djangular_root())
|
src/encoded/searches/responses.py | procha2/encoded | 102 | 36005 | from encoded.searches.mixins import CartAggsToFacetsMixin
from snosearch.responses import BasicQueryResponseWithFacets
from snosearch.responses import BasicMatrixResponseWithFacets
class CartQueryResponseWithFacets(CartAggsToFacetsMixin, BasicQueryResponseWithFacets):
'''
Like BasicQueryResponseWithFacets but uses CartAggsToFacetsMixin instead of AggsToFacetsMixin.
'''
def __init__(self, results, query_builder, *args, **kwargs):
super().__init__(results, query_builder, *args, **kwargs)
class CartMatrixResponseWithFacets(CartAggsToFacetsMixin, BasicMatrixResponseWithFacets):
'''
Like BasicMatrixResponseWithFacets but uses CartAggsToFacetsMixin instead of AggsToFacetsMixin.
'''
def __init__(self, results, query_builder, *args, **kwargs):
super().__init__(results, query_builder, *args, **kwargs)
|
unit-tests/live/frames/test-t2ff-sensor.py | ksvbka/librealsense | 6,457 | 36006 | # License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2021 Intel Corporation. All Rights Reserved.
# test:device L500*
# test:device D400*
import pyrealsense2 as rs
from rspy.stopwatch import Stopwatch
from rspy import test, log
import time
import platform
# Start depth + color streams and measure the time from stream open until the first frame arrives, using the sensor API.
# Verify that the time does not exceed the maximum time allowed
# Note - Using Windows Media Foundation to handle power management between USB actions takes time (~27 ms)
def time_to_first_frame(sensor, profile, max_delay_allowed):
"""
Wait for the first frame for 'max_delay_allowed' + 1 extra second
If the frame arrives it will return the seconds it took since open() call
If no frame it will return 'max_delay_allowed'
"""
first_frame_time = max_delay_allowed
open_call_stopwatch = Stopwatch()
def frame_cb(frame):
nonlocal first_frame_time, open_call_stopwatch
if first_frame_time == max_delay_allowed:
first_frame_time = open_call_stopwatch.get_elapsed()
open_call_stopwatch.reset()
sensor.open(profile)
sensor.start(frame_cb)
# Wait condition:
# 1. first frame did not arrive yet
# 2. timeout of 'max_delay_allowed' + 1 extra second reached.
while first_frame_time == max_delay_allowed and open_call_stopwatch.get_elapsed() < max_delay_allowed + 1:
time.sleep(0.05)
sensor.stop()
sensor.close()
return first_frame_time
# The device starts at D0 (Operational) state, allow time for it to get into idle state
time.sleep(3)
#####################################################################################################
test.start("Testing device creation time on " + platform.system() + " OS")
device_creation_stopwatch = Stopwatch()
dev = test.find_first_device_or_exit()
device_creation_time = device_creation_stopwatch.get_elapsed()
max_time_for_device_creation = 1.5
print("Device creation time is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(device_creation_time, max_time_for_device_creation))
test.check(device_creation_time < max_time_for_device_creation)
test.finish()
# Set maximum delay for first frame according to product line
product_line = dev.get_info(rs.camera_info.product_line)
if product_line == "D400":
max_delay_for_depth_frame = 1.5
max_delay_for_color_frame = 1.5
elif product_line == "L500":
max_delay_for_depth_frame = 2.5 # L515 depth frame has a 1.5 seconds built in delay at the FW side + 1.0 second for LRS
max_delay_for_color_frame = 1.5
else:
log.f( "This test support only D400 + L515 devices" )
ds = dev.first_depth_sensor()
cs = dev.first_color_sensor()
dp = next(p for p in
ds.profiles if p.fps() == 30
and p.stream_type() == rs.stream.depth
and p.format() == rs.format.z16)
cp = next(p for p in
cs.profiles if p.fps() == 30
and p.stream_type() == rs.stream.color
and p.format() == rs.format.rgb8)
#####################################################################################################
test.start("Testing first depth frame delay on " + product_line + " device - "+ platform.system() + " OS")
first_depth_frame_delay = time_to_first_frame(ds, dp, max_delay_for_depth_frame)
print("Time until first depth frame is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(first_depth_frame_delay, max_delay_for_depth_frame))
test.check(first_depth_frame_delay < max_delay_for_depth_frame)
test.finish()
#####################################################################################################
test.start("Testing first color frame delay on " + product_line + " device - "+ platform.system() + " OS")
first_color_frame_delay = time_to_first_frame(cs, cp, max_delay_for_color_frame)
print("Time until first color frame is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(first_color_frame_delay, max_delay_for_color_frame))
test.check(first_color_frame_delay < max_delay_for_color_frame)
test.finish()
#####################################################################################################
test.print_results_and_exit()
|
gmacpyutil/gmacpyutil/profiles_test.py | rgayon/macops | 758 | 36043 | <gh_stars>100-1000
"""Tests for profiles module."""
import mock
from google.apputils import basetest
import profiles
class ProfilesModuleTest(basetest.TestCase):
def testGenerateUUID(self):
self.assertIsInstance(profiles.GenerateUUID('a'), str)
self.assertTrue(profiles.GenerateUUID('a').isupper())
self.assertEqual(profiles.GenerateUUID('a'),
profiles.GenerateUUID('a'))
def testValidatePayload(self):
payload = {}
with self.assertRaises(profiles.PayloadValidationError):
profiles.ValidatePayload(payload)
payload.update({profiles.PAYLOADKEYS_IDENTIFIER: 'a',
profiles.PAYLOADKEYS_DISPLAYNAME: 'a',
profiles.PAYLOADKEYS_TYPE: 'com.apple.welcome.to.1984'})
profiles.ValidatePayload(payload)
self.assertEqual(payload.get(profiles.PAYLOADKEYS_UUID),
profiles.GenerateUUID('a'))
self.assertEqual(payload.get(profiles.PAYLOADKEYS_ENABLED), True)
self.assertEqual(payload.get(profiles.PAYLOADKEYS_VERSION), 1)
class ProfileClassTest(basetest.TestCase):
"""Tests for the Profile class."""
def _GetValidProfile(self, include_payload=True):
profile = profiles.Profile()
profile.Set(profiles.PAYLOADKEYS_DISPLAYNAME, 'Acme Corp Config Profile')
profile.Set(profiles.PAYLOADKEYS_IDENTIFIER, 'com.acme.configprofile')
profile.Set(profiles.PAYLOADKEYS_ORG, 'Acme Corp')
profile.Set(profiles.PAYLOADKEYS_SCOPE, ['System', 'User'])
profile.Set(profiles.PAYLOADKEYS_TYPE, 'Configuration')
if include_payload:
profile.AddPayload(self._GetValidPayload())
return profile
def _GetValidPayload(self):
test_payload = {profiles.PAYLOADKEYS_IDENTIFIER: 'com.test.payload',
profiles.PAYLOADKEYS_DISPLAYNAME: 'Test Payload',
profiles.PAYLOADKEYS_TYPE: 'com.apple.welcome.to.1984'}
return test_payload
def testInit(self):
"""Test the __init__ method."""
profile = profiles.Profile()
self.assertIsNotNone(profile._profile)
self.assertEqual(profile._profile[profiles.PAYLOADKEYS_CONTENT], [])
def testGet(self):
profile = profiles.Profile()
profile._profile['TestKey'] = 'TestValue'
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_CONTENT), [])
self.assertEqual(profile.Get('TestKey'), 'TestValue')
def testSet(self):
profile = profiles.Profile()
profile.Set('TestKey', 'TestValue')
profile.Set('OtherKey', 'OtherValue')
self.assertEqual(profile._profile['TestKey'], 'TestValue')
self.assertEqual(profile._profile['OtherKey'], 'OtherValue')
def testStr(self):
profile = self._GetValidProfile()
self.assertEqual(profile.__str__(), 'Acme Corp Config Profile')
def testAddPayload(self):
profile = self._GetValidProfile(include_payload=False)
test_payload = self._GetValidPayload()
with self.assertRaises(profiles.PayloadValidationError):
profile.AddPayload('Payloads should be dicts')
profile.AddPayload(test_payload)
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_CONTENT), [test_payload])
def testValidateProfile(self):
profile = profiles.Profile()
with self.assertRaises(profiles.ProfileValidationError):
profile._ValidateProfile()
profile = self._GetValidProfile(include_payload=False)
with self.assertRaises(profiles.ProfileValidationError):
profile._ValidateProfile()
profile.AddPayload(self._GetValidPayload())
profile._ValidateProfile()
self.assertIsNotNone(profile.Get(profiles.PAYLOADKEYS_UUID))
self.assertIsNotNone(profile.Get(profiles.PAYLOADKEYS_VERSION))
@mock.patch.object(profiles.plistlib, 'writePlist')
def testSaveSuccess(self, mock_writeplist):
profile = self._GetValidProfile()
profile.Save('/tmp/hello')
mock_writeplist.assert_called_once_with(profile._profile, '/tmp/hello')
@mock.patch.object(profiles.plistlib, 'writePlist')
def testSaveIOError(self, mock_writeplist):
profile = self._GetValidProfile()
mock_writeplist.side_effect = IOError
with self.assertRaises(profiles.ProfileSaveError):
profile.Save('/tmp/hello')
mock_writeplist.assert_called_once_with(profile._profile, '/tmp/hello')
@mock.patch.object(profiles.gmacpyutil, 'RunProcess')
@mock.patch.object(profiles.Profile, 'Save')
def testInstallSuccess(self, mock_save, mock_runprocess):
profile = self._GetValidProfile()
mock_runprocess.return_value = ['Output', None, 0]
profile.Install()
mock_save.assert_called_once_with(mock.ANY)
mock_runprocess.assert_called_once_with(
[profiles.CMD_PROFILES, '-I', '-F', mock.ANY],
sudo=None, sudo_password=None)
@mock.patch.object(profiles.gmacpyutil, 'RunProcess')
@mock.patch.object(profiles.Profile, 'Save')
def testInstallSudoPassword(self, mock_save, mock_runprocess):
profile = self._GetValidProfile()
mock_runprocess.return_value = ['Output', None, 0]
profile.Install(sudo_password='<PASSWORD>')
mock_save.assert_called_once_with(mock.ANY)
mock_runprocess.assert_called_once_with(
[profiles.CMD_PROFILES, '-I', '-F', mock.ANY],
        sudo='ladygagaeatssocks', sudo_password='<PASSWORD>')
@mock.patch.object(profiles.gmacpyutil, 'RunProcess')
@mock.patch.object(profiles.Profile, 'Save')
def testInstallCommandFail(self, mock_save, mock_runprocess):
profile = self._GetValidProfile()
mock_runprocess.return_value = ['Output', 'Errors', 42]
with self.assertRaisesRegexp(profiles.ProfileInstallationError,
'Profile installation failed!\n'
'Output, Errors, 42'):
profile.Install(sudo_password='<PASSWORD>')
mock_save.assert_called_once_with(mock.ANY)
mock_runprocess.assert_called_once_with(
[profiles.CMD_PROFILES, '-I', '-F', mock.ANY],
sudo='ladygagaeatssocks', sudo_password='<PASSWORD>')
@mock.patch.object(profiles.gmacpyutil, 'RunProcess')
@mock.patch.object(profiles.Profile, 'Save')
def testInstallCommandException(self, mock_save, mock_runprocess):
profile = self._GetValidProfile()
mock_runprocess.side_effect = profiles.gmacpyutil.GmacpyutilException
with self.assertRaisesRegexp(profiles.ProfileInstallationError,
'Profile installation failed!\n'):
profile.Install(sudo_password='<PASSWORD>')
mock_save.assert_called_once_with(mock.ANY)
mock_runprocess.assert_called_once_with(
[profiles.CMD_PROFILES, '-I', '-F', mock.ANY],
sudo='ladygagaeatssocks', sudo_password='<PASSWORD>')
class NetworkProfileClassTest(basetest.TestCase):
"""Tests for the NetworkProfile class."""
def testInit(self):
profile = profiles.NetworkProfile('testuser')
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_DISPLAYNAME),
'Network Profile (testuser)')
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_DESCRIPTION),
'Network authentication settings')
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_IDENTIFIER),
'com.megacorp.networkprofile')
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_SCOPE),
['System', 'User'])
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_TYPE), 'Configuration')
self.assertEqual(profile.Get(profiles.PAYLOADKEYS_CONTENT), [])
def testGenerateID(self):
profile = profiles.NetworkProfile('testuser')
self.assertEqual(profile._GenerateID('test_suffix'),
'com.megacorp.networkprofile.test_suffix')
self.assertEqual(profile._GenerateID('another_suffix'),
'com.megacorp.networkprofile.another_suffix')
@mock.patch.object(profiles.NetworkProfile, 'AddPayload')
@mock.patch.object(profiles.crypto, 'load_privatekey')
@mock.patch.object(profiles.crypto, 'load_certificate')
@mock.patch.object(profiles.crypto, 'PKCS12Type')
@mock.patch.object(profiles.certs, 'Certificate')
def testAddMachineCertificateSuccess(self, mock_certificate, mock_pkcs12,
mock_loadcert, mock_loadkey,
mock_addpayload):
mock_certobj = mock.MagicMock()
mock_certobj.subject_cn = 'My Cert Subject'
mock_certobj.osx_fingerprint = '0011223344556677889900'
mock_certificate.return_value = mock_certobj
mock_pkcs12obj = mock.MagicMock()
mock_pkcs12obj.export.return_value = '-----PKCS12 Data-----'
mock_pkcs12.return_value = mock_pkcs12obj
mock_loadcert.return_value = 'certobj'
mock_loadkey.return_value = 'keyobj'
profile = profiles.NetworkProfile('testuser')
profile.AddMachineCertificate('fakecert', 'fakekey')
mock_pkcs12.assert_called_once_with()
mock_pkcs12obj.set_certificate.assert_called_once_with('certobj')
mock_pkcs12obj.set_privatekey.assert_called_once_with('keyobj')
mock_pkcs12obj.export.assert_called_once_with('0011223344556677889900')
mock_loadcert.assert_called_once_with(1, 'fakecert')
mock_loadkey.assert_called_once_with(1, 'fakekey')
mock_addpayload.assert_called_once_with(
{profiles.PAYLOADKEYS_IDENTIFIER:
'com.megacorp.networkprofile.machine_cert',
profiles.PAYLOADKEYS_TYPE: 'com.apple.security.pkcs12',
profiles.PAYLOADKEYS_DISPLAYNAME: 'My Cert Subject',
profiles.PAYLOADKEYS_ENABLED: True,
profiles.PAYLOADKEYS_VERSION: 1,
profiles.PAYLOADKEYS_CONTENT: profiles.plistlib.Data(
'-----PKCS12 Data-----'),
profiles.PAYLOADKEYS_UUID: mock.ANY,
'Password': '<PASSWORD>'})
@mock.patch.object(profiles.crypto, 'load_privatekey')
@mock.patch.object(profiles.crypto, 'load_certificate')
@mock.patch.object(profiles.crypto, 'PKCS12Type')
@mock.patch.object(profiles.certs, 'Certificate')
def testAddMachineCertificateInvalidKey(self, mock_certificate, mock_pkcs12,
mock_loadcert, mock_loadkey):
mock_certobj = mock.MagicMock()
mock_certobj.subject_cn = 'My Cert Subject'
mock_certobj.osx_fingerprint = '<PASSWORD>'
mock_certificate.return_value = mock_certobj
mock_pkcs12obj = mock.MagicMock()
mock_pkcs12obj.export.side_effect = profiles.crypto.Error
mock_pkcs12.return_value = mock_pkcs12obj
mock_loadcert.return_value = 'certobj'
mock_loadkey.return_value = 'keyobj_from_different_cert'
profile = profiles.NetworkProfile('testuser')
with self.assertRaises(profiles.CertificateError):
profile.AddMachineCertificate('fakecert', 'otherfakekey')
@mock.patch.object(profiles.certs, 'Certificate')
def testAddMachineCertificateBadCert(self, mock_certificate):
mock_certificate.side_effect = profiles.certs.CertError
profile = profiles.NetworkProfile('testuser')
with self.assertRaises(profiles.CertificateError):
profile.AddMachineCertificate('fakecert', 'fakekey')
@mock.patch.object(profiles.NetworkProfile, 'AddPayload')
@mock.patch.object(profiles.certs, 'Certificate')
def testAddAnchorCertificateSuccess(self, mock_certificate, mock_addpayload):
mock_certobj = mock.MagicMock()
mock_certobj.subject_cn = 'My Cert Subject'
mock_certobj.osx_fingerprint = '0011223344556677889900'
mock_certificate.return_value = mock_certobj
profile = profiles.NetworkProfile('testuser')
profile.AddAnchorCertificate('my_cert')
mock_certificate.assert_called_once_with('my_cert')
mock_addpayload.assert_called_once_with(
{profiles.PAYLOADKEYS_IDENTIFIER:
'com.megacorp.networkprofile.0011223344556677889900',
profiles.PAYLOADKEYS_TYPE: 'com.apple.security.pkcs1',
profiles.PAYLOADKEYS_DISPLAYNAME: 'My Cert Subject',
profiles.PAYLOADKEYS_CONTENT: profiles.plistlib.Data('my_cert'),
profiles.PAYLOADKEYS_ENABLED: True,
profiles.PAYLOADKEYS_VERSION: 1,
profiles.PAYLOADKEYS_UUID: mock.ANY})
@mock.patch.object(profiles.certs, 'Certificate')
def testAddAnchorCertificateBadCert(self, mock_certificate):
mock_certificate.side_effect = profiles.certs.CertError
profile = profiles.NetworkProfile('testuser')
with self.assertRaises(profiles.CertificateError):
profile.AddAnchorCertificate('test_cert')
@mock.patch.object(profiles.NetworkProfile, 'AddPayload')
def testAddNetworkPayloadSSID(self, mock_addpayload):
profile = profiles.NetworkProfile('test_user')
profile._auth_cert = '00000000-AUTH-CERT-UUID-00000000'
profile._anchor_certs = ['00000000-ANCH-ORCE-RTUU-ID000000']
profile.AddTrustedServer('radius.company.com')
profile.AddNetworkPayload('SSID')
eap_client_data = {'AcceptEAPTypes': [13],
'PayloadCertificateAnchorUUID':
['00000000-ANCH-ORCE-RTUU-ID000000'],
'TLSTrustedServerNames':
['radius.company.com'],
'TLSAllowTrustExceptions': False}
mock_addpayload.assert_called_once_with(
{'AutoJoin': True,
'SetupModes': ['System', 'User'],
'PayloadCertificateUUID': '00000000-AUTH-CERT-UUID-00000000',
'EncryptionType': 'WPA',
'Interface': 'BuiltInWireless',
profiles.PAYLOADKEYS_DISPLAYNAME: 'SSID',
profiles.PAYLOADKEYS_IDENTIFIER:
'com.megacorp.networkprofile.ssid.SSID',
profiles.PAYLOADKEYS_TYPE: 'com.apple.wifi.managed',
'SSID_STR': 'SSID',
'EAPClientConfiguration': eap_client_data})
@mock.patch.object(profiles.NetworkProfile, 'AddPayload')
def testAddNetworkPayloadWired(self, mock_addpayload):
profile = profiles.NetworkProfile('test_user')
profile._auth_cert = '00000000-AUTH-CERT-UUID-00000000'
profile._anchor_certs = ['00000000-ANCH-ORCE-RTUU-ID000000']
profile.AddTrustedServer('radius.company.com')
profile.AddNetworkPayload('wired')
eap_client_data = {'AcceptEAPTypes': [13],
'PayloadCertificateAnchorUUID':
['00000000-ANCH-ORCE-RTUU-ID000000'],
'TLSTrustedServerNames':
['radius.company.com'],
'TLSAllowTrustExceptions': False}
mock_addpayload.assert_called_once_with(
{'AutoJoin': True,
'SetupModes': ['System', 'User'],
'PayloadCertificateUUID': '00000000-AUTH-CERT-UUID-00000000',
'EncryptionType': 'Any',
'Interface': 'FirstActiveEthernet',
profiles.PAYLOADKEYS_DISPLAYNAME: 'Wired',
profiles.PAYLOADKEYS_IDENTIFIER:
'com.megacorp.networkprofile.wired',
profiles.PAYLOADKEYS_TYPE: 'com.apple.firstactiveethernet.managed',
'EAPClientConfiguration': eap_client_data})
if __name__ == '__main__':
basetest.main()
|
test/test_regularization.py | rentainhe/glasses | 271 | 36057 | <filename>test/test_regularization.py
import torch
from glasses.nn.regularization import DropBlock, StochasticDepth
def test_drop_block():
drop = DropBlock()
x = torch.ones((1, 3, 28, 28))
x_drop = drop(x)
assert not torch.equal(x, x_drop)
assert drop.training
drop = drop.eval()
x_drop = drop(x)
assert torch.equal(x, x_drop)
assert not drop.training
assert drop.__repr__() == "DropBlock(p=0.5)"
def test_stocastic_depth():
stoc = StochasticDepth()
assert stoc.__repr__() == "StochasticDepth(p=0.5)"
x = torch.ones((2, 3, 28, 28))
stoc = StochasticDepth(p=1)
out = stoc(x)
assert out.sum() > 0
stoc = StochasticDepth(p=10e-6)
out = stoc(x)
assert out.sum() == 0
|
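The tests above pin down DropBlock's train/eval behaviour. As an illustrative aside (not part of the repository file), the layer can sit inside an ordinary torch model like any other nn.Module; the composition below is an assumption made purely for demonstration.

import torch
from torch import nn
from glasses.nn.regularization import DropBlock

# Minimal sketch: DropBlock composes like any other layer (p=0.5 by default, per the test repr).
block = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    DropBlock(),   # active only while block.training is True
    nn.ReLU(),
)

x = torch.randn(1, 3, 28, 28)
print(block(x).shape)                 # torch.Size([1, 16, 28, 28])

block.eval()                          # mirrors test_drop_block: DropBlock becomes the identity
print(torch.equal(block[1](x), x))    # True in eval mode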
c_comp/nodes.py | Commodoreprime/Command-Block-Assembly | 223 | 36105 | <gh_stars>100-1000
class Node:
props = ()
def __init__(self, **kwargs):
for prop in kwargs:
if prop not in self.props:
raise Exception('Invalid property %r, allowed only: %s' %
(prop, self.props))
self.__dict__[prop] = kwargs[prop]
for prop in self.props:
if prop not in self.__dict__:
self.__dict__[prop] = None
self.attrs = {}
def print_node(self, indent=0, indent_size=4,extra=0):
s = self.__class__.__name__
s += '(\n'
i = ' ' * (indent+indent_size)
for prop in self.props:
s += i + prop + ' = '
s += self._print_val(self.__dict__[prop], indent+indent_size, indent_size,
(len(prop) + 3) - indent_size)
s += '\n'
s += (' ' * (indent + extra)) + ')'
return s
def _print_val(self, val, indent, indent_size,extra=0):
if isinstance(val, Node):
return val.print_node(indent+indent_size,indent_size,extra)
elif type(val) == list:
s = '[\n'
i = ' ' * (indent+indent_size)
for e in val:
s += i + self._print_val(e, indent, indent_size)
s += ',\n'
s += (' ' * (indent+extra)) + ']'
return s
else:
return str(val)
class Statement(Node): pass
class Expression(Node): pass
class EmptyStatement(Statement): pass
EmptyStatement.INSTANCE = EmptyStatement()
class FunctionDeclaration(Statement): props = ('type', 'decl', 'body')
class Declaration(Statement): props = ('type', 'init')
class ParamDeclaration(Node): props = ('type', 'decl')
class StructTypeRef(Node): props = ('name',)
class DeclarationSpecifier(Node): props = ('store', 'qual', 'type')
class InitSpec(Node): props = ('decl', 'val')
class DeclaratorSpec(Node): props = ('pointer_depth', 'name_spec')
class ArrayDeclSpec(Node): props = ('name', 'dim')
class FuncDeclSpec(Node): props = ('name', 'params')
class VarArgs(Node): pass
VarArgs.INSTANCE = VarArgs()
class StructSpec(Node): props = ('name', 'decl')
class StructMemberDecl(Node): props = ('spec', 'decl')
class MemberReference(Node): props = ('child', 'idx', 'name')
class TypeName(Node): props = ('type', 'spec')
class LabelledStmt(Statement): props = ('label', 'stmt')
class WhileStmt(Statement): props = ('cond', 'body')
class DoWhileStmt(Statement): props = ('body', 'cond')
class ForStmt(Statement): props = ('init', 'cond', 'after', 'body')
class IfStmt(Statement): props = ('cond', 'true', 'false')
class SwitchStmt(Statement): props = ('expr', 'cases')
class ContinueStmt(Statement): pass
ContinueStmt.INSTANCE = ContinueStmt()
class BreakStmt(Statement): pass
BreakStmt.INSTANCE = BreakStmt()
class ReturnStmt(Statement): props = ('expr',)
class GotoStmt(Statement): props = ('label',)
class CaseStmt(Statement): props = ('choice', 'body')
class SyncStmt(Statement): pass
class ExpressionStmt(Statement): props = ('expr',)
class SizeofExpr(Expression): props = ('expr',)
class ConditionalExpr(Expression): props = ('cond', 'true', 'false')
class FunctionCallExpr(Expression): props = ('ref', 'args')
class IdentifierExpr(Expression): props = ('val',)
class AssignmentExpr(Expression): props = ('left', 'right')
class AssignmentOperatorExpr(Expression): props = ('left', 'op', 'right')
class UnaryExpr(Expression): props = ('op', 'expr')
class BinaryOperatorExpr(Expression): props = ('left', 'op', 'right')
class IncrementExpr(Expression): props = ('dir', 'post', 'expr')
class MemberAccessExpr(Expression): props = ('expr', 'prop', 'deref')
class ArraySubscriptExpr(Expression): props = ('expr', 'sub')
class Literal(Expression): props = ('val',)
class IntLiteral(Literal): pass
class StringLiteral(Literal): pass
class Pragma(Node): props = ('val',)
class Token:
class Type:
IDENTIFIER = 'identifier'
OPERATOR = 'operator'
NUMBER = 'number'
STRING = 'string'
def __init__(self, val, type=None):
self.val = val
self.type = type or Token.Type.OPERATOR
def __str__(self):
return 'Token(%r, %s)' % (self.val, self.type)
class Keyword(Token):
REGISTRY = {}
def __init__(self, val):
super().__init__(val, Token.Type.IDENTIFIER)
Keyword.REGISTRY[val] = self
Token.EOF = Token('<eof>')
Token.OPEN_PAREN = Token('(')
Token.CLOSE_PAREN = Token(')')
Token.OPEN_BRACE = Token('{')
Token.CLOSE_BRACE = Token('}')
Token.OPEN_SQUARE = Token('[')
Token.CLOSE_SQUARE = Token(']')
Token.COMMA = Token(',')
Token.SEMICOLON = Token(';')
Token.QUESTION = Token('?')
Token.COLON = Token(':')
Token.DOT = Token('.')
Token.ARROW = Token('->')
Token.VARARG = Token('...')
Token.OP_ASSIGN = Token('=')
Token.OP_MUL_ASSIGN = Token('*=')
Token.OP_DIV_ASSIGN = Token('/=')
Token.OP_MOD_ASSIGN = Token('%=')
Token.OP_PLUS_ASSIGN = Token('+=')
Token.OP_MINUS_ASSIGN = Token('-=')
Token.OP_LSHIFT_ASSIGN = Token('<<=')
Token.OP_RSHIFT_ASSIGN = Token('>>=')
Token.OP_AND_ASSIGN = Token('&=')
Token.OP_XOR_ASSIGN = Token('^=')
Token.OP_OR_ASSIGN = Token('|=')
Token.OP_PLUS = Token('+')
Token.OP_PLUS_PLUS = Token('++')
Token.OP_MINUS = Token('-')
Token.OP_MINUS_MINUS = Token('--')
Token.OP_STAR = Token('*')
Token.OP_DIV = Token('/')
Token.OP_MOD = Token('%')
Token.OP_AND = Token('&')
Token.OP_OR = Token('|')
Token.OP_AND_AND = Token('&&')
Token.OP_OR_OR = Token('||')
Token.OP_XOR = Token('^')
Token.OP_NOT = Token('!')
Token.OP_BITNOT = Token('~')
Token.OP_SHIFT_LEFT = Token('<<')
Token.OP_SHIFT_RIGHT = Token('>>')
Token.OP_EQUAL = Token('==')
Token.OP_NOT_EQUAL = Token('!=')
Token.OP_LESS_THAN = Token('<')
Token.OP_LESS_OR_EQUAL = Token('<=')
Token.OP_GREATER_THAN = Token('>')
Token.OP_GREATER_OR_EQUAL = Token('>=')
Keyword.DO = Keyword('do')
Keyword.WHILE = Keyword('while')
Keyword.FOR = Keyword('for')
Keyword.IF = Keyword('if')
Keyword.ELSE = Keyword('else')
Keyword.SIZEOF = Keyword('sizeof')
Keyword.SYNC = Keyword('sync')
Keyword.SWITCH = Keyword('switch')
Keyword.CASE = Keyword('case')
Keyword.DEFAULT = Keyword('default')
Keyword.GOTO = Keyword('goto')
Keyword.CONTINUE = Keyword('continue')
Keyword.BREAK = Keyword('break')
Keyword.RETURN = Keyword('return')
Keyword.CONST = Keyword('const')
Keyword.STATIC = Keyword('static')
Keyword.TYPEDEF = Keyword('typedef')
Keyword.STRUCT = Keyword('struct')
|
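As a quick, hypothetical illustration of the Node machinery above (not part of the original file), a small AST can be assembled from the keyword-only props and rendered with print_node; the particular node mix below is just an assumed example.

# Sketch assuming the classes above are importable from nodes.py.
from nodes import BinaryOperatorExpr, IdentifierExpr, IntLiteral, ReturnStmt

# Build a tiny tree for `return x + 1`; an unknown keyword argument would raise.
expr = BinaryOperatorExpr(left=IdentifierExpr(val='x'),
                          op='+',
                          right=IntLiteral(val=1))
stmt = ReturnStmt(expr=expr)

# print_node walks `props`, indenting nested Node values one level per call.
print(stmt.print_node())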
mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/demos/pysteroids_demo/org/theodox/__init__.py | ang-jason/fip_powerx_mini_projects-foxtrot | 2,200 | 36131 |
import math
import itertools
class Vector:
"""
Generic vector operations.
"""
def _apply(self,op, other):
pairwise = None
if type(other) is Vector:
pairwise = zip(self.vals, other.vals)
else:
pairwise = zip(self.vals, [other for _ in self.vals])
return Vector(*itertools.starmap(op, pairwise))
def __init__(self, *args):
self.vals = args
def __add__(self, other):
return self._apply(lambda a, b: a + b, other)
def __sub__(self, other):
return self._apply(lambda a, b: a - b, other)
def __mul__(self, other):
return self._apply(lambda a, b: a*b, other)
def __div__(self, other):
return self._apply(lambda a, b: a / b, other)
def length(self):
total = sum(map(lambda a: math.pow(a, 2), self.vals))
return math.sqrt(total)
def normalized(self):
divisor = [self.length()] * len(self)
return Vector(*(self / divisor))
def __iter__(self):
return py_iter(self.vals)
@classmethod
def map(cls, *args):
return args[0].map(args[1:])
def __getitem__(self, item):
        return self.vals[item]
def __str__(self):
return str(self.vals)
def __len__(self):
return len(self.vals)
@classmethod
def add(cls, a, b):
return Vector(*a) + Vector(*b)
@classmethod
def sub(cls, a, b):
return Vector(*a) - Vector(*b)
@classmethod
def mul(cls, a, b):
return Vector(*a) * Vector(*b)
@classmethod
def div(cls, a, b):
return Vector(*a) / Vector(*b)
@classmethod
def dot(cls, left, right):
return sum(Vector.mul(left, right))
@classmethod
def norm_dot(Vector, left, right):
left = Vector(*left).normalized()
right = Vector(*right).normalized()
return sum(Vector.mul(left, right))
|
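A brief usage sketch for the Vector class above, illustrative only. The class targets Transcrypt, so this sticks to operations that also run under plain CPython and avoids iteration, which relies on Transcrypt's py_iter runtime helper.

v = Vector(3.0, 4.0)
w = Vector(1.0, 2.0)

print(v + w)       # element-wise addition    -> (4.0, 6.0)
print(v - w)       # element-wise subtraction -> (2.0, 2.0)
print(v * 2)       # broadcasting a scalar    -> (6.0, 8.0)
print(v.length())  # Euclidean length         -> 5.0
print(len(v))      # number of components     -> 2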
atlas/foundations_sdk/src/foundations/helpers/queued.py | DeepLearnI/atlas | 296 | 36162 | <gh_stars>100-1000
_QUEUED_JOBS_KEY = 'projects:global:jobs:queued'
_ARCHIVED_JOBS_KEY = 'projects:global:jobs:archived'
def list_jobs(redis):
return {job_id.decode() for job_id in redis.smembers(_QUEUED_JOBS_KEY)}
def remove_jobs(redis, job_id_project_mapping):
for job_id, project_name in job_id_project_mapping.items():
redis.srem(_QUEUED_JOBS_KEY, job_id)
redis.srem('project:{}:jobs:queued'.format(project_name), job_id)
def job_project_names(redis, list_of_job_ids):
return {job_id: _job_project_name(redis, job_id) for job_id in list_of_job_ids}
def _job_project_name(redis, job_id):
project_name = redis.get('jobs:{}:project'.format(job_id))
if project_name:
return project_name.decode()
def add_jobs_to_archive(redis, list_of_job_ids):
for job_id in list_of_job_ids:
redis.sadd(_ARCHIVED_JOBS_KEY, job_id)
def list_archived_jobs(redis):
return {job_id.decode() for job_id in redis.smembers(_ARCHIVED_JOBS_KEY)} |
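The helpers above are thin wrappers over a handful of Redis keys. A possible usage sketch follows; fakeredis, the job ids, and the project name are assumptions made only so the example can run without a Redis server, and the helper functions are assumed to be in scope (same module or imported).

import fakeredis

r = fakeredis.FakeStrictRedis()

# Simulate two queued jobs plus the project lookup key the helpers expect.
r.sadd('projects:global:jobs:queued', 'job-1', 'job-2')
r.set('jobs:job-1:project', 'demo-project')

print(list_jobs(r))                      # {'job-1', 'job-2'}
print(job_project_names(r, ['job-1']))   # {'job-1': 'demo-project'}

remove_jobs(r, {'job-1': 'demo-project'})
add_jobs_to_archive(r, ['job-1'])
print(list_archived_jobs(r))             # {'job-1'}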
15-more-types/mysum.py | SeirousLee/example-code-2e | 990 | 36170 | import functools
import operator
from collections.abc import Iterable
from typing import overload, Union, TypeVar
T = TypeVar('T')
S = TypeVar('S') # <1>
@overload
def sum(it: Iterable[T]) -> Union[T, int]: ... # <2>
@overload
def sum(it: Iterable[T], /, start: S) -> Union[T, S]: ... # <3>
def sum(it, /, start=0): # <4>
return functools.reduce(operator.add, it, start)
|
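The two @overload signatures only guide the type checker; at runtime every call goes through the single reduce-based implementation. A few illustrative calls, assuming the sum defined above is in scope and shadows the builtin:

print(sum([1, 2, 3]))             # 6     -> first overload: Iterable[int], implicit int start
print(sum([1.5, 2.5], 0.0))       # 4.0   -> second overload: explicit float start
print(sum(['a', 'b', 'c'], ''))   # 'abc' -> a str start makes concatenation type-check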
docs/conf.py | guillaume-wisniewski/elpis | 118 | 36210 | <filename>docs/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Elpis'
copyright = '2020, The University of Queensland'
author = '<NAME>, <NAME>, <NAME>'
# The full version, including alpha/beta/rc tags
release = '0.96.0'
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx_autodoc_typehints',
'recommonmark'
]
# Show undocumented members in docs
autodoc_default_options = {
'undoc-members': True,
}
# Mock to get RTD docs to compile
autodoc_mock_imports = ["pytest"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# We also exclude the "ugly" auto-generated elpis.rst file and replace it with our own.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'elpis/elpis.rst']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = '_static/img/logo.png'
html_theme_options = {
'logo_only': True,
}
github_url = 'https://github.com/CoEDL/elpis'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
]
# -- Extension configuration -------------------------------------------------
|
packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_messages/test_models/test_v1.py | sorasful/minos-python | 247 | 36261 | <reponame>sorasful/minos-python
import unittest
import warnings
from unittest.mock import (
patch,
)
from uuid import (
UUID,
uuid4,
)
from minos.common import (
Model,
)
from minos.networks import (
BrokerMessageV1,
BrokerMessageV1Payload,
BrokerMessageV1Status,
BrokerMessageV1Strategy,
)
from tests.utils import (
FakeModel,
)
class TestBrokerMessageV1(unittest.TestCase):
def setUp(self) -> None:
self.topic = "FooCreated"
self.identifier = uuid4()
self.reply_topic = "AddOrderReply"
self.strategy = BrokerMessageV1Strategy.MULTICAST
self.payload = BrokerMessageV1Payload(
content=[FakeModel("blue"), FakeModel("red")], headers={"foo": "bar"}, status=BrokerMessageV1Status.ERROR
)
def test_constructor_simple(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.topic, message.topic)
self.assertIsInstance(message.identifier, UUID)
self.assertEqual(None, message.reply_topic)
self.assertEqual(BrokerMessageV1Strategy.UNICAST, message.strategy)
self.assertEqual(self.payload, message.payload)
def test_constructor(self):
message = BrokerMessageV1(
self.topic,
identifier=self.identifier,
reply_topic=self.reply_topic,
strategy=self.strategy,
payload=self.payload,
)
self.assertEqual(self.topic, message.topic)
self.assertEqual(self.identifier, message.identifier)
self.assertEqual(self.reply_topic, message.reply_topic)
self.assertEqual(self.strategy, message.strategy)
self.assertEqual(self.payload, message.payload)
def test_version(self):
self.assertEqual(1, BrokerMessageV1.version)
def test_topic(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.topic, message.topic)
def test_identifier(self):
message = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier)
self.assertEqual(self.identifier, message.identifier)
def test_reply_topic(self):
message = BrokerMessageV1(self.topic, self.payload, reply_topic=self.reply_topic)
self.assertEqual(self.reply_topic, message.reply_topic)
def test_set_reply_topic(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertIsNone(message.reply_topic)
message.set_reply_topic(self.reply_topic)
self.assertEqual(self.reply_topic, message.reply_topic)
def test_ok(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.payload.ok, message.ok)
def test_status(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.payload.status, message.status)
def test_headers(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.payload.headers, message.headers)
def test_content(self):
message = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(self.payload.content, message.content)
def test_data(self):
message = BrokerMessageV1(self.topic, self.payload)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# noinspection PyDeprecation
self.assertEqual(self.payload.content, message.data)
def test_avro(self):
message = BrokerMessageV1(
self.topic,
identifier=self.identifier,
reply_topic=self.reply_topic,
strategy=self.strategy,
payload=self.payload,
)
observed = BrokerMessageV1.from_avro_bytes(message.avro_bytes)
self.assertEqual(message, observed)
def test_sort(self):
unsorted = [
BrokerMessageV1("", BrokerMessageV1Payload("foo")),
BrokerMessageV1("", BrokerMessageV1Payload(4)),
BrokerMessageV1("", BrokerMessageV1Payload(2)),
BrokerMessageV1("", BrokerMessageV1Payload(3)),
BrokerMessageV1("", BrokerMessageV1Payload(1)),
BrokerMessageV1("", BrokerMessageV1Payload("bar")),
]
expected = [unsorted[0], unsorted[4], unsorted[2], unsorted[3], unsorted[1], unsorted[5]]
observed = sorted(unsorted)
self.assertEqual(expected, observed)
def test_from_avro(self):
expected = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier)
schema = {
"fields": [
{"name": "topic", "type": "string"},
{"name": "identifier", "type": {"logicalType": "uuid", "type": "string"}},
{"name": "reply_topic", "type": ["string", "null"]},
{
"name": "strategy",
"type": {
"logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Strategy",
"type": "string",
},
},
{
"name": "payload",
"type": {
"fields": [
{
"name": "content",
"type": {
"items": {
"fields": [{"name": "data", "type": "string"}],
"name": "FakeModel",
"namespace": "tests.utils.hello",
"type": "record",
},
"type": "array",
},
},
{
"name": "status",
"type": {
"logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Status",
"type": "int",
},
},
{"name": "headers", "type": {"type": "map", "values": "string"}},
],
"name": "BrokerMessageV1Payload",
"namespace": "minos.networks.brokers.messages.models.v1.hello",
"type": "record",
},
},
{"name": "version", "type": "int"},
],
"name": "BrokerMessage",
"namespace": "minos.networks.brokers.messages.models.abc.hello",
"type": "record",
}
data = {
"identifier": str(self.identifier),
"payload": {"content": [{"data": "blue"}, {"data": "red"}], "headers": {"foo": "bar"}, "status": 400},
"reply_topic": None,
"strategy": "unicast",
"topic": "FooCreated",
"version": 1,
}
observed = Model.from_avro(schema, data)
self.assertEqual(expected, observed)
def test_avro_schema(self):
schema = {
"fields": [
{"name": "topic", "type": "string"},
{"name": "identifier", "type": {"logicalType": "uuid", "type": "string"}},
{"name": "reply_topic", "type": ["string", "null"]},
{
"name": "strategy",
"type": {
"logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Strategy",
"type": "string",
},
},
{
"name": "payload",
"type": {
"fields": [
{
"name": "content",
"type": {
"items": {
"fields": [{"name": "data", "type": "string"}],
"name": "FakeModel",
"namespace": "tests.utils.hello",
"type": "record",
},
"type": "array",
},
},
{
"name": "status",
"type": {
"logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Status",
"type": "int",
},
},
{"name": "headers", "type": {"type": "map", "values": "string"}},
],
"name": "BrokerMessageV1Payload",
"namespace": "minos.networks.brokers.messages.models.v1.hello",
"type": "record",
},
},
{"name": "version", "type": "int"},
],
"name": "BrokerMessage",
"namespace": "minos.networks.brokers.messages.models.abc.hello",
"type": "record",
}
with patch("minos.common.AvroSchemaEncoder.generate_random_str", return_value="hello"):
observed = BrokerMessageV1(self.topic, self.payload).avro_schema
self.assertEqual([schema], observed)
def test_avro_data(self):
expected = {
"identifier": str(self.identifier),
"payload": {"content": [{"data": "blue"}, {"data": "red"}], "headers": {"foo": "bar"}, "status": 400},
"reply_topic": None,
"strategy": "unicast",
"topic": "FooCreated",
"version": 1,
}
observed = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier).avro_data
self.assertEqual(expected, observed)
def test_avro_bytes(self):
expected = BrokerMessageV1(self.topic, self.payload)
self.assertEqual(expected, Model.from_avro_bytes(expected.avro_bytes))
class TestBrokerMessagePayload(unittest.TestCase):
def setUp(self) -> None:
self.content = [FakeModel("blue"), FakeModel("red")]
def test_ok(self):
self.assertTrue(BrokerMessageV1Payload(self.content, status=BrokerMessageV1Status.SUCCESS).ok)
self.assertFalse(BrokerMessageV1Payload(self.content, status=BrokerMessageV1Status.ERROR).ok)
self.assertFalse(BrokerMessageV1Payload(self.content, status=BrokerMessageV1Status.SYSTEM_ERROR).ok)
self.assertFalse(BrokerMessageV1Payload(self.content, status=BrokerMessageV1Status.UNKNOWN).ok)
def test_data(self):
payload = BrokerMessageV1Payload(self.content)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# noinspection PyDeprecation
self.assertEqual(self.content, payload.data)
class TestBrokerMessageV1Status(unittest.TestCase):
def test_success(self):
self.assertEqual(BrokerMessageV1Status.SUCCESS, BrokerMessageV1Status(200))
def test_error(self):
self.assertEqual(BrokerMessageV1Status.ERROR, BrokerMessageV1Status(400))
def test_system_error(self):
self.assertEqual(BrokerMessageV1Status.SYSTEM_ERROR, BrokerMessageV1Status(500))
def test_unknown(self):
self.assertEqual(BrokerMessageV1Status.UNKNOWN, BrokerMessageV1Status(56))
if __name__ == "__main__":
unittest.main()
|
microraiden/proxy/resources/paywall_decorator.py | andrevmatos/microraiden | 417 | 36265 | import logging
from flask import Response, make_response, request
from microraiden import HTTPHeaders as header
from flask_restful.utils import unpack
from microraiden.channel_manager import (
ChannelManager,
)
from microraiden.exceptions import (
NoOpenChannel,
InvalidBalanceProof,
InvalidBalanceAmount,
InsufficientConfirmations
)
import microraiden.constants as constants
from microraiden.proxy.resources.request_data import RequestData
from functools import wraps
from eth_utils import is_address
log = logging.getLogger(__name__)
class Paywall(object):
def __init__(self,
channel_manager,
light_client_proxy=None
):
super().__init__()
assert isinstance(channel_manager, ChannelManager)
assert is_address(channel_manager.channel_manager_contract.address)
assert is_address(channel_manager.receiver)
self.contract_address = channel_manager.channel_manager_contract.address
self.receiver_address = channel_manager.receiver
self.channel_manager = channel_manager
self.light_client_proxy = light_client_proxy
def access(self, resource, method, *args, **kwargs):
if self.channel_manager.node_online() is False:
return "Ethereum node is not responding", 502
if self.channel_manager.get_eth_balance() < constants.PROXY_BALANCE_LIMIT:
return "Channel manager ETH balance is below limit", 502
try:
data = RequestData(request.headers, request.cookies)
except ValueError as e:
return str(e), 409
accepts_html = (
'text/html' in request.accept_mimetypes and
request.accept_mimetypes.best != '*/*'
)
headers = {}
price = resource.price()
# payment required
if price > 0:
paywall, headers = self.paywall_check(price, data)
if paywall and accepts_html is True:
reply_data = resource.get_paywall(request.path)
return self.reply_webui(reply_data, headers)
elif paywall:
return make_response('', 402, headers)
# all ok, return actual content
resp = method(request.path, *args, **kwargs)
# merge headers, resource headers take precedence
headers_lower = {key.lower(): value for key, value in headers.items()}
lower_to_case = {key.lower(): key for key in headers}
if isinstance(resp, Response):
resource_headers = (key for key, value in resp.headers)
else:
data, code, resource_headers = unpack(resp)
for key in resource_headers:
key_lower = key.lower()
if key_lower in headers_lower:
headers.pop(lower_to_case[key_lower])
if isinstance(resp, Response):
resp.headers.extend(headers)
return resp
else:
headers.update(resource_headers)
return make_response(str(data), code, resource_headers)
def paywall_check(self, price, data):
"""Check if the resource can be sent to the client.
Returns (is_paywalled: Bool, http_headers: dict)
"""
headers = self.generate_headers(price)
if not data.balance_signature:
return True, headers
# try to get an existing channel
try:
channel = self.channel_manager.verify_balance_proof(
data.sender_address, data.open_block_number,
data.balance, data.balance_signature)
except InsufficientConfirmations as e:
log.debug('Refused payment: Insufficient confirmations (sender=%s, block=%d)' %
(data.sender_address, data.open_block_number))
headers.update({header.INSUF_CONFS: "1"})
return True, headers
except NoOpenChannel as e:
log.debug('Refused payment: Channel does not exist (sender=%s, block=%d)' %
(data.sender_address, data.open_block_number))
headers.update({header.NONEXISTING_CHANNEL: "1"})
return True, headers
except InvalidBalanceAmount as e:
log.debug('Refused payment: Invalid balance amount: %s (sender=%s, block=%d)' %
(str(e), data.sender_address, data.open_block_number))
headers.update({header.INVALID_PROOF: 1})
return True, headers
except InvalidBalanceProof as e:
log.debug('Refused payment: Invalid balance proof: %s (sender=%s, block=%d)' %
(str(e), data.sender_address, data.open_block_number))
headers.update({header.INVALID_PROOF: 1})
return True, headers
# set headers to reflect channel state
assert channel.sender is not None
assert channel.balance >= 0
headers.update(
{
header.SENDER_ADDRESS: channel.sender,
header.SENDER_BALANCE: channel.balance
})
if channel.last_signature is not None:
headers.update({header.BALANCE_SIGNATURE: channel.last_signature})
amount_sent = data.balance - channel.balance
if amount_sent != 0 and amount_sent != price:
headers[header.INVALID_AMOUNT] = 1
# if difference is 0, it will be handled by channel manager
return True, headers
# set the headers to reflect actual state of a channel
try:
self.channel_manager.register_payment(
channel.sender,
data.open_block_number,
data.balance,
data.balance_signature)
except (InvalidBalanceAmount, InvalidBalanceProof):
# balance sent to the proxy is less than in the previous proof
return True, headers
# all ok, return premium content
return False, headers
# when are these generated?
def generate_headers(self, price: int):
        """Generate basic headers that are sent back for every request"""
        assert price > 0
headers = {
header.GATEWAY_PATH: constants.API_PATH,
header.RECEIVER_ADDRESS: self.receiver_address,
header.CONTRACT_ADDRESS: self.contract_address,
header.TOKEN_ADDRESS: self.channel_manager.get_token_address(),
header.PRICE: price,
'Content-Type': 'application/json'
}
return headers
def reply_webui(self, reply_data='', headers: dict={}):
headers.update({
"Content-Type": "text/html",
})
reply = make_response(reply_data, 402, headers)
for k, v in headers.items():
if k.startswith('RDN-'):
reply.set_cookie(k, str(v))
return reply
def paywall_decorator(func):
"""Method decorator for Flask's Resource object. It magically makes
every method paywalled.
Example:
class MyPaywalledResource(Resource):
method_decorators = [paywall_decorator]
"""
@wraps(func)
def wrapper(*args, **kwargs):
self = func.__self__ # get instance of the bound method
return self.paywall.access(
self,
func,
*args,
**kwargs
)
return wrapper
|
keepercommander/plugins/windows/windows.py | Mkn-yskz/Commandy | 151 | 36346 | # -*- coding: utf-8 -*-
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# <NAME>
# Copyright 2015 Keeper Security Inc.
# Contact: <EMAIL>
#
import logging
import subprocess
import re
def rotate(record, newpassword):
""" Grab any required fields from the record """
i = subprocess.call(["net", "user", record.login, newpassword], shell=True)
if i == 0:
logging.info('Password changed successfully')
        record.password = newpassword
return True
logging.error('Password change failed')
    return False
def adjust(newpassword):
# the characters below mess with windows command line
return re.sub('[<>&|]', '', newpassword)
|
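An invocation sketch for the plugin above. FakeRecord and the candidate password are assumptions, and a real call shells out to `net user` on Windows, so treat this purely as an illustration of the record interface rotate() expects.

class FakeRecord:
    """Stand-in exposing the attributes rotate() reads and writes."""
    login = 'localadmin'
    password = ''

rec = FakeRecord()
candidate = adjust('N3w<Pass>&word|2024')   # strips the shell-hostile <>&| characters

if rotate(rec, candidate):                  # would run: net user localadmin N3wPassword2024
    print('record updated to:', rec.password)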
corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | akashkj/commcare-hq | 471 | 36356 | <gh_stars>100-1000
from datetime import datetime, timedelta
from dimagi.utils import parsing as dateparse
from casexml.apps.stock.consumption import (
ConsumptionConfiguration,
compute_daily_consumption_from_transactions,
)
to_ts = dateparse.json_format_datetime
now = datetime.utcnow()
def ago(days):
return now - timedelta(days=days)
# note that you must add inferred consumption transactions manually to txdata
def mock_consumption(txdata, window, params=None):
default_params = {'min_window': 0, 'min_periods': 0}
params = params or {}
default_params.update(params)
config = ConsumptionConfiguration(**default_params)
return compute_daily_consumption_from_transactions(
txdata,
ago(window),
config,
)
|
script/run_WOA.py | cyy111/metaheuristics | 104 | 36416 | <gh_stars>100-1000
from models.multiple_solution.swarm_based.WOA import BaseWOA, BaoWOA
from utils.FunctionUtil import square_function
## Setting parameters
root_paras = {
"problem_size": 30,
"domain_range": [-1, 1],
"print_train": True,
"objective_func": square_function
}
woa_paras = {
"epoch": 100,
"pop_size": 250
}
## Run model
md = BaoWOA(root_algo_paras=root_paras, woa_paras=woa_paras)
md._train__()
|
setup.py | CentryPlan/dataclassframe | 321 | 36418 | #!/usr/bin/env python3
"""
Based on template: https://github.com/FedericoStra/cython-package-example
"""
from setuptools import setup
with open("requirements.txt") as fp:
install_requires = fp.read().strip().split("\n")
with open("requirements_dev.txt") as fp:
dev_requires = fp.read().strip().split("\n")
setup(
install_requires=install_requires,
extras_require={
"dev": dev_requires,
"docs": ["sphinx", "sphinx-rtd-theme"]
}
)
|
apps/events/views.py | seanlefevre/openduty | 145 | 36422 | from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from schedule.models import Calendar
from schedule.views import CreateEventView, EditEventView, EventMixin
from apps.events.forms import CustomEventForm
class CustomCreateEventView(CreateEventView):
form_class = CustomEventForm
template_name = 'event/edit.html'
def get_context_data(self, **kwargs):
context = super(CustomCreateEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
extra_context = {
"calendar": calendar,
}
context.update(extra_context)
return context
def form_valid(self, form):
super(CustomCreateEventView, self).form_valid(form)
        messages.success(self.request, 'Event created successfully.')
return HttpResponseRedirect(
reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
)
class CustomUpdateEventView(EditEventView):
form_class = CustomEventForm
template_name = 'event/edit.html'
def get_context_data(self, **kwargs):
context = super(CustomUpdateEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
extra_context = {
"calendar": calendar,
}
context.update(extra_context)
return context
def form_valid(self, form):
super(CustomUpdateEventView, self).form_valid(form)
        messages.success(self.request, 'Event edited successfully.')
return HttpResponseRedirect(
reverse('calendar_details', kwargs={'calendar_slug': self.kwargs.get('calendar_slug')})
)
class CustomDeleteEventView(LoginRequiredMixin, EventMixin, DeleteView):
"""Delete Event"""
template_name = 'event/delete.html'
def get_success_url(self):
return reverse('calendar_details', args=[self.kwargs.get('calendar_slug')])
def get_context_data(self, **kwargs):
context = super(CustomDeleteEventView, self).get_context_data(**kwargs)
calendar = get_object_or_404(Calendar, slug=self.kwargs.get('calendar_slug'))
context.update(
{
'event': self.object,
'calendar': calendar
}
)
return context
|
scale/product/apps.py | kaydoh/scale | 121 | 36429 | """Defines the application configuration for the product application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class ProductConfig(AppConfig):
"""Configuration for the product application"""
name = 'product'
label = 'product'
verbose_name = 'Product'
def ready(self):
"""Registers the product implementations with other applications."""
from job.configuration.data.data_file import DATA_FILE_STORE
from product.configuration.product_data_file import ProductDataFileStore
# Register product files for the data file store
DATA_FILE_STORE['DATA_FILE_STORE'] = ProductDataFileStore()
|
tests/integrations/java/test_JDK__verify.py | pybee/briefcase | 522 | 36464 | <reponame>pybee/briefcase
import os
import shutil
import subprocess
import sys
from pathlib import Path
from unittest import mock
import pytest
from requests import exceptions as requests_exceptions
from briefcase.console import Log
from briefcase.exceptions import BriefcaseCommandError, MissingToolError, NetworkFailure
from briefcase.integrations.java import JDK
from tests.utils import FsPathMock
@pytest.fixture
def test_command(tmp_path):
command = mock.MagicMock()
command.logger = Log()
command.tools_path = tmp_path / "tools"
# Mock environ.get returning no explicit JAVA_HOME
command.os.environ.get = mock.MagicMock(return_value="")
return command
def test_macos_tool_java_home(test_command, capsys):
"""On macOS, the /usr/libexec/java_home utility is checked."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock 2 calls to check_output.
test_command.subprocess.check_output.side_effect = [
"/path/to/java",
"javac 1.8.0_144\n",
]
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
test_command.subprocess.check_output.assert_has_calls(
[
# First call is to /usr/lib/java_home
mock.call(
["/usr/libexec/java_home"],
stderr=subprocess.STDOUT,
),
# Second is a call to verify a valid Java version
mock.call(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
]
)
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_macos_tool_failure(test_command, tmp_path, capsys):
"""On macOS, if the libexec tool fails, the Briefcase JDK is used."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock a failed call on the libexec tool
test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
returncode=1, cmd="/usr/libexec/java_home"
)
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "Contents" / "Home" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java" / "Contents" / "Home"
test_command.subprocess.check_output.assert_has_calls(
[
# First call is to /usr/lib/java_home
mock.call(
["/usr/libexec/java_home"],
stderr=subprocess.STDOUT,
),
]
)
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_macos_provided_overrides_tool_java_home(test_command, capsys):
"""On macOS, an explicit JAVA_HOME overrides /usr/libexec/java_home."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac. libexec won't be invoked.
test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
# A single call to check output
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_valid_provided_java_home(test_command, capsys):
"""If a valid JAVA_HOME is provided, it is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
# A single call to check output
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_invalid_jdk_version(test_command, tmp_path, capsys):
"""If the JDK pointed to by JAVA_HOME isn't a Java 8 JDK, the briefcase JDK
is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "javac 14\n"
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_no_javac(test_command, tmp_path, capsys):
"""If the JAVA_HOME doesn't point to a location with a bin/javac, the
briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/nowhere")
# Mock return value from javac failing because executable doesn't exist
test_command.subprocess.check_output.side_effect = FileNotFoundError
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JAVA_HOME should point at the Briefcase-provided JDK
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/nowhere/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_javac_error(test_command, tmp_path, capsys):
"""If javac can't be executed, the briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac failing because executable doesn't exist
test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
returncode=1, cmd="/path/to/java/bin/javac"
)
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_unparseable_javac_version(test_command, tmp_path, capsys):
"""If the javac version can't be parsed, the briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "NONSENSE\n"
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
@pytest.mark.parametrize(
("host_os, jdk_url, jhome"),
[
(
"Darwin",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_mac_hotspot_8u242b08.tar.gz",
"java/Contents/Home",
),
(
"Linux",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
"java",
),
(
"Windows",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_windows_hotspot_8u242b08.zip",
"java",
),
],
)
def test_successful_jdk_download(
test_command, tmp_path, capsys, host_os, jdk_url, jhome
):
"""If needed, a JDK can be downloaded."""
# Mock host OS
test_command.host_os = host_os
# Mock a JAVA_HOME that won't exist
# This is only needed to make macOS *not* run /usr/libexec/java_home
test_command.os.environ.get = mock.MagicMock(return_value="/does/not/exist")
# Mock the cached download path
    # Consider removing this if block when we drop py3.7 support; only the else branch will be needed.
    # MagicMock below py3.8 doesn't have an __fspath__ attribute.
if sys.version_info < (3, 8):
archive = FsPathMock("/path/to/download.zip")
else:
archive = mock.MagicMock()
archive.__fspath__.return_value = "/path/to/download.zip"
test_command.download_url.return_value = archive
# Create a directory to make it look like Java was downloaded and unpacked.
(tmp_path / "tools" / "jdk8u242-b08").mkdir(parents=True)
# Invoke the verify call
jdk = JDK.verify(command=test_command)
assert jdk.java_home == tmp_path / "tools" / jhome
# Console output contains a warning about the bad JDK location
output = capsys.readouterr()
assert output.err == ""
assert "** WARNING: JAVA_HOME does not point to a Java 8 JDK" in output.out
# Download was invoked
test_command.download_url.assert_called_with(
url=jdk_url,
download_path=tmp_path / "tools",
)
# The archive was unpacked
# TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
test_command.shutil.unpack_archive.assert_called_with(
"/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
)
# The original archive was deleted
archive.unlink.assert_called_once_with()
def test_not_installed(test_command, tmp_path):
"""If the JDK isn't installed, and install isn't requested, an error is
raised."""
# Mock host OS
test_command.host_os = "Linux"
# Invoke the verify call. Install is not requested, so this will fail.
with pytest.raises(MissingToolError):
JDK.verify(command=test_command, install=False)
# Download was not invoked
assert test_command.download_url.call_count == 0
def test_jdk_download_failure(test_command, tmp_path):
"""If an error occurs downloading the JDK, an error is raised."""
# Mock Linux as the host
test_command.host_os = "Linux"
# Mock a failure on download
test_command.download_url.side_effect = requests_exceptions.ConnectionError
# Invoking verify_jdk causes a network failure.
with pytest.raises(NetworkFailure):
JDK.verify(command=test_command)
# That download was attempted
test_command.download_url.assert_called_with(
url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
download_path=tmp_path / "tools",
)
# No attempt was made to unpack the archive
assert test_command.shutil.unpack_archive.call_count == 0
def test_invalid_jdk_archive(test_command, tmp_path):
"""If the JDK download isn't a valid archive, raise an error."""
# Mock Linux as the host
test_command.host_os = "Linux"
# Mock the cached download path
    # Consider removing this if block when we drop py3.7 support; only the else branch will be needed.
    # MagicMock below py3.8 doesn't have an __fspath__ attribute.
if sys.version_info < (3, 8):
archive = FsPathMock("/path/to/download.zip")
else:
archive = mock.MagicMock()
archive.__fspath__.return_value = "/path/to/download.zip"
test_command.download_url.return_value = archive
# Mock an unpack failure due to an invalid archive
test_command.shutil.unpack_archive.side_effect = shutil.ReadError
with pytest.raises(BriefcaseCommandError):
JDK.verify(command=test_command)
# The download occurred
test_command.download_url.assert_called_with(
url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
download_path=tmp_path / "tools",
)
# An attempt was made to unpack the archive.
# TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
test_command.shutil.unpack_archive.assert_called_with(
"/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
)
# The original archive was not deleted
assert archive.unlink.call_count == 0
|