max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
program/object-detection-tf-py/tensorRT_hooks.py | G4V/ck-tensorflow | 108 | 12771393 | '''
Hooks for using TensorRT with the object detection program.
Names and parameters are defined as required by the detect.py infrastructure.
'''
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
def load_graph_tensorrt(params):
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.gfile.GFile(params["FROZEN_GRAPH"], 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
trt_graph = trt.create_inference_graph(
input_graph_def=graph_def,
outputs=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'],
max_batch_size=params["BATCH_SIZE"],
max_workspace_size_bytes=4000000000,
is_dynamic_op=(params["TENSORRT_DYNAMIC"] == 1),
precision_mode=params["TENSORRT_PRECISION"]
)
tf.import_graph_def(
trt_graph,
return_elements=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'])
## No longer needed: the conversion below is now a no-op.
def convert_from_tensorrt(tmp_output_dict ):
return tmp_output_dict
### Tensor names differ from the usual TF names, but they can be retrieved, and a dict with the same shape as the original one can be built, which avoids any conversion after the postprocessing.
# Note that for the TF session the names alone are enough; there is no real need to fetch the tensor objects.
def get_handles_to_tensors_RT():
graph = tf.get_default_graph()
tensor_dict = {}
tensor_dict['num_detections'] = graph.get_tensor_by_name('import/num_detections:0')
tensor_dict['detection_classes']=graph.get_tensor_by_name( 'import/detection_classes:0')
tensor_dict['detection_boxes'] = graph.get_tensor_by_name('import/detection_boxes:0')
tensor_dict['detection_scores'] = graph.get_tensor_by_name('import/detection_scores:0')
image_tensor =graph.get_tensor_by_name('import/image_tensor:0')
return tensor_dict, image_tensor
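# A minimal usage sketch (assumption: `params` and `image_batch` are supplied by the
# detect.py infrastructure; both names are illustrative, not defined in this file):
#
#   load_graph_tensorrt(params)
#   tensor_dict, image_tensor = get_handles_to_tensors_RT()
#   with tf.compat.v1.Session() as sess:
#       output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_batch})
#       output_dict = convert_from_tensorrt(output_dict)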
|
experiments/distributed.py | DeNeutoy/flowseq | 256 | 12771399 |
import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import json
import signal
import threading
import torch
from flownmt.data import NMTDataSet
import experiments.options as options
from experiments.nmt import main as single_process_main
def create_dataset(args):
model_path = args.model_path
if not os.path.exists(model_path):
os.makedirs(model_path)
result_path = os.path.join(model_path, 'translations')
if not os.path.exists(result_path):
os.makedirs(result_path)
vocab_path = os.path.join(model_path, 'vocab')
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
data_path = args.data_path
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
params = json.load(open(args.config, 'r'))
src_max_vocab = params['{}_vocab_size'.format(src_lang)]
tgt_max_vocab = params['{}_vocab_size'.format(tgt_lang)]
NMTDataSet(data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=True)
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
nproc_per_node = args_dict.pop('nproc_per_node')
nnodes = args_dict.pop('nnodes')
node_rank = args_dict.pop('node_rank')
# world size in terms of number of processes
dist_world_size = nproc_per_node * nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ
current_env["MASTER_ADDR"] = args_dict.pop('master_addr')
current_env["MASTER_PORT"] = str(args_dict.pop('master_port'))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
if create_vocab:
create_dataset(args)
args.create_vocab = False
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for local_rank in range(0, nproc_per_node):
# each process's rank
dist_rank = nproc_per_node * node_rank + local_rank
args.rank = dist_rank
args.local_rank = local_rank
process = mp.Process(target=run, args=(args, error_queue, ), daemon=True)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
def run(args, error_queue):
try:
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in child processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill child processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
main()
|
__init__.py | gszy/kicad-boardview | 116 | 12771408 | import pcbnew
import os
from .pcbnew2boardview import convert
class Pcbnew2Boardview(pcbnew.ActionPlugin):
def defaults(self):
self.name = "Pcbnew to Boardview"
self.category = "Read PCB"
self.description = "Generate Boardview file from KiCad pcb."
def Run(self):
kicad_pcb = pcbnew.GetBoard()
with open(kicad_pcb.GetFileName().replace('.kicad_pcb', '.brd'), 'wt') as brd_file:
convert(kicad_pcb, brd_file)
plugin = Pcbnew2Boardview()
plugin.register()
|
tests/test_utils.py | localuser2/pre-commit-hooks | 164 | 12771421 |
#!/usr/bin/env python3
import difflib
import os
import re
import shutil
import subprocess as sp
import sys
import pytest
test_file_strs = {
"ok.c": '// Copyright 2021 <NAME>\n#include <stdio.h>\n\nint main() {\n printf("Hello World!\\n");\n return 0;\n}\n',
"ok.cpp": '// Copyright 2021 <NAME>\n#include <iostream>\n\nint main() {\n std::cout << "Hello World!\\n";\n return 0;\n}\n',
"err.c": "#include <stdio.h>\nint main(){int i;return;}",
"err.cpp": "#include <string>\nint main(){int i;return;}",
}
def assert_equal(expected: bytes, actual: bytes):
    """Stand-in for Python's assert, which is annoying to work with."""
actual = actual.replace(b"\r", b"") # ignore windows file ending differences
if expected != actual:
print(f"\n\nExpected:`{expected}`")
print(f"\n\nActual__:`{actual}`")
if isinstance(expected, bytes) and isinstance(actual, bytes):
expected_str = expected.decode()
actual_str = actual.decode()
print("String comparison:", expected_str == actual_str)
diff_lines_gen = difflib.context_diff(expected_str, actual_str, "Expected", "Actual")
diff_lines = "".join(list(diff_lines_gen))
print(f"\n\nDifference:\n{diff_lines}")
else:
print(f"Expected is type {type(expected)}\nActual is type {type(actual)}")
pytest.fail("Test failed!")
def get_versions():
"""Returns a dict of commands and their versions."""
commands = ["clang-format", "clang-tidy", "uncrustify", "cppcheck", "cpplint"]
if os.name != "nt": # oclint doesn't work on windows, iwyu needs to be compiled on windows
commands += ["oclint", "include-what-you-use"]
# Regex for all versions. Unit tests: https://regex101.com/r/rzJE0I/1
regex = r"[- ]((?:\d+\.)+\d+[_+\-a-z\d]*)(?![\s\S]*OCLint version)"
versions = {}
for cmd in commands:
if not shutil.which(cmd):
sys.exit("Command " + cmd + " not found.")
cmds = [cmd, "--version"]
child = sp.run(cmds, stdout=sp.PIPE, stderr=sp.PIPE)
if len(child.stderr) > 0:
print(f"Received error when running {cmds}:\n{child.stderr}")
sys.exit(1)
output = child.stdout.decode("utf-8")
try:
versions[cmd] = re.search(regex, output).group(1)
except AttributeError:
print(f"Received `{output}`. Version regexes have broken.")
print("Please file a bug (github.com/pocc/pre-commit-hooks).")
sys.exit(1)
return versions
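# Illustrative check of what the version regex above captures (the sample strings are
# assumed, not recorded tool output):
#
#   re.search(regex, "clang-format version 13.0.0").group(1)   # -> '13.0.0'
#   re.search(regex, "Cppcheck 2.5").group(1)                  # -> '2.5'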
# Required for testing with clang-tidy and oclint
def set_compilation_db(filenames):
"""Create a compilation database for clang static analyzers."""
cdb = "["
clang_location = shutil.which("clang")
file_dir = os.path.dirname(os.path.abspath(filenames[0]))
for f in filenames:
file_base = os.path.basename(f)
clang_suffix = ""
if f.endswith("cpp"):
clang_suffix = "++"
cdb += """\n{{
"directory": "{0}",
"command": "{1}{2} {3} -o {3}.o",
"file": "{3}"
}},""".format(
file_dir, clang_location, clang_suffix, os.path.join(file_dir, file_base)
)
cdb = cdb[:-1] + "]" # Subtract extra comma and end json
# Required for clang-tidy
if os.name == "nt":
cdb = cdb.replace("\\", "\\\\").replace("Program Files", 'Program\\" \\"Files')
with open(os.path.join(file_dir, "compile_commands.json"), "w") as f:
f.write(cdb)
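# For reference, each generated entry looks roughly like this (paths are assumed,
# e.g. clang at /usr/bin/clang and a test file at /tmp/err.cpp):
#
#   {
#       "directory": "/tmp",
#       "command": "/usr/bin/clang++ /tmp/err.cpp -o /tmp/err.cpp.o",
#       "file": "/tmp/err.cpp"
#   }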
def set_git_identity():
"""Set a git identity if one does not exist."""
sp_child = sp.run(["git", "config", "--list"], stdout=sp.PIPE)
if "user.name" not in sp_child.stdout.decode() or "user.email" not in sp_child.stdout.decode():
sp.run(["git", "config", "--global", "user.name", "Test Runner"])
sp.run(["git", "config", "--global", "user.email", "<EMAIL>"])
sp.run(["git", "config", "--global", "init.defaultbranch", "master"])
def run_in(commands, tmpdir):
sp_child = sp.run(commands, cwd=tmpdir, stdout=sp.PIPE, stderr=sp.PIPE)
if sp_child.returncode != 0:
err_msg = (
f"commands {commands} failed with\nstdout: {sp_child.stdout.decode()}stderr: {sp_child.stderr.decode()}\n"
)
pytest.fail(err_msg)
def integration_test(cmd_name, files, args, test_dir):
for test_file in files:
test_file_base = os.path.split(test_file)[-1]
if test_file_base in test_file_strs:
with open(test_file, "w") as fd:
fd.write(test_file_strs[test_file_base])
# Add only the files we are testing
run_in(["git", "reset"], test_dir)
run_in(["git", "add"] + files, test_dir)
args = list(args)  # copy so the caller's list isn't mutated later
pre_commit_config_path = os.path.join(test_dir, ".pre-commit-config.yaml")
pre_commit_config = f"""\
repos:
- repo: https://github.com/pocc/pre-commit-hooks
rev: v1.3.4
hooks:
- id: {cmd_name}
args: {args}
"""
with open(pre_commit_config_path, "w") as f:
f.write(pre_commit_config)
# Pre-commit run will only work on staged files, which is what we want to test
# Using git commit can cause hangs if pre-commit passes
sp_child = sp.run(["pre-commit", "run"], cwd=test_dir, stdout=sp.PIPE, stderr=sp.PIPE)
output_actual = sp_child.stderr + sp_child.stdout
# Get rid of pre-commit first run info lines
output_actual = re.sub(rb"\[(?:INFO|WARNING)\].*\n", b"", output_actual)
# Output is unpredictable and platform/version dependent
if any([f.endswith("err.cpp") for f in files]) and "-std=c++20" in args:
output_actual = re.sub(rb"[\d,]+ warnings and ", b"", output_actual)
return output_actual, sp_child.returncode
|
jsonrpc/tests/test_backend_django/settings.py | DMantis/json-rpc | 409 | 12771439 | SECRET_KEY = 'secret'
ROOT_URLCONF = 'jsonrpc.tests.test_backend_django.urls'
ALLOWED_HOSTS = ['testserver']
DATABASE_ENGINE = 'django.db.backends.sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
JSONRPC_MAP_VIEW_ENABLED = True
|
mmdet2trt/core/__init__.py | tehkillerbee/mmdetection-to-tensorrt | 496 | 12771450 |
from .anchor import * # noqa: F401,F403
from .bbox import * # noqa: F401,F403
from .post_processing import * # noqa: F401,F403
|
data/text/tokenizer.py | anh/TransformerTTS | 894 | 12771488 | from typing import Union
import re
from phonemizer.phonemize import phonemize
from data.text.symbols import all_phonemes, _punctuations
class Tokenizer:
def __init__(self, start_token='>', end_token='<', pad_token='/', add_start_end=True, alphabet=None,
model_breathing=True):
if not alphabet:
self.alphabet = all_phonemes
else:
self.alphabet = sorted(list(set(alphabet))) # for testing
self.idx_to_token = {i: s for i, s in enumerate(self.alphabet, start=1)}
self.idx_to_token[0] = pad_token
self.token_to_idx = {s: [i] for i, s in self.idx_to_token.items()}
self.vocab_size = len(self.alphabet) + 1
self.add_start_end = add_start_end
if add_start_end:
self.start_token_index = len(self.alphabet) + 1
self.end_token_index = len(self.alphabet) + 2
self.vocab_size += 2
self.idx_to_token[self.start_token_index] = start_token
self.idx_to_token[self.end_token_index] = end_token
self.model_breathing = model_breathing
if model_breathing:
self.breathing_token_index = self.vocab_size
self.token_to_idx[' '] = self.token_to_idx[' '] + [self.breathing_token_index]
self.vocab_size += 1
self.breathing_token = '@'
self.idx_to_token[self.breathing_token_index] = self.breathing_token
self.token_to_idx[self.breathing_token] = [self.breathing_token_index]
def __call__(self, sentence: str) -> list:
sequence = [self.token_to_idx[c] for c in sentence] # No filtering: text should only contain known chars.
sequence = [item for items in sequence for item in items]
if self.model_breathing:
sequence = [self.breathing_token_index] + sequence
if self.add_start_end:
sequence = [self.start_token_index] + sequence + [self.end_token_index]
return sequence
def decode(self, sequence: list) -> str:
return ''.join([self.idx_to_token[int(t)] for t in sequence])
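# A small round-trip sketch with a toy alphabet (illustrative only; real use relies on
# `all_phonemes` from data.text.symbols):
#
#   tok = Tokenizer(alphabet=list('abc '), model_breathing=False)
#   tok('abc')                    # -> [5, 2, 3, 4, 6]  (start, 'a', 'b', 'c', end)
#   tok.decode([5, 2, 3, 4, 6])   # -> '>abc<'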
class Phonemizer:
def __init__(self, language: str, with_stress: bool, njobs=4):
self.language = language
self.njobs = njobs
self.with_stress = with_stress
self.special_hyphen = '—'
self.punctuation = ';:,.!?¡¿—…"«»“”'
self._whitespace_re = re.compile(r'\s+')
self._whitespace_punctuation_re = re.compile(f'\s*([{_punctuations}])\s*')
def __call__(self, text: Union[str, list], with_stress=None, njobs=None, language=None) -> Union[str, list]:
language = language or self.language
njobs = njobs or self.njobs
with_stress = with_stress or self.with_stress
# phonemizer does not like hyphens.
text = self._preprocess(text)
phonemes = phonemize(text,
language=language,
backend='espeak',
strip=True,
preserve_punctuation=True,
with_stress=with_stress,
punctuation_marks=self.punctuation,
njobs=njobs,
language_switch='remove-flags')
return self._postprocess(phonemes)
def _preprocess_string(self, text: str):
text = text.replace('-', self.special_hyphen)
return text
def _preprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._preprocess_string(t) for t in text]
elif isinstance(text, str):
return self._preprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
def _collapse_whitespace(self, text: str) -> str:
text = re.sub(self._whitespace_re, ' ', text)
return re.sub(self._whitespace_punctuation_re, r'\1', text)
def _postprocess_string(self, text: str) -> str:
text = text.replace(self.special_hyphen, '-')
text = ''.join([c for c in text if c in all_phonemes])
text = self._collapse_whitespace(text)
text = text.strip()
return text
def _postprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._postprocess_string(t) for t in text]
elif isinstance(text, str):
return self._postprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
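# Usage sketch (assumes the espeak backend is installed; the exact phoneme output depends
# on the installed espeak version, so no literal result is shown here):
#
#   phonemizer = Phonemizer(language='en-us', with_stress=False)
#   phonemizer('Hello world.')   # -> a phoneme string for the sentence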
|
openelex/tests/test_transform_registry.py | Mpopoma/oe-core | 156 | 12771497 | from unittest import TestCase
from mock import Mock
from openelex.base.transform import registry
class TestTransformRegistry(TestCase):
def test_register_with_validators(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
mock_validator1 = Mock(return_value=None)
mock_validator1.__name__ = 'mock_validator1'
mock_validator2 = Mock(return_value=None)
mock_validator2.__name__ = 'mock_validator2'
validators = [mock_validator1, mock_validator2]
registry.register("XX", mock_transform, validators)
transform = registry.get("XX", "mock_transform")
self.assertEqual(list(transform.validators.values()), validators)
transform()
mock_transform.assert_called_once_with()
def test_register_raw(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
registry.register("XX", mock_transform, raw=True)
transform = registry.get("XX", "mock_transform", raw=True)
transform()
mock_transform.assert_called_once_with()
|
usaspending_api/references/models/overall_totals.py | g4brielvs/usaspending-api | 217 | 12771509 | from django.db import models
class OverallTotals(models.Model):
id = models.AutoField(primary_key=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
update_date = models.DateTimeField(auto_now=True, null=True)
fiscal_year = models.IntegerField(blank=True, null=True)
total_budget_authority = models.DecimalField(max_digits=23, decimal_places=2, blank=True, null=True)
class Meta:
managed = True
db_table = "overall_totals"
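# Example ORM usage sketch (the fiscal year value is hypothetical, not from this module):
#
#   totals = OverallTotals.objects.filter(fiscal_year=2017).first()
#   if totals is not None:
#       print(totals.total_budget_authority)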
|
Validation/RecoB/test/validation_customJet_cfg.py | ckamtsikis/cmssw | 852 | 12771511 | from __future__ import print_function
# The following comments couldn't be translated into the new config version:
#! /bin/env cmsRun
import FWCore.ParameterSet.Config as cms
process = cms.Process("validation")
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing ('analysis')
# load the full reconstruction configuration, to make sure we're getting all needed dependencies
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
options.register ('jets',
"ak4PFJetsCHS", # default value, examples : "ak4PFJets", "ak4PFJetsCHS"
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"jet collection to use")
options.parseArguments()
whichJets = options.jets
applyJEC = True
corrLabel = "ak4PFCHS"
from Configuration.AlCa.GlobalTag import GlobalTag
tag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
useTrigger = False
triggerPath = "HLT_PFJet80_v*"
runOnMC = True
#Flavour plots for MC: "all" = plots for all jets ; "dusg" = plots for d, u, s, dus, g independently ; not mandatory and any combinations are possible
#b, c, light (dusg), non-identified (NI), PU jets plots are always produced
flavPlots = "allbcldusg"
###prints###
print("jet collection asked : ", whichJets)
print("JEC applied?", applyJEC, ", correction:", corrLabel)
print("trigger will be used ? : ", useTrigger, ", Trigger paths:", triggerPath)
print("is it MC ? : ", runOnMC, ", Flavours:", flavPlots)
print("Global Tag : ", tag.globaltag)
############
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("JetMETCorrections.Configuration.JetCorrectors_cff")
process.load("CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi")
process.load("RecoJets.JetAssociationProducers.ak4JTA_cff")
process.load("RecoBTag.Configuration.RecoBTag_cff")
process.load("PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi")
process.load("PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi")
process.load("PhysicsTools.JetMCAlgos.CaloJetsMCFlavour_cfi")
process.load("PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi")
process.JECseq = cms.Sequence(getattr(process,corrLabel+"L1FastL2L3CorrectorChain"))
newjetID=cms.InputTag(whichJets)
process.ak4JetFlavourInfos.jets = newjetID
process.ak4JetFlavourInfos.hadronFlavourHasPriority = cms.bool(True)
process.AK4byRef.jets = newjetID
if not "ak4PFJetsCHS" in whichJets:
process.ak4JetTracksAssociatorAtVertexPF.jets = newjetID
process.pfImpactParameterTagInfos.jets = newjetID
process.softPFMuonsTagInfos.jets = newjetID
process.softPFElectronsTagInfos.jets = newjetID
process.patJetGenJetMatch.src = newjetID
process.btagSequence = cms.Sequence(
process.ak4JetTracksAssociatorAtVertexPF *
process.btagging
)
process.jetSequences = cms.Sequence(process.goodOfflinePrimaryVertices * process.btagSequence)
###
print("inputTag : ", process.ak4JetTracksAssociatorAtVertexPF.jets)
###
if runOnMC:
process.flavourSeq = cms.Sequence(
process.selectedHadronsAndPartons *
process.ak4JetFlavourInfos
)
process.load("Validation.RecoB.bTagAnalysis_cfi")
process.bTagValidation.jetMCSrc = 'ak4JetFlavourInfos'
if "Calo" in whichJets:
process.bTagValidation.caloJetMCSrc = 'AK4byValAlgo'
process.bTagValidation.useOldFlavourTool = True
process.flavourSeq = cms.Sequence(
process.myPartons *
process.AK4Flavour
)
process.bTagValidation.applyPtHatWeight = False
process.bTagValidation.doJetID = True
process.bTagValidation.doJEC = applyJEC
process.bTagValidation.JECsourceMC = cms.InputTag(corrLabel+"L1FastL2L3Corrector")
process.bTagValidation.flavPlots = flavPlots
process.bTagHarvestMC.flavPlots = flavPlots
#process.bTagValidation.ptRecJetMin = cms.double(20.)
process.bTagValidation.genJetsMatched = cms.InputTag("patJetGenJetMatch")
process.bTagValidation.doPUid = cms.bool(True)
process.ak4GenJetsForPUid = cms.EDFilter("GenJetSelector",
src = cms.InputTag("ak4GenJets"),
cut = cms.string('pt > 8.'),
filter = cms.bool(False)
)
process.patJetGenJetMatch.matched = cms.InputTag("ak4GenJetsForPUid")
process.patJetGenJetMatch.maxDeltaR = cms.double(0.25)
process.patJetGenJetMatch.resolveAmbiguities = cms.bool(True)
else:
process.load("DQMOffline.RecoB.bTagAnalysisData_cfi")
process.bTagAnalysis.doJEC = applyJEC
process.bTagAnalysis.JECsourceData = cms.InputTag(corrLabel+"L1FastL2L3ResidualCorrector")
process.JECseq *= (getattr(process,corrLabel+"ResidualCorrector") * getattr(process,corrLabel+"L1FastL2L3ResidualCorrector"))
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring()
)
from HLTrigger.HLTfilters.hltHighLevel_cfi import *
if useTrigger:
process.bTagHLT = hltHighLevel.clone(TriggerResultsTag = "TriggerResults::HLT", HLTPaths = ["HLT_PFJet40_v*"])
process.bTagHLT.HLTPaths = [triggerPath]
if runOnMC:
process.dqmSeq = cms.Sequence(process.ak4GenJetsForPUid * process.patJetGenJetMatch * process.flavourSeq * process.bTagValidation * process.bTagHarvestMC * process.dqmSaver)
else:
process.dqmSeq = cms.Sequence(process.bTagAnalysis * process.bTagHarvest * process.dqmSaver)
if useTrigger:
process.plots = cms.Path(process.bTagHLT * process.JECseq * process.jetSequences * process.dqmSeq)
else:
process.plots = cms.Path(process.JECseq * process.jetSequences * process.dqmSeq)
process.dqmEnv.subSystemFolder = 'BTAG'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.workflow = '/POG/BTAG/BJET'
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd =cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.PoolSource.fileNames = [
]
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.GlobalTag = tag
|
datasets/TGS_salt/TGSDataset.py | liaopeiyuan/ml-arsenal-public | 280 | 12771560 | from dependencies import *
IMAGE_HEIGHT, IMAGE_WIDTH = 101, 101
HEIGHT, WIDTH = 128, 128
DY0, DY1, DX0, DX1 = \
compute_center_pad(IMAGE_HEIGHT, IMAGE_WIDTH, factor=32)
#----------------------------------------
def null_augment(image, mask, index):
    cache = Struct(image=image.copy(), mask=mask.copy())
    return image, mask, index, cache
def null_collate(batch):
batch_size = len(batch)
cache = []
input = []
truth = []
index = []
for b in range(batch_size):
input.append(batch[b][0])
truth.append(batch[b][1])
index.append(batch[b][2])
cache.append(batch[b][3])
input = torch.from_numpy(np.array(input)).float().unsqueeze(1)
if truth[0]!=[]:
truth = torch.from_numpy(np.array(truth)).float().unsqueeze(1)
return input, truth, index, cache
#----------------------------------------
class TGSDataset(Dataset):
def __init__(self, split, augment=null_augment, mode='train'):
super(TGSDataset, self).__init__()
self.split = split
self.mode = mode
self.augment = augment
split_file = CODE + '/datasets/TGS_salt/splits/' + split
lines = read_list_from_file(split_file)
self.ids = []
self.images = []
for l in lines:
folder, name = l.split('/')
image_file = DATA + '/' + folder + '/images/' + name +'.png'
image = cv2.imread(image_file,cv2.IMREAD_GRAYSCALE).astype(np.float32)/255
self.images.append(image)
self.ids.append(name)
#print(image.shape)
self.masks = []
if self.mode in ['train','valid']:
for l in lines:
folder, file = l.split('/')
mask_file = DATA + '/' + folder + '/masks/' + file +'.png'
mask = cv2.imread(mask_file,cv2.IMREAD_GRAYSCALE).astype(np.float32)/255
self.masks.append(mask)
elif self.mode in ['test']:
self.masks = [[] for l in lines]
#-------
df = pd.read_csv(DATA + '/depths.csv')
df = df.set_index('id')
self.zs = df.loc[self.ids].z.values
#-------
print('\tTGSDataset')
print('\tsplit = %s'%split)
print('\tlen(self.images) = %d'%len(self.images))
print('')
def __getitem__(self, index):
image = self.images[index]
mask = self.masks[index]
return self.augment(image, mask, index)
def __len__(self):
return len(self.images)
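# Usage sketch with a PyTorch DataLoader (assumes DATA/CODE and the split file exist, and
# that DataLoader is available via `from dependencies import *`, as Dataset is; the batch
# size is arbitrary):
#
#   loader = DataLoader(TGSDataset('list_train0_3600', augment=null_augment, mode='train'),
#                       batch_size=8, shuffle=True, collate_fn=null_collate)
#   for input, truth, index, cache in loader:
#       pass  # input: (8, 1, 101, 101) float tensor; truth: matching mask tensor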
def run_check_data():
dataset = TGSDataset('list_train0_3600', mode='train') #
#--
zz=0
zero = np.zeros((101,101),np.uint8)
save_dir = CODE+'/datasets/TGS_salt/demo'
num = len(dataset)
for m in [3,5,6,7,8,9,10,11,12]:
image = dataset.images[m]
mask = dataset.masks [m]
cv2.imshow('image',image)
#image_show_norm('image',image,1, 2)
#image_show_norm('mask', mask,1, 2)
for i in range(5):
#image1, mask1 = do_random_pad_to_factor2(image, mask, limit=(-4,4), factor=32)
#image1, mask1 = do_horizontal_flip2(image, mask)
mask1 = mask
#image1 = do_invert_intensity(image)
#image1 = do_brightness_shift(image, np.random.uniform(-0.125,0.125))
#image1 = do_brightness_multiply(image, np.random.uniform(1-0.125,1+0.125))
image1 = do_gamma(image, np.random.uniform(1-0.25,1+0.25))
#-----------------------------------------------
image1 = (image1*255).astype(np.uint8)
image1 = np.dstack([ image1, image1, image1])
#overlay1 = draw_mask_overlay(mask1, image1, color=[0,0,255])
#image_show('overlay1',overlay1,2)
#image_show('image1',image1,2)
#image_show_norm('mask1',mask1,1, 2)
#cv2.waitKey(0)
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
run_check_data()
|
community/dm-scaffolder/configs.py | shan2202/deploymentmanager-samples | 930 | 12771561 |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ruamel.yaml import YAML
class Config:
yaml = YAML()
def __init__(self, path):
self.path = path
f = open(path, "r")
self.configs = self.yaml.load(f.read())
f.close()
def update_folders(self, folders):
self.configs['folders_list_cache'] = folders
print "let's write"
with open(self.path, 'w') as yf:
self.yaml.dump(self.configs, stream=yf)
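# Usage sketch (the config path and folder IDs are hypothetical; the YAML file must
# already exist because the constructor reads it):
#
#   config = Config('dm-scaffolder-config.yaml')
#   config.update_folders(['folders/1234', 'folders/5678'])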
|
spirit/user/migrations/0009_auto_20161114_1850.py | Ke-xueting/Spirit | 974 | 12771562 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 18:50
from django.db import migrations
def user_model_content_type(apps, schema_editor):
from ...core.conf import settings
if not hasattr(settings, 'AUTH_USER_MODEL'):
return
user = apps.get_model(settings.AUTH_USER_MODEL)
if user._meta.db_table == 'spirit_user_user':
app_label, model = settings.AUTH_USER_MODEL.split('.')
content_types = apps.get_model('contenttypes.ContentType')
(content_types.objects
.filter(
app_label='spirit_user',
model='User'.lower())
.update(
app_label=app_label,
model=model.lower()))
class Migration(migrations.Migration):
dependencies = [
('spirit_user', '0008_auto_20161114_1707'),
]
operations = [
migrations.RunPython(user_model_content_type),
]
|
pyxb/bundles/opengis/examples/demo.py | eLBati/pyxb | 123 | 12771565 | from __future__ import print_function
import pyxb.bundles.opengis.gml as gml
dv = gml.DegreesType(32, direction='N')
print(dv.toDOM(element_name='degrees').toxml("utf-8"))
|
music21/stream/streamStatus.py | cuthbertLab/music21 | 1,449 | 12771595 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: streamStatus.py
# Purpose: functionality for reporting on the notational status of streams
#
# Authors: <NAME>
#
# Copyright: Copyright © 2013 <NAME> and the music21
# Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
import unittest
from music21 import environment
from music21 import common
from music21.common.objects import SlottedObjectMixin
environLocal = environment.Environment(__file__)
# -----------------------------------------------------------------------------
class StreamStatus(SlottedObjectMixin):
'''
An object that stores the current notation state for the client stream.
Separates out tasks such as whether notation has been made, etc.
>>> s = stream.Stream()
>>> ss = s.streamStatus
>>> ss
<music21.stream.streamStatus.StreamStatus object at 0x...>
>>> s.streamStatus.client is s
True
Copying of StreamStatus and surrounding Streams
>>> import copy
>>> ss2 = copy.deepcopy(ss)
>>> ss2.client is None
True
>>> s2 = copy.deepcopy(s)
>>> s2.streamStatus
<music21.stream.streamStatus.StreamStatus object at 0x...>
>>> s2.streamStatus is ss
False
>>> s2.streamStatus.client is s2
True
'''
# CLASS VARIABLES #
__slots__ = (
'_accidentals',
'_beams',
'_client',
'_concertPitch',
'_dirty',
'_enharmonics',
'_measures',
'_ornaments',
'_rests',
'_ties',
'_tuplets',
)
# INITIALIZER #
def __init__(self, client=None):
self._client = None
self._accidentals = None
self._beams = None
self._concertPitch = None
self._dirty = None
self._enharmonics = None
self._measures = None
self._ornaments = None
self._rests = None
self._ties = None
self._tuplets = None
self.client = client
# SPECIAL METHODS #
def __deepcopy__(self, memo=None):
'''
Manage deepcopying by creating a new reference to the same object.
leaving out the client
'''
new = type(self)()
for x in self.__slots__:
if x == '_client':
new._client = None
else:
setattr(new, x, getattr(self, x))
return new
# unwrap weakref for pickling
def __getstate__(self):
self._client = common.unwrapWeakref(self._client)
return SlottedObjectMixin.__getstate__(self)
def __setstate__(self, state):
SlottedObjectMixin.__setstate__(self, state)
self._client = common.wrapWeakref(self._client)
# PUBLIC METHODS #
def haveAccidentalsBeenMade(self):
'''
If Accidentals.displayStatus is None for all contained pitches, it is
assumed that accidentals have not been set for display and/or
makeAccidentals has not been run. If any Accidental has displayStatus
other than None, this method returns True, regardless of if
makeAccidentals has actually been run.
'''
for p in self.client.pitches:
if p.accidental is not None:
if p.accidental.displayStatus is not None:
return True
return False
def haveBeamsBeenMade(self):
'''
If any Note in this Stream has .beams defined, it is assumed that Beams
have been set and/or makeBeams has been run. If any Beams
exist, this method returns True, regardless of if makeBeams has
actually been run.
'''
for n in self.client.recurse(classFilter=('NotRest',), restoreActiveSites=False):
if n.beams is not None and n.beams.beamsList:
return True
return False
def haveTupletBracketsBeenMade(self):
'''
If any GeneralNote in this Stream is a tuplet, check whether any of them
has a first Tuplet whose type is not None; if so, return True.
Otherwise return False if at least one tuplet exists, or None if there
are no Tuplets at all.
>>> s = stream.Stream()
>>> s.streamStatus.haveTupletBracketsBeenMade() is None
True
>>> s.append(note.Note())
>>> s.streamStatus.haveTupletBracketsBeenMade() is None
True
>>> n = note.Note(quarterLength=1/3)
>>> s.append(n)
>>> s.streamStatus.haveTupletBracketsBeenMade()
False
>>> n.duration.tuplets[0].type = 'start'
>>> s.streamStatus.haveTupletBracketsBeenMade()
True
'''
foundTuplet = False
for n in self.client.recurse(classFilter='GeneralNote', restoreActiveSites=False):
if n.duration.tuplets:
foundTuplet = True
if n.duration.tuplets[0].type is not None:
return True
if foundTuplet:
return False
else:
return None
# PUBLIC PROPERTIES #
@property
def client(self):
return common.unwrapWeakref(self._client)
@client.setter
def client(self, client):
# client is the Stream that this status lives on
self._client = common.wrapWeakref(client)
@property
def accidentals(self):
if self._accidentals is None:
self._accidentals = self.haveAccidentalsBeenMade()
return self._accidentals
@accidentals.setter
def accidentals(self, expr):
if expr is not None:
self._accidentals = bool(expr)
else:
self._accidentals = None
@property
def beams(self):
if self._beams is None:
self._beams = self.haveBeamsBeenMade()
return self._beams
@beams.setter
def beams(self, expr):
if expr is not None:
self._beams = bool(expr)
else:
self._beams = None
@property
def tuplets(self):
if self._tuplets is None:
self._tuplets = self.haveTupletBracketsBeenMade()
# If there were no tuplet durations,
# tuplet brackets don't need to be made.
if self._tuplets is None:
self._tuplets = True
return self._tuplets
@tuplets.setter
def tuplets(self, expr):
if expr is not None:
self._tuplets = bool(expr)
else:
self._tuplets = None
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
'''
Note: most Stream tests are found in stream.tests
'''
def testHaveBeamsBeenMadeAfterDeepcopy(self):
import copy
from music21 import stream
from music21 import note
m = stream.Measure()
c = note.Note('C4', type='quarter')
m.append(c)
d1 = note.Note('D4', type='eighth')
d2 = note.Note('D4', type='eighth')
m.append([d1, d2])
e3 = note.Note('E4', type='eighth')
e4 = note.Note('E4', type='eighth')
m.append([e3, e4])
d1.beams.append('start')
d2.beams.append('stop')
self.assertTrue(m.streamStatus.haveBeamsBeenMade())
mm = copy.deepcopy(m)
self.assertTrue(mm.streamStatus.haveBeamsBeenMade())
mm.streamStatus.beams = False
mmm = copy.deepcopy(mm)
self.assertFalse(mmm.streamStatus.beams)
# m.show()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import music21
music21.mainTest(Test)
|
dcase_util/containers/mapping.py | ankitshah009/dcase_util | 122 | 12771597 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from six import iteritems
import os
import csv
from dcase_util.containers import DictContainer
from dcase_util.utils import FileFormat
class OneToOneMappingContainer(DictContainer):
"""Mapping container class for 1:1 data mapping, inherited from DictContainer class."""
valid_formats = [FileFormat.CSV, FileFormat.TXT, FileFormat.CPICKLE] #: Valid file formats
def __init__(self, *args, **kwargs):
# Run DictContainer init
DictContainer.__init__(self, *args, **kwargs)
super(OneToOneMappingContainer, self).__init__(*args, **kwargs)
def load(self, filename=None):
"""Load file
Parameters
----------
filename : str, optional
File path
Default value filename given to class constructor
Raises
------
ImportError:
Error if file format specific module cannot be imported
IOError:
File does not exists or has unknown file format
Returns
-------
self
"""
if filename:
self.filename = filename
self.detect_file_format()
self.validate_format()
dict.clear(self)
if self.exists():
from dcase_util.files import Serializer
if self.format == FileFormat.TXT or self.format == FileFormat.CSV:
map_data = {}
with open(self.filename, 'rtU') as f:
for row in csv.reader(f, delimiter=self.delimiter()):
if len(row) == 2:
map_data[row[0]] = row[1]
dict.update(self, map_data)
elif self.format == FileFormat.CPICKLE:
dict.update(self, Serializer.load_cpickle(filename=self.filename))
else:
message = '{name}: Unknown format [{format}]'.format(name=self.__class__.__name__, format=self.filename)
self.logger.exception(message)
raise IOError(message)
else:
message = '{name}: File does not exists [{file}]'.format(name=self.__class__.__name__, file=self.filename)
self.logger.exception(message)
raise IOError(message)
# Check if after load function is defined, call if found
if hasattr(self, '_after_load'):
self._after_load()
return self
def save(self, filename=None):
"""Save file
Parameters
----------
filename : str, optional
File path
Default value filename given to class constructor
Raises
------
ImportError:
Error if file format specific module cannot be imported
IOError:
File has unknown file format
Returns
-------
self
"""
if filename:
self.filename = filename
self.detect_file_format()
self.validate_format()
if self.filename is None or self.filename == '':
message = '{name}: Filename is empty [{filename}]'.format(
name=self.__class__.__name__,
filename=self.filename
)
self.logger.exception(message)
raise IOError(message)
try:
from dcase_util.files import Serializer
if self.format == FileFormat.CSV or self.format == FileFormat.TXT:
delimiter = ','
with open(self.filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=delimiter)
for key, value in iteritems(self):
if key not in ['filename']:
csv_writer.writerow((key, value))
elif self.format == FileFormat.CPICKLE:
Serializer.save_cpickle(filename=self.filename, data=dict(self))
else:
message = '{name}: Unknown format [{format}]'.format(name=self.__class__.__name__, format=self.filename)
self.logger.exception(message)
raise IOError(message)
except KeyboardInterrupt:
os.remove(self.filename) # Delete the file, since most likely it was not saved fully
raise
# Check if after save function is defined, call if found
if hasattr(self, '_after_save'):
self._after_save()
return self
@property
def flipped(self):
"""Exchange map key and value pairs.
Returns
-------
OneToOneMappingContainer
flipped map
"""
return OneToOneMappingContainer(dict((v, k) for k, v in iteritems(self)))
def map(self, key, default=None):
"""Map with a key.
Parameters
----------
key : str or number
Mapping key
default : str or number
Default value to be returned if key does not exists in the mapping container.
Returns
-------
OneToOneMappingContainer
flipped map
"""
if key in self:
return self[key]
else:
return default
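# Usage sketch (an in-memory mapping with illustrative labels):
#
#   mapping = OneToOneMappingContainer({'car': 'vehicle', 'tram': 'rail'})
#   mapping.map('car')              # -> 'vehicle'
#   mapping.map('bike', 'unknown')  # -> 'unknown' (key not present)
#   mapping.flipped.map('rail')     # -> 'tram'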
|
desktop/core/ext-py/python-ldap-2.3.13/setup.py | kokosing/hue | 5,079 | 12771619 |
"""
setup.py - Setup package with the help of Python's DistUtils
See http://www.python-ldap.org/ for details.
$Id: setup.py,v 1.65 2009/10/21 17:32:11 stroeder Exp $
"""
has_setuptools = False
try:
from setuptools import setup, Extension
has_setuptools = True
except ImportError:
from distutils.core import setup, Extension
from ConfigParser import ConfigParser
import sys,os,string,time
##################################################################
# Weird Hack to grab release version of python-ldap from local dir
##################################################################
exec_startdir = os.path.dirname(os.path.abspath(sys.argv[0]))
package_init_file_name = reduce(os.path.join,[exec_startdir,'Lib','ldap','__init__.py'])
f = open(package_init_file_name,'r')
s = f.readline()
while s:
s = string.strip(s)
if s[0:11]=='__version__':
version = eval(string.split(s,'=')[1])
break
s = f.readline()
f.close()
#-- A class describing the features and requirements of OpenLDAP 2.0
class OpenLDAP2:
library_dirs = []
include_dirs = []
extra_compile_args = []
extra_link_args = []
extra_objects = []
libs = ['ldap', 'lber']
defines = [ ]
extra_files = []
LDAP_CLASS = OpenLDAP2
#-- Read the [_ldap] section of setup.cfg
cfg = ConfigParser()
cfg.read('setup.cfg')
if cfg.has_section('_ldap'):
for name in dir(LDAP_CLASS):
if cfg.has_option('_ldap', name):
print name + ': ' + cfg.get('_ldap', name)
setattr(LDAP_CLASS, name, string.split(cfg.get('_ldap', name)))
for i in range(len(LDAP_CLASS.defines)):
LDAP_CLASS.defines[i]=((LDAP_CLASS.defines[i],None))
for i in range(len(LDAP_CLASS.extra_files)):
destdir, origfiles = string.split(LDAP_CLASS.extra_files[i], ':')
origfileslist = string.split(origfiles, ',')
LDAP_CLASS.extra_files[i]=(destdir, origfileslist)
#-- Let distutils/setuptools do the rest
name = 'python-ldap'
# Python 2.3.6+ and setuptools are needed to build eggs, so
# let's handle setuptools' additional keyword arguments to
# setup() in a fashion that doesn't break compatibility to
# distutils. This still allows 'normal' builds where either
# Python > 2.3.5 or setuptools (or both ;o) are not available.
kwargs = dict()
if has_setuptools:
kwargs = dict(
include_package_data = True,
install_requires = ['setuptools'],
zip_safe = False)
setup(
#-- Package description
name = name,
version = version,
description = 'Various LDAP-related Python modules',
author = '<NAME>, <NAME>, et al.',
author_email = '<EMAIL>',
url = 'http://www.python-ldap.org/',
#-- C extension modules
ext_modules = [
Extension(
'_ldap',
[
'Modules/LDAPObject.c',
'Modules/ldapcontrol.c',
'Modules/common.c',
'Modules/constants.c',
'Modules/errors.c',
'Modules/functions.c',
'Modules/schema.c',
'Modules/ldapmodule.c',
'Modules/message.c',
'Modules/version.c',
'Modules/options.c',
'Modules/berval.c',
],
libraries = LDAP_CLASS.libs,
include_dirs = ['Modules'] + LDAP_CLASS.include_dirs,
library_dirs = LDAP_CLASS.library_dirs,
extra_compile_args = LDAP_CLASS.extra_compile_args,
extra_link_args = LDAP_CLASS.extra_link_args,
extra_objects = LDAP_CLASS.extra_objects,
runtime_library_dirs = (not sys.platform.startswith("win"))*LDAP_CLASS.library_dirs,
define_macros = LDAP_CLASS.defines + \
('ldap_r' in LDAP_CLASS.libs or 'oldap_r' in LDAP_CLASS.libs)*[('HAVE_LIBLDAP_R',None)] + \
('sasl' in LDAP_CLASS.libs or 'sasl2' in LDAP_CLASS.libs or 'libsasl' in LDAP_CLASS.libs)*[('HAVE_SASL',None)] + \
('ssl' in LDAP_CLASS.libs and 'crypto' in LDAP_CLASS.libs)*[('HAVE_TLS',None)] + \
[('LDAPMODULE_VERSION', version)]
),
],
#-- Python "stand alone" modules
py_modules = [
'ldapurl',
'ldif',
'dsml',
'ldap',
'ldap.async',
'ldap.controls',
'ldap.cidict',
'ldap.dn',
'ldap.filter',
'ldap.functions',
'ldap.ldapobject',
'ldap.modlist',
'ldap.resiter',
'ldap.sasl',
'ldap.schema',
'ldap.schema.models',
'ldap.schema.subentry',
'ldap.schema.tokenizer',
],
package_dir = {'': 'Lib',},
data_files = LDAP_CLASS.extra_files,
**kwargs
)
|
python/367_Valid_Perfect_Square.py | dvlpsh/leetcode-1 | 4,416 | 12771629 | class Solution(object):
# def isPerfectSquare(self, num):
# """
# :type num: int
# :rtype: bool
# """
# i = 1
# while num > 0:
# num -= i
# i += 2
# return num == 0
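    # The commented-out approach above relies on the identity 1 + 3 + 5 + ... + (2k-1) = k*k,
    # so repeatedly subtracting successive odd numbers reaches exactly 0 iff num is a
    # perfect square, e.g. 16 - 1 - 3 - 5 - 7 = 0.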
def isPerfectSquare(self, num):
low, high = 1, num
while low <= high:
mid = (low + high) / 2
mid_square = mid * mid
if mid_square == num:
return True
elif mid_square < num:
low = mid + 1
else:
high = mid - 1
return False
# def isPerfectSquare(self, num):
# x = num
# while x * x > num:
# x = (x + num / x) / 2
# return x * x == num
|
corehq/apps/sms/management/commands/change_phonenumber_backend.py | dimagilg/commcare-hq | 471 | 12771663 |
import os
import sys
from collections import defaultdict
from django.core.management.base import BaseCommand
import csv
from corehq.util.log import with_progress_bar
from ...models import PhoneNumber, SQLMobileBackend
from ...util import clean_phone_number
class Command(BaseCommand):
help = "Reassign phone numbers with old backend id to new backend id"
def add_arguments(self, parser):
parser.add_argument("old_backend", help="Old backend ID")
parser.add_argument("--new-backend", help=(
"New backend ID. Dry-run if this option is absent. Use 'None' "
"to clear the old backend without specifying a new backend; "
"the phone number will use the domain/system default backend."
))
parser.add_argument("--domain", help="Limit to phone numbers in domain.")
parser.add_argument("--dump-csv",
help="Dump phone numbers to CSV file path "
"(the path is the value given for this option).")
def handle(self, old_backend, new_backend=None, domain=None, **options):
query = PhoneNumber.objects.filter(backend_id=old_backend)
if domain is not None:
query = query.filter(domain=domain)
if options["dump_csv"]:
dump_csv(query, options["dump_csv"])
print_counts_by_default_backend(query)
print("Total assigned to {}: {}".format(old_backend, len(query)))
if new_backend:
reassign(query, new_backend)
def dump_csv(query, path):
path = os.path.expanduser(path)
print("dumping to CSV: {}".format(path))
with open(path, "w", encoding="utf-8") as output:
csvfile = csv.writer(output)
csvfile.writerow(["domain", "couch_id", "phonenumber"])
for phone in query:
csvfile.writerow([
phone.domain,
phone.couch_id,
phone.phone_number,
])
def print_counts_by_default_backend(query):
counts = defaultdict(int)
for phone in with_progress_bar(query, len(query), oneline=True):
default_backend = SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
clean_phone_number(phone.phone_number),
domain=phone.domain
)
counts[default_backend.name] += 1
print("Counts by default backend")
for default, count in sorted(counts.items()):
print("{:<25}{:>4}".format(default, count))
def reassign(query, new_backend):
if new_backend == "None":
new_backend = None
ok = confirm("Reassign to {}".format(new_backend))
if ok:
updated = query.update(backend_id=new_backend)
print("{} phone numbers updated".format(updated))
else:
print("abort")
sys.exit(1)
def confirm(msg):
return input(msg + " (y/N) ").lower() == 'y'
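# Example invocations (backend IDs and domain are hypothetical; flags match add_arguments above):
#
#   ./manage.py change_phonenumber_backend OLD_BACKEND_ID                                        # dry run + counts
#   ./manage.py change_phonenumber_backend OLD_BACKEND_ID --new-backend NEW_BACKEND_ID --domain example
#   ./manage.py change_phonenumber_backend OLD_BACKEND_ID --dump-csv ~/numbers.csv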
|
external/plex/dist/tests/test10.py | almartin82/bayeslite | 964 | 12771667 |
# Test traditional regular expression syntax.
import Test
from Plex.Traditional import re
from Plex.Errors import PlexError
from Plex import Seq, AnyBut
def test_err(s):
try:
print re(s)
except PlexError, e:
print e
print re("")
print re("a")
print re("[a]")
print re("[ab]")
print re("[abc]")
print re("[a-c]")
print re("[a-cd]")
print re("[a-cg-i]")
print re("[^a]")
print re("[^a-cg-i]")
print re("[-]")
print re("[-abc]")
print re("[abc-]")
print re("[]]")
print re("[]-]")
print re("[^-]")
print re("[^-abc]")
print re("[^abc-]")
print re("[^]]")
print re("[^]-]")
print re("a*")
print re("a+")
print re("a?")
print re("a*+?")
print re("ab")
print re("a|b")
print re("abcde")
print re("a|b|c|d|e")
print re("abc|def|ghi")
print re("abc(def|ghi)")
print re("ab\(c\[de")
print re("^abc$")
print str(re(".")) == str(Seq(AnyBut('\n')))
test_err("abc(de")
test_err("abc[de")
test_err("abc)de")
|
lomond/events.py | johnashu/dataplicity-lomond | 225 | 12771668 | from __future__ import unicode_literals
import json
import time
class Event(object):
"""Base class for a websocket 'event'."""
__slots__ = ['received_time']
def __init__(self):
self.received_time = time.time()
def __repr__(self):
return "{}()".format(self.__class__.__name__)
@classmethod
def _summarize_bytes(cls, data, max_len=24):
"""Avoid spamming logs by truncating byte strings in repr."""
if len(data) > max_len:
return "{!r} + {} bytes".format(
data[:max_len],
len(data) - max_len
)
return repr(data)
@classmethod
def _summarize_text(cls, text, max_len=24):
"""Avoid spamming logs by truncating text."""
if len(text) > max_len:
return "{!r} + {} chars".format(
text[:max_len],
len(text) - max_len
)
return repr(text)
class Poll(Event):
"""A generated poll event."""
name = 'poll'
class Connecting(Event):
"""
Generated prior to establishing a websocket connection to a server.
:param url: The websocket URL the websocket is connecting to.
"""
__slots__ = ['url']
name = 'connecting'
def __init__(self, url):
self.url = url
super(Connecting, self).__init__()
def __repr__(self):
return "{}(url='{}')".format(self.__class__.__name__, self.url)
class ConnectFail(Event):
"""
Generated when Lomond was unable to connect to a websocket server.
:param reason: A short description of the reason for the
failure.
:type reason: str
"""
__slots__ = ['reason']
name = 'connect_fail'
def __init__(self, reason):
self.reason = reason
super(ConnectFail, self).__init__()
def __repr__(self):
return "{}(reason='{}')".format(
self.__class__.__name__,
self.reason,
)
class Connected(Event):
"""Generated when Lomond has connected to a server but not yet
negotiated the websocket upgrade.
:param str url: The websocket URL connected to.
:param str proxy: The proxy URL connected to (or None).
"""
__slots__ = ['url', 'proxy']
name = 'connected'
def __init__(self, url, proxy=None):
self.url = url
self.proxy = proxy
super(Connected, self).__init__()
def __repr__(self):
_class = self.__class__.__name__
return (
"{}(url='{}')".format(_class, self.url)
if self.proxy is None else
"{}(url='{}', proxy='{}')".format(
_class, self.url, self.proxy
)
)
class Rejected(Event):
"""Server rejected WS connection."""
__slots__ = ['response', 'reason']
name = 'rejected'
def __init__(self, response, reason):
"""
Generated when Lomond is connected to the server, but the
websocket upgrade failed.
:param response: The response returned by the server.
:param str reason: A description of why the connection was
rejected.
"""
self.response = response
self.reason = reason
super(Rejected, self).__init__()
def __repr__(self):
return "{}(response={!r}, reason='{}')".format(
self.__class__.__name__,
self.response,
self.reason
)
class Ready(Event):
"""Generated when Lomond has connected to the server,
and successfully negotiated the websocket upgrade.
:param response: A :class:`~lomond.response.Response` object.
:param str protocol: A websocket protocol or ``None`` if no protocol
was supplied.
:param set extensions: A set of negotiated websocket extensions.
Currently only the ``'permessage-deflate'`` extension is supported.
"""
__slots__ = ['response', 'protocol', 'extensions']
name = 'ready'
def __init__(self, response, protocol, extensions):
self.response = response
self.protocol = protocol
self.extensions = extensions
super(Ready, self).__init__()
def __repr__(self):
return '{}(response={!r}, protocol={!r}, extensions={!r})'.format(
self.__class__.__name__,
self.response,
self.protocol,
self.extensions
)
class ProtocolError(Event):
"""Generated when the server deviates from the protocol.
:param str error: A description of the error.
:param bool critical: Indicates if the error is considered
'critical'. If ``True``, Lomond will disconnect immediately.
If ``False``, Lomond will send a close message to the server.
"""
__slots__ = ['error', 'critical']
name = 'protocol_error'
def __init__(self, error, critical):
self.error = error
self.critical = critical
super(ProtocolError, self).__init__()
def __repr__(self):
return "{}(error='{}', critical={!r})".format(
self.__class__.__name__,
self.error,
self.critical
)
class Unresponsive(Event):
    """The server has not responded to pings within `ping_timeout`
seconds.
Will be followed by a :class:`~lomond.events.Disconnected` event.
"""
name = 'unresponsive'
class Disconnected(Event):
"""Generated when a websocket connection has
been dropped.
:param str reason: A description of why the websocket was closed.
:param bool graceful: Flag indicating if the connection was dropped
gracefully (`True`), or disconnected due to a socket failure
(`False`) or other problem.
"""
__slots__ = ['graceful', 'reason']
name = 'disconnected'
def __init__(self, reason='closed', graceful=False):
self.reason = reason
self.graceful = graceful
super(Disconnected, self).__init__()
def __repr__(self):
return "{}(reason='{}', graceful={!r})".format(
self.__class__.__name__,
self.reason,
self.graceful
)
class Closed(Event):
"""Generated when the websocket was closed. The websocket may no
longer send packets after this event has been received. This event
will be followed by :class:`~lomond.events.Disconnected`.
:param code: The closed code returned from the server.
:param str reason: An optional description why the websocket was
closed, as returned from the server.
"""
__slots__ = ['code', 'reason']
name = 'closed'
def __init__(self, code, reason):
self.code = code
self.reason = reason
super(Closed, self).__init__()
def __repr__(self):
return '{}(code={!r}, reason={!r})'.format(
self.__class__.__name__,
self.code,
self.reason,
)
class Closing(Event):
"""Generated when the server is closing the connection.
No more messages will be received from the server, but you may still
send messages while handling this event. A
:class:`~lomond.events.Disconnected` event should be generated
shortly after this event.
:param code: The closed code returned from the server.
:param str reason: An optional description why the websocket was
closed, as returned from the server.
"""
__slots__ = ['code', 'reason']
name = 'closing'
def __init__(self, code, reason):
self.code = code
self.reason = reason
super(Closing, self).__init__()
def __repr__(self):
return '{}(code={!r}, reason={!r})'.format(
self.__class__.__name__,
self.code,
self.reason,
)
class UnknownMessage(Event):
"""
An application message was received, with an unknown opcode.
"""
__slots__ = ['message']
name = 'unknown'
def __init__(self, message):
self.message = message
super(UnknownMessage, self).__init__()
class Ping(Event):
"""Generated when Lomond received a ping packet from the server.
:param bytes data: Ping payload data.
"""
__slots__ = ['data']
name = 'ping'
def __init__(self, data):
self.data = data
super(Ping, self).__init__()
def __repr__(self):
return "{}(data={!r})".format(self.__class__.__name__, self.data)
class Pong(Event):
"""Generated when Lomond receives a pong packet from the server.
:param bytes data: The pong payload data.
"""
__slots__ = ['data']
name = 'pong'
def __init__(self, data):
self.data = data
super(Pong, self).__init__()
def __repr__(self):
return "{}(data={!r})".format(self.__class__.__name__, self.data)
class Text(Event):
"""Generated when Lomond receives a text message from the server.
:param str text: The text payload.
"""
__slots__ = ['text', '_json']
name = 'text'
def __init__(self, text):
self.text = text
self._json = None
super(Text, self).__init__()
@property
def json(self):
"""Text decoded as JSON.
Calls ``json.loads`` to decode the ``text`` attribute, and may
throw the same exceptions if the text is not valid json.
"""
if self._json is None:
self._json = json.loads(self.text)
return self._json
def __repr__(self):
return "{}(text={})".format(
self.__class__.__name__,
self._summarize_text(self.text)
)
class Binary(Event):
"""Generated when Lomond receives a binary message from the server.
:param bytes data: The binary payload.
"""
__slots__ = ['data']
name = 'binary'
def __init__(self, data):
self.data = data
super(Binary, self).__init__()
def __repr__(self):
return "{}(data={})".format(
self.__class__.__name__,
self._summarize_bytes(self.data)
)
class BackOff(Event):
"""Generated when a persistent connection has to wait before re-
attempting a connection.
:param float delay: The delay (in seconds) before Lomond will re-
attempt to connect.
"""
__slots__ = ['delay']
name = 'back_off'
def __init__(self, delay):
self.delay = delay
super(BackOff, self).__init__()
def __repr__(self):
return "{}(delay={:0.1f})".format(
self.__class__.__name__,
self.delay
)
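# Typical consumption sketch (the URL is illustrative; WebSocket comes from the lomond package):
#
#   from lomond import WebSocket
#   websocket = WebSocket('wss://echo.example.org')
#   for event in websocket:
#       if event.name == 'text':
#           print(event.json)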
|
Calibration/IsolatedParticles/test/python/proto_runIsolatedTracksNxNNzsData_cfg.py | ckamtsikis/cmssw | 852 | 12771683 | import FWCore.ParameterSet.Config as cms
process = cms.Process("L1SKIM")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100000
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
####################### configure pool source #############################
process.source = cms.Source("PoolSource",
fileNames =cms.untracked.vstring(
'/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root'
),
skipEvents = cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
##################### digi-2-raw plus L1 emulation #########################
process.load("Configuration.StandardSequences.Services_cff")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
#################### Conditions and L1 menu ################################
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag=autoCond['run1_data']
############ Skim the events according to the L1 seeds ####################
#select on HLT_HcalNZS_8E29 trigger
import HLTrigger.HLTfilters.hltLevel1GTSeed_cfi
process.skimL1Seeds = HLTrigger.HLTfilters.hltLevel1GTSeed_cfi.hltLevel1GTSeed.clone()
process.skimL1Seeds.L1GtReadoutRecordTag = cms.InputTag("gtDigis")
process.skimL1Seeds.L1GtObjectMapTag = cms.InputTag("hltL1GtObjectMap")
process.skimL1Seeds.L1CollectionsTag = cms.InputTag("l1extraParticles")
process.skimL1Seeds.L1MuonCollectionTag = cms.InputTag("l1extraParticles")
process.skimL1Seeds.L1SeedsLogicalExpression = "L1_SingleEG2 OR L1_SingleEG5 OR L1_SingleEG8 OR L1_SingleEG10 OR L1_SingleEG12 OR L1_SingleEG15 OR L1_SingleEG20 OR L1_SingleIsoEG5 OR L1_SingleIsoEG8 OR L1_SingleIsoEG10 OR L1_SingleIsoEG12 OR L1_SingleIsoEG15 OR L1_SingleJet6U OR L1_SingleJet10U OR L1_SingleJet20U OR L1_SingleJet30U OR L1_SingleJet40U OR L1_SingleJet50U OR L1_SingleJet60U OR L1_SingleTauJet10U OR L1_SingleTauJet20U OR L1_SingleTauJet30U OR L1_SingleTauJet50U OR L1_SingleMuOpen OR L1_SingleMu0 OR L1_SingleMu3 OR L1_SingleMu5 OR L1_SingleMu7 OR L1_SingleMu10 OR L1_SingleMu14 OR L1_SingleMu20 OR L1_ZeroBias"
# select on HLT_HcalPhiSym trigger
process.load("HLTrigger.HLTfilters.hltLevel1Activity_cfi")
process.hltLevel1Activity.L1GtReadoutRecordTag = cms.InputTag('gtDigis')
######################## Configure Analyzer ###############################
process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")
process.load("Calibration.IsolatedParticles.isolatedTracksNxN_cfi")
process.isolatedTracksNxN.Verbosity = cms.untracked.int32( 0 )
process.isolatedTracksNxN.HBHERecHitSource = cms.InputTag("hbhereco")
process.isolatedTracksNxN.L1TriggerAlgoInfo = True
#process.isolatedTracksNxN.DebugL1Info = True
process.isolatedTracksNxN_NZS = process.isolatedTracksNxN.clone(
Verbosity = cms.untracked.int32( 0 ),
HBHERecHitSource = cms.InputTag("hbherecoMB"),
L1TriggerAlgoInfo = True
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('IsolatedTracksNxNData.root')
)
# configure Technical Bits to ensure collision and remove BeamHalo
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND NOT (36 OR 37 OR 38 OR 39)')
# filter out scrapping events
process.noScraping= cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
# select on primary vertex
process.primaryVertexFilter = cms.EDFilter("GoodVertexFilter",
vertexCollection = cms.InputTag('offlinePrimaryVertices'),
minimumNDOF = cms.uint32(4) ,
maxAbsZ = cms.double(25.0),
maxd0 = cms.double(5.0)
)
#=============================================================================
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT')
)
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")
process.l1GtTrigReport.L1GtRecordInputTag = 'gtDigis'
process.l1GtTrigReport.PrintVerbosity = 1
#=============================================================================
#### by Benedikt
process.p1 = cms.Path(process.primaryVertexFilter * process.hltLevel1GTSeed * process.noScraping * process.skimL1Seeds *process.isolatedTracksNxN * process.isolatedTracksNxN_NZS)
process.e = cms.EndPath(process.l1GtTrigReport + process.hltTrigReport)
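# Illustrative note (assumption, not part of the original configuration): a cfg
# file like this is normally executed with the CMSSW driver, e.g.
#   cmsRun proto_runIsolatedTracksNxNNzsData_cfg.py
# which runs path p1 over the first 100 events of the input file and writes the
# histograms declared via TFileService to IsolatedTracksNxNData.root.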
|
build/plugins/lib/nots/package_manager/pnpm/tests/workspace.py | jochenater/catboost | 6,989 | 12771684 |
from build.plugins.lib.nots.package_manager.base import PackageJson
from build.plugins.lib.nots.package_manager.pnpm.workspace import PnpmWorkspace
def test_workspace_get_paths():
ws = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
ws.packages = set([".", "../bar", "../../another/baz"])
assert sorted(ws.get_paths()) == [
"/another/baz",
"/packages/bar",
"/packages/foo",
]
def test_workspace_set_from_package_json():
ws = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
pj = PackageJson(path="/packages/foo/package.json")
pj.data = {
"dependencies": {
"@a/bar": "workspace:../bar",
},
"devDependencies": {
"@a/baz": "workspace:../../another/baz",
},
"peerDependencies": {
"@a/qux": "workspace:../../another/qux",
},
"optionalDependencies": {
"@a/quux": "workspace:../../another/quux",
}
}
ws.set_from_package_json(pj)
assert sorted(ws.get_paths()) == [
"/another/baz",
"/another/quux",
"/another/qux",
"/packages/bar",
"/packages/foo",
]
def test_workspace_merge():
ws1 = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
ws1.packages = set([".", "../bar", "../../another/baz"])
ws2 = PnpmWorkspace(path="/another/baz/pnpm-workspace.yaml")
ws2.packages = set([".", "../qux"])
ws1.merge(ws2)
assert sorted(ws1.get_paths()) == [
"/another/baz",
"/another/qux",
"/packages/bar",
"/packages/foo",
]
|
tests/test_processor.py | alisaifee/jira-cli | 125 | 12771687 |
import unittest
import mock
from jiracli.interface import build_parser, cli
class AddCommandTests(unittest.TestCase):
def test_issue_type_parsing(self):
"Previously, calling this would raise an exception on python3"
with mock.patch("jiracli.interface.print_output"):
with mock.patch("jiracli.interface.prompt") as prompt:
with mock.patch("jiracli.interface.initialize") as init:
init().get_issue_types.return_value = {'story': 1}
cli("new title --type story --project FOO --description bar".split(" "))
|
mmf/datasets/processors/video_processors.py | dk25021999/mmf | 1,928 | 12771749 | # Copyright (c) Facebook, Inc. and its affiliates.
# TODO: Once internal torchvision transforms become stable either in torchvision
# or in pytorchvideo, move to use those transforms.
import random
import mmf.datasets.processors.functional as F
import torch
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_processor("video_random_crop")
class VideoRandomCrop(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
super().__init__()
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
@staticmethod
def get_params(vid, output_size):
"""Get parameters for ``crop`` for a random crop.
"""
h, w = vid.shape[-2:]
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, vid):
i, j, h, w = self.get_params(vid, self.size)
return F.video_crop(vid, i, j, h, w)
@registry.register_processor("video_center_crop")
class VideoCenterCrop(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
super().__init__()
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
def __call__(self, vid):
return F.video_center_crop(vid, self.size)
@registry.register_processor("video_resize")
class VideoResize(BaseProcessor):
def __init__(self, *args, size=None, **kwargs):
if size is None:
raise TypeError("Parameter 'size' is required")
self.size = size
def __call__(self, vid):
return F.video_resize(vid, self.size)
@registry.register_processor("video_to_tensor")
class VideoToTensor(BaseProcessor):
def __init__(self, *args, **kwargs):
super().__init__()
pass
def __call__(self, vid):
return F.video_to_normalized_float_tensor(vid)
@registry.register_processor("video_normalize")
class VideoNormalize(BaseProcessor):
def __init__(self, mean=None, std=None, **kwargs):
super().__init__()
if mean is None and std is None:
raise TypeError("'mean' and 'std' params are required")
self.mean = mean
self.std = std
def __call__(self, vid):
return F.video_normalize(vid, self.mean, self.std)
@registry.register_processor("video_random_horizontal_flip")
class VideoRandomHorizontalFlip(BaseProcessor):
def __init__(self, p=0.5, **kwargs):
super().__init__()
self.p = p
def __call__(self, vid):
if random.random() < self.p:
return F.video_hflip(vid)
return vid
@registry.register_processor("video_pad")
class Pad(BaseProcessor):
def __init__(self, padding=None, fill=0, **kwargs):
super().__init__()
if padding is None:
raise TypeError("Parameter 'padding' is required")
self.padding = padding
self.fill = fill
def __call__(self, vid):
return F.video_pad(vid, self.padding, self.fill)
@registry.register_processor("truncate_or_pad")
class TruncateOrPad(BaseProcessor):
    # Truncate the input, or pad it with zeros, until the desired output size is reached.
def __init__(self, output_size=None, **kwargs):
super().__init__()
if output_size is None:
raise TypeError("Parameter 'output_size' is required")
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
if sample.shape[1] >= self.output_size:
return sample[0, : self.output_size]
else:
return torch.cat(
(sample[0, :], torch.zeros(1, self.output_size - sample.shape[1])),
axis=1,
)
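# --- Illustrative usage sketch (editorial addition) ---
# Composing a few of the processors above on a dummy clip. The (T, H, W, C)
# uint8 input layout is an assumption here; check the helpers in
# mmf.datasets.processors.functional for the exact convention they expect.
#   import torch
#   clip = torch.randint(0, 256, (16, 128, 171, 3), dtype=torch.uint8)
#   to_tensor = VideoToTensor()
#   resize = VideoResize(size=(112, 112))
#   normalize = VideoNormalize(mean=(0.43, 0.4, 0.37), std=(0.22, 0.22, 0.22))
#   out = normalize(resize(to_tensor(clip)))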
|
kanmail/server/mail/oauth_settings.py | frznvm0/Kanmail | 1,118 | 12771783 |
from kanmail.settings.hidden import get_hidden_value
OAUTH_PROVIDERS = {
'gmail': {
'auth_endpoint': 'https://accounts.google.com/o/oauth2/auth',
'token_endpoint': 'https://accounts.google.com/o/oauth2/token',
'profile_endpoint': 'https://www.googleapis.com/userinfo/v2/me',
'scope': 'https://mail.google.com https://www.googleapis.com/auth/userinfo.email',
'client_id': get_hidden_value('GOOGLE_OAUTH_CLIENT_ID'),
'client_secret': get_hidden_value('GOOGLE_OAUTH_CLIENT_SECRET'),
},
}
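# --- Illustrative sketch (editorial addition, not Kanmail's actual OAuth code) ---
# Exchanging an authorization code for tokens against the 'gmail' entry above,
# following the generic OAuth2 authorization-code flow; the redirect URI, the
# placeholder code and the use of ``requests`` are assumptions.
#   import requests
#   provider = OAUTH_PROVIDERS['gmail']
#   response = requests.post(provider['token_endpoint'], data={
#       'grant_type': 'authorization_code',
#       'code': '<authorization code from the consent redirect>',
#       'client_id': provider['client_id'],
#       'client_secret': provider['client_secret'],
#       'redirect_uri': '<registered redirect URI>',
#   })
#   tokens = response.json()  # expected to contain access_token / refresh_token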
|
bumblebee_status/modules/contrib/http_status.py | rosalogia/bumblebee-status | 1,089 | 12771805 | # pylint: disable=C0111,R0903
"""Display HTTP status code
Parameters:
* http__status.label: Prefix label (optional)
* http__status.target: Target to retrieve the HTTP status from
* http__status.expect: Expected HTTP status
contributed by `valkheim <https://github.com/valkheim>`_ - many thanks!
"""
from requests import head
import psutil
import core.module
import core.widget
import core.decorators
class Module(core.module.Module):
UNK = "UNK"
@core.decorators.every(seconds=30)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__label = self.parameter("label")
self.__target = self.parameter("target")
self.__expect = self.parameter("expect", "200")
def labelize(self, s):
if self.__label is None:
return s
return "{}: {}".format(self.__label, s)
def getStatus(self):
try:
res = head(self.__target)
except Exception as e:
print(e)
return self.UNK
else:
status = str(res.status_code)
return status
def getOutput(self):
if self.__status == self.__expect:
return self.labelize(self.__status)
else:
reason = " != {}".format(self.__expect)
return self.labelize("{}{}".format(self.__status, reason))
def output(self, widget):
return self.__output
def update(self):
self.__status = self.getStatus()
self.__output = self.getOutput()
def state(self, widget):
if self.__status == self.UNK:
return "warning"
if self.__status != self.__expect:
return "critical"
return self.__output
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
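# Illustrative configuration sketch (assumption): with bumblebee-status this
# module would typically be enabled and parameterised on the command line,
# using the parameter names documented in the docstring above, e.g.
#   bumblebee-status -m http_status \
#       -p http__status.label=API http__status.target=https://example.org \
#          http__status.expect=200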
|
python/GafferImageUI/ResampleUI.py | ddesmond/gaffer | 561 | 12771855 |
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import itertools
import Gaffer
import GafferImage
Gaffer.Metadata.registerNode(
GafferImage.Resample,
"description",
"""
Utility node used internally within GafferImage, but
not intended to be used directly by end users.
""",
plugs = {
"matrix" : [
"description",
"""
The transform to be applied to the input image.
This must contain only translation and scaling.
""",
],
"filter" : [
"description",
"""
The filter used to perform the resampling. The name
of any OIIO filter may be specified. The default automatically
picks an appropriate high-quality filter based on whether
or not the image is being enlarged or reduced.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"preset:Default", "",
] + list( itertools.chain(
*[ ( "preset:" + x.title(), x ) for x in GafferImage.FilterAlgo.filterNames() ]
) ),
"filterScale" : [
"description",
"""
A multiplier for the scale of the filter used. Scaling up gives a softer
			result, scaling down gives a sharper result (likely to alias or even create black
			patches where no pixels can be found). Less than 1 is not recommended unless
you have a special technical reason.
""",
],
"boundingMode" : [
"description",
"""
The method used when a filter references pixels outside the
input data window.
""",
"preset:Black", GafferImage.Sampler.BoundingMode.Black,
"preset:Clamp", GafferImage.Sampler.BoundingMode.Clamp,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"expandDataWindow" : [
"description",
"""
Expands the data window by the filter radius, to include the
external pixels affected by the filter.
""",
],
"debug" : [
"description",
"""
Enables debug output. The HorizontalPass setting outputs
an intermediate image filtered just in the horizontal
direction - this is an internal optimisation used when
filtering with a separable filter. The SinglePass setting
forces all filtering to be done in a single pass (as if
the filter was non-separable) and can be used for validating
			the results of the two-pass (default) approach.
""",
"preset:Off", GafferImage.Resample.Debug.Off,
"preset:HorizontalPass", GafferImage.Resample.Debug.HorizontalPass,
"preset:SinglePass", GafferImage.Resample.Debug.SinglePass,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
}
)
|
tests/test_aio_trampoline.py | suned/pfun | 126 | 12771857 | import pytest
from hypothesis import assume, given
from pfun import compose, identity
from pfun.aio_trampoline import Done
from pfun.hypothesis_strategies import aio_trampolines, anything, unaries
from .monad_test import MonadTest
class TestTrampoline(MonadTest):
@pytest.mark.asyncio
@given(aio_trampolines(anything()))
async def test_right_identity_law(self, trampoline):
assert (await
trampoline.and_then(Done).run()) == (await trampoline.run())
@pytest.mark.asyncio
@given(anything(), unaries(aio_trampolines(anything())))
async def test_left_identity_law(self, value, f):
assert (await Done(value).and_then(f).run()) == (await f(value).run())
@pytest.mark.asyncio
@given(
aio_trampolines(anything()),
unaries(aio_trampolines(anything())),
unaries(aio_trampolines(anything()))
)
async def test_associativity_law(self, trampoline, f, g):
assert (await trampoline.and_then(f).and_then(g).run(
)) == (await trampoline.and_then(lambda x: f(x).and_then(g)).run())
@given(anything())
def test_equality(self, value):
assert Done(value) == Done(value)
@given(anything(), anything())
def test_inequality(self, first, second):
assume(first != second)
assert Done(first) != Done(second)
@pytest.mark.asyncio
@given(anything())
async def test_identity_law(self, value):
assert (await
Done(value).map(identity).run()) == (await Done(value).run())
@pytest.mark.asyncio
@given(unaries(anything()), unaries(anything()), anything())
async def test_composition_law(self, f, g, value):
h = compose(f, g)
assert (await Done(value).map(g).map(f).run()
) == (await Done(value).map(h).run())
|
tests/test_sklearn_normalizer_converter.py | twosense/sklearn-onnx | 323 | 12771859 | # SPDX-License-Identifier: Apache-2.0
"""
Tests scikit-normalizer converter.
"""
import unittest
import numpy
from sklearn.preprocessing import Normalizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
Int64TensorType, FloatTensorType, DoubleTensorType)
from test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnNormalizerConverter(unittest.TestCase):
def test_model_normalizer(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", Int64TensorType([None, 1]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
def test_model_normalizer_blackop(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET,
black_op={"Normalizer"})
self.assertNotIn('op_type: "Normalizer', str(model_onnx))
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1BlackOp-SkipDim1")
def test_model_normalizer_float_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1-SkipDim1")
def test_model_normalizer_float_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2-SkipDim1")
def test_model_normalizer_double_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL1Double-SkipDim1")
def test_model_normalizer_double_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL2Double-SkipDim1")
def test_model_normalizer_float_noshape(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2NoShape-SkipDim1")
if __name__ == "__main__":
unittest.main()
|
universe/wrappers/experimental/__init__.py | BitJetKit/universe | 8,120 | 12771873 | from universe.wrappers.experimental.action_space import SafeActionSpace, SoftmaxClickMouse
from universe.wrappers.experimental.observation import CropObservations
from universe.wrappers.experimental.random_env import RandomEnv
|
paper code/solveCrossTime.py | PKandarp/TICC | 393 | 12771912 | from snap import *
from cvxpy import *
import math
import multiprocessing
import numpy
from scipy.sparse import lil_matrix
import sys
import time
import __builtin__
import code
# File format: One edge per line, written as "srcID dstID"
# Commented lines that start with '#' are ignored
# Returns a TGraphVX object with the designated edges and nodes
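# Example edge-list file accepted by LoadEdgeList (illustrative contents):
#   # comment lines are ignored
#   0 1
#   1 2
#   0 2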
def LoadEdgeList(Filename):
gvx = TGraphVX()
nids = set()
    with open(Filename) as infile:
for line in infile:
if line.startswith('#'): continue
[src, dst] = line.split()
if int(src) not in nids:
gvx.AddNode(int(src))
nids.add(int(src))
if int(dst) not in nids:
gvx.AddNode(int(dst))
nids.add(int(dst))
gvx.AddEdge(int(src), int(dst))
return gvx
# TGraphVX inherits from the TUNGraph object defined by Snap.py
class TGraphVX(TUNGraph):
__default_objective = norm(0)
__default_constraints = []
# Data Structures
# ---------------
# node_objectives = {int NId : CVXPY Expression}
# node_constraints = {int NId : [CVXPY Constraint]}
# edge_objectives = {(int NId1, int NId2) : CVXPY Expression}
# edge_constraints = {(int NId1, int NId2) : [CVXPY Constraint]}
# all_variables = set(CVXPY Variable)
#
# ADMM-Specific Structures
# ------------------------
# node_variables = {int NId :
# [(CVXPY Variable id, CVXPY Variable name, CVXPY Variable, offset)]}
# node_values = {int NId : numpy array}
# node_values points to the numpy array containing the value of the entire
# variable space corresponding to then node. Use the offset to get the
# value for a specific variable.
#
# Constructor
# If Graph is a Snap.py graph, initializes a SnapVX graph with the same
# nodes and edges.
def __init__(self, Graph=None):
# Initialize data structures
self.node_objectives = {}
self.node_variables = {}
self.node_constraints = {}
self.edge_objectives = {}
self.edge_constraints = {}
self.node_values = {}
self.all_variables = set()
self.status = None
self.value = None
# Initialize superclass
nodes = 0
edges = 0
if Graph != None:
nodes = Graph.GetNodes()
edges = Graph.GetEdges()
TUNGraph.__init__(self, nodes, edges)
# Support for constructor with Snap.py graph argument
if Graph != None:
for ni in Graph.Nodes():
self.AddNode(ni.GetId())
for ei in Graph.Edges():
self.AddEdge(ei.GetSrcNId(), ei.GetDstNId())
# Simple iterator to iterator over all nodes in graph. Similar in
# functionality to Nodes() iterator of PUNGraph in Snap.py.
def Nodes(self):
ni = TUNGraph.BegNI(self)
for i in xrange(TUNGraph.GetNodes(self)):
yield ni
ni.Next()
# Simple iterator to iterator over all edge in graph. Similar in
# functionality to Edges() iterator of PUNGraph in Snap.py.
def Edges(self):
ei = TUNGraph.BegEI(self)
for i in xrange(TUNGraph.GetEdges(self)):
yield ei
ei.Next()
# Adds objectives together to form one collective CVXPY Problem.
# Option of specifying Maximize() or the default Minimize().
# Graph status and value properties will also be set.
# Individual variable values can be retrieved using GetNodeValue().
# Option to use serial version or distributed ADMM.
# maxIters optional parameter: Maximum iterations for distributed ADMM.
def Solve(self, M=Minimize, UseADMM=True, NumProcessors=0, Rho=1.0,
MaxIters=250, EpsAbs=0.01, EpsRel=0.01, Verbose=False,
UseClustering = False, ClusterSize = 1000 ):
global m_func
m_func = M
# Use ADMM if the appropriate parameter is specified and if there
# are edges in the graph.
#if __builtin__.len(SuperNodes) > 0:
if UseClustering and ClusterSize > 0:
SuperNodes = self.__ClusterGraph(ClusterSize)
self.__SolveClusterADMM(M,UseADMM,SuperNodes, NumProcessors, Rho, MaxIters,\
EpsAbs, EpsRel, Verbose)
return
if UseADMM and self.GetEdges() != 0:
self.__SolveADMM(NumProcessors, Rho, MaxIters, EpsAbs, EpsRel,
Verbose)
return
if Verbose:
print 'Serial ADMM'
objective = 0
constraints = []
# Add all node objectives and constraints
for ni in self.Nodes():
nid = ni.GetId()
objective += self.node_objectives[nid]
constraints += self.node_constraints[nid]
# Add all edge objectives and constraints
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
objective += self.edge_objectives[etup]
constraints += self.edge_constraints[etup]
# Solve CVXPY Problem
objective = m_func(objective)
problem = Problem(objective, constraints)
try:
problem.solve()
except SolverError:
problem.solve(solver=SCS)
if problem.status in [INFEASIBLE_INACCURATE, UNBOUNDED_INACCURATE]:
problem.solve(solver=SCS)
# Set TGraphVX status and value to match CVXPY
self.status = problem.status
self.value = problem.value
# Insert into hash to support ADMM structures and GetNodeValue()
for ni in self.Nodes():
nid = ni.GetId()
variables = self.node_variables[nid]
value = None
for (varID, varName, var, offset) in variables:
if var.size[0] == 1:
val = numpy.array([var.value])
else:
val = numpy.array(var.value).reshape(-1,)
if value is None:
value = val
else:
value = numpy.concatenate((value, val))
self.node_values[nid] = value
"""Function to solve cluster wise optimization problem"""
def __SolveClusterADMM(self,M,UseADMM,superNodes,numProcessors, rho_param,
maxIters, eps_abs, eps_rel,verbose):
#initialize an empty supergraph
supergraph = TGraphVX()
nidToSuperidMap = {}
edgeToClusterTupMap = {}
for snid in xrange(__builtin__.len(superNodes)):
for nid in superNodes[snid]:
nidToSuperidMap[nid] = snid
"""collect the entities for the supergraph. a supernode is a subgraph. a superedge
is a representation of a graph cut"""
superEdgeObjectives = {}
superEdgeConstraints = {}
superNodeObjectives = {}
superNodeConstraints = {}
superNodeVariables = {}
superNodeValues = {}
varToSuperVarMap = {}
"""traverse through the list of edges and add each edge's constraint and objective to
either the supernode to which it belongs or the superedge which connects the ends
of the supernodes to which it belongs"""
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
supersrcnid,superdstnid = nidToSuperidMap[etup[0]],nidToSuperidMap[etup[1]]
if supersrcnid != superdstnid: #the edge is a part of the cut
if supersrcnid > superdstnid:
supersrcnid,superdstnid = superdstnid,supersrcnid
if (supersrcnid,superdstnid) not in superEdgeConstraints:
superEdgeConstraints[(supersrcnid,superdstnid)] = self.edge_constraints[etup]
superEdgeObjectives[(supersrcnid,superdstnid)] = self.edge_objectives[etup]
else:
superEdgeConstraints[(supersrcnid,superdstnid)] += self.edge_constraints[etup]
superEdgeObjectives[(supersrcnid,superdstnid)] += self.edge_objectives[etup]
else: #the edge is a part of some supernode
if supersrcnid not in superNodeConstraints:
superNodeConstraints[supersrcnid] = self.edge_constraints[etup]
superNodeObjectives[supersrcnid] = self.edge_objectives[etup]
else:
superNodeConstraints[supersrcnid] += self.edge_constraints[etup]
superNodeObjectives[supersrcnid] += self.edge_objectives[etup]
for ni in self.Nodes():
nid = ni.GetId()
supernid = nidToSuperidMap[nid]
value = None
for (varID, varName, var, offset) in self.node_variables[nid]:
if var.size[0] == 1:
val = numpy.array([var.value])
else:
val = numpy.array(var.value).reshape(-1,)
                if value is None:
value = val
else:
value = numpy.concatenate((value, val))
if supernid not in superNodeConstraints:
superNodeObjectives[supernid] = self.node_objectives[nid]
superNodeConstraints[supernid] = self.node_constraints[nid]
else:
superNodeObjectives[supernid] += self.node_objectives[nid]
superNodeConstraints[supernid] += self.node_constraints[nid]
for ( varId, varName, var, offset) in self.node_variables[nid]:
superVarName = varName+str(varId)
varToSuperVarMap[(nid,varName)] = (supernid,superVarName)
if supernid not in superNodeVariables:
superNodeVariables[supernid] = [(varId, superVarName, var, offset)]
superNodeValues[supernid] = value
else:
superNodeOffset = sum([superNodeVariables[supernid][k][2].size[0]* \
superNodeVariables[supernid][k][2].size[1]\
for k in xrange(__builtin__.len(superNodeVariables[supernid])) ])
superNodeVariables[supernid] += [(varId, superVarName, var, superNodeOffset)]
superNodeValues[supernid] = numpy.concatenate((superNodeValues[supernid],value))
#add all supernodes to the supergraph
for supernid in superNodeConstraints:
supergraph.AddNode(supernid, superNodeObjectives[supernid], \
superNodeConstraints[supernid])
supergraph.node_variables[supernid] = superNodeVariables[supernid]
supergraph.node_values[supernid] = superNodeValues[supernid]
#add all superedges to the supergraph
for superei in superEdgeConstraints:
superSrcId,superDstId = superei
supergraph.AddEdge(superSrcId, superDstId, None,\
superEdgeObjectives[superei],\
superEdgeConstraints[superei])
#call solver for this supergraph
if UseADMM and supergraph.GetEdges() != 0:
supergraph.__SolveADMM(numProcessors, rho_param, maxIters, eps_abs, eps_rel, verbose)
else:
supergraph.Solve(M, False, numProcessors, rho_param, maxIters, eps_abs, eps_rel, verbose,
UseClustering=False)
self.status = supergraph.status
self.value = supergraph.value
for ni in self.Nodes():
nid = ni.GetId()
snid = nidToSuperidMap[nid]
self.node_values[nid] = []
for ( varId, varName, var, offset) in self.node_variables[nid]:
superVarName = varToSuperVarMap[(nid,varName)]
self.node_values[nid] = numpy.concatenate((self.node_values[nid],\
supergraph.GetNodeValue(snid, superVarName[1])))
# Implementation of distributed ADMM
# Uses a global value of rho_param for rho
# Will run for a maximum of maxIters iterations
def __SolveADMM(self, numProcessors, rho_param, maxIters, eps_abs, eps_rel,
verbose):
global node_vals, edge_z_vals, edge_u_vals, rho
global getValue, rho_update_func
if numProcessors <= 0:
num_processors = multiprocessing.cpu_count()
else:
num_processors = numProcessors
rho = rho_param
if verbose:
print 'Distributed ADMM (%d processors)' % num_processors
# Organize information for each node in helper node_info structure
node_info = {}
# Keeps track of the current offset necessary into the shared node
# values Array
length = 0
for ni in self.Nodes():
nid = ni.GetId()
deg = ni.GetDeg()
obj = self.node_objectives[nid]
variables = self.node_variables[nid]
con = self.node_constraints[nid]
neighbors = [ni.GetNbrNId(j) for j in xrange(deg)]
# Node's constraints include those imposed by edges
for neighborId in neighbors:
etup = self.__GetEdgeTup(nid, neighborId)
econ = self.edge_constraints[etup]
con += econ
# Calculate sum of dimensions of all Variables for this node
size = sum([var.size[0] for (varID, varName, var, offset) in variables])
# Nearly complete information package for this node
node_info[nid] = (nid, obj, variables, con, length, size, deg,\
neighbors)
length += size
node_vals = multiprocessing.Array('d', [0.0] * length)
x_length = length
# Organize information for each node in final edge_list structure and
# also helper edge_info structure
edge_list = []
edge_info = {}
# Keeps track of the current offset necessary into the shared edge
# values Arrays
length = 0
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
obj = self.edge_objectives[etup]
con = self.edge_constraints[etup]
con += self.node_constraints[etup[0]] +\
self.node_constraints[etup[1]]
# Get information for each endpoint node
info_i = node_info[etup[0]]
info_j = node_info[etup[1]]
ind_zij = length
ind_uij = length
length += info_i[X_LEN]
ind_zji = length
ind_uji = length
length += info_j[X_LEN]
# Information package for this edge
tup = (etup, obj, con,\
info_i[X_VARS], info_i[X_LEN], info_i[X_IND], ind_zij, ind_uij,\
info_j[X_VARS], info_j[X_LEN], info_j[X_IND], ind_zji, ind_uji)
edge_list.append(tup)
edge_info[etup] = tup
edge_z_vals = multiprocessing.Array('d', [0.0] * length)
edge_u_vals = multiprocessing.Array('d', [0.0] * length)
z_length = length
# Populate sparse matrix A.
# A has dimensions (p, n), where p is the length of the stacked vector
# of node variables, and n is the length of the stacked z vector of
# edge variables.
# Each row of A has one 1. There is a 1 at (i,j) if z_i = x_j.
A = lil_matrix((z_length, x_length), dtype=numpy.int8)
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
info_edge = edge_info[etup]
info_i = node_info[etup[0]]
info_j = node_info[etup[1]]
for offset in xrange(info_i[X_LEN]):
row = info_edge[Z_ZIJIND] + offset
col = info_i[X_IND] + offset
A[row, col] = 1
for offset in xrange(info_j[X_LEN]):
row = info_edge[Z_ZJIIND] + offset
col = info_j[X_IND] + offset
A[row, col] = 1
A_tr = A.transpose()
# Create final node_list structure by adding on information for
# node neighbors
node_list = []
for nid, info in node_info.iteritems():
entry = [nid, info[X_OBJ], info[X_VARS], info[X_CON], info[X_IND],\
info[X_LEN], info[X_DEG]]
# Append information about z- and u-value indices for each
# node neighbor
for i in xrange(info[X_DEG]):
neighborId = info[X_NEIGHBORS][i]
indices = (Z_ZIJIND, Z_UIJIND) if nid < neighborId else\
(Z_ZJIIND, Z_UJIIND)
einfo = edge_info[self.__GetEdgeTup(nid, neighborId)]
entry.append(einfo[indices[0]])
entry.append(einfo[indices[1]])
node_list.append(entry)
pool = multiprocessing.Pool(num_processors)
num_iterations = 0
z_old = getValue(edge_z_vals, 0, z_length)
# Proceed until convergence criteria are achieved or the maximum
# number of iterations has passed
while num_iterations <= maxIters:
# Check convergence criteria
if num_iterations != 0:
x = getValue(node_vals, 0, x_length)
z = getValue(edge_z_vals, 0, z_length)
u = getValue(edge_u_vals, 0, z_length)
# Determine if algorithm should stop. Retrieve primal and dual
# residuals and thresholds
stop, res_pri, e_pri, res_dual, e_dual =\
self.__CheckConvergence(A, A_tr, x, z, z_old, u, rho,\
x_length, z_length,
eps_abs, eps_rel, verbose)
if stop: break
z_old = z
# Update rho and scale u-values
rho_new = rho_update_func(rho, res_pri, e_pri, res_dual, e_dual)
scale = float(rho) / rho_new
edge_u_vals[:] = [i * scale for i in edge_u_vals]
rho = rho_new
num_iterations += 1
if verbose:
# Debugging information prints current iteration #
print 'Iteration %d' % num_iterations
pool.map(ADMM_x, node_list)
pool.map(ADMM_z, edge_list)
pool.map(ADMM_u, edge_list)
pool.close()
pool.join()
# Insert into hash to support GetNodeValue()
for entry in node_list:
nid = entry[X_NID]
index = entry[X_IND]
size = entry[X_LEN]
self.node_values[nid] = getValue(node_vals, index, size)
# Set TGraphVX status and value to match CVXPY
if num_iterations <= maxIters:
self.status = 'Optimal'
else:
self.status = 'Incomplete: max iterations reached'
# self.value = self.GetTotalProblemValue()
# Iterate through all variables and update values.
# Sum all objective values over all nodes and edges.
def GetTotalProblemValue(self):
global getValue
result = 0.0
for ni in self.Nodes():
nid = ni.GetId()
for (varID, varName, var, offset) in self.node_variables[nid]:
var.value = self.GetNodeValue(nid, varName)
for ni in self.Nodes():
result += self.node_objectives[ni.GetId()].value
for ei in self.Edges():
etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
result += self.edge_objectives[etup].value
return result
# Returns True if convergence criteria have been satisfied
# eps_abs = eps_rel = 0.01
# r = Ax - z
# s = rho * (A^T)(z - z_old)
# e_pri = sqrt(p) * e_abs + e_rel * max(||Ax||, ||z||)
# e_dual = sqrt(n) * e_abs + e_rel * ||rho * (A^T)u||
# Should stop if (||r|| <= e_pri) and (||s|| <= e_dual)
# Returns (boolean shouldStop, primal residual value, primal threshold,
# dual residual value, dual threshold)
def __CheckConvergence(self, A, A_tr, x, z, z_old, u, rho, p, n,
e_abs, e_rel, verbose):
norm = numpy.linalg.norm
Ax = A.dot(x)
r = Ax - z
s = rho * A_tr.dot(z - z_old)
# Primal and dual thresholds. Add .0001 to prevent the case of 0.
e_pri = math.sqrt(p) * e_abs + e_rel * max(norm(Ax), norm(z)) + .0001
e_dual = math.sqrt(n) * e_abs + e_rel * norm(rho * A_tr.dot(u)) + .0001
# Primal and dual residuals
res_pri = norm(r)
res_dual = norm(s)
if verbose:
# Debugging information to print convergence criteria values
print ' r:', res_pri
print ' e_pri:', e_pri
print ' s:', res_dual
print ' e_dual:', e_dual
stop = (res_pri <= e_pri) and (res_dual <= e_dual)
return (stop, res_pri, e_pri, res_dual, e_dual)
# API to get node Variable value after solving with ADMM.
def GetNodeValue(self, NId, Name):
self.__VerifyNId(NId)
for (varID, varName, var, offset) in self.node_variables[NId]:
if varName == Name:
offset = offset
value = self.node_values[NId]
return value[offset:(offset + var.size[0])]
return None
# Prints value of all node variables to console or file, if given
def PrintSolution(self, Filename=None):
numpy.set_printoptions(linewidth=numpy.inf)
out = sys.stdout if (Filename == None) else open(Filename, 'w+')
out.write('Status: %s\n' % self.status)
out.write('Total Objective: %f\n' % self.value)
for ni in self.Nodes():
nid = ni.GetId()
s = 'Node %d:\n' % nid
out.write(s)
for (varID, varName, var, offset) in self.node_variables[nid]:
val = numpy.transpose(self.GetNodeValue(nid, varName))
s = ' %s %s\n' % (varName, str(val))
out.write(s)
# Helper method to verify existence of an NId.
def __VerifyNId(self, NId):
if not TUNGraph.IsNode(self, NId):
raise Exception('Node %d does not exist.' % NId)
    # Helper method to update the set of all Variables when the Objective at
    # a node changes, ensuring that no Variable is shared between Objectives.
def __UpdateAllVariables(self, NId, Objective):
if NId in self.node_objectives:
# First, remove the Variables from the old Objective.
old_obj = self.node_objectives[NId]
self.all_variables = self.all_variables - set(old_obj.variables())
# Check that the Variables of the new Objective are not currently
# in other Objectives.
new_variables = set(Objective.variables())
if __builtin__.len(self.all_variables.intersection(new_variables)) != 0:
raise Exception('Objective at NId %d shares a variable.' % NId)
self.all_variables = self.all_variables | new_variables
# Helper method to get CVXPY Variables out of a CVXPY Objective
def __ExtractVariableList(self, Objective):
l = [(var.name(), var) for var in Objective.variables()]
# Sort in ascending order by name
l.sort(key=lambda t: t[0])
l2 = []
offset = 0
for (varName, var) in l:
# Add tuples of the form (id, name, object, offset)
l2.append((var.id, varName, var, offset))
offset += var.size[0]
return l2
# Adds a Node to the TUNGraph and stores the corresponding CVX information.
def AddNode(self, NId, Objective=__default_objective,\
Constraints=__default_constraints):
self.__UpdateAllVariables(NId, Objective)
self.node_objectives[NId] = Objective
self.node_variables[NId] = self.__ExtractVariableList(Objective)
self.node_constraints[NId] = Constraints
return TUNGraph.AddNode(self, NId)
def SetNodeObjective(self, NId, Objective):
self.__VerifyNId(NId)
self.__UpdateAllVariables(NId, Objective)
self.node_objectives[NId] = Objective
self.node_variables[NId] = self.__ExtractVariableList(Objective)
def GetNodeObjective(self, NId):
self.__VerifyNId(NId)
return self.node_objectives[NId]
def SetNodeConstraints(self, NId, Constraints):
self.__VerifyNId(NId)
self.node_constraints[NId] = Constraints
def GetNodeConstraints(self, NId):
self.__VerifyNId(NId)
return self.node_constraints[NId]
# Helper method to get a tuple representing an edge. The smaller NId
# goes first.
def __GetEdgeTup(self, NId1, NId2):
return (NId1, NId2) if NId1 < NId2 else (NId2, NId1)
# Helper method to verify existence of an edge.
def __VerifyEdgeTup(self, ETup):
if not TUNGraph.IsEdge(self, ETup[0], ETup[1]):
raise Exception('Edge {%d,%d} does not exist.' % ETup)
# Adds an Edge to the TUNGraph and stores the corresponding CVX information.
# obj_func is a function which accepts two arguments, a dictionary of
# variables for the source and destination nodes
# { string varName : CVXPY Variable }
# obj_func should return a tuple of (objective, constraints), although
# it will assume a singleton object will be an objective and will use
# the default constraints.
# If obj_func is None, then will use Objective and Constraints, which are
# parameters currently set to defaults.
def AddEdge(self, SrcNId, DstNId, ObjectiveFunc=None,
Objective=__default_objective, Constraints=__default_constraints):
ETup = self.__GetEdgeTup(SrcNId, DstNId)
if ObjectiveFunc != None:
src_vars = self.GetNodeVariables(SrcNId)
dst_vars = self.GetNodeVariables(DstNId)
ret = ObjectiveFunc(src_vars, dst_vars)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.edge_objectives[ETup] = ret[0]
self.edge_constraints[ETup] = ret[1]
else:
# Singleton object = assume it is the objective
self.edge_objectives[ETup] = ret
self.edge_constraints[ETup] = self.__default_constraints
else:
self.edge_objectives[ETup] = Objective
self.edge_constraints[ETup] = Constraints
return TUNGraph.AddEdge(self, SrcNId, DstNId)
def SetEdgeObjective(self, SrcNId, DstNId, Objective):
ETup = self.__GetEdgeTup(SrcNId, DstNId)
self.__VerifyEdgeTup(ETup)
self.edge_objectives[ETup] = Objective
def GetEdgeObjective(self, SrcNId, DstNId):
ETup = self.__GetEdgeTup(SrcNId, DstNId)
self.__VerifyEdgeTup(ETup)
return self.edge_objectives[ETup]
def SetEdgeConstraints(self, SrcNId, DstNId, Constraints):
ETup = self.__GetEdgeTup(SrcNId, DstNId)
self.__VerifyEdgeTup(ETup)
self.edge_constraints[ETup] = Constraints
def GetEdgeConstraints(self, SrcNId, DstNId):
ETup = self.__GetEdgeTup(SrcNId, DstNId)
self.__VerifyEdgeTup(ETup)
return self.edge_constraints[ETup]
# Returns a dictionary of all variables corresponding to a node.
# { string name : CVXPY Variable }
# This can be used in place of bulk loading functions to recover necessary
# Variables for an edge.
def GetNodeVariables(self, NId):
self.__VerifyNId(NId)
d = {}
for (varID, varName, var, offset) in self.node_variables[NId]:
d[varName] = var
return d
# Bulk loading for nodes
# ObjFunc is a function which accepts one argument, an array of strings
# parsed from the given CSV filename
# ObjFunc should return a tuple of (objective, constraints), although
# it will assume a singleton object will be an objective
# Optional parameter NodeIDs allows the user to pass in a list specifying,
# in order, the node IDs that correspond to successive rows
# If NodeIDs is None, then the file must have a column denoting the
# node ID for each row. The index of this column (0-indexed) is IdCol.
# If NodeIDs and IdCol are both None, then will iterate over all Nodes, in
# order, as long as the file lasts
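    # Illustrative ObjFunc sketch (editorial addition): build a node objective
    # from one CSV row; the single-column CSV layout and the variable name are
    # assumptions.
    #   def node_obj(data):
    #       x = Variable(1, name='x')
    #       return square(x - float(data[0]))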
def AddNodeObjectives(self, Filename, ObjFunc, NodeIDs=None, IdCol=None):
infile = open(Filename, 'r')
if NodeIDs == None and IdCol == None:
stop = False
for ni in self.Nodes():
nid = ni.GetId()
while True:
line = infile.readline()
if line == '': stop = True
if not line.startswith('#'): break
if stop: break
data = [x.strip() for x in line.split(',')]
ret = ObjFunc(data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetNodeObjective(nid, ret[0])
self.SetNodeConstraints(nid, ret[1])
else:
# Singleton object = assume it is the objective
self.SetNodeObjective(nid, ret)
if NodeIDs == None:
for line in infile:
if line.startswith('#'): continue
data = [x.strip() for x in line.split(',')]
ret = ObjFunc(data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetNodeObjective(int(data[IdCol]), ret[0])
self.SetNodeConstraints(int(data[IdCol]), ret[1])
else:
# Singleton object = assume it is the objective
self.SetNodeObjective(int(data[IdCol]), ret)
else:
for nid in NodeIDs:
while True:
line = infile.readline()
if line == '':
                    raise Exception('File %s is too short.' % Filename)
if not line.startswith('#'): break
data = [x.strip() for x in line.split(',')]
ret = ObjFunc(data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetNodeObjective(nid, ret[0])
self.SetNodeConstraints(nid, ret[1])
else:
# Singleton object = assume it is the objective
self.SetNodeObjective(nid, ret)
infile.close()
# Bulk loading for edges
# If Filename is None:
# ObjFunc is a function which accepts three arguments, a dictionary of
# variables for the source and destination nodes, and an unused param
# { string varName : CVXPY Variable } x2, None
# ObjFunc should return a tuple of (objective, constraints), although
# it will assume a singleton object will be an objective
# If Filename exists:
# ObjFunc is the same, except the third param will be be an array of
# strings parsed from the given CSV filename
# Optional parameter EdgeIDs allows the user to pass in a list specifying,
# in order, the EdgeIDs that correspond to successive rows. An edgeID is
# a tuple of (srcID, dstID).
# If EdgeIDs is None, then the file may have columns denoting the srcID and
# dstID for each row. The indices of these columns are 0-indexed.
# If EdgeIDs and id columns are None, then will iterate through all edges
# in order, as long as the file lasts.
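    # Illustrative ObjFunc sketch (editorial addition): a quadratic penalty on
    # the difference between neighbouring nodes; assumes each node exposes a
    # Variable named 'x' and ignores the CSV data argument.
    #   def edge_obj(src_vars, dst_vars, data):
    #       return sum_squares(src_vars['x'] - dst_vars['x'])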
def AddEdgeObjectives(self, ObjFunc, Filename=None, EdgeIDs=None,\
SrcIdCol=None, DstIdCol=None):
if Filename == None:
for ei in self.Edges():
src_id = ei.GetSrcNId()
src_vars = self.GetNodeVariables(src_id)
dst_id = ei.GetDstNId()
dst_vars = self.GetNodeVariables(dst_id)
ret = ObjFunc(src_vars, dst_vars, None)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
return
infile = open(Filename, 'r')
if EdgeIDs == None and (SrcIdCol == None or DstIdCol == None):
stop = False
for ei in self.Edges():
src_id = ei.GetSrcNId()
src_vars = self.GetNodeVariables(src_id)
dst_id = ei.GetDstNId()
dst_vars = self.GetNodeVariables(dst_id)
while True:
line = infile.readline()
if line == '': stop = True
if not line.startswith('#'): break
if stop: break
data = [x.strip() for x in line.split(',')]
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
if EdgeIDs == None:
for line in infile:
if line.startswith('#'): continue
data = [x.strip() for x in line.split(',')]
src_id = int(data[SrcIdCol])
dst_id = int(data[DstIdCol])
src_vars = self.GetNodeVariables(src_id)
dst_vars = self.GetNodeVariables(dst_id)
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(src_id, dst_id, ret[0])
self.SetEdgeConstraints(src_id, dst_id, ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(src_id, dst_id, ret)
else:
for edgeID in EdgeIDs:
etup = self.__GetEdgeTup(edgeID[0], edgeID[1])
while True:
line = infile.readline()
if line == '':
raise Exception('File %s is too short.' % Filename)
if not line.startswith('#'): break
data = [x.strip() for x in line.split(',')]
src_vars = self.GetNodeVariables(etup[0])
dst_vars = self.GetNodeVariables(etup[1])
ret = ObjFunc(src_vars, dst_vars, data)
if type(ret) is tuple:
# Tuple = assume we have (objective, constraints)
self.SetEdgeObjective(etup[0], etup[1], ret[0])
self.SetEdgeConstraints(etup[0], etup[1], ret[1])
else:
# Singleton object = assume it is the objective
self.SetEdgeObjective(etup[0], etup[1], ret)
infile.close()
"""return clusters of nodes of the original graph.Each cluster corresponds to
a supernode in the supergraph"""
def __ClusterGraph(self,clusterSize):
#obtain a random shuffle of the nodes
nidArray = [ni.GetId() for ni in self.Nodes()]
numpy.random.shuffle(nidArray)
visitedNode = {}
for nid in nidArray:
visitedNode[nid] = False
superNodes = []
superNode,superNodeSize = [],0
for nid in nidArray:
if not visitedNode[nid]:
oddLevel, evenLevel, isOdd = [],[],True
oddLevel.append(nid)
visitedNode[nid] = True
#do a level order traversal and add nodes to the superNode until the
#size of the supernode variables gets larger than clusterSize
while True:
if isOdd:
if __builtin__.len(oddLevel) > 0:
while __builtin__.len(oddLevel) > 0:
topId = oddLevel.pop(0)
node = TUNGraph.GetNI(self,topId)
varSize = sum([variable[2].size[0]* \
variable[2].size[1]\
for variable in self.node_variables[topId]])
if varSize + superNodeSize <= clusterSize:
superNode.append(topId)
superNodeSize = varSize + superNodeSize
else:
if __builtin__.len(superNode) > 0:
superNodes.append(superNode)
superNodeSize = varSize
superNode = [topId]
neighbors = [node.GetNbrNId(j) \
for j in xrange(node.GetDeg())]
for nbrId in neighbors:
if not visitedNode[nbrId]:
evenLevel.append(nbrId)
visitedNode[nbrId] = True
isOdd = False
#sort the nodes according to their variable size
if __builtin__.len(evenLevel) > 0:
evenLevel.sort(key=lambda nid : sum([variable[2].size[0]* \
variable[2].size[1] for variable \
in self.node_variables[nid]]))
else:
break
else:
if __builtin__.len(evenLevel) > 0:
while __builtin__.len(evenLevel) > 0:
topId = evenLevel.pop(0)
node = TUNGraph.GetNI(self,topId)
varSize = sum([variable[2].size[0]* \
variable[2].size[1]\
for variable in self.node_variables[topId]])
if varSize + superNodeSize <= clusterSize:
superNode.append(topId)
superNodeSize = varSize + superNodeSize
else:
if __builtin__.len(superNode) > 0:
superNodes.append(superNode)
superNodeSize = varSize
superNode = [topId]
neighbors = [node.GetNbrNId(j) \
for j in xrange(node.GetDeg())]
for nbrId in neighbors:
if not visitedNode[nbrId]:
oddLevel.append(nbrId)
visitedNode[nbrId] = True
isOdd = True
#sort the nodes according to their variable size
if __builtin__.len(oddLevel) > 0:
oddLevel.sort(key=lambda nid : sum([variable[2].size[0]* \
variable[2].size[1] for variable \
in self.node_variables[nid]]))
else:
break
if superNode not in superNodes:
superNodes.append(superNode)
return superNodes
## ADMM Global Variables and Functions ##
# By default, the objective function is Minimize().
__default_m_func = Minimize
m_func = __default_m_func
# By default, rho is 1.0. Default rho update is identity function and does not
# depend on primal or dual residuals or thresholds.
__default_rho = 1.0
__default_rho_update_func = lambda rho, res_p, thr_p, res_d, thr_d: rho
rho = __default_rho
# Rho update function takes 5 parameters
# - Old value of rho
# - Primal residual and threshold
# - Dual residual and threshold
rho_update_func = __default_rho_update_func
def SetRho(Rho=None):
global rho
rho = Rho if Rho else __default_rho
# Rho update function should take one parameter: old_rho
# Returns new_rho
# This function will be called at the end of every iteration
def SetRhoUpdateFunc(Func=None):
global rho_update_func
rho_update_func = Func if Func else __default_rho_update_func
# Tuple of indices to identify the information package for each node. Actual
# length of specific package (list) may vary depending on node degree.
# X_NID: Node ID
# X_OBJ: CVXPY Objective
# X_VARS: CVXPY Variables (entry from node_variables structure)
# X_CON: CVXPY Constraints
# X_IND: Starting index into shared node_vals Array
# X_LEN: Total length (sum of dimensions) of all variables
# X_DEG: Number of neighbors
# X_NEIGHBORS: Placeholder for information about each neighbors
# Information for each neighbor is two entries, appended in order.
# Starting index of the corresponding z-value in edge_z_vals. Then for u.
(X_NID, X_OBJ, X_VARS, X_CON, X_IND, X_LEN, X_DEG, X_NEIGHBORS) = range(8)
# Tuple of indices to identify the information package for each edge.
# Z_EID: Edge ID / tuple
# Z_OBJ: CVXPY Objective
# Z_CON: CVXPY Constraints
# Z_[IJ]VARS: CVXPY Variables for Node [ij] (entry from node_variables)
# Z_[IJ]LEN: Total length (sum of dimensions) of all variables for Node [ij]
# Z_X[IJ]IND: Starting index into shared node_vals Array for Node [ij]
# Z_Z[IJ|JI]IND: Starting index into shared edge_z_vals Array for edge [ij|ji]
# Z_U[IJ|JI]IND: Starting index into shared edge_u_vals Array for edge [ij|ji]
(Z_EID, Z_OBJ, Z_CON, Z_IVARS, Z_ILEN, Z_XIIND, Z_ZIJIND, Z_UIJIND,\
Z_JVARS, Z_JLEN, Z_XJIND, Z_ZJIIND, Z_UJIIND) = range(13)
# Contain all x, z, and u values for each node and/or edge in ADMM. Use the
# given starting index and length with getValue() to get individual node values
node_vals = None
edge_z_vals = None
edge_u_vals = None
# Extract a numpy array value from a shared Array.
# Give shared array, starting index, and total length.
def getValue(arr, index, length):
return numpy.array(arr[index:(index + length)])
# Write value of numpy array nparr (with given length) to a shared Array at
# the given starting index.
def writeValue(sharedarr, index, nparr, length):
if length == 1:
nparr = [nparr]
sharedarr[index:(index + length)] = nparr
# Write the values for all of the Variables involved in a given Objective to
# the given shared Array.
# variables should be an entry from the node_values structure.
def writeObjective(sharedarr, index, objective, variables):
for v in objective.variables():
vID = v.id
value = v.value
# Find the tuple in variables with the same ID. Take the offset.
# If no tuple exists, then silently skip.
for (varID, varName, var, offset) in variables:
if varID == vID:
writeValue(sharedarr, index + offset, value, var.size[0])
break
# Proximal operators
def Prox_logdet(S, A, eta):
global rho
d, q = numpy.linalg.eigh(eta*A-S)
q = numpy.matrix(q)
X_var = ( 1/(2*float(eta)) )*q*( numpy.diag(d + numpy.sqrt(numpy.square(d) + (4*eta)*numpy.ones(d.shape))) )*q.T
x_var = X_var[numpy.triu_indices(S.shape[1])] # extract upper triangular part as update variable
return numpy.matrix(x_var).T
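# Editorial note (sketch of the identity used above): Prox_logdet returns the
# closed-form minimizer of trace(S*X) - log det(X) + (eta/2)*||X - A||_F^2.
# Setting the gradient to zero gives X^{-1} = eta*X - (eta*A - S); writing
# eta*A - S = Q*diag(d)*Q^T, each eigenvalue x of X solves eta*x^2 - d*x - 1 = 0,
# i.e. x = (d + sqrt(d^2 + 4*eta)) / (2*eta), which is exactly what is computed.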
def upper2Full(a):
n = int((-1 + numpy.sqrt(1+ 8*a.shape[0]))/2)
A = numpy.zeros([n,n])
A[numpy.triu_indices(n)] = a
temp = A.diagonal()
A = (A + A.T) - numpy.diag(temp)
return A
def ij2symmetric(i,j,size):
return (size * (size + 1))/2 - (size-i)*((size - i + 1))/2 + j - i
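# Worked example for ij2symmetric (editorial addition): it maps an upper-
# triangular index (i, j) with i <= j to its position in the row-major
# flattened upper triangle. For size=3: (0,0)->0, (0,1)->1, (0,2)->2,
# (1,1)->3, (1,2)->4, (2,2)->5, matching the numpy.triu_indices ordering
# used by upper2Full above.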
# x-update for ADMM for one node
def ADMM_x(entry):
global rho
variables = entry[X_VARS]
#-----------------------Proximal operator ---------------------------
x_update = [] # proximal update for the variable x
if(__builtin__.len(entry[1].args) > 1 ):
# print 'we are in logdet + trace node'
cvxpyMat = entry[1].args[1].args[0].args[0]
numpymat = cvxpyMat.value
mat_shape = ( int( numpymat.shape[1] * ( numpymat.shape[1]+1 )/2.0 ) ,)
a = numpy.zeros(mat_shape)
for i in xrange(entry[X_DEG]):
z_index = X_NEIGHBORS + (2 * i)
u_index = z_index + 1
zi = entry[z_index]
ui = entry[u_index]
for (varID, varName, var, offset) in variables:
z = getValue(edge_z_vals, zi + offset, var.size[0])
u = getValue(edge_u_vals, ui + offset, var.size[0])
a += (z-u)
A = upper2Full(a)
A = A/entry[X_DEG]
eta = 1/float(rho)
x_update = Prox_logdet(numpymat, A, eta)
solution = numpy.array(x_update).T.reshape(-1)
writeValue(node_vals, entry[X_IND] + variables[0][3], solution, variables[0][2].size[0])
else:
x_update = [] # no variable to update for dummy node
return None
# z-update for ADMM for one edge
def ADMM_z(entry, index_penalty = 1):
global rho
rho = float(rho)
#-----------------------Proximal operator ---------------------------
a_ij = [] #
flag = 0
variables_i = entry[Z_IVARS]
for (varID, varName, var, offset) in variables_i:
x_i = getValue(node_vals, entry[Z_XIIND] + offset, var.size[0])
u_ij = getValue(edge_u_vals, entry[Z_UIJIND] + offset, var.size[0])
if flag == 0:
a_ij = (x_i + u_ij)
flag = 1
else:
a_ij += (x_i + u_ij)
lamb = entry[1].args[0].args[0].value
numBlocks = entry[1].args[1].args[0].value
sizeBlocks = entry[1].args[2].args[0].value
probSize = numBlocks*sizeBlocks
z_ij = numpy.zeros(probSize*(probSize+1)/2)
for i in range(numBlocks):
if (i == 0):
#In the A^{(0)} block (the blocks on the diagonal)
for j in range(sizeBlocks):
for k in range(j, sizeBlocks):
elems = numBlocks
lamSum = 0
points = numpy.zeros((elems))
locList = []
for l in range(elems):
(loc1, loc2) = (l*sizeBlocks + j, l*sizeBlocks+k)
locList.append((loc1,loc2))
index = ij2symmetric(loc1, loc2, probSize)
points[l] = a_ij[index]
lamSum = lamSum + lamb[loc1,loc2]
#Calculate soft threshold
#If answer is positive
ansPos = max((rho*numpy.sum(points) - lamSum)/(rho*elems),0)
#If answer is negative
ansNeg = min((rho*numpy.sum(points) + lamSum)/(rho*elems),0)
if (rho*numpy.sum(points) > lamSum):
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = ansPos
elif(rho*numpy.sum(points) < -1*lamSum):
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = ansNeg
else:
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = 0
else:
#Off diagonal blocks
for j in range(sizeBlocks):
for k in range(sizeBlocks):
elems = (2*numBlocks - 2*i)/2
lamSum = 0
points = numpy.zeros((elems))
locList = []
for l in range(elems):
(loc1, loc2) = ((l+i)*sizeBlocks + j, l*sizeBlocks+k)
locList.append((loc2,loc1))
index = ij2symmetric(loc2, loc1, probSize)
points[l] = a_ij[index]
lamSum = lamSum + lamb[loc2,loc1]
#Calculate soft threshold
#If answer is positive
ansPos = max((rho*numpy.sum(points) - lamSum)/(rho*elems),0)
#If answer is negative
ansNeg = min((rho*numpy.sum(points) + lamSum)/(rho*elems),0)
if (rho*numpy.sum(points) > lamSum):
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = ansPos
elif(rho*numpy.sum(points) < -1*lamSum):
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = ansNeg
else:
for locs in locList:
index = ij2symmetric(locs[0], locs[1], probSize)
z_ij[index] = 0
writeValue(edge_z_vals, entry[Z_ZIJIND] + variables_i[0][3], z_ij, variables_i[0][2].size[0])
return None
# u-update for ADMM for one edge
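# Scaled-form dual update for both directions of the edge:
#   u_ij <- u_ij + x_i - z_ij   and   u_ji <- u_ji + x_j - z_ji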
def ADMM_u(entry):
global rho
size_i = entry[Z_ILEN]
uij = getValue(edge_u_vals, entry[Z_UIJIND], size_i) +\
getValue(node_vals, entry[Z_XIIND], size_i) -\
getValue(edge_z_vals, entry[Z_ZIJIND], size_i)
writeValue(edge_u_vals, entry[Z_UIJIND], uij, size_i)
size_j = entry[Z_JLEN]
uji = getValue(edge_u_vals, entry[Z_UJIIND], size_j) +\
getValue(node_vals, entry[Z_XJIND], size_j) -\
getValue(edge_z_vals, entry[Z_ZJIIND], size_j)
writeValue(edge_u_vals, entry[Z_UJIIND], uji, size_j)
return entry
|
research/attention_ocr/python/datasets/testdata/fsns/download_data.py | gujralsanyam22/models | 82,518 | 12771925 | import urllib.request
import tensorflow as tf
import itertools
URL = 'http://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001'
DST_ORIG = 'fsns-00000-of-00001.orig'
DST = 'fsns-00000-of-00001'
KEEP_NUM_RECORDS = 5
print('Downloading %s ...' % URL)
urllib.request.urlretrieve(URL, DST_ORIG)
print('Writing %d records from %s to %s ...' %
(KEEP_NUM_RECORDS, DST_ORIG, DST))
with tf.io.TFRecordWriter(DST) as writer:
for raw_record in itertools.islice(tf.compat.v1.python_io.tf_record_iterator(DST_ORIG), KEEP_NUM_RECORDS):
writer.write(raw_record)
|
libs/pipeline_monitor/test.py | silentmonk/KubeFlow | 2,527 | 12771926 | from pipeline_monitor import prometheus_monitor as monitor
_labels= {'a_label_key':'a_label_value'}
@monitor(labels=_labels, name="test_monitor")
def test_log_inputs_and_outputs(arg1: int, arg2: int):
return arg1 + arg2
test_log_inputs_and_outputs(4, 5)
|
securityheaders/checkers/xpoweredby/__init__.py | th3cyb3rc0p/securityheaders | 151 | 12771943 | from .present import XPoweredByPresentChecker
__all__ = ['XPoweredByPresentChecker']
|
datasheets/tab.py | etcher-be/datasheets | 625 | 12771962 | import types
from collections import OrderedDict
import apiclient
import pandas as pd
from datasheets import exceptions, helpers
class Tab(object):
def __init__(self, tabname, workbook, drive_svc, sheets_svc):
"""Create a datasheets.Tab instance of an existing Google Sheets tab.
This class in not intended to be directly instantiated; it is created by
datasheets.Workbook.fetch_tab().
Args:
tabname (str): The name of the tab
workbook (datasheets.Workbook): The workbook instance that instantiated this tab
drive_svc (googleapiclient.discovery.Resource): An instance of Google Drive
sheets_svc (googleapiclient.discovery.Resource): An instance of Google Sheets
"""
self.tabname = tabname
self._workbook = workbook
self.drive_svc = drive_svc
self.sheets_svc = sheets_svc
# Get basic properties of the tab. We do this here partly
# to force failures early if tab can't be found
try:
self._update_tab_properties()
except apiclient.errors.HttpError as e:
if 'Unable to parse range'.encode() in e.content:
raise exceptions.TabNotFound('The given tab could not be found. Error generated: {}'.format(e))
else:
raise
self.url = 'https://docs.google.com/spreadsheets/d/{}#gid={}'.format(self.workbook.file_id, self.tab_id)
def __getattribute__(self, attr):
"""Get an attribute (variable or method) of this instance of this class
For client OAuth, before each user-facing method call this method will verify that the
access token is not expired and refresh it if it is.
We only refresh on user-facing method calls since otherwise we'd be refreshing multiple
times per user action (once for the user call, possibly multiple times for the private
method calls invoked by it).
"""
requested_attr = super(Tab, self).__getattribute__(attr)
if isinstance(requested_attr, types.MethodType) \
and not attr.startswith('_'):
self.workbook.client._refresh_token_if_needed()
return requested_attr
def __repr__(self):
msg = "<{module}.{name}(filename='{filename}', tabname='{tabname}')>"
return msg.format(module=self.__class__.__module__,
name=self.__class__.__name__,
filename=self.workbook.filename,
tabname=self.tabname)
@staticmethod
def _process_rows(raw_data):
"""Prepare a tab's raw data so that a pandas.DataFrame can be produced from it
Args:
raw_data (dict): The raw data from a tab
Returns:
list: A list of lists representing the raw_data, with one list per row in the tab
"""
raw_rows = raw_data['sheets'][0]['data'][0].get('rowData', {})
rows = []
for row_num, row in enumerate(raw_rows):
row_values = []
for col_num, cell in enumerate(row.get('values', {})):
# If the cell is empty, use None
value = cell.get('effectiveValue', {None: None})
# If a cell has an error in it (e.g. someone divides by zero, adds a number to
# text, etc.), then we raise an exception.
if 'errorValue' in value.keys():
cell_label = helpers.convert_cell_index_to_label(row_num+1, col_num+1)
error_type = value['errorValue'].get('type', 'unknown type')
error_message = value['errorValue'].get('message', 'unknown error message')
msg = 'Error of type "{}" within cell {} prevents fetching data. Message: "{}"'
raise exceptions.FetchDataError(msg.format(error_type, cell_label, error_message))
# value is a dict with only 1 key so this next(iter()) is safe
base_fmt, cell_value = next(iter(value.items()))
num_fmt = cell.get('effectiveFormat', {}).get('numberFormat')
if num_fmt:
cell_format = num_fmt['type']
else:
cell_format = base_fmt
formatting_fn = helpers._TYPE_CONVERSIONS[cell_format]
if cell_value:
try:
cell_value = formatting_fn(cell_value)
except ValueError:
pass
except TypeError:
raise TypeError(
"Mismatch exists in expected and actual data types for cell with "
"value '{value}'. Cell format is '{cell_format}' but cell value type "
"is '{value_type}'. To correct this, in Google Sheets set the "
"appropriate cell format or set it to Automatic".format(
value=cell_value,
cell_format=cell_format,
value_type=type(cell_value))
)
row_values.append(cell_value)
rows.append(row_values)
return rows
@property
def ncols(self):
""" Property for the number (int) of columns in the tab """
return self.properties['gridProperties']['columnCount']
@property
def nrows(self):
""" Property for the number (int) of rows in the tab """
return self.properties['gridProperties']['rowCount']
@property
def tab_id(self):
""" Property that gives the ID for the tab """
return self.properties['sheetId']
@property
def workbook(self):
""" Property for the workbook instance that this tab belongs to """
return self._workbook
def _add_rows_or_columns(self, kind, n):
request_body = {'appendDimension': {
'sheetId': self.tab_id,
'dimension': kind,
'length': n
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
self._update_tab_properties()
def _update_tab_properties(self):
raw_properties = self.sheets_svc.get(spreadsheetId=self.workbook.file_id,
ranges=self.tabname + '!A1',
fields='sheets/properties').execute()
self.properties = raw_properties['sheets'][0]['properties']
def add_rows(self, n):
"""Add n rows to the given tab
Args:
n (int): The number of rows to add
Returns:
None
"""
self._add_rows_or_columns(kind='ROWS', n=n)
def add_columns(self, n):
"""Add n columns to the given tab
Args:
n (int): The number of columns to add
Returns:
None
"""
self._add_rows_or_columns(kind='COLUMNS', n=n)
def align_cells(self, horizontal='LEFT', vertical='MIDDLE'):
"""Align all cells in the tab
Args:
horizontal (str): The horizontal alignment for cells. May be one of 'LEFT',
'CENTER', or 'RIGHT'
vertical (str): The vertical alignment for cells. May be one of 'TOP',
'MIDDLE', 'BOTTOM'
Returns:
None
"""
request_body = {'repeatCell': {
'range': {
'sheetId': self.tab_id,
'startRowIndex': 0,
'endRowIndex': self.nrows
},
'cell': {
'userEnteredFormat': {
'horizontalAlignment': horizontal,
'verticalAlignment': vertical,
}
},
'fields': 'userEnteredFormat(horizontalAlignment,verticalAlignment)'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def alter_dimensions(self, nrows=None, ncols=None):
"""Alter the dimensions of the current tab.
        If either dimension is left as None, that dimension will not be altered. Note that it is
        possible to set nrows or ncols to values smaller than the current tab dimensions, in which
        case the data outside the new dimensions will be eliminated.
Args:
nrows (int): The number of rows for the tab to have
ncols (int): The number of columns for the tab to have
Returns:
None
"""
request_body = {'updateSheetProperties': {
'properties': {
'sheetId': self.tab_id,
'gridProperties': {
'columnCount': ncols or self.ncols,
'rowCount': nrows or self.nrows
}
},
'fields': 'gridProperties(columnCount, rowCount)'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
self._update_tab_properties()
def append_data(self, data, index=True, autoformat=True):
"""Append data to the existing data in this tab.
If the new data exceeds the tab's current dimensions the tab will be resized to
accommodate it. Data headers will not be included among the appended data as they are
assumed to already be among the existing tab data.
        Args:
            data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
                pandas.DataFrame, a dict of lists, or a list of lists
            index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
            autoformat (bool): Whether to autoformat the tab after the data is appended
Returns:
None
"""
# Convert everything to lists of lists, which Google Sheets requires
headers, values = helpers._make_list_of_lists(data, index)
values = helpers._convert_nan_and_datelike_values(values)
body = {'values': values}
self.sheets_svc.values().append(spreadsheetId=self.workbook.file_id, range=self.tabname,
valueInputOption='USER_ENTERED', body=body).execute()
if autoformat:
self.autoformat(len(headers))
self._update_tab_properties()
def autoformat(self, n_header_rows):
"""Apply default stylings to the tab
This will apply the following stylings to the tab:
- Header rows will be formatted to a dark gray background and off-white text
- Font for all cells will be set to size 10 Proxima Nova
- Cells will be horizontally left-aligned and vertically middle-aligned
- Columns will be resized to display their largest entry
- Empty columns and rows will be trimmed from the tab
Args:
n_header_rows (int): The number of header rows (i.e. row of labels / metadata)
Returns:
None
"""
self.format_headers(nrows=n_header_rows)
self.format_font()
self.align_cells()
self.autosize_columns()
populated_cells = self.sheets_svc.values().get(spreadsheetId=self.workbook.file_id,
range=self.tabname).execute()
nrows = len(populated_cells['values'])
ncols = max(map(len, populated_cells['values']))
self.alter_dimensions(nrows=nrows, ncols=ncols)
self._update_tab_properties()
def autosize_columns(self):
"""Resize the widths of all columns in the tab to fit their data
Returns:
None
"""
request_body = {'autoResizeDimensions': {
'dimensions': {
'sheetId': self.tab_id,
'dimension': 'COLUMNS',
'startIndex': 0,
'endIndex': self.ncols
}
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def clear_data(self):
"""Clear all data from the tab while leaving formatting intact
Returns:
None
"""
self.sheets_svc.values().clear(spreadsheetId=self.workbook.file_id,
range=self.tabname,
body={}).execute()
def format_font(self, font='Proxima Nova', size=10):
"""Set the font and size for all cells in the tab
Args:
font (str): The name of the font to use
size (int): The size to set the font to
Returns:
None
"""
request_body = {'repeatCell': {
'range': {'sheetId': self.tab_id},
'cell': {
'userEnteredFormat': {
'textFormat': {
'fontSize': size,
'fontFamily': font
}
}
},
'fields': 'userEnteredFormat(textFormat(fontSize,fontFamily))'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def format_headers(self, nrows):
"""Format the first n rows of a tab.
The following stylings will be applied to these rows:
- Background will be set to dark gray with off-white text
- Font will be set to size 10 Proxima Nova
- Text will be horizontally left-aligned and vertically middle-aligned
- Rows will be made "frozen" so that when the user scrolls these rows stay visible
Args:
nrows (int): The number of rows of headers in the tab
Returns:
None
"""
body = {
'requests': [
{
'repeatCell': {
'range': {
'sheetId': self.tab_id,
'startRowIndex': 0,
'endRowIndex': nrows
},
'cell': {
'userEnteredFormat': {
'backgroundColor': {
'red': 0.26274511,
'green': 0.26274511,
'blue': 0.26274511
},
'horizontalAlignment': 'LEFT',
'textFormat': {
'foregroundColor': {
'red': 0.95294118,
'green': 0.95294118,
'blue': 0.95294118
},
'fontSize': 10,
'fontFamily': 'Proxima Nova',
'bold': False
}
}
},
'fields': 'userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)'
}
},
{
'updateSheetProperties': {
'properties': {
'sheetId': self.tab_id,
'gridProperties': {
'frozenRowCount': nrows
}
},
'fields': 'gridProperties(frozenRowCount)'
}
}
]
}
self.workbook.batch_update(body)
def fetch_data(self, headers=True, fmt='df'):
"""Retrieve the data within this tab.
Efforts are taken to ensure that returned rows are always the same length. If
headers=True, this length will be equal to the length of the headers. If headers=False,
this length will be equal to the longest row.
In either case, shorter rows will be padded with Nones and longer rows will be
truncated (i.e. if there are 3 headers then all rows will have 3 entries regardless
of the amount of populated cells they have).
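        Example (illustrative, assuming `tab` was obtained via datasheets.Workbook.fetch_tab):
            df = tab.fetch_data(fmt='df')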
Args:
headers (bool): If True, the first row will be used as the column names for the
pandas.DataFrame. Otherwise, a 0-indexed range will be used instead
fmt (str): The format in which to return the data. Accepted values: 'df', 'dict', 'list'
Returns:
When fmt='df' --> pandas.DataFrame
When fmt='dict' --> list of dicts, e.g.::
[{header1: row1cell1, header2: row1cell2},
{header1: row2cell1, header2: row2cell2},
...]
When fmt='list' --> tuple of header names, list of lists with row data, e.g.::
([header1, header2, ...],
[[row1cell1, row1cell2, ...], [row2cell1, row2cell2, ...], ...])
"""
if fmt not in ('df', 'dict', 'list'):
raise ValueError("Unexpected value '{}' for parameter `fmt`. "
"Accepted values are 'df', 'dict', and 'list'".format(fmt))
fields = 'sheets/data/rowData/values(effectiveValue,effectiveFormat/numberFormat/type)'
raw_data = self.sheets_svc.get(spreadsheetId=self.workbook.file_id, ranges=self.tabname,
includeGridData=True, fields=fields).execute()
processed_rows = self._process_rows(raw_data)
# filter out empty rows
max_idx = helpers._find_max_nonempty_row(processed_rows)
if max_idx is None:
if fmt == 'df':
return pd.DataFrame([])
elif fmt == 'dict':
return []
else:
return ([], [])
processed_rows = processed_rows[:max_idx+1]
# remove trailing Nones on rows
processed_rows = list(map(helpers._remove_trailing_nones, processed_rows))
if headers:
header_names = processed_rows.pop(0)
max_width = len(header_names)
else:
# Iterate through rows to find widest one
max_width = max(map(len, processed_rows))
header_names = list(range(max_width))
# resize the rows to match the number of column headers
processed_rows = [helpers._resize_row(row, max_width) for row in processed_rows]
if fmt == 'df':
df = pd.DataFrame(data=processed_rows, columns=header_names)
return df
elif fmt == 'dict':
make_row_dict = lambda row: OrderedDict(zip(header_names, row))
return list(map(make_row_dict, processed_rows))
else:
return header_names, processed_rows
def insert_data(self, data, index=True, autoformat=True):
"""Overwrite all data in this tab with the provided data.
All existing data in the tab will be removed, even if it might not have been overwritten
(for example, if there is 4x2 data already in the tab and only 2x2 data is being inserted).
If the dimensions of `data` are larger than the tab's current dimensions,
the tab will automatically be resized to fit it.
Args:
data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
pandas.DataFrame, a dict of lists, or a list of lists
            index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
            autoformat (bool): Whether to autoformat the tab after the data is inserted
Returns:
None
"""
# Convert everything to lists of lists, which Google Sheets requires
headers, values = helpers._make_list_of_lists(data, index)
values = headers + values # Include headers for inserts but not for appends
self.clear_data()
values = helpers._convert_nan_and_datelike_values(values)
body = {'values': values}
self.sheets_svc.values().update(spreadsheetId=self.workbook.file_id, range=self.tabname,
valueInputOption='USER_ENTERED', body=body).execute()
if autoformat:
self.autoformat(len(headers))
self._update_tab_properties()
|
examples/streamline1.py | yang69can/pyngl | 125 | 12771967 | #
# File:
# streamline1.py
#
# Synopsis:
# Draws streamlines on a map over water only.
#
# Category:
# Streamlines on a map.
#
# Author:
# <NAME>
#
# Date of original publication:
# December, 2004
#
# Description:
# This example draws streamlines over water on a map using a
# Cylindrical Equidistant map projection. The "add_cyclic"
# function is illustrated graphically.
#
# Effects illustrated:
# o Streamlines over maps.
# o Adding cyclic points.
# o Specifying colors by name.
# o Polylines.
# o Masking land areas.
#
# Output:
# This example produces two visualizations:
# 1.) Streamlines on a Cylindrical Equidistant map over water
# only highlighting missing cyclic points.
# 2.) Same as 1.) with the cyclic points added.
#
# Notes:
#
#
# Import Nio for reading netCDF files.
#
from __future__ import print_function
import Nio
#
# Import Ngl support functions.
#
import Ngl
import os
#
# Open the netCDF file.
#
file = Nio.open_file(os.path.join(Ngl.pynglpath("data"),"cdf","pop.nc"))
#
# Open a workstation.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type,"streamline1")
#
# Get the u/v and lat/lon variables.
#
urot = file.variables["urot"]
vrot = file.variables["vrot"]
lat2d = file.variables["lat2d"]
lon2d = file.variables["lon2d"]
#
# Set up resource list.
#
resources = Ngl.Resources()
#
# Don't advance frame, because we want to draw a couple of lines on
# plot later.
#
resources.nglFrame = False
#
# Coordinate arrays for data
#
resources.vfXArray = lon2d[::4,::4]
resources.vfYArray = lat2d[::4,::4]
resources.mpProjection = "CylindricalEquidistant"
resources.mpFillOn = True
resources.mpLandFillColor = "Tan1"
resources.mpOceanFillColor = "SkyBlue"
resources.mpInlandWaterFillColor = "SkyBlue"
resources.mpGridAndLimbOn = False
resources.tiMainString = "Streamline plot without cyclic point added"
plot = Ngl.streamline_map(wks,urot[::4,::4],vrot[::4,::4],resources)
#
# Add a couple of lines showing the area where there's a gap in the
# data because of lack of a cyclic point. (It should be obvious even
# without the lines.)
#
line_res = Ngl.Resources() # line resources
line_res.gsLineColor = "Red" # line color
line_res.gsLineThicknessF = 1.5 # line thickness scale
line_res.gsLineDashPattern = 2 # dashed lines
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
text_res = Ngl.Resources() # text resources
text_res.txFontHeightF = 0.03 # font height
text_res.txFontColor = "Red"
Ngl.text_ndc(wks,"dashed red line shows area with no data",0.5,0.17,text_res)
Ngl.frame(wks) # Now advance frame.
#
# Add cyclic points. Since lat2d/lon2d are 2D arrays, make them
# cyclic the same way you do the 2D data array.
#
u = Ngl.add_cyclic(urot[::4,::4])
v = Ngl.add_cyclic(vrot[::4,::4])
lon = Ngl.add_cyclic(lon2d[::4,::4])
lat = Ngl.add_cyclic(lat2d[::4,::4])
#
# Specify new coordinate arrays for data.
#
resources.vfXArray = lon
resources.vfYArray = lat
resources.tiMainString = "Streamline plot with cyclic point added"
plot = Ngl.streamline_map(wks,u,v,resources)
#
# Add a couple of lines showing the area where the missing data were.
# Make the lines solid so we can see them.
#
line_res.gsLineDashPattern = 0
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
Ngl.text_ndc(wks,"red line shows area that previously had no data",0.5,0.17,text_res)
Ngl.frame(wks)
Ngl.end()
|
tests/test_model_serializer_deserialize.py | aswinkp/swampdragon | 366 | 12772005 | from swampdragon.serializers.model_serializer import ModelSerializer
from swampdragon.testing.dragon_testcase import DragonTestCase
from .models import TextModel, SDModel
from datetime import datetime
from django.db import models
# to make sure none of the ModelSerializer variables are clobbering the data
MODEL_KEYWORDS = ('data', )
# TODO: support the rest of these field names
# MODEL_KEYWORDS = ('data', 'opts', 'initial', 'base_fields', 'm2m_fields', 'related_fields', 'errors')
class KeywordModel(SDModel):
data = models.TextField()
# TODO: support the rest of these field names
# opts = models.TextField()
# initial = models.TextField()
# base_fields = models.TextField()
# m2m_fields = models.TextField()
# related_fields = models.TextField()
# errors = models.TextField()
class KeywordModelSerializer(ModelSerializer):
class Meta:
model = KeywordModel
publish_fields = MODEL_KEYWORDS
update_fields = MODEL_KEYWORDS
class DateModel(SDModel):
date = models.DateTimeField()
class DateModelSerializer(ModelSerializer):
class Meta:
model = DateModel
publish_fields = ('date')
update_fields = ('date')
class TextModelSerializer(ModelSerializer):
class Meta:
model = TextModel
publish_fields = ('text')
update_fields = ('text')
class TestModelSerializer(DragonTestCase):
def test_deserialize_model(self):
data = {'text': 'foo'}
serializer = TextModelSerializer(data)
model_instance = serializer.save()
self.assertEqual(model_instance.text, data['text'])
def test_passing_invalid_data(self):
foo = 'text'
with self.assertRaises(Exception):
TextModelSerializer(foo)
def test_ignore_non_model_fields(self):
data = {'text': 'foo', 'random_field': 'val'}
serializer = TextModelSerializer(data)
model_instance = serializer.deserialize()
self.assertEqual(model_instance.text, data['text'])
def test_deserialize_field(self):
date = datetime.now()
data = {'date': str(date)}
serializer = DateModelSerializer(data)
object = serializer.save()
self.assertEqual(object.date, date)
def test_deserialize_keyword_field(self):
data = dict(zip(MODEL_KEYWORDS, MODEL_KEYWORDS))
serializer = KeywordModelSerializer(data)
object = serializer.save()
for attr in MODEL_KEYWORDS:
self.assertEqual(getattr(object, attr), attr)
|
workalendar/usa/pennsylvania.py | taiyeoguns/workalendar | 405 | 12772026 | from ..registry_tools import iso_register
from .core import UnitedStates
@iso_register('US-PA')
class Pennsylvania(UnitedStates):
"""Pennsylvania"""
include_good_friday = True
include_thanksgiving_friday = True
include_election_day_every_year = True
|
llvm/utils/lit/tests/Inputs/shtest-not/fail.py | medismailben/llvm-project | 2,338 | 12772054 | #!/usr/bin/env python
import print_environment
import sys
print_environment.execute()
sys.exit(1)
|
caffe2/python/layers/sampling_train.py | KevinKecc/caffe2 | 585 | 12772057 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
|
cpmpy/fancy.py | tias/hakank | 279 | 12772065 | """
Mr Greenguest puzzle (a.k.a fancy dress problem) in cpmpy.
Problem (and LPL) code in
http://diuflx71.unifr.ch/lpl/GetModel?name=/demo/demo2
'''
Mr. Greenfan wants to give a dress party where the male guests
must wear green dresses. The following rules are given:
1 If someone wears a green tie he has to wear a green shirt.
2 A guest may only wear green socks and a green shirt
if he wears a green tie or a green hat.
3 A guest wearing a green shirt or a green hat or who does
not wear green socks must wear a green tie.
4 A guest who is not dressed according to rules 1-3 must
pay a $11 entrance fee.
Mr Greenguest wants to participate but owns only a green shirt
(otherwise he would have to pay one for $9). He could buy
a green tie for $10, a green hat (used) for $2 and green socks
for $12.
What is the cheapest solution for Mr Greenguest to participate?
'''
Model created by <NAME>, <EMAIL>
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def fancy():
# variables
# t: tie
# h: hat
# r: shirt
# s: socks
# n: entrance fee
t = boolvar(name="t")
h = boolvar(name="h")
r = boolvar(name="r")
s = boolvar(name="s")
n = boolvar(name="n")
cost = intvar(0,100,name="cost")
model = Model(minimize=cost)
# constraints
# This is a straight translation from the LPL code
# ( (t->r) \/ n)
model += [ t.implies(r) | n]
# ( ((s \/ r) -> (t \/ h)) \/ n )
model += [ ( (s | r).implies(t|h)) | n]
# ( ((r \/ h \/ not s) -> t) \/ n )
model += [(r | h | ~(s)).implies(t | n)]
model += [cost == 10*t + 2*h + 12*s + 11*n]
ss = CPM_ortools(model)
num_solutions = 0
if ss.solve():
num_solutions += 1
print("cost:",cost.value())
print("t:",t.value(),"h:",h.value(),"r:",r.value(),"s:",s.value(),"n:",n.value())
print("num_solutions:", num_solutions)
fancy()
|
judge/supplementary_gt.py | zwangab91/ctw-baseline | 333 | 12772076 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import matplotlib.pyplot as plt
import os
import plot_tools
import settings
from pythonapi import anno_tools
def plt_print_text(*args):
print('plot_tools.print_text', *args[:-1])
with plt.style.context({
'pdf.fonttype': 42,
}):
plot_tools.print_text(*args)
plt_print_text.concurrent = False
print_text = plt_print_text
def main():
with open(settings.DATA_LIST) as f:
data_list = json.load(f)
lines = []
with open(settings.TRAIN) as f:
lines += f.read().splitlines()
with open(settings.VAL) as f:
lines += f.read().splitlines()
with open(settings.TEST_DETECTION_GT) as f:
lines += f.read().splitlines()
def gt2array(gt):
color = '#0f0'
a = list()
for char in anno_tools.each_char(gt):
if char['is_chinese']:
a.append({'polygon': char['polygon'], 'text': char['text'], 'color': color, 'fontsize': 10})
for char in gt['ignore']:
a.append({'polygon': char['polygon'], 'text': '', 'color': '#ff0', 'fontsize': 10})
return a
selected = [
('0000507', 0, 0, 2048, 2048),
('1023899', 0, 0, 2048, 2048),
('1031755', 0, 0, 2048, 2048),
('1044721', 0, 0, 2048, 2048),
('1046905', 0, 0, 2048, 2048),
('2000215', 0, 0, 2048, 2048),
('2004154', 0, 0, 2048, 2048),
('2005679', 0, 0, 2048, 2048),
('2024003', 0, 0, 2048, 2048),
('3005669', 0, 0, 2048, 2048),
('3029319', 0, 0, 2048, 2048),
('3040629', 0, 0, 2048, 2048),
('3001838', 0, 650, 700, 550),
('1041797', 530, 740, 700, 550),
]
if not os.path.isdir(settings.PRINTTEXT_DRAWING_DIR):
os.makedirs(settings.PRINTTEXT_DRAWING_DIR)
tasks = []
for image_id, x, y, w, h in selected:
i = [o['image_id'] for o in data_list['train'] + data_list['val'] + data_list['test_det']].index(image_id)
gt = json.loads(lines[i])
crop = (x, y, w, h)
file_name = os.path.join(settings.TRAINVAL_IMAGE_DIR if i < len(data_list['train'] + data_list['val']) else settings.TEST_IMAGE_DIR, gt['file_name'])
output = os.path.join(settings.PRINTTEXT_DRAWING_DIR, 'gt_{}_{}_{}_{}_{}.pdf'.format(image_id, x, y, w, h))
print_text(file_name, output, {
'boxes': gt2array(gt),
'crop': crop,
})
if __name__ == '__main__':
main()
|
nuplan/planning/training/preprocessing/test/test_collate_dataloader.py | motional/nuplan-devkit | 128 | 12772103 | import unittest
import torch.utils.data
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset
from nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder import RasterFeatureBuilder
from nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder import VectorMapFeatureBuilder
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
from nuplan.planning.training.preprocessing.feature_preprocessor import FeaturePreprocessor
from nuplan.planning.training.preprocessing.features.vector_map import VectorMap
from nuplan.planning.training.preprocessing.target_builders.ego_trajectory_target_builder import (
EgoTrajectoryTargetBuilder,
)
NUM_BATCHES = 20
class TestCollateDataLoader(unittest.TestCase):
"""
Tests data loading functionality
"""
def setUp(self) -> None:
"""Set up the test case."""
self.batch_size = 4
feature_preprocessor = FeaturePreprocessor(
cache_path=None,
feature_builders=[
RasterFeatureBuilder(
map_features={'LANE': 1.0, 'INTERSECTION': 1.0, 'STOP_LINE': 0.5, 'CROSSWALK': 0.5},
num_input_channels=4,
target_width=224,
target_height=224,
target_pixel_size=0.5,
ego_width=2.297,
ego_front_length=4.049,
ego_rear_length=1.127,
ego_longitudinal_offset=0.0,
baseline_path_thickness=1,
),
VectorMapFeatureBuilder(radius=20),
],
target_builders=[EgoTrajectoryTargetBuilder(TrajectorySampling(time_horizon=6.0, num_poses=12))],
force_feature_computation=False,
)
# Keep only a few scenarios instead of testing the whole extraction
scenario = get_test_nuplan_scenario()
scenarios = [scenario] * 3
dataset = ScenarioDataset(scenarios=scenarios, feature_preprocessor=feature_preprocessor)
self.dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
num_workers=2,
pin_memory=False,
drop_last=True,
collate_fn=FeatureCollate(),
)
def test_dataloader(self) -> None:
"""
Tests that the training dataloader can be iterated without errors
"""
dataloader = self.dataloader
dataloader_iter = iter(dataloader)
iterations = min(len(dataloader), NUM_BATCHES)
for _ in range(iterations):
features, targets = next(dataloader_iter)
self.assertTrue("vector_map" in features.keys())
vector_map: VectorMap = features["vector_map"]
self.assertEqual(vector_map.num_of_batches, self.batch_size)
self.assertEqual(len(vector_map.coords), self.batch_size)
self.assertEqual(len(vector_map.multi_scale_connections), self.batch_size)
if __name__ == '__main__':
unittest.main()
|
pypy/tool/pytest/app_rewrite.py | nanjekyejoannah/pypy | 333 | 12772105 | import re
ASCII_IS_DEFAULT_ENCODING = False
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _prepare_source(fn):
"""Read the source code for re-writing."""
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
# On Python versions which are not 2.7 and less than or equal to 3.1, the
# parser expects *nix newlines.
return stat, source
|
challenges/8.3.Function_Documentation_Strings/lesson_tests.py | pradeepsaiu/python-coding-challenges | 141 | 12772125 | import unittest
from main import *
class FunctionDocumentationStringsTests(unittest.TestCase):
def test_main(self):
self.assertIsNone(docstring_function())
self.assertIsNotNone(docstring_function.__doc__)
self.assertIsInstance(docstring_function.__doc__, str)
|
tables/table-alter/parse-turk-info.py | yash-srivastava19/sempre | 812 | 12772129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, shutil, re, argparse, json
from codecs import open
from itertools import izip
from collections import defaultdict
def main():
parser = argparse.ArgumentParser()
parser.add_argument('infile')
parser.add_argument('-p', '--plot', action='store_true',
help='Plot number of classes vs number of denotations')
group = parser.add_mutually_exclusive_group()
group.add_argument('-s', '--summarize', action='store_true',
help='Summarize the number of classes')
group.add_argument('-d', '--dump', action='store_true',
help='Dump the list of examples with at least one agreed classes')
group.add_argument('-D', '--dataset-file',
help='Take a dataset file and print a filtered list of only examples'
' with at least one agreed classes')
args = parser.parse_args()
data = []
with open(args.infile) as fin:
print >> sys.stderr, 'Reading from', args.infile
header = fin.readline().rstrip('\n').split('\t')
for line in fin:
data.append(dict(zip(header, line.rstrip('\n').split('\t'))))
print >> sys.stderr, 'Read', len(data), 'records.'
# ['id', 'numDerivs', 'allTurkedTables', 'agreedTurkedTables',
# 'origTableTarget', 'origTableTurkedTarget', 'origTableFlag',
# 'numClassesMatched', 'numDerivsMatched']
# Classify examples
no_derivs = []
orig_table_mismatch = []
no_classes_matched = []
classes_matched = defaultdict(list)
plt_num_classes, plt_num_derivs = [], []
for record in data:
if record['numDerivs'] == '0':
no_derivs.append(record)
assert record['numDerivsMatched'] == '0'
continue
if record['origTableFlag'] == 'mismatched':
orig_table_mismatch.append(record)
#assert record['numDerivsMatched'] == '0'
continue
if record['numClassesMatched'] == '0':
no_classes_matched.append(record)
assert record['numDerivsMatched'] == '0'
continue
assert record['numDerivsMatched'] != '0'
num_classes = int(record['numClassesMatched'])
plt_num_classes.append(num_classes)
plt_num_derivs.append(int(record['numDerivsMatched']))
if num_classes < 10:
classes_matched[num_classes].append(record)
else:
classes_matched['> 10'].append(record)
if args.summarize:
print 'No derivs:', len(no_derivs)
print 'Original table mismatched:', len(orig_table_mismatch)
print 'No classes matched:', len(no_classes_matched)
print 'Classes matched:'
total = 0
for key in sorted(classes_matched):
num_matches = len(classes_matched[key])
total += num_matches
print ' {}: {} (cum = {})'.format(key, num_matches, total)
if args.plot:
import matplotlib.pyplot as plt
plt.scatter(plt_num_classes, plt_num_derivs)
plt.show()
if args.dump:
for key in sorted(classes_matched):
for x in classes_matched[key]:
print x['id']
if args.dataset_file:
indices = set(int(x['id'].replace('nt-', ''))
for y in classes_matched.values() for x in y)
count_all, count_filtered = 0, 0
with open(args.dataset_file, 'r', 'utf8') as fin:
for i, line in enumerate(fin):
count_all += 1
if i in indices:
print line.rstrip('\n')
count_filtered += 1
print >> sys.stderr, 'Printed {} / {} lines'.format(count_filtered, count_all)
if __name__ == '__main__':
main()
|
simple/game_loop_global.py | loyalgarlic/snakepit-game | 124 | 12772131 | import asyncio
from aiohttp import web
async def handle(request):
    with open("index.html", 'rb') as index:
        content = index.read()
return web.Response(body=content, content_type='text/html')
async def wshandler(request):
app = request.app
ws = web.WebSocketResponse()
await ws.prepare(request)
if app["game_loop"] is None or \
app["game_loop"].cancelled():
app["game_loop"] = asyncio.ensure_future(game_loop(app))
# this is required to propagate exceptions
app["game_loop"].add_done_callback(lambda t: t.result()
if not t.cancelled() else None)
app["sockets"].append(ws)
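    # NOTE: `msg.tp` and `web.MsgType` below follow an older aiohttp API
    # (assumption: pre-3.0 aiohttp); newer releases use `msg.type` and
    # `web.WSMsgType` instead.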
while 1:
msg = await ws.receive()
if msg.tp == web.MsgType.text:
ws.send_str("Pressed key code: {}".format(msg.data))
print("Got message %s" % msg.data)
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
app["sockets"].remove(ws)
if len(app["sockets"]) == 0:
print("Stopping game loop")
app["game_loop"].cancel()
print("Closed connection")
return ws
async def game_loop(app):
print("Game loop started")
while 1:
for ws in app["sockets"]:
ws.send_str("game loop passed")
await asyncio.sleep(2)
app = web.Application()
app["sockets"] = []
app["game_loop"] = None
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
protonfixes/gamefixes/280200.py | Citiroller/protonfixes | 213 | 12772140 | """ Game fix for Eterium
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Install xna40
"""
util.protontricks('xna40')
|
env/WritePolicyEnv.py | jeanqasaur/jeeves | 253 | 12772172 | import JeevesLib
# import fast.AST
# from collections import defaultdict
class WritePolicyEnv:
def __init__(self):
self.writers = {}
def mapPrimaryContext(self, ivar, ctxt):
self.writers[ivar] = ctxt
# This function associates a new set of write policies with a label.
def addWritePolicy(self, label, policy, newWriter):
# If the label is associated with a writer, then associate it with the
# new write policies.
if self.writers.has_key(label):
ictxt = self.writers[label]
# Make a new label mapped to the same writer.
newLabel = JeevesLib.mkLabel(label.name)
self.mapPrimaryContext(newLabel, ictxt)
# Associate the new policies with this new label.
JeevesLib.restrict(newLabel
, lambda oc:
JeevesLib.jand(lambda: label
, lambda: JeevesLib.jand(
lambda: policy(ictxt)(oc)
, lambda: policy(newWriter)(oc))))
return newLabel
# Otherwise return the label as is.
else:
return label
|
tests.live/Python/test_live.py | Bradben/iqsharp | 115 | 12772183 | #!/bin/env python
# -*- coding: utf-8 -*-
##
# test_live.py: Tests Azure Quantum functionality Live.
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
## IMPORTS ##
import pytest
import warnings
## TESTS ##
def connect():
import qsharp.azure
return qsharp.azure.connect(
credential="environment"
)
def has_completed(job) -> bool:
"""Check if the job has completed."""
return (
job.status == "Succeeded"
or job.status == "Failed"
or job.status == "Cancelled"
)
def wait_until_completed(job):
import time
import qsharp.azure
max_poll_wait_secs = 5
timeout_secs = 30
poll_wait = 0.2
total_time = 0.
while not has_completed(job):
if total_time >= timeout_secs:
raise TimeoutError(f"The wait time has exceeded {timeout_secs} seconds.")
time.sleep(poll_wait)
total_time += poll_wait
job = qsharp.azure.status(job.id)
poll_wait = (
max_poll_wait_secs
if poll_wait >= max_poll_wait_secs
else poll_wait * 1.5
)
def test_ionq_targets():
"""
Tests that we can fetch targets from the service,
and that the workspace includes the targets we need for submission
"""
targets = connect()
assert len(targets) > 2
target_ids = [t.id for t in targets]
assert 'ionq.simulator' in target_ids
assert 'ionq.qpu' in target_ids
def test_ionq_submit():
"""
Test that the SampleQrng operation can be submitted successfully on the ionq.simulator
"""
import time
import qsharp
from Microsoft.Quantum.Tests import SampleQrng
# Make sure we can simulate locally:
count = 3
result = SampleQrng.simulate(count=count, name='andres')
assert len(result) == count
import qsharp.azure
connect()
t = qsharp.azure.target("ionq.simulator")
assert isinstance(t, qsharp.azure.AzureTarget)
assert t.id == "ionq.simulator"
job = qsharp.azure.submit(SampleQrng, count=count, name="andres")
assert isinstance(job, qsharp.azure.AzureJob)
assert not job.id == ''
print("Submitted job: ", job.id)
try:
wait_until_completed(job)
except TimeoutError:
warnings.warn("IonQ execution exceeded timeout. Skipping fetching results.")
else:
job = qsharp.azure.status()
assert isinstance(job, qsharp.azure.AzureJob)
assert job.status == "Succeeded"
histogram = {
'[0,0,0]': 0.125,
'[0,0,1]': 0.125,
'[0,1,0]': 0.125,
'[0,1,1]': 0.125,
'[1,0,0]': 0.125,
'[1,0,1]': 0.125,
'[1,1,0]': 0.125,
'[1,1,1]': 0.125
}
retrieved_histogram = qsharp.azure.output()
assert isinstance(retrieved_histogram, dict)
assert histogram == retrieved_histogram
def test_honeywell_targets():
"""
Tests that we can fetch targets from the service,
and that the workspace includes the targets we need for submission
"""
targets = connect()
assert len(targets) > 2
target_ids = [t.id for t in targets]
assert 'honeywell.hqs-lt-s1' in target_ids
assert 'honeywell.hqs-lt-s1-apival' in target_ids
def test_honeywell_submit():
"""
Test that the RunTeleport operation can be submitted successfully on the honeywell apival target
"""
import qsharp
from Microsoft.Quantum.Tests import RunTeleport
# Make sure we can simulate locally:
expected = True
result = RunTeleport.simulate(doPlus=expected)
assert result == 0 if expected else 1
import qsharp.azure
connect()
t = qsharp.azure.target("honeywell.hqs-lt-s1-apival")
assert isinstance(t, qsharp.azure.AzureTarget)
assert t.id == "honeywell.hqs-lt-s1-apival"
job = qsharp.azure.submit(RunTeleport, doPlus=expected)
assert isinstance(job, qsharp.azure.AzureJob)
assert not job.id == ''
print("Submitted job: ", job.id)
try:
wait_until_completed(job)
except TimeoutError:
warnings.warn("Honeywell execution exceeded timeout. Skipping fetching results.")
else:
job = qsharp.azure.status()
assert isinstance(job, qsharp.azure.AzureJob)
if job.status == "Succeeded":
retrieved_histogram = qsharp.azure.output()
assert isinstance(retrieved_histogram, dict)
assert '0' in retrieved_histogram
|
tools/ffmpeg/playmany.py | AnantTiwari-Naman/pyglet | 1,160 | 12772200 | """
Usage
playmany.py
Uses media_player to play a sequence of samples and record debug info
A configuration must be active, see command configure.py
If the active configuration disallows overwriting debug data, it will do nothing.
If a playlist was provided at session creation, then only the samples in the
playlist will be played, otherwise all files in samples_dir.
"""
import os
import subprocess
import sys
import fs
import mpexceptions
def main():
try:
pathserv = fs.get_path_info_for_active_session()
except mpexceptions.ExceptionUndefinedSamplesDir:
print("The env var 'pyglet_mp_samples_dir' is not defined.")
return 1
except mpexceptions.ExceptionNoSessionIsActive:
print("*** Error, no session active.")
return 1
try:
play_many(pathserv, timeout=120)
except mpexceptions.ExceptionAttemptToBreakRawDataProtection:
print("*** Error, attempt to overwrite raw data when protect_raw_data is True.")
return 1
return 0
def play_many(pathserv, timeout=120):
"""plays the samples in the session playlist for the current active session
timeout: max time allowed to play a sample, default is 120 seconds
"""
conf = fs.get_session_configuration(pathserv)
if conf["dev_debug"]:
pass
else:
if conf["protect_raw_data"]:
raise mpexceptions.ExceptionAttemptToBreakRawDataProtection()
playlist_gen = pathserv.session_playlist_generator()
core_play_many(pathserv, playlist_gen, timeout=timeout)
def core_play_many(pathserv, playlist_gen, timeout=120):
for sample, filename in playlist_gen:
dbg_file = pathserv.dbg_filename(sample)
print("playmany playing:", filename)
cmdline = [os.path.join(fs.get_media_player_path(), "media_player.py"),
"--debug",
"--outfile=" + dbg_file,
filename]
killed, returncode = cmd__py3(cmdline, timeout=timeout)
if killed:
print("WARNING: killed by timeout, file: %s" % filename)
def cmd__py3(cmdline, bufsize=-1, cwd=None, timeout=60):
"""runs a .py script as a subprocess with the same python as the caller
cmdline: list [<scriptname>, arg1, ...]
    timeout: time in seconds; subprocess will be killed if it is still running
at that time.
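    Example (illustrative):
        killed, returncode = cmd__py3(["media_player.py", "sample.mp4"], timeout=60)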
"""
# use the same python as the caller to run the script
cmdline.insert(0, "-u")
cmdline.insert(0, sys.executable)
p = subprocess.Popen(
cmdline,
bufsize = bufsize,
shell = False,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = cwd
)
killed = True
try:
out, err = p.communicate(timeout=timeout)
killed = False
except subprocess.TimeoutExpired:
p.kill()
out, err = p.communicate()
## print("out:", out)
## print("err:", err)
returncode = p.returncode
return killed, returncode
def sysargs_to_mainargs():
"""builds main args from sys.argv"""
if len(sys.argv) > 1 and sys.argv[1].startswith("--help"):
print(__doc__)
sys.exit(1)
if __name__ == "__main__":
sysargs_to_mainargs()
main()
|
core/utils/render_utils.py | hyunynim/DIST-Renderer | 176 | 12772220 | import trimesh
import numpy as np
import cv2
import copy
import pickle
import torch
import pdb
def depth2normal(depth, f_pix_x, f_pix_y=None):
'''
To compute a normal map from the depth map
Input:
- depth: torch.Tensor (H, W)
- f_pix_x: K[0, 0]
- f_pix_y: K[1, 1]
Return:
- normal: torch.Tensor (H, W, 3)
'''
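    # The normal at each pixel is proportional to (dz/dx, dz/dy, -1): depth
    # gradients are taken by central differences scaled by the focal lengths in
    # pixels, then normalised; background pixels (depth == 0 or > 1e5) are zeroed.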
if f_pix_y is None:
f_pix_y = f_pix_x
h, w = depth.shape
eps = 1e-12
bg_flag = (depth > 1e5) | (depth == 0)
depth[bg_flag] = 0.0
depth_left, depth_right, depth_up, depth_down = torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w)
if depth.get_device() != -1:
device_id = depth.get_device()
depth_left, depth_right, depth_up, depth_down = depth_left.to(device_id), depth_right.to(device_id), depth_up.to(device_id), depth_down.to(device_id)
depth_left[:, 1:w-1] = depth[:, :w-2].clone()
depth_right[:, 1:w-1] = depth[:, 2:].clone()
depth_up[1:h-1, :] = depth[:h-2, :].clone()
depth_down[1:h-1, :] = depth[2:, :].clone()
dzdx = (depth_right - depth_left) * f_pix_x / 2.0
dzdy = (depth_down - depth_up) * f_pix_y / 2.0
normal = torch.stack([dzdx, dzdy, -torch.ones_like(dzdx)]).permute(1, 2, 0)
normal_length = torch.norm(normal, p=2, dim=2)
normal = normal / (normal_length + 1e-12)[:,:,None]
normal[bg_flag] = 0.0
return normal
def quad2rotation(quad):
'''
input: torch.Tensor (4)
'''
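    # Quaternion convention here is (qr, qi, qj, qk) = (w, x, y, z); e.g. the
    # identity quaternion (1, 0, 0, 0) maps to the 3x3 identity matrix.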
bs = quad.shape[0]
qr, qi, qj, qk = quad[:,0], quad[:,1], quad[:,2], quad[:,3]
rot_mat = torch.zeros(bs, 3, 3).to(quad.get_device())
rot_mat[:,0,0] = 1 - 2 * (qj ** 2 + qk ** 2)
rot_mat[:,0,1] = 2 * (qi * qj - qk * qr)
rot_mat[:,0,2] = 2 * (qi * qk + qj * qr)
rot_mat[:,1,0] = 2 * (qi * qj + qk * qr)
rot_mat[:,1,1] = 1 - 2 * (qi ** 2 + qk ** 2)
rot_mat[:,1,2] = 2 * (qj * qk - qi * qr)
rot_mat[:,2,0] = 2 * (qi * qk - qj * qr)
rot_mat[:,2,1] = 2 * (qj * qk + qi * qr)
rot_mat[:,2,2] = 1 - 2 * (qi ** 2 + qj ** 2)
return rot_mat
def get_camera_from_tensor(inputs):
N = len(inputs.shape)
if N == 1:
inputs = inputs.unsqueeze(0)
quad, T = inputs[:,:4], inputs[:,4:]
R = quad2rotation(quad)
RT = torch.cat([R, T[:,:,None]], 2)
if N == 1:
RT = RT[0]
return RT
def get_tensor_from_camera(RT):
gpu_id = -1
if type(RT) == torch.Tensor:
if RT.get_device() != -1:
RT = RT.detach().cpu()
gpu_id = RT.get_device()
RT = RT.numpy()
from mathutils import Matrix
R, T = RT[:,:3], RT[:,3]
rot = Matrix(R)
quad = rot.to_quaternion()
tensor = np.concatenate([quad, T], 0)
tensor = torch.from_numpy(tensor).float()
if gpu_id != -1:
tensor = tensor.to(gpu_id)
return tensor
def downsize_camera_intrinsic(intrinsic, factor):
'''
Input:
- intrinsic type: np.array (3,3)
- factor int
'''
img_h, img_w = int(2 * intrinsic[1,2]), int(2 * intrinsic[0,2])
img_h_new, img_w_new = img_h / factor, img_w / factor
if (img_h_new - round(img_h_new)) > 1e-12 or (img_w_new - round(img_w_new)) > 1e-12:
raise ValueError('The image size {0} should be divisible by the factor {1}.'.format((img_h, img_w), factor))
intrinsic_new = copy.deepcopy(intrinsic)
intrinsic_new[0,:] = intrinsic[0,:] / factor
intrinsic_new[1,:] = intrinsic[1,:] / factor
return intrinsic_new
def sample_points_from_mesh(mesh, N=30000):
'''
Return:
-- points: np.array (N, 3)
'''
points = trimesh.sample.sample_surface(mesh, N)[0]
return points
def transform_point_cloud(points):
'''
solve the mismatch between the point cloud coordinate and the mesh obj.
'''
points_new = copy.deepcopy(points)
points_new[:,1] = -points[:,2]
points_new[:,2] = points[:,1]
return points_new
def read_pickle(fname):
with open(fname, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def save_render_output(render_output, fname):
depth_rendered, normal_rendered, valid_mask_rendered, _ = render_output
output = {}
output['depth'] = depth_rendered.detach().cpu().numpy()
output['normal'] = normal_rendered.detach().cpu().numpy()
output['valid_mask'] = valid_mask_rendered.detach().cpu().numpy()
save_pkl(output, fname)
def save_pkl(data, fname):
with open(fname, 'wb') as f:
pickle.dump(data, f)
|
packages/core/minos-microservice-aggregate/minos/aggregate/transactions/repositories/abc.py | minos-framework/minos-python | 247 | 12772229 | from __future__ import (
annotations,
)
from abc import (
ABC,
abstractmethod,
)
from datetime import (
datetime,
)
from typing import (
AsyncIterator,
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
Inject,
Injectable,
Lock,
LockPool,
NotProvidedException,
PoolFactory,
SetupMixin,
)
from ...exceptions import (
TransactionNotFoundException,
)
from ..entries import (
TransactionEntry,
TransactionStatus,
)
@Injectable("transaction_repository")
class TransactionRepository(ABC, SetupMixin):
"""Transaction Repository base class."""
@Inject()
def __init__(
self, lock_pool: Optional[LockPool] = None, pool_factory: Optional[PoolFactory] = None, *args, **kwargs
):
super().__init__(*args, **kwargs)
if lock_pool is None and pool_factory is not None:
lock_pool = pool_factory.get_pool("lock")
if lock_pool is None:
raise NotProvidedException("A lock pool instance is required.")
self._lock_pool = lock_pool
async def submit(self, transaction: TransactionEntry) -> TransactionEntry:
"""Submit a new or updated transaction to store it on the repository.
:param transaction: The transaction to be stored.
:return: This method does not return anything.
"""
return await self._submit(transaction)
@abstractmethod
async def _submit(self, transaction: TransactionEntry) -> TransactionEntry:
raise NotImplementedError
# noinspection PyUnusedLocal
async def get(self, uuid: UUID, **kwargs) -> TransactionEntry:
"""Get a ``TransactionEntry`` from its identifier.
:param uuid: Identifier of the ``RootEntity``.
:param kwargs: Additional named arguments.
:return: The ``TransactionEntry`` instance.
"""
try:
return await self.select(uuid=uuid).__anext__()
except StopAsyncIteration:
raise TransactionNotFoundException(f"Transaction identified by {uuid!r} does not exist.")
async def select(
self,
uuid: Optional[UUID] = None,
uuid_ne: Optional[UUID] = None,
uuid_in: Optional[tuple[UUID, ...]] = None,
destination_uuid: Optional[UUID] = None,
status: Optional[TransactionStatus] = None,
status_in: Optional[tuple[str, ...]] = None,
event_offset: Optional[int] = None,
event_offset_lt: Optional[int] = None,
event_offset_gt: Optional[int] = None,
event_offset_le: Optional[int] = None,
event_offset_ge: Optional[int] = None,
updated_at: Optional[datetime] = None,
updated_at_lt: Optional[datetime] = None,
updated_at_gt: Optional[datetime] = None,
updated_at_le: Optional[datetime] = None,
updated_at_ge: Optional[datetime] = None,
**kwargs,
    ) -> AsyncIterator[TransactionEntry]:
        """Get the transactions matching the given filters from the repository.
:param uuid: Transaction identifier equal to the given value.
        :param uuid_ne: Transaction identifier not equal to the given value.
        :param uuid_in: Transaction identifier within the given values.
        :param destination_uuid: Destination Transaction identifier equal to the given value.
        :param status: Transaction status equal to the given value.
        :param status_in: Transaction status within the given values.
        :param event_offset: Event offset equal to the given value.
        :param event_offset_lt: Event offset lower than the given value.
        :param event_offset_gt: Event offset greater than the given value.
        :param event_offset_le: Event offset lower or equal to the given value.
        :param event_offset_ge: Event offset greater or equal to the given value.
:param updated_at: Updated at equal to the given value.
:param updated_at_lt: Updated at lower than the given value.
:param updated_at_gt: Updated at greater than the given value.
:param updated_at_le: Updated at lower or equal to the given value.
:param updated_at_ge: Updated at greater or equal to the given value.
:param kwargs: Additional named arguments.
:return: An asynchronous iterator.
"""
generator = self._select(
uuid=uuid,
uuid_ne=uuid_ne,
uuid_in=uuid_in,
destination_uuid=destination_uuid,
status=status,
status_in=status_in,
event_offset=event_offset,
event_offset_lt=event_offset_lt,
event_offset_gt=event_offset_gt,
event_offset_le=event_offset_le,
event_offset_ge=event_offset_ge,
updated_at=updated_at,
updated_at_lt=updated_at_lt,
updated_at_gt=updated_at_gt,
updated_at_le=updated_at_le,
updated_at_ge=updated_at_ge,
**kwargs,
)
# noinspection PyTypeChecker
async for entry in generator:
yield entry
@abstractmethod
async def _select(self, **kwargs) -> AsyncIterator[TransactionEntry]:
raise NotImplementedError
def write_lock(self) -> Lock:
"""Get write lock.
:return: An asynchronous context manager.
"""
return self._lock_pool.acquire("aggregate_transaction_write_lock")
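# Illustrative sketch only (not part of the public API): a minimal in-memory
# subclass showing how the two abstract hooks could be implemented. The name
# ``_InMemoryTransactionRepository`` and the dict-based storage are assumptions
# for demonstration; real implementations persist entries in a database.
class _InMemoryTransactionRepository(TransactionRepository):
    """Toy repository that keeps transactions in a dict (illustration only)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._storage: dict[UUID, TransactionEntry] = {}
    async def _submit(self, transaction: TransactionEntry) -> TransactionEntry:
        self._storage[transaction.uuid] = transaction
        return transaction
    async def _select(self, uuid: Optional[UUID] = None, **kwargs) -> AsyncIterator[TransactionEntry]:
        for entry in self._storage.values():
            if uuid is None or entry.uuid == uuid:
                yield entry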
|
pkg/package.py | bruce30262/idapkg | 125 | 12772256 |
"""
Package-related classes and methods are in pkg.package module. All constructing arguments are accessible via property.
"""
import ctypes
import glob
import json
import os
import random
import runpy
import shutil
import sys
import traceback
import zipfile
import ida_kernwin
import ida_loader
import ida_diskio
from .config import g
from .env import ea as current_ea, os as current_os
from .internal_api import invalidate_proccache, get_extlangs, idausr_remove, idausr_add
from .logger import getLogger
from .vendor.semantic_version import Version, Spec
from .virtualenv_utils import FixInterpreter
__all__ = ["LocalPackage", "InstallablePackage"]
log = getLogger(__name__)
def rename(old, new):
if sys.platform == 'win32':
if not ctypes.windll.kernel32.MoveFileExA(str(old), str(new), 0):
raise WindowsError(ctypes.windll.kernel32.GetLastError())
else:
return os.rename(old, new)
def _get_native_suffix():
if current_os == 'win':
suffix = '.dll'
elif current_os == 'linux':
suffix = '.so'
elif current_os == 'mac':
suffix = '.dylib'
else:
raise Exception("unknown os: %r" % current_os)
return suffix
class LocalPackage(object):
def __init__(self, id, path, version):
self.id = str(id)
self.version = str(version)
self.path = os.path.normpath(path)
def remove(self):
"""
Removes a package.
"""
idausr_remove(self.path)
with FixInterpreter():
for script in self.info().get('uninstallers', []):
script = os.path.join(self.path, script)
try:
runpy.run_path(script)
except Exception:
# XXX: How can I rollback this?
traceback.print_exc()
log.warn(
"Uninstallation script %r exited with exception!", script)
if not LocalPackage._remove_package_dir(self.path):
log.error(
"Package directory is in use and will be removed after restart.")
            # If nothing has been modified, the only cases where removal fails
            # are a custom ld.so or Windows; the latter case is common.
new_path = self.path.rstrip('/\\') + '-removed'
if os.path.exists(new_path):
new_path += '-%x' % random.getrandbits(64)
rename(self.path, new_path)
# XXX: is it good to mutate this object?
self.path = new_path
log.info("Done!")
def install(self, remove_on_fail=False):
"""
Run python scripts specified by :code:`installers` field in `info.json`.
:returns: None
"""
orig_cwd = os.getcwd()
try:
os.chdir(self.path)
info = self.info()
scripts = info.get('installers', [])
if not isinstance(scripts, list):
raise Exception(
'%r: Corrupted package: installers key is not list' % self.id)
with FixInterpreter():
for script in scripts:
log.info('Executing installer path %r...', script)
script = os.path.join(self.path, script)
runpy.run_path(script)
except Exception:
log.info('Installer failed!')
if remove_on_fail:
self.remove()
raise
finally:
os.chdir(orig_cwd)
def load(self, force=False):
"""
        Actually does :code:`ida_loader.load_plugin(path)` for each plugin, and updates the IDAUSR variable.
"""
if not force and self.path in ida_diskio.get_ida_subdirs(''):
# Already loaded, just update sys.path for python imports
if self.path not in sys.path:
sys.path.append(self.path)
return
# XXX: find a more efficient way to ensure dependencies
errors = []
for dependency in self.info().get('dependencies', {}).keys():
dep = LocalPackage.by_name(dependency)
if not dep:
errors.append('Dependency not found: %r' % dependency)
continue
dep.load()
if errors:
for error in errors:
log.error(error)
return
def handler():
# Load plugins immediately
# processors / loaders will be loaded on demand
if self.path not in sys.path:
sys.path.append(self.path)
# Update IDAUSR variable
idausr_add(self.path)
# Immediately load compatible plugins
self._find_loadable_modules('plugins', ida_loader.load_plugin)
# Find loadable processor modules, and if exists, invalidate cached process list (proccache).
invalidates = []
self._find_loadable_modules('procs', invalidates.append)
if invalidates:
invalidate_proccache()
# Run in main thread
ida_kernwin.execute_sync(handler, ida_kernwin.MFF_FAST)
def populate_env(self):
"""
A passive version of load; it only populates IDAUSR variable.
It's called at :code:`idapythonrc.py`.
"""
errors = []
for dependency in self.info().get('dependencies', {}).keys():
dep = LocalPackage.by_name(dependency)
if not dep:
errors.append('Dependency not found: %r' % dependency)
continue
dep.populate_env()
if errors:
for error in errors:
log.error(error)
return
idausr_add(self.path)
if self.path not in sys.path:
sys.path.append(self.path)
def plugins(self):
return self._collect_modules('plugins')
def loaders(self):
return self._collect_modules('loaders')
def procs(self):
return self._collect_modules('procs')
def _collect_modules(self, category):
result = []
self._find_loadable_modules(category, result.append)
return result
def _find_loadable_modules(self, subdir, callback):
# Load modules in external languages (.py, .idc, ...)
for suffix in ['.' + x.fileext for x in get_extlangs()]:
expr = os.path.join(self.path, subdir, '*' + suffix)
for path in glob.glob(expr):
callback(str(path))
# Load native modules
for suffix in (_get_native_suffix(),):
expr = os.path.join(self.path, subdir, '*' + suffix)
for path in glob.glob(expr):
is64 = path[:-len(suffix)][-2:] == '64'
if is64 == (current_ea == 64):
callback(str(path))
def info(self):
"""
Loads :code:`info.json` and returns a parsed JSON object.
:rtype: dict
"""
with open(os.path.join(self.path, 'info.json'), 'rb') as _file:
return json.load(_file)
@staticmethod
def by_name(name, prefix=None):
"""
Returns a package with specified `name`.
:rtype: LocalPackage
"""
if prefix is None:
prefix = g['path']['packages']
path = os.path.join(prefix, name)
# check if the folder exists
if not os.path.isdir(path):
return None
# filter removed package
removed = os.path.join(path, '.removed')
if os.path.isfile(removed):
LocalPackage._remove_package_dir(path)
return None
info_json = os.path.join(path, 'info.json')
if not os.path.isfile(info_json):
log.warn('Warning: info.json is not found at %r', path)
return None
with open(info_json, 'rb') as _file:
try:
info = json.load(_file)
except Exception:
traceback.print_exc()
log.warn('Warning: info.json is not valid at %r', path)
return None
result = LocalPackage(
id=info['_id'], path=path, version=info['version'])
return result
@staticmethod
def all(disabled=False):
"""
List all packages installed at :code:`g['path']['packages']`.
:rtype: list(LocalPackage)
"""
prefix = g['path']['packages']
res = os.listdir(prefix)
res = (x for x in res if os.path.isdir(os.path.join(prefix, x)))
res = (LocalPackage.by_name(x) for x in res)
res = (x for x in res if x)
res = [x for x in res if (x.id in g['ignored_packages']) == disabled]
return res
@staticmethod
def _remove_package_dir(path):
errors = []
def onerror(_listdir, _path, exc_info):
log.error("%s: %s", _path, str(exc_info[1]))
errors.append(exc_info[1])
shutil.rmtree(path, onerror=onerror)
if errors:
# Mark for later removal
open(os.path.join(path, '.removed'), 'wb').close()
return not errors
def __repr__(self):
return '<LocalPackage id=%r path=%r version=%r>' % \
(self.id, self.path, self.version)
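# Illustrative usage sketch (not called anywhere in this module). The package
# id 'ifred' below is only an assumed example; any installed package id works.
def _example_load_installed_package(name='ifred'):
    pkg = LocalPackage.by_name(name)
    if pkg is None:
        log.info("Package %r is not installed.", name)
        return None
    log.info("Loading %r (version %s) from %r...", pkg.id, pkg.version, pkg.path)
    pkg.load()
    return pkg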
class InstallablePackage(object):
def __init__(self, id, name, version, description, author, repo):
self.id = str(id)
self.name = name
self.version = str(version)
self.description = description
self.repo = repo
self.author = author
def install(self, upgrade=False):
"""
Just calls :code:`InstallablePackage.install_from_repo(self.repo, self.id, upgrade)`.
"""
install_from_repo(self.repo, self.id, allow_upgrade=upgrade)
def __repr__(self):
return '<InstallablePackage id=%r version=%r repo=%r>' % \
(self.id, self.version, self.repo)
def install_from_repo(repo, name, version_spec='*', allow_upgrade=False, _visited=None):
"""
This method downloads a package satisfying spec.
.. note ::
        The function blocks until all dependencies are installed.
        Run it in a separate thread if possible.
"""
top_level = _visited is None
_visited = _visited or {}
if name in _visited:
log.warn("Cyclic dependency found when installing %r <-> %r",
name, _visited)
return
prev = LocalPackage.by_name(name)
_version_spec = Spec(version_spec)
satisfies_local = prev and Version(prev.version) in _version_spec
if allow_upgrade or not satisfies_local:
log.debug("Fetching releases for %r from %r...", name, repo)
releases = repo.releases(name)
if not releases:
error = "Release not found on remote repository: %r on %r (error: %r)" % (
name, repo, releases['error'])
raise Exception(error)
releases = [release for release in releases
if Version(release['version']) in _version_spec]
if not releases:
error = "Release satisfying the condition %r %r not found on remote repository %r" % (
name, version_spec, repo)
raise Exception(error)
downloading = None if (
prev and releases[-1]['version'] == prev.version) else releases[-1]['version']
else:
downloading = None
if downloading:
log.info('Collecting %s...', name)
data = repo.download(name, downloading)
f = zipfile.ZipFile(data, 'r')
# No /: topmost files
# One /: topmost folders
topmost_files = [path for path in f.namelist() if path.count('/') == 0]
# From ZipInfo.is_dir() in Python 3.x
topmost_folders = [path for path in f.namelist() if path.endswith('/')]
common_prefix = topmost_folders[0] if len(topmost_files) == 0 and len(topmost_folders) == 1 else ""
info = json.load(f.open(common_prefix + 'info.json'))
packages_path = g['path']['packages']
install_path = os.path.join(packages_path, info["_id"])
# this ensures os.path.exists(install_path) == False
        # TODO: should we unload an already-loaded plugin?
if prev:
prev.remove()
assert not os.path.exists(install_path)
# XXX: edge case?
removed = os.path.join(install_path, '.removed')
if os.path.isfile(removed):
os.unlink(removed)
log.info('Extracting into %r...', install_path)
if common_prefix:
f.extractall(packages_path)
os.rename(os.path.join(packages_path, common_prefix), install_path)
else:
f.extractall(install_path)
# Initiate LocalPackage object
pkg = LocalPackage(info['_id'], install_path, info['version'])
else:
pkg = prev
log.info("Requirement already satisfied: %s%s",
name, '' if version_spec == '*' else version_spec)
restart_required = pkg.info().get('restart_required', False)
_visited[name] = (pkg.version, restart_required)
# First, install dependencies
# TODO: add version check
for dep_name, dep_version_spec in pkg.info().get('dependencies', {}).items():
install_from_repo(repo, dep_name, dep_version_spec, allow_upgrade, _visited)
# Then, install this package.
if downloading:
pkg.install()
if not restart_required:
pkg.load()
if top_level:
log.info("Successfully installed %s",
' '.join('%s-%s' % (key, value[0]) for key, value in _visited.items()))
delayed = [(key, value) for key, value in _visited.items() if value[1]]
if delayed:
log.info(
"Plugins in the following packages will be loaded after restarting IDA.")
log.info(
" %s", " ".join('%s-%s' % (key, value[0]) for key, value in delayed))
return pkg
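# Illustrative helper sketch (assumed name, not used elsewhere): because
# install_from_repo() blocks until every dependency is downloaded and
# installed, callers inside IDA would typically run it off the UI thread,
# as the docstring above suggests.
def _example_install_in_background(repo, name, version_spec='*'):
    import threading
    worker = threading.Thread(target=install_from_repo,
                              args=(repo, name, version_spec))
    worker.daemon = True
    worker.start()
    return worker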
|
test/test_polar_decoding.py | NVlabs/sionna | 163 | 12772263 | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
import unittest
import pytest # for pytest filterwarnings
import numpy as np
from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder
from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder
from sionna.fec.polar.decoding import Polar5GDecoder
from sionna.fec.crc import CRCEncoder
from sionna.fec.utils import GaussianPriorSource
from sionna.utils import BinarySource
from sionna.fec.polar.utils import generate_5g_ranking
class TestPolarDecodingSC(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n+1)
# test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
PolarSCDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output equals all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
c = -10. * np.ones([bs, p[1]]) # all-zero with BPSK (no noise);logits
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
            # no infinity
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero llr input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
            # no infinity
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
def test_identity(self):
"""test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
        c_res = dec(b_res).numpy()
        # and reshape back to 2D shape
        c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
        self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and xla is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarSCDecoder(frozen_pos, n)
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
dec = PolarSCDecoder(frozen_pos, n)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_dtype_flexible(self):
"""Test that output_dtype can be flexible."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
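# Minimal end-to-end sketch (not a test case, parameters are arbitrary
# assumptions): encode random info bits with a (k, n) polar code, emulate
# noiseless BPSK demodulation as in the tests above and recover the bits with
# successive cancellation decoding.
def _example_sc_pipeline(batch_size=4, k=32, n=64):
    frozen_pos, _ = generate_5g_ranking(k, n)
    source = BinarySource()
    enc = PolarEncoder(frozen_pos, n)
    dec = PolarSCDecoder(frozen_pos, n)
    u = source([batch_size, k])
    llr_ch = 20. * (2. * enc(u) - 1.)  # noiseless BPSK LLRs
    u_hat = dec(llr_ch)
    return u, u_hat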
class TestPolarDecodingSCL(unittest.TestCase):
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n+1)
# also test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarSCLDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCLDecoder(frozen_pos, 64, output_dtype=tf.complex64)
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is the all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
# check shape
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
# also test different list sizes
n = 32
k = 16
frozen_pos, _ = generate_5g_ranking(k, n)
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, n])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==k)
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, k])
self.assertTrue(np.array_equal(u, u_hat))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)"""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
                        # no infinity
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
                        # no infinity
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = BinarySource()
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 200.*(2.*c-1) # demod BPSK without noise
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(),
u_hat.numpy()))
# also test different list sizes
n = 32
k = 16
crc_degree = "CRC11"
frozen_pos, _ = generate_5g_ranking(k, n)
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
u = source([bs, k-enc_crc.crc_length])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 200.*(2.*c-1) # demod BPSK without noise
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(),
u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 16
n = 32
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against multi-dimensional input shapes.
As reshaping is done before calling the actual decoder, no exhaustive
testing against all decoder options is required.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCLDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
        c_res = dec(b_res).numpy()
        # and reshape back to 2D shape
        c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
        self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 78
n = 128
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
bs = 10
k = 16
n = 32
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
crc_degrees = [None, "CRC11"]
for crc_degree in crc_degrees:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
# test that for arbitrary input only binary values are
# returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
if not cpu_only: # cpu only does not support XLA
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
Also verifies that all decoding options yield same results.
Remark: results are for SC only, i.e., list_size=1.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=1,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_hybrid_scl(self):
"""Verify hybrid SC decoding option.
Remark: XLA is currently not supported.
"""
bs = 10
n = 32
k = 16
crc_degree = "CRC11"
list_sizes = [1, 2, 8, 32]
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
k_crc = enc_crc.crc_length
u = source([bs, k-k_crc])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 20.*(2.*c-1) # demod BPSK without noise
for list_size in list_sizes:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_hybrid_sc=True,
crc_degree=crc_degree)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(), u_hat.numpy()))
# verify that graph can be executed
@tf.function
def run_graph(u):
return dec(u)
u = source([bs, n])
# execute the graph twice
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
def test_dtype_flexible(self):
"""Test that output_dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
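# Sketch of CRC-aided list decoding (not a test case, parameters are assumed):
# the CRC bits are appended to the info bits before polar encoding and the SCL
# decoder uses the same CRC to select the surviving path, mirroring
# test_identity above.
def _example_scl_crc_pipeline(batch_size=4, k=16, n=32, list_size=8):
    frozen_pos, _ = generate_5g_ranking(k, n)
    source = BinarySource()
    enc = PolarEncoder(frozen_pos, n)
    enc_crc = CRCEncoder("CRC11")
    dec = PolarSCLDecoder(frozen_pos, n, list_size=list_size, crc_degree="CRC11")
    u = source([batch_size, k - enc_crc.crc_length])
    u_crc = enc_crc(u)
    llr_ch = 20. * (2. * enc(u_crc) - 1.)  # noiseless BPSK LLRs
    u_hat = dec(llr_ch)
    return u_crc, u_hat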
class TestPolarDecodingBP(unittest.TestCase):
"""Test Polar BP decoder."""
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n+1)
# test also valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarBPDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarBPDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is all-zero
codeword."""
# batch size
bs = 10
# (k, n)
param_valid = [[1, 32],[10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for hard_out in [True, False]:
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarBPDecoder(frozen_pos,
p[1],
hard_out=hard_out)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
if hard_out:
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarBPDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarBPDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
        # decode 2D Tensor
        c = dec(b).numpy()
        # decode 4D Tensor
        c_res = dec(b_res).numpy()
        # and reshape back to 2D shape
        c_res = tf.reshape(c_res, [100, k])
        # both versions should yield the same result
        self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([1, 15, n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_numerics(self):
"""Test for numerical stability with large llrs and many iterations.
"""
bs = 100
k = 120
n = 256
num_iter = 200
for hard_out in [False, True]:
frozen_pos, _ = generate_5g_ranking(k, n)
source = GaussianPriorSource()
dec = PolarBPDecoder(frozen_pos,
n,
hard_out=hard_out,
num_iter=num_iter)
b = source([[bs,n], 0.001]) # very large llrs
c = dec(b).numpy()
# all values are finite (not nan and not inf)
self.assertTrue(np.sum(np.abs(1 - np.isfinite(c)))==0)
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 32
n = 64
num_iter = 10
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarBPDecoder(frozen_pos, n, num_iter=num_iter)
# test that for arbitrary input only 0,1 values are returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# Currently not supported
# run same test for XLA (jit_compile=True)
#u = source([bs, n])
#x = run_graph_xla(u).numpy()
#x = run_graph_xla(u).numpy()
#u = source([bs+1, n])
#x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against Numpy reference implementation.
Test hard and soft output.
"""
def boxplus_np(x, y):
"""Check node update (boxplus) for LLRs in numpy.
See [Stimming_LLR]_ and [Hashemi_SSCL]_ for detailed equations.
"""
x_in = np.maximum(np.minimum(x, llr_max), -llr_max)
y_in = np.maximum(np.minimum(y, llr_max), -llr_max)
# avoid division for numerical stability
llr_out = np.log(1 + np.exp(x_in + y_in))
llr_out -= np.log(np.exp(x_in) + np.exp(y_in))
return llr_out
def decode_bp(llr_ch, n_iter, frozen_pos, info_pos):
n = llr_ch.shape[-1]
bs = llr_ch.shape[0]
n_stages = int(np.log2(n))
msg_r = np.zeros([bs, n_stages+1, n])
msg_l = np.zeros([bs, n_stages+1, n])
# init llr_ch
msg_l[:, n_stages, :] = -1*llr_ch.numpy()
# init frozen positions with infty
msg_r[:, 0, frozen_pos] = llr_max
# and decode
for iter in range(n_iter):
# update r messages
for s in range(n_stages):
# calc indices
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
# load messages
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# r1_out
msg_r[:, s+1, ind_1] = boxplus_np(r1_in, l2_in + r2_in)
# r2_out
msg_r[:, s+1, ind_2] = boxplus_np(r1_in, l1_in) + r2_in
# update l messages
for s in range(n_stages-1, -1, -1):
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# l1_out
msg_l[:, s, ind_1] = boxplus_np(l1_in, l2_in + r2_in)
# l2_out
msg_l[:, s, ind_2] = boxplus_np(r1_in, l1_in) + l2_in
# recover u_hat
u_hat_soft = msg_l[:, 0, info_pos]
u_hat = 0.5 * (1 - np.sign(u_hat_soft))
return u_hat, u_hat_soft
# generate llr_ch
noise_var = 0.3
num_iters = [5, 10, 20, 40]
llr_max = 19.3
bs = 100
n = 128
k = 64
frozen_pos, info_pos = generate_5g_ranking(k, n)
for num_iter in num_iters:
source = GaussianPriorSource()
llr_ch = source([[bs, n], noise_var])
# and decode
dec_bp = PolarBPDecoder(frozen_pos, n,
hard_out=True, num_iter=num_iter)
dec_bp_soft = PolarBPDecoder(frozen_pos, n,
hard_out=False, num_iter=num_iter)
u_hat_bp = dec_bp(llr_ch).numpy()
u_hat_bp_soft = dec_bp_soft(llr_ch,).numpy()
# and run BP decoder
u_hat_ref, u_hat_ref_soft = decode_bp(llr_ch,
num_iter,
frozen_pos,
info_pos)
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_bp, u_hat_ref))
self.assertTrue(np.allclose(-u_hat_bp_soft,
u_hat_ref_soft,
rtol=5e-2,
atol=5e-3))
def test_dtype_flexible(self):
"""Test that output dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarBPDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarBPDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecoding5G(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid input values.
Note: consistency of code parameters is already checked by the encoder.
"""
enc = Polar5GEncoder(40, 60)
with self.assertRaises(AssertionError):
Polar5GDecoder(enc, dec_type=1)
with self.assertRaises(ValueError):
Polar5GDecoder(enc, dec_type="ABC")
with self.assertRaises(AssertionError):
Polar5GDecoder("SC")
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity_de_ratematching(self):
"""Test that info bits can be recovered if no noise is added and
dimensions are correct."""
bs = 10
# (k,n)
param_valid = [[12, 32], [20, 32], [100, 257], [123, 897],
[1013, 1088]]
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for p in param_valid:
for dec_type in dec_types:
source = BinarySource()
enc = Polar5GEncoder(p[0], p[1])
dec = Polar5GDecoder(enc, dec_type=dec_type)
u = source([bs, p[0]])
c = enc(u)
self.assertTrue(c.numpy().shape[-1]==p[1])
                llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 145
source = BinarySource()
enc = Polar5GEncoder(k, n)
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = Polar5GDecoder(enc, dec_type=dec_type)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 237
enc = Polar5GEncoder(k, n)
source = BinarySource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
            # decode 2D Tensor
            c = dec(b).numpy()
            # decode 4D Tensor
            c_res = dec(b_res).numpy()
            # and reshape back to 2D shape
            c_res = tf.reshape(c_res, [100, k])
            # both versions should yield the same result
            self.assertTrue(np.array_equal(c, c_res))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 95
n = 145
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
llr = source([[1,4,n], 0.5])
llr_rep = tf.tile(llr, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(llr_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that tf.function decorator works
include xla compiler test."""
bs = 10
k = 45
n = 67
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
# hybSCL does not support graph mode!
dec_types = ["SC", "SCL", "BP"]
for dec_type in dec_types:
print(dec_type)
dec = Polar5GDecoder(enc, dec_type=dec_type)
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
# test that for arbitrary input only binary values are returned
u = source([[bs, n], 0.5])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([[bs+1, n], 0.5])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
# BP does currently not support XLA
if dec_type != "BP":
u = source([[bs, n], 0.5])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([[bs+1, n], 0.5])
x = run_graph_xla(u).numpy()
def test_dtype_flexible(self):
"""Test that output dtype can be variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
enc = Polar5GEncoder(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = Polar5GDecoder(enc, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = Polar5GDecoder(enc, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
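# Sketch of the rate-matched 5G decoder chain (not a test case, parameters are
# assumed): Polar5GEncoder handles rate matching internally and Polar5GDecoder
# reverses it before running the selected algorithm ("SC", "SCL", "hybSCL" or
# "BP"), mirroring test_identity_de_ratematching above.
def _example_polar5g_pipeline(batch_size=4, k=45, n=67, dec_type="SCL"):
    source = BinarySource()
    enc = Polar5GEncoder(k, n)
    dec = Polar5GDecoder(enc, dec_type=dec_type)
    u = source([batch_size, k])
    llr_ch = 20. * (2. * enc(u) - 1.)  # noiseless BPSK LLRs
    u_hat = dec(llr_ch)
    return u, u_hat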
|
tests/openbb_terminal/stocks/due_diligence/test_csimarket_model.py | tehcoderer/GamestonkTerminal | 255 | 12772274 |
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.due_diligence import csimarket_model
@pytest.mark.vcr
def test_get_suppliers(recorder):
result_txt = csimarket_model.get_suppliers(ticker="TSLA")
recorder.capture(result_txt)
@pytest.mark.vcr
def test_get_suppliers_invalid(recorder):
result_txt = csimarket_model.get_suppliers(ticker="INVALID_TICKER")
recorder.capture(result_txt)
@pytest.mark.vcr
def test_get_customers(recorder):
result_txt = csimarket_model.get_customers(ticker="TSLA")
recorder.capture(result_txt)
@pytest.mark.vcr
def test_get_customers_invalid(recorder):
result_txt = csimarket_model.get_customers(ticker="INVALID_TICKER")
recorder.capture(result_txt)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSharramycatsTranslations.py | fake-name/ReadableWebProxy | 193 | 12772282 | def extractSharramycatsTranslations(item):
"""
'Sharramycats Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('11 Ways to Forget Your Ex-Boyfriend', '11 Ways to Forget Your Ex-Boyfriend', 'translated'),
('The Monster Inside Of My Bed', 'The Monster Inside Of My Bed', 'translated'),
('The Peculiars\' Tale', 'The Peculiars\' Tale', 'translated'),
('ARG', '<NAME>.', 'translated'),
('Legend of Gemini', 'Legend of Gemini', 'translated'),
('Kaliskis', 'Kaliskis', 'translated'),
('She Died', 'She Died', 'translated'),
('Ice Goddess', 'Ice Goddess', 'translated'),
('The Friendly Wedding', 'The Friendly Wedding', 'translated'),
('Forlorn Madness', 'Forlorn Madness', 'translated'),
('Hidden Inside The Academy', 'Hidden Inside The Academy', 'translated'),
('The Señorita', 'The Señorita', 'translated'),
('School Of Myths', 'School of Myths', 'translated'),
('The Guys Inside of My Bed', 'The Guys Inside of My Bed', 'translated'),
('The Guy Inside Of My Bed', 'The Guys Inside of My Bed', 'translated'),
('Titan Academy Of Special Abilities', 'Titan Academy Of Special Abilities', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
torch_glow/tests/nodes/copy_test.py | YaronBenAtar/glow | 2,838 | 12772304 |
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class CopyModel(torch.nn.Module):
def __init__(self, shape):
super(CopyModel, self).__init__()
self.other = torch.randn(shape)
def forward(self, a):
b = a.copy_(self.other)
return a + b
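# Quick illustration in plain PyTorch (no Glow involved, shapes are assumed):
# ``Tensor.copy_`` broadcasts the source into the destination in place, which
# is the behaviour the Glow-fused tests below exercise.
def _example_copy_broadcast():
    dst = torch.randn(4, 4, 3)
    src = torch.randn(1, 3)
    dst.copy_(src)  # src is broadcast across the leading dimensions of dst
    return dst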
class TestCopy(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1x1 => 1x3", [1, 1], [1, 3]),
lambda: ("1x3x5 => 1x3x5", [1, 3, 5], [1, 3, 5]),
lambda: ("1x3 => 4x4x3", [1, 3], [4, 4, 3]),
]
)
def test_copy_(self, _, other_shape, tensor_shape):
"""Test of the PyTorch copy_ method on Glow."""
utils.compare_tracing_methods(
CopyModel(other_shape),
torch.randn(tensor_shape),
fusible_ops={"aten::copy_"},
)
@utils.deterministic_expand(
[
lambda: ("1x1x1 => 1x3", [1, 1, 1], [1, 3]),
lambda: ("1x4 => 4x4x3", [1, 4], [4, 4, 3]),
lambda: ("4x4x3 => 1x3", [4, 4, 3], [1, 3]),
]
)
def test_copy_broadcast_failure(self, _, other_shape, tensor_shape):
"""Test of the PyTorch copy_ method on Glow."""
with self.assertRaises(RuntimeError):
utils.compare_tracing_methods(
CopyModel(other_shape),
torch.randn(tensor_shape),
fusible_ops={"aten::copy_"},
)
|
tests/tasks/sodaspark/test_sodaspark_tasks.py | suryatmodulus/prefect | 8,633 | 12772309 | from unittest.mock import MagicMock
import pytest
from pyspark.sql import SparkSession
from prefect.tasks.sodaspark import SodaSparkScan
class TestSodaSparkScan:
def test_construction_provide_scan_and_df(self):
expected_scan_def = "/foo/bar.yaml"
expected_df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=expected_scan_def, df=expected_df)
assert soda_spark_scan_task.scan_def == expected_scan_def
assert soda_spark_scan_task.df == expected_df
def test_construction_no_scan_and_df(self):
soda_spark_scan_task = SodaSparkScan()
assert soda_spark_scan_task.scan_def is None
assert soda_spark_scan_task.df is None
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_no_scan(self):
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(df=df)
with pytest.raises(ValueError) as exc:
soda_spark_scan_task.run()
assert "scan_def cannot be None" in str(exc)
def test_run_no_df(self):
soda_spark_scan_task = SodaSparkScan(scan_def="/foo/bar.yaml")
with pytest.raises(ValueError) as exc:
soda_spark_scan_task.run()
assert "df cannot be None" in str(exc)
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_invalid_scan(self, monkeypatch):
scan_def = "invalid scan definition"
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
with pytest.raises(AttributeError):
soda_spark_scan_task.run()
def test_run_invalid_df(self, monkeypatch):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count > 0
"""
df = "not a valid df"
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
with pytest.raises(AttributeError):
soda_spark_scan_task.run()
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_valid_scan_and_df_with_measurements(self):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count > 0
"""
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
res = soda_spark_scan_task.run()
assert hasattr(res, "measurements")
# @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
def test_run_valid_scan_and_df_with_errors(self):
scan_def = """
table_name: demodata
metrics:
- row_count
- max
- min_length
tests:
- row_count == 0
"""
df = SparkSession.builder.getOrCreate().createDataFrame(
[{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
)
soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
res = soda_spark_scan_task.run()
assert hasattr(res, "errors")
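# Hedged usage sketch (not exercised by the tests above): wiring the task into
# a Prefect 1.x flow. The flow name, scan definition and DataFrame contents
# are assumptions for illustration only.
def _example_flow():
    from prefect import Flow
    scan_def = """
    table_name: demodata
    metrics:
    - row_count
    tests:
    - row_count > 0
    """
    df = SparkSession.builder.getOrCreate().createDataFrame(
        [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
    )
    with Flow("soda-spark-scan") as flow:
        SodaSparkScan()(scan_def=scan_def, df=df)
    return flow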
|
src/CheckersBot.py | kartikkukreja/blog-codes | 182 | 12772371 |
from copy import deepcopy
from time import time
# Global Constants
MaxUtility = 1e9
IsPlayerBlack = True
MaxAllowedTimeInSeconds = 9.5
MaxDepth = 100
class CheckersState:
def __init__(self, grid, blackToMove, moves):
self.grid = grid
self.blackToMove = blackToMove
self.moves = moves # Hops taken by a disc to reach the current state
# This just checks for whether or not all pieces of a player have been eliminated.
# It does not check for whether a player has a move or not. In that case, there will
# be no successors for that player and alpha beta search will return Min/Max Utility.
def isTerminalState(self):
blackSeen, whiteSeen = False, False
        for row in self.grid:
for cell in row:
if cell == 'b' or cell == 'B': blackSeen = True
elif cell == 'w' or cell == 'W': whiteSeen = True
if blackSeen and whiteSeen: return False
self.isLoserBlack = whiteSeen
return True
def getTerminalUtility(self):
return MaxUtility if IsPlayerBlack != self.isLoserBlack else -MaxUtility
def getSuccessors(self):
def getSteps(cell):
whiteSteps = [(-1, -1), (-1, 1)]
blackSteps = [(1, -1), (1, 1)]
steps = []
if cell != 'b': steps.extend(whiteSteps)
if cell != 'w': steps.extend(blackSteps)
return steps
def generateMoves(board, i, j, successors):
for step in getSteps(board[i][j]):
x, y = i + step[0], j + step[1]
if x >= 0 and x < 8 and y >= 0 and y < 8 and board[x][y] == '_':
boardCopy = deepcopy(board)
boardCopy[x][y], boardCopy[i][j] = boardCopy[i][j], '_'
# A pawn is promoted when it reaches the last row
if (x == 7 and self.blackToMove) or (x == 0 and not self.blackToMove):
boardCopy[x][y] = boardCopy[x][y].upper()
successors.append(CheckersState(boardCopy, not self.blackToMove, [(i, j), (x, y)]))
def generateJumps(board, i, j, moves, successors):
jumpEnd = True
for step in getSteps(board[i][j]):
x, y = i + step[0], j + step[1]
if x >= 0 and x < 8 and y >= 0 and y < 8 and board[x][y] != '_' and board[i][j].lower() != board[x][y].lower():
xp, yp = x + step[0], y + step[1]
if xp >= 0 and xp < 8 and yp >= 0 and yp < 8 and board[xp][yp] == '_':
board[xp][yp], save = board[i][j], board[x][y]
board[i][j] = board[x][y] = '_'
previous = board[xp][yp]
# A pawn is promoted when it reaches the last row
if (xp == 7 and self.blackToMove) or (xp == 0 and not self.blackToMove):
board[xp][yp] = board[xp][yp].upper()
moves.append((xp, yp))
generateJumps(board, xp, yp, moves, successors)
moves.pop()
board[i][j], board[x][y], board[xp][yp] = previous, save, '_'
jumpEnd = False
if jumpEnd and len(moves) > 1:
successors.append(CheckersState(deepcopy(board), not self.blackToMove, deepcopy(moves)))
player = 'b' if self.blackToMove else 'w'
successors = []
# generate jumps
for i in xrange(8):
for j in xrange(8):
if self.grid[i][j].lower() == player:
generateJumps(self.grid, i, j, [(i, j)], successors)
if len(successors) > 0: return successors
# generate moves
for i in xrange(8):
for j in xrange(8):
if self.grid[i][j].lower() == player:
generateMoves(self.grid, i, j, successors)
return successors
def piecesCount(state):
# 1 for a normal piece, 1.5 for a king
black, white = 0, 0
for row in state.grid:
for cell in row:
if cell == 'b': black += 1.0
elif cell == 'B': black += 1.5
elif cell == 'w': white += 1.0
elif cell == 'W': white += 1.5
return black - white if IsPlayerBlack else white - black
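# An alternative evaluation function (illustrative only, not used by default):
# same piece weights as piecesCount plus a small bonus for advanced pawns.
# The 0.1 advancement weight is an arbitrary assumption, not a tuned value.
# It can be passed to iterativeDeepeningAlphaBeta() in place of piecesCount.
def piecesAndAdvancement(state):
    score = 0.0
    for i, row in enumerate(state.grid):
        for cell in row:
            if cell == 'b': score += 1.0 + 0.1 * i
            elif cell == 'B': score += 1.5
            elif cell == 'w': score -= 1.0 + 0.1 * (7 - i)
            elif cell == 'W': score -= 1.5
    return score if IsPlayerBlack else -score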
def iterativeDeepeningAlphaBeta(state, evaluationFunc):
startTime = time()
def alphaBetaSearch(state, alpha, beta, depth):
def maxValue(state, alpha, beta, depth):
val = -MaxUtility
for successor in state.getSuccessors():
val = max(val, alphaBetaSearch(successor, alpha, beta, depth))
if val >= beta: return val
alpha = max(alpha, val)
return val
def minValue(state, alpha, beta, depth):
val = MaxUtility
for successor in state.getSuccessors():
val = min(val, alphaBetaSearch(successor, alpha, beta, depth - 1))
if val <= alpha: return val
beta = min(beta, val)
return val
if state.isTerminalState(): return state.getTerminalUtility()
if depth <= 0 or time() - startTime > MaxAllowedTimeInSeconds: return evaluationFunc(state)
return maxValue(state, alpha, beta, depth) if state.blackToMove == IsPlayerBlack else minValue(state, alpha, beta, depth)
bestMove = None
for depth in xrange(1, MaxDepth):
if time() - startTime > MaxAllowedTimeInSeconds: break
val = -MaxUtility
for successor in state.getSuccessors():
score = alphaBetaSearch(successor, -MaxUtility, MaxUtility, depth)
if score > val:
val, bestMove = score, successor.moves
return bestMove
if __name__ == '__main__':
player = raw_input()
boardSize = int(raw_input())
grid = []
for i in xrange(boardSize):
grid.append(raw_input())
IsPlayerBlack = player[0] == 'b'
state = CheckersState([list(row.rstrip()) for row in grid], IsPlayerBlack, [])
move = iterativeDeepeningAlphaBeta(state, piecesCount)
print len(move) - 1
for step in move:
print step[0], step[1]
|
localgraphclustering/algorithms/__init__.py | vishalbelsare/LocalGraphClustering | 106 | 12772372 |
from .acl_list import acl_list
from .eig2_nL import eig2_nL, eig2nL_subgraph
from .fista_dinput_dense import fista_dinput_dense
from .sweepcut import sweepcut
|
examples/decompose_fmri_stability.py | johnbanq/modl | 135 | 12772373 | # Author: <NAME>
# License: BSD
import warnings
from nilearn.input_data import NiftiMasker
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
from os.path import expanduser, join
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Memory, dump
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from modl.datasets import fetch_adhd
from modl.decomposition.fmri import fMRIDictFact
from modl.decomposition.stability import mean_amari_discrepency
from modl.plotting.fmri import display_maps
from nilearn.datasets import fetch_atlas_smith_2009
from modl.utils.system import get_cache_dirs
batch_size = 200
learning_rate = .92
method = 'masked'
step_size = 0.01
reduction_ = 8
alpha = 1e-3
n_epochs = 4
verbose = 15
n_jobs = 70
smoothing_fwhm = 6
components_list = [20, 40, 80, 120, 200, 300, 500]
n_runs = 20
dict_init = fetch_atlas_smith_2009().rsn20
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()
def fit_single(train_imgs, test_imgs, n_components, random_state):
dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
method=method,
step_size=step_size,
mask=mask,
memory=mem,
memory_level=2,
verbose=verbose,
n_epochs=n_epochs,
n_jobs=1,
random_state=random_state,
n_components=n_components,
positive=True,
learning_rate=learning_rate,
batch_size=batch_size,
reduction=reduction_,
alpha=alpha,
callback=None,
)
dict_fact.fit(train_imgs, confounds=train_confounds)
score = dict_fact.score(test_imgs)
return dict_fact.components_, score
def fit_many_runs(train_imgs, test_imgs, components_list, n_runs=10, n_jobs=1):
random_states = check_random_state(0).randint(0, int(1e7), size=n_runs)
cached_fit = mem.cache(fit_single)
res = Parallel(n_jobs=n_jobs)(delayed(cached_fit)(
train_imgs, test_imgs, n_components, random_state)
for n_components in components_list
for random_state in random_states
)
components, scores = zip(*res)
shape = (len(components_list), len(random_states))
components = np.array(components).reshape(shape).tolist()
scores = np.array(scores).reshape(shape).tolist()
discrepencies = []
var_discrepencies = []
best_components = []
for n_components, these_components, these_scores in zip(components_list,
components,
scores):
discrepency, var_discrepency = mean_amari_discrepency(
these_components)
best_estimator = these_components[np.argmin(these_scores)]
        discrepencies.append(discrepency)
var_discrepencies.append(var_discrepency)
best_components.append(best_estimator)
discrepencies = np.array(discrepencies)
var_discrepencies = np.array(var_discrepencies)
best_components = np.array(best_components)
components = best_components[np.argmin(discrepencies)]
return discrepencies, var_discrepencies, components
output_dir = expanduser('~/output_drago4/modl/fmri_stability2')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
discrepencies, var_discrepencies, components = fit_many_runs(
train_imgs, test_imgs,
components_list,
n_jobs=n_jobs,
n_runs=n_runs)
components_img = masker.inverse_transform(components)
components_img.to_filename(
join(output_dir, 'components.nii.gz'))
dump((components_list, discrepencies, var_discrepencies),
join(output_dir, 'discrepencies.pkl'))
fig = plt.figure()
display_maps(fig, components_img)
plt.savefig(join(output_dir, 'components.pdf'))
fig, ax = plt.subplots(1, 1)
ax.fill_between(components_list, discrepencies - var_discrepencies,
discrepencies + var_discrepencies, alpha=0.5)
ax.plot(components_list, discrepencies, marker='o')
ax.set_xlabel('Number of components')
ax.set_ylabel('Mean Amari discrepency')
sns.despine(fig)
fig.suptitle('Stability selection using DL')
plt.savefig(join(output_dir, 'discrepencies.pdf'))
|
PYTHON/PasswordGen.py | ayushyado/HACKTOBERFEST2021-2 | 125 | 12772391 | <reponame>ayushyado/HACKTOBERFEST2021-2<filename>PYTHON/PasswordGen.py
import string
import random
#Characters List to Generate Password
characters = list(string.ascii_letters + string.digits + "!@#$%^&*()")
def password_gen():
#Length of Password from the User
length = int(input("Password length: "))
#Shuffling the Characters
random.shuffle(characters)
#Picking random Characters from the given List
password = []
for i in range(length):
password.append(random.choice(characters))
#Shuffling the Resultant Password
random.shuffle(password)
#Converting the List to String
#Printing the List
print("".join(password))
#Invoking the function
password_gen()
|
icevision/models/mmdet/models/retinanet/backbones/resnet_fpn.py | bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit | 580 | 12772407 | __all__ = [
"resnet50_caffe_fpn_1x",
"resnet50_fpn_1x",
"resnet50_fpn_2x",
"resnet101_caffe_fpn_1x",
"resnet101_fpn_1x",
"resnet101_fpn_2x",
"resnext101_32x4d_fpn_1x",
"resnext101_32x4d_fpn_2x",
"resnext101_64x4d_fpn_1x",
"resnext101_64x4d_fpn_2x",
]
from icevision.imports import *
from icevision.models.mmdet.utils import *
class MMDetRetinanetBackboneConfig(MMDetBackboneConfig):
def __init__(self, **kwargs):
super().__init__(model_name="retinanet", **kwargs)
base_config_path = mmdet_configs_path / "retinanet"
base_weights_url = "http://download.openmmlab.com/mmdetection/v2.0/retinanet"
resnet50_caffe_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r50_caffe_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth",
)
resnet50_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r50_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth",
)
resnet50_fpn_2x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r50_fpn_2x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth",
)
resnet101_caffe_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r101_caffe_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth",
)
resnet101_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r101_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth",
)
resnet101_fpn_2x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_r101_fpn_2x_coco.py",
weights_url=f"{base_weights_url}/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth",
)
resnext101_32x4d_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_x101_32x4d_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth",
)
resnext101_32x4d_fpn_2x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_x101_32x4d_fpn_2x_coco.py",
weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth",
)
resnext101_64x4d_fpn_1x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_x101_64x4d_fpn_1x_coco.py",
weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth",
)
resnext101_64x4d_fpn_2x = MMDetRetinanetBackboneConfig(
config_path=base_config_path / "retinanet_x101_64x4d_fpn_2x_coco.py",
weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth",
)
|
docs/source/conftest.py | chaaklau/pycantonese | 124 | 12772409 | """Test code snippets embedded in the docs.
Reference: https://sybil.readthedocs.io/en/latest/use.html#pytest
"""
from doctest import NORMALIZE_WHITESPACE
from os import chdir, getcwd
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from sybil import Sybil
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@pytest.fixture(scope="module")
def tempdir():
path = mkdtemp()
cwd = getcwd()
try:
chdir(path)
yield path
finally:
chdir(cwd)
rmtree(path)
pytest_collect_file = Sybil(
parsers=[DocTestParser(optionflags=NORMALIZE_WHITESPACE), skip],
pattern="*.rst",
fixtures=["tempdir"],
).pytest()
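# Illustrative note (added; the doctest below is an assumed example, not taken from
# the pycantonese docs): with this Sybil configuration, any *.rst page under the docs
# tree may embed doctests such as
#
#     >>> 1 + 1
#     2
#
# pytest collects them with NORMALIZE_WHITESPACE enabled, and the module-scoped
# `tempdir` fixture runs each documentation module inside a throwaway working
# directory that is removed afterwards.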
|
multitask_benchmark/datasets_generation/graph_algorithms.py | Michaelvll/pna | 249 | 12772440 | <reponame>Michaelvll/pna<filename>multitask_benchmark/datasets_generation/graph_algorithms.py
import math
from queue import Queue
import numpy as np
def is_connected(A):
"""
:param A:np.array the adjacency matrix
:return:bool whether the graph is connected or not
"""
for _ in range(int(1 + math.ceil(math.log2(A.shape[0])))):
A = np.dot(A, A)
return np.min(A) > 0
def identity(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return:F
"""
return F
def first_neighbours(A):
"""
    :param A:np.array the adjacency matrix
    :return: for each node, the number of nodes reachable in 1 hop
"""
return np.sum(A > 0, axis=0)
def second_neighbours(A):
"""
    :param A:np.array the adjacency matrix
    :return: for each node, the number of nodes reachable in no more than 2 hops
"""
A = A > 0.0
A = A + np.dot(A, A)
np.fill_diagonal(A, 0)
return np.sum(A > 0, axis=0)
def kth_neighbours(A, k):
"""
    :param A:np.array the adjacency matrix
    :param k:int the number of hops
    :return: for each node, the number of nodes reachable in no more than k hops
"""
A = A > 0.0
R = np.zeros(A.shape)
for _ in range(k):
R = np.dot(R, A) + A
np.fill_diagonal(R, 0)
return np.sum(R > 0, axis=0)
def map_reduce_neighbourhood(A, F, f_reduce, f_map=None, hops=1, consider_itself=False):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, map its neighbourhood with f_map, and reduce it with f_reduce
"""
if f_map is not None:
F = f_map(F)
A = np.array(A)
A = A > 0
R = np.zeros(A.shape)
for _ in range(hops):
R = np.dot(R, A) + A
np.fill_diagonal(R, 1 if consider_itself else 0)
R = R > 0
return np.array([f_reduce(F[R[i]]) for i in range(A.shape[0])])
def max_neighbourhood(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, the maximum in its neighbourhood
"""
return map_reduce_neighbourhood(A, F, np.max, consider_itself=True)
def min_neighbourhood(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, the minimum in its neighbourhood
"""
return map_reduce_neighbourhood(A, F, np.min, consider_itself=True)
def std_neighbourhood(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, the standard deviation of its neighbourhood
"""
return map_reduce_neighbourhood(A, F, np.std, consider_itself=True)
def mean_neighbourhood(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, the mean of its neighbourhood
"""
return map_reduce_neighbourhood(A, F, np.mean, consider_itself=True)
def local_maxima(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, whether it is the maximum in its neighbourhood
"""
return F == map_reduce_neighbourhood(A, F, np.max, consider_itself=True)
def graph_laplacian(A):
"""
:param A:np.array the adjacency matrix
:return: the laplacian of the adjacency matrix
"""
L = (A > 0) * -1
np.fill_diagonal(L, np.sum(A > 0, axis=0))
return L
def graph_laplacian_features(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the laplacian of the adjacency matrix multiplied by the features
"""
return np.matmul(graph_laplacian(A), F)
def isomorphism(A1, A2, F1=None, F2=None):
"""
    Takes two adjacency matrices (A1, A2) and (optionally) two lists of features. It uses the Weisfeiler-Lehman algorithm, so false positives might arise
    :param A1: adj_matrix, N*N numpy matrix
    :param A2: adj_matrix, N*N numpy matrix
    :param F1: node_values, numpy array of size N
    :param F2: node_values, numpy array of size N
:return: isomorphic: boolean which is false when the two graphs are not isomorphic, true when they probably are.
"""
N = A1.shape[0]
if (F1 is None) ^ (F2 is None):
raise ValueError("either both or none between F1,F2 must be defined.")
if F1 is None:
# Assign same initial value to each node
F1 = np.ones(N, int)
F2 = np.ones(N, int)
else:
if not np.array_equal(np.sort(F1), np.sort(F2)):
return False
        if F1.dtype != int:
raise NotImplementedError('Still have to implement this')
p = 1000000007
def mapping(F):
return (F * 234 + 133) % 1000000007
def adjacency_hash(F):
F = np.sort(F)
b = 257
h = 0
for f in F:
h = (b * h + f) % 1000000007
return h
for i in range(N):
F1 = map_reduce_neighbourhood(A1, F1, adjacency_hash, f_map=mapping, consider_itself=True, hops=1)
F2 = map_reduce_neighbourhood(A2, F2, adjacency_hash, f_map=mapping, consider_itself=True, hops=1)
if not np.array_equal(np.sort(F1), np.sort(F2)):
return False
return True
def count_edges(A):
"""
:param A:np.array the adjacency matrix
:return: the number of edges in the graph
"""
return np.sum(A) / 2
def is_eulerian_cyclable(A):
"""
:param A:np.array the adjacency matrix
:return: whether the graph has an eulerian cycle
"""
return is_connected(A) and np.count_nonzero(first_neighbours(A) % 2 == 1) == 0
def is_eulerian_percorrible(A):
"""
:param A:np.array the adjacency matrix
:return: whether the graph has an eulerian path
"""
return is_connected(A) and np.count_nonzero(first_neighbours(A) % 2 == 1) in [0, 2]
def map_reduce_graph(A, F, f_reduce):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the features of the nodes reduced by f_reduce
"""
return f_reduce(F)
def mean_graph(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the mean of the features
"""
return map_reduce_graph(A, F, np.mean)
def max_graph(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the maximum of the features
"""
return map_reduce_graph(A, F, np.max)
def min_graph(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the minimum of the features
"""
return map_reduce_graph(A, F, np.min)
def std_graph(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: the standard deviation of the features
"""
return map_reduce_graph(A, F, np.std)
def has_hamiltonian_cycle(A):
"""
:param A:np.array the adjacency matrix
:return:bool whether the graph has an hamiltonian cycle
"""
    A = A + np.transpose(A)
A = A > 0
V = A.shape[0]
def ham_cycle_loop(pos):
if pos == V:
if A[path[pos - 1]][path[0]]:
return True
else:
return False
for v in range(1, V):
if A[path[pos - 1]][v] and not used[v]:
path[pos] = v
used[v] = True
if ham_cycle_loop(pos + 1):
return True
path[pos] = -1
used[v] = False
return False
used = [False] * V
path = [-1] * V
path[0] = 0
return ham_cycle_loop(1)
def all_pairs_shortest_paths(A, inf_sub=math.inf):
"""
:param A:np.array the adjacency matrix
:param inf_sub: the placeholder value to use for pairs which are not connected
:return:np.array all pairs shortest paths
"""
A = np.array(A)
N = A.shape[0]
for i in range(N):
for j in range(N):
if A[i][j] == 0:
A[i][j] = math.inf
if i == j:
A[i][j] = 0
for k in range(N):
for i in range(N):
for j in range(N):
A[i][j] = min(A[i][j], A[i][k] + A[k][j])
A = np.where(A == math.inf, inf_sub, A)
return A
def diameter(A):
"""
:param A:np.array the adjacency matrix
    :return: the diameter of the graph
"""
sum = np.sum(A)
apsp = all_pairs_shortest_paths(A)
apsp = np.where(apsp < sum + 1, apsp, -1)
return np.max(apsp)
def eccentricity(A):
"""
:param A:np.array the adjacency matrix
    :return: the eccentricity of each node of the graph
"""
sum = np.sum(A)
apsp = all_pairs_shortest_paths(A)
apsp = np.where(apsp < sum + 1, apsp, -1)
return np.max(apsp, axis=0)
def sssp_predecessor(A, F):
"""
:param A:np.array the adjacency matrix
:param F:np.array the nodes features
:return: for each node, the best next step to reach the designated source
"""
assert (np.sum(F) == 1)
assert (np.max(F) == 1)
s = np.argmax(F)
N = A.shape[0]
P = np.zeros(A.shape)
V = np.zeros(N)
bfs = Queue()
bfs.put(s)
V[s] = 1
while not bfs.empty():
u = bfs.get()
for v in range(N):
if A[u][v] > 0 and V[v] == 0:
V[v] = 1
P[v][u] = 1
bfs.put(v)
return P
def max_eigenvalue(A):
"""
:param A:np.array the adjacency matrix
:return: the maximum eigenvalue of A
since A is positive symmetric, all the eigenvalues are guaranteed to be real
"""
[W, _] = np.linalg.eig(A)
return W[np.argmax(np.absolute(W))].real
def max_eigenvalues(A, k):
"""
:param A:np.array the adjacency matrix
:param k:int the number of eigenvalues to be selected
:return: the k greatest (by absolute value) eigenvalues of A
"""
[W, _] = np.linalg.eig(A)
values = W[sorted(range(len(W)), key=lambda x: -np.absolute(W[x]))[:k]]
return values.real
def max_absolute_eigenvalues(A, k):
"""
:param A:np.array the adjacency matrix
:param k:int the number of eigenvalues to be selected
:return: the absolute value of the k greatest (by absolute value) eigenvalues of A
"""
return np.absolute(max_eigenvalues(A, k))
def max_absolute_eigenvalues_laplacian(A, n):
"""
:param A:np.array the adjacency matrix
:param k:int the number of eigenvalues to be selected
:return: the absolute value of the k greatest (by absolute value) eigenvalues of the laplacian of A
"""
A = graph_laplacian(A)
return np.absolute(max_eigenvalues(A, n))
def max_eigenvector(A):
"""
:param A:np.array the adjacency matrix
:return: the maximum (by absolute value) eigenvector of A
since A is positive symmetric, all the eigenvectors are guaranteed to be real
"""
[W, V] = np.linalg.eig(A)
return V[:, np.argmax(np.absolute(W))].real
def spectral_radius(A):
"""
:param A:np.array the adjacency matrix
    :return: the spectral radius of A, i.e. the maximum absolute value of its eigenvalues
    since A is symmetric, all the eigenvalues are guaranteed to be real
"""
return np.abs(max_eigenvalue(A))
def page_rank(A, F=None, iter=64):
"""
:param A:np.array the adjacency matrix
:param F:np.array with initial weights. If None, uniform initialization will happen.
:param iter: log2 of length of power iteration
:return: for each node, its pagerank
"""
# normalize A rows
A = np.array(A)
A /= A.sum(axis=1)[:, np.newaxis]
# power iteration
for _ in range(iter):
A = np.matmul(A, A)
# generate prior distribution
if F is None:
F = np.ones(A.shape[-1])
else:
F = np.array(F)
# normalize prior
F /= np.sum(F)
# compute limit distribution
return np.matmul(F, A)
def tsp_length(A, F=None):
"""
:param A:np.array the adjacency matrix
:param F:np.array determining which nodes are to be visited. If None, all of them are.
:return: the length of the Traveling Salesman Problem shortest solution
"""
A = all_pairs_shortest_paths(A)
N = A.shape[0]
if F is None:
F = np.ones(N)
targets = np.nonzero(F)[0]
T = targets.shape[0]
S = (1 << T)
dp = np.zeros((S, T))
def popcount(x):
b = 0
while x > 0:
x &= x - 1
b += 1
return b
msks = np.argsort(np.vectorize(popcount)(np.arange(S)))
for i in range(T + 1):
for j in range(T):
if (1 << j) & msks[i] == 0:
dp[msks[i]][j] = math.inf
for i in range(T + 1, S):
msk = msks[i]
for u in range(T):
if (1 << u) & msk == 0:
dp[msk][u] = math.inf
continue
cost = math.inf
for v in range(T):
if v == u or (1 << v) & msk == 0:
continue
cost = min(cost, dp[msk ^ (1 << u)][v] + A[targets[v]][targets[u]])
dp[msk][u] = cost
return np.min(dp[S - 1])
def get_nodes_labels(A, F):
"""
Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns
a set of labels for each node
:param A: adj_matrix, N*N numpy matrix
:param F: node_values, numpy array of size N
:return: labels: KxN numpy matrix where K is the number of labels for each node
"""
    labels = [identity(A, F),
              map_reduce_neighbourhood(A, F, np.mean, consider_itself=True),
              map_reduce_neighbourhood(A, F, np.max, consider_itself=True),
              map_reduce_neighbourhood(A, F, np.std, consider_itself=True),
              first_neighbours(A),
              second_neighbours(A),
              eccentricity(A)]
return np.swapaxes(np.stack(labels), 0, 1)
def get_graph_labels(A, F):
"""
Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns
a set of labels for the whole graph
:param A: adj_matrix, N*N numpy matrix
:param F: node_values, numpy array of size N
:return: labels: numpy array of size K where K is the number of labels for the graph
"""
labels = [diameter(A)]
return np.asarray(labels)
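# Illustrative usage sketch (added for demonstration; the toy adjacency matrix and
# feature vector below are assumed values, not part of the original benchmark code).
if __name__ == '__main__':
    A_demo = np.array([[0., 1., 1., 0.],
                       [1., 0., 1., 0.],
                       [1., 1., 0., 1.],
                       [0., 0., 1., 0.]])
    F_demo = np.array([1.0, 2.0, 3.0, 4.0])
    print(is_connected(A_demo))                     # True: a triangle plus one pendant node
    print(first_neighbours(A_demo))                 # node degrees: [2 2 3 1]
    print(all_pairs_shortest_paths(A_demo))         # 4x4 hop-count matrix
    print(tsp_length(A_demo))                       # 3.0: shortest walk visiting all nodes
    print(isomorphism(A_demo, A_demo[::-1, ::-1]))  # True: relabelled copy of the same graph
    print(get_nodes_labels(A_demo, F_demo).shape)   # (4, 7): per-node label matrix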
|
test/unit/config/test_reload_config.py | rikeshi/galaxy | 1,085 | 12772450 | <filename>test/unit/config/test_reload_config.py
import pytest
from galaxy import config
from galaxy.config import BaseAppConfiguration
from galaxy.config import reload_config_options
from galaxy.config.schema import AppSchema
R1, R2, N1, N2 = 'reloadable1', 'reloadable2', 'nonrelodable1', 'nonreloadable2' # config options
MOCK_SCHEMA = {
R1: {'reloadable': True, 'default': 1},
R2: {'reloadable': True, 'default': 2},
N1: {'default': 3},
N2: {'default': 4},
}
def get_schema(app_mapping):
return {'mapping': {'_': {'mapping': app_mapping}}}
@pytest.fixture
def mock_init(monkeypatch):
monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda a: AppSchema(None, '_'))
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(MOCK_SCHEMA))
def test_update_property(mock_init, monkeypatch):
# This also covers adding a property. When a config file does not set a property,
# that property is set to its default value. Thus, if we add a reloadable property
# to the config file, it's the same as modifying that property's value.
# edits to config file: R2, N1 modified
monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, R2: 42, N1: 99})
appconfig = BaseAppConfiguration()
assert getattr(appconfig, R1) == 1
assert getattr(appconfig, R2) == 2
assert getattr(appconfig, N1) == 3
reload_config_options(appconfig)
assert getattr(appconfig, R1) == 1 # no change
assert getattr(appconfig, R2) == 42 # change: reloadable option modified
assert getattr(appconfig, N1) == 3 # no change: option modified but is non-relodable
def test_overwrite_reloadable_attribute(mock_init, monkeypatch):
# This is similar to test_update_property, but here we overwrite the attribute before reloading.
# This can happen if a config property is modified AFTER it has been loaded from schema or kwargs.
    # For example: load `foo` (from schema or kwargs), but then, in a subsequent step while initializing
    # GalaxyAppConfiguration, do something like this: `foo = resolve_path(foo, bar)`. Now the value of `foo`
# is not what was initially loaded, and if `foo` is reloadable, it will be reset to its default as soon
# as the config file is modified. To prevent this, we compare the values read from the modified file
# to the `_raw_config` dict. This test ensures this works correctly.
# edits to config file: R2 modified
monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, R2: 42})
appconfig = BaseAppConfiguration()
assert getattr(appconfig, R1) == 1
assert getattr(appconfig, R2) == 2
# overwrite R1
setattr(appconfig, R1, 99)
assert getattr(appconfig, R1) == 99
# then reload
reload_config_options(appconfig)
assert getattr(appconfig, R1) == 99 # no change; should remain overwritten
assert getattr(appconfig, R2) == 42 # change: reloadable option modified
def test_cant_delete_property(mock_init, monkeypatch):
# A property should not be deleted: we don't know whether it was initially
# set to a default, loaded from a config file, env var, etc. Therefore, if a property
# is removed from the config file, it will not be modified or deleted.
# edits to config file: R2, N2 deleted
monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, N1: 3})
appconfig = BaseAppConfiguration()
assert getattr(appconfig, R1) == 1
assert getattr(appconfig, R2) == 2
assert getattr(appconfig, N1) == 3
assert getattr(appconfig, N2) == 4
reload_config_options(appconfig)
assert getattr(appconfig, R1) == 1 # no change
assert getattr(appconfig, R2) == 2 # no change: option cannot be deleted
assert getattr(appconfig, N1) == 3 # no change
assert getattr(appconfig, N2) == 4 # no change: option cannot be deleted
|
tastyworks/__init__.py | olyoberdorf/tastyworks_api | 198 | 12772487 | <gh_stars>100-1000
import logging
import sys
log = logging.getLogger(__name__)
log.propagate = False
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
root = logging.getLogger()
root.addHandler(out_hdlr)
root.propagate = False
root.setLevel(logging.INFO)
|
src/jNlp/eProcessing.py | Reynolddoss/jProcessing | 133 | 12772495 | <filename>src/jNlp/eProcessing.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pkg_resources import resource_stream
import sys, os, subprocess
from subprocess import call
import xml.etree.cElementTree as etree
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
if __name__ == '__main__':
pass
|
pgcontents/checkpoints.py | freedom079215/pgcontents | 138 | 12772513 | <reponame>freedom079215/pgcontents
"""
An IPython FileContentsManager that uses Postgres for checkpoints.
"""
from __future__ import unicode_literals
from .api_utils import (
_decode_unknown_from_base64,
outside_root_to_404,
reads_base64,
to_b64,
writes_base64,
)
from .managerbase import PostgresManagerMixin
from .query import (
delete_remote_checkpoints,
delete_single_remote_checkpoint,
get_remote_checkpoint,
list_remote_checkpoints,
move_remote_checkpoints,
purge_remote_checkpoints,
save_remote_checkpoint,
)
from .utils.ipycompat import Checkpoints, GenericCheckpointsMixin
class PostgresCheckpoints(PostgresManagerMixin,
GenericCheckpointsMixin,
Checkpoints):
"""
A Checkpoints implementation that saves checkpoints to a remote database.
"""
@outside_root_to_404
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint of the current state of a notebook
Returns a checkpoint_id for the new checkpoint.
"""
b64_content = writes_base64(nb)
with self.engine.begin() as db:
return save_remote_checkpoint(
db,
self.user_id,
path,
b64_content,
self.crypto.encrypt,
self.max_file_size_bytes,
)
@outside_root_to_404
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint of the current state of a file
Returns a checkpoint_id for the new checkpoint.
"""
try:
b64_content = to_b64(content, format)
except ValueError as e:
self.do_400(str(e))
with self.engine.begin() as db:
return save_remote_checkpoint(
db,
self.user_id,
path,
b64_content,
self.crypto.encrypt,
self.max_file_size_bytes,
)
@outside_root_to_404
def delete_checkpoint(self, checkpoint_id, path):
"""delete a checkpoint for a file"""
with self.engine.begin() as db:
return delete_single_remote_checkpoint(
db, self.user_id, path, checkpoint_id,
)
def get_checkpoint_content(self, checkpoint_id, path):
"""Get the content of a checkpoint."""
with self.engine.begin() as db:
return get_remote_checkpoint(
db,
self.user_id,
path,
checkpoint_id,
self.crypto.decrypt,
)['content']
@outside_root_to_404
def get_notebook_checkpoint(self, checkpoint_id, path):
b64_content = self.get_checkpoint_content(checkpoint_id, path)
return {
'type': 'notebook',
'content': reads_base64(b64_content),
}
@outside_root_to_404
def get_file_checkpoint(self, checkpoint_id, path):
b64_content = self.get_checkpoint_content(checkpoint_id, path)
content, format = _decode_unknown_from_base64(path, b64_content)
return {
'type': 'file',
'content': content,
'format': format,
}
@outside_root_to_404
def list_checkpoints(self, path):
"""Return a list of checkpoints for a given file"""
with self.engine.begin() as db:
return list_remote_checkpoints(db, self.user_id, path)
@outside_root_to_404
def rename_all_checkpoints(self, old_path, new_path):
"""Rename all checkpoints for old_path to new_path."""
with self.engine.begin() as db:
return move_remote_checkpoints(
db,
self.user_id,
old_path,
new_path,
)
@outside_root_to_404
def delete_all_checkpoints(self, path):
"""Delete all checkpoints for the given path."""
with self.engine.begin() as db:
delete_remote_checkpoints(db, self.user_id, path)
def purge_db(self):
"""
Purge all database records for the current user.
"""
with self.engine.begin() as db:
purge_remote_checkpoints(db, self.user_id)
|
riptable/rt_meta.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | 307 | 12772516 | __all__ = ['Item', 'Info', 'Doc', 'apply_schema', 'info', 'doc']
from typing import Optional, List
from .rt_struct import Struct
from .rt_fastarray import FastArray
from .rt_display import DisplayText
META_DICT = '_meta'
DOC_KEY = 'Doc'
DESCRIPTION_KEY = 'Description'
STEWARD_KEY = 'Steward'
TYPE_KEY = 'Type'
DETAIL_KEY = 'Detail'
CONTENTS_KEY = 'Contents'
NO_DESCRIPTION = '<no description>'
NO_STEWARD = '<no steward>'
NO_TYPE = '<none>'
NAME_DEFAULT_WIDTH = 4
DESCRIPTION_DEFAULT_WIDTH = 50
STEWARD_DEFAULT_WIDTH = 12
TYPE_STR_DEFAULT_WIDTH = 4
# ERROR KEYS
TYPE_MISMATCH = 'Type Mismatch'
EXTRA_COLUMN = 'Extra Column'
MISSING_COLUMN = 'Missing Column'
class Item:
"""Descriptive information for a data object.
Parameters
----------
name : str
The name of the data object.
type : str
The type of the data object.
description : str
A description of the data object.
steward : str
The steward of the data object.
"""
name : str
"""str: The name of the data object."""
type : str
"""str: The type of the data object."""
description : str
"""str: A description of the data object."""
steward : str
"""steward: The steward of the data object."""
def __init__(self, name: str, type: str, description: str, steward: str):
self.name = name
self.type = type
self.description = description
self.steward = steward
class Info:
"""A hierarchically structured container of descriptive information
for a data object.
"""
title = []
"""list: The title of the data object"""
description : Optional[str] = None
"""str: The description of the data object."""
steward : Optional[str] = None
"""str: The steward of the data object."""
type : Optional[str] = None
"""str: The type of the data object."""
detail = None
"""str: Detail about the data object."""
items : Optional[List[Item]] = None
"""list of `Item`: For a :class:`~.rt_struct.Struct` or :class:`~.rt_dataset.Dataset`, the items contained within it."""
def __init__(self):
pass
def _make_text(self):
title_format = DisplayText.title_format
header_format = DisplayText.header_format
rows = []
if self.title:
rows += [title_format('{}'.format(self.title))]
rows += [title_format('='*len(self.title))]
if self.description:
rows += [header_format('Description: ') + self.description]
if self.steward:
rows += [header_format('Steward: ') + self.steward]
if self.type:
rows += [header_format('Type: ') + self.type]
if self.detail:
rows += [header_format('Detail: ') + self.detail]
if self.items:
rows += [header_format('Contents:'), '']
# Set column widths
name_width = max(NAME_DEFAULT_WIDTH, max(len(item.name) for item in self.items))
descrip_width = DESCRIPTION_DEFAULT_WIDTH
steward_width = STEWARD_DEFAULT_WIDTH
stype_width = max(TYPE_STR_DEFAULT_WIDTH, max(len(item.type) for item in self.items))
# Add list header
rows += [header_format("{: <{}} {: <{}} {: <{}} {: <{}}".format(
"Type", stype_width, "Name", name_width,
"Description", descrip_width, "Steward", steward_width))]
rows += [header_format("{} {} {} {}".format(
"-" * stype_width, "-" * name_width, "-" * descrip_width, "-" * steward_width))]
# Add item rows
for item in self.items:
rows += ["{: <{}} {} {: <{}} {: <{}}".format(
item.type, stype_width, title_format('{: <{}}'.format(item.name, name_width)),
item.description, descrip_width, item.steward, steward_width)]
# Add a newline at the end if there is a title on top
if self.title:
rows += ['']
return "\n".join(rows)
def __str__(self):
return DisplayText(self._make_text()).__str__()
def __repr__(self):
return DisplayText(self._make_text()).__repr__()
def _repr_html_(self):
return DisplayText(self._make_text())._repr_html_()
class Doc(Struct):
"""A document object containing metadata about a data object.
Parameters
----------
schema : dict
See :meth:`apply_schema` for more information on the format of the
dictionary.
"""
_type = NO_TYPE
_descrip = NO_DESCRIPTION
_steward = NO_STEWARD
_detail = None
def __init__(self, schema):
super().__init__()
self._type = schema.get(TYPE_KEY)
self._descrip = schema.get(DESCRIPTION_KEY, NO_DESCRIPTION)
self._steward = schema.get(STEWARD_KEY, NO_STEWARD)
self._detail = schema.get(DETAIL_KEY, None)
schema_contents = schema.get(CONTENTS_KEY)
if schema_contents:
for key in schema_contents.keys():
if self.is_valid_colname(key):
self[key] = Doc(schema_contents[key])
def _as_info(self):
info = Info()
info.title = None
info.description = self._descrip
info.steward = self._steward
info.type = self._type
info.detail = self._detail
info.items = []
for name in self.keys():
elem = self[name]
info.items.append(Item(name, elem._type, elem._descrip,
elem._steward))
return info
def __str__(self):
return self._as_info().__str__()
def __repr__(self):
return self._as_info().__repr__()
def _repr_html_(self):
return self._as_info()._repr_html_()
def apply_schema(obj, schema: dict, doc: bool=True):
"""
Apply a schema containing descriptive information recursively to the
input data object.
The schema should be in the form of a hierarchical dictionary, where
for the data object, and recursively for each element it may contain,
there is a descriptive dictionary with the following keys and values:
* Type: 'Struct', 'Dataset', 'Multiset', 'FastArray', etc.
* Description: a brief description of the data object
* Steward: the name of the steward for that data object
* Detail: any additional descriptive information
* Contents: if the data object is a :class:`~.rt_struct.Struct`,
:class:`~.rt_dataset.Dataset`, or :class:`~.rt_multiset.Multiset`, a
recursively formed dictionary where there is a descriptive
dictionary of this form associated with the name of each element
contained by the data object.
When the schema is applied to the data object, key/value pairs are set
within the ``_meta`` dictionary attribute of the object and all of
its elements, to enable subsequent retrieval of the descriptive
information using the :meth:`.rt_struct.Struct.info` method or
:meth:`.rt_struct.Struct.doc` property.
In addition, during the schema application process, the contents and type
of each data object is compared to the expectation of the schema, with
any differences returned in the form of a dictionary.
Parameters
----------
obj : Struct or FastArray
The data object to apply the schema information to.
schema : dict
A descriptive dictionary defining the schema that should apply to the
data object and any elements it may contain.
doc : bool
Indicates whether to create and attach a :class:`Doc` to the object,
so that the :meth:`doc` method may be run on the object.
Returns
-------
res : dict
Dictionary of deviations from the schema
See Also
--------
:meth:`.rt_struct.Struct.apply_schema`
"""
res = {}
if isinstance(obj, (Struct, FastArray)):
if not hasattr(obj, META_DICT):
obj._meta = {}
if doc:
obj._meta[DOC_KEY] = Doc(schema)
obj._meta[DESCRIPTION_KEY] = schema.get(DESCRIPTION_KEY, NO_DESCRIPTION)
obj._meta[STEWARD_KEY] = schema.get(STEWARD_KEY, NO_STEWARD)
obj._meta[DETAIL_KEY] = schema.get(DETAIL_KEY, None)
stype = schema.get(TYPE_KEY)
if stype and _type_str(obj) != stype:
res[TYPE_MISMATCH] = "Type {} does not match schema type {}".\
format(_type_str(obj), stype)
schema_contents = schema.get(CONTENTS_KEY)
if schema_contents:
for key in obj.keys():
elem_schema = schema_contents.get(key)
if elem_schema:
elem_res = apply_schema(obj[key], elem_schema, False)
if elem_res:
res[key] = elem_res
else:
res[EXTRA_COLUMN] = key
for key in schema_contents.keys():
if key not in obj.keys():
res[MISSING_COLUMN] = key
return res
def _type_str(obj) -> str:
"""
Return the string representation of an object's type.
Parameters
----------
obj : Any
An object
Returns
-------
str : str
String representation of an object's type.
"""
if isinstance(obj, FastArray):
stype = obj.dtype.name
else:
stype = type(obj).__name__
return stype
def info(obj, title=None) -> Info:
"""
Return the :class:`Info` for the object, describing its contents.
Parameters
----------
obj : Any
The object
title : str
The title to give the object, defaults to None.
Returns
-------
info : Info
Information about `obj`.
"""
info = Info()
info.title = title
info.description = NO_DESCRIPTION
info.steward = NO_STEWARD
info.detail = None
info.type = _type_str(obj)
if hasattr(obj, META_DICT):
info.description = obj._meta.get(DESCRIPTION_KEY, info.description)
info.steward = obj._meta.get(STEWARD_KEY, info.steward)
info.detail = obj._meta.get(DETAIL_KEY, None)
if isinstance(obj, Struct):
info.items = []
for name in obj.keys():
descrip = NO_DESCRIPTION
steward = NO_STEWARD
if hasattr(obj[name], META_DICT):
descrip = obj[name]._meta.get(DESCRIPTION_KEY, descrip)
steward = obj[name]._meta.get(STEWARD_KEY, steward)
info.items.append(Item(name, _type_str(obj[name]), descrip, steward))
return info
def doc(obj) -> Optional[Doc]:
"""
Return the :class:`Doc` for the object, describing its contents.
Parameters
----------
obj : Any
The object.
Returns
-------
doc : Doc
Returns a :class:`Doc` instance if the object contains documentation
metadata, otherwise None.
"""
if hasattr(obj, META_DICT):
if DOC_KEY in obj._meta:
return obj._meta[DOC_KEY]
return None
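# Illustrative schema sketch (added for demonstration; the column names and values
# below are assumed, not part of riptable itself). apply_schema() walks a nested
# dictionary of this shape, stores Description/Steward/Detail in each element's
# _meta, and returns any deviations such as type mismatches or missing columns.
_EXAMPLE_SCHEMA = {
    TYPE_KEY: 'Dataset',
    DESCRIPTION_KEY: 'Daily trade summary',
    STEWARD_KEY: 'data-team',
    CONTENTS_KEY: {
        'price': {TYPE_KEY: 'float64', DESCRIPTION_KEY: 'Trade price'},
        'size': {TYPE_KEY: 'int64', DESCRIPTION_KEY: 'Trade size'},
    },
}
# Usage sketch (assuming `ds` is a Dataset with 'price' and 'size' columns):
#     deviations = apply_schema(ds, _EXAMPLE_SCHEMA)
#     info(ds, title='Daily trades')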
|
en_transformer/utils.py | dumpmemory/En-transformer | 108 | 12772518 | <filename>en_transformer/utils.py
import torch
from torch import sin, cos, atan2, acos
def rot_z(gamma):
return torch.tensor([
[cos(gamma), -sin(gamma), 0],
[sin(gamma), cos(gamma), 0],
[0, 0, 1]
], dtype = gamma.dtype)
def rot_y(beta):
return torch.tensor([
[cos(beta), 0, sin(beta)],
[0, 1, 0],
[-sin(beta), 0, cos(beta)]
], dtype = beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
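# Illustrative check (added for demonstration; the angles are arbitrary assumed
# values): rot() composes three elemental rotations into a single 3x3 rotation
# matrix, so multiplying it by its transpose should give the identity up to
# float32 precision.
if __name__ == '__main__':
    alpha, beta, gamma = torch.tensor(0.3), torch.tensor(1.1), torch.tensor(-0.7)
    R = rot(alpha, beta, gamma)
    print(torch.allclose(R @ R.t(), torch.eye(3), atol=1e-5))  # True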
|
recruiter/api_urls.py | b1pb1p/opensource-job-portal | 199 | 12772554 | <filename>recruiter/api_urls.py
from django.urls import path, re_path
from recruiter import api_views
app_name = "api_recruiter"
urlpatterns = [
path("login/", api_views.login_view, name="api_login"),
path("out/", api_views.getout, name="getout"),
path("change-password/", api_views.change_password, name="api_change_password"),
path("profile/", api_views.user_profile, name="api_user_profile"),
path("job/list/", api_views.jobs_list, name="api_list"),
path("skill/list/", api_views.skill_list),
path("industry/list/", api_views.industry_list),
path("city/list/", api_views.city_list),
path("state/list/", api_views.state_list),
path("company/list/", api_views.company_list),
path("functional-area/list/", api_views.functional_area_list),
path("job/inactive/list/", api_views.inactive_jobs, name="api_inactive_jobs"),
path("profile/edit/", api_views.edit_profile, name="edit_profile"),
path("company-profile/", api_views.view_company, name="view_company"),
re_path(r"^job/(?P<job_type>[-\w]+)/new/$", api_views.new_job, name="api_new_job"),
re_path(
r"^job/edit/(?P<job_post_id>[a-zA-Z0-9]+)/$",
api_views.edit_job,
name="api_edit_job",
),
re_path(
r"^job/delete/(?P<job_post_id>[a-zA-Z0-9]+)/$",
api_views.delete_job,
name="api_delete_job",
),
]
|
tools/reval_discovery.py | AdilSiddiqui131/OIM | 231 | 12772556 | <filename>tools/reval_discovery.py
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Reval = re-eval. Re-evaluate saved detections."""
import _init_paths
from fast_rcnn.config import cfg
from datasets.factory import get_imdb
import cPickle
import os, sys, argparse
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Re-evaluate results')
parser.add_argument('output_dir', nargs=1, help='results directory',
type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to re-evaluate',
default='voc_2007_trainval', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def from_dets(imdb_name, output_dir, args):
imdb = get_imdb(imdb_name)
imdb.competition_mode(args.comp_mode)
with open(os.path.join(output_dir, 'discovery.pkl'), 'rb') as f:
dets = cPickle.load(f)
print 'Evaluating discovery'
imdb.evaluate_discovery(dets, output_dir)
if __name__ == '__main__':
args = parse_args()
output_dir = os.path.abspath(args.output_dir[0])
imdb_name = args.imdb_name
from_dets(imdb_name, output_dir, args)
|
pysec/core/ctx.py | benhunter/owasp-pysec | 416 | 12772562 | <filename>pysec/core/ctx.py
# Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
import sys
from pysec.core import Object
__all__ = 'Context',
CONTEXTS = {'file', 'cmd', 'html', 'js'}
class Context(Object):
def __init__(self, name='none', info=None, locs=None):
name = str(name)
self.name = name
self.info = {} if info is None else dict(info)
CONTEXTS.add(name)
def __enter__(self):
frame = sys._getframe().f_back
contexts = frame.f_locals.setdefault('__ctx__', [])
contexts.append(self)
def __exit__(self, exc_type, exc_value, exc_tb):
        sys._getframe().f_back.f_locals['__ctx__'].pop()
return 0
def contexts(self):
frame = sys._getframe().f_back
while frame:
ls = frame.f_locals.get('__ctx__', None)
if ls:
for ctx in ls:
yield ctx
            frame = frame.f_back
|
examples/pytorch/vision/Face_Detection/eval.py | cw18-coder/EdgeML | 719 | 12772567 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import os
import time
import argparse
import numpy as np
from PIL import Image
import cv2
from data.choose_config import cfg
cfg = cfg.cfg
from utils.augmentations import to_chw_bgr
from importlib import import_module
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='face detection demo')
parser.add_argument('--save_dir', type=str, default='results/',
help='Directory for detect result')
parser.add_argument('--model', type=str,
default='weights/rpool_face_c.pth', help='trained model')
parser.add_argument('--thresh', default=0.17, type=float,
help='Final confidence threshold')
parser.add_argument('--multigpu',
default=False, type=str2bool,
help='Specify whether model was trained with multigpu')
parser.add_argument('--model_arch',
default='RPool_Face_C', type=str,
choices=['RPool_Face_C', 'RPool_Face_Quant', 'RPool_Face_QVGA_monochrome', 'RPool_Face_M4'],
help='choose architecture among rpool variants')
parser.add_argument('--image_folder', default=None, type=str, help='folder containing images')
parser.add_argument('--save_traces',
default=False, type=str2bool,
help='Specify whether to save input output traces')
args = parser.parse_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
def detect(net, img_path, thresh, save_traces):
img = Image.open(img_path)
img = img.convert('RGB')
img = np.array(img)
height, width, _ = img.shape
if os.environ['IS_QVGA_MONO'] == '1':
max_im_shrink = np.sqrt(
320 * 240 / (img.shape[0] * img.shape[1]))
else:
max_im_shrink = np.sqrt(
640 * 480 / (img.shape[0] * img.shape[1]))
if save_traces==True and os.environ['IS_QVGA_MONO'] == '1':
image = cv2.resize(img, (320, 240))
elif save_traces==True:
image = cv2.resize(img, (640, 480))
else:
image = cv2.resize(img, None, None, fx=max_im_shrink,
fy=max_im_shrink, interpolation=cv2.INTER_LINEAR)
x = to_chw_bgr(image)
x = x.astype('float32')
x -= cfg.img_mean
x = x[[2, 1, 0], :, :]
if cfg.IS_MONOCHROME == True:
x = 0.299 * x[0] + 0.587 * x[1] + 0.114 * x[2]
x = torch.from_numpy(x).unsqueeze(0).unsqueeze(0)
else:
x = torch.from_numpy(x).unsqueeze(0)
if use_cuda:
x = x.cuda()
t1 = time.time()
y, loc, conf = net(x)
detections = y.data
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] >= thresh:
score = detections[0, i, j, 0]
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
left_up, right_bottom = (pt[0], pt[1]), (pt[2], pt[3])
j += 1
cv2.rectangle(img, left_up, right_bottom, (0, 0, 255), 2)
conf_score = "{:.3f}".format(score)
point = (int(left_up[0]), int(left_up[1] - 5))
cv2.putText(img, conf_score, point, cv2.FONT_HERSHEY_COMPLEX,
0.6, (0, 255, 0), 1)
t2 = time.time()
print('detect:{} timer:{}'.format(img_path, t2 - t1))
cv2.imwrite(os.path.join(args.save_dir, os.path.basename(img_path)), img)
if save_traces == True:
return x, loc, conf
if __name__ == '__main__':
module = import_module('models.' + args.model_arch)
net = module.build_s3fd('test', cfg.NUM_CLASSES)
if args.multigpu == True:
net = torch.nn.DataParallel(net)
checkpoint_dict = torch.load(args.model)
model_dict = net.state_dict()
model_dict.update(checkpoint_dict)
net.load_state_dict(model_dict)
net.eval()
if use_cuda:
net.cuda()
        cudnn.benchmark = True
img_path = args.image_folder
img_list = [os.path.join(img_path, x)
for x in os.listdir(img_path)]
x = []
loc = []
conf = []
for path in img_list:
if args.save_traces == True:
x_temp, loc_temp, conf_temp = detect(net, path, args.thresh, args.save_traces)
x.append(x_temp)
loc.append(loc_temp)
conf.append(conf_temp)
else:
detect(net, path, args.thresh, args.save_traces)
if args.save_traces == True:
np.save('trace_inputs.npy', torch.cat(x).cpu().detach().numpy())
np.save('trace_outputs.npy', torch.cat([torch.cat(conf), torch.cat(loc)], dim=1).cpu().detach().numpy())
|
codigo/Live173/exemplo_tk.py | BrunoPontesLira/live-de-python | 572 | 12772586 | from tkinter import Tk, Label
root = Tk()
a = Label(root, text='Live de Python', font=('Arial', 30))
a.pack()
root.mainloop()
|
flightrl/rpg_baselines/ppo/ppo2_test.py | MarioBonse/flightmare | 596 | 12772587 | <reponame>MarioBonse/flightmare<filename>flightrl/rpg_baselines/ppo/ppo2_test.py<gh_stars>100-1000
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
def test_model(env, model, render=False):
#
fig = plt.figure(figsize=(18, 12), tight_layout=True)
gs = gridspec.GridSpec(5, 12)
#
ax_x = fig.add_subplot(gs[0, 0:4])
ax_y = fig.add_subplot(gs[0, 4:8])
ax_z = fig.add_subplot(gs[0, 8:12])
#
ax_dx = fig.add_subplot(gs[1, 0:4])
ax_dy = fig.add_subplot(gs[1, 4:8])
ax_dz = fig.add_subplot(gs[1, 8:12])
#
ax_euler_x = fig.add_subplot(gs[2, 0:4])
ax_euler_y = fig.add_subplot(gs[2, 4:8])
ax_euler_z = fig.add_subplot(gs[2, 8:12])
#
ax_euler_vx = fig.add_subplot(gs[3, 0:4])
ax_euler_vy = fig.add_subplot(gs[3, 4:8])
ax_euler_vz = fig.add_subplot(gs[3, 8:12])
#
ax_action0 = fig.add_subplot(gs[4, 0:3])
ax_action1 = fig.add_subplot(gs[4, 3:6])
ax_action2 = fig.add_subplot(gs[4, 6:9])
ax_action3 = fig.add_subplot(gs[4, 9:12])
max_ep_length = env.max_episode_steps
num_rollouts = 5
if render:
env.connectUnity()
for n_roll in range(num_rollouts):
pos, euler, dpos, deuler = [], [], [], []
actions = []
obs, done, ep_len = env.reset(), False, 0
while not (done or (ep_len >= max_ep_length)):
act, _ = model.predict(obs, deterministic=True)
obs, rew, done, infos = env.step(act)
#
ep_len += 1
#
pos.append(obs[0, 0:3].tolist())
dpos.append(obs[0, 6:9].tolist())
euler.append(obs[0, 3:6].tolist())
deuler.append(obs[0, 9:12].tolist())
#
actions.append(act[0, :].tolist())
pos = np.asarray(pos)
dpos = np.asarray(dpos)
euler = np.asarray(euler)
deuler = np.asarray(deuler)
actions = np.asarray(actions)
#
t = np.arange(0, pos.shape[0])
ax_x.step(t, pos[:, 0], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_y.step(t, pos[:, 1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_z.step(t, pos[:, 2], color="C{0}".format(
n_roll), label="pos [x, y, z] -- trail: {0}".format(n_roll))
#
ax_dx.step(t, dpos[:, 0], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_dy.step(t, dpos[:, 1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_dz.step(t, dpos[:, 2], color="C{0}".format(
n_roll), label="vel [x, y, z] -- trail: {0}".format(n_roll))
#
ax_euler_x.step(t, euler[:, -1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_euler_y.step(t, euler[:, 0], color="C{0}".format(
n_roll), label="trail :{0}".format(n_roll))
ax_euler_z.step(t, euler[:, 1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
#
ax_euler_vx.step(t, deuler[:, -1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_euler_vy.step(t, deuler[:, 0], color="C{0}".format(
n_roll), label="trail :{0}".format(n_roll))
ax_euler_vz.step(t, deuler[:, 1], color="C{0}".format(
n_roll), label=r"$\theta$ [x, y, z] -- trail: {0}".format(n_roll))
#
ax_action0.step(t, actions[:, 0], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_action1.step(t, actions[:, 1], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_action2.step(t, actions[:, 2], color="C{0}".format(
n_roll), label="trail: {0}".format(n_roll))
ax_action3.step(t, actions[:, 3], color="C{0}".format(
n_roll), label="act [0, 1, 2, 3] -- trail: {0}".format(n_roll))
#
if render:
env.disconnectUnity()
ax_z.legend()
ax_dz.legend()
ax_euler_z.legend()
ax_euler_vz.legend()
ax_action3.legend()
#
plt.tight_layout()
plt.show()
|
test_numpy_embedding.py | morpheusthewhite/twitter-sent-dnn | 314 | 12772591 | <gh_stars>100-1000
import theano
import numpy as np
from dcnn import WordEmbeddingLayer
from dcnn_train import WordEmbeddingLayer as TheanoWordEmbeddingLayer
from test_util import assert_matrix_eq
########### NUMPY ###########
vocab_size, embed_dm = 10, 5
embeddings = np.random.rand(vocab_size, embed_dm)
sents = np.asarray(np.random.randint(10, size = (3, 6)),
dtype = np.int32)
np_l = WordEmbeddingLayer(embeddings)
actual = np_l.output(sents)
########### THEANO ###########
x_symbol = theano.tensor.imatrix('x') # the word indices matrix
th_l = TheanoWordEmbeddingLayer(rng = np.random.RandomState(1234),
input = x_symbol,
vocab_size = vocab_size,
embed_dm = embed_dm,
embeddings = theano.shared(value = embeddings,
name = "embeddings"
)
)
f = theano.function(inputs = [x_symbol],
outputs = th_l.output)
expected = f(sents)
assert_matrix_eq(actual, expected, "Embedding")
|
recipes/structopt/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12772595 | import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class StructoptConan(ConanFile):
name = "structopt"
homepage = "https://github.com/p-ranav/structopt"
url = "https://github.com/conan-io/conan-center-index"
description = "Parse command line arguments by defining a struct+"
license = "MIT"
settings = "compiler", "os"
topics = ("conan", "structopt", "argument-parser", "cpp17", "header-only",
"single-header-lib", "header-library", "command-line", "arguments",
"mit-license", "modern-cpp", "structopt", "lightweight", "reflection",
"cross-platform", "library", "type-safety", "type-safe", "argparse",
"clap", "visit-struct-library", "magic-enum")
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _supported_compiler(self):
compiler = str(self.settings.compiler)
version = tools.Version(self.settings.compiler.version)
if compiler == "Visual Studio" and version >= "15":
return True
elif compiler == "gcc" and version >= "9":
return True
elif compiler == "clang" and version >= "5":
return True
elif compiler == "apple-clang" and version >= "10":
return True
else:
self.output.warn("{} recipe lacks information about the {} compiler standard version support".format(self.name, compiler))
return False
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, "17")
if not self._supported_compiler:
raise ConanInvalidConfiguration("structopt: Unsupported compiler: {}-{} "
"(https://github.com/p-ranav/structopt#compiler-compatibility).".format(self.settings.compiler, self.settings.compiler.version))
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
self.copy(pattern="*.h", src=os.path.join(self._source_subfolder, "include"), dst="include")
self.copy(pattern="*.hpp", src=os.path.join(self._source_subfolder, "include"), dst="include")
def package_id(self):
self.info.header_only()
|
src/TulsiGenerator/Scripts/install_genfiles_tests.py | comius/tulsi | 511 | 12772607 | <gh_stars>100-1000
# Copyright 2018 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for install_genfiles.py."""
import os
import unittest
import install_genfiles
DOES_EXIST_DATA = {
'generated_sources': [
('src/TulsiGenerator/Scripts/install_genfiles.py',
'install_genfiles.py'),
],
}
DOES_NOT_EXIST_DATA = {
'generated_sources': [('src/does/not/exist.txt',
'exist.txt')],
}
class TestInstallForData(unittest.TestCase):
  def testSrcDoesNotExist(self):
tmpdir = os.environ['TEST_TMPDIR']
installer = install_genfiles.Installer('.', output_root=tmpdir)
installer.InstallForData(DOES_NOT_EXIST_DATA)
self.assertFalse(os.path.lexists(
os.path.join(tmpdir, 'bazel-tulsi-includes/x/x/exist.txt')))
def testSrcDoesExist(self):
tmpdir = os.environ['TEST_TMPDIR']
installer = install_genfiles.Installer('.', output_root=tmpdir)
installer.InstallForData(DOES_EXIST_DATA)
# Must use lexists because we create a link but use the wrong exec root,
# so the symlink is not valid.
self.assertTrue(os.path.lexists(
os.path.join(tmpdir, 'bazel-tulsi-includes/x/x/install_genfiles.py')))
if __name__ == '__main__':
unittest.main()
|
Tests/Marketplace/Tests/test_pack_dependencies.py | satyakidroid/content | 799 | 12772636 | <reponame>satyakidroid/content<gh_stars>100-1000
# type: ignore[attr-defined]
from unittest.mock import patch
import networkx as nx
from Tests.Marketplace.packs_dependencies import calculate_single_pack_dependencies
def find_pack_display_name_mock(pack_folder_name):
return pack_folder_name
class TestCalculateSinglePackDependencies:
@classmethod
def setup_class(cls):
patch('demisto_sdk.commands.find_dependencies.find_dependencies.find_pack_display_name',
side_effect=find_pack_display_name_mock)
patch('Tests.scripts.utils.log_util.install_logging')
graph = nx.DiGraph()
graph.add_node('pack1', mandatory_for_packs=[])
graph.add_node('pack2', mandatory_for_packs=[])
graph.add_node('pack3', mandatory_for_packs=[])
graph.add_node('pack4', mandatory_for_packs=[])
graph.add_node('pack5', mandatory_for_packs=[])
graph.add_edge('pack1', 'pack2')
graph.add_edge('pack2', 'pack3')
graph.add_edge('pack1', 'pack4')
graph.nodes()['pack4']['mandatory_for_packs'].append('pack1')
dependencies = calculate_single_pack_dependencies('pack1', graph)
cls.first_level_dependencies, cls.all_level_dependencies, _ = dependencies
def test_calculate_single_pack_dependencies_first_level_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- Ensure first level dependencies for pack1 are only pack2 and pack4
"""
all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
expected_first_level_dependencies = {'pack2', 'pack4'}
for node in expected_first_level_dependencies:
assert node in self.first_level_dependencies
for node in all_nodes - expected_first_level_dependencies:
assert node not in self.first_level_dependencies
def test_calculate_single_pack_dependencies_all_levels_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- Ensure all levels dependencies for pack1 are pack2, pack3 and pack4 only
"""
all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
expected_all_level_dependencies = {'pack2', 'pack3', 'pack4'}
for node in expected_all_level_dependencies:
assert node in self.all_level_dependencies
for node in all_nodes - expected_all_level_dependencies:
assert node not in self.all_level_dependencies
def test_calculate_single_pack_dependencies_mandatory_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- pack4 is mandatory for pack1 and that there are no other mandatory dependencies
"""
expected_mandatory_dependency = 'pack4'
assert self.first_level_dependencies[expected_mandatory_dependency]['mandatory']
for node in self.first_level_dependencies:
if node != expected_mandatory_dependency:
assert not self.first_level_dependencies[node]['mandatory']
|
util/rev_info.py | ccwanggl/smartknob | 3,836 | 12772649 | <reponame>ccwanggl/smartknob
import datetime
import subprocess
def git_short_rev():
try:
return subprocess.check_output([
'git',
'rev-parse',
'--short',
'HEAD',
]).decode('utf-8').strip()
except Exception:
raise RuntimeError("Could not read git revision. Make sure you have git installed and you're working with a git clone of the repository.")
def current_date():
return datetime.date.today().strftime('%Y-%m-%d')
def git_date(short=True):
try:
iso = subprocess.check_output([
'git',
'log',
'-1',
'--format=%ci',
'HEAD',
]).decode('utf-8').strip()
if short:
return iso.split(' ')[0]
else:
return iso
except Exception:
raise RuntimeError("Could not read git commit date. Make sure you have git installed and you're working with a git clone of the repository.")
def git_release_version(search_prefix):
try:
tags = subprocess.check_output([
'git',
'tag',
'--points-at',
'HEAD',
]).decode('utf-8').splitlines()
for tag in tags:
if tag.startswith(search_prefix):
return tag[len(search_prefix):]
return None
except Exception:
raise RuntimeError("Could not read git release tags. Make sure you have git installed and you're working with a git clone of the repository.")
|
test/pytest/test_anonymous_group.py | showipintbri/ttp | 254 | 12772650 | <filename>test/pytest/test_anonymous_group.py
import sys
sys.path.insert(0, "../..")
import pprint
from ttp import ttp
def test_simple_anonymous_template():
template_1 = """interface {{ interface }}
description {{ description | ORPHRASE }}"""
data_1 = """
interface Port-Chanel11
description Storage Management
interface Loopback0
description RID
"""
parser = ttp(template=template_1, data=data_1)
# check that data added:
datums_added = {
"{}:{}".format(template.name, input_name): input_obj.data
for template in parser._templates
for input_name, input_obj in template.inputs.items()
}
# pprint.pprint(datums_added)
parser.parse()
res = parser.result()
# pprint.pprint(res)
# assert res == [[[{'description': 'Storage Management', 'interface': 'Port-Chanel11'}, {'description': 'RID', 'interface': 'Loopback0'}]]]
# test_simple_anonymous_template()
def test_anonymous_group_with_vars():
template = """
<input load="text">
interface Port-Chanel11
description Storage Management
interface Loopback0
description RID
</input>
<vars name="my.var.s">
a = 1
b = 2
</vars>
<group>
interface {{ interface }}
description {{ description | ORPHRASE }}
</group>
"""
parser = ttp(template=template)
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
[
{"description": "Storage Management", "interface": "Port-Chanel11"},
{"description": "RID", "interface": "Loopback0"},
{"my": {"var": {"s": {"a": 1, "b": 2}}}},
]
]
]
# test_anonymous_group_with_vars()
def test_anonymous_group_with_child_group_empty_absolute_path():
template = """
<template results="per_template">
<input name="Cisco_ios" load="text">
r2#show interfaces | inc line protocol
interface GigabitEthernet1
vrf forwarding MGMT
ip address 10.123.89.55 255.255.255.0
</input>
<input name="Cisco_ios" load="text">
r1#show interfaces | inc line protocol:
interface GigabitEthernet1
description some info
vrf forwarding MGMT
ip address 10.123.89.56 255.255.255.0
interface GigabitEthernet2
ip address 10.123.89.55 255.255.255.0
</input>
<group void="">
interface {{ interface }}
description {{ description | ORPHRASE }}
<group name="/">
ip address {{ ip }} {{ mask }}
</group>
</group>
</template>
"""
parser = ttp(template=template)
parser.parse()
res = parser.result()
# pprint.pprint(res)
assert res == [
[
{"ip": "10.123.89.55", "mask": "255.255.255.0"},
{"ip": "10.123.89.56", "mask": "255.255.255.0"},
{"ip": "10.123.89.55", "mask": "255.255.255.0"},
]
]
# test_anonymous_group_with_child_group_empty_absolute_path()
def test_anonymous_group_with_per_template_mode():
template = """
<template results="per_template">
<group void="">
hostname {{ hostname | record(hostname_abc) }}
</group>
<group>
interface {{ interface }}
description {{ description | ORPHRASE }}
ip address {{ ip }} {{ mask }}
{{ hostname | set(hostname_abc) }}
</group>
</template>
"""
datum_1 = """
hostname r2
!
interface GigabitEthernet1
vrf forwarding MGMT
ip address 10.123.89.55 255.255.255.0
"""
datum_2 = """
hostname r1
!
interface GigabitEthernet1
description some info
vrf forwarding MGMT
ip address 10.123.89.56 255.255.255.0
interface GigabitEthernet2
ip address 10.123.89.55 255.255.255.0
"""
parser_a = ttp(template=template)
parser_a.add_input(datum_1)
parser_a.add_input(datum_2)
parser_a.parse()
res = parser_a.result()
# pprint.pprint(res)
assert res == [
[
{
"hostname": "r2",
"interface": "GigabitEthernet1",
"ip": "10.123.89.55",
"mask": "255.255.255.0",
},
{
"description": "some info",
"hostname": "r1",
"interface": "GigabitEthernet1",
"ip": "10.123.89.56",
"mask": "255.255.255.0",
},
{
"hostname": "r1",
"interface": "GigabitEthernet2",
"ip": "10.123.89.55",
"mask": "255.255.255.0",
},
]
]
# test_anonymous_group_with_per_template_mode()
|
openproblems/utils.py | bendemeo/SingleCellOpenProblems | 134 | 12772674 | <reponame>bendemeo/SingleCellOpenProblems
from .version import __version__
import decorator
import packaging.version
@decorator.decorator
def temporary(func, version=None, *args, **kwargs):
"""Decorate a function as a temporary fix.
Parameters
----------
version : str
Version after which this function should raise a RuntimeError
"""
if version is None:
raise TypeError("temporary() missing 1 required keyword argument: 'version'")
if packaging.version.parse(__version__) >= packaging.version.parse(version):
raise RuntimeError(
"Temporary function {}.{} is temporary and should not be used "
"after version {} (current version: {})".format(
func.__module__, func.__name__, version, __version__
)
)
return func(*args, **kwargs)
def get_members(module):
"""Get all public members from a module."""
namespace = [attr for attr in dir(module) if not attr.startswith("_")]
return [getattr(module, attr) for attr in namespace]
def get_callable_members(module):
"""Get all callable public members from a module."""
return [member for member in get_members(module) if callable(member)]
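
# Illustrative usage sketch (not part of the original module): `legacy_normalize`
# and the "2.0.0" cut-off are assumptions; the call-time keyword relies on the
# decorator package's keyword-argument factory syntax used by `temporary` above.
@temporary(version="2.0.0")
def legacy_normalize(data):
    """Stand-in for a workaround that must be removed before version 2.0.0."""
    return data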
|
Multiprocessing/single.py | commoncdp2021/Gun-Gaja-Gun | 171 | 12772676 | <filename>Multiprocessing/single.py
#!/usr/bin/python
from gen_rand import gen_random_data
if __name__ == '__main__':
gen_random_data()
gen_random_data()
gen_random_data()
gen_random_data() |
src/mobile_seg/modules/wrapper.py | murez/mobile-semantic-segmentation | 713 | 12772678 | <reponame>murez/mobile-semantic-segmentation
import torch
import torch.nn as nn
from mobile_seg.modules.net import MobileNetV2_unet
class Wrapper(nn.Module):
def __init__(
self,
unet: MobileNetV2_unet,
scale: float = 255.
):
super().__init__()
self.unet = unet
self.scale = scale
def forward(self, x):
x = x / self.scale
x = self.unet(x)
x = x * self.scale
x = torch.cat((x, x, x), dim=1)
return x
# %%
if __name__ == '__main__':
# %%
model = MobileNetV2_unet()
wrapper = Wrapper(model)
inputs = torch.randn((1, 3, 224, 224))
out = wrapper(inputs)
print(out.shape)
|
nanopore-human-transcriptome/scripts/bulk_signal_read_correction/make_reads.py | hasindu2008/NA12878 | 345 | 12772693 | import sys
from pathlib import Path
from argparse import ArgumentParser
import h5py
import pandas as pd
import numpy as np
from tqdm import tqdm
from export import export_read_file
def get_args():
parser = ArgumentParser(description="Parse sequencing_summary.txt files and .paf files to find split reads "
"in an Oxford Nanopore Dataset",
add_help=False)
general = parser.add_argument_group(title='General options')
general.add_argument("-h", "--help",
action="help",
help="Show this help and exit"
)
in_args = parser.add_argument_group(
title='Input sources'
)
in_args.add_argument("-s", "--summary",
required=True,
nargs='+',
help='Sequencing summary file(s) generated by albacore or guppy. Can be compressed '
'using gzip, bzip2, xz, or zip')
in_args.add_argument("--start-events",
help="start_events.csv file generated by event_finder.py",
default="",
required=True,
)
in_args.add_argument("--end-events",
help="end_events.csv file generated by event_finder.py",
default="",
required=True,
)
in_args.add_argument("--targets",
help="A text file of target read ids with one per line.",
default="",
required=True,
)
in_args.add_argument("--bulk-files",
help="ONT bulk FAST5 files.",
nargs='+',
default="",
)
in_args.add_argument("-o", "--output-name",
help="Name of the output folder, this will be generated if it does not exist",
required=True,
default=""
)
in_args.add_argument("--extra-classifications",
help="Any extra MinKNOW classifications to include.",
nargs='*',
default="",
)
return parser.parse_args()
def main():
args = get_args()
# debug(args)
# # sys.exit()
# Make folders
for j in ['starts', 'ends']:
Path('{i}/{j}/{k}'.format(i=args.output_name, j=j, k='fast5')).mkdir(parents=True, exist_ok=True)
# Open files
start_events = pd.read_csv(args.start_events, sep=',')
end_events = pd.read_csv(args.end_events, sep=',')
seq_sum_df = concat_files_to_df(file_list=args.summary, sep='\t')
# Create end_time Series in seq_sum_df
seq_sum_df['end_time'] = seq_sum_df['start_time'] + seq_sum_df['duration']
# Sort and Groupby to segregate runs and channels
seq_sum_df = seq_sum_df.sort_values(by=['run_id', 'channel', 'start_time'], ascending=True)
seq_sum_df_1 = seq_sum_df.copy()
gb = seq_sum_df.groupby(['run_id', 'channel'])
gb1 = seq_sum_df_1.groupby(['run_id', 'channel'])
# Get previous and next start times within groupby
seq_sum_df['next_start'] = gb['start_time'].shift(-1)
seq_sum_df_1['prev_start'] = gb1['start_time'].shift(1)
target_read_ids = []
with open(args.targets, 'r') as file:
for line in file:
target_read_ids.append(line.strip())
classifications = ['pore', 'inrange', 'good_single', 'unblocking']
if args.extra_classifications:
classifications.extend(args.extra_classifications)
# Get end_events for target_read_ids
end_events = end_events[end_events['read_id'].isin(target_read_ids)]
normal_ending_ids = end_events[end_events['time'].ge(0) &
end_events['label'].isin(classifications)]['read_id'].unique()
abnormally_ending_ids = end_events[~end_events['read_id'].isin(normal_ending_ids)]['read_id'].unique()
end_target_ss = seq_sum_df[seq_sum_df['read_id'].isin(abnormally_ending_ids)]
# Get start_events for target_read_ids
start_events = start_events[start_events['read_id'].isin(target_read_ids)]
normal_starting_ids = start_events[start_events['time'].le(0) &
start_events['label'].isin(classifications)]['read_id'].unique()
abnormally_starting_ids = start_events[~start_events['read_id'].isin(normal_starting_ids)]['read_id'].unique()
start_target_ss = seq_sum_df_1[seq_sum_df_1['read_id'].isin(abnormally_starting_ids)]
print('Collecting abnormally ending reads:')
end_read_info = write_files(end_target_ss, args.bulk_files, 'start_time',
'next_start', '{i}/ends/fast5/'.format(i=args.output_name))
end_read_info.to_csv('{}/ends_read_info.txt'.format(args.output_name), sep='\t', index=False, header=True)
end_read_info.to_csv('{}/ends_filenames.txt'.format(args.output_name), sep='\t', index=False, header=False,
columns=['filename'])
print('Collecting abnormally starting reads:')
start_read_info = write_files(start_target_ss, args.bulk_files, 'prev_start',
'end_time', '{i}/starts/fast5/'.format(i=args.output_name))
start_read_info.to_csv('{}/starts_read_info.txt'.format(args.output_name), sep='\t', index=False, header=True)
start_read_info.to_csv('{}/starts_filenames.txt'.format(args.output_name), sep='\t', index=False, header=False,
columns=['filename'])
return
def write_files(target_ss, bulkfiles, read_start_col, read_end_col, export_path, remove_pore=True):
"""Abstraction for export_read_file for collecting read info
Parameters
----------
target_ss : pd.DataFrame
DataFrame of reads to generate reads for
bulkfiles: list
list of bulk FAST5 files
read_start_col : str
Column in the target_ss that start index is derived from
read_end_col : str
Column in the target_ss that end index is derived from
export_path : str
The folder where read files will be written
remove_pore : bool
Remove pore-like signal from trace (>1500)
Returns
-------
pd.DataFrame
DataFrame of read info about reads that have been written
"""
d = {
'read_id': [],
'channel': [],
'start_index': [],
'end_index': [],
'bv_read_id': [],
'filename': [],
'bv_filename': []
}
files_written = 0
for bf in tqdm(bulkfiles):
f = h5py.File(bf, 'r')
run_id = f['UniqueGlobalKey']["tracking_id"].attrs["run_id"].decode('utf8')
sf = int(f["UniqueGlobalKey"]["context_tags"].attrs["sample_frequency"].decode('utf8'))
t = target_ss[target_ss['run_id'] == run_id]
t = t.dropna()
f.close()
file = h5py.File(bf, 'r')
for idx, row in tqdm(t.iterrows(), total=t.shape[0], desc=run_id):
si = int(np.floor(row[read_start_col] * sf))
ei = int(np.floor(row[read_end_col] * sf))
d['read_id'].append(row['read_id'])
d['channel'].append(row['channel'])
d['start_index'].append(si)
d['end_index'].append(ei)
d['bv_read_id'].append("{ch}-{start}-{end}".format(ch=row['channel'], start=si, end=ei))
d['filename'].append(row['filename'])
d['bv_filename'].append(export_read_file(row['channel'],
si,
ei,
file,
export_path,
remove_pore=remove_pore))
files_written += 1
print('{} reads written'.format(files_written))
return pd.DataFrame(d)
def concat_files_to_df(file_list, **kwargs):
"""Return a pandas.DataFrame from a list of files
"""
df_list = []
for f in file_list:
try:
df_list.append(pd.read_csv(filepath_or_buffer=f, **kwargs))
except pd.errors.ParserError as e:
print('{}\nThis is usually caused by an input file not being the expected format'.format(repr(e)))
sys.exit(1)
        except Exception as e:
            print('Unexpected error while reading {}: {}'.format(f, repr(e)))
            sys.exit(1)
return pd.concat(df_list, ignore_index=True)
def debug(args):
dirs = dir(args)
for attr in dirs:
if attr[0] != '_':
print('{a:<25} {b}'.format(a=attr, b=getattr(args, attr)))
if __name__ == '__main__':
main()
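
# Illustrative command line (all file names are placeholders, shown only to make the
# expected inputs concrete):
#
#   python make_reads.py \
#       --summary sequencing_summary.txt \
#       --start-events start_events.csv \
#       --end-events end_events.csv \
#       --targets target_read_ids.txt \
#       --bulk-files run1_bulk.fast5 run2_bulk.fast5 \
#       --output-name split_read_output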
|
src/chapter-4/conftest.py | luizyao/pytest-chinese-doc | 283 | 12772698 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Author: <NAME> (<EMAIL>)
Created Date: 2019-09-19 5:35:12
-----
Last Modified: 2019-10-07 8:27:16
Modified By: <NAME> (<EMAIL>)
-----
THIS PROGRAM IS FREE SOFTWARE, IS LICENSED UNDER MIT.
A short and simple permissive license with conditions
only requiring preservation of copyright and license notices.
Copyright © 2019 <NAME>
-----
HISTORY:
Date By Comments
---------- -------- ---------------------------------------------------------
'''
import os
import smtplib
import tempfile
import pytest
@pytest.fixture(scope='module')
def smtp_connection():
return smtplib.SMTP("smtp.163.com", 25, timeout=5)
@pytest.fixture(scope='package')
def smtp_connection_package():
return smtplib.SMTP("smtp.163.com", 25, timeout=5)
@pytest.fixture()
def smtp_connection_yield():
smtp_connection = smtplib.SMTP("smtp.163.com", 25, timeout=5)
yield smtp_connection
print("关闭SMTP连接")
smtp_connection.close()
@pytest.fixture(scope='module')
def smtp_connection_request(request):
server, port = getattr(request.module, 'smtp_server', ("smtp.163.com", 25))
with smtplib.SMTP(server, port, timeout=5) as smtp_connection:
yield smtp_connection
print("断开 %s:%d" % (server, port))
@pytest.fixture(scope='module', params=['smtp.163.com', 'smtp.126.com'])
def smtp_connection_params(request):
server = request.param
with smtplib.SMTP(server, 25, timeout=5) as smtp_connection:
yield smtp_connection
print("断开 %s:%d" % (server, 25))
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
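
# Illustrative example (hypothetical test module, not part of this conftest): a test
# file can point `smtp_connection_request` at another server by defining a
# module-level `smtp_server` tuple, which the fixture reads via request.module.
#
#   # test_smtp.py
#   smtp_server = ("smtp.126.com", 25)
#
#   def test_noop(smtp_connection_request):
#       response, _ = smtp_connection_request.noop()
#       assert response == 250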
|
tests/python/frontend/mxnet/model_zoo/squeezenet.py | XiaoSong9905/tvm | 4,640 | 12772719 | <reponame>XiaoSong9905/tvm
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Symbol of SqueezeNet
Reference:
Iandola, <NAME>., et al.
"Squeezenet: Alexnet-level accuracy with 50x fewer parameters and< 0.5 mb model size." (2016).
"""
import mxnet as mx
# Helpers
def _make_fire(net, squeeze_channels, expand1x1_channels, expand3x3_channels):
net = _make_fire_conv(net, squeeze_channels, 1, 0)
left = _make_fire_conv(net, expand1x1_channels, 1, 0)
right = _make_fire_conv(net, expand3x3_channels, 3, 1)
# NOTE : Assume NCHW layout here
net = mx.sym.concat(left, right, dim=1)
return net
def _make_fire_conv(net, channels, kernel_size, padding=0):
net = mx.sym.Convolution(
net, num_filter=channels, kernel=(kernel_size, kernel_size), pad=(padding, padding)
)
net = mx.sym.Activation(net, act_type="relu")
return net
# Net
def get_symbol(num_classes=1000, version="1.0", **kwargs):
"""Get symbol of SqueezeNet
Parameters
----------
num_classes: int
The number of classification results
version : str, optional
"1.0" or "1.1" of SqueezeNet
"""
assert version in [
"1.0",
"1.1",
], "Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected".format(version=version)
net = mx.sym.Variable("data")
if version == "1.0":
net = mx.sym.Convolution(net, num_filter=96, kernel=(7, 7), stride=(2, 2), pad=(3, 3))
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 16, 64, 64)
net = _make_fire(net, 16, 64, 64)
net = _make_fire(net, 32, 128, 128)
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 32, 128, 128)
net = _make_fire(net, 48, 192, 192)
net = _make_fire(net, 48, 192, 192)
net = _make_fire(net, 64, 256, 256)
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 64, 256, 256)
else:
net = mx.sym.Convolution(net, num_filter=64, kernel=(3, 3), stride=(2, 2), pad=(1, 1))
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 16, 64, 64)
net = _make_fire(net, 16, 64, 64)
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 32, 128, 128)
net = _make_fire(net, 32, 128, 128)
net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max", stride=(2, 2))
net = _make_fire(net, 48, 192, 192)
net = _make_fire(net, 48, 192, 192)
net = _make_fire(net, 64, 256, 256)
net = _make_fire(net, 64, 256, 256)
net = mx.sym.Dropout(net, p=0.5)
net = mx.sym.Convolution(net, num_filter=num_classes, kernel=(1, 1))
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(data=net, global_pool=True, kernel=(13, 13), pool_type="avg")
net = mx.sym.flatten(net)
return mx.sym.softmax(net)
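
# Illustrative usage sketch (assumes the conventional 1x3x224x224 NCHW "data" input;
# not part of the original model zoo helper):
if __name__ == "__main__":
    sym = get_symbol(num_classes=1000, version="1.1")
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print("output shape:", out_shapes[0])  # expected: (1, 1000)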
|
homeassistant/components/webostv/trigger.py | MrDelik/core | 30,023 | 12772799 | """webOS Smart TV trigger dispatcher."""
from __future__ import annotations
from typing import cast
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .triggers import TriggersPlatformModule, turn_on
TRIGGERS = {
"turn_on": turn_on,
}
def _get_trigger_platform(config: ConfigType) -> TriggersPlatformModule:
"""Return trigger platform."""
platform_split = config[CONF_PLATFORM].split(".", maxsplit=1)
if len(platform_split) < 2 or platform_split[1] not in TRIGGERS:
raise ValueError(
f"Unknown webOS Smart TV trigger platform {config[CONF_PLATFORM]}"
)
return cast(TriggersPlatformModule, TRIGGERS[platform_split[1]])
async def async_validate_trigger_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate config."""
platform = _get_trigger_platform(config)
return cast(ConfigType, platform.TRIGGER_SCHEMA(config))
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach trigger of specified platform."""
platform = _get_trigger_platform(config)
assert hasattr(platform, "async_attach_trigger")
return cast(
CALLBACK_TYPE,
await getattr(platform, "async_attach_trigger")(
hass, config, action, automation_info
),
)
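
# Illustrative automation snippet, shown as a comment because this module only
# dispatches triggers; the "entity_id" key is an assumption and the authoritative
# schema lives in triggers/turn_on.py. The platform string "webostv.turn_on" is what
# _get_trigger_platform() splits and routes through the TRIGGERS mapping above.
#
#   trigger:
#     - platform: webostv.turn_on
#       entity_id: media_player.lg_webos_tv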
|
model.py | lFatality/tensorflow2caffe | 115 | 12772825 | <reponame>lFatality/tensorflow2caffe
from tflearn import input_data, conv_2d, max_pool_2d, fully_connected, dropout, Momentum, regression, DNN
#model of vgg-19
def vgg_net_19(width, height):
network = input_data(shape=[None, height, width, 3], name='input')
network = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
network = dropout(network, keep_prob=0.5)
network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
network = dropout(network, keep_prob=0.5)
network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
opt = Momentum(learning_rate=0, momentum = 0.9)
network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
model = DNN(network, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
return model
#model of vgg-19 for testing of the activations
#rename the output you want to test, connect it to the next layer and change the output layer at the bottom (model = DNN(...))
#make sure to use the correct test function (depending if your output is a tensor or a vector)
def vgg_net_19_activations(width, height):
network = input_data(shape=[None, height, width, 3], name='input')
network1 = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network2 = conv_2d(network1, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network2, 2, strides=2)
network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
network = max_pool_2d(network, 2, strides=2)
network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
network = dropout(network, keep_prob=0.5)
network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
network = dropout(network, keep_prob=0.5)
network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
opt = Momentum(learning_rate=0, momentum = 0.9)
network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
model = DNN(network1, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
return model
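
# Illustrative usage sketch (the checkpoint file name is an assumption): build the
# graph and restore previously trained weights before exporting them, e.g. to Caffe.
if __name__ == '__main__':
    model = vgg_net_19(width=224, height=224)
    model.load('vgg19.tflearn', weights_only=True)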
|
docs/snippets/ov_python_exclusives.py | kurylo/openvino | 1,127 | 12772826 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
#! [auto_compilation]
import openvino.runtime as ov
compiled_model = ov.compile_model("model.xml")
#! [auto_compilation]
#! [properties_example]
core = ov.Core()
input_a = ov.opset8.parameter([8])
res = ov.opset8.absolute(input_a)
model = ov.Model(res, [input_a])
compiled = core.compile_model(model, "CPU")
print(model.inputs)
print(model.outputs)
print(compiled.inputs)
print(compiled.outputs)
#! [properties_example]
#! [tensor_basics]
data_float64 = np.ones(shape=(2,8))
tensor = ov.Tensor(data_float64)
assert tensor.element_type == ov.Type.f64
data_int32 = np.ones(shape=(2,8), dtype=np.int32)
tensor = ov.Tensor(data_int32)
assert tensor.element_type == ov.Type.i32
#! [tensor_basics]
#! [tensor_shared_mode]
data_to_share = np.ones(shape=(2,8))
shared_tensor = ov.Tensor(data_to_share, shared_memory=True)
# Editing of the numpy array affects Tensor's data
data_to_share[0][2] = 6.0
assert shared_tensor.data[0][2] == 6.0
# Editing of Tensor's data affects the numpy array
shared_tensor.data[0][2] = 0.6
assert data_to_share[0][2] == 0.6
#! [tensor_shared_mode]
infer_request = compiled.create_infer_request()
data = np.random.randint(-5, 3 + 1, size=(8))
#! [passing_numpy_array]
# Passing input data as a dictionary
infer_request.infer(inputs={0: data})
# Passing input data as a list
infer_request.infer(inputs=[data])
#! [passing_numpy_array]
#! [getting_results]
# Get output tensor
results = infer_request.get_output_tensor().data
# Get tensor with CompiledModel's output node
results = infer_request.get_tensor(compiled.outputs[0]).data
# Get all results with special helper property
results = list(infer_request.results.values())
#! [getting_results]
#! [sync_infer]
# Simple call to InferRequest
results = infer_request.infer(inputs={0: data})
# Extra feature: calling CompiledModel directly
results = compiled_model(inputs={0: data})
#! [sync_infer]
#! [asyncinferqueue]
core = ov.Core()
# Simple model that adds two inputs together
input_a = ov.opset8.parameter([8])
input_b = ov.opset8.parameter([8])
res = ov.opset8.add(input_a, input_b)
model = ov.Model(res, [input_a, input_b])
compiled = core.compile_model(model, "CPU")
# Number of InferRequests that AsyncInferQueue holds
jobs = 4
infer_queue = ov.AsyncInferQueue(compiled, jobs)
# Create data
data = [np.array([i] * 8, dtype=np.float32) for i in range(jobs)]
# Run all jobs
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]})
infer_queue.wait_all()
#! [asyncinferqueue]
#! [asyncinferqueue_access]
results = infer_queue[3].get_output_tensor().data
#! [asyncinferqueue_access]
#! [asyncinferqueue_set_callback]
data_done = [False for _ in range(jobs)]
def f(request, userdata):
print(f"Done! Result: {request.get_output_tensor().data}")
data_done[userdata] = True
infer_queue.set_callback(f)
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]}, userdata=i)
infer_queue.wait_all()
assert all(data_done)
#! [asyncinferqueue_set_callback]
uint8_data = np.ones([100])
#! [packing_data]
from openvino.helpers import pack_data
packed_buffer = pack_data(uint8_data, ov.Type.u4)
# Create a tensor from the packed buffer with the target shape and element type
t = ov.Tensor(packed_buffer, [1, 128], ov.Type.u4)
#! [packing_data]
#! [unpacking]
from openvino.helpers import unpack_data
unpacked_data = unpack_data(t.data, t.element_type, t.shape)
assert np.array_equal(unpacked_data, uint8_data)
#! [unpacking]
#! [releasing_gil]
import openvino.runtime as ov
import cv2 as cv
from threading import Thread
input_data = []
# Processing input data will be done in a separate thread
# while compilation of the model and creation of the infer request
# is going to be executed in the main thread.
def prepare_data(input, image_path):
image = cv.imread(image_path)
h, w = list(input.shape)[-2:]
image = cv.resize(image, (h, w))
image = image.transpose((2, 0, 1))
image = np.expand_dims(image, 0)
input_data.append(image)
core = ov.Core()
model = core.read_model("model.xml")
# Create thread with prepare_data function as target and start it
thread = Thread(target=prepare_data, args=[model.input(), "path/to/image"])
thread.start()
# The GIL will be released in compile_model.
# It allows a thread above to start the job,
# while main thread is running in the background.
compiled = core.compile_model(model, "GPU")
# After returning from compile_model, the main thread acquires the GIL
# and starts create_infer_request which releases it once again.
request = compiled.create_infer_request()
# Join the thread to make sure the input_data is ready
thread.join()
# running the inference
request.infer(input_data)
#! [releasing_gil]
|
samples/aci-epg-reports-in-yaml.py | richardstrnad/acitoolkit | 351 | 12772846 | #!/usr/bin/env python
"""
Simple application that logs on to the APIC and displays all
EPGs.
"""
import socket
import yaml
import sys
from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint
def main():
"""
Main show EPGs routine
:return: None
"""
# Login to APIC
description = ('Simple application that logs on to the APIC'
' and displays all of the EPGs.')
creds = Credentials('apic', description)
args = creds.get()
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
return
# Download all of the tenants, app profiles, and EPGs
# and store the names as tuples in a list
tenants = Tenant.get_deep(session)
tenants_list = []
for tenant in tenants:
tenants_dict = {}
tenants_dict['name'] = tenant.name
if tenant.descr:
tenants_dict['description'] = tenant.descr
tenants_dict['app-profiles'] = []
for app in tenant.get_children(AppProfile):
app_profiles = {'name': app.name}
if app.descr:
app_profiles['description'] = app.descr
app_profiles['epgs'] = []
for epg in app.get_children(EPG):
epgs_info = {'name': epg.name}
if epg.descr:
epgs_info['description'] = epg.descr
epgs_info['endpoints'] = []
for endpoint in epg.get_children(Endpoint):
endpoint_info = {'name': endpoint.name}
if endpoint.ip != '0.0.0.0':
endpoint_info['ip'] = endpoint.ip
try:
hostname = socket.gethostbyaddr(endpoint.ip)[0]
except socket.error:
hostname = None
if hostname:
endpoint_info['hostname'] = hostname
if endpoint.descr:
endpoint_info['description'] = endpoint.descr
epgs_info['endpoints'].append(endpoint_info)
app_profiles['epgs'].append(epgs_info)
tenants_dict['app-profiles'].append(app_profiles)
tenants_list.append(tenants_dict)
tenants_info = {'tenants': tenants_list}
print(yaml.safe_dump(tenants_info, sys.stdout,
indent=4, default_flow_style=False))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
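
# Illustrative invocation (host name and credentials are placeholders; the option
# names come from acitoolkit's Credentials helper and may vary between versions):
#
#   python aci-epg-reports-in-yaml.py -u https://apic.example.com -l admin -p secret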
|
tests/test_write.py | sphh/RPLCD | 231 | 12772849 | <reponame>sphh/RPLCD
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import pytest
from RPLCD.gpio import CharLCD
from RPLCD.common import LCD_SETDDRAMADDR
def test_write_simple(mocker, charlcd_kwargs):
"""
Write "HelloWorld" to the display.
"""
lcd = CharLCD(**charlcd_kwargs)
send_data = mocker.patch.object(lcd, '_send_data')
text = 'HelloWorld'
lcd.write_string(text)
assert send_data.call_count == len(text)
calls = [c[0] for c in send_data.call_args_list]
assert calls[0] == (72,)
assert calls[1] == (101,)
assert calls[2] == (108,)
assert calls[3] == (108,)
assert calls[4] == (111,)
assert calls[5] == (87,)
assert calls[6] == (111,)
assert calls[7] == (114,)
assert calls[8] == (108,)
assert calls[9] == (100,)
def test_caching(mocker, charlcd_kwargs):
"""
Characters should only be written if they have changed
"""
lcd = CharLCD(**charlcd_kwargs)
send_data = mocker.patch.object(lcd, '_send_data')
send_instruction = mocker.patch.object(lcd, '_send_instruction')
lcd.write_string('hello')
assert send_data.call_count == 5
data_calls = [c[0] for c in send_data.call_args_list]
assert data_calls[0] == (104,)
assert data_calls[1] == (101,)
assert data_calls[2] == (108,)
assert data_calls[3] == (108,)
assert data_calls[4] == (111,)
lcd.home()
send_data.reset_mock()
send_instruction.reset_mock()
lcd.write_string('he77o')
assert send_data.call_count == 2
assert send_instruction.call_count == 3
data_calls = [c[0] for c in send_data.call_args_list]
instruction_calls = [c[0] for c in send_instruction.call_args_list]
assert instruction_calls[0] == (LCD_SETDDRAMADDR | 1,)
assert instruction_calls[1] == (LCD_SETDDRAMADDR | 2,)
assert data_calls[0] == (55,)
assert data_calls[1] == (55,)
assert instruction_calls[2] == (LCD_SETDDRAMADDR | 5,)
@pytest.mark.parametrize(['charmap', 'ue'], [
('A00', 0b11110101),
('A02', 0b11111100),
])
def test_charmap(mocker, charmap, ue, charlcd_kwargs):
"""
The charmap should be used. The "ü" Umlaut should be encoded correctly.
"""
lcd = CharLCD(charmap=charmap, **charlcd_kwargs)
send = mocker.patch.object(lcd, '_send_data')
text = 'Züri'
lcd.write_string(text)
assert send.call_count == 4, 'call count was %d' % send.call_count
calls = [c[0] for c in send.call_args_list]
assert calls[0] == (90,)
assert calls[1] == (ue,)
assert calls[2] == (114,)
assert calls[3] == (105,)
@pytest.mark.parametrize(['rows', 'cols'], [
(2, 16),
(4, 20),
])
def test_write_newline(mocker, rows, cols, charlcd_kwargs):
"""
Write text containing CR/LF chars to the display.
"""
lcd = CharLCD(rows=rows, cols=cols, **charlcd_kwargs)
send_data = mocker.patch.object(lcd, '_send_data')
send_instruction = mocker.patch.object(lcd, '_send_instruction')
text = '\nab\n\rcd'
lcd.write_string(text)
assert send_data.call_count + send_instruction.call_count == len(text)
data_calls = [c[0] for c in send_data.call_args_list]
instruction_calls = [c[0] for c in send_instruction.call_args_list]
assert instruction_calls[0] == (0x80 + 0x40,), instruction_calls
assert data_calls[0] == (97,), data_calls
assert data_calls[1] == (98,), data_calls
if rows == 2:
assert instruction_calls[1] == (0x80 + 2,), instruction_calls
assert instruction_calls[2] == (0x80 + 0,), instruction_calls
else:
assert instruction_calls[1] == (0x80 + cols + 2,), instruction_calls
assert instruction_calls[2] == (0x80 + cols + 0,), instruction_calls
assert data_calls[2] == (99,), data_calls
assert data_calls[3] == (100,), data_calls
|
demo/align_face.py | hologerry/pix2pix-flow | 2,898 | 12772853 | # OLD USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from PIL import Image
import numpy as np
# import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("--shape-predictor", help="path to facial landmark predictor", default='shape_predictor_68_face_landmarks.dat')
# ap.add_argument("--input", help="path to input images", default='input_raw')
# ap.add_argument("--output", help="path to input images", default='input_aligned')
# args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256,
desiredLeftEye=(0.371, 0.480))
# Input: numpy array for image with RGB channels
# Output: (numpy array, face_found)
def align_face(img):
img = img[:, :, ::-1] # Convert from RGB to BGR format
img = imutils.resize(img, width=800)
# detect faces in the grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 2)
if len(rects) > 0:
# align the face using facial landmarks
align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
align_img = np.array(Image.fromarray(align_img).convert('RGB'))
return align_img, True
else:
# No face found
return None, False
# Input: img_path
# Output: aligned_img if face_found, else None
def align(img_path):
img = Image.open(img_path)
img = img.convert('RGB') # if image is RGBA or Grayscale etc
img = np.array(img)
x, face_found = align_face(img)
return x |
data_pipeline/testing_helpers/kafka_docker.py | poros/data_pipeline | 110 | 12772865 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
from kafka import KafkaClient
from kafka import SimpleConsumer
from data_pipeline.config import get_config
from data_pipeline.message import create_from_offset_and_message
_ONE_MEGABYTE = 1024 * 1024
logger = get_config().logger
@contextmanager
def capture_new_data_pipeline_messages(topic):
"""contextmanager that moves to the tail of the given topic, and waits to
receive new messages, returning a function that can be called zero or more
times which will retrieve decoded data pipeline messages from the topic.
Returns:
Callable[[int], List[Message]]: Function that takes a single
optional argument, count, and returns up to count decoded data pipeline
messages. This function does not block, and will return however many
messages are available immediately. Default count is 100.
"""
with capture_new_messages(topic) as get_kafka_messages:
def get_data_pipeline_messages(count=100):
kafka_messages = get_kafka_messages(count)
return [
create_from_offset_and_message(kafka_message)
for kafka_message in kafka_messages
]
yield get_data_pipeline_messages
@contextmanager
def capture_new_messages(topic):
"""Seeks to the tail of the topic then returns a function that can
consume messages from that point.
"""
with setup_capture_new_messages_consumer(topic) as consumer:
def get_messages(count=100):
return consumer.get_messages(count=count)
yield get_messages
@contextmanager
def setup_capture_new_messages_consumer(topic):
"""Seeks to the tail of the topic then returns a function that can
consume messages from that point.
"""
kafka = KafkaClient(get_config().cluster_config.broker_list)
group = str('data_pipeline_clientlib_test')
consumer = SimpleConsumer(kafka, group, topic, max_buffer_size=_ONE_MEGABYTE)
consumer.seek(0, 2) # seek to tail, 0 is the offset, and 2 is the tail
yield consumer
kafka.close()
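
# Illustrative usage sketch ('my.topic' and the publishing step are placeholders):
# start capturing at the current tail of the topic, publish elsewhere, then fetch
# the decoded data pipeline messages that arrived afterwards.
#
#   with capture_new_data_pipeline_messages('my.topic') as get_messages:
#       ...  # publish to 'my.topic' here
#       new_messages = get_messages(count=10)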
|
lib/bindings/samples/server/debug/profiling_output.py | tlalexander/stitchEm | 182 | 12772903 | import threading
import errors
import vs
import logging
import gc
from blinker import signal
from utils import performance
from output.output import Output
# We need to be able to load (not run) vs_server on windows to generate the documentation.
# So we're skipping non-windows imports
try:
import psutil
except ImportError:
pass
PROFILING_STITCH_FORMAT = vs.NV12
class ProfilingOutput(Output):
"""Profiling output
"""
def __init__(self, stitcher, name="profiling", critical=False, preserved=False):
super(ProfilingOutput, self).__init__(stitcher, name, critical, preserved)
self.writer = None
self.pid = psutil.Process()
def reset(self):
self._transition_check()
self.pid.cpu_percent(interval=None)
vs.Output_reset(self.writer.object())
def _start(self, profiling_time=0, preserve=False):
# Todo I don't like that it's created differently from other outputs here, but for now I left it like this
panorama = self.stitcher.project_manager.panorama
self.writer = vs.Output_profiling(self.name,
panorama.width,
panorama.height,
self.stitcher.project_manager.controller.getFrameRateFromInputController(),
PROFILING_STITCH_FORMAT)
if self.writer is None:
raise errors.InternalError()
self.shared_writer = vs.writerSharedPtr(self.writer.object())
self.shared_video = vs.videoWriterSharedPtr(self.shared_writer)
self.has_audio = False
if self.shared_video is not None and not self.stitcher.stitch_output.addWriter(self.shared_video):
raise errors.InternalError("Cannot add profiling writer to stitcher")
if profiling_time > 0:
threading.Timer(profiling_time, self.t_stop).start()
self.pid.cpu_percent(interval=None)
        # Jump automatically from the starting state to the started state
self.t_writer_ok()
def _stop(self):
self.fps = vs.Output_getFps(self.writer.release())
self.writer = None
logging.info("fps is %f:" % self.fps)
logging.info("cpu_util is %d" % self.pid.cpu_percent(interval=None))
cuda = performance.getCudaInfo()
logging.info("gpu_util is %d" % int(cuda['utilization.gpu']))
logging.info("enc_util is %s" % cuda['utilization.enc'])
success = self.stitcher.stitch_output.removeWriterNoGIL(self.name)
signal("profiling_stopping").send()
if not success:
raise errors.InternalError("Cannot remove writer")
self.shared_video = None
self.shared_writer = None
gc.collect()
        # Jump automatically from the stopping state to the stopped state
self.t_writer_completed()
def get_statistics(self):
cuda = performance.getCudaInfo()
self._transition_check()
if self.writer is not None:
self.fps = vs.Output_getFps(self.writer.object())
return {"fps": self.fps,
"cpu": self.pid.cpu_percent(interval=None),
"gpu": float(cuda['utilization.gpu']),
"enc": float(cuda['utilization.enc'])}
|
cloudpathlib/local/__init__.py | kabirkhan/cloudpathlib | 128 | 12772926 | <filename>cloudpathlib/local/__init__.py
"""This module implements "Local" classes that mimic their associated `cloudpathlib` non-local
counterparts but use the local filesystem in place of cloud storage. They can be used as drop-in
replacements, with the intent that you can use them as mock or monkeypatch substitutes in your
tests. See ["Testing code that uses cloudpathlib"](../../testing_mocked_cloudpathlib/) for usage
examples.
"""
from .implementations import (
local_azure_blob_implementation,
LocalAzureBlobClient,
LocalAzureBlobPath,
local_gs_implementation,
LocalGSClient,
LocalGSPath,
local_s3_implementation,
LocalS3Client,
LocalS3Path,
)
from .localclient import LocalClient
from .localpath import LocalPath
__all__ = [
"local_azure_blob_implementation",
"LocalAzureBlobClient",
"LocalAzureBlobPath",
"LocalClient",
"local_gs_implementation",
"LocalGSClient",
"LocalGSPath",
"LocalPath",
"local_s3_implementation",
"LocalS3Client",
"LocalS3Path",
]
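
# Illustrative test sketch (assumes pytest's monkeypatch fixture and a hypothetical
# application module `myapp` that imported S3Client/S3Path from cloudpathlib):
#
#   from cloudpathlib.local import LocalS3Client, LocalS3Path
#
#   def test_report_upload(monkeypatch):
#       monkeypatch.setattr("myapp.S3Client", LocalS3Client)
#       monkeypatch.setattr("myapp.S3Path", LocalS3Path)
#       ...  # exercise myapp; paths now resolve on the local filesystem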
|