| content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses 1 value) |
|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import k2
import sentencepiece as spm
import torch
import torch.nn as nn
from asr_datamodule import LibriSpeechAsrDataModule
from conformer import Conformer
from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler
from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.decode import (
get_lattice,
nbest_decoding,
nbest_oracle,
one_best_decoding,
rescore_with_attention_decoder,
rescore_with_n_best_list,
rescore_with_whole_lattice,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
get_texts,
setup_logger,
store_transcripts,
str2bool,
write_error_stats,
)
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--epoch",
type=int,
default=34,
help="It specifies the checkpoint to use for decoding."
"Note: Epoch counts from 0.",
)
parser.add_argument(
"--avg",
type=int,
default=20,
help="Number of checkpoints to average. Automatically select "
"consecutive checkpoints before the checkpoint specified by "
"'--epoch'. ",
)
parser.add_argument(
"--method",
type=str,
default="attention-decoder",
help="""Decoding method.
Supported values are:
- (0) ctc-decoding. Use CTC decoding. It uses a sentence piece
model, i.e., lang_dir/bpe.model, to convert word pieces to words.
It needs neither a lexicon nor an n-gram LM.
- (1) 1best. Extract the best path from the decoding lattice as the
decoding result.
- (2) nbest. Extract n paths from the decoding lattice; the path
with the highest score is the decoding result.
- (3) nbest-rescoring. Extract n paths from the decoding lattice,
rescore them with an n-gram LM (e.g., a 4-gram LM), the path with
the highest score is the decoding result.
- (4) whole-lattice-rescoring. Rescore the decoding lattice with an
n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice
is the decoding result.
- (5) attention-decoder. Extract n paths from the LM rescored
lattice, the path with the highest score is the decoding result.
- (6) nbest-oracle. Its WER is the lower bound of any n-best
rescoring method can achieve. Useful for debugging n-best
rescoring method.
""",
)
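# Hedged usage sketch for the methods listed above (the script path, epochs and
# option values are illustrative, not prescribed by this file):
#   ./conformer_mmi/decode.py --method ctc-decoding --lang-dir data/lang_bpe_500
#   ./conformer_mmi/decode.py --method whole-lattice-rescoring --epoch 34 --avg 20
#   ./conformer_mmi/decode.py --method attention-decoder --num-paths 100 --nbest-scale 0.5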
parser.add_argument(
"--num-paths",
type=int,
default=100,
help="""Number of paths for n-best based decoding method.
Used only when "method" is one of the following values:
nbest, nbest-rescoring, attention-decoder, and nbest-oracle
""",
)
parser.add_argument(
"--nbest-scale",
type=float,
default=0.5,
help="""The scale to be applied to `lattice.scores`.
It's needed if you use any kind of n-best based rescoring.
Used only when "method" is one of the following values:
nbest, nbest-rescoring, attention-decoder, and nbest-oracle
A smaller value results in more unique paths.
""",
)
parser.add_argument(
"--export",
type=str2bool,
default=False,
help="""When enabled, the averaged model is saved to
conformer_ctc/exp/pretrained.pt. Note: only model.state_dict() is saved.
pretrained.pt contains a dict {"model": model.state_dict()},
which can be loaded by `icefall.checkpoint.load_checkpoint()`.
""",
)
parser.add_argument(
"--exp-dir",
type=str,
default="conformer_mmi/exp_500",
help="The experiment dir",
)
parser.add_argument(
"--lang-dir",
type=str,
default="data/lang_bpe_500",
help="The lang dir",
)
parser.add_argument(
"--num-decoder-layers",
type=int,
default=6,
help="Number of attention decoder layers",
)
return parser
def get_params() -> AttributeDict:
params = AttributeDict(
{
"lm_dir": Path("data/lm"),
# parameters for conformer
"subsampling_factor": 4,
"vgg_frontend": False,
"use_feat_batchnorm": True,
"feature_dim": 80,
"nhead": 8,
"attention_dim": 512,
# parameters for decoding
"search_beam": 20,
"output_beam": 8,
"min_active_states": 30,
"max_active_states": 10000,
"use_double_scores": True,
}
)
return params
def decode_one_batch(
params: AttributeDict,
model: nn.Module,
HLG: Optional[k2.Fsa],
H: Optional[k2.Fsa],
bpe_model: Optional[spm.SentencePieceProcessor],
batch: dict,
word_table: k2.SymbolTable,
sos_id: int,
eos_id: int,
G: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
"""Decode one batch and return the result in a dict. The dict has the
following format:
- key: It indicates the setting used for decoding. For example,
if no rescoring is used, the key is the string `no_rescore`.
If LM rescoring is used, the key is the string `lm_scale_xxx`,
where `xxx` is the value of `lm_scale`. An example key is
`lm_scale_0.7`
- value: It contains the decoding result. `len(value)` equals the
batch size. `value[i]` is the decoding result for the i-th
utterance in the given batch.
Args:
params:
It's the return value of :func:`get_params`.
- params.method is "1best", it uses 1best decoding without LM rescoring.
- params.method is "nbest", it uses nbest decoding without LM rescoring.
- params.method is "nbest-rescoring", it uses nbest LM rescoring.
- params.method is "whole-lattice-rescoring", it uses whole lattice LM
rescoring.
model:
The neural model.
HLG:
The decoding graph. Used only when params.method is NOT ctc-decoding.
H:
The ctc topo. Used only when params.method is ctc-decoding.
bpe_model:
The BPE model. Used only when params.method is ctc-decoding.
batch:
It is the return value from iterating
`lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
for the format of the `batch`.
word_table:
The word symbol table.
sos_id:
The token ID of the SOS.
eos_id:
The token ID of the EOS.
G:
An LM. It is not None when params.method is "nbest-rescoring"
or "whole-lattice-rescoring". In general, the G in HLG
is a 3-gram LM, while this G is a 4-gram LM.
Returns:
Return the decoding result. See above description for the format of
the returned dict.
"""
if HLG is not None:
device = HLG.device
else:
device = H.device
feature = batch["inputs"]
assert feature.ndim == 3
feature = feature.to(device)
# at entry, feature is (N, T, C)
supervisions = batch["supervisions"]
nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
# nnet_output is (N, T, C)
supervision_segments = torch.stack(
(
supervisions["sequence_idx"],
supervisions["start_frame"] // params.subsampling_factor,
supervisions["num_frames"] // params.subsampling_factor,
),
1,
).to(torch.int32)
if H is None:
assert HLG is not None
decoding_graph = HLG
else:
assert HLG is None
assert bpe_model is not None
decoding_graph = H
lattice = get_lattice(
nnet_output=nnet_output,
decoding_graph=decoding_graph,
supervision_segments=supervision_segments,
search_beam=params.search_beam,
output_beam=params.output_beam,
min_active_states=params.min_active_states,
max_active_states=params.max_active_states,
subsampling_factor=params.subsampling_factor,
)
if params.method == "ctc-decoding":
best_path = one_best_decoding(
lattice=lattice, use_double_scores=params.use_double_scores
)
# Note: `best_path.aux_labels` contains token IDs, not word IDs
# since we are using H, not HLG here.
#
# token_ids is a list-of-lists of IDs
token_ids = get_texts(best_path)
# hyps is a list of str, e.g., ['xxx yyy zzz', ...]
hyps = bpe_model.decode(token_ids)
# hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
hyps = [s.split() for s in hyps]
key = "ctc-decoding"
return {key: hyps}
if params.method == "nbest-oracle":
# Note: You can also pass rescored lattices to it.
# We choose the HLG decoded lattice for speed reasons
# as HLG decoding is faster and the oracle WER
# is only slightly worse than that of rescored lattices.
best_path = nbest_oracle(
lattice=lattice,
num_paths=params.num_paths,
ref_texts=supervisions["text"],
word_table=word_table,
nbest_scale=params.nbest_scale,
oov="<UNK>",
)
hyps = get_texts(best_path)
hyps = [[word_table[i] for i in ids] for ids in hyps]
key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}" # noqa
return {key: hyps}
if params.method in ["1best", "nbest"]:
if params.method == "1best":
best_path = one_best_decoding(
lattice=lattice, use_double_scores=params.use_double_scores
)
key = "no_rescore"
else:
best_path = nbest_decoding(
lattice=lattice,
num_paths=params.num_paths,
use_double_scores=params.use_double_scores,
nbest_scale=params.nbest_scale,
)
key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}" # noqa
hyps = get_texts(best_path)
hyps = [[word_table[i] for i in ids] for ids in hyps]
return {key: hyps}
assert params.method in [
"nbest-rescoring",
"whole-lattice-rescoring",
"attention-decoder",
]
lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3]
lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]
if params.method == "nbest-rescoring":
best_path_dict = rescore_with_n_best_list(
lattice=lattice,
G=G,
num_paths=params.num_paths,
lm_scale_list=lm_scale_list,
nbest_scale=params.nbest_scale,
)
elif params.method == "whole-lattice-rescoring":
best_path_dict = rescore_with_whole_lattice(
lattice=lattice,
G_with_epsilon_loops=G,
lm_scale_list=lm_scale_list,
)
elif params.method == "attention-decoder":
# lattice uses a 3-gram LM. We rescore it with a 4-gram LM.
rescored_lattice = rescore_with_whole_lattice(
lattice=lattice,
G_with_epsilon_loops=G,
lm_scale_list=None,
)
# TODO: pass `lattice` instead of `rescored_lattice` to
# `rescore_with_attention_decoder`
best_path_dict = rescore_with_attention_decoder(
lattice=rescored_lattice,
num_paths=params.num_paths,
model=model,
memory=memory,
memory_key_padding_mask=memory_key_padding_mask,
sos_id=sos_id,
eos_id=eos_id,
nbest_scale=params.nbest_scale,
)
else:
assert False, f"Unsupported decoding method: {params.method}"
ans = dict()
if best_path_dict is not None:
for lm_scale_str, best_path in best_path_dict.items():
hyps = get_texts(best_path)
hyps = [[word_table[i] for i in ids] for ids in hyps]
ans[lm_scale_str] = hyps
else:
# Rescoring produced no best paths; emit one empty hypothesis per
# utterance so that downstream scoring still lines up.
ans["empty"] = [[] for _ in range(lattice.shape[0])]
return ans
def decode_dataset(
dl: torch.utils.data.DataLoader,
params: AttributeDict,
model: nn.Module,
HLG: Optional[k2.Fsa],
H: Optional[k2.Fsa],
bpe_model: Optional[spm.SentencePieceProcessor],
word_table: k2.SymbolTable,
sos_id: int,
eos_id: int,
G: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
"""Decode dataset.
Args:
dl:
PyTorch's dataloader containing the dataset to decode.
params:
It is returned by :func:`get_params`.
model:
The neural model.
HLG:
The decoding graph. Used only when params.method is NOT ctc-decoding.
H:
The ctc topo. Used only when params.method is ctc-decoding.
bpe_model:
The BPE model. Used only when params.method is ctc-decoding.
word_table:
It is the word symbol table.
sos_id:
The token ID for SOS.
eos_id:
The token ID for EOS.
G:
An LM. It is not None when params.method is "nbest-rescoring"
or "whole-lattice-rescoring". In general, the G in HLG
is a 3-gram LM, while this G is a 4-gram LM.
Returns:
Return a dict, whose key may be "no_rescore" if no LM rescoring
is used, or it may be "lm_scale_0.7" if LM rescoring is used.
Its value is a list of tuples. Each tuple contains two elements:
The first is the reference transcript, and the second is the
predicted result.
"""
results = []
num_cuts = 0
try:
num_batches = len(dl)
except TypeError:
num_batches = "?"
results = defaultdict(list)
for batch_idx, batch in enumerate(dl):
texts = batch["supervisions"]["text"]
hyps_dict = decode_one_batch(
params=params,
model=model,
HLG=HLG,
H=H,
bpe_model=bpe_model,
batch=batch,
word_table=word_table,
G=G,
sos_id=sos_id,
eos_id=eos_id,
)
for lm_scale, hyps in hyps_dict.items():
this_batch = []
assert len(hyps) == len(texts)
for hyp_words, ref_text in zip(hyps, texts):
ref_words = ref_text.split()
this_batch.append((ref_words, hyp_words))
results[lm_scale].extend(this_batch)
num_cuts += len(batch["supervisions"]["text"])
if batch_idx % 100 == 0:
batch_str = f"{batch_idx}/{num_batches}"
logging.info(
f"batch {batch_str}, cuts processed until now is {num_cuts}"
)
return results
def save_results(
params: AttributeDict,
test_set_name: str,
results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
):
if params.method == "attention-decoder":
# Set it to False since there are too many logs.
enable_log = False
else:
enable_log = True
test_set_wers = dict()
for key, results in results_dict.items():
recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt"
store_transcripts(filename=recog_path, texts=results)
if enable_log:
logging.info(f"The transcripts are stored in {recog_path}")
# The following prints out WERs, per-word error statistics and aligned
# ref/hyp pairs.
errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt"
with open(errs_filename, "w") as f:
wer = write_error_stats(
f, f"{test_set_name}-{key}", results, enable_log=enable_log
)
test_set_wers[key] = wer
if enable_log:
logging.info(
"Wrote detailed error stats to {}".format(errs_filename)
)
test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt"
with open(errs_info, "w") as f:
print("settings\tWER", file=f)
for key, val in test_set_wers:
print("{}\t{}".format(key, val), file=f)
s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
note = "\tbest for {}".format(test_set_name)
for key, val in test_set_wers:
s += "{}\t{}{}\n".format(key, val, note)
note = ""
logging.info(s)
@torch.no_grad()
def main():
parser = get_parser()
LibriSpeechAsrDataModule.add_arguments(parser)
args = parser.parse_args()
args.exp_dir = Path(args.exp_dir)
args.lang_dir = Path(args.lang_dir)
params = get_params()
params.update(vars(args))
setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode")
logging.info("Decoding started")
logging.info(params)
lexicon = Lexicon(params.lang_dir)
max_token_id = max(lexicon.tokens)
num_classes = max_token_id + 1 # +1 for the blank
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", 0)
logging.info(f"device: {device}")
graph_compiler = BpeCtcTrainingGraphCompiler(
params.lang_dir,
device=device,
sos_token="<sos/eos>",
eos_token="<sos/eos>",
)
sos_id = graph_compiler.sos_id
eos_id = graph_compiler.eos_id
if params.method == "ctc-decoding":
HLG = None
H = k2.ctc_topo(
max_token=max_token_id,
modified=False,
device=device,
)
bpe_model = spm.SentencePieceProcessor()
bpe_model.load(str(params.lang_dir / "bpe.model"))
else:
H = None
bpe_model = None
HLG = k2.Fsa.from_dict(
torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu")
)
HLG = HLG.to(device)
assert HLG.requires_grad is False
if not hasattr(HLG, "lm_scores"):
HLG.lm_scores = HLG.scores.clone()
if params.method in (
"nbest-rescoring",
"whole-lattice-rescoring",
"attention-decoder",
):
if not (params.lm_dir / "G_4_gram.pt").is_file():
logging.info("Loading G_4_gram.fst.txt")
logging.warning("It may take 8 minutes.")
with open(params.lm_dir / "G_4_gram.fst.txt") as f:
first_word_disambig_id = lexicon.word_table["#0"]
G = k2.Fsa.from_openfst(f.read(), acceptor=False)
# G.aux_labels is not needed in later computations, so
# remove it here.
del G.aux_labels
# CAUTION: The following line is crucial.
# Arcs entering the back-off state have label equal to #0.
# We have to change it to 0 here.
G.labels[G.labels >= first_word_disambig_id] = 0
G = k2.Fsa.from_fsas([G]).to(device)
G = k2.arc_sort(G)
torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
else:
logging.info("Loading pre-compiled G_4_gram.pt")
d = torch.load(params.lm_dir / "G_4_gram.pt", map_location="cpu")
G = k2.Fsa.from_dict(d).to(device)
if params.method in ["whole-lattice-rescoring", "attention-decoder"]:
# Add epsilon self-loops to G as we will compose
# it with the whole lattice later
G = k2.add_epsilon_self_loops(G)
G = k2.arc_sort(G)
G = G.to(device)
# G.lm_scores is used to replace HLG.lm_scores during
# LM rescoring.
G.lm_scores = G.scores.clone()
else:
G = None
model = Conformer(
num_features=params.feature_dim,
nhead=params.nhead,
d_model=params.attention_dim,
num_classes=num_classes,
subsampling_factor=params.subsampling_factor,
num_decoder_layers=params.num_decoder_layers,
vgg_frontend=params.vgg_frontend,
use_feat_batchnorm=params.use_feat_batchnorm,
)
if params.avg == 1:
load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
else:
start = params.epoch - params.avg + 1
filenames = []
for i in range(start, params.epoch + 1):
if i >= 0:
filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
logging.info(f"averaging {filenames}")
model.load_state_dict(average_checkpoints(filenames))
if params.export:
logging.info(f"Export averaged model to {params.exp_dir}/pretrained.pt")
torch.save(
{"model": model.state_dict()}, f"{params.exp_dir}/pretrained.pt"
)
return
model.to(device)
model.eval()
num_param = sum([p.numel() for p in model.parameters()])
logging.info(f"Number of model parameters: {num_param}")
librispeech = LibriSpeechAsrDataModule(args)
# CAUTION: `test_sets` is for displaying only.
# If you want to skip test-clean, you have to skip
# it inside the for loop. That is, use
#
# if test_set == 'test-clean': continue
#
test_sets = ["test-clean", "test-other"]
for test_set, test_dl in zip(test_sets, librispeech.test_dataloaders()):
results_dict = decode_dataset(
dl=test_dl,
params=params,
model=model,
HLG=HLG,
H=H,
bpe_model=bpe_model,
word_table=lexicon.word_table,
G=G,
sos_id=sos_id,
eos_id=eos_id,
)
save_results(
params=params, test_set_name=test_set, results_dict=results_dict
)
logging.info("Done!")
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == "__main__":
main()
| 33.02446 | 91 | 0.605132 | ["Apache-2.0"] | aarora8/icefall | egs/librispeech/ASR/conformer_mmi/decode.py | 22,952 | Python |
#!/usr/bin/env python
# -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2004-2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
import subprocess
import sys
def compile_abc(target, files, deps=None, configs=None):
asc_jar = os.environ.get('ASC', os.path.realpath('../../../utils/asc.jar'))
javacmd = ['java', '-ea', '-DAS3', '-DAVMPLUS', '-classpath', asc_jar, 'macromedia.asc.embedding.ScriptCompiler', '-builtin']
if deps:
javacmd.extend("../%s/%s.abc" % (dep, dep) for dep in deps)
javacmd.extend(['-out', target])
javacmd.extend(files)
if configs:
javacmd.extend(configs)
p = subprocess.Popen(javacmd, cwd=target)
p.wait()
def main():
configs = sys.argv[1:]
if configs == []:
# Build without float support by default
configs = ['-config', 'CONFIG::VMCFG_FLOAT=false']
compile_abc("builtin", ["builtin.as", "Vector.as", "DescribeType.as", "JSON.as", "Math.as", "Error.as", "Date.as", "RegExp.as", "IDataInput.as", "IDataOutput.as", "ByteArray.as", "Proxy.as", "XML.as", "Dictionary.as"], configs=configs)
compile_abc("shell", ["Capabilities.as", "Domain.as", "System.as"], deps=["builtin"], configs=configs)
compile_abc("avmplus", ["avmplus.as"], deps=["builtin"], configs=configs)
if __name__ == "__main__":
main()
| 42.8 | 239 | 0.697263 | ["Apache-2.0"] | Acidburn0zzz/shumway | src/avm2/generated/generate.py | 2,996 | Python |
#!/usr/bin/env python
class FilterModule(object):
def filters(self):
return {'json_env_map': self.json_env_map}
def json_env_map(self, env):
return [{'name': k, 'value': str(v)} for k,v in env.items()]
| 24.444444 | 66 | 0.65 | ["MIT"] | antoine-fl/ansible-clever | filter_plugins/env_json_map.py | 220 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TrafficWeight:
def __init__(self):
self.request = 0
self.response = 0
class PacketInterval:
def __init__(self):
self.firstPacket = 0
self.lastPacket = 0
| 17.461538 | 24 | 0.651982 | ["MIT"] | AntoineRondelet/SideChannelLeaksOverHTTPS | src/utils.py | 227 | Python |
# Given a sorted array A[1, ..., n] and a number x, write a program that determines
# whether there exist indices i and j such that A[i] + A[j] = x.
def sum_search(T, x):
l = 0
r = len(T) - 1
while l <= r:
if T[l] + T[r] == x:
return True
elif T[l] + T[r] > x:
r -= 1
else:
l += 1
return False
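# Correctness sketch: the array is sorted, so if T[l] + T[r] > x then T[r] is too
# large to pair with T[l] or with anything to its right, and r can only move left;
# symmetrically, if the sum is too small, l moves right. Each step discards one
# candidate index, so the whole scan runs in O(n) time.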
T = [2, 5, 8, 12, 16, 19, 20, 25, 34, 55, 81]
x = 37
print(sum_search(T, x))
| 21.952381 | 99 | 0.488069 | ["MIT"] | Szymon-Budziak/ASD_exercises_solutions | Exercises/Exercises_01/07_exercise.py | 466 | Python |
# Generated by Django 2.2.12 on 2020-07-05 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vote', '0002_request_track'),
]
operations = [
migrations.AddField(
model_name='track',
name='metadata_locked',
field=models.BooleanField(default=False),
),
]
| 20.421053 | 53 | 0.600515 | ["BSD-3-Clause"] | colons/nkd.su | nkdsu/apps/vote/migrations/0003_track_metadata_locked.py | 388 | Python |
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import terraengineer_model
@pytest.mark.vcr
@pytest.mark.parametrize(
"asset,address",
[("ust", "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8")],
)
def test_get_history_asset_from_terra_address(asset, address, recorder):
df = terraengineer_model.get_history_asset_from_terra_address(
asset=asset,
address=address,
)
recorder.capture(df)
| 23.952381 | 72 | 0.771372 | ["MIT"] | 23errg/GamestonkTerminal | tests/openbb_terminal/cryptocurrency/defi/test_terraengineer_model.py | 503 | Python |
from __future__ import print_function
# Part of the JBEI Quantitative Metabolic Modeling Library (JQMM)
# Copyright (c) 2016, The Regents of the University of California.
# For licensing details see "license.txt" and "legal.txt".
from builtins import str
import re
import core
import NamedRangedNumber
class Gene(NamedRangedNumber.NamedRangedNumber):
"""
Class for single genes, and values typically associated with them.
Typically it is instantiated with a string representing a name, and a value.
Since genes can potentially have multiple names due to conflicting standards, the superclass also supports
receiving a list of names during instantiation, instead of a string.
The first name in the list will be considered the canonical name when rendering the gene as a string.
The given value can be an integer, a float, a ranged number, or a string representation of any of these,
but is kept internally and exported as a ranged number.
"""
def __init__(self, names, value=None):
if isinstance(names, list):
nameList = names
else:
nameList = [names]
for name in nameList:
assert ' ' not in name.strip(), "Gene names cannot contain spaces: '" + name + "'"
super(Gene, self).__init__(names, value)
def addName(self, name):
assert ' ' not in name.strip(), "Gene names cannot contain spaces: '" + name + "'"
super(Gene, self).addName(name)
class GeneSet(NamedRangedNumber.NamedRangedNumberSet):
"""
Class for a set of GeneSet objects, derived from NamedRangedNumberSet.
"""
def __init__(self, contents=None):
super(GeneSet, self).__init__(contents)
def recastSet(self, victimSet, preferExistingObjects=True, preferExistingValues=False):
itemsToRecast = victimSet.contentsList
recastItems = self.recastItems(itemsToRecast, preferExistingObjects, preferExistingValues)
return GeneSet(recastItems)
@staticmethod
def createSetsFromStrings(structureString="", valueString=None):
"""
Construct a list of GeneSet objects based on the given strings.
The first string, structureString, determines the number of GeneSets to create.
An empty string will return an empty list.
A string containing anything else will return one or more subsets.
The number of subsets is determined by the number of times the separator string " or " occurs in the
structureString. For example, "(setA) or (setB) or (setC) or geneD or geneE or (setF)" will create
six subsets. "(setA)", "(setC)", "geneD", etc are substrings that declare the contents of each set.
There are two accepted ways to format the substrings:
Method #1:
substrings example: "aName=[aLow:aBest:aHigh] and bName=[bLow:bBest:bHigh] and cName=value and dName"
valueString=None
In this example, four NamedRangedNumber objects will be created in total:
aName and bName specify rangedNumber values for NamedRangedNumber, cName specifies just one floating point
number that is converted to a rangedNumber, and dName crates a NamedRangedNumber with the value set to None.
valueString can be left out entirely.
Method #2:
substrings example: "aName and dName and cName and qName"
valueString example: "aName=[aLow:aBest:aHigh] bName=[bLow:bBest:bHigh] cName=value dName=value fName=value"
In this example, four NamedRangedNumber objects will be created, but only two of them will be assigned values
(the other two will have values of None). This happens because the structureString declares what items are in
the set, while the valueString only assigns values. If a value is given in the second string for a name that is
not listed in the first, that value is ignored. No item is created for it.
While it is possible to supply a mixture of methods 1 and 2, it is not recommended practice. Values assigned via
method 2 take precedence over values assigned via method 1, even if the value assigned is "=None".
Note that Gene objects are re-used from one set to the next. That is, if the same name is mentioned in two
different substrings, only one Gene object will be created but it will be placed in two subsets.
"""
givenValues = {}
if valueString is not None:
pairs = valueString.split() # Split on whitespace, no need to strip
for pair in pairs:
parts = pair.split('=')
name = parts[0]
if parts[1:2]:
givenValues[name] = parts[1]
subSets = []
structureString = structureString.strip() # Stripping initial surrounding whitespace in order to check for a blank entry
if structureString != "":
collectionStrings = re.split("\s+or\s+", structureString)
for collectionStr in collectionStrings:
items = []
# Sections of the string are sometimes enclosed in parentheses.
# Plus, sometimes garbage from badly-done embeds comes up, like so:
# <html:p> GENE_ASSOCIATION :( b1901 and b1900 and ( b1898 and b1899 ) ) </html:p>
collectionStr = collectionStr.replace('(',' ').replace(')',' ').strip()
itemStrings = re.split("\s+and\s+", collectionStr)
for itemString in itemStrings:
item = Gene.fromString(itemString)
if item.canonicalName in givenValues:
item.set(givenValues[item.canonicalName])
items.append(item)
if items:
subSets.append(GeneSet(items))
# subSets is a list of GeneSet objects
return subSets
if __name__ == "__main__":
test()
def test():
try:
vError = False
print("Instantiating from illegal string \"test=[2,3,4]\", expecting failure ...")
a = Gene.fromString("test=[2,3,4]")
except ValueError:
vError = True
print("\tGot ValueError as expected.")
pass
assert vError, "NamedRangedNumber accepted wrong input."
print("\nInstantiating from string value \"test=[2:3:4]\" ...")
a = Gene.fromString("test=[2:3:4]")
assert a.canonicalName == 'test', "Name wrong"
b = Gene.fromString("dorks=[0.5:1:1.5]")
c = a + 3
print("\t" + str(a) + ' + 3 = ' + str(c))
d = a + b
print("\t" + str(a) + ' + ' + str(b) + ' = ' + str(d))
assert d.value.best == 4.0, "Addition failure, d.value.best should be 4.0."
print("\nInstantiating a GeneSet from an invalid string, expecting failure:")
strA = "(bob fred frank) or (jed and Bill123) and (fred & billyBob) or captainAmerica"
print("\t" + strA)
try:
aError = False
geneSets = GeneSet.createSetsFromStrings(strA)
except AssertionError:
aError = True
print("\tGot AssertionError as expected.")
pass
assert aError, "GeneSet.createSetsFromStrings accepted wrong input."
print("\nInstantiating a GeneSet from strings:")
strA = "(bob and fred and frank) or (jed and Bill123) or (fred and billyBob) or captainAmerica"
strB = "bob=12 fred=45 frank=[1:2:3] jed=10.1 Bill123=1"
print("\t" + strA)
print("\t" + strB)
subSets = GeneSet.createSetsFromStrings(strA, strB)
masterSet = GeneSet()
newSubSets = []
print("Master set:")
for subSet in subSets:
newSubSets.append(masterSet.recastSet(subSet))
print("\t" + str(masterSet))
print("Subsets consolidated, for embedding:")
print("\t" + GeneSet.createStringFromSets(newSubSets))
print("Significance test result for master set:" + str(masterSet.testSignificance(12)))
| 45.327586 | 128 | 0.651198 | ["Unlicense"] | somtirtharoy/jqmm | code/core/Genes.py | 7,887 | Python |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple executor that operates synchronously in eager TensorFlow mode."""
from typing import Any, MutableMapping, Optional
import cachetools
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import typed_object
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
from tensorflow_federated.python.tensorflow_libs import graph_merge
# Cache size here is simply heuristic, no formal analysis.
_TF_FUNCTION_CACHE_SIZE = 100
def _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu, param_type,
device):
"""Extracts the TensorFlow function from serialized computation.
Args:
comp: An instance of `pb.Computation`.
must_pin_function_to_cpu: A boolean flag to indicate if the computation is
forced to be on CPUs.
param_type: A `tff.Type` instance or None.
device: A `tf.config.LogicalDevice` or None.
Returns:
A TensorFlow ConcreteFunction.
"""
def function_to_wrap():
"""No-arg function to import graph def.
We pass a no-arg function to `tf.compat.v1.wrap_function` to avoid
the leftover placeholders that can result from binding arguments to the
imported graphdef via `input_map`. The correct signature will be added to
this function later, via the `prune` call below.
Returns:
Result of importing graphdef backing `comp`.
"""
graph_def = serialization_utils.unpack_graph_def(comp.tensorflow.graph_def)
init_op = comp.tensorflow.initialize_op
if init_op:
graph_def = tensorflow_utils.add_control_deps_for_init_op(
graph_def, init_op)
def _import_fn():
return tf.import_graph_def(
graph_merge.uniquify_shared_names(graph_def), name='')
if must_pin_function_to_cpu:
with tf.device('cpu'):
return _import_fn()
elif device is not None:
with tf.device(device.name):
return _import_fn()
else:
return _import_fn()
wrapped_noarg_fn = tf.compat.v1.wrap_function(function_to_wrap, signature=[])
if param_type is not None:
input_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.parameter)
else:
input_tensor_names = []
output_tensor_names = tensorflow_utils.extract_tensor_names_from_binding(
comp.tensorflow.result)
import_graph = wrapped_noarg_fn.graph
try:
wrapped_fn = wrapped_noarg_fn.prune(
feeds=tf.nest.map_structure(import_graph.as_graph_element,
input_tensor_names),
fetches=tf.nest.map_structure(import_graph.as_graph_element,
output_tensor_names),
)
except KeyError as e:
raise TypeError(
'Caught exception trying to prune graph `{g}` with '
'feeds {feeds} and fetches {fetches}. This indicates that these '
'names may not refer to tensors in the graph. .\nException: {e}'.format(
g=import_graph,
feeds=input_tensor_names,
fetches=output_tensor_names,
e=e))
return wrapped_fn
def embed_tensorflow_computation(comp, type_spec=None, device=None):
"""Embeds a TensorFlow computation for use in the eager context.
Args:
comp: An instance of `pb.Computation`.
type_spec: An optional `tff.Type` instance or something convertible to it.
device: An optional `tf.config.LogicalDevice`.
Returns:
Either a one-argument or a zero-argument callable that executes the
computation in eager mode.
Raises:
TypeError: If arguments are of the wrong types, e.g., in `comp` is not a
TensorFlow computation.
"""
# TODO(b/134543154): Decide whether this belongs in `tensorflow_utils.py`
# since it deals exclusively with eager mode. Incubate here, and potentially
# move there, once stable.
py_typecheck.check_type(comp, pb.Computation)
comp_type = type_serialization.deserialize_type(comp.type)
type_spec = computation_types.to_type(type_spec)
if type_spec is not None:
if not type_spec.is_equivalent_to(comp_type):
raise TypeError('Expected a computation of type {}, got {}.'.format(
type_spec, comp_type))
else:
type_spec = comp_type
# TODO(b/155198591): Currently, TF will raise on any function returning a
# `tf.data.Dataset` not pinned to CPU. We should follow up here and remove
# this gating when we can.
must_pin_function_to_cpu = type_analysis.contains(type_spec.result,
lambda t: t.is_sequence())
which_computation = comp.WhichOneof('computation')
if which_computation != 'tensorflow':
unexpected_building_block = building_blocks.ComputationBuildingBlock.from_proto(
comp)
raise TypeError('Expected a TensorFlow computation, found {}.'.format(
unexpected_building_block))
if type_spec.is_function():
param_type = type_spec.parameter
result_type = type_spec.result
else:
param_type = None
result_type = type_spec
wrapped_fn = _get_wrapped_function_from_comp(comp, must_pin_function_to_cpu,
param_type, device)
param_fns = []
if param_type is not None:
for spec in structure.flatten(type_spec.parameter):
if spec.is_tensor():
param_fns.append(lambda x: x)
else:
py_typecheck.check_type(spec, computation_types.SequenceType)
param_fns.append(tf.data.experimental.to_variant)
result_fns = []
for spec in structure.flatten(result_type):
if spec.is_tensor():
result_fns.append(lambda x: x)
else:
py_typecheck.check_type(spec, computation_types.SequenceType)
tf_structure = type_conversions.type_to_tf_structure(spec.element)
def fn(x, tf_structure=tf_structure):
return tf.data.experimental.from_variant(x, tf_structure)
result_fns.append(fn)
def _fn_to_return(arg, param_fns, wrapped_fn): # pylint:disable=missing-docstring
param_elements = []
if arg is not None:
arg_parts = structure.flatten(arg)
if len(arg_parts) != len(param_fns):
raise RuntimeError('Expected {} arguments, found {}.'.format(
len(param_fns), len(arg_parts)))
for arg_part, param_fn in zip(arg_parts, param_fns):
param_elements.append(param_fn(arg_part))
result_parts = wrapped_fn(*param_elements)
# There is a tf.wrap_function(...) issue b/144127474 where variables created
# from tf.import_graph_def(...) inside tf.wrap_function(...) are not
# destroyed. So get all the variables from `wrapped_fn` and destroy
# manually.
# TODO(b/144127474): Remove this manual cleanup once tf.wrap_function(...)
# is fixed.
resources = []
for op in wrapped_fn.graph.get_operations():
if op.type == 'VarHandleOp':
resources += op.outputs
if resources:
for resource in wrapped_fn.prune(feeds={}, fetches=resources)():
tf.raw_ops.DestroyResourceOp(resource=resource)
result_elements = []
for result_part, result_fn in zip(result_parts, result_fns):
result_elements.append(result_fn(result_part))
return structure.pack_sequence_as(result_type, result_elements)
fn_to_return = lambda arg, p=param_fns, w=wrapped_fn: _fn_to_return(arg, p, w)
# pylint: disable=function-redefined
if must_pin_function_to_cpu:
old_fn_to_return = fn_to_return
def fn_to_return(x):
with tf.device('cpu'):
return old_fn_to_return(x)
elif device is not None:
old_fn_to_return = fn_to_return
def fn_to_return(x):
with tf.device(device.name):
return old_fn_to_return(x)
# pylint: enable=function-redefined
if param_type is not None:
return lambda arg: fn_to_return(arg) # pylint: disable=unnecessary-lambda
else:
return lambda: fn_to_return(None)
def to_representation_for_type(
value: Any,
tf_function_cache: MutableMapping[str, Any],
type_spec: Optional[computation_types.Type] = None,
device: Optional[tf.config.LogicalDevice] = None) -> Any:
"""Verifies or converts the `value` to an eager object matching `type_spec`.
WARNING: This function is only partially implemented. It does not support
data sets at this point.
The output of this function is always an eager tensor, eager dataset, a
representation of a TensorFlow computation, or a nested structure of those
that matches `type_spec`, and when `device` has been specified, everything
is placed on that device on a best-effort basis.
TensorFlow computations are represented here as zero- or one-argument Python
callables that accept their entire argument bundle as a single Python object.
Args:
value: The raw representation of a value to compare against `type_spec` and
potentially to be converted.
tf_function_cache: A cache obeying `dict` semantics that can be used to look
up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type`, can be `None` for values that derive
from `typed_object.TypedObject`.
device: An optional `tf.config.LogicalDevice` to place the value on (for
tensor-level values).
Returns:
Either `value` itself, or a modified version of it.
Raises:
TypeError: If the `value` is not compatible with `type_spec`.
"""
type_spec = type_utils.reconcile_value_with_type_spec(value, type_spec)
if isinstance(value, computation_base.Computation):
return to_representation_for_type(
computation_impl.ComputationImpl.get_proto(value), tf_function_cache,
type_spec, device)
elif isinstance(value, pb.Computation):
key = (value.SerializeToString(), str(type_spec),
device.name if device else None)
cached_fn = tf_function_cache.get(key)
if cached_fn is not None:
return cached_fn
embedded_fn = embed_tensorflow_computation(value, type_spec, device)
tf_function_cache[key] = embedded_fn
return embedded_fn
elif type_spec.is_struct():
type_elem = structure.to_elements(type_spec)
value_elem = (structure.to_elements(structure.from_container(value)))
result_elem = []
if len(type_elem) != len(value_elem):
raise TypeError('Expected a {}-element tuple, found {} elements.'.format(
len(type_elem), len(value_elem)))
for (t_name, el_type), (v_name, el_val) in zip(type_elem, value_elem):
if t_name != v_name:
raise TypeError(
'Mismatching element names in type vs. value: {} vs. {}.'.format(
t_name, v_name))
el_repr = to_representation_for_type(el_val, tf_function_cache, el_type,
device)
result_elem.append((t_name, el_repr))
return structure.Struct(result_elem)
elif device is not None:
py_typecheck.check_type(device, tf.config.LogicalDevice)
with tf.device(device.name):
return to_representation_for_type(
value, tf_function_cache, type_spec=type_spec, device=None)
elif isinstance(value, EagerValue):
return value.internal_representation
elif isinstance(value, executor_value_base.ExecutorValue):
raise TypeError(
'Cannot accept a value embedded within a non-eager executor.')
elif type_spec.is_tensor():
if not tf.is_tensor(value):
value = tf.convert_to_tensor(value, dtype=type_spec.dtype)
elif hasattr(value, 'read_value'):
# a tf.Variable-like result, get a proper tensor.
value = value.read_value()
value_type = (
computation_types.TensorType(value.dtype.base_dtype, value.shape))
if not type_spec.is_assignable_from(value_type):
raise TypeError(
'The apparent type {} of a tensor {} does not match the expected '
'type {}.'.format(value_type, value, type_spec))
return value
elif type_spec.is_sequence():
if isinstance(value, list):
value = tensorflow_utils.make_data_set_from_elements(
None, value, type_spec.element)
py_typecheck.check_type(value,
type_conversions.TF_DATASET_REPRESENTATION_TYPES)
element_type = computation_types.to_type(value.element_spec)
value_type = computation_types.SequenceType(element_type)
type_spec.check_assignable_from(value_type)
return value
else:
raise TypeError('Unexpected type {}.'.format(type_spec))
class EagerValue(executor_value_base.ExecutorValue):
"""A representation of an eager value managed by the eager executor."""
def __init__(self, value, tf_function_cache, type_spec=None, device=None):
"""Creates an instance of a value in this executor.
Args:
value: Depending on `type_spec`, either a `tf.Tensor`, `tf.data.Dataset`,
or a nested structure of these stored in an `Struct`.
tf_function_cache: A cache obeying `dict` semantics that can be used to
look up previously embedded TensorFlow functions.
type_spec: An instance of `tff.Type` that represents a tensor, a dataset,
or a nested structure of these.
device: An optional `tf.config.LogicalDevice` on which to place the value.
"""
if type_spec is None:
py_typecheck.check_type(value, typed_object.TypedObject)
type_spec = value.type_signature
else:
type_spec = computation_types.to_type(type_spec)
py_typecheck.check_type(type_spec, computation_types.Type)
self._type_signature = type_spec
self._value = to_representation_for_type(value, tf_function_cache,
type_spec, device)
@property
def internal_representation(self):
"""Returns a representation of the eager value embedded in the executor.
This property is only intended for use by the eager executor and tests. Not
for consumption by consumers of the executor interface.
"""
return self._value
@property
def type_signature(self):
return self._type_signature
@tracing.trace
async def compute(self):
return self._value
class EagerTFExecutor(executor_base.Executor):
"""The eager executor only runs TensorFlow, synchronously, in eager mode.
TODO(b/134764569): Add support for data as a building block.
This executor understands the following TFF types: tensors, sequences, named
tuples, and functions. It does not understand placements, federated, or
abstract types.
This executor understands the following kinds of TFF computation building
blocks: tensorflow computations, and external data. It does not understand
lambda calculus or any compositional constructs. Tuples and selections can
only be created using `create_struct()` and `create_selection()` in the API.
The arguments to be ingested can be Python constants of simple types, nested
structures of those, as well as eager tensors and eager datasets.
The external data references must identify files available in the executor's
filesystem. The exact format is yet to be documented.
The executor will be able to place work on specific devices (e.g., on GPUs).
In contrast to the reference executor, it handles data sets in a pipelined
fashion, and does not place limits on the data set sizes. It also avoids
marshaling TensorFlow values in and out between calls.
It does not deal with multithreading, checkpointing, federated computations,
and other concerns to be covered by separate executor components. It runs the
operations it supports in a synchronous fashion. Asynchrony and other aspects
not supported here should be handled by composing this executor with other
executors into a complex executor stack, rather than mixing in all the logic.
"""
def __init__(self, device=None):
"""Creates a new instance of an eager executor.
Args:
device: An optional `tf.config.LogicalDevice` that this executor will
schedule all of its operations to run on. For example, the list of
logical devices can be obtained using
`tf.config.list_logical_devices()`.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the device is not a `tf.config.LogicalDevice`.
ValueError: If there is no device `device`.
"""
if not tf.executing_eagerly():
raise RuntimeError('The eager executor may only be used in eager mode.')
if device is not None:
py_typecheck.check_type(device, tf.config.LogicalDevice)
self._device = device
else:
self._device = None
self._tf_function_cache = cachetools.LRUCache(_TF_FUNCTION_CACHE_SIZE)
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
"""Embeds `value` of type `type_spec` within this executor.
Args:
value: An object that represents the value to embed within the executor.
type_spec: The `tff.Type` of the value represented by this object, or
something convertible to it. Can optionally be `None` if `value` is an
instance of `typed_object.TypedObject`.
Returns:
An instance of `EagerValue`.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the arguments are of the wrong types.
ValueError: If the type was not specified and cannot be determined from
the value.
"""
if not tf.executing_eagerly():
raise RuntimeError('The eager executor may only be used in eager mode.')
return EagerValue(value, self._tf_function_cache, type_spec, self._device)
@tracing.trace
async def create_call(self, comp, arg=None):
"""Creates a call to `comp` with optional `arg`.
Args:
comp: As documented in `executor_base.Executor`.
arg: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` representing the result of the call.
Raises:
RuntimeError: If not executing eagerly.
TypeError: If the arguments are of the wrong types.
"""
py_typecheck.check_type(comp, EagerValue)
if arg is not None:
py_typecheck.check_type(arg, EagerValue)
if not comp.type_signature.is_function():
raise TypeError('Expected a functional type, found {}'.format(
comp.type_signature))
if comp.type_signature.parameter is not None:
return EagerValue(
comp.internal_representation(arg.internal_representation), # pytype: disable=attribute-error
self._tf_function_cache,
comp.type_signature.result,
self._device)
elif arg is None:
return EagerValue(comp.internal_representation(), self._tf_function_cache,
comp.type_signature.result, self._device)
else:
raise TypeError('Cannot pass an argument to a no-argument function.')
@tracing.trace
async def create_struct(self, elements):
"""Creates a tuple of `elements`.
Args:
elements: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` that represents the constructed tuple.
"""
elements = structure.to_elements(structure.from_container(elements))
val_elements = []
type_elements = []
for k, v in elements:
py_typecheck.check_type(v, EagerValue)
val_elements.append((k, v.internal_representation))
type_elements.append((k, v.type_signature))
return EagerValue(
structure.Struct(val_elements), self._tf_function_cache,
computation_types.StructType([
(k, v) if k is not None else v for k, v in type_elements
]))
@tracing.trace
async def create_selection(self, source, index=None, name=None):
"""Creates a selection from `source`.
Args:
source: As documented in `executor_base.Executor`.
index: As documented in `executor_base.Executor`.
name: As documented in `executor_base.Executor`.
Returns:
An instance of `EagerValue` that represents the constructed selection.
Raises:
TypeError: If arguments are of the wrong types.
ValueError: If either both, or neither of `name` and `index` are present.
"""
py_typecheck.check_type(source, EagerValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
py_typecheck.check_type(source.internal_representation, structure.Struct)
if index is not None:
py_typecheck.check_type(index, int)
if name is not None:
raise ValueError(
'Cannot simultaneously specify name {} and index {}.'.format(
name, index))
else:
return EagerValue(source.internal_representation[index],
self._tf_function_cache, source.type_signature[index])
elif name is not None:
py_typecheck.check_type(name, str)
return EagerValue(
getattr(source.internal_representation, str(name)),
self._tf_function_cache, getattr(source.type_signature, str(name)))
else:
raise ValueError('Must specify either name or index.')
def close(self):
pass
| 40.007117 | 103 | 0.718244 | ["Apache-2.0"] | ddayzzz/federated | tensorflow_federated/python/core/impl/executors/eager_tf_executor.py | 22,484 | Python |
import torch
import argparse
from bindsnet.network import Network
from bindsnet.learning import Hebbian
from bindsnet.pipeline import EnvironmentPipeline
from bindsnet.encoding import bernoulli
from bindsnet.network.monitors import Monitor
from bindsnet.environment import GymEnvironment
from bindsnet.network.topology import Connection
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.pipeline.action import select_multinomial
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, default=1000000)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--n_neurons", type=int, default=100)
parser.add_argument("--dt", type=float, default=1.0)
parser.add_argument("--plot_interval", type=int, default=10)
parser.add_argument("--render_interval", type=int, default=10)
parser.add_argument("--print_interval", type=int, default=100)
parser.add_argument("--gpu", dest="gpu", action="store_true")
parser.set_defaults(plot=False, render=False, gpu=False)
args = parser.parse_args()
n = args.n
seed = args.seed
n_neurons = args.n_neurons
dt = args.dt
plot_interval = args.plot_interval
render_interval = args.render_interval
print_interval = args.print_interval
gpu = args.gpu
if gpu:
torch.set_default_tensor_type("torch.cuda.FloatTensor")
torch.cuda.manual_seed_all(seed)
else:
torch.manual_seed(seed)
# Build network.
network = Network(dt=dt)
# Layers of neurons.
inpt = Input(shape=(1, 1, 1, 80, 80), traces=True) # Input layer
exc = LIFNodes(n=n_neurons, refrac=0, traces=True) # Excitatory layer
readout = LIFNodes(n=4, refrac=0, traces=True) # Readout layer
layers = {"X": inpt, "E": exc, "R": readout}
# Connections between layers.
# Input -> excitatory.
w = 0.01 * torch.rand(layers["X"].n, layers["E"].n)
input_exc_conn = Connection(
source=layers["X"],
target=layers["E"],
w=0.01 * torch.rand(layers["X"].n, layers["E"].n),
wmax=0.02,
norm=0.01 * layers["X"].n,
)
# Excitatory -> readout.
exc_readout_conn = Connection(
source=layers["E"],
target=layers["R"],
w=0.01 * torch.rand(layers["E"].n, layers["R"].n),
update_rule=Hebbian,
nu=[1e-2, 1e-2],
norm=0.5 * layers["E"].n,
)
# Spike recordings for all layers.
spikes = {}
for layer in layers:
spikes[layer] = Monitor(layers[layer], ["s"], time=plot_interval)
# Voltage recordings for excitatory and readout layers.
voltages = {}
for layer in set(layers.keys()) - {"X"}:
voltages[layer] = Monitor(layers[layer], ["v"], time=plot_interval)
# Add all layers and connections to the network.
for layer in layers:
network.add_layer(layers[layer], name=layer)
network.add_connection(input_exc_conn, source="X", target="E")
network.add_connection(exc_readout_conn, source="E", target="R")
# Add all monitors to the network.
for layer in layers:
network.add_monitor(spikes[layer], name="%s_spikes" % layer)
if layer in voltages:
network.add_monitor(voltages[layer], name="%s_voltages" % layer)
# Load the Breakout environment.
environment = GymEnvironment("BreakoutDeterministic-v4")
environment.reset()
pipeline = EnvironmentPipeline(
network,
environment,
encoding=bernoulli,
time=1,
history=5,
delta=10,
plot_interval=plot_interval,
print_interval=print_interval,
render_interval=render_interval,
action_function=select_multinomial,
output="R",
)
total = 0
rewards = []
avg_rewards = []
lengths = []
avg_lengths = []
i = 0
try:
while i < n:
result = pipeline.env_step()
pipeline.step(result)
is_done = result[2]
if is_done:
pipeline.reset_state_variables()
i += 1
except KeyboardInterrupt:
environment.close()
| 27.791045 | 72 | 0.713212 | ["MIT"] | Singular-Brain/ProjectBrain | bindsnet_master/examples/breakout/random_network_baseline.py | 3,724 | Python |
"""
Ensemble the predictions from different model outputs.
"""
import argparse
import json
import pickle
import numpy as np
from collections import Counter
from data.loader import DataLoader
from utils import scorer, constant
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('pred_files', nargs='+', help='A list of prediction files written by eval.py.')
parser.add_argument('--data_dir', default='dataset/tacred')
parser.add_argument('--dataset', default='test', help='Evaluate on dev or test set.')
parser.add_argument('--weights', default='')
args = parser.parse_args()
return args
def main():
args = parse_args()
print("Loading data file...")
filename = args.data_dir + '/{}.json'.format(args.dataset)
with open(filename, 'r') as infile:
data = json.load(infile)
labels = [d['relation'] for d in data]
# read predictions
print("Loading {} prediction files...".format(len(args.pred_files)))
scores_list = []
for path in args.pred_files:
with open(path, 'rb') as infile:
scores = pickle.load(infile)
scores_list += [scores]
print("Calculating ensembled predictions...")
predictions = []
scores_by_examples = list(zip(*scores_list))
assert len(scores_by_examples) == len(data)
for scores in scores_by_examples:
if len(args.weights) == 0:
pred = ensemble(scores)
else:
pred = weight_sum(scores, args.weights)
predictions += [pred]
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
predictions = [id2label[p] for p in predictions]
scorer.score(labels, predictions, verbose=True)
def ensemble(scores):
"""
Ensemble by majority vote.
"""
c = Counter()
for probs in zip(scores):
idx = int(np.argmax(np.array(probs)))
c.update([idx])
best = c.most_common(1)[0][0]
return best
def weight_sum(scores, weights):
weights = list(map(lambda x: float(x), weights.split(' ')))
aggregate_scores = np.zeros(len(scores[0]))
for model_scores, weight in zip(scores, weights):
scores_weights = np.array(model_scores) * weight
aggregate_scores += scores_weights
best = int(np.argmax(aggregate_scores))
return best
if __name__ == '__main__':
main()
| 31.533333 | 103 | 0.652854 | ["Apache-2.0"] | gstoica27/tacred-exploration | ensemble.py | 2,365 | Python |
import os
import sys
import shutil
import subprocess
from config import rfam_local as conf
from config import gen_config as gc
from utils import genome_search_utils as gsu
# ------------------------------------------------------------------------
def split_genome_to_chunks(updir, upid):
"""
updir:
upid:
return:
"""
# get updir location
upid_fasta = os.path.join(updir, upid + '.fa')
seq_chunks_dir = os.path.join(updir, "search_chunks")
if not os.path.exists(seq_chunks_dir):
os.mkdir(seq_chunks_dir)
        os.chmod(seq_chunks_dir, 0o777)
# check if we need to split the seq_file
if gsu.count_nucleotides_in_fasta(upid_fasta) >= gc.SPLIT_SIZE:
# split sequence file into smalled chunks
gsu.split_seq_file(upid_fasta, gc.SPLIT_SIZE, dest_dir=seq_chunks_dir)
# now index the fasta files
seq_files = os.listdir(seq_chunks_dir)
for seq_file in seq_files:
seq_file_loc = os.path.join(seq_chunks_dir, seq_file)
cmd = "%s --index %s" % (conf.ESL_SFETCH, seq_file_loc)
subprocess.call(cmd, shell=True)
# for input consistency if the sequence file is small, copy it in the
# search_chunks directory
else:
# copy file
shutil.copyfile(upid_fasta, os.path.join(seq_chunks_dir,
upid + '.fa'))
# index file
cmd = "%s --index %s" % (conf.ESL_SFETCH, os.path.join(seq_chunks_dir,
upid + '.fa'))
subprocess.call(cmd, shell=True)
# ------------------------------------------------------------------------
if __name__ == '__main__':
project_dir = sys.argv[1]
# this can be a file of upids or a upid string UPXXXXXXXX
upid_input = sys.argv[2]
if os.path.isfile(upid_input):
fp = open(upid_input, 'r')
upids = [x.strip() for x in fp]
fp.close()
for upid in upids:
suffix = upid[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid)
split_genome_to_chunks(updir_loc, upid)
else:
# get updir location and subdir
suffix = upid_input[-3:]
subdir_loc = os.path.join(project_dir, suffix)
updir_loc = os.path.join(subdir_loc, upid_input)
split_genome_to_chunks(updir_loc, upid_input)
| 31.259259 | 82 | 0.557267 | ["Apache-2.0"] | Rfam/rfam-production | scripts/support/split_genomes.py | 2,532 | Python |
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
from grr.server import artifact_utils
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
"""
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec", # TODO(user): recursive definition.
]
def CopyConstructor(self, other):
# pylint: disable=protected-access
self.SetRawData(other._CopyRawData())
# pylint: enable=protected-access
self.age = other.age
def __len__(self):
"""Return the total number of path components."""
i = -1
for i, _ in enumerate(self):
pass
return i + 1
def __getitem__(self, item):
for i, element in enumerate(self):
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
      # Manipulate the previous member's protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
return list(self)[-1]
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while 1:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
for component in reversed(self):
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os", # PathSpec.PathType.OS
1: "/fs/tsk", # PathSpec.PathType.TSK
2: "/registry", # PathSpec.PathType.REGISTRY
3: "/devices/memory", # PathSpec.PathType.MEMORY
4: "/temp", # PathSpec.PathType.TMPFILE
}
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
# If the first level is OS and the second level is TSK its probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(first_component.offset / 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
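# Illustrative usage sketch: building a recursive pathspec with the helpers
# above. The device and file paths are made up for the example.
def _ExamplePathSpecUsage():
  """Builds an OS pathspec with a TSK component nested inside it."""
  result = PathSpec(path="/dev/sda1", pathtype=PathSpec.PathType.OS)
  # Append() attaches the new component at the end of the recursive chain.
  result.Append(path="/home/user/notes.txt", pathtype=PathSpec.PathType.TSK)
  # Two components in total; CollapsePath() joins their path fields.
  assert len(result) == 2
  return result.CollapsePath()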
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
"""
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, client=None):
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for pattern in self.InterpolateGrouping(pattern):
yield pattern
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
result = "".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % result)
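# Illustrative usage sketch: grouping expansion and regex conversion on
# GlobExpression. The glob values are made up for the example.
def _ExampleGlobExpansion():
  """Expands an inline grouping and converts a glob to a regex."""
  glob = GlobExpression("/etc/{passwd,shadow}")
  # Yields one concrete pattern per alternative: /etc/passwd and /etc/shadow.
  patterns = list(glob.InterpolateGrouping("/etc/{passwd,shadow}"))
  # AsRegEx() performs no interpolation; it only rewrites the glob syntax
  # into a case-insensitive regular expression.
  regex = GlobExpression("/var/log/*.log").AsRegEx()
  return patterns, regex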
| 30.449704 | 80 | 0.667314 | ["Apache-2.0"] | mrhania/grr | grr/lib/rdfvalues/paths.py | 10,292 | Python |
#!/usr/bin/env python3
# Copyright (c) 2020 The Garliccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''Test generateblock rpc.
'''
from test_framework.test_framework import GarliccoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class GenerateBlockTest(GarliccoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.log.info('Generate an empty block to address')
address = node.getnewaddress()
hash = node.generateblock(output=address, transactions=[])['hash']
block = node.getblock(blockhash=hash, verbose=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address)
self.log.info('Generate an empty block to a descriptor')
hash = node.generateblock('addr(' + address + ')', [])['hash']
block = node.getblock(blockhash=hash, verbosity=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address)
self.log.info('Generate an empty block to a combo descriptor with compressed pubkey')
combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
combo_address = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address)
self.log.info('Generate an empty block to a combo descriptor with uncompressed pubkey')
combo_key = '0408ef68c46d20596cc3f6ddf7c8794f71913add807f1dc55949fa805d764d191c0b7ce6894c126fce0babc6663042f3dde9b0cf76467ea315514e5a6731149c67'
combo_address = 'mkc9STceoCcjoXEXe6cm66iJbmjM6zR9B2'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address)
# Generate 110 blocks to spend
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get mined
for _ in range(10):
node.sendtoaddress(address, 0.001)
self.log.info('Generate block with txid')
txid = node.sendtoaddress(address, 1)
hash = node.generateblock(address, [txid])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
assert_equal(block['tx'][1], txid)
self.log.info('Generate block with raw tx')
utxos = node.listunspent(addresses=[address])
raw = node.createrawtransaction([{'txid':utxos[0]['txid'], 'vout':utxos[0]['vout']}],[{address:1}])
signed_raw = node.signrawtransactionwithwallet(raw)['hex']
hash = node.generateblock(address, [signed_raw])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
txid = block['tx'][1]
assert_equal(node.gettransaction(txid)['hex'], signed_raw)
self.log.info('Fail to generate block with out of order txs')
raw1 = node.createrawtransaction([{'txid':txid, 'vout':0}],[{address:0.9999}])
signed_raw1 = node.signrawtransactionwithwallet(raw1)['hex']
txid1 = node.sendrawtransaction(signed_raw1)
raw2 = node.createrawtransaction([{'txid':txid1, 'vout':0}],[{address:0.999}])
signed_raw2 = node.signrawtransactionwithwallet(raw2)['hex']
assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', node.generateblock, address, [signed_raw2, txid1])
self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
assert_raises_rpc_error(-5, 'Transaction ' + missing_txid + ' not in mempool.', node.generateblock, address, [missing_txid])
self.log.info('Fail to generate block with invalid raw tx')
invalid_raw_tx = '0000'
assert_raises_rpc_error(-22, 'Transaction decode failed for ' + invalid_raw_tx, node.generateblock, address, [invalid_raw_tx])
self.log.info('Fail to generate block with invalid address/descriptor')
assert_raises_rpc_error(-5, 'Invalid address or descriptor', node.generateblock, '1234', [])
self.log.info('Fail to generate block with a ranged descriptor')
ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)'
assert_raises_rpc_error(-8, 'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?', node.generateblock, ranged_descriptor, [])
self.log.info('Fail to generate block with a descriptor missing a private key')
child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)'
assert_raises_rpc_error(-5, 'Cannot derive script without private keys', node.generateblock, child_descriptor, [])
if __name__ == '__main__':
GenerateBlockTest().main()
| 51.598131 | 155 | 0.694983 | ["MIT"] | Garlic-HM/garliccoin | test/functional/rpc_generateblock.py | 5,521 | Python |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class cluster_id(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/router-bgp-attributes/cluster-id. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cluster_id_value','__cluster_id_ipv4_address',)
_yang_name = 'cluster-id'
_rest_name = 'cluster-id'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__cluster_id_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)
self.__cluster_id_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'router-bgp-attributes', u'cluster-id']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'cluster-id']
def _get_cluster_id_value(self):
"""
Getter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
"""
return self.__cluster_id_value
def _set_cluster_id_value(self, v, load=False):
"""
Setter method for cluster_id_value, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_value (decimal-number)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster_id_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster_id_value() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cluster_id_value must be of a type compatible with decimal-number""",
'defined-type': "brocade-bgp:decimal-number",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)""",
})
self.__cluster_id_value = t
if hasattr(self, '_set'):
self._set()
def _unset_cluster_id_value(self):
self.__cluster_id_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), is_leaf=True, yang_name="cluster-id-value", rest_name="id", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as 32 bit quantity', u'cli-drop-node-name': None, u'alt-name': u'id'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='decimal-number', is_config=True)
def _get_cluster_id_ipv4_address(self):
"""
Getter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
"""
return self.__cluster_id_ipv4_address
def _set_cluster_id_ipv4_address(self, v, load=False):
"""
Setter method for cluster_id_ipv4_address, mapped from YANG variable /routing_system/router/router_bgp/router_bgp_attributes/cluster_id/cluster_id_ipv4_address (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_cluster_id_ipv4_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cluster_id_ipv4_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cluster_id_ipv4_address must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)""",
})
self.__cluster_id_ipv4_address = t
if hasattr(self, '_set'):
self._set()
def _unset_cluster_id_ipv4_address(self):
self.__cluster_id_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="cluster-id-ipv4-address", rest_name="ipv4-address", parent=self, choice=(u'ch-cluster-id', u'ca-cluster-id-ipv4-address'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route-Reflector Cluster-ID as IP address', u'alt-name': u'ipv4-address'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='inet:ipv4-address', is_config=True)
cluster_id_value = __builtin__.property(_get_cluster_id_value, _set_cluster_id_value)
cluster_id_ipv4_address = __builtin__.property(_get_cluster_id_ipv4_address, _set_cluster_id_ipv4_address)
__choices__ = {u'ch-cluster-id': {u'ca-cluster-id': [u'cluster_id_value'], u'ca-cluster-id-ipv4-address': [u'cluster_id_ipv4_address']}}
_pyangbind_elements = {'cluster_id_value': cluster_id_value, 'cluster_id_ipv4_address': cluster_id_ipv4_address, }
| 72.424051 | 725 | 0.718081 | ["Apache-2.0"] | extremenetworks/pybind | pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/router_bgp_attributes/cluster_id/__init__.py | 11,443 | Python |
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from payments.forms import MakePaymentForm
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.template.context_processors import csrf
from django.conf import settings
from services.models import Service
import stripe
stripe.api_key = settings.STRIPE_SECRET
@login_required(login_url="/accounts/login?next=payments/buy_now")
def buy_now(request, id):
if request.method == 'POST':
form = MakePaymentForm(request.POST)
        if form.is_valid():
            service = get_object_or_404(Service, pk=id)
            # The amount charged is taken from the selected service; the
            # `price` field name is an assumption and may need adjusting to
            # match the Service model.
            total = service.price
            try:
                customer = stripe.Charge.create(
                    amount=int(total * 100),
                    currency="EUR",
                    description=request.user.email,
                    card=form.cleaned_data['stripe_id'],
                )
            except stripe.error.CardError:
                messages.error(request, "Your card was declined!")
            else:
                if customer.paid:
                    messages.success(request, "You have successfully paid")
                    return redirect(reverse('all_services'))
                else:
                    messages.error(request, "Unable to take payment")
else:
messages.error(request, "We were unable to take a payment with that card!")
else:
form = MakePaymentForm()
services = get_object_or_404(Service, pk=id)
args = {'form': form, 'publishable': settings.STRIPE_PUBLISHABLE, 'services': services}
args.update(csrf(request))
    return render(request, 'pay.html', args)
| 34.510204 | 91 | 0.645772 | ["MIT"] | lilschmitz/Lifestyle_Fitness_Site | payments/views.py | 1,691 | Python |
#!/usr/bin/env python2.7
# William Lam
# wwww.virtuallyghetto.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pyVim.connect as connect
import getpass
import requests
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Snippet borrowed from Michael Rice
# https://gist.github.com/michaelrice/a6794a017e349fc65d01
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
# Demonstrates configuring the Message of the Day (MOTD) on vCenter Server
# Example output:
# > logged in to vcsa
# > Setting vCenter Server MOTD to "Hello from virtuallyGhetto"
# > logout
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
required=True,
action='store',
help='Remote host to connect to')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-o', '--port',
required=False,
action='store',
help="port to use, default 443", default=443)
parser.add_argument('-m', '--message',
required=True,
action='store',
help='Message to be used for VC MOTD')
args = parser.parse_args()
if args.password:
password = args.password
else:
password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
si = connect.SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
print "logged in to %s" % args.host
print "Setting vCenter Server MOTD to \"%s\"" % args.message
si.content.sessionManager.UpdateServiceMessage(message=args.message)
print "logout"
si.content.sessionManager.Logout()
| 32.977528 | 74 | 0.656899 | ["Apache-2.0"] | whchoi98/whchoi_pyvmomi-community-samples | samples/set_vcenter_motd.py | 2,935 | Python |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file is stored in the variable path
data=pd.read_csv(path)
#Code starts here
data.rename(columns={'Total':'Total_Medals'},inplace=True)
# Data Loading
data['Better_Event'] = np.where(data['Total_Summer']> data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])
better_event=data['Better_Event'].value_counts().idxmax()
data.head()
# Summer or Winter
# Top 10
data.head(10)
# Plotting top 10
# Top Performing Countries
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
top_countries
# Best in the world
def top_ten(df,col):
country_list=[]
    country_list= list(df.nlargest(10,col)['Country_Name'])
return country_list
top_10_summer=top_ten(top_countries,'Total_Summer')
top_10_winter=top_ten(top_countries,'Total_Winter')
top_10=top_ten(top_countries,'Total_Medals')
a=set(top_10_summer).intersection(set(top_10_winter))
b=a.intersection(set(top_10))
common=list(b)
summer_df= data[data['Country_Name'].isin(top_10_summer)]
summer_df.head()
winter_df= data[data['Country_Name'].isin(top_10_winter)]
winter_df.head()
top_df= data[data['Country_Name'].isin(top_10)]
top_df.head()
plt.figure(figsize=(10,10))
plt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
plt.xticks(rotation=30)
plt.show()
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_max_ratio
summer_country_gold=summer_df.loc[summer_df['Gold_Summer'].idxmax(),'Country_Name']
summer_country_gold
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Gold_Winter'].idxmax(),'Country_Name']
winter_country_gold
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
top_country_gold
data_1=data[:-1]
data_1['Total_Points']=data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total']*1
most_points=max(data_1['Total_Points'])
most_points
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
best_country
# Plotting the best
best=data[data['Country_Name']==best_country]
best
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best
best.plot.bar()
plt.xlabel("United States")
plt.ylabel("Medals")
plt.xticks(rotation=45)
| 31.388235 | 103 | 0.775487 | ["MIT"] | Monika199211/olympics-data-analysis | code.py | 2,668 | Python |
from functools import partial as curry
from django import forms
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from pinax.images.models import ImageSet
from mdeditor.fields import MDTextFormField
from .conf import settings
from .models import Post, Revision, Section
from .signals import post_published
from .utils import load_path_attr
FIELDS = [
"section",
"author",
"markup",
"title",
"slug",
"teaser",
"content",
"description",
"state"
]
class PostFormMixin:
@property
def markup_choice(self):
return self.cleaned_data["markup"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
post = self.instance
latest_revision = post.latest()
if latest_revision:
# set initial data from the latest revision
self.fields["teaser"].initial = latest_revision.teaser
self.fields["content"].initial = latest_revision.content
def save_post(self, post):
published = False
if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
post.published = timezone.now()
published = True
render_func = curry(
load_path_attr(
settings.PINAX_BLOG_MARKUP_CHOICE_MAP[self.markup_choice]["parser"]
)
)
post.teaser_html = render_func(self.cleaned_data["teaser"])
post.content_html = render_func(self.cleaned_data["content"])
post.updated = timezone.now()
post.save()
r = Revision()
r.post = post
r.title = post.title
r.teaser = self.cleaned_data["teaser"]
r.content = self.cleaned_data["content"]
r.author = post.author
r.updated = post.updated
r.published = post.published
r.save()
if published:
post_published.send(sender=Post, post=post)
return post
class AdminPostForm(PostFormMixin, forms.ModelForm):
title = forms.CharField(
label=_("Title"),
max_length=90,
widget=forms.TextInput(attrs={"style": "width: 50%;"}),
)
slug = forms.CharField(
label=_("Slug"),
widget=forms.TextInput(attrs={"style": "width: 50%;"})
)
teaser = forms.CharField(
label=_("Teaser"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
)
content = MDTextFormField()
description = forms.CharField(
label=_("Description"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
required=False
)
class Meta:
model = Post
fields = FIELDS
class Media:
js = settings.PINAX_BLOG_ADMIN_JS
def save(self, blog=None):
post = super().save(commit=False)
if blog:
post.blog = blog
return self.save_post(post)
class PostForm(PostFormMixin, forms.ModelForm):
markup_choice = "markdown"
teaser = forms.CharField(widget=forms.Textarea())
content = MDTextFormField()
class Meta:
model = Post
fields = [
"section",
"title",
"teaser",
"content",
"description",
"state"
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Section.objects.count() < 2:
self.section = Section.objects.first()
del self.fields["section"]
else:
self.section = None
def save(self, blog=None, author=None):
post = super().save(commit=False)
if blog:
post.blog = blog
if author:
post.author = author
post.image_set = ImageSet.objects.create(created_by=author)
if self.section:
post.section = self.section
post.slug = slugify(post.title)
post.markup = self.markup_choice
return self.save_post(post)
| 26.558442 | 86 | 0.593643 | ["MIT"] | Zorking/pinax-blog | pinax/blog/forms.py | 4,090 | Python |
from mininet.topo import Topo
class Project1_Topo_0866007(Topo):
def __init__(self):
Topo.__init__(self)
# Add hosts
h1 = self.addHost('h1', ip='192.168.0.1/24')
h2 = self.addHost('h2', ip='192.168.0.2/24')
h3 = self.addHost('h3', ip='192.168.0.3/24')
h4 = self.addHost('h4', ip='192.168.0.4/24')
# Add switches
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
# Add links
self.addLink(h1, s1)
self.addLink(h2, s1)
self.addLink(h3, s2)
self.addLink(h4, s2)
self.addLink(s1, s3)
self.addLink(s1, s4)
self.addLink(s2, s3)
self.addLink(s2, s4)
topos = {'topo_0866007': Project1_Topo_0866007}
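# Illustrative sketch: with the topology registered in `topos`, it can be
# loaded with "sudo mn --custom bonus_0866007.py --topo topo_0866007", or run
# programmatically as below. Kept inside a function so importing this module
# stays side-effect free.
def _example_run():
    from mininet.net import Mininet
    from mininet.cli import CLI
    net = Mininet(topo=Project1_Topo_0866007())
    net.start()
    CLI(net)
    net.stop()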
| 25.46875 | 52 | 0.548466 | ["MIT"] | shoulderhu/0866007-sdn-lab | project1_0866007/bonus_0866007.py | 815 | Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import osx_trace
import optparse
import os
import subprocess
import sys
import StringIO
from exceptions import *
class MockTrace(object):
def __init__(self):
self.reset()
def reset(self):
self._call_return_value = 0
self._calls = []
def call(self, args, sudo=False):
self._calls.append((args, sudo))
return self._call_return_value
@property
def calls(self):
return self._calls
def set_call_return_value(self, value):
self._call_return_value = value
@property
def codes_file(self):
return "trace.codes"
def osx_trace_main(*args):
old_sys_argv = sys.argv
old_sys_stdout = sys.stdout
old_sys_stderr = sys.stderr
try:
sys.argv = [old_sys_argv[0]]
sys.argv.extend(args)
parser = optparse.OptionParser(usage=osx_trace.main_usage())
sys.stdout = StringIO.StringIO()
sys.stderr = sys.stdout
return osx_trace.main(parser)
finally:
sys.argv = old_sys_argv
sys.stdout = old_sys_stdout
sys.stderr = old_sys_stderr
class OSXTraceTest(unittest.TestCase):
def setUp(self):
self._real_create_trace_cmd = osx_trace.create_trace_cmd
def tearDown(self):
osx_trace.create_trace_cmd = self._real_create_trace_cmd
# Sanity check of the full script etc.
def test_toplevel_script_smoketest(self):
script = os.path.join(os.path.dirname(__file__), "../osx-trace")
assert os.path.exists(script)
p = subprocess.Popen([script, "help"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(stdout, stderr) = p.communicate()
self.assertTrue(stdout.startswith("Usage: osx-trace <command> [options]"))
def test_trace_but_cant_compile(self):
osx_trace.create_trace_cmd = lambda x: CompilerNeededException()
ret = osx_trace_main("record")
assert ret != 0
def test_record_empty(self):
osx_trace.create_trace_cmd = lambda x: MockTrace()
ret = osx_trace_main("record")
assert ret != 0
def test_record(self):
trace = MockTrace()
osx_trace.create_trace_cmd = lambda options: trace
ret = osx_trace_main("record", "test.trace")
assert ret == 0
calls = trace.calls
self.assertEquals(3, len(calls))
self.assertTrue(calls[0][1]) # sudo
self.assertEquals(["-r"], calls[0][0])
self.assertTrue(calls[1][1]) # sudo
self.assertEquals("-L", calls[1][0][0])
self.assertEquals(2, len(calls[1][0]))
self.assertFalse(calls[2][1]) # not sudo
self.assertEquals(6, len(calls[2][0]))
self.assertEquals("-t", calls[2][0][0])
self.assertEquals("-R", calls[2][0][1])
self.assertEquals("-o", calls[2][0][3])
self.assertEquals("test.trace", calls[2][0][4])
self.assertEquals("trace.codes", calls[2][0][5])
| 29.061947 | 92 | 0.704324 | ["Apache-2.0"] | natduca/osx-trace | src/osx_trace_test.py | 3,284 | Python |
import datetime
from django.contrib.syndication import feeds, views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
from xml.dom import minidom
try:
set
except NameError:
from sets import Set as set
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected));
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django']);
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': '[email protected] (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing']);
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEquals(title.firstChild.wholeText, u'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_item_link_error(self):
"""
        Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:[email protected]'),
'mailto:[email protected]'
)
######################################
# Deprecated feeds
######################################
class DeprecatedSyndicationFeedTest(FeedTestCase):
"""
Tests for the deprecated API (feed() view and the feed_dict etc).
"""
def test_empty_feed_dict(self):
"""
Test that an empty feed_dict raises a 404.
"""
response = self.client.get('/syndication/depr-feeds-empty/aware-dates/')
self.assertEquals(response.status_code, 404)
def test_nonexistent_slug(self):
"""
Test that a non-existent slug raises a 404.
"""
response = self.client.get('/syndication/depr-feeds/foobar/')
self.assertEquals(response.status_code, 404)
def test_rss_feed(self):
"""
A simple test for Rss201rev2Feed feeds generated by the deprecated
system.
"""
response = self.client.get('/syndication/depr-feeds/rss/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
chan = feed.getElementsByTagName('channel')[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link'])
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
def test_complex_base_url(self):
"""
Tests that the base url for a complex feed doesn't raise a 500
exception.
"""
response = self.client.get('/syndication/depr-feeds/complex/')
self.assertEquals(response.status_code, 404)
| 39.631737 | 151 | 0.607993 | ["BSD-3-Clause"] | Smarsh/django | tests/regressiontests/syndication/tests.py | 13,237 | Python |
import requests
from logging import getLogger
from bs4 import BeautifulSoup
from requests import Session
from typing import List
from urllib.parse import quote_plus
from .book import Book
from ..base import HEADERS
class BahanAjar:
def __init__(self, email: str, password: str, login: bool = True):
self.session: Session = Session()
self.session.headers.update(HEADERS)
self.email = email
self.password = password
self._my_books: List[Book] = []
self.logger = getLogger(self.__class__.__name__)
if login and self.login():
self.logger.debug("Berhasil login ke bahan ajar")
def login(self, email: str = None, password: str = None) -> bool:
try:
email = email if email else self.email
password = password if password else self.password
url = f"http://bahanajar.ut.ac.id/Homes/login_frame/{email}/{password}//////?service="
res = self.session.post(url)
return res.ok
except Exception as E:
self.logger.exception(E)
return False
@property
def my_books(self) -> List[Book]:
if self._my_books:
return self._my_books
url = "http://bahanajar.ut.ac.id/Homes/my_books"
res = self.session.get(url)
if not res.ok or "No books are available" in res.text:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", id="bookHolder").find_all(
"div", class_="publib_bkthumb"
)
if not len(soup) > 0:
return []
else:
self._my_books = [Book.from_bkthumb(bktm) for bktm in soup]
return self._my_books
@my_books.deleter
def my_books(self):
del self._my_books
@staticmethod
def search(query: str, start: int = 0) -> List[Book]:
url = f"http://bahanajar.ut.ac.id/ebookstore/ajax_load_search_books/0/{quote_plus(query)}"
res = requests.get(url)
if not res.ok:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", class_="book_stnd").find_all("div", class_="newb_bg")
if not len(soup) > 0:
return []
return [Book.from_newb_bg(newb_bg) for newb_bg in soup]
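# Illustrative usage sketch: the e-mail address, password and search query
# below are placeholders.
def _example_usage():
    client = BahanAjar("user@example.com", "secret", login=False)
    owned = client.my_books                 # books owned by the account
    found = BahanAjar.search("statistika")  # store-wide title search
    return owned, found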
| 35.212121 | 98 | 0.609725 | ["MIT"] | UnivTerbuka/ut-telegram-bot | libs/bahan_ajar/bahan_ajar.py | 2,324 | Python |
"""
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
class Linkedin(object):
"""
Class for accessing Linkedin API.
"""
_MAX_UPDATE_COUNT = 100 # max seems to be 100
_MAX_SEARCH_COUNT = 49 # max seems to be 49
_MAX_REPEATED_REQUESTS = (
200
) # VERY conservative max requests count to avoid rate-limit
def __init__(self, username, password):
self.client = Client(debug=True)
self.client.authenticate(username, password)
self.logger = logger
def search(self, params, max_results=None, results=[]):
"""
Do a search.
"""
sleep(
random.randint(0, 1)
        )  # sleep a random duration to try and evade suspension
count = (
max_results
if max_results and max_results <= Linkedin._MAX_SEARCH_COUNT
else Linkedin._MAX_SEARCH_COUNT
)
default_params = {
"count": count,
"guides": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "guided",
"start": len(results),
}
default_params.update(params)
res = self.client.session.get(
f"{self.client.API_BASE_URL}/search/cluster", params=default_params
)
data = res.json()
total_found = data.get("paging", {}).get("total")
# recursive base case
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or total_found is None
or len(results) >= total_found
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"][0]["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.search(params, results=results, max_results=max_results)
def search_people(
self,
keywords=None,
connection_of=None,
network_depth=None,
regions=None,
industries=None,
):
"""
Do a people search.
"""
guides = ["v->PEOPLE"]
if connection_of:
guides.append(f"facetConnectionOf->{connection_of}")
if network_depth:
guides.append(f"facetNetwork->{network_depth}")
if regions:
guides.append(f'facetGeoRegion->{"|".join(regions)}')
if industries:
guides.append(f'facetIndustry->{"|".join(industries)}')
params = {"guides": "List({})".format(",".join(guides))}
if keywords:
params["keywords"] = keywords
data = self.search(params)
results = []
for item in data:
search_profile = item["hitInfo"][
"com.linkedin.voyager.search.SearchProfile"
]
profile_id = search_profile["id"]
distance = search_profile["distance"]["value"]
results.append(
{
"urn_id": profile_id,
"distance": distance,
"public_id": search_profile["miniProfile"]["publicIdentifier"],
}
)
return results
def search_companies(self, max_results=None, results=[]):
"""
Do a company search
Note: try swap from blended search to cluster
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
#Search params from main search, here for reference
'''
default_params = {
"count": count,
"guides": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "guided",
"start": len(results),
}
'''
default_params = {
"origin": "GLOBAL_SEARCH_HEADER",
"guides": "List(resultType->companies)",
"count": "10",
"q": "guided",
"filters": "List(resultType->companies)",
"start": len(results)
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/search/blended?keywords=s&origin=GLOBAL_SEARCH_HEADER&count=10&guides=List(resultType-%3Ecompanies)&q=all&filters=List(resultType-%3Ecompanies)&start={len(results)}"
)
data = res.json()
total_found = data.get("paging", {}).get("total")
if (
len(data["elements"]) == 0 or
len(data["elements"][0]["elements"]) == 0
or total_found is None
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"][0]["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.search_companies(max_results=max_results, results=results)
def get_profile_contact_info(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileContactInfo"
)
data = res.json()
contact_info = {
"email_address": data.get("emailAddress"),
"websites": [],
"phone_numbers": data.get("phoneNumbers", []),
}
websites = data.get("websites", [])
        for item in websites:
            if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
                item["label"] = item["type"][
                    "com.linkedin.voyager.identity.profile.StandardWebsite"
                ]["category"]
            elif "com.linkedin.voyager.identity.profile.CustomWebsite" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.CustomWebsite"
]["label"]
del item["type"]
contact_info["websites"] = websites
return contact_info
def get_profile(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{public_id or urn_id}/profileView"
)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
# massage [profile] data
profile = data["profile"]
if "miniProfile" in profile:
if "picture" in profile["miniProfile"]:
profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
"com.linkedin.common.VectorImage"
]["rootUrl"]
profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
del profile["miniProfile"]
del profile["defaultLocale"]
del profile["supportedLocales"]
del profile["versionTag"]
del profile["showEducationOnProfileTopCard"]
# massage [experience] data
experience = data["positionView"]["elements"]
for item in experience:
if "company" in item and "miniCompany" in item["company"]:
if "logo" in item["company"]["miniCompany"]:
logo = item["company"]["miniCompany"]["logo"].get(
"com.linkedin.common.VectorImage"
)
if logo:
item["companyLogoUrl"] = logo["rootUrl"]
del item["company"]["miniCompany"]
profile["experience"] = experience
# massage [skills] data
skills = [item["name"] for item in data["skillView"]["elements"]]
profile["skills"] = skills
# massage [education] data
education = data["educationView"]["elements"]
for item in education:
if "school" in item:
if "logo" in item["school"]:
item["school"]["logoUrl"] = item["school"]["logo"][
"com.linkedin.common.VectorImage"
]["rootUrl"]
del item["school"]["logo"]
profile["education"] = education
return profile
def get_profile_connections(self, urn_id):
"""
Return a list of profile ids connected to profile of given [urn_id]
"""
return self.search_people(connection_of=urn_id, network_depth="F")
def get_profile_networkinfo(self, urn_id):
"""
        Return the network info of the profile with the given [urn_id]
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/profiles/{urn_id}/networkinfo"
)
return res.json()
    def get_company_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
        """
        Return a list of company posts
        [public_id] - public identifier i.e. microsoft
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
params = {
"companyUniversalName": {public_id or urn_id},
"q": "companyFeedByUniversalName",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_company_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
    def get_profile_updates(self, public_id=None, urn_id=None, max_results=None, results=[]):
        """
        Return a list of profile posts
        [public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
params = {
"profileId": {public_id or urn_id},
"q": "memberShareFeed",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/feed/updates", params=params
)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (max_results is not None and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_profile_updates(public_id=public_id, urn_id=urn_id, results=results, max_results=max_results)
def get_current_profile_views(self):
"""
Get profile view statistics, including chart data.
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/identity/panels"
)
data = res.json()
return data['elements'][0]['value']['com.linkedin.voyager.identity.me.ProfileViewsByTimePanel']
def get_school(self, public_id):
"""
Return data for a single school.
[public_id] - public identifier i.e. uq
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
params = {
"decoration": (
"""
(
autoGenerated,backgroundCoverImage,
companyEmployeesSearchPageUrl,companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,
entityUrn,followingInfo,foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,
paidCompany,partnerCompanyUrl,partnerLogo,partnerLogoImage,rankForTopCompanies,salesNavigatorCompanyUrl,
school,showcase,staffCount,staffCountRange,staffingCompany,topCompaniesListName,universalName,url,
companyIndustries*,industries,specialities,
acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
)
"""
),
"q": "universalName",
"universalName": public_id,
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies", params=params
)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
school = data["elements"][0]
return school
def get_similar_companies(self, public_id):
"""
Return similar companies for a single company.
        [public_id] - public identifier i.e. university-of-queensland
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies?count={Linkedin._MAX_SEARCH_COUNT}&companyUniversalName={public_id}&q=similarCompanies&start=0&decorationId=com.linkedin.voyager.deco.organization.web.WebSimilarCompanyCardWithRelevanceReason-3"
)
data = res.json()
return data
def get_company(self, public_id):
"""
Return data for a single company.
        [public_id] - public identifier i.e. university-of-queensland
"""
sleep(
random.randint(2, 5)
        ) # sleep a random duration to try and evade suspension
params = {
"decoration": (
"""
(
affiliatedCompaniesWithEmployeesRollup,affiliatedCompaniesWithJobsRollup,articlePermalinkForTopCompanies,
autoGenerated,backgroundCoverImage,companyEmployeesSearchPageUrl,
companyPageUrl,confirmedLocations*,coverPhoto,dataVersion,description,entityUrn,followingInfo,
foundedOn,headquarter,jobSearchPageUrl,lcpTreatment,logo,name,type,overviewPhoto,paidCompany,
partnerCompanyUrl,partnerLogo,partnerLogoImage,permissions,rankForTopCompanies,
salesNavigatorCompanyUrl,school,showcase,staffCount,staffCountRange,staffingCompany,
topCompaniesListName,universalName,url,companyIndustries*,industries,specialities,
acquirerCompany~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
affiliatedCompanies*~(entityUrn,logo,name,industries,followingInfo,url,paidCompany,universalName),
groups*~(entityUrn,largeLogo,groupName,memberCount,websiteUrl,url),
showcasePages*~(entityUrn,logo,name,industries,followingInfo,url,description,universalName)
)
"""
),
"q": "universalName",
"universalName": public_id,
}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/organization/companies", params=params
)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
company = data["elements"][0]
return company
def get_conversation_details(self, profile_urn_id):
"""
Return the conversation (or "message thread") details for a given [public_profile_id]
"""
# passing `params` doesn't work properly, think it's to do with List().
# Might be a bug in `requests`?
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations?\
keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
)
data = res.json()
item = data["elements"][0]
item["id"] = get_id_from_urn(item["entityUrn"])
return item
def get_conversations(self):
"""
Return list of conversations the user is in.
"""
params = {"keyVersion": "LEGACY_INBOX"}
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations", params=params
)
return res.json()
def get_conversation(self, conversation_urn_id):
"""
Return the full conversation at a given [conversation_urn_id]
"""
res = self.client.session.get(
f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events"
)
return res.json()
def send_message(self, conversation_urn_id, message_body):
"""
        Send a message to the conversation with the given [conversation_urn_id]
"""
params = {"action": "create"}
payload = json.dumps(
{
"eventCreate": {
"value": {
"com.linkedin.voyager.messaging.create.MessageCreate": {
"body": message_body,
"attachments": [],
"attributedBody": {"text": message_body, "attributes": []},
"mediaAttachments": [],
}
}
}
}
)
res = self.client.session.post(
f"{self.client.API_BASE_URL}/messaging/conversations/{conversation_urn_id}/events",
params=params,
data=payload,
)
return res.status_code == 201
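# Usage sketch (illustrative only; the credentials, ids and keywords below are
# placeholders, not values taken from the library's documentation):
#
#     api = Linkedin("user@example.com", "password")
#     profile = api.get_profile(public_id="tom-quirk-1928345")
#     people = api.search_people(keywords="software engineer")
#     company = api.get_company("linkedin")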
| 34.379679 | 258 | 0.575932 | [
"MIT"
] | Alexander-Bakogeorge/linkedin-api | linkedin_api/linkedin.py | 19,287 | Python |
import os
import sys
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
)
from src.DbHelper import DbHelper
persons = [
'Lucy',
'Franz',
'Susanne',
'Jonathan',
'Max',
'Stephan',
'Julian',
'Frederike',
'Amy',
'Miriam',
'Jonas',
'Anna',
'Sebastian'
]
addresses = [ f'Musterstraße {i}' for i in range(1,11)]
accounts = [ f'Bank Account {i}' for i in range(1, 14)]
phones = [f'Phone Number {i}' for i in range(1,12)]
creditcards = [f'Credit Card Number {i}' for i in range(1,14)]
socialsecuritynumbers = [f'SSN {i}' for i in range(1,10)]
nodes = {
'Person':('name', persons),
'Address':('address', addresses),
'BankAccount':('account', accounts),
'CreditCard':('number', creditcards),
'SSN':('ssn', socialsecuritynumbers)
}
if __name__ == "__main__":
# See https://neo4j.com/developer/aura-connect-driver/ for Aura specific connection URL.
scheme = "neo4j" # Connecting to Aura, use the "neo4j+s" URI scheme
host_name = "localhost"
port = 7687 # Bolt Port https://neo4j.com/docs/operations-manual/current/configuration/ports/ | .NET | Java | JavaScript | Go | Python
url = f"{scheme}://{host_name}:{port}"
user = 'neo4j'
password = 'neo4j'
db_helper = DbHelper(url, user, password)
for Label, values in nodes.items():
PropertyKey = values[0]
for PropertyValue in values[1]:
db_helper.run_query(
'CREATE (node:' + Label + ' {' + PropertyKey + ': "' + PropertyValue + '" }) RETURN node.' + PropertyKey
)
db_helper.close()
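    # Note: the query above is built with string concatenation for brevity. A
    # parameterized version (sketch; assumes DbHelper.run_query forwards keyword
    # arguments to the underlying Neo4j driver) would avoid quoting problems:
    #
    #     db_helper.run_query(
    #         f'CREATE (node:{Label} {{ {PropertyKey}: $value }}) RETURN node.{PropertyKey}',
    #         value=PropertyValue,
    #     )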
| 28.118644 | 139 | 0.614225 | [
"MIT"
] | LFeret/masterseminar | src/Simple_Fraud_Detection/solution/01_fill_fraud_db_with_nodes.py | 1,660 | Python |
# Keypirinha launcher (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
import keypirinha_net as kpnet
import json
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from faker import Faker
class FakerData(kp.Plugin):
ITEMCAT = kp.ItemCategory.USER_BASE + 1
ITEMRESULT = kp.ItemCategory.USER_BASE + 2
    # The default amount of suggestions to show after the user selected the faker category
DEFAULT_MAX_RESULTS = 5
# The default language used to instantiate Faker
DEFAULT_LANGUAGE = 'en_US'
def __init__(self):
super().__init__()
self.current_output = []
def on_start(self):
self.read_config()
pass
def on_events(self, flags):
if flags & kp.Events.PACKCONFIG:
self.read_config()
def on_catalog(self):
self.set_catalog([
self.create_item(
category=kp.ItemCategory.KEYWORD,
label="Faker",
short_desc="Generate fake data",
target="Faker",
args_hint=kp.ItemArgsHint.REQUIRED,
hit_hint=kp.ItemHitHint.KEEPALL
)
])
def on_suggest(self, user_input, items_chain):
if not items_chain or items_chain[0].category() != kp.ItemCategory.KEYWORD:
return
suggestions = []
# Generate outputs
if len(items_chain) == 2:
items = []
# We don't want to generate the output each time the user enter a new query
# Let's keep the output, so this way Keypirinha itself can filter it
if not self.current_output:
for x in range(0, self.max_results):
try:
items.append(str(getattr(self.fakeGenerator, items_chain[1].target())()))
except Exception as error:
items.append(str(error))
continue
if len(items) > 0:
# Remove duplicated
items = list(set(items))
# Append suggestions
for x in items:
self.current_output.append(
self.create_item(
category=self.ITEMRESULT,
label=x,
short_desc='Press Enter to copy',
target=x,
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.IGNORE,
loop_on_suggest=False
)
)
suggestions = self.current_output
# Generate suggestions categories
else:
self.current_output = []
lines = self.load_text_resource('providers.json')
data = json.loads(lines)
for item in data:
try:
suggestions.append(
self.create_item(
category=self.ITEMCAT,
label=item['name'],
short_desc=item['description'],
target=item['function'],
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.IGNORE,
icon_handle=self.load_icon("res://{}/icons/{}.png".format(self.package_full_name(), item['name'][0].upper())),
loop_on_suggest=True
)
)
except Exception as error:
self.err("Could not generate suggestion for fake data category: {}".format(item['name']), error)
self.set_suggestions(suggestions, kp.Match.FUZZY, kp.Sort.DEFAULT)
def on_execute(self, item, action):
if (item.category() == self.ITEMCAT):
to_clipboard = getattr(self.fakeGenerator, item.target())()
elif (item.category() == self.ITEMRESULT):
to_clipboard = item.label()
kpu.set_clipboard(to_clipboard)
def read_config(self):
settings = self.load_settings()
self.max_results = int(settings.get("max_results", section="main", fallback=self.DEFAULT_MAX_RESULTS))
self.language = settings.get("language", section="main", fallback=self.DEFAULT_LANGUAGE)
self.fakeGenerator = Faker(self.language) | 27.919355 | 117 | 0.691797 | [
"MIT"
] | Fuhrmann/keypirinha-faker-data | src/fakerdata.py | 3,462 | Python |
from queue import Queue, Empty, Full
from ..core import DriverBase, format_msg
import pika
class Driver(DriverBase):
def __init__(self, exchange, queue, routing_key=None, buffer_maxsize=None,
*args, **kwargs):
super().__init__()
self._args = args
self._kwargs = kwargs
self._exchange = exchange
self._queue = queue
self._routing_key = routing_key or queue
self._buffer = Queue(buffer_maxsize) \
if buffer_maxsize is not None else None
self._declared = False
def run(self, driver_id, ts, fields, tags):
if not fields:
return
msg = format_msg(ts, driver_id, tags, fields)
try:
with pika.BlockingConnection(
pika.ConnectionParameters(*self._args, **self._kwargs)) as c:
channel = c.channel()
self._publish(channel, msg)
# Flush buffer
if self._buffer is not None:
try:
while True:
msg = self._buffer.get_nowait()
self._publish(channel, msg)
except Empty:
pass
except pika.exceptions.AMQPError:
# Add to buffer
if self._buffer is not None:
try:
self._buffer.put_nowait(msg)
except Full:
pass
def _declare(self, channel):
if not self._declared:
channel.exchange_declare(exchange=self._exchange, durable=True)
channel.queue_declare(queue=self._queue, durable=True)
channel.queue_bind(
exchange=self._exchange,
queue=self._queue,
routing_key=self._routing_key
)
self._declared = True
def _publish(self, channel, msg):
self._declare(channel)
channel.basic_publish(
exchange=self._exchange,
routing_key=self._routing_key,
body=msg,
properties=pika.BasicProperties(delivery_mode=2)
)
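# Usage sketch (illustrative only; the exchange/queue names, host and the
# timestamp/field/tag values are placeholders):
#
#     driver = Driver("sensors", "readings", buffer_maxsize=100, host="localhost")
#     driver.run("driver-1", 1633036800, {"temperature": 21.5}, {"room": "lab"})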
| 33.65625 | 78 | 0.539926 | [
"Apache-2.0"
] | frantp/iot-sensor-reader | piot/outputs/amqp.py | 2,154 | Python |
# -*- coding: utf-8 -*-
import subprocess
def test_too_many_arguments_in_fixture(absolute_path):
"""
End-to-End test to check arguments count.
It is required due to how 'function_type' parameter
works inside 'flake8'.
    Otherwise it is not set, and unit tests cannot cover `is_method` correctly.
"""
filename = absolute_path('fixtures', 'config', 'wrong_arguments.py')
process = subprocess.Popen(
['flake8', '--select', 'Z', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = process.communicate()
assert stdout.count(b'Z211') == 4
| 25.958333 | 76 | 0.654896 | [
"MIT"
] | AlwxSin/wemake-python-styleguide | tests/test_checkers/test_high_complexity.py | 623 | Python |
import itertools
try:
import theano
import theano.tensor as T
from theano.gradient import disconnected_grad
except ImportError:
theano = None
T = None
from ._backend import Backend
from .. import make_graph_backend_decorator
class _TheanoBackend(Backend):
def __init__(self):
super().__init__("Theano")
@staticmethod
def is_available():
return theano is not None
@Backend._assert_backend_available
def is_compatible(self, function, arguments):
if not isinstance(function, T.TensorVariable):
return False
return all([isinstance(argument, T.TensorVariable)
for argument in arguments])
def _compile_function_without_warnings(self, *args, **kwargs):
return theano.function(*args, **kwargs, on_unused_input="ignore")
@Backend._assert_backend_available
def compile_function(self, function, arguments):
"""Compiles a Theano graph into a callable."""
return self._compile_function_without_warnings(arguments, function)
@Backend._assert_backend_available
def compute_gradient(self, function, arguments):
"""Returns a compiled function computing the gradient of ``function``
with respect to ``arguments``.
"""
if len(arguments) == 1:
(argument,) = arguments
gradient = T.grad(function, argument)
return self._compile_function_without_warnings(arguments, gradient)
gradient = T.grad(function, arguments)
return self._compile_function_without_warnings(arguments, gradient)
def _compute_unary_hessian_vector_product(self, gradient, argument):
"""Returns a function accepting two arguments to compute a
Hessian-vector product of a scalar-valued unary function.
"""
argument_type = argument.type()
try:
Rop = T.Rop(gradient, argument, argument_type)
except NotImplementedError:
proj = T.sum(gradient * disconnected_grad(argument_type))
Rop = T.grad(proj, argument)
return self._compile_function_without_warnings(
[argument, argument_type], Rop)
def _compute_nary_hessian_vector_product(self, gradients, arguments):
"""Returns a function accepting `2 * len(arguments)` arguments to
compute a Hessian-vector product of a multivariate function.
Notes
-----
The implementation is based on TensorFlow's '_hessian_vector_product'
function in 'tensorflow.python.ops.gradients_impl'.
"""
argument_types = [argument.type() for argument in arguments]
try:
Rop = T.Rop(gradients, arguments, argument_types)
except NotImplementedError:
proj = [T.sum(gradient * disconnected_grad(argument_type))
for gradient, argument_type in zip(gradients,
argument_types)]
proj_grad = [T.grad(proj_elem, arguments,
disconnected_inputs="ignore",
return_disconnected="None")
for proj_elem in proj]
proj_grad_transpose = map(list, zip(*proj_grad))
proj_grad_stack = [
T.stacklists([c for c in row if c is not None])
for row in proj_grad_transpose]
Rop = [T.sum(stack, axis=0) for stack in proj_grad_stack]
return self._compile_function_without_warnings(
list(itertools.chain(arguments, argument_types)), Rop)
@Backend._assert_backend_available
def compute_hessian_vector_product(self, function, arguments):
"""Computes the directional derivative of the gradient, which is
equivalent to computing a Hessian-vector product with the direction
vector.
"""
if len(arguments) == 1:
(argument,) = arguments
gradient = T.grad(function, argument)
return self._compute_unary_hessian_vector_product(
gradient, argument)
gradients = T.grad(function, arguments)
return self._compute_nary_hessian_vector_product(gradients, arguments)
Theano = make_graph_backend_decorator(_TheanoBackend)
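# Standalone sketch of the Rop-based Hessian-vector product used above
# (plain Theano, independent of pymanopt; illustrative only):
#
#     import theano
#     import theano.tensor as T
#     x = T.vector("x")
#     v = T.vector("v")
#     cost = T.sum(x ** 2)
#     gradient = T.grad(cost, x)
#     hvp = T.Rop(gradient, x, v)          # directional derivative of the gradient
#     f = theano.function([x, v], hvp)     # f(x, v) evaluates to 2 * v for this cost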
| 39.211009 | 79 | 0.649509 | [
"BSD-3-Clause"
] | Andrew-Wyn/pymanopt | pymanopt/autodiff/backends/_theano.py | 4,274 | Python |
# -*- coding: utf-8 -*-
import sys
# import PyQt5 QtWidgets, QtGui and QtCore modules
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore
from pylinac import VMAT
from dmlc import Ui_MainWindow
class DirectoryPath(object):
def __init__(self, pathDir, getCountImages):
self._pathDir = pathDir
self._fieldOpenPathfile = ""
self._dmlcopenfilenamepath = ""
self._getCountImages = getCountImages
@property
def pathDir(self):
return getattr(self, '_pathDir')
@pathDir.setter
def pathDir(self, pathDir):
self._pathDir = pathDir
@property
def fieldOpenPathfile(self):
return getattr(self, '_fieldOpenPathfile')
@fieldOpenPathfile.setter
def fieldOpenPathfile(self, fieldOpenPathfile):
self._fieldOpenPathfile = fieldOpenPathfile
@property
def dmlcopenfilenamepath(self):
return getattr(self, '_dmlcopenfilenamepath')
@dmlcopenfilenamepath.setter
def dmlcopenfilenamepath(self, dmlcopenfilenamepath):
self._dmlcopenfilenamepath = dmlcopenfilenamepath
class MainWindow(QMainWindow, Ui_MainWindow):
"""MainWindow inherits QMainWindow"""
def __init__(self, parent: object = None) -> object:
super(MainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
"All Files (*);;Python Files (*.py)", options=options)
if fileName:
print(fileName)
def OpenDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
"All Files (*);;DICOM Files (*.dcm)", options=options)
if fileName:
DirectoryPath.fieldOpenPathfile = fileName
def OpenDmlcFiles(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
"All Files (*);;DICOM Files (*.dcm)", options=options)
if fileName:
DirectoryPath.dmlcopenfilenamepath = fileName
def DmlcCalculations(self, cal1, cal2, textparam):
if cal1:
leeds = VMAT(images=[DirectoryPath.fieldOpenPathfile, DirectoryPath.dmlcopenfilenamepath],
delivery_types=['open', 'dmlc'])
leeds.analyze(test='drmlc', tolerance=1.3, x_offset=0)
leeds.plot_analyzed_subimage('profile')
leeds.save_analyzed_subimage('myprofile.png', subimage='profile')
print(leeds.return_results())
leeds.plot_analyzed_image()
leeds.publish_pdf(DirectoryPath.dmlcopenfilenamepath + '.pdf')
if cal2:
drgs = VMAT(images=[DirectoryPath.fieldOpenPathfile, DirectoryPath.dmlcopenfilenamepath],
delivery_types=['open', 'drgs'])
drgs.analyze(test='drgs', tolerance=1.3, x_offset=10)
drgs.save_analyzed_subimage('myprofiledrgs.png', subimage='profile')
print(drgs.return_results())
drgs.plot_analyzed_image()
drgs.publish_pdf(DirectoryPath.dmlcopenfilenamepath + 'drgs' + '.pdf', author=textparam, unit="TrueBeamSTX")
def __del__(self):
self.ui = None
# -----------------------------------------------------#
if __name__ == '__main__':
# create application
app = QApplication(sys.argv)
app.setApplicationName('Dmlc')
d = DirectoryPath(pathDir="", getCountImages=0)
# create widget
w = MainWindow()
w.setWindowTitle('Dmlc')
w.show()
# connection
# QObject.connect( app, SIGNAL( 'lastWindowClosed()' ), app, SLOT( 'quit()' ) )
# execute application
sys.exit(app.exec_())
| 35.784483 | 120 | 0.630691 | [
"MIT"
] | bozhikovstanislav/UI-Pylinac | VMAT/Dmlc/main.py | 4,151 | Python |
import numpy as np
import pickle
import math
try:
from utilities import dot_loss, next_batch
except ImportError:
from utilities.utilities import dot_loss, next_batch
class DontCacheRef(Exception):
pass
class BasicConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = 0):
# training control
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self._layer_to_expand = int(layer_to_expand)
# training history
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
# Return the cached list of reference outputs for the base model
return (self.__datapoints, self.__refs)
except (DontCacheRef, AttributeError) as e:
# Create the list of reference outputs for the base model
if conv_1d and conv_2d:
print('ERROR: conv_1d and conv_2d are mutually exclusive')
return None
refs = []
flattened = []
for point in datapoints:
spoint = point
if scaler and not conv_2d:
spoint = scaler.transform([point])
prob = 0.0
if conv_1d:
prob = base_model.predict_proba(np.expand_dims(np.expand_dims(spoint, axis = 2), axis = 0))[0][0]
elif conv_2d:
# this will match if original model was trained with correct dimensionality
prob = base_model.predict_proba(np.expand_dims(spoint, axis = 0))
else:
prob = base_model.predict_proba(spoint.reshape(1, -1))[0][0]
refs.append(prob)
flattened.append(spoint.flatten().tolist())
self.__datapoints = np.asarray(flattened)
self.__refs = np.asarray(refs)
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, conv_1d = False, conv_2d = False, cache_data = True, epoch_reset = False):
# Get the list of reference outputs for the base model
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, conv_1d, conv_2d, cache_data)
inflate = 0 # to inflate the learning without change iterations
if epoch_reset:
self._epoch = 0
avloss = 0
# convert until min epochs are passed and leave only if loss at minima
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
# initialize the total loss for the epoch
epochloss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
continue
# Find current output and calculate loss for our graph
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
# Update the model
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
# is the relative improvement of the loss too small, smaller than threshold
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
# If it is less than the threshold, is it below
# where we last updated, has the drone learned enough
#
# - skip checks if we have never updated before
# - do at least 6 learning iterations before attempting new update
# - use asymptotic exponential to push model to learn
# until its loss is far enough away from previous update,
inflate += 1 # iterate inflating
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
else:
drone_model.expand_layer_dynamic(self._layer_to_expand)
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
# update our loss history list by taking the average loss
# across all batches
if self._epoch == 0: # be consistent at the first epoch
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
class AdvancedConverter(object):
def __init__(self, learning_rate = 0.05, batch_size = 1, num_epochs = 300, threshold = 0.02, add_layer_dynamic = False, layer_to_expand = None):
# training control
self._learning_rate = learning_rate
self._batchSize = batch_size
self._num_epochs = num_epochs
self._threshold = threshold
self._add_layer_dynamic = add_layer_dynamic
self.__round_robin = False
if layer_to_expand is None:
self.__round_robin = True
self._layer_to_expand = int(layer_to_expand) if layer_to_expand is not None else None
# training history
self._updatedLoss = 1000.0
self._diffs = []
self._losses = []
self._updates = []
self._epoch = 0
self.__rr_begin = 0
self.__rr_last = 0
def losses(self):
return self._losses
def diffs(self):
return self._diffs
def updates(self):
return self._updates
def round_robin(self, num_layers):
self.__rr_last = self.__rr_begin
        self.__rr_begin = np.random.randint(0, num_layers - 1) # careful, expanding last layer will change output number
return self.__rr_last
def save_history(self, fname):
f_train = open(fname, 'wb')
training_data = [self._losses, self._diffs, self._updates]
pickle.dump(training_data, f_train)
f_train.close()
def get_refs(self, base_model, datapoints, scaler = None, cache_data = True):
try:
if not cache_data:
raise DontCacheRef()
# Return the cached list of reference outputs for the base model
return (self.__datapoints, self.__refs)
except(DontCacheRef, AttributeError) as e:
# Create the list of reference outputs for the base model
refs = []
datapoints_for_drone = datapoints
if scaler:
datapoints_for_drone = scaler.transform(datapoints)
for point in datapoints_for_drone:
prob = base_model.predict_proba(point)
refs.append(prob)
self.__datapoints = datapoints_for_drone
self.__refs = refs
return (self.__datapoints, self.__refs)
def convert_model(self, drone_model, base_model, datapoints, scaler = None, cache_data = True, epoch_reset = False):
# Get the list of reference outputs for the base model
datapoints_for_drone, refs = self.get_refs(base_model, datapoints, scaler, cache_data)
inflate = 0 # to inflate the learning without change iterations
if epoch_reset:
self._epoch = 0
avloss = 0
# convert until min epochs are passed and leave only if loss at minima
while (self._epoch < self._num_epochs) or (self._updatedLoss < avloss):
# initialize the total loss for the epoch
epochloss = []
# loop over our data in batches
for (batchX, batchY) in next_batch(datapoints_for_drone, refs, self._batchSize):
batchY = np.array(batchY)
if batchX.shape[0] != self._batchSize:
print('Batch size insufficient ({}), continuing...'.format(batchY.shape[0]))
continue
# Find current output and calculate loss for our graph
preds = drone_model.evaluate_total(batchX, debug = False)
loss, error = dot_loss(preds, batchY)
epochloss.append(loss)
# Update the model
drone_model.update(batchX, batchY, self._learning_rate)
avloss = np.average(epochloss)
diff = 0.0
if self._epoch > 0:
# is the relative improvement of the loss too small, smaller than threshold
diff = math.fabs(avloss - self._losses[-1]) / avloss
self._diffs.append(diff)
self._losses.append(avloss)
update = 0
modify = True if (diff < self._threshold) else False
if modify:
# If it is less than the threshold, is it below
# where we last updated, has the drone learned enough
#
# - skip checks if we have never updated before
# - do at least 6 learning iterations before attempting new update
# - use asymptotic exponential to push model to learn
# until its loss is far enough away from previous update,
inflate += 1 # iterate inflating
modify = True if self._updatedLoss == 1000.0 else (avloss < (self._updatedLoss - (50.0 * (1.0 - np.exp(-0.04 * inflate)) * diff * avloss))) and (inflate > 5)
if modify:
update = 1
inflate = 0
print('Model conversion not sufficient, updating...')
print('Last updated loss: %s' % self._updatedLoss)
self._updatedLoss = avloss
if self._add_layer_dynamic:
drone_model.add_layer_dynamic()
elif self._layer_to_expand is not None:
drone_model.expand_layer_dynamic(self._layer_to_expand)
else:
drone_model.expand_layer_dynamic(self.round_robin(drone_model.num_layers()))
print('Model structure is now:')
drone_model.print_layers()
self._updates.append(update)
print('Epoch: %s, loss %s, diff %.5f, last updated loss %.5f' % (self._epoch, avloss, diff, self._updatedLoss))
# update our loss history list by taking the average loss
# across all batches
if self._epoch == 0: # be consistent at the first epoch
self._losses.append(avloss)
self._diffs.append(math.fabs(avloss - self._updatedLoss) / avloss)
self._updates.append(0)
self._epoch += 1
return drone_model
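# Usage sketch (illustrative only; `drone`, `keras_model`, `X` and `scaler` are
# placeholders supplied by the caller, see the nndrone models and utilities):
#
#     converter = BasicConverter(learning_rate=0.05, num_epochs=300, threshold=0.02)
#     drone = converter.convert_model(drone, keras_model, X, scaler=scaler)
#     converter.save_history("training_history.pkl")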
| 45.261649 | 177 | 0.578001 | [
"BSD-3-Clause"
] | Tevien/HEPDrone | nndrone/converters.py | 12,628 | Python |
from . import registry
def step(match):
def outer(func):
registry.add(match, func)
def inner(*args, **kwargs):
func(*args, **kwargs)
return inner
return outer
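# Usage sketch (illustrative only; the step pattern and function are made up):
#
#     @step(r"^I have (\d+) cucumbers$")
#     def i_have_cucumbers(count):
#         assert int(count) >= 0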
| 18.636364 | 35 | 0.565854 | [
"Apache-2.0"
] | tswicegood/maxixe | maxixe/decorators.py | 205 | Python |
import emoji
print(emoji.emojize(':sunglasses:'))  # converts the :sunglasses: alias into the emoji character
# Transformation  # Comment
frase = 'Curso em Videos Python'
frase[9:13]
frase[9:21:2]
frase[:5]
frase[15:]
frase[9::3]
# Curso em Video Python, class 9 => review 2 [13/07/2020 14h00]
# Transformation methods
# Objeti.Methodo()  # generic Object.Method() call syntax
frase.find('deo')  # finds/searches for a substring, returning its index
frase.find('Android')  # returns -1 when the substring is not found
frase.replace('Python', 'Android')  # replaces one substring with another
frase.lower()  # all lowercase
frase.capitalize()  # only the first character uppercase
frase.title()  # first letter of each word uppercase
frase = '  Aprenda Python  '
frase.rstrip()  # strips only the spaces on the right ("r" stands for right)
frase.lstrip()  # strips only the spaces on the left ("l" stands for left)
# Splitting strings (chains of characters)
frase.split()  # splits on whitespace into a list
'-'.join(frase)  # joins all the elements of frase using '-' as the separator
| 24.055556 | 94 | 0.702079 | [
"MIT"
] | ErosMLima/python-server-connection | natural-languages-python.py | 876 | Python |
_HAS_OPS = False
def _register_extensions():
import os
import imp
import torch
# load the custom_op_library and register the custom ops
lib_dir = os.path.dirname(__file__)
_, path, _ = imp.find_module("_C", [lib_dir])
torch.ops.load_library(path)
try:
_register_extensions()
_HAS_OPS = True
except (ImportError, OSError):
pass
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
import torch
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch.version.cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch.version.cuda
t_version = t_version.split('.')
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major or t_minor != tv_minor:
raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. "
"PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. "
"Please reinstall the torchvision that matches your PyTorch install."
.format(t_major, t_minor, tv_major, tv_minor))
return _version
_check_cuda_version()
| 31 | 115 | 0.619861 | [
"BSD-3-Clause"
] | AryanRaj315/vision | torchvision/extension.py | 1,581 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.compat.v1 import data
from tensorflow_io.core.python.ops import _load_library
hadoop_ops = _load_library('_hadoop_ops.so')
class SequenceFileDataset(data.Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a hadoop sequence
file. A sequence file consists of (key value) pairs sequentially. At
the moment, `org.apache.hadoop.io.Text` is the only serialization type
being supported, and there is no compression support.
For example:
```python
dataset = SequenceFileDataset("/foo/bar.seq")
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
"""
self._filenames = tf.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(SequenceFileDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return hadoop_ops.sequence_file_dataset(
self._filenames, (dtypes.string, dtypes.string))
@property
def output_classes(self):
return tf.Tensor, tf.Tensor
@property
def output_shapes(self):
return (tf.TensorShape([]), tf.TensorShape([]))
@property
def output_types(self):
return dtypes.string, dtypes.string
| 32.480519 | 80 | 0.708517 | [
"Apache-2.0"
] | HubBucket-Team/io | tensorflow_io/hadoop/python/ops/hadoop_dataset_ops.py | 2,501 | Python |
import os
from os.path import splitext
names=os.listdir()
number=1
f_name=""
videos=[]
ext = [".3g2", ".3gp", ".asf", ".asx", ".avi", ".flv", ".m2ts", ".mkv", ".mov", ".mp4", ".mpg", ".mpeg", ".rm", ".swf", ".vob", ".wmv"]
for fileName in names:
if fileName.endswith(tuple(ext)):
f_name, f_ext= splitext(fileName)
videos.append(f_name)
vidIteretor=iter(videos)
for sub in names:
if sub.endswith(".srt"):
os.rename(sub, next(vidIteretor)+".srt")
| 21.391304 | 135 | 0.579268 | [
"MIT"
] | eren-ozdemir/get-subtitles | RenameSubs.py | 492 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
import numpy
class TestWhileOp(unittest.TestCase):
def test_simple_forward(self):
d0 = layers.data(
"d0", shape=[10], append_batch_size=False, dtype='float32')
d1 = layers.data(
"d1", shape=[10], append_batch_size=False, dtype='float32')
d2 = layers.data(
"d2", shape=[10], append_batch_size=False, dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i)
layers.array_write(d1, i, array=data_array)
i = layers.increment(i)
layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
array_len.stop_gradient = True
cond = layers.less_than(x=i, y=array_len)
while_op = layers.While(cond=cond)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True)
layers.array_write(result, i=i, array=mem_array)
layers.less_than(x=i, y=array_len, cond=cond)
sum_result = layers.array_read(array=mem_array, i=i)
loss = layers.mean(sum_result)
append_backward(loss)
cpu = core.CPUPlace()
exe = Executor(cpu)
d = []
for i in range(3):
d.append(numpy.random.random(size=[10]).astype('float32'))
outs = exe.run(feed={'d0': d[0],
'd1': d[1],
'd2': d[2]},
fetch_list=[sum_result])
self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
if __name__ == '__main__':
unittest.main()
| 35.222222 | 76 | 0.629513 | [
"Apache-2.0"
] | skylarch/Paddle | python/paddle/fluid/tests/unittests/test_while_op.py | 2,853 | Python |
from models.joint_fpn import JointFpn
from trainers.segmentation_trainer import SegmentationTrainer
from data_generators.joint_data_generator import JointDataGenerator
from data_generators.scenenet_rgbd_data_generator import ScenenetRGBDDataGenerator
from utils.config import process_config
from utils.dirs import create_dirs
from utils.utils import get_args
import tensorflow as tf
from utils import factory
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def main():
# capture the config path from the run arguments
# then process the json configuration file
try:
args = get_args()
config = process_config(args.config)
except:
print("missing or invalid arguments")
exit(0)
# use mixed precision for training
if config.exp.mixed_precision:
print('Use mixed precision training')
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
if config.exp.jpa_optimization:
tf.config.optimizer.set_jit(True)
# create the experiments dirs
create_dirs([config.callbacks.tensorboard_log_dir,
config.callbacks.checkpoint_dir])
print('Create the training data generator.')
if config.generator.is_scenenet == True:
train_data = ScenenetRGBDDataGenerator(config)
else:
train_data = JointDataGenerator(config)
validation_data = None
if type(config.validation.img_dir) == str:
print('Create the validation data generator.')
validation_data = JointDataGenerator(
config, is_training_set=False)
print('Create the model.')
model = factory.create(config.model.class_name)(config, train_data)
print('Create the trainer')
trainer = SegmentationTrainer(
model, train_data, config, validation_generator=validation_data)
print('Start training the model.')
trainer.train()
if __name__ == '__main__':
main()
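# Config sketch (illustrative only; the key names are the ones referenced above,
# but the values are placeholders and the full schema lives in utils.config and
# the experiment JSON files):
#
#     {
#       "exp": {"mixed_precision": true, "jpa_optimization": false},
#       "generator": {"is_scenenet": false},
#       "validation": {"img_dir": "data/val/images"},
#       "model": {"class_name": "JointFpn"},
#       "callbacks": {"tensorboard_log_dir": "logs", "checkpoint_dir": "checkpoints"}
#     }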
| 32.377049 | 82 | 0.729114 | [
"Apache-2.0"
] | Barchid/Indoor_Segmentation | train_joint.py | 1,975 | Python |
# coding: utf-8
import warnings
import numpy as np
import pandas as pd
from packaging import version
from sklearn.metrics import pairwise_distances_chunked
from sklearn.utils import check_X_y,check_random_state
from sklearn.preprocessing import LabelEncoder
import functools
from pyclustering.cluster.clarans import clarans
from pyclustering.utils import timedcall
from pyclustering.utils import (draw_clusters,
average_inter_cluster_distance,
average_intra_cluster_distance,
average_neighbor_distance)
import sklearn
from sklearn.metrics import (davies_bouldin_score,
silhouette_score,
pairwise_distances,
calinski_harabasz_score
)
# They changed the name of calinski_harabaz_score in later version of sklearn:
# https://github.com/scikit-learn/scikit-learn/blob/c4733f4895c1becdf587b38970f6f7066656e3f9/doc/whats_new/v0.20.rst#id2012
sklearn_version = version.parse(sklearn.__version__)
nm_chg_ver = version.parse("0.23")
if sklearn_version >= nm_chg_ver:
from sklearn.metrics import calinski_harabasz_score as _cal_score
else:
from sklearn.metrics import calinski_harabaz_score as _cal_score
def _get_clust_pairs(clusters):
return [(i, j) for i in clusters for j in clusters if i > j]
def _dunn(data=None, dist=None, labels=None):
clusters = set(labels)
inter_dists = [
dist[np.ix_(labels == i, labels == j)].min()
for i, j in _get_clust_pairs(clusters)
]
intra_dists = [
dist[np.ix_(labels == i, labels == i)].max()
for i in clusters
]
return min(inter_dists) / max(intra_dists)
def dunn(dist, labels):
return _dunn(data=None, dist=dist, labels=labels)
def cop(data, dist, labels):
clusters = set(labels)
cpairs = _get_clust_pairs(clusters)
prox_lst = [
dist[np.ix_(labels == i[0], labels == i[1])].max()
for i in cpairs
]
out_l = []
for c in clusters:
c_data = data[labels == c]
c_center = c_data.mean(axis=0, keepdims=True)
c_intra = pairwise_distances(c_data, c_center).mean()
c_prox = [prox for pair, prox in zip(cpairs, prox_lst) if c in pair]
c_inter = min(c_prox)
to_add = len(c_data) * c_intra / c_inter
out_l.append(to_add)
return sum(out_l) / len(labels)
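# Usage sketch for the validity indices above (illustrative only):
#
#     X = np.random.rand(100, 4)
#     labels = np.random.randint(0, 3, size=100)
#     D = pairwise_distances(X)
#     print(dunn(D, labels), cop(X, D, labels))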
def _silhouette_score2(data=None, dist=None, labels=None):
return silhouette_score(dist, labels, metric='precomputed')
def _davies_bouldin_score2(data=None, dist=None, labels=None):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'divide by zero')
return davies_bouldin_score(data, labels)
def _calinski_harabaz_score2(data=None, dist=None, labels=None):
return _cal_score(data, labels)
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def cluster_dist_reduce(D_chunk, start, labels, label_freqs):
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)),
dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(labels, weights=D_chunk[i],
minlength=len(label_freqs))
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
def inter_cluster_dist(data=None, dist=None, labels=None):
_, inter_dist = cluster_distances(dist, labels, metric='precomputed')
return inter_dist
def intra_cluster_dist(data=None, dist=None, labels=None):
intra_dist, _ = cluster_distances(dist, labels, metric='precomputed')
return intra_dist
def cluster_distances(X, labels, *, metric='precomputed', random_state=None, **kwds):
return intra_inter_distances(X, labels, metric=metric, **kwds)
def intra_inter_distances(X, labels, metric='precomputed'):
# Check for non-zero diagonal entries in precomputed distance matrix
atol = np.finfo(X.dtype).eps * 100
if np.any(np.abs(np.diagonal(X)) > atol):
raise ValueError(
'The precomputed distance matrix contains non-zero '
'elements on the diagonal. Use np.fill_diagonal(X, 0).'
)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
check_number_of_labels(len(le.classes_), n_samples)
reduce_func = functools.partial(cluster_dist_reduce,
labels=labels, label_freqs=label_freqs)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func))
intra_clust_dists, inter_clust_dists = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
return np.mean(intra_clust_dists),np.mean(inter_clust_dists)
def clarans_labels(clarans_object):
labels_clarans = clarans_object.get_clusters()
labels=pd.DataFrame(labels_clarans).T.melt(var_name='clusters')\
.dropna()
labels['value']=labels.value.astype(int)
labels=labels.sort_values(['value'])\
.set_index('value')\
.values\
.flatten()
return labels
def calculate_clarans_cvi(data,initial_cluster,dist=None):
cvi_df = pd.DataFrame(columns=['avg_inter_dist','silhouette','calinski',
'avg_intra_dist','davies','dunn'])
df_list = data.values.tolist()
dist=pairwise_distances(data)
np.fill_diagonal(dist, 0)
for k in range(initial_cluster,10):
print(k)
clarans_model = clarans(df_list,k,3,5)
(_, result) =timedcall(clarans_model.process)
labels = clarans_labels(result)
        avg_inter_dist = inter_cluster_dist(dist=dist, labels=labels)
        sihlouette = silhouette_score(dist, labels, metric='precomputed')
        davies = davies_bouldin_score(data, labels)
        calinski = calinski_harabasz_score(data, labels)
        avg_intra_dist = intra_cluster_dist(dist=dist, labels=labels)
        dunn_ = dunn(dist, labels)
        # values ordered to match the cvi_df columns defined above
        cvi_df.loc[k] = [avg_inter_dist, sihlouette, calinski,
                         avg_intra_dist, davies, dunn_]
print(cvi_df)
del clarans_model
return cvi_df | 37.988827 | 123 | 0.691324 | [
"Unlicense"
] | wgova/automations | clust_indices.py | 6,800 | Python |
import numpy
GenSpeedF = 0
IntSpdErr = 0
LastGenTrq = 0
LastTime = 0
LastTimePC = 0
LastTimeVS = 0
PitCom = numpy.zeros(3)
VS_Slope15 = 0
VS_Slope25 = 0
VS_SySp = 0
VS_TrGnSp = 0 | 33.538462 | 52 | 0.298165 | [
"Apache-2.0"
] | tonino102008/openfast | ExampleCases/OpFAST_WF1x1/globalDISCON.py | 436 | Python |
from .base import Uploader
from .photo import PhotoUploader
from .doc import DocUploader
from .audio import AudioUploader
| 24.4 | 32 | 0.836066 | [
"MIT"
] | Doncode/vkbottle | vkbottle/api/uploader/__init__.py | 122 | Python |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestImageScaler(Caffe2OnnxLayerTest):
def create_net(self, shape, scale, ir_version):
"""
ONNX net IR net
Input->ImageScaler->Output => Input->ScaleShift(Power)
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
node_def = onnx.helper.make_node(
'ImageScaler',
inputs=['input'],
outputs=['output'],
bias=bias,
scale=scale
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ref_net = None
return onnx_net, ref_net
def create_net_const(self, shape, scale, precision, ir_version):
"""
ONNX net IR net
Input->Concat(+scaled const)->Output => Input->Concat(+const)
"""
#
# Create ONNX model
#
import onnx
from onnx import helper
from onnx import TensorProto
concat_axis = 0
output_shape = shape.copy()
output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
constant = np.random.randint(-127, 127, shape).astype(np.float)
bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
node_const_def = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['const1'],
value=helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=constant.shape,
vals=constant.flatten(),
),
)
node_def = onnx.helper.make_node(
'ImageScaler',
inputs=['const1'],
outputs=['scale'],
bias=bias,
scale=scale
)
node_concat_def = onnx.helper.make_node(
'Concat',
inputs=['input', 'scale'],
outputs=['output'],
axis=concat_axis
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_const_def, node_def, node_concat_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ir_const = constant * scale + np.expand_dims(np.expand_dims([bias], 2), 3)
if precision == 'FP16':
ir_const = ir_const.astype(np.float16)
ref_net = None
return onnx_net, ref_net
test_data_precommit = [dict(shape=[2, 4, 6, 8], scale=4.5),
dict(shape=[1, 1, 10, 12], scale=0.5)]
test_data = [dict(shape=[1, 1, 10, 12], scale=0.5),
dict(shape=[1, 3, 10, 12], scale=1.5),
dict(shape=[6, 8, 10, 12], scale=4.5)]
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
| 31.518519 | 103 | 0.57971 | [
"Apache-2.0"
] | 3Demonica/openvino | tests/layer_tests/onnx_tests/test_image_scaler.py | 5,106 | Python |
lista = ['item1', 'item2', 'item3', 123, 12.43, 898.34, 00.989]
print(lista)
del lista[0] # pode remover tudo ou apenas um item de um indice permanentemente
popped = lista.pop(0) #pode remover um item pelo indice de uma lista, porem o item tirado pode ser posto em uma variavel
lista.remove('item3') # pode remover um item pelo valor / remove o primeiro valor da lista
print(lista)
print(popped)
valores = list(range(0, 11))  # list creates a list
print(valores)
valores_1 = [7, 3, 6, 9, 7, 8, 2, 1, 78, 90, 23, 45, 56, 21, 3]
print(valores_1)
valores_1.sort()  # sorts the items of a list in place, permanently
print(valores_1)
valores_1.sort(reverse=True)  # does the same as above, but reverses the order
print(valores_1)
print(len(valores_1))  # len counts the number of elements in a list or variable
valores_1[0] = 'new'  # replaces the value at an index with another value at the same index
valores_1.append('alex')  # appends an object to the end of the list
valores_1.insert(4, 'camila')  # inserts an item at a specific index
print(valores_1)
print('\n')
a = [12, 43, 76, 35, 24] # lista
b = a  # creates a reference (link) to the same list
print(f'{a}\n{b}')
b.remove(43)
print(f'{a}\n{b}')
c = a[:]  # [:] is used to create a copy of a list that can be modified however you like
c.append('jack')
print(f'{a}\n{b}\n{c}')
| 37.6 | 120 | 0.711246 | [
"MIT"
] | Alex4gtx/estudos | python/cursoemvideo-python/03-mundo-3/listas/lista 1/listas.py | 1,318 | Python |
import random
### Advantage Logic ###
def advantage(rollfunc):
    roll1 = rollfunc()
    roll2 = rollfunc()
if roll1 > roll2:
return roll1
else:
return roll2
### Disadvantage Logic ###
def disadvantage(rollfunc):
    roll1 = rollfunc()
    roll2 = rollfunc()
if roll1 < roll2:
return roll1
else:
return roll2
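# Usage sketch (illustrative): pass the die-roll function itself so it can be called twice,
# e.g. advantage(rolld20) returns the higher of two d20 rolls and disadvantage(rolld6)
# returns the lower of two d6 rolls.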
### Die Rolls ###
def rolld4(sides:int=4):
return random.randint(1, sides)
def rolld6(sides:int=6):
return random.randint(1, sides)
def rolld8(sides:int=8):
return random.randint(1, sides)
def rolld10(sides:int=10):
return random.randint(1, sides)
def rolld12(sides:int=12):
return random.randint(1, sides)
def rolld20(sides:int=20):
return random.randint(1, sides) | 20.162162 | 35 | 0.651475 | [
"MIT"
] | bwnelb/dnd5e | DieRolls.py | 746 | Python |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from legate.pandas.common import types as ty
class SeriesAccessor(object):
def __init__(self, series):
self._series = series
self._column = series._frame._columns[0]
def _construct_result(self, column):
return self._series.__ctor__(
frame=self._series._frame.replace_columns([column])
)
class CategoricalAccessor(SeriesAccessor):
def __init__(self, series):
super(CategoricalAccessor, self).__init__(series)
assert ty.is_categorical_dtype(self._column.dtype)
@property
def codes(self):
return self._construct_result(self._column.get_codes())
class DatetimeProperties(SeriesAccessor):
def __init__(self, series):
super(DatetimeProperties, self).__init__(series)
assert self._column.dtype == ty.ts_ns
@property
def year(self):
return self._get_dt_field("year")
@property
def month(self):
return self._get_dt_field("month")
@property
def day(self):
return self._get_dt_field("day")
@property
def hour(self):
return self._get_dt_field("hour")
@property
def minute(self):
return self._get_dt_field("minute")
@property
def second(self):
return self._get_dt_field("second")
@property
def weekday(self):
return self._get_dt_field("weekday")
def _get_dt_field(self, field):
dtype = ty.get_dt_field_type(self._column.dtype, field)
return self._construct_result(self._column.get_dt_field(field, dtype))
class StringMethods(SeriesAccessor):
def __init__(self, series):
super(StringMethods, self).__init__(series)
assert ty.is_string_dtype(self._column.dtype)
def contains(self, pat, case=True, flags=0, na=np.NaN, regex=True):
if pat is None and not case:
raise AttributeError("'NoneType' object has no attribute 'upper'")
assert pat is not None and regex
return self._construct_result(self._column.contains(pat))
def pad(self, width, side="left", fillchar=" "):
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
return self._construct_result(
self._column.pad(width, side=side, fillchar=fillchar)
)
def strip(self, to_strip=None):
return self._construct_result(self._column.strip(to_strip=to_strip))
def zfill(self, width):
return self._construct_result(self._column.zfill(width))
def lower(self):
return self._construct_result(self._column.lower())
def upper(self):
return self._construct_result(self._column.upper())
def swapcase(self):
return self._construct_result(self._column.swapcase())
def to_datetime(self, format):
if format is None:
raise ValueError("Format must be provided")
return self._construct_result(self._column.to_datetime(format))
| 30.059322 | 78 | 0.68424 | [
"Apache-2.0"
] | Pandinosaurus/legate.pandas | legate/pandas/frontend/accessors.py | 3,547 | Python |
"""
This is the UGaLi analysis sub-package.
Classes related to higher-level data analysis live here.
Modules
objects :
mask :
"""
| 13.454545 | 56 | 0.655405 | [
"MIT"
] | DarkEnergySurvey/ugali | ugali/analysis/__init__.py | 148 | Python |
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation,Flatten
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.layers.convolutional import Convolution2D, Convolution1D, MaxPooling2D, MaxPooling1D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU, PReLU, LeakyReLU
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import SGD, Adagrad, RMSprop
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.utils.io_utils import HDF5Matrix
from scipy import signal
import scipy.io
import scipy.io.wavfile as wav
import numpy as np
import h5py
import librosa
import sys
import os
def make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE):
D=librosa.stft(y,n_fft=FRAMESIZE,hop_length=OVERLAP,win_length=FFTSIZE,window=scipy.signal.hamming)
Sxx = np.log10(abs(D)**2)
print(str(D) + " the value for D")
phase = np.exp(1j * np.angle(D))
print(str(phase) + " the value of phase")
mean = np.mean(Sxx, axis=1).reshape((257,1))
std = np.std(Sxx, axis=1).reshape((257,1))+1e-12
Sxx = (Sxx-mean)/std
return Sxx, phase, mean, std
def recons_spec_phase(Sxx_r, phase):
Sxx_r = np.sqrt(10**Sxx_r)
R = np.multiply(Sxx_r , phase)
result = librosa.istft(R,
hop_length=256,
win_length=512,
window=scipy.signal.hamming)
return result
def predict(modelpath, noisylistpath):
model=load_model(modelpath) #"weights/DNN_spec_20160425v2.hdf5"
FRAMESIZE = 512
OVERLAP = 256
FFTSIZE = 512
RATE = 16000
FRAMEWIDTH = 2
FBIN = FRAMESIZE//2+1
# noisylistpath = sys.argv[2]
noisylistpath = noisylistpath
with open(noisylistpath, 'r') as f:
for line in f:
print(line)
filename = line.split('/')[-1][:]
print(filename)
y,sr=librosa.load(line[:],sr=RATE)
training_data = np.empty((10000, FBIN, FRAMEWIDTH*2+1)) # For Noisy data
Sxx, phase, mean, std = make_spectrum_phase(y, FRAMESIZE, OVERLAP, FFTSIZE)
idx = 0
for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH): # 5 Frmae
training_data[idx,:,:] = Sxx[:,i-FRAMEWIDTH:i+FRAMEWIDTH+1] # For Noisy data
idx = idx + 1
X_train = training_data[:idx]
X_train = np.reshape(X_train,(idx,-1))
predict = model.predict(X_train)
count=0
for i in range(FRAMEWIDTH, Sxx.shape[1]-FRAMEWIDTH):
Sxx[:,i] = predict[count]
count+=1
# # The un-enhanced part of spec should be un-normalized
Sxx[:, :FRAMEWIDTH] = (Sxx[:, :FRAMEWIDTH] * std) + mean
Sxx[:, -FRAMEWIDTH:] = (Sxx[:, -FRAMEWIDTH:] * std) + mean
recons_y = recons_spec_phase(Sxx, phase)
output = librosa.util.fix_length(recons_y, y.shape[0])
wav.write("static/wav/enhanced.wav",RATE,np.int16(output*32767))
return os.path.join("static","wav","enhanced.wav")
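if __name__ == "__main__":
    # Illustrative invocation only: both arguments are placeholders (the weights path echoes
    # the comment inside predict(); "noisy_list.txt" is a hypothetical text file listing one
    # noisy wav path per line), not assets shipped with this project.
    print(predict("weights/DNN_spec_20160425v2.hdf5", "noisy_list.txt"))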
| 36.235294 | 113 | 0.663312 | [
"Apache-2.0"
] | SharanRajani/SoundQX | test_gen_spec.py | 3,080 | Python |
from myelin.utils import CallbackList, Experience
class RLInteraction:
"""An episodic interaction between an agent and an environment."""
def __init__(self, env, agent, callbacks=None, termination_conditions=None):
self.env = env
self.agent = agent
self.callbacks = CallbackList(callbacks)
if termination_conditions is None:
self.termination_conditions = [lambda a: False]
else:
self.termination_conditions = termination_conditions
self.episode = 0
self.step = 0
@property
def info(self):
return {
'episode': self.episode,
'step': self.step
}
def should_continue(self):
for termination_condition in self.termination_conditions:
if termination_condition(self.info):
print(termination_condition)
return False
return True
def start(self):
"""Starts agent-environment interaction."""
self.callbacks.on_interaction_begin()
while self.should_continue():
self.callbacks.on_episode_begin(self.episode)
self.env.reset()
self.step = 0
while not self.env.is_terminal():
self.callbacks.on_step_begin(self.step)
state = self.env.get_state()
action = self.agent.get_action(state)
next_state, reward, done, info = self.env.step(action)
experience = Experience(state, action, reward, next_state, done)
self.agent.update(experience)
self.callbacks.on_step_end(self.step)
self.step += 1
self.callbacks.on_episode_end(self.episode, self.step)
self.episode += 1
self.callbacks.on_interaction_end(self.episode)
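if __name__ == "__main__":
    # Minimal, illustrative smoke test of the interface this loop expects. "CountdownEnv"
    # and "DoNothingAgent" are hypothetical stand-ins (not part of myelin); they implement
    # only the methods used above (reset/is_terminal/get_state/step and get_action/update),
    # and the constructor's own defaults are relied on for callbacks.
    class CountdownEnv:
        def __init__(self, start=3):
            self.start = start
            self.state = start
        def reset(self):
            self.state = self.start
        def is_terminal(self):
            return self.state <= 0
        def get_state(self):
            return self.state
        def step(self, action):
            # Count down one step; reward 1 only when the countdown reaches zero.
            self.state -= 1
            return self.state, int(self.state == 0), self.is_terminal(), {}
    class DoNothingAgent:
        def get_action(self, state):
            return 0
        def update(self, experience):
            pass
    interaction = RLInteraction(
        CountdownEnv(),
        DoNothingAgent(),
        termination_conditions=[lambda info: info['episode'] >= 3],
    )
    interaction.start()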
| 35.921569 | 80 | 0.606987 | [
"MIT"
] | davidrobles/myelin | myelin/core/interactions.py | 1,832 | Python |
from __future__ import division
import numpy as np
from scipy import ndimage as ndi
from ..morphology import dilation, erosion, square
from ..util import img_as_float, view_as_windows
from ..color import gray2rgb
def _find_boundaries_subpixel(label_img):
"""See ``find_boundaries(..., mode='subpixel')``.
Notes
-----
This function puts in an empty row and column between each *actual*
row and column of the image, for a corresponding shape of $2s - 1$
for every image dimension of size $s$. These "interstitial" rows
and columns are filled as ``True`` if they separate two labels in
`label_img`, ``False`` otherwise.
    ``view_as_windows`` is used to get the neighborhood of each pixel,
    which is then checked for the presence of two or more distinct
    labels.
"""
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],
label_img.dtype)
pixels = (slice(None, None, 2), ) * ndim
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(np.pad(label_img_expanded, 1,
mode='constant', constant_values=0),
(3,) * ndim)
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if len(values) > 2: # single value and max_label
boundaries[index] = True
return boundaries
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
"""Return bool array where boundaries between labeled regions are True.
Parameters
----------
label_img : array of int or bool
An array in which different regions are labeled with either different
integers or boolean values.
connectivity: int in {1, ..., `label_img.ndim`}, optional
A pixel is considered a boundary pixel if any of its neighbors
has a different label. `connectivity` controls which pixels are
considered neighbors. A connectivity of 1 (default) means
pixels sharing an edge (in 2D) or a face (in 3D) will be
considered neighbors. A connectivity of `label_img.ndim` means
pixels sharing a corner will be considered neighbors.
mode: string in {'thick', 'inner', 'outer', 'subpixel'}
How to mark the boundaries:
- thick: any pixel not completely surrounded by pixels of the
same label (defined by `connectivity`) is marked as a boundary.
This results in boundaries that are 2 pixels thick.
- inner: outline the pixels *just inside* of objects, leaving
background pixels untouched.
- outer: outline pixels in the background around object
boundaries. When two objects touch, their boundary is also
marked.
- subpixel: return a doubled image, with pixels *between* the
original pixels marked as boundary where appropriate.
background: int, optional
For modes 'inner' and 'outer', a definition of a background
label is required. See `mode` for descriptions of these two.
Returns
-------
boundaries : array of bool, same shape as `label_img`
A bool image where ``True`` represents a boundary pixel. For
`mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
inserted in between all other pairs of pixels).
Examples
--------
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
>>> find_boundaries(labels, mode='thick').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='inner').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='outer').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> labels_small = labels[::2, ::3]
>>> labels_small
array([[0, 0, 0, 0],
[0, 0, 5, 0],
[0, 1, 5, 0],
[0, 0, 5, 0],
[0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> bool_image = np.array([[False, False, False, False, False],
... [False, False, False, False, False],
... [False, False, True, True, True],
... [False, False, True, True, True],
    ...                        [False, False, True, True, True]], dtype=bool)
>>> find_boundaries(bool_image)
array([[False, False, False, False, False],
[False, False, True, True, True],
[False, True, True, True, True],
[False, True, True, False, False],
[False, True, True, False, False]], dtype=bool)
"""
if label_img.dtype == 'bool':
label_img = label_img.astype(np.uint8)
ndim = label_img.ndim
selem = ndi.generate_binary_structure(ndim, connectivity)
if mode != 'subpixel':
boundaries = dilation(label_img, selem) != erosion(label_img, selem)
if mode == 'inner':
foreground_image = (label_img != background)
boundaries &= foreground_image
elif mode == 'outer':
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = ndi.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) !=
erosion(inverted_background, selem)) &
~background_image)
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
def mark_boundaries(image, label_img, color=(1, 1, 0),
outline_color=None, mode='outer', background_label=0):
"""Return image with boundaries between labeled regions highlighted.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
label_img : (M, N) array of int
Label array where regions are marked by different integer values.
color : length-3 sequence, optional
RGB color of boundaries in the output image.
outline_color : length-3 sequence, optional
RGB color surrounding boundaries in the output image. If None, no
outline is drawn.
mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
The mode for finding boundaries.
background_label : int, optional
Which label to consider background (this is only useful for
modes ``inner`` and ``outer``).
Returns
-------
marked : (M, N, 3) array of float
An image in which the boundaries between labels are
superimposed on the original image.
See Also
--------
find_boundaries
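    Examples
    --------
    A minimal, illustrative call on toy inputs (any grayscale image with an
    integer label array behaves the same way):
    >>> image = np.zeros((4, 4))
    >>> label_img = np.array([[0, 0, 1, 1]] * 4)
    >>> mark_boundaries(image, label_img).shape
    (4, 4, 3)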
"""
marked = img_as_float(image, force_copy=True)
if marked.ndim == 2:
marked = gray2rgb(marked)
if mode == 'subpixel':
# Here, we want to interpose an extra line of pixels between
# each original line - except for the last axis which holds
# the RGB information. ``ndi.zoom`` then performs the (cubic)
# interpolation, filling in the values of the interposed pixels
marked = ndi.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],
mode='reflect')
boundaries = find_boundaries(label_img, mode=mode,
background=background_label)
if outline_color is not None:
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
| 43.030172 | 83 | 0.54102 | [
"MIT"
] | IZ-ZI/-EECS-393-_Attendance-System | venv/lib/python3.8/site-packages/skimage/segmentation/boundaries.py | 9,983 | Python |
from __future__ import print_function
import argparse
import gym
from itertools import count
import numpy as np
import mxnet as mx
import mxnet.ndarray as F
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
parser = argparse.ArgumentParser(description='MXNet actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 1)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='interval between training status logs (default: 10)')
args = parser.parse_args()
env = gym.make('CartPole-v0')
env.seed(args.seed)
class Policy(gluon.Block):
def __init__(self, **kwargs):
super(Policy, self).__init__(**kwargs)
with self.name_scope():
self.dense = nn.Dense(16, in_units=4, activation='relu')
self.action_pred = nn.Dense(2, in_units=16)
self.value_pred = nn.Dense(1, in_units=16)
def forward(self, x):
x = self.dense(x)
probs = self.action_pred(x)
values = self.value_pred(x)
return F.softmax(probs), values
net = Policy()
net.collect_params().initialize(mx.init.Uniform(0.02))
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 3e-2})
loss = gluon.loss.L1Loss()
running_reward = 10
for epoch in count(1):
state = env.reset()
rewards = []
values = []
heads = []
actions = []
with autograd.record():
# Sample a sequence of actions
for t in range(10000):
state = mx.nd.array(np.expand_dims(state, 0))
prob, value = net(state)
action, logp = mx.nd.sample_multinomial(prob, get_prob=True)
state, reward, done, _ = env.step(action.asnumpy()[0])
if args.render:
env.render()
rewards.append(reward)
values.append(value)
actions.append(action.asnumpy()[0])
heads.append(logp)
if done:
break
# reverse accumulate and normalize rewards
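        # i.e. replace each reward with the discounted return R_i = r_i + gamma * R_{i+1},
        # accumulated from the last timestep backwards, then standardize the returns to
        # zero mean / unit variance to reduce the variance of the policy gradient.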
running_reward = running_reward * 0.99 + t * 0.01
R = 0
for i in range(len(rewards)-1, -1, -1):
R = rewards[i] + args.gamma * R
rewards[i] = R
rewards = np.array(rewards)
rewards -= rewards.mean()
rewards /= rewards.std() + np.finfo(rewards.dtype).eps
# compute loss and gradient
L = sum([loss(value, mx.nd.array([r])) for r, value in zip(rewards, values)])
final_nodes = [L]
for logp, r, v in zip(heads, rewards, values):
reward = r - v.asnumpy()[0,0]
# Here we differentiate the stochastic graph, corresponds to the
# first term of equation (6) in https://arxiv.org/pdf/1506.05254.pdf
# Optimizer minimizes the loss but we want to maximizing the reward,
# so use we use -reward here.
final_nodes.append(logp*(-reward))
autograd.backward(final_nodes)
trainer.step(t)
if epoch % args.log_interval == 0:
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
epoch, t, running_reward))
if running_reward > 200:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
| 34.846154 | 85 | 0.604029 | [
"Apache-2.0"
] | Jeffery-Song/MXNet-RDMA | example/gluon/actor_critic.py | 3,624 | Python |
import click
from typing import Sequence, Tuple
from click.formatting import measure_table, iter_rows
class OrderedCommand(click.Command):
def get_params(self, ctx):
rv = super().get_params(ctx)
rv.sort(key=lambda o: (not o.required, o.name))
return rv
def format_options(self, ctx, formatter) -> None:
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section("Options"):
self.write_dl(formatter, opts)
@staticmethod
def write_dl(formatter, rows: Sequence[Tuple[str, str]], col_max: int = 30, col_spacing: int = 2) -> None:
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError("Expected two columns for definition list")
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
formatter.write(f"{'':>{formatter.current_indent}}{first}")
if not second:
formatter.write("\n")
continue
if len(first) <= first_col - col_spacing:
formatter.write(" " * (first_col - len(first)))
else:
formatter.write("\n")
formatter.write(" " * (first_col + formatter.current_indent))
if "[" in second:
text, meta = second.split("[")
formatter.write(f"[{meta} {text}\n")
else:
formatter.write(f"{second}\n")
def add_options(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
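# Usage sketch (illustrative command name): the shared options below can be stacked onto any
# click command, e.g.
#   @click.command(cls=OrderedCommand)
#   @add_options(shared_options)
#   def submit(**kwargs):
#       ...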
class Defaults:
DOCKER_IMAGE = "docker.io/yellowdogco/virtual-screening-worker-public:3.3.0"
PORTAL_API_URL = "https://portal.yellowdog.co/api"
NAMESPACE = "virtual-screening"
RETRIES = 10
shared_options = [
click.option("--api_key_id", envvar="API_KEY_ID", required=True,
help="The application's API key ID for authenticating with the platform API. It is recommended to "
"supply this via the environment variable API_KEY_ID"),
click.option("--api_key_secret", envvar="API_KEY_SECRET", required=True,
help="The application's API key secret for authenticating with the platform API. It is recommended to "
"supply this via the environment variable API_KEY_SECRET"),
click.option("--template_id", envvar="TEMPLATE_ID", required=True,
help="The compute requirement template ID to use for provisioning compute"),
click.option("--platform_api_url", envvar="PLATFORM_API_URL", default=Defaults.PORTAL_API_URL,
help="The URL of the platform API"),
click.option("--namespace", envvar="NAMESPACE", default=Defaults.NAMESPACE,
help="The namespace within which all work and compute will be created"),
click.option("--docker_image", envvar="DOCKER_IMAGE", default=Defaults.DOCKER_IMAGE,
help="The docker image that will be executed by the workers"),
click.option("--retries", envvar="RETRIES", type=int, default=Defaults.RETRIES,
help="The number of times each failed task should be retried"),
]
| 40.964706 | 120 | 0.617461 | [
"Apache-2.0"
] | yellowdog/virtual-screening-public | src/cli.py | 3,482 | Python |
from django.utils.translation import ugettext_lazy as _
from mayan.apps.events.classes import EventTypeNamespace
namespace = EventTypeNamespace(
label=_('File metadata'), name='file_metadata'
)
event_file_metadata_document_file_submit = namespace.add_event_type(
label=_('Document file submitted for file metadata processing'),
name='document_version_submit'
)
event_file_metadata_document_file_finish = namespace.add_event_type(
label=_('Document file file metadata processing finished'),
name='document_version_finish'
)
| 33.058824 | 69 | 0.782918 | [
"Apache-2.0"
] | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | mayan/apps/file_metadata/events.py | 562 | Python |
import argparse
import glob
import os
import time
import vlc
import cv2
import numpy as np
from enum import Enum
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFont
from align.align_trans import get_reference_facial_points
from align.detector import load_detect_faces_models, process_faces
from align.visualization_utils import draw_fps, show_results
from util.extract_feature_v2 import extract_feature_for_img, load_face_id_model
MIN_FACE_PROB = 0.9
STREAM_DIR = '/home/ec2-user/projects/facelab-data/stream-data'
RESULT_DIR = '/home/ec2-user/projects/facelab-data/results'
ID_FEATURES_DIR = '/home/ec2-user/projects/facelab-data/test_Aligned/'
FACE_ID_MODEL_ROOT = '/home/ec2-user/projects/facelab-data/models/backbone_ir50_ms1m_epoch120.pth'
FONT_PATH = '/usr/share/fonts/dejavu/DejaVuSans.ttf'
class Mode(Enum):
DEMO = 1
FILE = 2
def __str__(self):
return self.name
@staticmethod
def from_string(s):
try:
return Mode[s]
except KeyError:
raise ValueError()
def load_id_files(id_features_dir):
id_npy = {}
for path in glob.glob('{}/*.npy'.format(id_features_dir)):
name = os.path.splitext(os.path.basename(path))[0]
id_npy[name] = np.load(path)
return id_npy
def check_identity(id_npy, query_features, max_min_dist=1.0):
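    # For each enrolled identity, average the L2 distances between the query embedding and
    # that identity's stored embeddings; the smallest average wins, and anything farther
    # than max_min_dist is reported as 'Unknown'.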
distances_from_id = {}
for name, id_npy_arr in id_npy.items():
distances_from_id[name] = []
for id_npy_row in id_npy_arr:
dist = np.linalg.norm(id_npy_row - query_features)
distances_from_id[name].append(dist)
min_dist = np.finfo(float).max
name_match = ''
for name, distances in distances_from_id.items():
avg = np.mean(distances)
if avg < min_dist:
min_dist = avg
name_match = name
if min_dist > max_min_dist:
name_match = 'Unknown'
return name_match, min_dist
def process_and_viz_img(pil_img,
det_models,
face_id_model,
reference,
crop_size,
id_npy,
font):
# Detect bboxes and landmarks for all faces in the image and warp the
# faces.
face_results = process_faces(
img=pil_img,
det_models=det_models,
reference=reference,
crop_size=crop_size)
# Filter results by detection probability.
filtered_face_results = []
for face_result in face_results:
face_prob = face_result.bounding_box[4]
if face_prob < MIN_FACE_PROB:
print('Skipping detection with low face probability: {:.2f}'.format(face_prob))
continue
filtered_face_results.append(face_result)
face_results = filtered_face_results
identity_list = []
for face_result in face_results:
features = extract_feature_for_img(
img=face_result.warped_face,
backbone=face_id_model)
# features is tensor, so converting to numpy arr below
identity, min_dist = check_identity(
id_npy=id_npy,
query_features=features.numpy())
identity_list.append((identity, '({:.2f})'.format(min_dist)))
# Visualize the results
viz_img = show_results(
img=pil_img,
bounding_boxes=[
fr.bounding_box
for fr in face_results
],
facial_landmarks=[
fr.landmark
for fr in face_results
],
names=identity_list,
font=font)
if identity_list:
names = list(zip(*identity_list))[0]
else:
names = []
return viz_img, names
def play_sound_for_name(name):
name_to_sound_file = {
'neelam': '/Users/bkovacs/Documents/neelam-how-is-it-going.m4a',
'kovi': '/Users/bkovacs/Documents/balazs-how-is-it-going.m4a',
}
name = name.lower()
if name not in name_to_sound_file:
return
player = vlc.MediaPlayer(name_to_sound_file[name])
player.play()
def play_sound_if_needed(names,
name_to_last_time_seen,
cur_time,
min_elapsed_to_play=3):
for name in names:
if (name not in name_to_last_time_seen or
name_to_last_time_seen[name] + min_elapsed_to_play < cur_time):
play_sound_for_name(name)
name_to_last_time_seen[name] = cur_time
def demo(det_models,
face_id_model,
reference,
crop_size,
id_npy,
max_size,
font):
cap = cv2.VideoCapture(0)
name_to_last_time_seen = {}
try:
while cap.isOpened():
start_time = time.time()
ret, image_np = cap.read()
if ret and cap.isOpened():
# Process frame
# BGR -> RGB
pil_img = Image.fromarray(image_np[..., ::-1])
pil_img.thumbnail((max_size, max_size))
viz_img, names = process_and_viz_img(
pil_img=pil_img,
det_models=det_models,
face_id_model=face_id_model,
reference=reference,
crop_size=crop_size,
id_npy=id_npy,
font=font,
)
cur_time = time.time()
play_sound_if_needed(
names=names,
name_to_last_time_seen=name_to_last_time_seen,
cur_time=cur_time)
fps = 1.0 / (cur_time - start_time)
draw_fps(
img=viz_img,
font=font,
fps=fps,
)
# Display the resulting frame
viz_img_bgr = np.array(viz_img)[..., ::-1]
cv2.imshow('Face Detection Demo', viz_img_bgr)
# Quit if we press 'q'.
if (cv2.waitKey(1) & 0xFF) == ord('q'):
break
finally:
# When everything is done, release the capture.
cap.release()
cv2.destroyAllWindows()
def process_files(input_dir,
output_dir,
det_models,
face_id_model,
reference,
crop_size,
id_npy,
max_size,
font):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
image_names = list(os.listdir(input_dir))
for img_idx in tqdm(range(len(image_names))):
image_name = image_names[img_idx]
pil_img = Image.open(os.path.join(input_dir, image_name))
pil_img.thumbnail((max_size, max_size))
viz_img, _ = process_and_viz_img(
pil_img=pil_img,
det_models=det_models,
face_id_model=face_id_model,
reference=reference,
crop_size=crop_size,
id_npy=id_npy,
font=font,
)
viz_img.save(os.path.join(output_dir, '{}-stream.jpg'.format(img_idx)))
def main(mode, face_id_model_root, id_features_dir, font_path):
print('Loading models...')
det_models = load_detect_faces_models()
face_id_model = load_face_id_model(model_root=face_id_model_root)
id_npy = load_id_files(id_features_dir)
crop_size = 112
max_size = 1024
reference = get_reference_facial_points(default_square=True)
font = ImageFont.FreeTypeFont(font=font_path, size=24)
print('Starting image processing...')
if mode == Mode.DEMO:
demo(
det_models=det_models,
face_id_model=face_id_model,
reference=reference,
crop_size=crop_size,
id_npy=id_npy,
max_size=max_size,
font=font)
elif mode == Mode.FILE:
process_files(
input_dir=STREAM_DIR,
output_dir=RESULT_DIR,
det_models=det_models,
face_id_model=face_id_model,
reference=reference,
crop_size=crop_size,
id_npy=id_npy,
max_size=max_size,
font=font)
else:
raise ValueError('Invalid mode: {}'.format(mode))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=Mode.from_string, default=Mode.DEMO, choices=list(Mode))
parser.add_argument('--face_id_model_root',
type=str,
default=FACE_ID_MODEL_ROOT)
parser.add_argument('--id_features_dir',
type=str,
default=ID_FEATURES_DIR)
parser.add_argument('--font_path',
type=str,
default=FONT_PATH)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args.mode,
args.face_id_model_root,
args.id_features_dir,
args.font_path)
| 30.844828 | 98 | 0.587144 | [
"MIT"
] | kovibalu/face.evoLVe.PyTorch | test_video_stream.py | 8,945 | Python |
from userinput import *
from types import SimpleNamespace
import sys
from PyQt5.QtCore import pyqtSignal as pys
class numberInput(QtWidgets.QMainWindow,Ui_MainWindow):
input_num=pys(str)
def __init__(self,opacity=1,loc=(200,200),parent=None):
super(numberInput,self).__init__(parent)
self.setupUi(self)
#self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.btn=SimpleNamespace(name="",text="")
self.setTransparency(opacity)
self.input_text=""
self.close_btn.clicked.connect(self.close)
self.move(loc[0],loc[1])
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.b1.clicked.connect(self.getNumbers)
self.b2.clicked.connect(self.getNumbers)
self.b3.clicked.connect(self.getNumbers)
self.b4.clicked.connect(self.getNumbers)
self.b5.clicked.connect(self.getNumbers)
self.b6.clicked.connect(self.getNumbers)
self.b7.clicked.connect(self.getNumbers)
self.b8.clicked.connect(self.getNumbers)
self.b9.clicked.connect(self.getNumbers)
self.b0.clicked.connect(self.getNumbers)
self.ok.clicked.connect(self.submit_inputs)
self.clr.clicked.connect(self.clear_text)
self.del_btn.clicked.connect(self.delete_text)
def getNumbers(self):
self.btn.name=self.sender().objectName()
self.btn.text=self.sender().text()
self.input_text+=self.btn.text
self.display.setText(self.input_text)
def delete_text(self):
self.input_text=self.input_text[:-1]
self.display.setText(self.input_text)
def clear_text(self):
self.display.clear()
self.input_text=""
def submit_inputs(self):
self.input_num.emit(self.input_text)
self.close()
def setTransparency(self,pos):
self.setWindowOpacity(pos)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
numInput=numberInput()
numInput.setWindowFlags(QtCore.Qt.FramelessWindowHint)
numInput.show()
sys.exit(app.exec_())
| 31.8 | 61 | 0.638365 | [
"MIT"
] | magxTz/udventVersion2.0 | udvent_reworked_v2_2_friday/input_Number.py | 2,226 | Python |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
import asyncio
from unittest import TestCase, main, mock
from magma.common.service_registry import ServiceRegistry
from magma.magmad.sync_rpc_client import SyncRPCClient
from orc8r.protos.sync_rpc_service_pb2 import (
GatewayRequest,
GatewayResponse,
SyncRPCRequest,
SyncRPCResponse,
)
class SyncRPCClientTests(TestCase):
"""
Tests for the SyncRPCClient
"""
def setUp(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self._loop = loop
self._sync_rpc_client = SyncRPCClient(loop=loop, response_timeout=3)
self._sync_rpc_client._conn_closed_table = {
12345: False
}
ServiceRegistry.add_service('test', '0.0.0.0', 0)
ServiceRegistry._PROXY_CONFIG = {'local_port': 2345,
'cloud_address': 'test',
'proxy_cloud_connections': True}
self._req_body = GatewayRequest(gwId="test id", authority='mobility',
path='/magma.MobilityService'
'/ListAddedIPv4Blocks',
headers={'te': 'trailers',
'content-type':
'application/grpc',
'user-agent':
'grpc-python/1.4.0',
'grpc-accept-encoding':
'identity'},
payload=bytes.fromhex('0000000000'))
self._expected_resp = GatewayResponse(status="400",
headers={"test_key": "test_val"},
payload=b'\x00'
b'\x00\x00\x00\n\n\x08')
self._expected_err_msg = "test error"
def test_forward_request_conn_closed(self):
self._sync_rpc_client.forward_request(
SyncRPCRequest(reqId=12345, connClosed=True))
self.assertEqual(self._sync_rpc_client._conn_closed_table[12345], True)
def test_send_sync_rpc_response(self):
expected = SyncRPCResponse(reqId=123, respBody=self._expected_resp)
self._sync_rpc_client._response_queue.put(expected)
res = self._sync_rpc_client.send_sync_rpc_response()
actual = next(res)
self.assertEqual(expected, actual)
expected = SyncRPCResponse(heartBeat=True)
actual = next(res)
self.assertEqual(expected, actual)
def test_retry_connect_sleep(self):
self._sync_rpc_client._current_delay = 0
for i in range(5):
self._sync_rpc_client._retry_connect_sleep()
if i == 4:
self.assertEqual(self._sync_rpc_client.RETRY_MAX_DELAY_SECS,
self._sync_rpc_client._current_delay)
else:
self.assertEqual(2 ** i, self._sync_rpc_client._current_delay)
def test_disconnect_sync_rpc_event(self):
disconnect_sync_rpc_event_mock = mock.patch(
'magma.magmad.events.disconnected_sync_rpc_stream')
with disconnect_sync_rpc_event_mock as disconnect_sync_rpc_streams:
self._sync_rpc_client._cleanup_and_reconnect()
disconnect_sync_rpc_streams.assert_called_once_with()
if __name__ == "__main__":
main()
| 41.907216 | 79 | 0.588438 | [
"BSD-3-Clause"
] | 0xa6e/magma | orc8r/gateway/python/magma/magmad/tests/sync_rpc_client_tests.py | 4,065 | Python |
from typing import Dict, List
from sortedcontainers import SortedDict
from shamrock.types.blockchain_format.coin import Coin
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.mempool_item import MempoolItem
class Mempool:
def __init__(self, max_size_in_cost: int):
self.spends: Dict[bytes32, MempoolItem] = {}
self.sorted_spends: SortedDict = SortedDict()
self.additions: Dict[bytes32, MempoolItem] = {}
self.removals: Dict[bytes32, MempoolItem] = {}
self.max_size_in_cost: int = max_size_in_cost
self.total_mempool_cost: int = 0
def get_min_fee_rate(self, cost: int) -> float:
"""
        Gets the minimum fee-per-cost (fpc) rate that a transaction with the specified cost will need in order to be included.
"""
if self.at_full_capacity(cost):
current_cost = self.total_mempool_cost
# Iterates through all spends in increasing fee per cost
for fee_per_cost, spends_with_fpc in self.sorted_spends.items():
for spend_name, item in spends_with_fpc.items():
current_cost -= item.cost
# Removing one at a time, until our transaction of size cost fits
if current_cost + cost <= self.max_size_in_cost:
return fee_per_cost
raise ValueError(
f"Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}"
)
else:
return 0
def remove_from_pool(self, item: MempoolItem):
"""
Removes an item from the mempool.
"""
removals: List[Coin] = item.removals
additions: List[Coin] = item.additions
for rem in removals:
del self.removals[rem.name()]
for add in additions:
del self.additions[add.name()]
del self.spends[item.name]
del self.sorted_spends[item.fee_per_cost][item.name]
dic = self.sorted_spends[item.fee_per_cost]
if len(dic.values()) == 0:
del self.sorted_spends[item.fee_per_cost]
self.total_mempool_cost -= item.cost
assert self.total_mempool_cost >= 0
def add_to_pool(
self,
item: MempoolItem,
):
"""
        Adds an item to the mempool. If it does not fit, existing transactions are evicted in order of increasing fee per cost until there is room.
"""
while self.at_full_capacity(item.cost):
# Val is Dict[hash, MempoolItem]
fee_per_cost, val = self.sorted_spends.peekitem(index=0)
to_remove = list(val.values())[0]
self.remove_from_pool(to_remove)
self.spends[item.name] = item
# sorted_spends is Dict[float, Dict[bytes32, MempoolItem]]
if item.fee_per_cost not in self.sorted_spends:
self.sorted_spends[item.fee_per_cost] = {}
self.sorted_spends[item.fee_per_cost][item.name] = item
for add in item.additions:
self.additions[add.name()] = item
for coin in item.removals:
self.removals[coin.name()] = item
self.total_mempool_cost += item.cost
def at_full_capacity(self, cost: int) -> bool:
"""
Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.
"""
return self.total_mempool_cost + cost > self.max_size_in_cost
| 37.315217 | 120 | 0.629187 | [
"Apache-2.0"
] | zcomputerwiz/shamrock-blockchain | shamrock/full_node/mempool.py | 3,433 | Python |
import time
import copy
import pickle
import warnings
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, auc
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
def get_scores(edges_pos, edges_neg, A_pred, adj_label):
# get logists and labels
preds = A_pred[edges_pos.T]
preds_neg = A_pred[edges_neg.T]
logists = np.hstack([preds, preds_neg])
labels = np.hstack([np.ones(preds.size(0)), np.zeros(preds_neg.size(0))])
# logists = A_pred.view(-1)
# labels = adj_label.to_dense().view(-1)
# calc scores
roc_auc = roc_auc_score(labels, logists)
ap_score = average_precision_score(labels, logists)
precisions, recalls, thresholds = precision_recall_curve(labels, logists)
pr_auc = auc(recalls, precisions)
warnings.simplefilter('ignore', RuntimeWarning)
f1s = np.nan_to_num(2*precisions*recalls/(precisions+recalls))
best_comb = np.argmax(f1s)
f1 = f1s[best_comb]
pre = precisions[best_comb]
rec = recalls[best_comb]
thresh = thresholds[best_comb]
# calc reconstracted adj_mat and accuracy with the threshold for best f1
adj_rec = copy.deepcopy(A_pred)
adj_rec[adj_rec < thresh] = 0
adj_rec[adj_rec >= thresh] = 1
labels_all = adj_label.to_dense().view(-1).long()
preds_all = adj_rec.view(-1).long()
recon_acc = (preds_all == labels_all).sum().float() / labels_all.size(0)
results = {'roc': roc_auc,
'pr': pr_auc,
'ap': ap_score,
'pre': pre,
'rec': rec,
'f1': f1,
'acc': recon_acc,
'adj_recon': adj_rec}
return results
def train_model(args, dl, vgae):
optimizer = torch.optim.Adam(vgae.parameters(), lr=args.lr)
# weights for log_lik loss
adj_t = dl.adj_train
norm_w = adj_t.shape[0]**2 / float((adj_t.shape[0]**2 - adj_t.sum()) * 2)
pos_weight = torch.FloatTensor([float(adj_t.shape[0]**2 - adj_t.sum()) / adj_t.sum()]).to(args.device)
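    # Because the adjacency matrix is sparse, edges (positives) are heavily outnumbered:
    # pos_weight = (#non-edges / #edges) up-weights positive entries in the BCE term, and
    # norm_w rescales the loss accordingly (the usual (V)GAE link-prediction weighting).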
# move input data and label to gpu if needed
features = dl.features.to(args.device)
adj_label = dl.adj_label.to_dense().to(args.device)
best_vali_criterion = 0.0
best_state_dict = None
vgae.train()
for epoch in range(args.epochs):
t = time.time()
A_pred = vgae(features)
optimizer.zero_grad()
loss = log_lik = norm_w*F.binary_cross_entropy_with_logits(A_pred, adj_label, pos_weight=pos_weight)
if not args.gae:
kl_divergence = 0.5/A_pred.size(0) * (1 + 2*vgae.logstd - vgae.mean**2 - torch.exp(2*vgae.logstd)).sum(1).mean()
loss -= kl_divergence
A_pred = torch.sigmoid(A_pred).detach().cpu()
r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
print('Epoch{:3}: train_loss: {:.4f} recon_acc: {:.4f} val_roc: {:.4f} val_ap: {:.4f} f1: {:.4f} time: {:.4f}'.format(
epoch+1, loss.item(), r['acc'], r['roc'], r['ap'], r['f1'], time.time()-t))
if r[args.criterion] > best_vali_criterion:
best_vali_criterion = r[args.criterion]
best_state_dict = copy.deepcopy(vgae.state_dict())
# r_test = get_scores(dl.test_edges, dl.test_edges_false, A_pred, dl.adj_label)
r_test = r
print(" test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
loss.backward()
optimizer.step()
print("Done! final results: test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
vgae.load_state_dict(best_state_dict)
return vgae
def gen_graphs(args, dl, vgae):
adj_orig = dl.adj_orig
assert adj_orig.diagonal().sum() == 0
# sp.csr_matrix
if args.gae:
pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0_gae.pkl', 'wb'))
else:
pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0.pkl', 'wb'))
# sp.lil_matrix
pickle.dump(dl.features_orig, open(f'graphs/{args.dataset}_features.pkl', 'wb'))
features = dl.features.to(args.device)
for i in range(args.gen_graphs):
with torch.no_grad():
A_pred = vgae(features)
A_pred = torch.sigmoid(A_pred).detach().cpu()
r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
adj_recon = A_pred.numpy()
np.fill_diagonal(adj_recon, 0)
# np.ndarray
if args.gae:
filename = f'graphs/{args.dataset}_graph_{i+1}_logits_gae.pkl'
else:
filename = f'graphs/{args.dataset}_graph_{i+1}_logits.pkl'
pickle.dump(adj_recon, open(filename, 'wb'))
| 41.770492 | 126 | 0.635597 | [
"MIT"
] | DM2-ND/GAug | vgae/utils.py | 5,096 | Python |
# -*- coding: utf-8 -*-
# @Author: Yanqi Gu
# @Date: 2019-04-20 16:30:52
# @Last Modified by: Yanqi Gu
# @Last Modified time: 2019-04-20 16:57:49
| 25 | 42 | 0.613333 | [
"MIT"
] | Guyanqi/DPC4.5 | DPDecisionTree/__init__.py | 150 | Python |
def is_alpha(c):
result = ord('A') <= ord(c.upper()) <= ord('Z')
return result
def is_ascii(c):
result = 0 <= ord(c) <= 127
return result
def is_ascii_extended(c):
result = 128 <= ord(c) <= 255
return result
| 16.857143 | 51 | 0.567797 | [
"MIT"
] | GoodPeopleAI/django-htk | utils/text/general.py | 236 | Python |
# Copyright (c) 2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of generally useful calculation tools."""
import functools
from operator import itemgetter
import numpy as np
from numpy.core.numeric import normalize_axis_index
import numpy.ma as ma
from scipy.spatial import cKDTree
import xarray as xr
from ..cbook import broadcast_indices, result_type
from ..interpolate import interpolate_1d, log_interpolate_1d
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, concatenate, diff, units
from ..xarray import check_axis, preprocess_xarray
exporter = Exporter(globals())
UND = 'UND'
UND_ANGLE = -999.
DIR_STRS = (
'N', 'NNE', 'NE', 'ENE',
'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW',
'W', 'WNW', 'NW', 'NNW',
UND
) # note the order matters!
MAX_DEGREE_ANGLE = 360 * units.degree
BASE_DEGREE_MULTIPLIER = 22.5 * units.degree
DIR_DICT = {dir_str: i * BASE_DEGREE_MULTIPLIER for i, dir_str in enumerate(DIR_STRS)}
DIR_DICT[UND] = np.nan
@exporter.export
@preprocess_xarray
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix
@exporter.export
@preprocess_xarray
def nearest_intersection_idx(a, b):
"""Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
"""
# Difference in the two y-value sets
difference = a - b
# Determine the point just before the intersection of the lines
# Will return multiple points for multiple intersections
sign_change_idx, = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
@exporter.export
@preprocess_xarray
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all', log_x=False):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
log_x : bool, optional
Use logarithmic interpolation along the `x` axis (i.e. for finding intersections
in pressure coordinates). Default is False.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Change x to logarithmic if log_x=True
if log_x is True:
x = np.log(x)
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
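    # Sketch of that algebra: parameterizing each line between (x0, .) and (x1, .) and
    # setting a(x) = b(x) yields x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0),
    # where delta_y denotes a - b evaluated at each bounding point.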
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
# If there's no intersections, return
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Return x to linear if log_x is True
if log_x is True:
intersect_x = np.exp(intersect_x)
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
def _delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
@exporter.export
@preprocess_xarray
def reduce_point_density(points, radius, priority=None):
r"""Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask). The points and radius can be
specified with units. If none are provided, meters are assumed.
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : `pint.Quantity` or float
Minimum radius allowed between points. If units are not provided, meters is assumed.
priority : (N, K) array-like, optional
If given, this should have the same shape as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True])
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False])
"""
# Handle input with units. Assume meters if units are not specified
if hasattr(radius, 'units'):
radius = radius.to('m').m
if hasattr(points, 'units'):
points = points.to('m').m
# Handle 1D input
if points.ndim < 2:
points = points.reshape(-1, 1)
# Make a kd-tree to speed searching of data.
tree = cKDTree(points)
# Need to use sorted indices rather than sorting the position
# so that the keep mask matches *original* order.
if priority is not None:
# Need to sort the locations in decreasing priority.
sorted_indices = np.argsort(priority)[::-1]
else:
# Take advantage of iterator nature of range here to avoid making big lists
sorted_indices = range(len(points))
# Keep all points initially
    keep = np.ones(len(points), dtype=bool)
# Loop over all the potential points
for ind in sorted_indices:
# Only proceed if we haven't already excluded this point
if keep[ind]:
# Find the neighbors and eliminate them
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
# We just removed ourselves, so undo that
keep[ind] = True
return keep
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
"""
# avoid circular import if basic.py ever imports something from tools.py
from .basic import height_to_pressure_std, pressure_to_height_std
# Make sure pressure is monotonically decreasing
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
if heights is not None:
heights = heights[sort_inds]
# Bound is given in pressure
if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
# If the bound is in the pressure data, we know the pressure bound exactly
if bound in pressure:
bound_pressure = bound
# If we have heights, we know the exact height value, otherwise return standard
# atmosphere height for the pressure
if heights is not None:
bound_height = heights[pressure == bound_pressure]
else:
bound_height = pressure_to_height_std(bound_pressure)
# If bound is not in the data, return the nearest or interpolated values
else:
if interpolate:
bound_pressure = bound # Use the user specified bound
if heights is not None: # Interpolate heights from the height data
bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
else: # If not heights given, use the standard atmosphere
bound_height = pressure_to_height_std(bound_pressure)
else: # No interpolation, find the closest values
idx = (np.abs(pressure - bound)).argmin()
bound_pressure = pressure[idx]
if heights is not None:
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
# Bound is given in height
elif bound.dimensionality == {'[length]': 1.0}:
# If there is height data, see if we have the bound or need to interpolate/find nearest
if heights is not None:
if bound in heights: # Bound is in the height data
bound_height = bound
bound_pressure = pressure[heights == bound]
else: # Bound is not in the data
if interpolate:
bound_height = bound
# Need to cast back to the input type since interp (up to at least numpy
# 1.13 always returns float64. This can cause upstream users problems,
# resulting in something like np.append() to upcast.
bound_pressure = (np.interp(np.atleast_1d(bound.m), heights.m,
pressure.m).astype(result_type(bound))
* pressure.units)
else:
idx = (np.abs(heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else: # Don't have heights, so assume a standard atmosphere
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
# If interpolation is on, this is all we need, if not, we need to go back and
# find the pressure closest to this and refigure the bounds
if not interpolate:
idx = (np.abs(pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
# Bound has invalid units
else:
raise ValueError('Bound must be specified in units of length or pressure.')
# If the bound is out of the range of the data, we shouldn't extrapolate
if not (_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)
and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)):
raise ValueError('Specified bound is outside pressure range.')
if heights is not None and not (_less_or_close(bound_height,
np.nanmax(heights.m) * heights.units)
and _greater_or_close(bound_height,
np.nanmin(heights.m)
* heights.units)):
raise ValueError('Specified bound is outside height range.')
return bound_pressure, bound_height
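# Illustrative sketch (hypothetical values): when the requested bound is already one of
# the pressure levels and no heights are supplied, the standard-atmosphere height is
# returned alongside it.
#
#     p = np.array([1000., 850., 700., 500.]) * units.hPa
#     _get_bound_pressure_height(p, 850. * units.hPa)
#     # -> (850 hPa, pressure_to_height_std(850 * units.hPa))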
@exporter.export
@preprocess_xarray
@check_units('[length]')
def get_layer_heights(heights, depth, *args, bottom=None, interpolate=True, with_agl=False):
"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
"""
# Make sure pressure and datavars are the same length
for datavar in args:
if len(heights) != len(datavar):
raise ValueError('Height and data variables must have the same length.')
# If we want things in AGL, subtract the minimum height from all height values
if with_agl:
sfc_height = np.min(heights)
heights = heights - sfc_height
# If the bottom is not specified, make it the surface
if bottom is None:
bottom = heights[0]
# Make heights and arguments base units
heights = heights.to_base_units()
bottom = bottom.to_base_units()
# Calculate the top of the layer
top = bottom + depth
ret = [] # returned data variables in layer
# Ensure heights are sorted in ascending order
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
# Mask based on top and bottom
inds = _greater_or_close(heights, bottom) & _less_or_close(heights, top)
heights_interp = heights[inds]
# Interpolate heights at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top not in heights_interp:
heights_interp = np.sort(np.append(heights_interp.m, top.m)) * heights.units
if bottom not in heights_interp:
heights_interp = np.sort(np.append(heights_interp.m, bottom.m)) * heights.units
ret.append(heights_interp)
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = interpolate_1d(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
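# Illustrative sketch (hypothetical values): subset a wind-speed profile to the lowest
# 1000 m; here both bounds are already in the height data, so nothing is interpolated.
#
#     h = np.array([0., 250., 500., 1000., 2000.]) * units.meter
#     s = np.array([5., 10., 15., 20., 30.]) * units('m/s')
#     h_layer, s_layer = get_layer_heights(h, 1000 * units.meter, s)
#     # h_layer spans 0-1000 m and s_layer holds the matching speeds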
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def get_layer(pressure, *args, heights=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
        heights calculated from ``pressure`` assuming a standard atmosphere [NOAA1976]_.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
"""
# If we get the depth kwarg, but it's None, set it to the default as well
if depth is None:
depth = 100 * units.hPa
# Make sure pressure and datavars are the same length
for datavar in args:
if len(pressure) != len(datavar):
raise ValueError('Pressure and data variables must have the same length.')
# If the bottom is not specified, make it the surface pressure
if bottom is None:
bottom = np.nanmax(pressure.m) * pressure.units
bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
heights=heights,
interpolate=interpolate)
    # Calculate the top in whatever units depth is given
if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
top = bottom_pressure - depth
elif depth.dimensionality == {'[length]': 1}:
top = bottom_height + depth
else:
raise ValueError('Depth must be specified in units of length or pressure')
top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
interpolate=interpolate)
ret = [] # returned data variables in layer
# Ensure pressures are sorted in ascending order
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
# Mask based on top and bottom pressure
inds = (_less_or_close(pressure, bottom_pressure)
& _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
# Interpolate pressures at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if not np.any(np.isclose(top_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units
if not np.any(np.isclose(bottom_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units
ret.append(p_interp[::-1])
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::-1])
return ret
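# Illustrative sketch (hypothetical sounding; temperature kept in kelvin to avoid pint's
# offset-unit restrictions):
#
#     p = np.array([1000., 925., 850., 700., 500.]) * units.hPa
#     t = np.array([295., 292., 288., 280., 265.]) * units.kelvin
#     p_layer, t_layer = get_layer(p, t, depth=300 * units.hPa)
#     # p_layer runs 1000-700 hPa (highest to lowest); 700 hPa is already in the
#     # data, so nothing needs to be interpolated.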
@exporter.export
@preprocess_xarray
def find_bounding_indices(arr, values, axis, from_below=True):
"""Find the indices surrounding the values within arr along axis.
Returns a set of above, below, good. Above and below are lists of arrays of indices.
These lists are formulated such that they can be used directly to index into a numpy
array and get the expected results (no extra slices or ellipsis necessary). `good` is
a boolean array indicating the "columns" that actually had values to bound the desired
value(s).
Parameters
----------
arr : array-like
Array to search for values
values: array-like
One or more values to search for in `arr`
axis : int
The dimension of `arr` along which to search.
from_below : bool, optional
Whether to search from "below" (i.e. low indices to high indices). If `False`,
the search will instead proceed from high indices to low indices. Defaults to `True`.
Returns
-------
above : list of arrays
List of broadcasted indices to the location above the desired value
below : list of arrays
List of broadcasted indices to the location below the desired value
good : array
Boolean array indicating where the search found proper bounds for the desired value
"""
# The shape of generated indices is the same as the input, but with the axis of interest
# replaced by the number of values to search for.
indices_shape = list(arr.shape)
indices_shape[axis] = len(values)
# Storage for the found indices and the mask for good locations
    indices = np.empty(indices_shape, dtype=int)
    good = np.empty(indices_shape, dtype=bool)
# Used to put the output in the proper location
store_slice = [slice(None)] * arr.ndim
# Loop over all of the values and for each, see where the value would be found from a
# linear search
for level_index, value in enumerate(values):
# Look for changes in the value of the test for <= value in consecutive points
# Taking abs() because we only care if there is a flip, not which direction.
        switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))
# Good points are those where it's not just 0's along the whole axis
good_search = np.any(switches, axis=axis)
if from_below:
# Look for the first switch; need to add 1 to the index since argmax is giving the
# index within the difference array, which is one smaller.
index = switches.argmax(axis=axis) + 1
else:
# Generate a list of slices to reverse the axis of interest so that searching from
# 0 to N is starting at the "top" of the axis.
arr_slice = [slice(None)] * arr.ndim
arr_slice[axis] = slice(None, None, -1)
# Same as above, but we use the slice to come from the end; then adjust those
# indices to measure from the front.
index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)
# Set all indices where the results are not good to 0
index[~good_search] = 0
# Put the results in the proper slice
store_slice[axis] = level_index
indices[tuple(store_slice)] = index
good[tuple(store_slice)] = good_search
# Create index values for broadcasting arrays
above = broadcast_indices(arr, indices, arr.ndim, axis)
below = broadcast_indices(arr, indices - 1, arr.ndim, axis)
return above, below, good
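# Illustrative sketch (hypothetical 1-D profile searched along axis 0):
#
#     arr = np.array([1000., 850., 700., 500.])
#     above, below, good = find_bounding_indices(arr, [800.], axis=0)
#     # above[0] -> array([2]) and below[0] -> array([1]), i.e. 800. falls between
#     # arr[1] = 850. and arr[2] = 700.; good -> array([True])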
def _greater_or_close(a, value, **kwargs):
r"""Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
"""
return (a > value) | np.isclose(a, value, **kwargs)
def _less_or_close(a, value, **kwargs):
r"""Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
"""
return (a < value) | np.isclose(a, value, **kwargs)
@exporter.export
@preprocess_xarray
def lat_lon_grid_deltas(longitude, latitude, **kwargs):
r"""Calculate the delta between grid points that are in a latitude/longitude format.
Calculate the signed delta distance between grid points when the grid spacing is defined by
delta lat/lon rather than delta x/y
Parameters
----------
longitude : array_like
array of longitudes defining the grid
latitude : array_like
array of latitudes defining the grid
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy:
at least two dimensional arrays of signed deltas between grid points in the x and y
direction
Notes
-----
Accepts 1D, 2D, or higher arrays for latitude and longitude
Assumes [..., Y, X] for >=2 dimensional arrays
"""
from pyproj import Geod
# Inputs must be the same number of dimensions
if latitude.ndim != longitude.ndim:
raise ValueError('Latitude and longitude must have the same number of dimensions.')
# If we were given 1D arrays, make a mesh grid
if latitude.ndim < 2:
longitude, latitude = np.meshgrid(longitude, latitude)
geod_args = {'ellps': 'sphere'}
if kwargs:
geod_args = kwargs
g = Geod(**geod_args)
forward_az, _, dy = g.inv(longitude[..., :-1, :], latitude[..., :-1, :],
longitude[..., 1:, :], latitude[..., 1:, :])
dy[(forward_az < -90.) | (forward_az > 90.)] *= -1
forward_az, _, dx = g.inv(longitude[..., :, :-1], latitude[..., :, :-1],
longitude[..., :, 1:], latitude[..., :, 1:])
dx[(forward_az < 0.) | (forward_az > 180.)] *= -1
return dx * units.meter, dy * units.meter
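# Illustrative sketch (small hypothetical grid): for 1-degree spacing near 40N the
# zonal deltas come out around 84-85 km and the meridional deltas around 111 km.
#
#     lon = np.array([-105., -104., -103.])
#     lat = np.array([40., 41.])
#     dx, dy = lat_lon_grid_deltas(lon, lat)
#     # dx has shape (2, 2) and dy has shape (1, 3), both carrying units of meters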
@exporter.export
def grid_deltas_from_dataarray(f):
"""Calculate the horizontal deltas between grid points of a DataArray.
Calculate the signed delta distance between grid points of a DataArray in the horizontal
directions, whether the grid is lat/lon or x/y.
Parameters
----------
f : `xarray.DataArray`
Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)
dimension order
Returns
-------
dx, dy:
arrays of signed deltas between grid points in the x and y directions with dimensions
matching those of `f`.
See Also
--------
lat_lon_grid_deltas
"""
if f.metpy.crs['grid_mapping_name'] == 'latitude_longitude':
dx, dy = lat_lon_grid_deltas(f.metpy.x, f.metpy.y,
initstring=f.metpy.cartopy_crs.proj4_init)
slc_x = slc_y = tuple([np.newaxis] * (f.ndim - 2) + [slice(None)] * 2)
else:
dx = np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m')
dy = np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m')
slc = [np.newaxis] * (f.ndim - 2)
slc_x = tuple(slc + [np.newaxis, slice(None)])
slc_y = tuple(slc + [slice(None), np.newaxis])
return dx[slc_x], dy[slc_y]
def xarray_derivative_wrap(func):
"""Decorate the derivative functions to make them work nicely with DataArrays.
This will automatically determine if the coordinates can be pulled directly from the
DataArray, or if a call to lat_lon_grid_deltas is needed.
"""
@functools.wraps(func)
def wrapper(f, **kwargs):
if 'x' in kwargs or 'delta' in kwargs:
# Use the usual DataArray to pint.Quantity preprocessing wrapper
return preprocess_xarray(func)(f, **kwargs)
elif isinstance(f, xr.DataArray):
# Get axis argument, defaulting to first dimension
axis = f.metpy.find_axis_name(kwargs.get('axis', 0))
# Initialize new kwargs with the axis number
new_kwargs = {'axis': f.get_axis_num(axis)}
if check_axis(f[axis], 'time'):
# Time coordinate, need to get time deltas
new_kwargs['delta'] = f[axis].metpy.time_deltas
elif check_axis(f[axis], 'longitude'):
# Longitude coordinate, need to get grid deltas
new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)
elif check_axis(f[axis], 'latitude'):
# Latitude coordinate, need to get grid deltas
_, new_kwargs['delta'] = grid_deltas_from_dataarray(f)
else:
# General coordinate, use as is
new_kwargs['x'] = f[axis].metpy.unit_array
# Calculate and return result as a DataArray
result = func(f.metpy.unit_array, **new_kwargs)
return xr.DataArray(result.magnitude,
coords=f.coords,
dims=f.dims,
attrs={'units': str(result.units)})
else:
# Error
raise ValueError('Must specify either "x" or "delta" for value positions when "f" '
'is not a DataArray.')
return wrapper
@exporter.export
@xarray_derivative_wrap
def first_derivative(f, **kwargs):
"""Calculate the first derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. Should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The first derivative calculated along the selected axis.
See Also
--------
second_derivative
"""
n, axis, delta = _process_deriv_args(f, kwargs)
# create slice objects --- initially all are [:, :, ..., :]
slice0 = [slice(None)] * n
slice1 = [slice(None)] * n
slice2 = [slice(None)] * n
delta_slice0 = [slice(None)] * n
delta_slice1 = [slice(None)] * n
# First handle centered case
slice0[axis] = slice(None, -2)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, -1)
delta_slice1[axis] = slice(1, None)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
delta_diff = delta[tuple(delta_slice1)] - delta[tuple(delta_slice0)]
center = (- delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])
* f[tuple(slice0)]
+ delta_diff / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
* f[tuple(slice1)]
+ delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])
* f[tuple(slice2)])
# Fill in "left" edge with forward difference
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
big_delta = combined_delta + delta[tuple(delta_slice0)]
left = (- big_delta / (combined_delta * delta[tuple(delta_slice0)])
* f[tuple(slice0)]
+ combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
* f[tuple(slice1)]
- delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])
* f[tuple(slice2)])
# Now the "right" edge with backward difference
slice0[axis] = slice(-3, -2)
slice1[axis] = slice(-2, -1)
slice2[axis] = slice(-1, None)
delta_slice0[axis] = slice(-2, -1)
delta_slice1[axis] = slice(-1, None)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
big_delta = combined_delta + delta[tuple(delta_slice1)]
right = (delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])
* f[tuple(slice0)]
- combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
* f[tuple(slice1)]
+ big_delta / (combined_delta * delta[tuple(delta_slice1)])
* f[tuple(slice2)])
return concatenate((left, center, right), axis=axis)
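# Illustrative sketch (hypothetical values): because the scheme is second order, the
# derivative of a quadratic sampled on a uniform grid is recovered exactly.
#
#     x = np.arange(4.)
#     first_derivative(x**2, x=x)
#     # -> array([0., 2., 4., 6.])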
@exporter.export
@xarray_derivative_wrap
def second_derivative(f, **kwargs):
"""Calculate the second derivative of a grid of values.
Works for both regularly-spaced data and grids with varying spacing.
Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with
attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or
`delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned
as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached
coordinate information belonging to `axis` will be used and the derivative will be returned
as an `xarray.DataArray`.
This uses 3 points to calculate the derivative, using forward or backward at the edges of
the grid as appropriate, and centered elsewhere. The irregular spacing is handled
explicitly, using the formulation as specified by [Bowen2005]_.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
axis : int or str, optional
The array axis along which to take the derivative. If `f` is ndarray-like, must be an
integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate
dimension name or the axis type) or integer (referring to axis number), unless using
implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults
to 0.
x : array-like, optional
The coordinate values corresponding to the grid points in `f`.
delta : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along `axis`.
Returns
-------
array-like
The second derivative calculated along the selected axis.
See Also
--------
first_derivative
"""
n, axis, delta = _process_deriv_args(f, kwargs)
# create slice objects --- initially all are [:, :, ..., :]
slice0 = [slice(None)] * n
slice1 = [slice(None)] * n
slice2 = [slice(None)] * n
delta_slice0 = [slice(None)] * n
delta_slice1 = [slice(None)] * n
# First handle centered case
slice0[axis] = slice(None, -2)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
delta_slice0[axis] = slice(None, -1)
delta_slice1[axis] = slice(1, None)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
center = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
- f[tuple(slice1)] / (delta[tuple(delta_slice0)]
* delta[tuple(delta_slice1)])
+ f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))
# Fill in "left" edge
slice0[axis] = slice(None, 1)
slice1[axis] = slice(1, 2)
slice2[axis] = slice(2, 3)
delta_slice0[axis] = slice(None, 1)
delta_slice1[axis] = slice(1, 2)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
left = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
- f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
+ f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))
# Now the "right" edge
slice0[axis] = slice(-3, -2)
slice1[axis] = slice(-2, -1)
slice2[axis] = slice(-1, None)
delta_slice0[axis] = slice(-2, -1)
delta_slice1[axis] = slice(-1, None)
combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]
right = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])
- f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])
+ f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))
return concatenate((left, center, right), axis=axis)
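# Illustrative sketch (hypothetical values): the same quadratic as above gives a
# constant second derivative.
#
#     x = np.arange(4.)
#     second_derivative(x**2, x=x)
#     # -> array([2., 2., 2., 2.])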
@exporter.export
def gradient(f, **kwargs):
"""Calculate the gradient of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
Sequence of arrays containing the coordinate values corresponding to the
grid points in `f` in axis order.
deltas : array-like, optional
Sequence of arrays or scalars that specify the spacing between the grid points in `f`
in axis order. There should be one item less than the size of `f` along the applicable
axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
tuple of array-like
The first derivative calculated along each specified axis of the original array
See Also
--------
laplacian, first_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
"""
pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
return tuple(first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
for ind, axis in enumerate(axes))
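# Illustrative sketch (hypothetical 2-D field on a uniform 1 km grid):
#
#     field = np.arange(9.).reshape(3, 3) * units.kelvin
#     ddy, ddx = gradient(field, deltas=(1000. * units.m, 1000. * units.m))
#     # derivatives are returned in axis order: 0.003 K/m everywhere along axis 0
#     # and 0.001 K/m everywhere along axis 1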
@exporter.export
def laplacian(f, **kwargs):
"""Calculate the laplacian of a grid of values.
Works for both regularly-spaced data, and grids with varying spacing.
Either `coordinates` or `deltas` must be specified, or `f` must be given as an
`xarray.DataArray` with attached coordinate and projection information. If `f` is an
`xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a
`pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if
neither `coordinates` nor `deltas` are given, the attached coordinate information belonging
to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.
Parameters
----------
f : array-like
Array of values of which to calculate the derivative
coordinates : array-like, optional
The coordinate values corresponding to the grid points in `f`
deltas : array-like, optional
Spacing between the grid points in `f`. There should be one item less than the size
of `f` along the applicable axis.
axes : sequence, optional
Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to
`pint.Quantity` is not used) or integers that specify the array axes along which to
take the derivatives. Defaults to all axes of `f`. If given, and used with
`coordinates` or `deltas`, its length must be less than or equal to that of the
`coordinates` or `deltas` given.
Returns
-------
array-like
The laplacian
See Also
--------
gradient, second_derivative
Notes
-----
If this function is used without the `axes` parameter, the length of `coordinates` or
`deltas` (as applicable) should match the number of dimensions of `f`.
"""
pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)
derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})
for ind, axis in enumerate(axes)]
laplac = sum(derivs)
if isinstance(derivs[0], xr.DataArray):
# Patch in the units that are dropped
laplac.attrs['units'] = derivs[0].attrs['units']
return laplac
def _broadcast_to_axis(arr, axis, ndim):
"""Handle reshaping coordinate array to have proper dimensionality.
This puts the values along the specified axis.
"""
if arr.ndim == 1 and arr.ndim < ndim:
new_shape = [1] * ndim
new_shape[axis] = arr.size
arr = arr.reshape(*new_shape)
return arr
def _process_gradient_args(f, kwargs):
"""Handle common processing of arguments for gradient and gradient-like functions."""
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if 'axes' in kwargs and len(positions) < len(axes):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
'of "axes".')
elif 'axes' not in kwargs and len(positions) != len(axes):
raise ValueError('Length of "coordinates" or "deltas" must match the number of '
'dimensions of "f" when "axes" is not given.')
if 'deltas' in kwargs:
if 'coordinates' in kwargs or 'x' in kwargs:
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return 'delta', kwargs['deltas'], axes
elif 'coordinates' in kwargs:
_check_length(kwargs['coordinates'])
return 'x', kwargs['coordinates'], axes
elif isinstance(f, xr.DataArray):
return 'pass', axes, axes # only the axis argument matters
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
'when "f" is not a DataArray.')
def _process_deriv_args(f, kwargs):
"""Handle common processing of arguments for derivative functions."""
n = f.ndim
axis = normalize_axis_index(kwargs.get('axis', 0), n)
if f.shape[axis] < 3:
        raise ValueError('f must have at least 3 points along the desired axis.')
if 'delta' in kwargs:
if 'x' in kwargs:
raise ValueError('Cannot specify both "x" and "delta".')
delta = atleast_1d(kwargs['delta'])
if delta.size == 1:
diff_size = list(f.shape)
diff_size[axis] -= 1
delta_units = getattr(delta, 'units', None)
delta = np.broadcast_to(delta, diff_size, subok=True)
if not hasattr(delta, 'units') and delta_units is not None:
delta = delta * delta_units
else:
delta = _broadcast_to_axis(delta, axis, n)
elif 'x' in kwargs:
x = _broadcast_to_axis(kwargs['x'], axis, n)
delta = diff(x, axis=axis)
else:
raise ValueError('Must specify either "x" or "delta" for value positions.')
return n, axis, delta
@exporter.export
@preprocess_xarray
def parse_angle(input_dir):
"""Calculate the meteorological angle from directional text.
    Works for abbreviations or whole words (E -> 90 | South -> 180)
    and is also able to parse 22.5 degree angles such as ESE/East South East.
Parameters
----------
input_dir : string or array-like
Directional text such as west, [south-west, ne], etc
Returns
-------
`pint.Quantity`
The angle in degrees
"""
if isinstance(input_dir, str):
        # abb_dirs = abbreviated directions
abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])
elif hasattr(input_dir, '__len__'): # handle np.array, pd.Series, list, and array-like
input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True))
abb_dir_str = _abbrieviate_direction(input_dir_str)
abb_dirs = _clean_direction(abb_dir_str.split(','))
else: # handle unrecognizable scalar
return np.nan
return itemgetter(*abb_dirs)(DIR_DICT)
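# Illustrative sketch (assumes the 16-point DIR_STRS/DIR_DICT compass mapping defined
# earlier in this module):
#
#     parse_angle('NE')             # -> 45 degrees
#     parse_angle(['west', 'SSE'])  # -> (270 degrees, 157.5 degrees)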
def _clean_direction(dir_list, preprocess=False):
"""Handle None if preprocess, else handles anything not in DIR_STRS."""
if preprocess: # primarily to remove None from list so ','.join works
return [UND if not isinstance(the_dir, str) else the_dir
for the_dir in dir_list]
    else:  # remove extraneous abbreviated directions
return [UND if the_dir not in DIR_STRS else the_dir
for the_dir in dir_list]
def _abbrieviate_direction(ext_dir_str):
"""Convert extended (non-abbrievated) directions to abbrieviation."""
return (ext_dir_str
.upper()
.replace('_', '')
.replace('-', '')
.replace(' ', '')
.replace('NORTH', 'N')
.replace('EAST', 'E')
.replace('SOUTH', 'S')
.replace('WEST', 'W')
)
@exporter.export
@preprocess_xarray
def angle_to_direction(input_angle, full=False, level=3):
"""Convert the meteorological angle to directional text.
Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)
and rounds to the nearest angle (355 -> N | 404 -> NNE)
Parameters
----------
input_angle : numeric or array-like numeric
Angles such as 0, 25, 45, 360, 410, etc
full : boolean
        True returns full text (South), False returns abbreviated text (S)
level : int
Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 1 = N/E/S/W)
Returns
-------
direction
The directional text
"""
try: # strip units temporarily
origin_units = input_angle.units
input_angle = input_angle.m
except AttributeError: # no units associated
origin_units = units.degree
if not hasattr(input_angle, '__len__') or isinstance(input_angle, str):
input_angle = [input_angle]
scalar = True
else:
scalar = False
# clean any numeric strings, negatives, and None
# does not handle strings with alphabet
input_angle = np.array(input_angle).astype(float)
with np.errstate(invalid='ignore'): # warns about the np.nan
input_angle[np.where(input_angle < 0)] = np.nan
input_angle = input_angle * origin_units
# normalizer used for angles > 360 degree to normalize between 0 - 360
normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int)
norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer)
if level == 3:
nskip = 1
elif level == 2:
nskip = 2
elif level == 1:
nskip = 4
else:
err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'
raise ValueError(err_msg)
angle_dict = {i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str
for i, dir_str in enumerate(DIR_STRS[::nskip])}
angle_dict[MAX_DEGREE_ANGLE.m] = 'N' # handle edge case of 360.
angle_dict[UND_ANGLE] = UND
# round to the nearest angles for dict lookup
# 0.001 is subtracted so there's an equal number of dir_str from
# np.arange(0, 360, 22.5), or else some dir_str will be preferred
# without the 0.001, level=2 would yield:
# ['N', 'N', 'NE', 'E', 'E', 'E', 'SE', 'S', 'S',
# 'S', 'SW', 'W', 'W', 'W', 'NW', 'N']
# with the -0.001, level=2 would yield:
# ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
# 'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW']
multiplier = np.round(
(norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m
round_angles = (multiplier * BASE_DEGREE_MULTIPLIER.m * nskip)
round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE
dir_str_arr = itemgetter(*round_angles)(angle_dict) # for array
if full:
dir_str_arr = ','.join(dir_str_arr)
dir_str_arr = _unabbrieviate_direction(dir_str_arr)
if not scalar:
dir_str = dir_str_arr.split(',')
else:
dir_str = dir_str_arr.replace(',', ' ')
else:
dir_str = dir_str_arr
return dir_str
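# Illustrative sketch (hypothetical angles):
#
#     angle_to_direction(225.)                  # -> 'SW'
#     angle_to_direction(225., full=True)       # -> 'South West'
#     angle_to_direction([20., 370.], level=1)  # -> ('N', 'N')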
def _unabbrieviate_direction(abb_dir_str):
"""Convert abbrieviated directions to non-abbrieviated direction."""
return (abb_dir_str
.upper()
.replace(UND, 'Undefined ')
.replace('N', 'North ')
.replace('E', 'East ')
.replace('S', 'South ')
.replace('W', 'West ')
.replace(' ,', ',')
).strip()
def _remove_nans(*variables):
"""Remove NaNs from arrays that cause issues with calculations.
    Takes a variable number of array arguments and returns them in the same order as
    provided, with any position that is NaN in any input removed from all of them.
"""
mask = None
for v in variables:
if mask is None:
mask = np.isnan(v)
else:
mask |= np.isnan(v)
# Mask everyone with that joint mask
ret = []
for v in variables:
ret.append(v[~mask])
return ret
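# Illustrative sketch (hypothetical values): positions that are NaN in either input
# are dropped from both.
#
#     p = np.array([1000., np.nan, 850.])
#     t = np.array([290., 285., np.nan])
#     _remove_nans(p, t)
#     # -> [array([1000.]), array([290.])]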
| 37.743357 | 95 | 0.640413 | ["BSD-3-Clause"] | Exi666/MetPy | src/metpy/calc/tools.py | 53,973 | Python |
# Copyright 2020-2022 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional, Union, Dict
from pytket.backends import ResultHandle, StatusEnum
from pytket.circuit import Circuit # type: ignore
from .common import _QsharpBaseBackend, BackendResult
if TYPE_CHECKING:
from typing import MutableMapping
from qsharp.loader import QSharpCallable # type: ignore
ResourcesResult = Dict[str, int]
class QsharpEstimatorBackend(_QsharpBaseBackend):
"""Backend for estimating resources of a circuit using the QDK."""
def _calculate_results(
self, qscall: "QSharpCallable", n_shots: Optional[int] = None
) -> Union[BackendResult, "MutableMapping"]:
results = qscall.estimate_resources()
results["Measure"] = 0 # Measures were added by qscompile()
return results # type: ignore
def get_resources(self, circuit: Union[Circuit, ResultHandle]) -> ResourcesResult:
"""Calculate resource estimates for circuit.
:param circuit: Circuit to calculate or result handle to retrieve for
:type circuit: Union[Circuit, ResultHandle]
:return: Resource estimate
:rtype: Dict[str, int]
"""
if isinstance(circuit, Circuit):
handle = self.process_circuits([circuit])[0]
elif isinstance(circuit, ResultHandle):
handle = circuit
circ_status = self.circuit_status(handle)
if circ_status.status is not StatusEnum.COMPLETED:
raise ValueError(f"Handle is '{circ_status}'")
else:
raise TypeError(
"Provide either a Circuit to run or a ResultHandle to a previously "
"submitted circuit."
)
return self._cache[handle]["resource"] # type: ignore
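# Illustrative usage sketch (hypothetical circuit; assumes a working QDK/Q# install
# behind the backend, and the exact resource keys depend on the QDK version):
#
#     from pytket.circuit import Circuit
#     backend = QsharpEstimatorBackend()
#     circ = Circuit(2).H(0).CX(0, 1).measure_all()
#     resources = backend.get_resources(circ)
#     # e.g. resources may report counts such as "CNOT", "T" and "Measure"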
| 38.95 | 86 | 0.694052 | ["Apache-2.0"] | dhaycraft/pytket-extensions | modules/pytket-qsharp/pytket/extensions/qsharp/backends/estimator.py | 2,337 | Python |
# Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module_value1 = 5
module_value2 = 3
def calledRepeatedly():
# Force frame and eliminate forward propagation (currently).
module_value1
# Make sure we have a local variable x anyway
x = 2
local_value = module_value1
# construct_begin
for x in range(local_value, local_value+3):
pass
# construct_end
import itertools
for x in itertools.repeat(None, 50000):
calledRepeatedly()
print("OK.")
| 30.813953 | 78 | 0.712453 | ["Apache-2.0"] | CoyoteLeo/Nuitka | tests/benchmarks/constructs/LoopSmallRange.py | 1,325 | Python |
# Client
import socket
HOST = socket.gethostname()  # Server IP address
PORT = 9999  # Port the server is listening on
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
dest = (HOST, PORT)
print('Para sair, digite $ + Enter')
msg = input("Entre com a mensagem:\n")
udp.sendto (msg.encode('utf-8'), dest)
while msg != '$':  # program terminates when the user sends '$'
(msg, servidor) = udp.recvfrom(1024)
print(servidor, msg.decode('utf-8'))
msg = input()
udp.sendto (msg.encode('utf-8'), dest)
udp.close()
| 33.666667 | 54 | 0.679208 | ["MIT"] | LC-ardovino/INFNET | Projeto_de_Bloco/Etapa8/Ex03_cli.py | 507 | Python |
import sys
import random
import numpy as np
import cv2
src = cv2.imread('vlcsnap-2021-02-04-10h00m02s260.png')
#src = cv2.imread('2_11_11.png')
if src is None:
print('Image load failed!')
sys.exit()
src = cv2.resize(src, (0, 0), fx=0.5, fy=0.5)
src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
h, w = src.shape[:2]
dst1 = np.zeros((h, w, 3), np.uint8)
dst2 = np.zeros((h, w, 3), np.uint8)
# Binarization
_, src_bin = cv2.threshold(src_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# Contour detection
contours, _ = cv2.findContours(src_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for i in range(len(contours)):
pts = contours[i]
c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
cv2.drawContours(dst1, contours, i, c, 1)
    # Exclude objects that are too small
if (cv2.contourArea(pts) < 1000):
continue
    # Approximate the contour
approx = cv2.approxPolyDP(pts, cv2.arcLength(pts, True)*0.02, True)
    # Exclude contours that are not convex
if not cv2.isContourConvex(approx):
continue
if len(approx) == 4:
cv2.drawContours(dst2, contours, i, c, 2)
print(contours)
cv2.imshow('src', src)
cv2.imshow('src_bin', src_bin)
cv2.imshow('dst1', dst1)
cv2.imshow('dst2', dst2)
cv2.waitKey()
cv2.destroyAllWindows()
| 22.285714 | 81 | 0.653846 | ["MIT"] | qwerlarlgus/YOLO_Projec | contours.py | 1,312 | Python |
from typing import Literal
EventType = Literal['call', 'line', 'return']
| 15 | 45 | 0.706667 | ["BSD-3-Clause"] | Web-Dev-Collaborative/nbtutor | nbtutor/ipython/models/types.py | 75 | Python |
from rqalpha.api import *
def init(context):
context.S1 = "510500.XSHG"
context.UNIT = 10000
context.INIT_S = 2
context.MARGIN = 0.08
context.FIRST_P = 0
context.holdid = 0
context.sellcount = 0
context.inited = False
logger.info("RunInfo: {}".format(context.run_info))
def before_trading(context):
pass
def current_p(context):
return context.FIRST_P - ((context.holdid * context.MARGIN) * context.FIRST_P)
def next_buy_p(context):
if context.portfolio.cash < context.UNIT:
return -1
return context.FIRST_P - (((context.holdid + 1) * context.MARGIN) * context.FIRST_P)
def next_sell_p(context):
if context.portfolio.market_value < context.UNIT:
return -1
return context.FIRST_P - (((context.holdid - 1) * context.MARGIN) * context.FIRST_P)
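# Worked example of the grid spacing (hypothetical numbers): with FIRST_P = 5.00 and
# MARGIN = 0.08, buys are placed at 5.00 * (1 - 0.08) = 4.60, 5.00 * (1 - 0.16) = 4.20,
# and so on; each fill moves holdid one rung, and next_sell_p is simply one rung above
# the current position.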
def handle_bar(context, bar_dict):
bar = bar_dict[context.S1]
if context.inited is True:
nextB = next_buy_p(context)
nextS = next_sell_p(context)
if context.inited is False:
context.inited = True
order_value(context.S1, context.UNIT * context.INIT_S, price=bar.close)
context.current_cash = 0
context.holdid = 0
context.FIRST_P = bar.open
logger.info("Make first fire portfolio: {}".format(context.portfolio))
elif bar.low <= nextB <= bar.high:
res = order_value(context.S1, context.UNIT, nextB)
if res.status == ORDER_STATUS.FILLED:
context.holdid += 1
else:
logger.info("Buy failed: {}".format(res))
elif bar.high < nextB:
res = order_value(context.S1, context.UNIT, price=bar.high)
if res.status == ORDER_STATUS.FILLED:
context.holdid += 1
else:
logger.info("Buy failed: {}".format(res))
elif bar.low <= nextS <= bar.high:
res = order_value(context.S1, -1 * context.UNIT, price=nextS)
if res.status == ORDER_STATUS.FILLED:
context.holdid -= 1
context.sellcount += 1
logger.info("----- Sell count: {}".format(context.sellcount))
else:
logger.info("Sell failed: {}".format(res))
elif nextS != -1 and bar.low > nextS:
res = order_value(context.S1, -1 * context.UNIT, price=bar.low)
if res.status == ORDER_STATUS.FILLED:
context.holdid -= 1
context.sellcount += 1
logger.info("----- Sell count: {}".format(context.sellcount))
else:
logger.info("Sell failed: {}".format(res))
def after_trading(context):
logger.info("Hold count: {}".format(context.holdid + 1))
profit = (context.portfolio.cash + context.portfolio.market_value - context.portfolio.starting_cash)
profit_pct = profit / (context.portfolio.market_value - profit)
logger.info("after_trading: market_value {}, profit {}, percent {}".
format(context.portfolio.market_value, profit, profit_pct))
| 33.534091 | 104 | 0.626567 | ["Apache-2.0"] | HackReborn/rqalpha | rqalpha/examples/mg_same_value.py | 2,951 | Python |
from app.db import with_session
from logic.admin import get_query_metastore_by_id
def get_metastore_loader_class_by_name(name: str):
from .loaders import ALL_METASTORE_LOADERS
for loader in ALL_METASTORE_LOADERS:
if loader.__name__ == name:
return loader
raise ValueError(f"Unknown loader name {name}")
@with_session
def get_metastore_loader(metastore_id: int, session=None):
metastore = get_query_metastore_by_id(id=metastore_id, session=session)
metastore_dict = metastore.to_dict_admin()
return get_metastore_loader_class_by_name(metastore_dict["loader"])(metastore_dict)
def load_metastore(metastore_id: int):
loader = get_metastore_loader(metastore_id)
loader.load()
| 29.28 | 87 | 0.775956 | ["Apache-2.0"] | Aka-shi/querybook | querybook/server/lib/metastore/__init__.py | 732 | Python |
import rain_alert
| 7 | 18 | 0.761905 | ["Unlicense"] | frnkvsk/python100days | day35_env_sms/main.py | 21 | Python |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
package = "asm_challenge"
namespace = []
namespace_pkg = ".".join([namespace[-1], package]) if namespace else package
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'asm_challenge'
copyright = u'2015, Jose Luis Bellod Cisneros'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from namespace_pkg import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'asm_challenge-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'user_guide.tex', u'asm_challenge Documentation',
u'Jose Luis Bellod Cisneros', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ---------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| 33.65272 | 85 | 0.708566 | ["Apache-2.0"] | josl/ASM_challenge | docs/conf.py | 8,043 | Python |
# -*- coding: utf-8 -*-
import scrapy
from city_scrapers.spider import Spider
from datetime import datetime, timedelta
from dateutil.parser import parse as dateparse
import re
class Chi_school_community_action_councilSpider(Spider):
name = 'chi_school_community_action_council'
long_name = 'Chicago Public Schools Community Action Council'
allowed_domains = ['cps.edu']
start_urls = ['http://cps.edu/FACE/Pages/CAC.aspx']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
month_counter = datetime.today().month # Sets month counter to the current month, which is passed to parse_start
for x in range(12): # iterates through every month in the year after the current month
if month_counter > 12:
break
else:
for item in response.css("ul").css('li')[17:]:
try:
if item.css("strong").css("a::attr(href)").extract()[0] == 'http://www.humboldtparkportal.org/':
continue
except:
pass
data = {
'_type': 'event',
'name': self._parse_name(item),
'description': self._parse_description(item),
'classification': self._parse_classification(item),
'start_time': self._parse_start(item, month_counter),
'end_time': self._parse_end(item),
'timezone': self._parse_timezone(item),
'status': self._parse_status(item),
'all_day': self._parse_all_day(item),
'location': self._parse_location(item),
'sources': self._parse_sources(response),
'community_area' : self._parse_community_area(item)
}
data['id'] = self._generate_id(data)
data['end_time'] = data['start_time'] + timedelta(hours=3) #adds 3 hours to start time
yield data
month_counter += 1 # month counter is increased by 1 month with each iteration of the for loop
# self._parse_next(response) yields more responses to parse if necessary.
# uncomment to find a "next" url
# yield self._parse_next(response)
def _parse_community_area(self, item):
"""
Parse or generate community area.
"""
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0]
def _parse_name(self, item):
"""
Parse or generate event name.
"""
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0] + ' Community Action Council'
def _parse_description(self, item):
"""
Parse or generate event description.
"""
return "Community Action Councils, or CACs, consist of 25-30 voting members who are " \
"directly involved in developing a strategic plan for educational success within " \
"their communities. CAC members include parents; elected officials; faith-based " \
"institutions, health care and community-based organizations; Local School" \
" Council (LSC) members; business leaders; educators and school administrators; " \
"staff members from Chicago's Sister Agencies; community residents; " \
"and students. There are nine CACs across Chicago. Each works to empower the " \
"community they serve to lead the improvement of local quality education."
def _parse_classification(self, item):
"""
Parse or generate classification (e.g. public health, education, etc).
"""
return 'Education'
def _parse_start(self, item, month_counter):
"""
Parse start date and time.
Accepts month_counter as an argument from top level parse function to iterate through all months in the year.
"""
def parse_day(source):
'''Parses the source material and retrieves the day of the week that the meeting occurs.
'''
day_source = source[0]
day_regex = re.compile(r'[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
def parse_time(source):
'''Parses the source material and retrieves the time that the meeting occurs.
'''
time_source = source[1]
time_regex = re.compile(r'(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
def count_days(day, week_count):
            '''Because the source material provides meeting dates on a recurring schedule, we must use the
            parsed day from parse_day and the week count to find the actual calendar date of the meeting.'''
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5,
'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x) #uses month counter from top level parse func.
if current_date.weekday() == week_day[day]:
week_counter += 1
if week_counter == int(week_count):
return current_date
except ValueError as e: # will break loop if range exceeds the number of days in the month
break
def concat_date(meeting_date, time):
            '''Combines the meeting date with the time the meeting occurs. Returns a datetime
            object.
'''
return dateparse(
str(meeting_date.year) + '-' + str(meeting_date.month) + '-' + str(meeting_date.day) + ' ' + time)
def get_start(source):
'''Combines above defined parse_day, parse_time, count_days, and concat_date functions to get the start
date from the source. If a start time cannot be found the UNIX epoch date is returned.
'''
day = parse_day(source)
week_count = source[0].strip()[
0] # selects first character in the source, which is usually the week count
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
            else:
                # Fall back to the UNIX epoch date, as documented above, when the
                # week count cannot be parsed
                start = datetime(1970, 1, 1)
            return start
source = item.css('li::text').extract()
return get_start(source)
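    # Illustrative sketch (hypothetical scraped text): for a list item whose first text
    # node reads "2nd Tuesday of every month" and whose second node contains
    # "6:00pm at the Example Center (123 Main St.)", parse_day yields 'tuesday', the
    # leading '2' is taken as the week count, parse_time yields '6:00pm', and
    # count_days walks the calendar to the 2nd Tuesday of the month in month_counter.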
def _parse_end(self, item):
"""
Parse end date and time.
"""
return 'Estimated 3 hours'
def _parse_timezone(self, item):
"""
Parse or generate timzone in tzinfo format.
"""
return 'America/Chicago'
def _parse_all_day(self, item):
"""
Parse or generate all-day status. Defaults to False.
"""
return False
def _parse_location(self, item):
"""
Parse or generate location. Latitude and longitude can be
left blank and will be geocoded later.
"""
source = item.css('li::text').extract()[1]
return {
'url': None,
'name': source[source.find("at")+2:source.find("(")].replace('the', ''),
'address': source[source.find("(")+1:source.find(")")],
'coordinates': {
'latitude': None,
'longitude': None,
},
}
def _parse_status(self, item):
"""
Parse or generate status of meeting. Can be one of:
* cancelled
* tentative
* confirmed
* passed
By default, return "tentative"
"""
        return 'tentative'
def _parse_sources(self, response):
"""
Parse or generate sources.
"""
return [{
'url': response.url,
'note': '',
}]
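# Illustrative sketch (not part of the spider): the date resolution performed by
# count_days() above boils down to "find the n-th <weekday> of a month". A
# standalone version of that idea, using only the standard library, might look like:
#
#     from datetime import datetime
#
#     def nth_weekday(year, month, weekday, n):
#         """Return the n-th occurrence of `weekday` (0=Monday) in the month, or None."""
#         hits = 0
#         for day in range(1, 32):
#             try:
#                 candidate = datetime(year, month, day)
#             except ValueError:  # walked past the end of the month
#                 return None
#             if candidate.weekday() == weekday:
#                 hits += 1
#                 if hits == n:
#                     return candidate
#
#     # nth_weekday(2018, 5, 1, 2) -> datetime(2018, 5, 8), the 2nd Tuesday of May 2018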
| 41.277778 | 120 | 0.549237 | [
"MIT"
] | jim/city-scrapers | city_scrapers/spiders/chi_school_community_action_council.py | 8,916 | Python |
import logging
LOG = logging.getLogger(__name__)
class FlushAndLockMySQLAction(object):
def __init__(self, client, extra_flush=True):
self.client = client
self.extra_flush = extra_flush
def __call__(self, event, snapshot_fsm, snapshot_vol):
if event == 'pre-snapshot':
if self.extra_flush:
LOG.debug("Executing FLUSH TABLES")
self.client.flush_tables()
LOG.debug("Executing FLUSH TABLES WITH READ LOCK")
LOG.info("Acquiring read-lock and flushing tables")
self.client.flush_tables_with_read_lock()
elif event == 'post-snapshot':
LOG.info("Releasing read-lock")
self.client.unlock_tables()
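# Illustrative usage sketch (hypothetical objects): the action is registered as a
# callback around a snapshot and invoked with an event name, e.g.
#
#     action = FlushAndLockMySQLAction(mysql_client, extra_flush=True)
#     action('pre-snapshot', snapshot_fsm, snapshot_vol)   # flush tables + acquire read lock
#     ...                                                  # take the LVM snapshot here
#     action('post-snapshot', snapshot_fsm, snapshot_vol)  # release the read lock
#
# where ``mysql_client`` is assumed to expose flush_tables(),
# flush_tables_with_read_lock() and unlock_tables() as used above.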
| 35.047619 | 63 | 0.634511 | [
"BSD-3-Clause"
] | a5a351e7/holland | plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/actions/mysql/lock.py | 736 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved. #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list all credentials for a registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import registries
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.core.resource import resource_projector
class List(base.ListCommand):
"""List credentials for a registry."""
@staticmethod
def Args(parser):
parser.display_info.AddFormat(
'table(index, publicKeyCertificate.format,'
'publicKeyCertificate.x509Details.issuer,'
'publicKeyCertificate.x509Details.subject,'
'publicKeyCertificate.x509Details.startTime,'
'publicKeyCertificate.x509Details.expiryTime,'
'publicKeyCertificate.x509Details.signatureAlgorithm,'
'publicKeyCertificate.x509Details.publicKeyType)')
base.URI_FLAG.RemoveFromParser(parser)
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
resource_args.AddRegistryResourceArg(parser,
'for which to list credentials',
positional=False)
def Run(self, args):
"""Run the list command."""
client = registries.RegistriesClient()
registry_ref = args.CONCEPTS.registry.Parse()
registry = client.Get(registry_ref)
for idx, credential in enumerate(registry.credentials):
serializable = resource_projector.MakeSerializable(credential)
serializable['index'] = idx
yield serializable
| 37.724138 | 74 | 0.73309 | [
"Apache-2.0"
] | bshaffer/google-cloud-sdk | lib/surface/iot/registries/credentials/list.py | 2,188 | Python |
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tabular.mlp._layers import MLP
from pytorch_widedeep.models.tabular._base_tabular_model import (
BaseTabularModelWithAttention,
)
from pytorch_widedeep.models.tabular.transformers._encoders import SaintEncoder
class SAINT(BaseTabularModelWithAttention):
r"""Defines a `SAINT model <https://arxiv.org/abs/2106.01342>`_ that
can be used as the ``deeptabular`` component of a Wide & Deep model or
independently by itself.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name and number of unique values,
        e.g. [('education', 11), ...]
cat_embed_dropout: float, default = 0.1
Categorical embeddings dropout
use_cat_bias: bool, default = False,
Boolean indicating if bias will be used for the categorical embeddings
cat_embed_activation: Optional, str, default = None,
Activation function for the categorical embeddings, if any. `'tanh'`,
`'relu'`, `'leaky_relu'` and `'gelu'` are supported.
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``cat_embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
        words, the idea is to let the model learn which column is being
        embedded at any given time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = None
Type of normalization layer applied to the continuous features. Options
are: 'layernorm', 'batchnorm' or None.
cont_embed_dropout: float, default = 0.1,
Continuous embeddings dropout
use_cont_bias: bool, default = True,
Boolean indicating if bias will be used for the continuous embeddings
cont_embed_activation: str, default = None
Activation function to be applied to the continuous embeddings, if
any. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported.
input_dim: int, default = 32
        The so-called *dimension of the model*. It is the size of the
        embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_qkv_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks. 1 in the paper.
    attn_dropout: float, default = 0.1
Dropout that will be applied to the Multi-Head Attention column and
row layers
    ff_dropout: float, default = 0.2
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. `'tanh'`, `'relu'`,
`'leaky_relu'`, `'gelu'`, `'geglu'` and `'reglu'` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP's input dimension
mlp_activation: str, default = "relu"
MLP activation function. `'tanh'`, `'relu'`, `'leaky_relu'` and
`'gelu'` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean controlling the order of the operations in the dense
        layer. If ``True``: ``[LIN -> ACT -> BN -> DP]``. If ``False``: ``[BN -> DP ->
        LIN -> ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
saint_blks: ``nn.Sequential``
Sequence of SAINT-Transformer blocks
saint_mlp: ``nn.Module``
MLP component in the model
output_dim: int
        The output dimension of the model. This attribute is required to
        build the ``WideDeep`` class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
cat_embed_input: Optional[List[Tuple[str, int]]] = None,
cat_embed_dropout: float = 0.1,
use_cat_bias: bool = False,
cat_embed_activation: Optional[str] = None,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: Optional[str] = None,
cont_embed_dropout: float = 0.1,
use_cont_bias: bool = True,
cont_embed_activation: Optional[str] = None,
input_dim: int = 32,
use_qkv_bias: bool = False,
n_heads: int = 8,
n_blocks: int = 2,
attn_dropout: float = 0.1,
ff_dropout: float = 0.2,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(SAINT, self).__init__(
column_idx=column_idx,
cat_embed_input=cat_embed_input,
cat_embed_dropout=cat_embed_dropout,
use_cat_bias=use_cat_bias,
cat_embed_activation=cat_embed_activation,
full_embed_dropout=full_embed_dropout,
shared_embed=shared_embed,
add_shared_embed=add_shared_embed,
frac_shared_embed=frac_shared_embed,
continuous_cols=continuous_cols,
cont_norm_layer=cont_norm_layer,
embed_continuous=True,
cont_embed_dropout=cont_embed_dropout,
use_cont_bias=use_cont_bias,
cont_embed_activation=cont_embed_activation,
input_dim=input_dim,
)
self.column_idx = column_idx
self.cat_embed_input = cat_embed_input
self.cat_embed_dropout = cat_embed_dropout
self.full_embed_dropout = full_embed_dropout
self.shared_embed = shared_embed
self.add_shared_embed = add_shared_embed
self.frac_shared_embed = frac_shared_embed
self.continuous_cols = continuous_cols
self.cont_embed_activation = cont_embed_activation
self.cont_embed_dropout = cont_embed_dropout
self.cont_norm_layer = cont_norm_layer
self.input_dim = input_dim
self.use_qkv_bias = use_qkv_bias
self.n_heads = n_heads
self.n_blocks = n_blocks
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_dropout = mlp_dropout
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.with_cls_token = "cls_token" in column_idx
self.n_cat = len(cat_embed_input) if cat_embed_input is not None else 0
self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
self.n_feats = self.n_cat + self.n_cont
# Embeddings are instantiated at the base model
# Transformer blocks
self.saint_blks = nn.Sequential()
for i in range(n_blocks):
self.saint_blks.add_module(
"saint_block" + str(i),
SaintEncoder(
input_dim,
n_heads,
use_qkv_bias,
attn_dropout,
ff_dropout,
transformer_activation,
self.n_feats,
),
)
attn_output_dim = (
self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
)
# Mlp
if not mlp_hidden_dims:
mlp_hidden_dims = [
attn_output_dim,
attn_output_dim * 4,
attn_output_dim * 2,
]
else:
mlp_hidden_dims = [attn_output_dim] + mlp_hidden_dims
self.saint_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim: int = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x = self._get_embeddings(X)
x = self.saint_blks(x)
if self.with_cls_token:
x = x[:, 0, :]
else:
x = x.flatten(1)
return self.saint_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
"""
attention_weights = []
for blk in self.saint_blks:
attention_weights.append(
(blk.col_attn.attn_weights, blk.row_attn.attn_weights)
)
return attention_weights
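    # Illustrative continuation of the docstring example above (hypothetical session;
    # run after `out = model(X_tab)` so the attention weights are populated):
    #
    #     >>> col_attn, row_attn = model.attention_weights[0]
    #     >>> col_attn.shape   # (N, H, F, F) -> torch.Size([5, 8, 5, 5]) for that example
    #     >>> row_attn.shape   # (1, H, N, N) -> torch.Size([1, 8, 5, 5]) for that example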
| 41.565371 | 110 | 0.642013 | [
"MIT"
] | TangleSpace/pytorch-widedeep | pytorch_widedeep/models/tabular/transformers/saint.py | 11,763 | Python |
#!/usr/bin/env python
# encoding: utf-8
import socket
import sys
import tempfile
import time
import subprocess
import os
import vim  # the Vim Python module; this script is executed from inside Vim
# function to get free port from ycmd
def GetUnusedLocalhostPort():
sock = socket.socket()
# This tells the OS to give us any free port in the range [1024 - 65535]
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
SERVER = ('127.0.0.1', GetUnusedLocalhostPort())
# A wrapper for subprocess.Popen that works around a Popen bug on Windows.
def SafePopen(args, **kwargs):
if kwargs.get('stdin') is None:
kwargs['stdin'] = subprocess.PIPE if sys.platform == 'win32' else None
return subprocess.Popen(args, **kwargs)
class JavaviBridge():
pythonVersion = sys.version_info.major
sock = None
popen = None
logfile = None
def setupServer(self, javabin, args, classpath):
is_win = sys.platform == 'win32'
separator = (';' if is_win else ':')
fileSeparator = ('\\' if is_win else '/')
classpathset = set(classpath.split(separator))
environ = os.environ.copy()
if 'CLASSPATH' in environ:
classpathset.union(environ['CLASSPATH'].split(separator))
environ['CLASSPATH'] = separator.join(classpathset)
if vim.eval('get(g:, "JavaComplete_JavaviLogLevel", 0)') != 0:
defaulttmp = tempfile.gettempdir() + fileSeparator + 'javavi_log'
logdir = vim.eval(
"get(g:, 'JavaComplete_JavaviLogDirectory', '%s')"
% defaulttmp)
if not os.path.isdir(logdir):
os.mkdir(logdir)
self.logfile = open("%s%s%s" % (
logdir, fileSeparator, "javavi_stdout.log"),
"a")
output = self.logfile
else:
output = subprocess.PIPE
args = [javabin] + args + ['-D', str(SERVER[1])]
if is_win and vim.eval('has("gui_running")'):
info = subprocess.STARTUPINFO()
info.dwFlags = 1
info.wShowWindow = 0
self.popen = SafePopen(args, env=environ, stdout = output, stderr = output, startupinfo = info)
else:
self.popen = SafePopen(args, env=environ, stdout = output, stderr = output)
def pid(self):
return self.popen.pid
def port(self):
return SERVER[1]
def poll(self):
if self.popen:
return self.popen.poll() is None
else:
return 0
def terminateServer(self):
if self.popen:
self.popen.terminate()
self.popen.wait()
if self.logfile:
self.logfile.close()
def makeSocket(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as msg:
self.sock = None
try:
self.sock.connect(SERVER)
time.sleep(.1)
except socket.error as msg:
self.sock.close()
self.sock = None
if self.sock is None:
print('could not open socket, try again')
return
self.sock.setblocking(0)
def send(self, data):
if self.sock is None:
self.makeSocket()
if self.sock is None:
return ''
if self.pythonVersion == 3:
self.sock.sendall((str(data) + '\n').encode('UTF-8'))
else:
self.sock.sendall((data.decode('UTF-8') + '\n').encode('UTF-8'))
totalData = []
while 1:
try:
data = self.sock.recv(4096)
if not data or len(data) == 0:
break
totalData.append(data.decode('UTF-8'))
time.sleep(.01)
except:
if totalData: break
self.sock.close()
self.sock = None
return ''.join(totalData)
| 28.210145 | 107 | 0.552787 | [
"MIT",
"BSD-3-Clause"
] | darconeous/shattings | vim/bundle/vim-javacomplete2/autoload/javavibridge.py | 3,893 | Python |
"""This file defines the database connection, plus some terminal commands for
setting up and tearing down the database.
Do not import anything directly from `backend.database._core`. Instead, import
from `backend.database`.
"""
from typing import Optional
import os
import pandas as pd
import click
from flask import current_app
from flask.cli import AppGroup
from flask.cli import with_appcontext
from werkzeug.utils import secure_filename
from sqlalchemy.exc import ResourceClosedError
from flask_sqlalchemy import SQLAlchemy
import psycopg2.errors
from psycopg2 import connect
from psycopg2.extensions import connection
from ..config import TestingConfig
from ..utils import dev_only
db = SQLAlchemy()
QUERIES_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "queries")
)
def execute_query(filename: str) -> Optional[pd.DataFrame]:
"""Run SQL from a file. It will return a Pandas DataFrame if it selected
anything; otherwise it will return None.
I do not recommend you use this function too often. In general we should be
using the SQLAlchemy ORM. That said, it's a nice convenience, and there are
times where this function is genuinely something you want to run.
"""
with open(os.path.join(QUERIES_DIR, secure_filename(filename))) as f:
query = f.read()
with db.engine.connect() as conn:
res = conn.execute(query)
try:
df = pd.DataFrame(res.fetchall(), columns=res.keys())
return df
except ResourceClosedError:
return None
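# Illustrative usage sketch ("my_query.sql" is a hypothetical file that would
# have to exist inside QUERIES_DIR):
#
#     df = execute_query("my_query.sql")
#     if df is not None:      # SELECT-style queries come back as a DataFrame
#         print(df.head())
#     # Statements that return no rows (INSERT, DDL, ...) yield None instead.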
@click.group("psql", cls=AppGroup)
@with_appcontext
@click.pass_context
def db_cli(ctx: click.Context):
"""Collection of database commands."""
ctx.obj = connect(
user=current_app.config["POSTGRES_USER"],
password=current_app.config["POSTGRES_PASSWORD"],
host=current_app.config["POSTGRES_HOST"],
port=current_app.config["POSTGRES_PORT"],
dbname="postgres",
)
pass_psql_admin_connection = click.make_pass_decorator(connection)
@db_cli.command("create")
@click.option(
"--overwrite/--no-overwrite",
default=False,
is_flag=True,
show_default=True,
help="If true, overwrite the database if it exists.",
)
@pass_psql_admin_connection
@click.pass_context
@dev_only
def create_database(
ctx: click.Context, conn: connection, overwrite: bool = False
):
"""Create the database from nothing."""
database = current_app.config["POSTGRES_DB"]
cursor = conn.cursor()
cursor.execute("ROLLBACK")
if overwrite:
cursor.execute(
f"SELECT bool_or(datname = '{database}') FROM pg_database;"
)
exists = cursor.fetchall()[0][0]
if exists:
ctx.invoke(delete_database)
try:
cursor.execute(f"CREATE DATABASE {database};")
except psycopg2.errors.lookup("42P04"):
click.echo(f"Database {database!r} already exists.")
cursor.execute("ROLLBACK")
else:
click.echo(f"Created database {database!r}.")
@db_cli.command("init")
def init_database():
"""Initialize the database schemas.
Run this after the database has been created.
"""
database = current_app.config["POSTGRES_DB"]
db.create_all()
click.echo(f"Initialized the database {database!r}.")
@db_cli.command("gen-examples")
def gen_examples_command():
"""Generate 2 incident examples in the database."""
execute_query("example_incidents.sql")
click.echo("Added 2 example incidents to the database.")
@db_cli.command("delete")
@click.option(
"--test-db",
"-t",
default=False,
is_flag=True,
help=f"Deletes the database {TestingConfig.POSTGRES_DB!r}.",
)
@pass_psql_admin_connection
@dev_only
def delete_database(conn: connection, test_db: bool):
"""Delete the database."""
if test_db:
database = TestingConfig.POSTGRES_DB
else:
database = current_app.config["POSTGRES_DB"]
cursor = conn.cursor()
cursor.execute("ROLLBACK")
# Don't validate name for `police_data_test`.
if database != TestingConfig.POSTGRES_DB:
# Make sure we want to do this.
click.echo(f"Are you sure you want to delete database {database!r}?")
click.echo(
"Type in the database name '"
+ click.style(database, fg="red")
+ "' to confirm"
)
confirmation = click.prompt("Database name")
if database != confirmation:
click.echo(
"The input does not match. " "The database will not be deleted."
)
return None
try:
cursor.execute(f"DROP DATABASE {database};")
except psycopg2.errors.lookup("3D000"):
click.echo(f"Database {database!r} does not exist.")
cursor.execute("ROLLBACK")
else:
click.echo(f"Database {database!r} was deleted.")
| 28.816568 | 80 | 0.675975 | [
"MIT"
] | stianberghansen/police-data-trust | backend/database/core.py | 4,870 | Python |
#! /usr/bin/env python
# -*- coding: latin-1; -*-
'''
Copyright 2018 University of Liège
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from wrap import *
metafor = None
def params(q={}):
""" default model parameters
"""
p={}
p['tolNR'] = 1.0e-7 # Newton-Raphson tolerance
p['tend'] = 1. # final time
p['dtmax'] = 0.005 # max time step
p['bndno'] = 17 # interface boundary number
# BC type
#p['bctype'] = 'pressure' # uniform pressure
#p['bctype'] = 'deadload' # uniform nodal load
#p['bctype'] = 'pydeadload1' # uniform nodal load (python)
p['bctype'] = 'pydeadloads' # variable loads
#p['bctype'] = 'slave' # variable loads (mpi)
p.update(q)
return p
def getMetafor(p={}):
global metafor
if metafor: return metafor
metafor = Metafor()
p = params(p)
domain = metafor.getDomain()
geometry = domain.getGeometry()
geometry.setDimPlaneStrain(1.0)
# import .geo
from toolbox.gmsh import GmshImport
f = os.path.join(os.path.dirname(__file__), "waterColoumnFallWithFlexibleObstacle_Mtf_Pfem.msh")
importer = GmshImport(f, domain)
importer.execute2D()
groupset = domain.getGeometry().getGroupSet()
# solid elements / material
interactionset = domain.getInteractionSet()
app1 = FieldApplicator(1)
app1.push( groupset(21) ) # physical group 100: beam
interactionset.add( app1 )
materset = domain.getMaterialSet()
materset.define( 1, ElastHypoMaterial )
mater1 = materset(1)
mater1.put(MASS_DENSITY, 2500.0) # [kg/m³]
mater1.put(ELASTIC_MODULUS, 1.0e6) # [Pa]
mater1.put(POISSON_RATIO, 0.) # [-]
prp = ElementProperties(Volume2DElement)
app1.addProperty(prp)
prp.put (MATERIAL, 1)
prp.put(EASS, 2)
prp.put(EASV, 2)
prp.put(PEAS, 1e-12)
# boundary conditions
loadingset = domain.getLoadingSet()
#Physical Line(101) - clamped side of the beam
loadingset.define(groupset(19), Field1D(TX,RE))
loadingset.define(groupset(19), Field1D(TY,RE))
#Physical Line(102) - free surface of the beam
#Physical Line(103) - upper surface of the beam (for tests only)
mim = metafor.getMechanicalIterationManager()
mim.setMaxNbOfIterations(4)
mim.setResidualTolerance(p['tolNR'])
ti = AlphaGeneralizedTimeIntegration(metafor)
metafor.setTimeIntegration(ti)
# visu
if 0:
tsm = metafor.getTimeStepManager()
tsm.setInitialTime(0.0, 1.0)
tsm.setNextTime(1.0, 1, 1.0)
# results
#vmgr = metafor.getValuesManager()
#vmgr.add(1, MiscValueExtractor(metafor, EXT_T), 'time')
#vmgr.add(2, DbNodalValueExtractor(groupset(104), Field1D(TY,RE)), 'dy')
return metafor
def getRealTimeExtractorsList(mtf):
extractorsList = []
# --- Extractors list starts --- #
# --- Extractors list ends --- #
return extractorsList
| 27.757813 | 100 | 0.639178 | [
"Apache-2.0"
] | mlucio89/CUPyDO | tests/PFEM_Metafor/waterColoumnFallWithFlexibleObstacle_obstacle_Mtf_E_1_0e6_EAS.py | 3,555 | Python |
#!/usr/bin/env python
import numpy
import scipy
from scipy import io
data_dict = scipy.io.loadmat('../data/hmsvm_data_large_integer.mat', struct_as_record=False)
parameter_list=[[data_dict]]
def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict):
import shogun as sg
try:
_ = sg.create_machine("DualLibQPBMSOSVM")
except:
print("DualLibQPBMSOSVM not available")
return
labels_array = m_data_dict['label'][0]
idxs = numpy.nonzero(labels_array == -1)
labels_array[idxs] = 0
labels = sg.SequenceLabels(labels_array, 250, 500, 2)
features = sg.RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500)
num_obs = 4 # given by the data file used
model = sg.create_structured_model("HMSVMModel", features=features, labels=labels,
state_model_type="SMT_TWO_STATE", num_obs=num_obs)
sosvm = sg.create_machine("DualLibQPBMSOSVM", model=model, labels=labels, m_lambda=5000.0)
sosvm.train()
#print sosvm.get_w()
predicted = sosvm.apply(features)
evaluator = sg.create_evaluation("StructuredAccuracy")
acc = evaluator.evaluate(predicted, labels)
#print('Accuracy = %.4f' % acc)
if __name__ == '__main__':
print("Discrete HMSVM BMRM")
structure_discrete_hmsvm_bmrm(*parameter_list[0])
| 27.909091 | 92 | 0.754886 | [
"BSD-3-Clause"
] | AbhinavTalari/shogun | examples/undocumented/python/structure_discrete_hmsvm_bmrm.py | 1,228 | Python |
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
from supybot.test import *
import supybot.utils as utils
nicks = ['fatjim','scn','moshez','LordVan','MetaCosm','pythong','fishfart',
'alb','d0rt','jemfinch','StyxAlso','fors','deltab','gd',
'hellz_hunter','are_j|pub_comp','jason_','dreid','sayke_','winjer',
'TenOfTen','GoNoVas','queuetue','the|zzz','Hellfried','Therion',
'shro','DaCa','rexec','polin8','r0ky','aaron_','ironfroggy','eugene',
'faassen','tirloni','mackstann','Yhg1s','ElBarono','vegai','shang',
'typo_','kikoforgetme','asqui','TazyTiggy','fab','nixman','liiwi',
'AdamV','paolo','red_one','_AleX_','lament','jamessan','supybot',
'macr0_zzz','plaisthos','redghost','disco','mphardy','gt3','mathie',
'jonez','r0ky-office','tic','d33p','ES3merge','talin','af','flippo',
'sholden','ameoba','shepherg','j2','Acapnotic','dash','merlin262',
'Taaus','_moshez','rik','jafo__','blk-majik','JT__','itamar',
'kermit-','davidmccabe','glyph','jojo','dave_p','goo','hyjinx',
'SamB','exarkun','drewp','Ragica','skylan','redgore','k3','Ra1stlin',
'StevenK','carball','h3x','carljm','_jacob','teratorn','frangen',
'phed','datazone','Yaggo','acct_','nowhere','pyn','ThomasWaldmann',
'dunker','pilotLight','brainless','LoganH_','jmpnz','steinn',
'EliasREC','lowks__','OldSmrf','Mad77','snibril','delta','psy',
'skimpIzu','Kengur','MoonFallen','kotkis','Hyperi']
def group(seq, groupSize, noneFill=True):
"""Groups a given sequence into sublists of length groupSize."""
ret = []
L = []
i = groupSize
for elt in seq:
if i > 0:
L.append(elt)
else:
ret.append(L)
i = groupSize
L = []
L.append(elt)
i -= 1
if L:
if noneFill:
while len(L) < groupSize:
L.append(None)
ret.append(L)
return ret
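# e.g. group([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4, None]]
#      group([0, 1, 2, 3, 4], 2, noneFill=False) -> [[0, 1], [2, 3], [4]]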
class StringTestCase(PluginTestCase):
plugins = ('String', 'Format', 'Status')
def testLen(self):
self.assertResponse('len foo', '3')
self.assertHelp('len')
def testNoErrors(self):
self.assertNotError('levenshtein Python Perl')
def testSoundex(self):
self.assertNotError('soundex jemfinch')
self.assertNotRegexp('soundex foobar 3:30', 'ValueError')
def testChr(self):
for i in range(256):
c = chr(i)
regexp = r'%s|%s' % (re.escape(c), re.escape(repr(c)))
self.assertRegexp('chr %s' % i, regexp)
def testOrd(self):
for c in map(chr, range(256)):
i = ord(c)
self.assertResponse('ord %s' % utils.str.dqrepr(c), str(i))
def testUnicode(self):
self.assertResponse('unicodename ☃', 'SNOWMAN')
self.assertResponse('unicodesearch SNOWMAN', '☃')
#self.assertResponse('unicodename ?',
# 'No name found for this character.')
self.assertResponse('unicodesearch FOO',
'Error: No character found with this name.')
def testMd5(self):
self.assertResponse('md5 supybot', '1360578d1276e945cc235654a53f9c65')
def testEncodeDecode(self):
        # This no longer works correctly. It almost seems like we're throwing
        # in a repr() somewhere.
s = 'the recalcitrant jamessan tests his scramble function'
self.assertNotRegexp('encode aldkfja foobar', 'LookupError')
self.assertNotRegexp('decode asdflkj foobar', 'LookupError')
self.assertResponse('decode zlib [encode zlib %s]' % s, s)
self.assertRegexp('decode base64 $BCfBg7;9D;R(B', 'padded with')
def testRe(self):
self.assertResponse('re "m/system time/" [status cpu]', 'system time')
self.assertResponse('re s/user/luser/g user user', 'luser luser')
self.assertResponse('re s/user/luser/ user user', 'luser user')
self.assertNotRegexp('re m/foo/ bar', 'has no attribute')
self.assertResponse('re m/a\\S+y/ "the bot angryman is hairy"', 'angry')
self.assertResponse('re m/a\\S+y/g "the bot angryman is hairy"',
'angry and airy')
def testReNotEmptyString(self):
self.assertError('re s//foo/g blah')
def testReWorksWithJustCaret(self):
self.assertResponse('re s/^/foo/ bar', 'foobar')
def testReNoEscapingUnpackListOfWrongSize(self):
self.assertNotRegexp('re foo bar baz', 'unpack list of wrong size')
def testReBug850931(self):
self.assertResponse(r're s/\b(\w+)\b/\1./g foo bar baz',
'foo. bar. baz.')
def testNotOverlongRe(self):
self.assertError('re [strjoin "" s/./ [eval \'xxx\'*400]] blah blah')
def testXor(self):
        # This no longer works correctly. It almost seems like we're throwing
        # in a repr() somewhere.
L = [nick for nick in nicks if '|' not in nick and
'[' not in nick and
']' not in nick]
for s0, s1, s2, s3, s4, s5, s6, s7, s8, s9 in group(L, 10):
data = '%s%s%s%s%s%s%s%s%s' % (s0, s1, s2, s3, s4, s5, s6, s7, s8)
self.assertResponse('xor %s [xor %s %s]' % (s9, s9, data), data)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 43.5 | 80 | 0.625862 | [
"BSD-3-Clause"
] | AntumDeluge/Limnoria | plugins/String/test.py | 6,964 | Python |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains the Rally objects."""
from rally.common.objects.credential import Credential # noqa
from rally.common.objects.deploy import Deployment # noqa
from rally.common.objects.endpoint import Endpoint # noqa
from rally.common.objects.task import Task # noqa
from rally.common.objects.verification import Verification # noqa
| 43.772727 | 78 | 0.751817 | [
"Apache-2.0"
] | LorenzoBianconi/rally | rally/common/objects/__init__.py | 963 | Python |
from itertools import combinations
from typing import Callable, Dict, Set, Tuple, Union
import networkx as nx
import pandas as pd
from causal_networkx import ADMG
from causal_networkx.discovery.classes import ConstraintDiscovery
def _has_both_edges(dag, i, j):
return dag.has_edge(i, j) and dag.has_edge(j, i)
def _has_any_edge(dag, i, j):
return dag.has_edge(i, j) or dag.has_edge(j, i)
class PC(ConstraintDiscovery):
def __init__(
self,
ci_estimator: Callable,
alpha: float = 0.05,
init_graph: Union[nx.Graph, ADMG] = None,
fixed_edges: nx.Graph = None,
max_cond_set_size: int = None,
**ci_estimator_kwargs,
):
"""Peter and Clarke (PC) algorithm for causal discovery.
Assumes causal sufficiency, that is, all confounders in the
causal graph are observed variables.
Parameters
----------
ci_estimator : Callable
The conditional independence test function. The arguments of the estimator should
be data, node, node to compare, conditioning set of nodes, and any additional
keyword arguments.
alpha : float, optional
The significance level for the conditional independence test, by default 0.05.
init_graph : nx.Graph | ADMG, optional
An initialized graph. If ``None``, then will initialize PC using a
complete graph. By default None.
fixed_edges : nx.Graph, optional
An undirected graph with fixed edges. If ``None``, then will initialize PC using a
complete graph. By default None.
max_cond_set_size : int, optional
Maximum size of the conditioning set, by default None. Used to limit
the computation spent on the algorithm.
ci_estimator_kwargs : dict
Keyword arguments for the ``ci_estimator`` function.
Attributes
----------
graph_ : PAG
The graph discovered.
separating_sets_ : dict
            The dictionary of separating sets: a nested dictionary mapping each
            variable name to the variables it was compared against, and then to
            the set of variables in the graph that separate the two.
"""
super().__init__(
ci_estimator, alpha, init_graph, fixed_edges, max_cond_set_size, **ci_estimator_kwargs
)
def learn_skeleton(self, X: pd.DataFrame) -> Tuple[nx.Graph, Dict[str, Dict[str, Set]]]:
"""Learn skeleton from data.
Parameters
----------
X : pd.DataFrame
Dataset.
Returns
-------
skel_graph : nx.Graph
The skeleton graph.
sep_set : Dict[str, Dict[str, Set]]
The separating set.
"""
graph, sep_set, fixed_edges = self._initialize_graph(X)
skel_graph, sep_set = self._learn_skeleton_from_neighbors(X, graph, sep_set, fixed_edges)
return skel_graph, sep_set
def fit(self, X: pd.DataFrame) -> None:
"""Fit PC algorithm on dataset 'X'."""
# learn skeleton
skel_graph, sep_set = self.learn_skeleton(X)
# perform CI tests to orient edges into a DAG
graph = self._orient_edges(skel_graph, sep_set)
self.separating_sets_ = sep_set
self.graph_ = graph
def _orient_edges(self, skel_graph, sep_set):
"""Orient edges in a skeleton graph to estimate the causal DAG, or CPDAG.
Uses the separation sets to orient edges via conditional independence
testing.
Parameters
----------
        skel_graph : nx.Graph
            The undirected skeleton graph whose edges will be oriented.
        sep_set : Dict[str, Dict[str, Set]]
            The separating sets found while learning the skeleton; used to
            orient colliders from conditional independence information.
"""
dag = skel_graph.to_directed()
node_ids = skel_graph.nodes()
for (i, j) in combinations(node_ids, 2):
adj_i = set(dag.successors(i))
if j in adj_i:
continue
adj_j = set(dag.successors(j))
if i in adj_j:
continue
if sep_set[i][j] is None:
continue
common_k = adj_i & adj_j
for k in common_k:
if k not in sep_set[i][j]:
if dag.has_edge(k, i):
# _logger.debug('S: remove edge (%s, %s)' % (k, i))
dag.remove_edge(k, i)
if dag.has_edge(k, j):
# _logger.debug('S: remove edge (%s, %s)' % (k, j))
dag.remove_edge(k, j)
# For all the combination of nodes i and j, apply the following
# rules.
old_dag = dag.copy()
while True:
for (i, j) in combinations(node_ids, 2):
# Rule 1: Orient i-j into i->j whenever there is an arrow k->i
# such that k and j are nonadjacent.
#
# Check if i-j.
if _has_both_edges(dag, i, j):
# Look all the predecessors of i.
for k in dag.predecessors(i):
# Skip if there is an arrow i->k.
if dag.has_edge(i, k):
continue
# Skip if k and j are adjacent.
if _has_any_edge(dag, k, j):
continue
# Make i-j into i->j
# _logger.debug("R1: remove edge (%s, %s)" % (j, i))
dag.remove_edge(j, i)
break
# Rule 2: Orient i-j into i->j whenever there is a chain
# i->k->j.
#
# Check if i-j.
if _has_both_edges(dag, i, j):
# Find nodes k where k is i->k.
succs_i = set()
for k in dag.successors(i):
if not dag.has_edge(k, i):
succs_i.add(k)
# Find nodes j where j is k->j.
preds_j = set()
for k in dag.predecessors(j):
if not dag.has_edge(j, k):
preds_j.add(k)
# Check if there is any node k where i->k->j.
if len(succs_i & preds_j) > 0:
# Make i-j into i->j
# _logger.debug("R2: remove edge (%s, %s)" % (j, i))
dag.remove_edge(j, i)
# Rule 3: Orient i-j into i->j whenever there are two chains
# i-k->j and i-l->j such that k and l are nonadjacent.
#
# Check if i-j.
if _has_both_edges(dag, i, j):
# Find nodes k where i-k.
adj_i = set()
for k in dag.successors(i):
if dag.has_edge(k, i):
adj_i.add(k)
# For all the pairs of nodes in adj_i,
for (k, l) in combinations(adj_i, 2):
# Skip if k and l are adjacent.
if _has_any_edge(dag, k, l):
continue
# Skip if not k->j.
if dag.has_edge(j, k) or (not dag.has_edge(k, j)):
continue
# Skip if not l->j.
if dag.has_edge(j, l) or (not dag.has_edge(l, j)):
continue
# Make i-j into i->j.
# _logger.debug('R3: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
break
# Rule 4: Orient i-j into i->j whenever there are two chains
# i-k->l and k->l->j such that k and j are nonadjacent.
#
# However, this rule is not necessary when the PC-algorithm
# is used to estimate a DAG.
if nx.is_isomorphic(dag, old_dag):
break
old_dag = dag.copy()
return dag
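# Illustrative wiring sketch (hypothetical estimator; the exact return contract of
# ``ci_estimator`` is defined by ConstraintDiscovery and is not shown in this file):
#
#     def my_ci_test(data, x, y, sep_set, **kwargs):
#         ...  # run a conditional independence test of x _||_ y | sep_set on `data`
#
#     pc = PC(ci_estimator=my_ci_test, alpha=0.05, max_cond_set_size=3)
#     pc.fit(X)            # X is a pandas DataFrame of observational data
#     result = pc.graph_   # oriented graph produced by the edge-orientation rules above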
| 38.933962 | 98 | 0.500969 | [
"BSD-3-Clause"
] | adam2392/causal-networkx | causal_networkx/discovery/pcalg.py | 8,254 | Python |
# Copyright 2016 Rackspace
# Copyright 2016 Intel Corporation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import command as alembic_command
from alembic import script as alembic_script
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
import sqlalchemy.types as types
from glance.db.sqlalchemy import alembic_migrations
from glance.db.sqlalchemy.alembic_migrations import versions
from glance.db.sqlalchemy import models
from glance.db.sqlalchemy import models_metadef
import glance.tests.utils as test_utils
class AlembicMigrationsMixin(object):
def setUp(self):
super(AlembicMigrationsMixin, self).setUp()
self.engine = enginefacade.writer.get_engine()
def _get_revisions(self, config, head=None):
head = head or 'heads'
scripts_dir = alembic_script.ScriptDirectory.from_config(config)
revisions = list(scripts_dir.walk_revisions(base='base',
head=head))
revisions = list(reversed(revisions))
revisions = [rev.revision for rev in revisions]
return revisions
def _migrate_up(self, config, engine, revision, with_data=False):
if with_data:
data = None
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
data = pre_upgrade(engine)
alembic_command.upgrade(config, revision)
if with_data:
check = getattr(self, '_check_%s' % revision, None)
if check:
check(engine, data)
def test_walk_versions(self):
alembic_config = alembic_migrations.get_alembic_config(self.engine)
for revision in self._get_revisions(alembic_config):
self._migrate_up(alembic_config, self.engine, revision,
with_data=True)
class TestMysqlMigrations(test_fixtures.OpportunisticDBTestMixin,
AlembicMigrationsMixin,
test_utils.BaseTestCase):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
def test_mysql_innodb_tables(self):
test_utils.db_sync(engine=self.engine)
total = self.engine.execute(
"SELECT COUNT(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%s'"
% self.engine.url.database)
self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?")
noninnodb = self.engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%s' "
"AND ENGINE!='InnoDB' "
"AND TABLE_NAME!='migrate_version'"
% self.engine.url.database)
count = noninnodb.scalar()
self.assertEqual(0, count, "%d non InnoDB tables created" % count)
class TestPostgresqlMigrations(test_fixtures.OpportunisticDBTestMixin,
AlembicMigrationsMixin,
test_utils.BaseTestCase):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class TestSqliteMigrations(test_fixtures.OpportunisticDBTestMixin,
AlembicMigrationsMixin,
test_utils.BaseTestCase):
pass
class TestMigrations(test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase):
def test_no_downgrade(self):
migrate_file = versions.__path__[0]
for parent, dirnames, filenames in os.walk(migrate_file):
for filename in filenames:
if filename.split('.')[1] == 'py':
model_name = filename.split('.')[0]
model = __import__(
'glance.db.sqlalchemy.alembic_migrations.versions.' +
model_name)
obj = getattr(getattr(getattr(getattr(getattr(
model, 'db'), 'sqlalchemy'), 'alembic_migrations'),
'versions'), model_name)
func = getattr(obj, 'downgrade', None)
self.assertIsNone(func)
class ModelsMigrationSyncMixin(object):
def setUp(self):
super(ModelsMigrationSyncMixin, self).setUp()
self.engine = enginefacade.writer.get_engine()
def get_metadata(self):
for table in models_metadef.BASE_DICT.metadata.sorted_tables:
models.BASE.metadata._add_table(table.name, table.schema, table)
return models.BASE.metadata
def get_engine(self):
return self.engine
def db_sync(self, engine):
test_utils.db_sync(engine=engine)
# TODO(akamyshikova): remove this method as soon as comparison with Variant
# will be implemented in oslo.db or alembic
def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
if isinstance(meta_type, types.Variant):
meta_orig_type = meta_col.type
insp_orig_type = insp_col.type
meta_col.type = meta_type.impl
insp_col.type = meta_type.impl
try:
return self.compare_type(ctxt, insp_col, meta_col, insp_type,
meta_type.impl)
finally:
meta_col.type = meta_orig_type
insp_col.type = insp_orig_type
else:
ret = super(ModelsMigrationSyncMixin, self).compare_type(
ctxt, insp_col, meta_col, insp_type, meta_type)
if ret is not None:
return ret
return ctxt.impl.compare_type(insp_col, meta_col)
def include_object(self, object_, name, type_, reflected, compare_to):
if name in ['migrate_version'] and type_ == 'table':
return False
return True
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin,
test_migrations.ModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase):
pass
| 38.366492 | 79 | 0.631141 | [
"Apache-2.0"
] | FuzeSoft/OpenStack-Stein | glance-18.0.0/glance/tests/functional/db/test_migrations.py | 7,328 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <[email protected]>
"""
The ``brat`` module converts discourse graphs into brat annotation
files.
"""
import os
import codecs
import math
import itertools
from collections import defaultdict
import brewer2mpl
from unidecode import unidecode
import discoursegraphs as dg
from discoursegraphs.readwrite.mmax2 import spanstring2text, spanstring2tokens
ANNOTATION_CONF = u"""
[entities]
Markable
[relations]
Coreference\tArg1:Markable, Arg2:Markable, <REL-TYPE>:symmetric-transitive
# "Markable" annotations can nest arbitrarily
ENTITY-NESTING\tArg1:Markable, Arg2:Markable
[events]
[attributes]
"""
def brat_output(docgraph, layer=None, show_relations=True):
"""
converts a document graph with pointing chains into a string representation
of a brat *.ann file.
Parameters
----------
docgraph : DiscourseDocumentGraph
a document graph which might contain pointing chains (e.g. coreference links)
layer : str or None
the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores').
If unspecified, all pointing chains in the document will be considered
Returns
-------
ret_str : unicode
the content of a brat *.ann file
"""
# we can't rely on the .ns attribute of a merged graph
if layer:
namespace = dg.layer2namespace(layer)
else:
namespace = docgraph.ns
ret_str = u''
pointing_chains = dg.get_pointing_chains(docgraph, layer=layer)
# a token can be part of 1+ markable(s)
first_token2markables = defaultdict(list)
markable_dict = {}
markable_index = 1
for pointing_chain in pointing_chains:
for markable in sorted(pointing_chain, key=dg.util.natural_sort_key):
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][namespace+':span'])
span_text = dg.tokens2text(docgraph, span_tokens)
first_token2markables[span_tokens[0]].append(markable)
markable_dict[markable] = (markable_index, span_text, len(span_text))
markable_index += 1
onset = 0
for token_id in docgraph.tokens:
tok_len = len(docgraph.get_token(token_id))
if token_id in first_token2markables:
for markable in first_token2markables[token_id]:
mark_index, mark_text, mark_len = markable_dict[markable]
ret_str += u"T{0}\tMarkable {1} {2}\t{3}\n".format(
mark_index, onset, onset+mark_len, mark_text)
onset += tok_len+1
if show_relations:
relation = 1
for pointing_chain in pointing_chains:
last_to_first_mention = sorted(pointing_chain, key=dg.util.natural_sort_key, reverse=True)
for i in xrange(0, len(pointing_chain)-1):
chain_element = markable_dict[last_to_first_mention[i]][0]
prev_chain_element = markable_dict[last_to_first_mention[i+1]][0]
ret_str += u"R{0}\tCoreference Arg1:T{1} Arg2:T{2}\n".format(
relation, chain_element, prev_chain_element)
relation += 1
return ret_str
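# For reference, the *.ann lines produced above follow brat's standoff format
# (fields are tab-separated; offsets and indices here are invented for illustration):
#
#     T1    Markable 0 12     Barack Obama
#     T2    Markable 40 42    he
#     R1    Coreference Arg1:T2 Arg2:T1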
def create_visual_conf(docgraph, pointing_chains):
"""
creates a visual.conf file (as a string)
for the given document graph.
"""
num_of_entities = len(pointing_chains)
mapsize = max(3, min(12, num_of_entities)) # 3 <= mapsize <= 12
colormap = brewer2mpl.get_map(name='Paired', map_type='Qualitative', number=mapsize)
colors = range(mapsize) * int(math.ceil(num_of_entities / float(mapsize)))
# recycle colors if we need more than 12
endless_color_cycle = itertools.cycle(colors)
ret_str = u'[drawing]\n\n'
for chain in pointing_chains:
background_color = colormap.hex_colors[endless_color_cycle.next()]
for markable in chain:
span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][docgraph.ns+':span'])
span_text = dg.tokens2text(docgraph, span_tokens)
ascii_markable = unidecode(span_text)
ret_str += u'{0}\tbgColor:{1}\n'.format(ascii_markable,
background_color)
ret_str += '\n[labels]'
return ret_str
def write_brat(docgraph, output_dir, layer='mmax', show_relations=True):
dg.util.create_dir(output_dir)
doc_name = os.path.basename(docgraph.name)
with codecs.open(os.path.join(output_dir, doc_name+'.txt'),
'wb', encoding='utf-8') as txtfile:
txtfile.write(dg.get_text(docgraph))
anno_str = brat_output(docgraph, layer=layer,
show_relations=show_relations)
with codecs.open(os.path.join(output_dir, 'annotation.conf'),
'wb', encoding='utf-8') as annotation_conf:
annotation_conf.write(ANNOTATION_CONF)
#~ with codecs.open(os.path.join(output_dir, 'visual.conf'),
#~ 'wb', encoding='utf-8') as visual_conf:
#~ visual_conf.write(visual_conf_str)
with codecs.open(os.path.join(output_dir, doc_name+'.ann'),
'wb', encoding='utf-8') as annfile:
annfile.write(anno_str)
| 35.391892 | 102 | 0.661512 | [
"BSD-3-Clause"
] | arne-cl/discoursegraphs | src/discoursegraphs/readwrite/brat.py | 5,238 | Python |
from django.shortcuts import render
from django.views.generic.detail import DetailView
from todos.models import Task
def index(request):
return render(request, 'frontend/index.html')
class TodoDetailView(DetailView):
model = Task
template_name = 'frontend/index.html'
| 20.357143 | 50 | 0.768421 | [
"MIT"
] | yuliiabuchko/todo | frontend/views.py | 285 | Python |
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for NetApp API layer
"""
from cinder.i18n import _
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
class NetAppApiElementTransTests(test.TestCase):
"""Test case for NetApp API element translations."""
def setUp(self):
super(NetAppApiElementTransTests, self).setUp()
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('v2', root.get_child_content('e2'))
self.assertEqual('v3', root.get_child_content('e3'))
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
children = root.get_children()
for c in children:
if c.get_name() == 'e1':
self.assertIn(c.get_content(), ['v1', 'v3'])
else:
self.assertEqual('v2', c.get_content())
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
root = netapp_api.NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
root = netapp_api.NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
root['e4'] = 8l
self.assertEqual(4, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('1', root.get_child_content('e2'))
self.assertEqual('2.0', root.get_child_content('e3'))
self.assertEqual('8', root.get_child_content('e4'))
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
root = netapp_api.NaElement('root')
root['e1'] = netapp_api.NaElement('nested')
self.assertEqual(1, len(root.get_children()))
e1 = root.get_child_by_name('e1')
self.assertIsInstance(e1, netapp_api.NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'),
netapp_api.NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
root = netapp_api.NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
self.assertIsInstance(e1, netapp_api.NaElement)
sub_ch = e1.get_children()
self.assertEqual(2, len(sub_ch))
for c in sub_ch:
self.assertIn(c.get_name(), ['e1', 'e2'])
if c.get_name() == 'e1':
self.assertEqual('v1', c.get_content())
else:
self.assertEqual('v2', c.get_content())
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, netapp_api.NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
self.assertIn(te.get_name(), ['t1', 't2'])
def test_setter_no_value(self):
"""Tests key with None value."""
root = netapp_api.NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root['k'] = netapp_api.NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root[None] = 'value'
except Exception as e:
if not isinstance(e, KeyError):
self.fail(_('Error not a KeyError.'))
| 40.660256 | 78 | 0.623837 | [
"Apache-2.0"
] | potsmaster/cinder | cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py | 6,343 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OfflineMessage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.IntegerField(default=20)),
('message', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
| 31 | 114 | 0.604579 | [
"BSD-3-Clause"
] | Fanevanjanahary/django-offline-messages | offline_messages/migrations/0001_initial.py | 961 | Python |
from functools import partial
from typing import List, Optional, Union
from transformers import (AlbertModel, AlbertTokenizer, BartModel, BigBirdModel, BigBirdTokenizer,
BartTokenizer, BertModel, BertTokenizer,
CamembertModel, CamembertTokenizer, CTRLModel,
CTRLTokenizer, DistilBertModel, DistilBertTokenizer,
GPT2Model, GPT2Tokenizer, LongformerModel,
LongformerTokenizer, OpenAIGPTModel,
OpenAIGPTTokenizer, PreTrainedModel,
PreTrainedTokenizer, RobertaModel, RobertaTokenizer,
TransfoXLModel, TransfoXLTokenizer, XLMModel,
XLMTokenizer, XLNetModel, XLNetTokenizer)
from summarizer.summary_processor import SummaryProcessor
from summarizer.text_processors.sentence_handler import SentenceHandler
from summarizer.transformer_embeddings.bert_embedding import BertEmbedding
class BertSummarizer(SummaryProcessor):
"""Summarizer based on the BERT model."""
def __init__(
self,
model: Optional[str] = 'bert-large-uncased',
custom_model: PreTrainedModel = None,
custom_tokenizer: PreTrainedTokenizer = None,
hidden: Union[List[int], int] = -2,
reduce_option: str = 'mean',
sentence_handler: SentenceHandler = SentenceHandler(),
random_state: int = 12345,
hidden_concat: bool = False,
gpu_id: int = 0,
):
"""
        This is the parent Bert Summarizer class. New summarizers should subclass it.
:param model: This parameter is associated with the inherit string parameters from the transformers library.
:param custom_model: If you have a pre-trained model, you can add the model class here.
:param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.
:param hidden: This signifies which layer(s) of the BERT model you would like to use as embeddings.
:param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.
        :param sentence_handler: The handler to process sentences. If you want to use coreference resolution,
        instantiate and pass a CoreferenceHandler instance.
:param random_state: The random state to reproduce summarizations.
:param hidden_concat: Whether or not to concat multiple hidden layers.
:param gpu_id: GPU device index if CUDA is available.
"""
model = BertEmbedding(model, custom_model, custom_tokenizer, gpu_id)
model_func = partial(model, hidden=hidden, reduce_option=reduce_option, hidden_concat=hidden_concat)
super().__init__(model_func, sentence_handler, random_state)
class Summarizer(BertSummarizer):
def __init__(
self,
model: str = 'bert-large-uncased',
custom_model: PreTrainedModel = None,
custom_tokenizer: PreTrainedTokenizer = None,
hidden: Union[List[int], int] = -2,
reduce_option: str = 'mean',
sentence_handler: SentenceHandler = SentenceHandler(),
random_state: int = 12345,
hidden_concat: bool = False,
gpu_id: int = 0,
):
"""
This is the main Bert Summarizer class.
        :param model: The pretrained model identifier string, as used by the transformers library.
:param custom_model: If you have a pre-trained model, you can add the model class here.
:param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.
:param hidden: This signifies which layer of the BERT model you would like to use as embeddings.
:param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.
:param random_state: The random state to reproduce summarizations.
:param hidden_concat: Whether or not to concat multiple hidden layers.
:param gpu_id: GPU device index if CUDA is available.
"""
super(Summarizer, self).__init__(
model, custom_model, custom_tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat,
gpu_id
)
class TransformerSummarizer(BertSummarizer):
"""
Newer style that has keywords for models and tokenizers, but allows the user to change the type.
"""
MODEL_DICT = {
'Bert': (BertModel, BertTokenizer),
'OpenAIGPT': (OpenAIGPTModel, OpenAIGPTTokenizer),
'GPT2': (GPT2Model, GPT2Tokenizer),
'CTRL': (CTRLModel, CTRLTokenizer),
'TransfoXL': (TransfoXLModel, TransfoXLTokenizer),
'XLNet': (XLNetModel, XLNetTokenizer),
'XLM': (XLMModel, XLMTokenizer),
'DistilBert': (DistilBertModel, DistilBertTokenizer),
}
def __init__(
self,
transformer_type: str = 'Bert',
transformer_model_key: str = 'bert-base-uncased',
transformer_tokenizer_key: str = None,
hidden: Union[List[int], int] = -2,
reduce_option: str = 'mean',
sentence_handler: SentenceHandler = SentenceHandler(),
random_state: int = 12345,
hidden_concat: bool = False,
gpu_id: int = 0,
):
"""
:param transformer_type: The Transformer type, such as Bert, GPT2, DistilBert, etc.
:param transformer_model_key: The transformer model key. This is the directory for the model.
:param transformer_tokenizer_key: The transformer tokenizer key. This is the tokenizer directory.
:param hidden: The hidden output layers to use for the summarization.
:param reduce_option: The reduce option, such as mean, max, min, median, etc.
:param sentence_handler: The sentence handler class to process the raw text.
:param random_state: The random state to use.
:param hidden_concat: Deprecated hidden concat option.
:param gpu_id: GPU device index if CUDA is available.
"""
try:
self.MODEL_DICT['Roberta'] = (RobertaModel, RobertaTokenizer)
self.MODEL_DICT['Albert'] = (AlbertModel, AlbertTokenizer)
self.MODEL_DICT['Camembert'] = (CamembertModel, CamembertTokenizer)
self.MODEL_DICT['Bart'] = (BartModel, BartTokenizer)
self.MODEL_DICT['Longformer'] = (LongformerModel, LongformerTokenizer)
self.MODEL_DICT['BigBird'] = (BigBirdModel, BigBirdTokenizer)
except Exception:
pass # older transformer version
model_clz, tokenizer_clz = self.MODEL_DICT[transformer_type]
model = model_clz.from_pretrained(
transformer_model_key, output_hidden_states=True)
tokenizer = tokenizer_clz.from_pretrained(
transformer_tokenizer_key if transformer_tokenizer_key is not None else transformer_model_key
)
super().__init__(
None, model, tokenizer, hidden, reduce_option, sentence_handler, random_state, hidden_concat, gpu_id
)
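if __name__ == '__main__':
    # Minimal usage sketch, not part of the library: it assumes the
    # SummaryProcessor base class exposes the callable interface of the
    # published bert-extractive-summarizer package (model(text, ratio=...))
    # and that the default 'bert-large-uncased' weights can be downloaded.
    sample_text = (
        "The quick brown fox jumped over the lazy dog near the riverbank. "
        "The dog did not seem to mind the interruption at all. "
        "Later that afternoon the fox returned for another jump across the water."
    )
    model = Summarizer()
    print(model(sample_text, ratio=0.5))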
| 48.176871 | 120 | 0.673539 | [
"MIT"
] | SelvinDatatonic/bert-extractive-summarizer | summarizer/bert.py | 7,082 | Python |
from json import load
from typing import Union
import pygame as pg
from pygame import Surface, event
from pygame.display import set_mode, set_caption, set_icon, get_surface, update
from pygame.key import get_pressed as get_key_pressed
from pygame.mouse import get_pressed as get_mouse_pressed
from pygame.time import Clock, get_ticks
from pygame.transform import scale
from source.imgs.sprite_sheet import load_sprite_sheet
from source.scripts.scenes import Game, Menu, Shop, Scene
from source.sounds.manager import get_song
class App:
all_keycodes = tuple(getattr(pg.constants, key_str) for key_str in
filter(lambda k: k.startswith("K_"), dir(pg.constants)))
def __init__(self, config: dict[str, Union[int, str]] = None):
# ## load
# # load config
if config is None:
with open("source/config.json") as file:
config = load(file)
# get config
self.width: int = ...
self.height: int = ...
self.fps: int = ...
self.start_scene: str = ...
self.volume: int = ...
for name in ["width", "height", "fps", "start_scene", "volume"]:
setattr(self, name, config[name])
self.done = True
self.clock = Clock()
# # load images
self.bg_sprite_sheet = load_sprite_sheet("bg_imgs")
# ## create and initialize
# # create music
self.bgm_intro = get_song("intro.wav")
self.bgm = get_song("bgm.wav")
self.bgm_is_running = False
self.bgm_intro.set_volume(self.volume)
self.bgm.set_volume(self.volume)
# # create scenes
self.name = "OREO Clicker"
self.screen: Surface
self.game = Game(self)
self.menu = Menu(self)
self.shop = Shop(self)
        self._scene: Scene = getattr(self, self.start_scene, self.menu)
# initialize scenes
self.game.initialize()
self.menu.initialize()
self.shop.initialize()
@property
def scene(self) -> Scene:
return self._scene
@scene.setter
def scene(self, value: Scene):
self._scene = value
self.update_screen()
@property
def scene_scale(self):
return self.scene.settings["scale"][0] / self.scene.settings["size"][0], \
self.scene.settings["scale"][1] / self.scene.settings["size"][1]
def update_screen(self):
set_mode(self.scene.settings["scale"])
set_caption(self.scene.settings["title"])
if self.scene.settings["icon"]:
set_icon(self.scene.settings["icon"])
# noinspection PyAttributeOutsideInit
self.screen = get_surface()
@property
def scene_screen(self):
return Surface(self.game.settings["size"])
def draw(self):
self.screen.blit(scale(self.scene.draw(), self.scene.settings["scale"]), (0, 0))
update()
def update(self):
self.scene.update()
def run(self):
self.bgm_intro.play()
self.done = False
self.update_screen()
while not self.done:
if get_ticks() // (self.fps * 20) >= int(self.bgm_intro.get_length()) and not self.bgm_is_running:
self.bgm_intro.stop()
self.bgm.play(-1)
self.bgm_is_running = True
self.draw()
self.handle_events()
self.handle_input()
self.clock.tick(self.fps)
self.update()
def handle_events(self):
for event_ in event.get():
if event_.type == pg.QUIT:
self.done = True
break
if "events_filter" not in self.scene.settings or event_.type in self.scene.settings["events_filter"]:
                self.scene.handle_event(event_)
def handle_input(self):
self.handle_mouse_press()
keys_pressed = get_key_pressed()
for keycode in self.all_keycodes:
if keys_pressed[keycode]:
self.scene.handle_input(keycode)
def handle_mouse_press(self):
pressed = get_mouse_pressed(3)
mouse_pos = self.get_mouse_pos()
for key in range(3):
if pressed[key]:
self.scene.handle_mouse_press(key, mouse_pos)
def get_mouse_pos(self):
return pg.mouse.get_pos()[0] // self.scene_scale[0], \
pg.mouse.get_pos()[1] // self.scene_scale[1]
| 31.126761 | 113 | 0.600226 | [
"Apache-2.0"
] | Mio-coder/clicer | source/scripts/manager.py | 4,420 | Python |
from __future__ import absolute_import
from __future__ import print_function
import graph_tool.all as gt
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities, _overlapping_membership_to_list_of_communities
class StochasticBlockModel:
"""A Stochastic Blockmodel fit to Label Graph
This contains a stochastic block model instance constructed for a block model variant specified in parameters.
It can be fit to an instance of a graph and set of weights. More information on how to select parameters can be
found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
nested: boolean
whether to build a nested Stochastic Block Model or the regular variant,
will be automatically put under :code:`self.nested`.
use_degree_correlation: boolean
whether to correct for degree correlation in modeling, will be automatically
put under :code:`self.use_degree_correlation`.
allow_overlap: boolean
whether to allow overlapping clusters or not, will be automatically
put under :code:`self.allow_overlap`.
weight_model: string or None
decide whether to generate a weighted or unweighted graph,
will be automatically put under :code:`self.weight_model`.
Attributes
----------
model_: graph_tool.inference.BlockState or its subclass
an instance of the fitted model obtained from graph-tool
"""
def __init__(self, nested, use_degree_correlation, allow_overlap, weight_model):
self.nested = nested
self.use_degree_correlation = use_degree_correlation
self.allow_overlap = allow_overlap
self.weight_model = weight_model
self.model_ = None
def fit_predict(self, graph, weights):
"""Fits model to a given graph and weights list
        Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model after fitting.
        Parameters
----------
graph: graphtool.Graph
the graph to fit the model to
weights: graphtool.EdgePropertyMap<double>
the property map: edge -> weight (double) to fit the model to, if weighted variant
is selected
Returns
-------
numpy.ndarray
partition of labels, each sublist contains label indices
related to label positions in :code:`y`
"""
if self.weight_model:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap,
state_args=dict(recs=[weights],
rec_types=[self.weight_model])
)
else:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap
)
return self._detect_communities()
def _detect_communities(self):
if self.nested:
lowest_level = self.model_.get_levels()[0]
else:
lowest_level = self.model_
number_of_communities = lowest_level.get_B()
if self.allow_overlap:
            # get_overlap_blocks() returns the membership vector along with
            # edge vectors; we only need the membership vector here
membership_vector = list(lowest_level.get_overlap_blocks()[0])
else:
membership_vector = list(lowest_level.get_blocks())
if self.allow_overlap:
return _overlapping_membership_to_list_of_communities(membership_vector, number_of_communities)
return _membership_to_list_of_communities(membership_vector, number_of_communities)
def _model_fit_function(self):
if self.nested:
return gt.minimize_nested_blockmodel_dl
else:
return gt.minimize_blockmodel_dl
class GraphToolLabelGraphClusterer(LabelGraphClustererBase):
"""Fits a Stochastic Block Model to the Label Graph and infers the communities
    This clusterer clusters the label space by fitting a stochastic block
    model to the label network and inferring the community structure using graph-tool.
The obtained community structure is returned as the label clustering. More information on the inference itself
can be found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
model: StochasticBlockModel
the desired stochastic block model variant to use
Attributes
----------
graph_ : graphtool.Graph
object representing a label co-occurence graph
weights_ : graphtool.EdgeProperty<double>
edge weights defined by graph builder stored in a graphtool compatible format
.. note ::
This functionality is still undergoing research.
.. note ::
This clusterer is GPL-licenced and will taint your code with GPL restrictions.
References
----------
If you use this class please cite:
    .. code-block:: bibtex
        @article{peixoto_graph-tool_2014,
title = {The graph-tool python library},
url = {http://figshare.com/articles/graph_tool/1164194},
doi = {10.6084/m9.figshare.1164194},
urldate = {2014-09-10},
journal = {figshare},
author = {Peixoto, Tiago P.},
year = {2014},
keywords = {all, complex networks, graph, network, other}}
Examples
--------
An example code for using this clusterer with a classifier looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from yyskmultilearn.problem_transform import LabelPowerset
        from yyskmultilearn.cluster import LabelCooccurrenceGraphBuilder
        from yyskmultilearn.cluster.graphtool import GraphToolLabelGraphClusterer, StochasticBlockModel
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1000)
# construct a graph builder that will include
# label relations weighted by how many times they
# co-occurred in the data, without self-edges
graph_builder = LabelCooccurrenceGraphBuilder(
weighted = True,
include_self_edges = False
)
# select parameters for the model, we fit a flat,
# non-degree correlated, partitioning model
        # which will fit the normal distribution as the weights model
model = StochasticBlockModel(
nested=False,
use_degree_correlation=True,
allow_overlap=False,
weight_model='real-normal'
)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
        # setup the clusterer to use the graph builder and stochastic block model defined above
clusterer = GraphToolLabelGraphClusterer(graph_builder=graph_builder, model=model)
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, graph_builder, model):
super(GraphToolLabelGraphClusterer, self).__init__(graph_builder)
self.model = model
self.graph_builder = graph_builder
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
        array of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if len(community) > 0])
def _build_graph_instance(self, y):
edge_map = self.graph_builder.transform(y)
g = gt.Graph(directed=False)
g.add_vertex(y.shape[1])
self.weights_ = g.new_edge_property('double')
for edge, weight in edge_map.items():
e = g.add_edge(edge[0], edge[1])
self.weights_[e] = weight
self.graph_ = g
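if __name__ == '__main__':
    # Minimal usage sketch, not part of the library: it assumes graph-tool and
    # scipy are installed and compatible with the calls made above; the random
    # label matrix below is purely illustrative.
    from scipy import sparse
    from yyskmultilearn.cluster import LabelCooccurrenceGraphBuilder
    y = (sparse.random(50, 6, density=0.3, format='csr') > 0).astype(int)
    builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False)
    model = StochasticBlockModel(nested=False, use_degree_correlation=True,
                                 allow_overlap=False, weight_model=None)
    clusterer = GraphToolLabelGraphClusterer(graph_builder=builder, model=model)
    print(clusterer.fit_predict(None, y))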
| 36.699219 | 115 | 0.673018 | [
"BSD-2-Clause"
] | yuan776/scikit-multilearn | yyskmultilearn/cluster/graphtool.py | 9,395 | Python |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.glucose as bsg
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.glucose", ALL))
@pytest.mark.sampledata
def test_data(pd):
import bokeh.sampledata.glucose as bsg
assert isinstance(bsg.data, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 33.915254 | 82 | 0.270365 | [
"BSD-3-Clause"
] | BiYandong110/bokeh | tests/unit/bokeh/sampledata/test_glucose.py | 2,001 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode
import opengeode_io_py_mesh as mesh_io
if __name__ == '__main__':
mesh_io.initialize_mesh_io()
test_dir = os.path.dirname(__file__)
data_dir = os.path.abspath(os.path.join(test_dir, "../../../../tests/data"))
surface = opengeode.load_polygonal_surface3D(os.path.join(data_dir, "Armadillo.ply"))
if surface.nb_vertices() != 172974:
raise ValueError("[Test] Number of vertices in the loaded Surface is not correct" )
if surface.nb_polygons() != 345944:
raise ValueError("[Test] Number of polygons in the loaded Surface is not correct" )
opengeode.save_polygonal_surface3D(surface, "Armadillo_save.ply")
| 46.348837 | 91 | 0.74009 | [
"MIT"
] | Geode-solutions/OpenGeode-IO | bindings/python/tests/mesh/test-py-ply.py | 1,993 | Python |
# YouTube: https://youtu.be/8lP9h4gaKYA
# Blog post: https://caffeinealgorithm.com/blog/20210914/funcao-print-e-strings-em-python/
print('Estamos a usar a função print() e eu sou uma string.')
print("Continuo a ser uma string.")
print('A Maria disse que o Miguel estava "doente".') # A Maria disse que o Miguel estava "doente".
print('I\'m writing in English.') # I'm writing in English.
print('Caffeine', 'Algorithm') # Caffeine Algorithm
| 50 | 99 | 0.722222 | [
"MIT"
] | caffeinealgorithm/code-programming-series | Programar em Python/02-Funcao-print-e-Strings.py | 454 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
DESTRUIDO = 'Destruido'
ATIVO = 'Ativo'
GRAVIDADE = 10 # m/s^2
class Ator():
"""
Classe que representa um ator. Ele representa um ponto cartesiano na tela.
"""
_caracter_ativo = 'A'
_caracter_destruido = ' '
def __init__(self, x=0, y=0):
"""
Método de inicialização da classe. Deve inicializar os parâmetros x, y, caracter e status
:param x: Posição horizontal inicial do ator
:param y: Posição vertical inicial do ator
"""
self.y = y
self.x = x
self.status = ATIVO
def caracter(self):
return self._caracter_ativo if self.status == ATIVO else self._caracter_destruido
def calcular_posicao(self, tempo):
"""
Método que calcula a posição do ator em determinado tempo.
Deve-se imaginar que o tempo começa em 0 e avança de 0,01 segundos
:param tempo: o tempo do jogo
:return: posição x, y do ator
"""
return self.x, self.y
def colidir(self, outro_ator, intervalo=1):
"""
Método que executa lógica de colisão entre dois atores.
Só deve haver colisão se os dois atores tiverem seus status ativos.
Para colisão, é considerado um quadrado, com lado igual ao parâmetro intervalo, em volta do ponto onde se
encontra o ator. Se os atores estiverem dentro desse mesmo quadrado, seus status devem ser alterados para
destruido, seus caracteres para destruido também.
:param outro_ator: Ator a ser considerado na colisão
:param intervalo: Intervalo a ser considerado
:return:
"""
if self.status == ATIVO and outro_ator.status == ATIVO:
delta_x = abs(self.x - outro_ator.x)
delta_y = abs(self.y - outro_ator.y)
if delta_x <= intervalo and delta_y <= intervalo:
self.status = outro_ator.status = DESTRUIDO
class Obstaculo(Ator):
_caracter_ativo = 'O'
class Porco(Ator):
_caracter_ativo = '@'
_caracter_destruido = '+'
class DuploLancamentoExcecao(Exception):
pass
class Passaro(Ator):
velocidade_escalar = 10
def __init__(self, x=0, y=0):
"""
Método de inicialização de pássaro.
Deve chamar a inicialização de ator. Além disso, deve armazenar a posição inicial e incializar o tempo de
lançamento e angulo de lançamento
:param x:
:param y:
"""
super().__init__(x, y)
self._x_inicial = x
self._y_inicial = y
self._tempo_de_lancamento = None
        self._angulo_de_lancamento = None  # radians
def foi_lancado(self):
"""
Método que retorna verdaeira se o pássaro já foi lançado e falso caso contrário
:return: booleano
"""
return not self._tempo_de_lancamento is None
def colidir_com_chao(self):
"""
Método que executa lógica de colisão com o chão. Toda vez que y for menor ou igual a 0,
o status dos Passaro deve ser alterado para destruido, bem como o seu caracter
"""
pass
def calcular_posicao(self, tempo):
"""
Método que cálcula a posição do passaro de acordo com o tempo.
Antes do lançamento o pássaro deve retornar o valor de sua posição inicial
Depois do lançamento o pássaro deve calcular de acordo com sua posição inicial, velocidade escalar,
ângulo de lancamento, gravidade (constante GRAVIDADE) e o tempo do jogo.
Após a colisão, ou seja, ter seus status destruido, o pássaro deve apenas retornar a última posição calculada.
:param tempo: tempo de jogo a ser calculada a posição
:return: posição x, y
"""
if self.foi_lancado():
delta_t = tempo - self._tempo_de_lancamento
self._calcular_posicao_vertical(delta_t)
return super().calcular_posicao(tempo)
def lancar(self, angulo, tempo_de_lancamento):
"""
Lógica que lança o pássaro. Deve armazenar o ângulo e o tempo de lançamento para posteriores cálculo.
O ângulo é passado em graus e deve ser transformado em radianos
:param angulo:
:param tempo_de_lancamento:
:return:
"""
self._angulo_de_lancamento = angulo
self._tempo_de_lancamento = tempo_de_lancamento
def _calcular_posicao_vertical(self, delta_t):
y_atual = self._y_inicial
angulo_radianos = math.radians(self._angulo_de_lancamento)
y_atual += self.velocidade_escalar * delta_t * math.sin(angulo_radianos)
y_atual -= (GRAVIDADE * delta_t ** 2) / 2
self.y = y_atual
class PassaroAmarelo(Passaro):
_caracter_ativo = 'A'
_caracter_destruido = 'a'
velocidade_escalar = 30
class PassaroVermelho(Passaro):
_caracter_ativo = 'V'
_caracter_destruido = 'v'
velocidade_escalar = 20
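if __name__ == '__main__':
    # Minimal usage sketch, not part of the original exercise: launch a red
    # bird at a 45 degree angle at time 0 and print its trajectory over time.
    passaro = PassaroVermelho(x=0, y=1)
    passaro.lancar(45, tempo_de_lancamento=0)
    for decimo_de_segundo in range(0, 40, 5):
        tempo = decimo_de_segundo / 10
        print(tempo, passaro.calcular_posicao(tempo))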
| 31.157233 | 118 | 0.648365 | [
"MIT"
] | NTMaia/pythonbirds | atores.py | 5,030 | Python |
import sys
sys.path.append("..")
import os
import torch
import torchvision as tv
import numpy as np
from torch.utils.data import DataLoader
from torchvision import models
import torch.nn as nn
from utils import makedirs, tensor2cuda, load_model
from argument import parser
from visualization import VanillaBackprop
import patch_dataset as patd
from model.resnetdsbn import *
args = parser()
img_folder = 'grad_img'
img_folder = os.path.join(img_folder, args.dataset, args.affix)
makedirs(img_folder)
out_num = 1
transform_test = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.ToTensor()
])
te_dataset = patd.PatchDataset(path_to_images=args.data_root, fold='test',
transform=tv.transforms.ToTensor())
te_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, num_workers=1)
counter = 0
input_list = []
grad_list = []
label_list = []
for data, label in te_loader:
if int(np.sum(label.squeeze().numpy())) > 0:
disease = ''
for i in range(int(np.sum(label.squeeze().numpy()))):
disease_index = np.nonzero(label.squeeze().numpy())[0][i]
dis_temp = te_dataset.PRED_LABEL[disease_index]
disease = disease + ' ' + dis_temp
data, label = tensor2cuda(data), tensor2cuda(label)
# model_bns = resnet50dsbn(pretrained=args.pretrain, widefactor=args.widefactor)
model_std = models.resnet50()
num_classes=8
# model_bns.fc = nn.Linear(model_bns.fc.in_features, num_classes)
model_std.fc = nn.Linear(model_std.fc.in_features, num_classes)
# load_model(model_bns, args.load_checkpoint)
load_model(model_std, '../checkpoint/chexpert_gaussn_0.1/checkpoint_best.pth')
if torch.cuda.is_available():
# model_bns.cuda()
model_std.cuda()
# VBP = VanillaBackprop(model_bns)
VBP_std = VanillaBackprop(model_std)
# grad_bn0 = VBP.generate_gradients(data, label, [0]) # data: (1,3,96,96) label: (1,3)
# grad_bn1 = VBP.generate_gradients(data, label, [1])
grad_std = VBP_std.generate_gradients(data, label)
grads = []
# print(grad.shape)
for grad in [grad_std]:
grad_flat = grad.view(grad.shape[0], -1) # grad: (1, 3x96x96)
mean = grad_flat.mean(1, keepdim=True).unsqueeze(2).unsqueeze(3) # (1,1,1,1)
std = grad_flat.std(1, keepdim=True).unsqueeze(2).unsqueeze(3) # (1,1,1,1)
mean = mean.repeat(1, 1, data.shape[2], data.shape[3])
std = std.repeat(1, 1, data.shape[2], data.shape[3])
grad = torch.max(torch.min(grad, mean+3*std), mean-3*std)
print(grad.min(), grad.max())
grad -= grad.min()
grad /= grad.max()
grad = grad.cpu().numpy().squeeze() # (N, 28, 28)
grads.append(grad)
# grad *= 255.0
# label = label.cpu().numpy()
data = data.cpu().numpy().squeeze()
# data *= 255.0
# print('data shape ', data.shape)
# print('grad shape ', grad.shape)
input_list.append(data)
label_list.append(disease)
grad_list.append(grads)
# np.save(os.path.join(img_folder, 'data.npy'), np.array(input_list))
np.save(os.path.join(img_folder, 'label.npy'), np.array(label_list))
np.save(os.path.join(img_folder, 'grad.npy'), np.array(grad_list)) | 39.593023 | 94 | 0.634655 | [
"MIT"
] | peterhan91/Medical-Robust-Training | visualization/visualize.py | 3,405 | Python |
from gzip import decompress
from http import cookiejar
from json import loads, dumps
from os import environ
from time import strftime, gmtime
from urllib import request
def get_url(ticker):
env = environ.get('FLASK_ENV', 'development')
if env == 'development':
url = 'https://www.fundamentus.com.br/amline/cot_hist.php?papel='
else:
phproxy = 'http://shortbushash.com/proxy.php'
url = phproxy + '?q=https%3A%2F%2Fwww.fundamentus.com.br%2Famline%2Fcot_hist.php%3Fpapel%3D'
return url + ticker + '&hl=1a7', env
def build_headers(url, env):
if env == 'development':
headers = [
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
('Referer', url),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'),
]
else:
headers = [
('Accept', 'application/json, text/javascript, */*; q=0.01'),
('Accept-Encoding', 'gzip, deflate, br'),
('Referer', url),
('User-Agent', 'PostmanRuntime/7.26.8'),
]
return headers
def parse_epoch_time(parsed_content):
return [[strftime('%Y-%m-%d', gmtime(unix_epoch_time/1000)), price] for [unix_epoch_time, price] in parsed_content]
def load_prices(ticker, parse_json=True):
url, env = get_url(ticker)
cookie_jar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = build_headers(url, env)
with opener.open(url) as link:
gzip_response = link.read()
binary_response = gzip_response.decode() if env == 'development' else decompress(gzip_response)
parsed_content = loads(binary_response)
content = parse_epoch_time(parsed_content)
return dumps(content) if parse_json else content
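if __name__ == '__main__':
    # Minimal manual check, not part of the service code: it assumes 'PETR4'
    # is a valid Fundamentus ticker and that the endpoint is reachable.
    prices = load_prices('PETR4', parse_json=False)
    print(prices[:5])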
| 35.178571 | 162 | 0.658883 | [
"MIT"
] | pedroeml/stock-projection-service | historical_prices.py | 1,970 | Python |
import numpy as np
import numpy.ma as npma
from scipy import stats
import matplotlib.pyplot as plt
import baspy as bp
import fnmatch
"""
Created on Wed Nov 27 18:34 2019
@author: Christine McKenna
========================================================================
Purpose: Plots Supp Fig 2, a pdf of all possible 20-year trends in gsat
for CMIP6 piControl simulations for each model. First detrends
the raw gsat time series to remove any long term drift,
which could bias 20-year trends (e.g. if positive drift,
pdf of 20-year trends likely biased positive).
Saves pdf of 20-year trends for models used in Supp Fig 8.
========================================================================
"""
# Required directories
loaddir_CMIP = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig2/saved_arrays'
savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'SuppFig8/saved_data'
### ------ Load in CMIP6 data ------
# Load models
models = np.load(loaddir_CMIP+'/models_gtas_CMIP6_piControl.npy')
# Load catalogue so can extract runids
var = 'tas'
cat_PI = bp.catalogue(dataset='cmip6',Var=var,Experiment='piControl',\
CMOR='Amon')
years = np.linspace(1,20,20)
### Process data, one model and RunID at a time
i = 0
fig,axs = plt.subplots(7,7,sharex=True,sharey=True,\
figsize=(15,12))
fig.suptitle('PDFs of rolling GSAT trends for 20-year segments of CMIP6 '+\
'piControl runs',fontsize=20)
axs = axs.ravel()
for model in models:
## Get data for model
filtmod_PI = cat_PI[cat_PI['Model'] == model]
## Only keep r1i1p1f?
runids_PI = np.unique(filtmod_PI['RunID'])
runids_PI = fnmatch.filter(runids_PI,'r1i1p1f?')
## Get data for each RunID
for runid in runids_PI:
## Load gsat data
gsat_tmp = np.load(loaddir_CMIP+'/gtas_'+model+'_'+runid+\
'_CMIP6_piControl.npy')
ny = len(gsat_tmp)
## Remove any drift
[m,c,_,_,_] = stats.linregress(np.linspace(0,ny-1,ny),gsat_tmp)
gsat_lin = m*np.linspace(0,ny-1,ny)+c
gsat = gsat_tmp - gsat_lin
## Calculate trends
gsat_trends = np.zeros([ny-20])
        for y in range(0,ny-20):
[m,_,_,_,_] = stats.linregress(years,gsat[y:y+20])
gsat_trends[y] = m*10
## If model used in Supp Fig 8 save pdf of 20y trends
if (model == 'BCC-CSM2-MR') or (model == 'MIROC-ES2L'):
np.save(savedir+'/gsat_20ytrends_CMIP6_piControl_'+\
model+'.npy',gsat_trends)
### ------ Plot results ------
### Plot individual models
axs[i].hist(gsat_trends,density=True)
axs[i].set_title(model,fontsize=13)
axs[i].plot(np.zeros([2]),[0,11],'grey',linewidth=1)
axs[i].plot(np.ones([2])*(-0.075),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.072),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(-0.084),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].plot(np.ones([2])*(0.094),[0,11],'black',\
linewidth=1,linestyle='--')
axs[i].tick_params(labelsize=13)
i += 1
fig.text(0.5,0.02,'$^{\circ}$C / decade',ha='center',\
va='center',fontsize=18)
fig.text(0.02,0.5,'Probability density',ha='center',va='center',\
rotation='vertical',fontsize=18)
axs[i-1].set_xlim([-0.3,0.3])
axs[i-1].set_ylim([0,11])
axs[i].axis('off')
plt.subplots_adjust(top=0.9,bottom=0.07,left=0.07,right=0.97,\
wspace=0.17,hspace=0.27)
plt.show()
| 33.495575 | 75 | 0.573844 | [
"Apache-2.0"
] | Priestley-Centre/Near_term_warming | analysis_figure_code/SuppFig2/SuppFig2.py | 3,785 | Python |
import unittest
import progresspie
class TestCalculationMethods(unittest.TestCase):
def test_is_black(self):
self.assertEqual(progresspie.is_black(99, 99, 99), False)
self.assertEqual(progresspie.is_black(0, 55, 55), False)
self.assertEqual(progresspie.is_black(12, 55, 55), False)
self.assertEqual(progresspie.is_black(13, 55, 55), True)
self.assertEqual(progresspie.is_black(13, 45, 45), False)
self.assertEqual(progresspie.is_black(87, 20, 40), True)
if __name__ == '__main__':
unittest.main()
| 28 | 65 | 0.698214 | [
"MIT"
] | paujim/ProgressPie | test_progresspie.py | 560 | Python |
import requests
def gen_from_urls(urls: tuple) -> tuple:
for resp in (requests.get(url) for url in urls):
        # yield returns only one item at a time.
yield len(resp.content), resp.status_code, resp.url
if __name__ == "__main__":
urls = (
"https://www.oreilly.com/",
"https://twitter.com/",
"https://www.google.com/",
)
for resp_len, status, url in gen_from_urls(urls):
print(resp_len, status, url)
| 24.368421 | 59 | 0.609071 | [
"MIT"
] | archeranimesh/HeadFirstPython | SRC/Chapter_13-Advanced-Iteration/12_function_yield.py | 463 | Python |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import os
import platform
from shutil import which
import subprocess
import sys
import time
# Make sure we're using Python3
assert sys.version.startswith('3'), "This script is only meant to work with Python3"
# Make sure to get osrf_pycommon from the vendor folder
vendor_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'vendor'))
sys.path.insert(0, os.path.join(vendor_path, 'osrf_pycommon'))
import osrf_pycommon
# Assert that we got it from the right place
assert osrf_pycommon.__file__.startswith(vendor_path), \
("osrf_pycommon imported from '{0}' which is not in the vendor folder '{1}'"
.format(osrf_pycommon.__file__, vendor_path))
from osrf_pycommon.cli_utils.common import extract_argument_group
from osrf_pycommon.terminal_color import sanitize
from .packaging import build_and_test_and_package
from .util import change_directory
from .util import remove_folder
from .util import force_color
from .util import generated_venv_vars
from .util import info
from .util import log
from .util import UnbufferedIO
# Enforce unbuffered output
sys.stdout = UnbufferedIO(sys.stdout)
sys.stderr = UnbufferedIO(sys.stderr)
# One of the maintainers of pyparsing suggests pinning to 2.4.7 for now;
# see https://github.com/pyparsing/pyparsing/issues/323
pip_dependencies = [
'EmPy',
'coverage',
'catkin_pkg',
'flake8',
'flake8-blind-except==0.1.1',
'flake8-builtins',
'flake8-class-newline',
'flake8-comprehensions',
'flake8-deprecated',
'flake8-docstrings',
'flake8-import-order',
'flake8-quotes',
'importlib-metadata',
'mock',
'nose',
'pep8',
'pydocstyle',
'pyflakes',
'pyparsing==2.4.7',
'pytest',
'pytest-cov',
'pytest-mock',
'pytest-repeat',
'pytest-rerunfailures',
'pytest-runner',
'pyyaml',
'vcstool',
'yamllint',
]
# https://github.com/pyca/cryptography/issues/5433
pip_cryptography_version = '==3.0'
if sys.platform in ('darwin'):
pip_dependencies += [
f'cryptography{pip_cryptography_version}',
'lxml',
'netifaces'
]
colcon_packages = [
'colcon-core',
'colcon-defaults',
'colcon-library-path',
'colcon-metadata',
'colcon-mixin',
'colcon-output',
'colcon-package-information',
'colcon-package-selection',
'colcon-parallel-executor',
'colcon-powershell',
'colcon-python-setup-py',
'colcon-recursive-crawl',
'colcon-test-result',
'colcon-cmake',
'colcon-ros',
]
if sys.platform != 'win32':
colcon_packages += [
'colcon-bash',
'colcon-zsh',
]
gcov_flags = '--coverage'
colcon_space_defaults = {
'sourcespace': 'src',
'buildspace': 'build',
'installspace': 'install',
}
def main(sysargv=None):
args = get_args(sysargv=sysargv)
blacklisted_package_names = []
if not args.packaging:
build_function = build_and_test
blacklisted_package_names += [
'actionlib_msgs',
'common_interfaces',
'cv_bridge',
'opencv_tests',
'ros1_bridge',
'shape_msgs',
'stereo_msgs',
'vision_opencv',
]
else:
build_function = build_and_test_and_package
if sys.platform in ('darwin', 'win32'):
blacklisted_package_names += [
'pendulum_control',
'ros1_bridge',
'rttest',
'tlsf',
'tlsf_cpp',
]
# There are no Windows debug packages available for PyQt5 and PySide2, so
# python_qt_bindings can't be imported to run or test rqt_graph or
# rqt_py_common.
if sys.platform == 'win32' and args.cmake_build_type == 'Debug':
blacklisted_package_names.append('rqt_graph')
blacklisted_package_names.append('rqt_py_common')
blacklisted_package_names.append('rqt_reconfigure')
# TODO(wjwwood): remove this when a better solution is found, as
# this is just a work around for https://github.com/ros2/build_cop/issues/161
# If on Windows, kill any still running `colcon` processes to avoid
# problems when trying to delete files from pip or the workspace during
# this job.
if sys.platform == 'win32':
os.system('taskkill /f /im colcon.exe')
time.sleep(2) # wait a bit to avoid a race
return run(args, build_function, blacklisted_package_names=blacklisted_package_names)
def get_args(sysargv=None):
parser = argparse.ArgumentParser(
description="Builds the ROS2 repositories as a single batch job")
parser.add_argument(
'--packaging', default=False, action='store_true',
help='create an archive of the install space')
parser.add_argument(
'--repo-file-url', required=True,
help="url of the ros2.repos file to fetch and use for the basis of the batch job")
parser.add_argument(
'--supplemental-repo-file-url', default=None,
help="url of a .repos file to fetch and merge with the ros2.repos file")
parser.add_argument(
'--test-branch', default=None,
help="branch to attempt to checkout before doing batch job")
parser.add_argument(
'--colcon-branch', default=None,
help='Use a specific branch of the colcon repositories, if the branch '
"doesn't exist fall back to the default branch (default: latest "
'release)')
parser.add_argument(
'--white-space-in', nargs='*', default=[],
choices=['sourcespace', 'buildspace', 'installspace', 'workspace'],
help="which folder structures in which white space should be added")
parser.add_argument(
'--do-venv', default=False, action='store_true',
help="create and use a virtual env in the build process")
parser.add_argument(
'--os', default=None, choices=['linux', 'osx', 'windows'])
parser.add_argument(
'--ignore-rmw', nargs='*', default=[],
help='ignore the passed RMW implementations as well as supporting packages')
parser.add_argument(
'--connext-debs', default=False, action='store_true',
help="use Debian packages for Connext instead of binaries off the RTI website (Linux only)")
parser.add_argument(
'--isolated', default=False, action='store_true',
help="build and install each package a separate folders")
parser.add_argument(
'--force-ansi-color', default=False, action='store_true',
help="forces this program to output ansi color")
parser.add_argument(
'--ros-distro', required=True,
help="The ROS distribution being built")
parser.add_argument(
'--ros1-path', default=None,
help="path of ROS 1 workspace to be sourced")
parser.add_argument(
'--mixed-ros-overlay-pkgs', nargs='*', default=[],
help='space separated list of packages to be built in an overlay workspace with ROS 1')
parser.add_argument(
'--colcon-mixin-url', default=None,
help='A mixin index url to be included by colcon')
parser.add_argument(
'--cmake-build-type', default=None,
help='select the CMake build type')
parser.add_argument(
'--build-args', default=None,
help="arguments passed to the 'build' verb")
parser.add_argument(
'--test-args', default=None,
help="arguments passed to the 'test' verb")
parser.add_argument(
'--src-mounted', default=False, action='store_true',
help="src directory is already mounted into the workspace")
parser.add_argument(
'--compile-with-clang', default=False, action='store_true',
help="compile with clang instead of gcc")
parser.add_argument(
'--coverage', default=False, action='store_true',
help="enable collection of coverage statistics")
parser.add_argument(
'--workspace-path', default=None,
help="base path of the workspace")
parser.add_argument(
'--python-interpreter', default=None,
help='pass different Python interpreter')
parser.add_argument(
'--visual-studio-version', default=None, required=(os.name == 'nt'),
help='select the Visual Studio version')
parser.add_argument(
'--source-space', dest='sourcespace',
help='source directory path')
parser.add_argument(
'--build-space', dest='buildspace',
help='build directory path')
parser.add_argument(
'--install-space', dest='installspace',
help='install directory path')
argv = sysargv[1:] if sysargv is not None else sys.argv[1:]
argv, build_args = extract_argument_group(argv, '--build-args')
if '--test-args' in argv:
argv, test_args = extract_argument_group(argv, '--test-args')
else:
build_args, test_args = extract_argument_group(build_args, '--test-args')
args = parser.parse_args(argv)
args.build_args = build_args
args.test_args = test_args
for name in ('sourcespace', 'buildspace', 'installspace'):
space_directory = getattr(args, name)
if name in args.white_space_in and space_directory is not None:
raise Exception('Argument {} and "--white-space-in" cannot both be used'.format(name))
elif space_directory is None:
space_directory = colcon_space_defaults[name]
if name in args.white_space_in:
space_directory += ' space'
setattr(args, name, space_directory)
return args
def process_coverage(args, job):
print('# BEGIN SUBSECTION: coverage analysis')
    # Capture all gcda/gcno files (all of them inside the buildspace)
coverage_file = os.path.join(args.buildspace, 'coverage.info')
cmd = [
'lcov',
'--capture',
'--directory', args.buildspace,
'--output', str(coverage_file)]
print(cmd)
subprocess.run(cmd, check=True)
# Filter out system coverage and test code
cmd = [
'lcov',
'--remove', coverage_file,
'--output', coverage_file,
'/usr/*', # no system files in reports
'/home/rosbuild/*', # remove rti_connext installed in rosbuild
'*/test/*',
'*/tests/*',
'*gtest_vendor*',
'*gmock_vendor*']
print(cmd)
subprocess.run(cmd, check=True)
# Transform results to the cobertura format
outfile = os.path.join(args.buildspace, 'coverage.xml')
print('Writing coverage.xml report at path {}'.format(outfile))
cmd = ['lcov_cobertura', coverage_file, '--output', outfile]
subprocess.run(cmd, check=True)
print('# END SUBSECTION')
return 0
def build_and_test(args, job):
compile_with_clang = args.compile_with_clang and args.os == 'linux'
print('# BEGIN SUBSECTION: build')
cmd = [
args.colcon_script, 'build',
'--base-paths', '"%s"' % args.sourcespace,
'--build-base', '"%s"' % args.buildspace,
'--install-base', '"%s"' % args.installspace,
] + (['--merge-install'] if not args.isolated else []) + \
args.build_args
cmake_args = ['-DBUILD_TESTING=ON', '--no-warn-unused-cli']
if args.cmake_build_type:
cmake_args.append(
'-DCMAKE_BUILD_TYPE=' + args.cmake_build_type)
if compile_with_clang:
cmake_args.extend(
['-DCMAKE_C_COMPILER=/usr/bin/clang', '-DCMAKE_CXX_COMPILER=/usr/bin/clang++'])
if '--cmake-args' in cmd:
index = cmd.index('--cmake-args')
cmd[index + 1:index + 1] = cmake_args
else:
cmd.append('--cmake-args')
cmd.extend(cmake_args)
if args.coverage:
if args.os == 'linux':
ament_cmake_args = [
'-DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} ' + gcov_flags + '"',
'-DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} ' + gcov_flags + '"']
if '--ament-cmake-args' in cmd:
index = cmd.index('--ament-cmake-args')
cmd[index + 1:index + 1] = ament_cmake_args
else:
cmd.append('--ament-cmake-args')
cmd.extend(ament_cmake_args)
ret_build = job.run(cmd, shell=True)
info("colcon build returned: '{0}'".format(ret_build))
print('# END SUBSECTION')
if ret_build:
return ret_build
print('# BEGIN SUBSECTION: test')
test_cmd = [
args.colcon_script, 'test',
'--base-paths', '"%s"' % args.sourcespace,
'--build-base', '"%s"' % args.buildspace,
'--install-base', '"%s"' % args.installspace,
]
if not args.isolated:
test_cmd.append('--merge-install')
if args.coverage:
test_cmd.append('--pytest-with-coverage')
test_cmd.extend(args.test_args)
# In Foxy and prior, xunit2 format is needed to make Jenkins xunit plugin 2.x happy
# After Foxy, we introduced per-package changes to make local builds and CI
# builds act the same.
if args.ros_distro == 'foxy':
pytest_args = ['-o', 'junit_family=xunit2']
# We should only have one --pytest-args option, or some options might get ignored
if '--pytest-args' in test_cmd:
pytest_opts_index = test_cmd.index('--pytest-args') + 1
test_cmd = test_cmd[:pytest_opts_index] + pytest_args + test_cmd[pytest_opts_index:]
else:
test_cmd.append('--pytest-args')
test_cmd.extend(pytest_args)
ret_test = job.run(test_cmd, exit_on_error=False, shell=True)
info("colcon test returned: '{0}'".format(ret_test))
print('# END SUBSECTION')
if ret_test:
return ret_test
print('# BEGIN SUBSECTION: test-result --all')
# Collect the test results
ret_test_results = job.run(
[args.colcon_script, 'test-result', '--test-result-base', '"%s"' % args.buildspace, '--all'],
exit_on_error=False, shell=True
)
info("colcon test-result returned: '{0}'".format(ret_test_results))
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: test-result')
# Collect the test results
ret_test_results = job.run(
[args.colcon_script, 'test-result', '--test-result-base', '"%s"' % args.buildspace],
exit_on_error=False, shell=True
)
info("colcon test-result returned: '{0}'".format(ret_test_results))
print('# END SUBSECTION')
if args.coverage and args.os == 'linux':
process_coverage(args, job)
    # Uncomment this line to make failing tests count as a failure of this command.
    # return 0 if ret_test == 0 and ret_test_results == 0 else 1
return 0
def run(args, build_function, blacklisted_package_names=None):
if blacklisted_package_names is None:
blacklisted_package_names = []
if args.force_ansi_color:
force_color()
info("run_ros2_batch called with args:")
for arg in vars(args):
info(sanitize(" - {0}={1}".format(arg, getattr(args, arg))))
job = None
args.workspace = 'work space' if 'workspace' in args.white_space_in else 'ws'
platform_name = platform.platform().lower()
if args.os == 'linux' or platform_name.startswith('linux'):
args.os = 'linux'
from .linux_batch import LinuxBatchJob
job = LinuxBatchJob(args)
elif args.os == 'osx' or platform_name.startswith('darwin') or platform_name.startswith('macos'):
args.os = 'osx'
from .osx_batch import OSXBatchJob
job = OSXBatchJob(args)
elif args.os == 'windows' or platform_name.startswith('windows'):
args.os = 'windows'
from .windows_batch import WindowsBatchJob
job = WindowsBatchJob(args)
if args.do_venv and args.os == 'windows':
sys.exit("--do-venv is not supported on windows")
# Set the TERM env variable to coerce the output of Make to be colored.
os.environ['TERM'] = os.environ.get('TERM', 'xterm-256color')
if args.os == 'windows':
# Set the ConEmuANSI env variable to trick some programs (vcs) into
# printing ANSI color codes on Windows.
os.environ['ConEmuANSI'] = 'ON'
# Set the appropriate GIT_* env variables in case vcs needs to merge branches
os.environ['GIT_AUTHOR_EMAIL'] = '[email protected]'
os.environ['GIT_AUTHOR_NAME'] = 'nobody'
os.environ['GIT_COMMITTER_EMAIL'] = '[email protected]'
os.environ['GIT_COMMITTER_NAME'] = 'nobody'
info("Using workspace: @!{0}", fargs=(args.workspace,))
# git doesn't work reliably inside qemu, so we're assuming that somebody
# already checked out the code on the host and mounted it in at the right
# place in <workspace>/src, which we don't want to remove here.
if args.src_mounted:
remove_folder(os.path.join(args.workspace, 'build'))
remove_folder(os.path.join(args.workspace, 'install'))
else:
remove_folder(args.workspace)
if not os.path.isdir(args.workspace):
os.makedirs(args.workspace)
# Allow batch job to do OS specific stuff
job.pre()
# ROS_DOMAIN_ID must be unique to each CI machine on a network to avoid crosstalk
if 'ROS_DOMAIN_ID' not in os.environ:
raise KeyError('ROS_DOMAIN_ID environment variable must be set')
# Check the env
job.show_env()
colcon_script = None
# Enter a venv if asked to, the venv must be in a path without spaces
if args.do_venv:
print('# BEGIN SUBSECTION: enter virtualenv')
if args.os != 'linux':
# Do not try this on Linux as elevated privileges are needed.
# The Linux host or Docker image will need to ensure the right
# version of virtualenv is available.
job.run([sys.executable, '-m', 'pip', 'install', '-U', 'virtualenv==16.7.9'])
venv_subfolder = 'venv'
remove_folder(venv_subfolder)
job.run([
sys.executable, '-m', 'virtualenv', '--system-site-packages',
'-p', sys.executable, venv_subfolder])
venv_path = os.path.abspath(os.path.join(os.getcwd(), venv_subfolder))
venv, venv_python = generated_venv_vars(venv_path)
job.push_run(venv) # job.run is now venv
job.push_python(venv_python) # job.python is now venv_python
job.show_env()
print('# END SUBSECTION')
# Now inside of the workspace...
with change_directory(args.workspace):
print('# BEGIN SUBSECTION: install Python packages')
# Update setuptools
job.run(['"%s"' % job.python, '-m', 'pip', 'install', '-U', 'pip', 'setuptools'],
shell=True)
# Print setuptools version
job.run(['"%s"' % job.python, '-c', '"import setuptools; print(setuptools.__version__)"'],
shell=True)
# Print the pip version
job.run(['"%s"' % job.python, '-m', 'pip', '--version'], shell=True)
# Install pip dependencies
pip_packages = list(pip_dependencies)
def need_package_from_pipy(pkg_name):
try:
importlib.import_module(pkg_name)
except ModuleNotFoundError:
return True
return False
# We prefer to get mypy from the distribution if it exists. If not we install it via pip.
if need_package_from_pipy("mypy"):
if args.ros_distro in ["foxy", "galactic"]:
pip_packages += ["mypy==0.761"]
else:
pip_packages += ["mypy==0.931"]
# We prefer to get lark from the distribution if it exists. If not we install it via pip.
if need_package_from_pipy("lark"):
if args.ros_distro in ["foxy", "galactic"]:
pip_packages += ["lark-parser==0.8.1"]
else:
pip_packages += ["lark==1.1.1"]
if sys.platform == 'win32':
# Install fork of pyreadline containing fix for deprecation warnings
# TODO(jacobperron): Until upstream issue is resolved https://github.com/pyreadline/pyreadline/issues/65
pip_packages += ['git+https://github.com/osrf/pyreadline']
if args.cmake_build_type == 'Debug':
pip_packages += [
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cffi-1.14.0-cp38-cp38d-win_amd64.whl', # required by cryptography
'https://github.com/ros2/ros2/releases/download/cryptography-archives/cryptography-2.9.2-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/lxml-archives/lxml-4.5.1-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/netifaces-archives/netifaces-0.10.9-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/numpy-archives/numpy-1.18.4-cp38-cp38d-win_amd64.whl',
'https://github.com/ros2/ros2/releases/download/typed-ast-archives/typed_ast-1.4.1-cp38-cp38d-win_amd64.whl', # required by mypy
]
else:
pip_packages += [
f'cryptography{pip_cryptography_version}',
'lxml',
'netifaces',
'numpy',
]
if not args.colcon_branch:
pip_packages += colcon_packages
if sys.platform == 'win32':
job.run(
['"%s"' % job.python, '-m', 'pip', 'uninstall', '-y'] +
colcon_packages, shell=True)
# to ensure that the build type specific package is installed
job.run(
['"%s"' % job.python, '-m', 'pip', 'uninstall', '-y'] +
[f'cryptography{pip_cryptography_version}', 'lxml', 'numpy'], shell=True)
pip_cmd = ['"%s"' % job.python, '-m', 'pip', 'install', '-U']
if args.do_venv or sys.platform == 'win32':
# Force reinstall so all dependencies are in virtual environment
# On Windows since we switch between the debug and non-debug
# interpreter all packages need to be reinstalled too
pip_cmd.append('--force-reinstall')
job.run(
pip_cmd + pip_packages,
shell=True)
# OS X can't invoke a file which has a space in the shebang line
# therefore invoking vcs explicitly through Python
if args.do_venv:
vcs_cmd = [
'"%s"' % job.python,
'"%s"' % os.path.join(venv_path, 'bin', 'vcs')]
else:
vcs_cmd = ['vcs']
if args.colcon_branch:
# create .repos file for colcon repositories
os.makedirs('colcon', exist_ok=True)
with open('colcon/colcon.repos', 'w') as h:
h.write('repositories:\n')
for name in colcon_packages:
h.write(' %s:\n' % name)
h.write(' type: git\n')
h.write(
' url: https://github.com/colcon/%s.git\n' % name)
# clone default branches
job.run(
vcs_cmd + [
'import', 'colcon', '--force', '--retry', '5', '--input',
'colcon/colcon.repos'],
shell=True)
# use -b and --track to checkout correctly when file/folder
# with the same name exists
job.run(
vcs_cmd + [
'custom', 'colcon', '--args', 'checkout',
'-b', args.colcon_branch,
'--track', 'origin/' + args.colcon_branch],
exit_on_error=False)
# install colcon packages from local working copies
job.run(
['"%s"' % job.python, '-m', 'pip', 'install', '-U'] +
['colcon/%s' % name for name in colcon_packages],
shell=True)
if args.do_venv and sys.platform != 'win32':
colcon_script = os.path.join(venv_path, 'bin', 'colcon')
else:
colcon_script = which('colcon')
args.colcon_script = colcon_script
# Show what pip has
job.run(['"%s"' % job.python, '-m', 'pip', 'freeze', '--all'], shell=True)
print('# END SUBSECTION')
# Fetch colcon mixins
if args.colcon_mixin_url:
true_cmd = 'VER>NUL' if sys.platform == 'win32' else 'true'
job.run([args.colcon_script, 'mixin', 'remove', 'default', '||', true_cmd], shell=True)
job.run([args.colcon_script, 'mixin', 'add', 'default', args.colcon_mixin_url], shell=True)
job.run([args.colcon_script, 'mixin', 'update', 'default'], shell=True)
# Skip git operations on arm because git doesn't work in qemu. Assume
# that somebody has already pulled the code on the host and mounted it
# in.
if not args.src_mounted:
print('# BEGIN SUBSECTION: import repositories')
repos_file_urls = [args.repo_file_url]
if args.supplemental_repo_file_url is not None:
repos_file_urls.append(args.supplemental_repo_file_url)
repos_filenames = []
for index, repos_file_url in enumerate(repos_file_urls):
repos_filename = '{0:02d}-{1}'.format(index, os.path.basename(repos_file_url))
_fetch_repos_file(repos_file_url, repos_filename, job)
repos_filenames.append(repos_filename)
# Use the repository listing and vcstool to fetch repositories
if not os.path.exists(args.sourcespace):
os.makedirs(args.sourcespace)
for filename in repos_filenames:
job.run(vcs_cmd + ['import', '"%s"' % args.sourcespace, '--force', '--retry', '5',
'--input', filename], shell=True)
print('# END SUBSECTION')
if args.test_branch is not None:
print('# BEGIN SUBSECTION: checkout custom branch')
# Store current branch as well-known branch name for later rebasing
info('Attempting to create a well known branch name for all the default branches')
job.run(vcs_cmd + ['custom', '.', '--git', '--args', 'checkout', '-b', '__ci_default'])
# Attempt to switch all the repositories to a given branch
info("Attempting to switch all repositories to the '{0}' branch"
.format(args.test_branch))
# use -b and --track to checkout correctly when file/folder with the same name exists
vcs_custom_cmd = vcs_cmd + [
'custom', '.', '--args', 'checkout',
'-b', args.test_branch, '--track', 'origin/' + args.test_branch]
ret = job.run(vcs_custom_cmd, exit_on_error=False)
info("'{0}' returned exit code '{1}'", fargs=(" ".join(vcs_custom_cmd), ret))
print()
            # Attempt to merge the __ci_default branch into the test branch.
# This is to ensure that the changes on the branch still work
# when applied to the latest version of the default branch.
info("Attempting to merge all repositories to the '__ci_default' branch")
vcs_custom_cmd = vcs_cmd + ['custom', '.', '--git', '--args', 'merge', '__ci_default']
ret = job.run(vcs_custom_cmd)
info("'{0}' returned exit code '{1}'", fargs=(" ".join(vcs_custom_cmd), ret))
print()
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: repository hashes')
# Show the latest commit log on each repository (includes the commit hash).
job.run(vcs_cmd + ['log', '-l1', '"%s"' % args.sourcespace], shell=True)
print('# END SUBSECTION')
print('# BEGIN SUBSECTION: vcs export --exact')
    # Show the output of 'vcs export --exact'
job.run(
vcs_cmd + ['export', '--exact', '"%s"' % args.sourcespace], shell=True,
        # if a repo has been rebased against the default branch, vcs can't detect the remote
exit_on_error=False)
print('# END SUBSECTION')
# blacklist rmw packages as well as their dependencies where possible
if 'rmw_connext_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_connext_cpp',
'rosidl_typesupport_connext_c',
'rosidl_typesupport_connext_cpp',
]
if 'rmw_connext_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_connext_dynamic_cpp',
]
if 'rmw_connext_cpp' in args.ignore_rmw and 'rmw_connext_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'connext_cmake_module',
'rmw_connext_shared_cpp',
]
if 'rmw_connextdds' in args.ignore_rmw:
blacklisted_package_names += [
'rti_connext_dds_cmake_module',
'rmw_connextdds_common',
'rmw_connextdds',
'rmw_connextddsmicro',
]
if 'rmw_cyclonedds_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'cyclonedds',
'cyclonedds_cmake_module',
'rmw_cyclonedds_cpp',
]
if 'rmw_fastrtps_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_fastrtps_cpp',
]
if 'rmw_fastrtps_cpp' in args.ignore_rmw and 'rmw_connextdds' in args.ignore_rmw:
blacklisted_package_names += [
'rosidl_typesupport_fastrtps_c',
'rosidl_typesupport_fastrtps_cpp',
]
if 'rmw_fastrtps_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_fastrtps_dynamic_cpp',
]
if ('rmw_fastrtps_cpp' in args.ignore_rmw and
'rmw_fastrtps_dynamic_cpp' in args.ignore_rmw and
# TODO(asorbini) Ideally `rmw_connextdds` would only depend on `fastcdr`
# via `rosidl_typesupport_fastrtps_c[pp]`, but they depend on `fastrtps`.
'rmw_connextdds' in args.ignore_rmw):
blacklisted_package_names += [
'fastrtps',
'fastrtps_cmake_module',
]
if 'rmw_fastrtps_cpp' in args.ignore_rmw and 'rmw_fastrtps_dynamic_cpp' in args.ignore_rmw:
blacklisted_package_names += [
'rmw_fastrtps_shared_cpp',
]
# Allow the batch job to push custom sourcing onto the run command
job.setup_env()
# create COLCON_IGNORE files in package folders which should not be used
if blacklisted_package_names:
print('# BEGIN SUBSECTION: ignored packages')
print('Trying to ignore the following packages:')
        for name in blacklisted_package_names:
            print('- ' + name)
output = subprocess.check_output(
[colcon_script, 'list', '--base-paths', args.sourcespace])
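        # 'colcon list' prints one package per line: <name>\t<path>\t<build type>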
for line in output.decode().splitlines():
package_name, package_path, _ = line.split('\t', 2)
if package_name in blacklisted_package_names:
marker_file = os.path.join(package_path, 'COLCON_IGNORE')
print('Create marker file: ' + marker_file)
with open(marker_file, 'w'):
pass
print('# END SUBSECTION')
rc = build_function(args, job)
job.post()
return rc
def _fetch_repos_file(url, filename, job):
"""Use curl to fetch a repos file and display the contents."""
job.run(['curl', '-skL', url, '-o', filename])
log("@{bf}==>@| Contents of `%s`:" % filename)
with open(filename, 'r') as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
| 41.210127 | 156 | 0.607753 | [ "Apache-2.0" ] | jlblancoc/ci | ros2_batch_job/__main__.py | 32,556 | Python |
"""database session management."""
import logging
import os
from contextlib import contextmanager
from typing import Iterator
import attr
import psycopg2
import sqlalchemy as sa
from fastapi_utils.session import FastAPISessionMaker as _FastAPISessionMaker
from sqlalchemy.orm import Session as SqlSession
from stac_fastapi.sqlalchemy.config import SqlalchemySettings
from stac_fastapi.types import errors
logger = logging.getLogger(__name__)
class FastAPISessionMaker(_FastAPISessionMaker):
"""FastAPISessionMaker."""
@contextmanager
def context_session(self) -> Iterator[SqlSession]:
"""Override base method to include exception handling."""
try:
yield from self.get_db()
except sa.exc.StatementError as e:
if isinstance(e.orig, psycopg2.errors.UniqueViolation):
raise errors.ConflictError("resource already exists") from e
elif isinstance(e.orig, psycopg2.errors.ForeignKeyViolation):
raise errors.ForeignKeyError("collection does not exist") from e
logger.error(e, exc_info=True)
raise errors.DatabaseError("unhandled database error")
@attr.s
class Session:
"""Database session management."""
reader_conn_string: str = attr.ib()
writer_conn_string: str = attr.ib()
@classmethod
def create_from_env(cls):
"""Create from environment."""
return cls(
reader_conn_string=os.environ["READER_CONN_STRING"],
writer_conn_string=os.environ["WRITER_CONN_STRING"],
)
@classmethod
def create_from_settings(cls, settings: SqlalchemySettings) -> "Session":
"""Create a Session object from settings."""
return cls(
reader_conn_string=settings.reader_connection_string,
writer_conn_string=settings.writer_connection_string,
)
def __attrs_post_init__(self):
"""Post init handler."""
self.reader: FastAPISessionMaker = FastAPISessionMaker(self.reader_conn_string)
self.writer: FastAPISessionMaker = FastAPISessionMaker(self.writer_conn_string)
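# A minimal usage sketch (hypothetical, not part of the module): build a Session
# from settings and open a read-only transaction via the reader session maker.
# It assumes the reader/writer connection strings are available to SqlalchemySettings.
#
#   settings = SqlalchemySettings()
#   session = Session.create_from_settings(settings)
#   with session.reader.context_session() as db:
#       db.execute(sa.text("SELECT 1"))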
| 33.650794 | 87 | 0.708491 | [ "MIT" ] | AsgerPetersen/stac-fastapi | stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/session.py | 2,120 | Python |
# -*- coding: utf-8 -*-
import pkg_resources
import platform
API_YOUTU_END_POINT = 'http://api.youtu.qq.com/'
API_TENCENTYUN_END_POINT = 'https://youtu.api.qcloud.com/'
API_YOUTU_VIP_END_POINT = 'https://vip-api.youtu.qq.com/'
APPID = 'xxx'
SECRET_ID = 'xxx'
SECRET_KEY = 'xx'
USER_ID = 'xx'
_config = {
'end_point':API_YOUTU_END_POINT,
'appid':APPID,
'secret_id':SECRET_ID,
'secret_key':SECRET_KEY,
'userid':USER_ID,
}
def get_app_info():
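    """Return the shared module-level Youtu API configuration dict."""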
return _config
def set_app_info(appid=None, secret_id=None, secret_key=None, userid=None, end_point=None):
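    """Override any of the Youtu API credentials or the end point; unset arguments keep their current values."""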
if appid:
_config['appid'] = appid
if secret_id:
_config['secret_id'] = secret_id
if secret_key:
_config['secret_key'] = secret_key
if userid:
_config['userid'] = userid
if end_point:
_config['end_point'] = end_point
| 22.5 | 91 | 0.666667 | [ "MIT" ] | qinyuanpei/wechat-assistant | TencentYoutuyun/conf.py | 855 | Python |