content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
from datetime import datetime
from typing import List, Optional
from uuid import getnode
from .ballot import (
CiphertextBallot,
CiphertextBallotContest,
CiphertextBallotSelection,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_ballot_contest,
make_ciphertext_ballot_selection,
make_ciphertext_ballot,
)
from .ballot_code import get_hash_for_device
from .election import CiphertextElectionContext
from .elgamal import elgamal_encrypt
from .group import ElementModP, ElementModQ, rand_q
from .logs import log_info, log_warning
from .manifest import (
InternalManifest,
ContestDescription,
ContestDescriptionWithPlaceholders,
SelectionDescription,
)
from .nonces import Nonces
from .serializable import Serializable
from .utils import get_optional, get_or_else_optional_func
class EncryptionDevice(Serializable):
"""
Metadata for encryption device
"""
device_id: int
"""Unique identifier for device"""
session_id: int
"""Used to identify session and protect the timestamp"""
launch_code: int
"""Election initialization value"""
location: str
"""Arbitary string to designate the location of device"""
def __init__(
self,
device_id: int,
session_id: int,
launch_code: int,
location: str,
) -> None:
self.device_id = device_id
self.session_id = session_id
self.launch_code = launch_code
self.location = location
log_info(f": EncryptionDevice: Created: UUID: {device_id} at: {location}")
def get_hash(self) -> ElementModQ:
"""
Get hash for encryption device
:return: Starting hash
"""
return get_hash_for_device(
self.device_id, self.session_id, self.launch_code, self.location
)
# pylint: disable=no-self-use
def get_timestamp(self) -> int:
"""
Get the current timestamp in utc
"""
return int(datetime.utcnow().timestamp())
class EncryptionMediator:
"""
An object for caching election and encryption state.
It composes Elections and Ballots.
"""
_internal_manifest: InternalManifest
_context: CiphertextElectionContext
_encryption_seed: ElementModQ
def __init__(
self,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_device: EncryptionDevice,
):
self._internal_manifest = internal_manifest
self._context = context
self._encryption_seed = encryption_device.get_hash()
def encrypt(self, ballot: PlaintextBallot) -> Optional[CiphertextBallot]:
"""
Encrypt the specified ballot using the cached election context.
"""
log_info(f" encrypt: objectId: {ballot.object_id}")
encrypted_ballot = encrypt_ballot(
ballot, self._internal_manifest, self._context, self._encryption_seed
)
if encrypted_ballot is not None and encrypted_ballot.code is not None:
self._encryption_seed = encrypted_ballot.code
return encrypted_ballot
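# Illustrative usage sketch (the variable names below are assumptions, not part
# of this module):
#
#   device = EncryptionDevice(generate_device_uuid(), session_id, launch_code, "polling-place-1")
#   mediator = EncryptionMediator(internal_manifest, context, device)
#   encrypted = mediator.encrypt(ballot)  # Optional[CiphertextBallot]
#
# Each successful encrypt() call advances the mediator's encryption seed to the
# new ballot's code, chaining ballots encrypted on the same device.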
def generate_device_uuid() -> int:
"""
Get unique identifier for device
:return: Unique identifier
"""
return getnode()
def selection_from(
description: SelectionDescription,
is_placeholder: bool = False,
is_affirmative: bool = False,
) -> PlaintextBallotSelection:
"""
Construct a `BallotSelection` from a specific `SelectionDescription`.
This function is useful for filling selections when a voter undervotes a ballot.
It is also used to create placeholder representations when generating the `ConstantChaumPedersenProof`
:param description: The `SelectionDescription` which provides the relevant `object_id`
:param is_placeholder: Mark this selection as a placeholder value
:param is_affirmative: Mark this selection as `yes`
:return: A BallotSelection
"""
return PlaintextBallotSelection(
description.object_id,
vote=1 if is_affirmative else 0,
is_placeholder_selection=is_placeholder,
)
def contest_from(description: ContestDescription) -> PlaintextBallotContest:
"""
Construct a `BallotContest` from a specific `ContestDescription` with all false fields.
This function is useful for filling contests and selections when a voter undervotes a ballot.
:param description: The `ContestDescription` used to derive the well-formed `BallotContest`
:return: a `BallotContest`
"""
selections: List[PlaintextBallotSelection] = list()
for selection_description in description.ballot_selections:
selections.append(selection_from(selection_description))
return PlaintextBallotContest(description.object_id, selections)
def encrypt_selection(
selection: PlaintextBallotSelection,
selection_description: SelectionDescription,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
is_placeholder: bool = False,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotSelection]:
"""
Encrypt a specific `BallotSelection` in the context of a specific `BallotContest`
:param selection: the selection in the valid input form
:param selection_description: the `SelectionDescription` from the
`ContestDescription` which defines this selection's structure
:param elgamal_public_key: the public key (K) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this selection.
This value can be (or be derived from) the BallotContest nonce, but no relationship is required
:param is_placeholder: specifies if this is a placeholder selection
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Validate Input
if not selection.is_valid(selection_description.object_id):
log_warning(f"malformed input selection: {selection}")
return None
selection_description_hash = selection_description.crypto_hash()
nonce_sequence = Nonces(selection_description_hash, nonce_seed)
selection_nonce = nonce_sequence[selection_description.sequence_order]
disjunctive_chaum_pedersen_nonce = next(iter(nonce_sequence))
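# The nonce sequence is derived deterministically from the selection description
# hash and the caller-supplied nonce_seed: indexing by sequence_order yields this
# selection's encryption nonce, and next(iter(...)) takes the first element of the
# sequence as the seed for the disjunctive Chaum-Pedersen proof.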
log_info(
f": encrypt_selection: for {selection_description.object_id} hash: {selection_description_hash.to_hex()}"
)
selection_representation = selection.vote
# Generate the encryption
elgamal_encryption = elgamal_encrypt(
selection_representation, selection_nonce, elgamal_public_key
)
if elgamal_encryption is None:
# will have logged about the failure earlier, so no need to log anything here
return None
# TODO: ISSUE #35: encrypt/decrypt: encrypt the extended_data field
# Create the return object
encrypted_selection = make_ciphertext_ballot_selection(
object_id=selection.object_id,
description_hash=selection_description_hash,
ciphertext=get_optional(elgamal_encryption),
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=disjunctive_chaum_pedersen_nonce,
selection_representation=selection_representation,
is_placeholder_selection=is_placeholder,
nonce=selection_nonce,
)
if encrypted_selection.proof is None:
return None # log will have happened earlier
# optionally, skip the verification step
if not should_verify_proofs:
return encrypted_selection
# verify the selection.
if encrypted_selection.is_valid_encryption(
selection_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_selection
log_warning(
f"mismatching selection proof for selection {encrypted_selection.object_id}"
)
return None
# pylint: disable=too-many-return-statements
def encrypt_contest(
contest: PlaintextBallotContest,
contest_description: ContestDescriptionWithPlaceholders,
elgamal_public_key: ElementModP,
crypto_extended_base_hash: ElementModQ,
nonce_seed: ElementModQ,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotContest]:
"""
Encrypt a specific `BallotContest` in the context of a specific `Ballot`.
This method accepts a contest representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
selections to represent the number of seats available for a given contest. By adding `placeholder`
votes, the total number of `True` selections always matches the number of seats (`number_elected`) for the contest.
:param contest: the contest in the valid input form
:param contest_description: the `ContestDescriptionWithPlaceholders`
from the `ContestDescription` which defines this contest's structure
:param elgamal_public_key: the public key (K) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this contest.
This value can be (or be derived from) the Ballot nonce, but no relationship is required
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Validate Input
if not contest.is_valid(
contest_description.object_id,
len(contest_description.ballot_selections),
contest_description.number_elected,
contest_description.votes_allowed,
):
log_warning(f"malformed input contest: {contest}")
return None
if not contest_description.is_valid():
log_warning(f"malformed contest description: {contest_description}")
return None
# account for sequence id
contest_description_hash = contest_description.crypto_hash()
nonce_sequence = Nonces(contest_description_hash, nonce_seed)
contest_nonce = nonce_sequence[contest_description.sequence_order]
chaum_pedersen_nonce = next(iter(nonce_sequence))
encrypted_selections: List[CiphertextBallotSelection] = list()
selection_count = 0
# TODO: ISSUE #54 this code could be inefficient if we had a contest
# with a lot of choices, although the O(n^2) iteration here is small
# compared to the huge cost of doing the cryptography.
# Generate the encrypted selections
for description in contest_description.ballot_selections:
has_selection = False
encrypted_selection = None
# iterate over the actual selections for each contest description
# and apply the selected value if it exists. If it does not, an explicit
# false is entered instead and the selection_count is not incremented
# this allows consumers to only pass in the relevant selections made by a voter
for selection in contest.ballot_selections:
if selection.object_id == description.object_id:
# track the selection count so we can append the
# appropriate number of true placeholder votes
has_selection = True
selection_count += selection.vote
encrypted_selection = encrypt_selection(
selection,
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
break
if not has_selection:
# No selection was made for this possible value
# so we explicitly set it to false
encrypted_selection = encrypt_selection(
selection_from(description),
description,
elgamal_public_key,
crypto_extended_base_hash,
contest_nonce,
)
if encrypted_selection is None:
return None # log will have happened earlier
encrypted_selections.append(get_optional(encrypted_selection))
# Handle Placeholder selections
# After we loop through all of the real selections on the ballot,
# we loop through each placeholder value and determine if it should be filled in
# Add a placeholder selection for each possible seat in the contest
for placeholder in contest_description.placeholder_selections:
# for undervotes, select the placeholder value as true for each available seat
# note this pattern is used since DisjunctiveChaumPedersen expects a 0 or 1
# so each seat can only have a maximum value of 1 in the current implementation
select_placeholder = False
if selection_count < contest_description.number_elected:
select_placeholder = True
selection_count += 1
encrypted_selection = encrypt_selection(
selection=selection_from(
description=placeholder,
is_placeholder=True,
is_affirmative=select_placeholder,
),
selection_description=placeholder,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
nonce_seed=contest_nonce,
is_placeholder=True,
should_verify_proofs=True,
)
if encrypted_selection is None:
return None # log will have happened earlier
encrypted_selections.append(get_optional(encrypted_selection))
# TODO: ISSUE #33: support other cases such as cumulative voting
# (individual selections being an encryption of > 1)
if (
contest_description.votes_allowed is not None
and selection_count < contest_description.votes_allowed
):
log_warning(
"mismatching selection count: only n-of-m style elections are currently supported"
)
# Create the return object
encrypted_contest = make_ciphertext_ballot_contest(
object_id=contest.object_id,
description_hash=contest_description_hash,
ballot_selections=encrypted_selections,
elgamal_public_key=elgamal_public_key,
crypto_extended_base_hash=crypto_extended_base_hash,
proof_seed=chaum_pedersen_nonce,
number_elected=contest_description.number_elected,
nonce=contest_nonce,
)
if encrypted_contest is None or encrypted_contest.proof is None:
return None # log will have happened earlier
if not should_verify_proofs:
return encrypted_contest
# Verify the proof
if encrypted_contest.is_valid_encryption(
contest_description_hash, elgamal_public_key, crypto_extended_base_hash
):
return encrypted_contest
log_warning(f"mismatching contest proof for contest {encrypted_contest.object_id}")
return None
# TODO: ISSUE #57: add the device hash to the function interface so it can be propagated with the ballot.
# also propagate the seed so that the ballot codes can be regenerated
# by traversing the collection of ballots encrypted by a specific device
def encrypt_ballot(
ballot: PlaintextBallot,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
encryption_seed: ElementModQ,
nonce: Optional[ElementModQ] = None,
should_verify_proofs: bool = True,
) -> Optional[CiphertextBallot]:
"""
Encrypt a specific `Ballot` in the context of a specific `CiphertextElectionContext`.
This method accepts a ballot representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
selections to represent the number of seats available for a given contest.
This method also allows for ballots to exclude passing contests for which the voter made no selections.
It will fill missing contests with `False` selections and generate `placeholder` selections that are marked `True`.
:param ballot: the ballot in the valid input form
:param internal_manifest: the `InternalManifest` which defines this ballot's structure
:param context: all the cryptographic context for the election
:param encryption_seed: Hash from previous ballot or starting hash from device
:param nonce: an optional `ElementModQ` used to seed the `Nonce` generated for this ballot;
if this value is not provided, the secret generating mechanism of the OS provides its own
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
"""
# Determine the relevant range of contests for this ballot style
style = internal_manifest.get_ballot_style(ballot.style_id)
# Validate Input
if not ballot.is_valid(style.object_id):
log_warning(f"malformed input ballot: {ballot}")
return None
# Generate a random master nonce to use for the contest and selection nonces on the ballot
random_master_nonce = get_or_else_optional_func(nonce, lambda: rand_q())
# Include a representation of the election and the external Id in the nonces used
# to derive other nonce values on the ballot
nonce_seed = CiphertextBallot.nonce_seed(
internal_manifest.manifest_hash,
ballot.object_id,
random_master_nonce,
)
log_info(f": manifest_hash : {internal_manifest.manifest_hash.to_hex()}")
log_info(f": encryption_seed : {encryption_seed.to_hex()}")
encrypted_contests = encrypt_ballot_contests(
ballot, internal_manifest, context, nonce_seed
)
if encrypted_contests is None:
return None
# Create the return object
encrypted_ballot = make_ciphertext_ballot(
ballot.object_id,
ballot.style_id,
internal_manifest.manifest_hash,
encryption_seed,
encrypted_contests,
random_master_nonce,
)
if not encrypted_ballot.code:
return None
if not should_verify_proofs:
return encrypted_ballot
# Verify the proofs
if encrypted_ballot.is_valid_encryption(
internal_manifest.manifest_hash,
context.elgamal_public_key,
context.crypto_extended_base_hash,
):
return encrypted_ballot
return None # log will have happened earlier
def encrypt_ballot_contests(
ballot: PlaintextBallot,
description: InternalManifest,
context: CiphertextElectionContext,
nonce_seed: ElementModQ,
) -> Optional[List[CiphertextBallotContest]]:
"""Encrypt contests from a plaintext ballot with a specific style"""
encrypted_contests: List[CiphertextBallotContest] = []
# Only iterate on contests for this specific ballot style
for ballot_style_contest in description.get_contests_for(ballot.style_id):
use_contest = None
for contest in ballot.contests:
if contest.object_id == ballot_style_contest.object_id:
use_contest = contest
break
# no selections provided for the contest, so create a placeholder contest
if not use_contest:
use_contest = contest_from(ballot_style_contest)
encrypted_contest = encrypt_contest(
use_contest,
ballot_style_contest,
context.elgamal_public_key,
context.crypto_extended_base_hash,
nonce_seed,
)
if encrypted_contest is None:
return None
encrypted_contests.append(get_optional(encrypted_contest))
return encrypted_contests
| 37.435361 | 119 | 0.713676 | ["MIT"] | john-s-morgan/electionguard-python | src/electionguard/encrypt.py | 19,691 | Python |
import argparse
import logging
import sys
import pyphen
import nltk
pyphen.language_fallback("en_US")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(logging.DEBUG)
logger.addHandler(console_out)
def parse_arguments():
"""
Simple argument parser for the command line
:return: The text to be edited
"""
parser = argparse.ArgumentParser(description="Receive text to be edited")
parser.add_argument("text", metavar="input text", type=str)
args = parser.parse_args()
return args.text
def clean_input(text):
"""
Text sanitization function
:param text: User input text
:return: Sanitized text, without non-ASCII characters
"""
# To keep things simple at the start, let's only keep ASCII characters
return str(text.encode().decode("ascii", errors="ignore"))
def preprocess_input(text):
"""
Tokenizes text that has been sanitized
:param text: Sanitized text
:return: Text ready to be fed to analysis, by having sentences and words tokenized
"""
sentences = nltk.sent_tokenize(text)
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
return tokens
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
"""
Computes readability score from summary statistics
:param total_syllables: number of syllables in input text
:param total_words: number of words in input text
:param total_sentences: number of sentences in input text
:return: A readability score: the lower the score, the more complex the text is deemed to be
"""
return (
206.85
- 1.015 * (total_words / total_sentences)
- 84.6 * (total_syllables / total_words)
)
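# Worked example (illustrative numbers): 150 syllables, 100 words, 5 sentences
# gives 206.85 - 1.015 * (100 / 5) - 84.6 * (150 / 100) = 206.85 - 20.3 - 126.9 = 59.65,
# which get_reading_level_from_flesch below maps to "Fairly difficult to read".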
def get_reading_level_from_flesch(flesch_score):
"""
Thresholds taken from https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests
:param flesch_score:
:return: A reading level and difficulty for a given flesch score
"""
if flesch_score < 30:
return "Very difficult to read"
elif flesch_score < 50:
return "Difficult to read"
elif flesch_score < 60:
return "Fairly difficult to read"
elif flesch_score < 70:
return "Plain English"
elif flesch_score < 80:
return "Fairly easy to read"
elif flesch_score < 90:
return "Easy to read"
else:
return "Very easy to read"
def compute_average_word_length(tokens):
"""
Calculate the average word length for a sentence
:param tokens: a list of words
:return: The average length of words in this list
"""
word_lengths = [len(word) for word in tokens]
return sum(word_lengths) / len(word_lengths)
def compute_total_average_word_length(sentence_list):
"""
Calculate average word length for multiple sentences
:param sentence_list: a list of sentences, each being a list of words
:return: The average length of words in this list of sentences
"""
lengths = [compute_average_word_length(tokens) for tokens in sentence_list]
return sum(lengths) / len(lengths)
def compute_total_unique_words_fraction(sentence_list):
"""
Compute the fraction of unique words
:param sentence_list: a list of sentences, each being a list of words
:return: the fraction of unique words in the sentences
"""
all_words = [word for word_list in sentence_list for word in word_list]
unique_words = set(all_words)
return len(unique_words) / len(all_words)
def count_word_usage(tokens, word_list):
"""
Counts occurrences of a given list of words
:param tokens: a list of tokens for one sentence
:param word_list: a list of words to search for
:return: the number of times the words appear in the list
"""
return len([word for word in tokens if word.lower() in word_list])
def count_word_syllables(word):
"""
Count syllables in a word
:param word: a one word string
:return: the number of syllables according to pyphen
"""
dic = pyphen.Pyphen(lang="en_US")
# this returns our word, with hyphens ("-") inserted in between each syllable
hyphenated = dic.inserted(word)
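# For example, if the dictionary hyphenates "reading" as "read-ing", the split below
# yields two parts and the count is 2; words the dictionary cannot hyphenate come
# back unchanged and therefore count as a single syllable.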
return len(hyphenated.split("-"))
def count_sentence_syllables(tokens):
"""
Count syllables in a sentence
:param tokens: a list of words and potentially punctuation
:return: the number of syllables in the sentence
"""
# Our tokenizer leaves punctuation as a separate word, so we filter for it here
punctuation = ".,!?/"
return sum(
[
count_word_syllables(word)
for word in tokens
if word not in punctuation
]
)
def count_total_syllables(sentence_list):
"""
Count syllables in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of syllables in the sentences
"""
return sum(
[count_sentence_syllables(sentence) for sentence in sentence_list]
)
def count_words_per_sentence(sentence_tokens):
"""
Count words in a sentence
:param sentence_tokens: a list of words and potentially punctuation
:return: the number of words in the sentence
"""
punctuation = ".,!?/"
return len([word for word in sentence_tokens if word not in punctuation])
def count_total_words(sentence_list):
"""
Count words in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of words in the sentences
"""
return sum(
[count_words_per_sentence(sentence) for sentence in sentence_list]
)
def get_suggestions(sentence_list):
"""
Returns a string containing our suggestions
:param sentence_list: a list of sentences, each being a list of words
:return: suggestions to improve the input
"""
told_said_usage = sum(
(count_word_usage(tokens, ["told", "said"]) for tokens in sentence_list)
)
but_and_usage = sum(
(count_word_usage(tokens, ["but", "and"]) for tokens in sentence_list)
)
wh_adverbs_usage = sum(
(
count_word_usage(
tokens,
[
"when",
"where",
"why",
"whence",
"whereby",
"wherein",
"whereupon",
],
)
for tokens in sentence_list
)
)
result_str = ""
adverb_usage = "Adverb usage: %s told/said, %s but/and, %s wh adverbs" % (
told_said_usage,
but_and_usage,
wh_adverbs_usage,
)
result_str += adverb_usage
average_word_length = compute_total_average_word_length(sentence_list)
unique_words_fraction = compute_total_unique_words_fraction(sentence_list)
word_stats = "Average word length %.2f, fraction of unique words %.2f" % (
average_word_length,
unique_words_fraction,
)
# Using HTML break to later display on a webapp
result_str += "<br/>"
result_str += word_stats
number_of_syllables = count_total_syllables(sentence_list)
number_of_words = count_total_words(sentence_list)
number_of_sentences = len(sentence_list)
syllable_counts = "%d syllables, %d words, %d sentences" % (
number_of_syllables,
number_of_words,
number_of_sentences,
)
result_str += "<br/>"
result_str += syllable_counts
flesch_score = compute_flesch_reading_ease(
number_of_syllables, number_of_words, number_of_sentences
)
flesch = "%d syllables, %.2f flesch score: %s" % (
number_of_syllables,
flesch_score,
get_reading_level_from_flesch(flesch_score),
)
result_str += "<br/>"
result_str += flesch
return result_str
def get_recommendations_from_input(txt):
"""
Cleans, preprocesses, and generates heuristic suggestion for input string
:param txt: Input text
:return: Suggestions for a given text input
"""
processed = clean_input(txt)
tokenized_sentences = preprocess_input(processed)
suggestions = get_suggestions(tokenized_sentences)
return suggestions
if __name__ == "__main__":
input_text = parse_arguments()
print(get_recommendations_from_input(input_text))
| 30.244604 | 96 | 0.673525 | ["MIT"] | 0105rahulk/ml-powered-applications | ml_editor/ml_editor.py | 8,408 | Python |
# -*- coding: utf-8 -*-
import six
from flask import Blueprint, jsonify, current_app
from ..utils import MountTree
from .utils import is_testing
api_bp = Blueprint('api', __name__.rsplit('.')[1])
if is_testing():
@api_bp.route('/_hello/')
def api_hello():
return jsonify('api hello')
@api_bp.route('/all')
def all_storage():
"""Get all storage in JSON."""
trees = current_app.trees
mounts = MountTree()
for prefix, tree in six.iteritems(trees):
for path, storage in tree.iter_storage():
mounts.mount(prefix + '/' + path, storage)
# get a compressed representation of the tree
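# The compressed form is a nested list of (name, children-or-storage) pairs, e.g.
# [("prefix", [("run-a", {...storage dict...}), ("run-b", {...})])] (names here are
# illustrative); branches with no storage anywhere below them return None from dfs
# and are dropped.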
def dfs(node):
children = node.children
if children:
ret = []
for name in sorted(six.iterkeys(children)):
child = children[name]
child_ret = dfs(child)
if child_ret:
ret.append((name, child_ret))
if ret:
return ret
data = node.data
if data:
return data.to_dict()
return jsonify(dfs(mounts.root) or [])
| 25.906977 | 55 | 0.572711 | ["MIT"] | korepwx/mlcomp | mlcomp/board/views/api.py | 1,114 | Python |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from copy import deepcopy
import torch
from cvpods.checkpoint import DefaultCheckpointer
from cvpods.data import build_transform_gens
__all__ = ["DefaultPredictor"]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
If you'd like to do anything more fancy, please refer to its source code
as examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
.. code-block:: python
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg, meta):
self.cfg = deepcopy(cfg)
if self.cfg.MODEL.DEVICE.startswith("cuda:"):
torch.cuda.set_device(self.cfg.MODEL.DEVICE)
self.cfg.MODEL.DEVICE = "cuda"
self.model = cfg.build_model(self.cfg)
self.model.eval()
self.metadata = meta
checkpointer = DefaultCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.transform_gen = build_transform_gens(cfg.INPUT.AUG.TEST_PIPELINES)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(
): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = original_image
for tfm_gen in self.transform_gen:
image = tfm_gen.get_transform(image).apply_image(image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
| 35.012195 | 93 | 0.634622 | ["Apache-2.0"] | reinforcementdriving/cvpods | cvpods/engine/predictor.py | 2,871 | Python |
# coding: utf-8
from pytdx.hq import TdxHq_API
from pytdx.params import TDXParams
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
if __name__ == '__main__':
with io.open(r'..\all_other_data\symbol.txt', 'r', encoding='utf-8') as f:
symbol = [s.strip() for s in f.readlines()]
TDXHQ = TdxHq_API(raise_exception=True, auto_retry=True)
if not TDXHQ.connect('121.14.110.200', 443):
raise Exception("Can't connect.")
#symbol = symbol[0:5]
first_df = True
for code in symbol:
if code[0:2] == 'SH':
market = 1
else:
market = 0
code = code [2:]
#quote_info = TDXHQ.get_security_quotes([(market, code)])
quote_info = TDXHQ.get_security_bars(9, market, code, 0, 1)
try:
if first_df:
columns = ['code', 'price']
quote_df = pd.DataFrame(columns=columns)
first_df = False
values = [code, quote_info[0]['close']]
quote_df.loc[quote_df.shape[0]] = values
except Exception as e:
print "code {}, process bars error, skipped.".format(code)
print e.message
print quote_info
quote_df = quote_df.rename(columns={
'code':'代码',
'price':'价格',
})
# string_columns = ['代码']
# quote_df[string_columns] = quote_df[string_columns].applymap(
# lambda x: '=""' if type(x) is float else '="' + str(x) + '"')
quote_df.to_csv(r"..\all_other_data\all_last_price.csv", encoding="gbk", quoting=csv.QUOTE_NONE, index=False)
TDXHQ.disconnect()
| 28.62069 | 113 | 0.586747 | ["MIT"] | lte2000/cwfx | get_data/get_last_price.py | 1,672 | Python |
#! /usr/bin/env python3
from ssedata import FunctionType
from google.protobuf.json_format import MessageToDict
import grpc
import argparse
import json
import logging
import logging.config
import os
import sys
import inspect
import time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests
import configparser
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
# import helper .py files
import qlist
import pysize
import ServerSideExtension_pb2 as SSE
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
config = configparser.ConfigParser()
class ExtensionService(SSE.ConnectorServicer):
"""
A simple SSE-plugin created for the HelloWorld example.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
#self.ScriptEval = ScriptEval()
os.makedirs('logs', exist_ok=True)
log_file = os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'logger.config')
print(log_file)
logging.config.fileConfig(log_file)
logging.info(self._function_definitions)
logging.info('Logging enabled')
function_name = "none"
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_rest_single',
1: '_rest_30',
2: '_ws_single',
3: '_ws_batch',
4: '_gcp_bq'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
@staticmethod
def _rest_single(request, context):
"""
Rest using single variable
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
logging.debug("Rest Url is set to {}" .format(url))
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
response_rows = []
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter = request_counter + 1
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
if (len(param) == 0):
logging.info('Exiting {} TimeStamp: {} due to Data being Empty ' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
else:
payload = '{"data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response as Text: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_single(request, context):
"""
Single Row Processing for Websockets
:param request: iterable sequence of bundled rows
:return: the same iterable sequence as received
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
# Start by Gathering Environmental Varaiable
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
ws_route = config.get(q_function_name, 'ws_route')
bCache = config.get(q_function_name, 'cache')
logging.debug('Printing Route for WS {}' .format(ws_route))
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# In the future we will use the Token for Licensing and Throttling
# Currently we are using a combination of host+ipaddr+username for Client Identification
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Websocket URL : {}' .format(ws_url))
ws = create_connection(ws_url)
response_rows = []
for request_rows in request:
# Iterate over rows
# Default code
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
result = ''
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
else:
payload = '{"action":"' + ws_route + \
'","data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
ws.send(payload)
#logging.info('Show Payload Response: {}'.format(resp.text))
resp = json.loads(ws.recv())
logging.debug(resp)
result = resp['result']
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_batch(request, context):
"""
Mirrors the input and sends back the same data.
:param request: iterable sequence of bundled rows
:return: the same iterable sequence as received
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
logging.debug('Calling qrag.ini section "{}"' .format(q_function_name))
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
batch_size = int(config.get(q_function_name, 'batch_size'))
logging.debug('Batch Size {}' .format(batch_size))
ws_route = config.get(q_function_name, 'ws_route')
logging.info('API Route : {}' .format(ws_route))
# setup Caching
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Full url for ws: {} '.format(ws_url))
ws = create_connection(ws_url)
response_rows = []
outer_counter = 1
inner_counter = 1
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter += 1
temp = MessageToDict(request_rows)
logging.debug('Temp Message to Dict {}' .format(temp))
test_rows = temp['rows']
logging.debug('Test Rows: {}' .format(test_rows))
request_size = len(test_rows)
logging.debug(
'Bundled Row Number of Rows - {}' .format(request_size))
batches = list(qlist.divide_chunks(test_rows, batch_size))
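# qlist.divide_chunks is assumed to split test_rows into consecutive chunks of
# batch_size rows; each chunk is sent as a single websocket payload below and one
# response is read back per row in the chunk.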
for i in batches:
payload_t = {"action": ws_route}
logging.debug('Prefix Route Selection {}' .format(payload_t))
logging.debug(len(batches))
payload_t["data"] = i
logging.debug('Size of payload {}' .format(
pysize.get_size(payload_t)))
logging.debug('Showing Payload: {}'.format(payload_t))
logging.debug('batch number {}'.format(outer_counter))
ws.send(json.dumps(payload_t))
logging.debug('message sent WS')
outer_counter += 1
payload_t.clear()
for j in i:
#logging.debug("Priniting i {}" .format(i))
resp = json.loads(ws.recv())
#logging.debug('Response Type : {}' .format(type(resp)))
logging.debug('Counter: {} Payload Size: {} Payload Response: {}'.format(
inner_counter, pysize.get_size(resp), resp))
inner_counter += 1
result = resp['result']
logging.debug('Log Result: {}' .format(result))
duals = iter([SSE.Dual(strData=result)])
# logging.debug(duals)
#logging.debug('Printing Duals {}' .format(duals))
# Yield the row data as bundled rows
response_rows.append(SSE.Row(duals=duals))
logging.debug(
'Exiting Inner Loop: Printing j {}' .format(j))
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _rest_30(request, context):
"""
Aggregates the parameters to a single comma separated string.
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
payload = '{"data":"' + (','.join(param)) + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting Predict v2 TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _gcp_bq(request, context):
"""
Google Cloud Big Query Client Integration
November 2020
[email protected]
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
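# Placeholder branch: the BigQuery lookup that should populate `result` is not
# implemented here, so `result` is only defined when the error branch above ran.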
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting gcp_bq TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _cache(request, context):
"""
Cache enabled. Add the datetime stamp to the end of each string value.
:param request: iterable sequence of bundled rows
:param context: not used.
:return: string
"""
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
@staticmethod
def _no_cache(request, context):
"""
Cache disabled. Add the datetime stamp to the end of each string value.
:param request:
:param context: used for disabling the cache in the header.
:return: string
"""
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
def _get_call_info(self, context):
"""
Retrieve useful information for the function call.
:param context: context
:return: string containing header info
"""
# Get metadata for the call from the context
metadata = dict(context.invocation_metadata())
# Get the function ID
func_header = SSE.FunctionRequestHeader()
func_header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
func_id = func_header.functionId
# Get the common request header
common_header = SSE.CommonRequestHeader()
common_header.ParseFromString(metadata['qlik-commonrequestheader-bin'])
# Get capabilities
if not hasattr(self, 'capabilities'):
self.capabilities = self.GetCapabilities(None, context)
# Get the name of the capability called in the function
capability = [
function.name for function in self.capabilities.functions if function.functionId == func_id][0]
# Get the user ID using a regular expression
match = re.match(r"UserDirectory=(?P<UserDirectory>\w*)\W+UserId=(?P<UserId>\w*)",
common_header.userId, re.IGNORECASE)
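# e.g. a userId of "UserDirectory=INTERNAL; UserId=sa_engine" (illustrative value)
# is reduced to "INTERNAL/sa_engine" below.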
if match:
userId = match.group('UserDirectory') + '/' + match.group('UserId')
else:
userId = common_header.userId
# Get the app ID
appId = common_header.appId
# Get the call's origin
peer = context.peer()
return "{0} - Capability '{1}' called by user {2} from app {3}".format(peer, capability, userId, appId)
@staticmethod
def _echo_table(request, context):
"""
Echo the input table.
:param request:
:param context:
:return:
"""
for request_rows in request:
response_rows = []
for row in request_rows.rows:
response_rows.append(row)
yield SSE.BundledRows(rows=response_rows)
def GetCapabilities(self, request, context):
"""
Get capabilities.
Note that neither request nor context is used in the implementation of this method, but they are still added as
parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
them to avoid error messages regarding too many parameters provided from the client.
:param request: the request, not used in this method.
:param context: the context, not used in this method.
:return: the capabilities.
"""
logging.info('GetCapabilities')
# Create an instance of the Capabilities grpc message
# Enable(or disable) script evaluation
# Set values for pluginIdentifier and pluginVersion
capabilities = SSE.Capabilities(allowScript=True,
pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
pluginVersion='v0.1.0')
# If user defined functions supported, add the definitions to the message
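# Each entry in the definitions file is read with roughly this shape (values are
# illustrative; the keys are the ones this method and ExecuteFunction access):
#   {"Id": 0, "Name": "RestSingle", "Type": 2, "ReturnType": 0,
#    "Params": {"field1": 0}, "QRAP_Type": "rest_single"}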
with open(self.function_definitions) as json_file:
# Iterate over each function definition and add data to the capabilities grpc message
for definition in json.load(json_file)['Functions']:
function = capabilities.functions.add()
function.name = definition['Name']
function.functionId = definition['Id']
function.functionType = definition['Type']
function.returnType = definition['ReturnType']
# Retrieve name and type of each parameter
for param_name, param_type in sorted(definition['Params'].items()):
function.params.add(name=param_name, dataType=param_type)
logging.info('Adding to capabilities: {}({})'.format(function.name,
[p.name for p in function.params]))
return capabilities
def ExecuteFunction(self, request_iterator, context):
"""
Execute function call.
:param request_iterator: an iterable sequence of Row.
:param context: the context.
:return: an iterable sequence of Row.
"""
func_id = self._get_function_id(context)
logging.info(self._get_call_info(context))
# Call corresponding function
logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
# self.functions[func_id]))
current_function_def = (json.load(open(self.function_definitions))[
'Functions'])[func_id]
logging.debug(current_function_def)
global q_function_name
q_function_name = current_function_def["Name"]
logging.debug('Logical Method Called is: {}' .format(q_function_name))
current_qrap_type = current_function_def["QRAP_Type"]
qrag_function_name = '_' + current_qrap_type
logging.debug(
'This is the type of QRAG Method Name: {}' .format(current_qrap_type))
logging.debug(
'Physical Method Called is: {}' .format(qrag_function_name))
# Convers to Method Name to Physical Main Function
qrag_id = qlist.find_key(self.functions, qrag_function_name)
logging.debug('QRAG ID: {}' .format(qrag_id))
global function_name
function_name = self.functions[qrag_id]
return getattr(self, self.functions[qrag_id])(request_iterator, context)
def Serve(self, port, pem_dir):
"""
Sets up the gRPC Server with insecure connection on port
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials(
[(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info(
'*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info(
'*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
qrag_file = os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini')
config.read(qrag_file)
print(qrag_file)
print(config.sections())
port = config.get('base', 'port')
parser.add_argument('--port', nargs='?', default=port)
parser.add_argument('--pem_dir', nargs='?')
parser.add_argument('--definition_file', nargs='?',
default='functions.json')
args = parser.parse_args()
# need to locate the file when the script is called from outside its location dir.
def_file = os.path.join(os.path.dirname(
os.path.abspath(__file__)), args.definition_file)
print(def_file)
logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(
args.port, args.pem_dir, def_file, datetime.now().isoformat()))
calc = ExtensionService(def_file)
calc.Serve(args.port, args.pem_dir)
| 42.977671 | 119 | 0.575426 | ["MIT"] | Parkman328/Qlik-Rapid-API-Gateway | gcp/__main__.py | 26,947 | Python |
import utils.gpu as gpu
from model.build_model import Build_Model
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import time
import logging
import config.yolov4_config as cfg
from utils.visualize import *
from utils.torch_utils import *
from utils.log import Logger
import pooraka as prk
class Evaluation(object):
def __init__(self,
gpu_id=0,
weight_path=None,
visiual=None,
eval=False,
mode_path=None
):
self.__num_class = cfg.VOC_DATA["NUM"]
self.__conf_threshold = cfg.VAL["CONF_THRESH"]
self.__nms_threshold = cfg.VAL["NMS_THRESH"]
self.__device = gpu.select_device(gpu_id)
self.__multi_scale_val = cfg.VAL["MULTI_SCALE_VAL"]
self.__flip_val = cfg.VAL["FLIP_VAL"]
self.__visiual = visiual
self.__eval = eval
self.__classes = cfg.VOC_DATA["CLASSES"]
if cfg.MODEL_TYPE["TYPE"] == 'NSGA-YOLOv4':
self.__model = Build_Model(weight_path=mode_path).to(self.__device)
else:
self.__model = Build_Model(weight_path=weight_path).to(self.__device)
self.__load_model_weights(weight_path)
self.__evalter = Evaluator(self.__model, showatt=False)
def __load_model_weights(self, weight_path):
print("loading weight file from : {}".format(weight_path))
weight = os.path.join(weight_path)
chkpt = torch.load(weight, map_location=self.__device)
self.__model.load_state_dict(chkpt)
print("loading weight file is done")
flops, params = prk.get_flops_params(self.__model.cpu(), (1, 3, 416, 416))
print(flops, params )
self.__model = self.__model.cuda()
del chkpt
def val(self):
global logger
if self.__eval:
logger.info("***********Start Evaluation****************")
start = time.time()
mAP = 0
with torch.no_grad():
APs, inference_time = Evaluator(self.__model, showatt=False).APs_voc(self.__multi_scale_val, self.__flip_val)
for i in APs:
logger.info("{} --> mAP : {}".format(i, APs[i]))
mAP += APs[i]
mAP = mAP / self.__num_class
logger.info('mAP:{}'.format(mAP))
logger.info("inference time: {:.2f} ms".format(inference_time))
end = time.time()
logger.info(" ===val cost time:{:.4f}s".format(end - start))
def detection(self):
global logger
if self.__visiual:
imgs = os.listdir(self.__visiual)
logger.info("***********Start Detection****************")
for v in imgs:
path = os.path.join(self.__visiual, v)
logger.info("val images : {}".format(path))
img = cv2.imread(path)
assert img is not None
bboxes_prd = self.__evalter.get_bbox(img,v)
if bboxes_prd.shape[0] != 0:
boxes = bboxes_prd[..., :4]
class_inds = bboxes_prd[..., 5].astype(np.int32)
scores = bboxes_prd[..., 4]
visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes)
path = os.path.join(cfg.PROJECT_PATH, "detection_result/{}".format(v))
cv2.imwrite(path, img)
logger.info("saved images : {}".format(path))
if __name__ == "__main__":
global logger
parser = argparse.ArgumentParser()
parser.add_argument('--weight_path', type=str, default='weight/best.pt', help='weight file path')
parser.add_argument('--model_path', type=str, default='', help='weight file path')
parser.add_argument('--log_val_path', type=str, default='log_val',
help='weight file path')
parser.add_argument('--gpu_id', type=int, default=-1, help='whether to use GPU(eg:0,1,2,3,4,5,6,7,8) or CPU(-1)')
parser.add_argument('--visiual', type=str, default='VOCtest-2007/VOC2007/JPEGImages', help='val data path or None')
parser.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
parser.add_argument('--mode', type=str, default='val',
help='val or det')
opt = parser.parse_args()
logger = Logger(log_file_name=opt.log_val_path + '/log_voc_val.txt', log_level=logging.DEBUG, logger_name='YOLOv4').get_log()
if opt.mode == 'val':
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual,
mode_path = opt.model_path).val()
else:
Evaluation(gpu_id=opt.gpu_id,
weight_path=opt.weight_path,
eval=opt.eval,
visiual=opt.visiual, mode_path = opt.model_path).detection()
| 39.76378 | 129 | 0.577426 | ["Apache-2.0"] | chakkritte/EEEA-Net | Transfer/YOLOv4-pytorch/eval_voc.py | 5,050 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR EDIGEO driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import ogr
sys.path.append('../pymod')
import gdaltest
import ogrtest
###############################################################################
def ogr_edigeo_1():
filelist = ['E000AB01.THF',
'EDAB01S1.VEC',
'EDAB01SE.DIC',
'EDAB01SE.GEN',
'EDAB01SE.GEO',
'EDAB01SE.QAL',
'EDAB01SE.SCD',
'EDAB01T1.VEC',
'EDAB01T2.VEC',
'EDAB01T3.VEC']
# base_url = 'http://svn.geotools.org/trunk/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
base_url = 'https://raw.githubusercontent.com/geotools/geotools/master/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
for filename in filelist:
if not gdaltest.download_file(base_url + filename, filename):
return 'skip'
try:
for filename in filelist:
os.stat('tmp/cache/' + filename)
except OSError:
return 'skip'
ds = ogr.Open('tmp/cache/E000AB01.THF')
if ds.GetLayerCount() != 24:
print(ds.GetLayerCount())
return 'fail'
layers = [('BATIMENT_id', ogr.wkbPolygon, 107),
('BORNE_id', ogr.wkbPoint, 5),
('COMMUNE_id', ogr.wkbPolygon, 1),
('LIEUDIT_id', ogr.wkbPolygon, 3),
('NUMVOIE_id', ogr.wkbPoint, 43),
('PARCELLE_id', ogr.wkbPolygon, 155),
('SECTION_id', ogr.wkbPolygon, 1),
('SUBDFISC_id', ogr.wkbPolygon, 1),
('SUBDSECT_id', ogr.wkbPolygon, 1),
('SYMBLIM_id', ogr.wkbPoint, 29),
('TLINE_id', ogr.wkbLineString, 134),
('TPOINT_id', ogr.wkbPoint, 1),
('TRONFLUV_id', ogr.wkbPolygon, 3),
('TRONROUTE_id', ogr.wkbPolygon, 1),
('TSURF_id', ogr.wkbPolygon, 3),
('ZONCOMMUNI_id', ogr.wkbLineString, 15),
('ID_S_OBJ_Z_1_2_2', ogr.wkbPoint, 248),
]
for l in layers:
lyr = ds.GetLayerByName(l[0])
if lyr.GetLayerDefn().GetGeomType() != l[1]:
return 'fail'
if lyr.GetFeatureCount() != l[2]:
print(lyr.GetFeatureCount())
return 'fail'
if l[1] != ogr.wkbNone:
if lyr.GetSpatialRef().ExportToWkt().find('Lambert_Conformal_Conic_1SP') == -1:
print(lyr.GetSpatialRef().ExportToWkt())
return 'fail'
lyr = ds.GetLayerByName('BORNE_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'POINT (877171.28 72489.22)'):
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('BATIMENT_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'POLYGON ((877206.16 71888.82,877193.14 71865.51,877202.95 71860.07,877215.83 71883.5,877206.16 71888.82))'):
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayerByName('ZONCOMMUNI_id')
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, 'LINESTRING (877929.8 71656.39,877922.38 71663.72,877911.48 71669.51,877884.23 71675.64,877783.07 71694.04,877716.31 71706.98,877707.45 71709.71,877702.0 71713.79,877696.89 71719.58,877671.69 71761.82,877607.99 71865.03,877545.32 71959.04,877499.22 72026.82)'):
feat.DumpReadable()
return 'fail'
ds.Destroy()
return 'success'
gdaltest_list = [
ogr_edigeo_1]
if __name__ == '__main__':
gdaltest.setup_run('ogr_edigeo')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| 38.036232 | 305 | 0.600495 | [
"MIT"
] | GISerliang/gdal | autotest/ogr/ogr_edigeo.py | 5,249 | Python |
from typing import Optional
from tjax import Array, Generator, Shape
from .parametrization import Parametrization
__all__ = ['Samplable']
class Samplable(Parametrization):
def sample(self, rng: Generator, shape: Optional[Shape] = None) -> Array:
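        """Draw a sample of the given shape using the supplied random generator.

        This base implementation is a stub; concrete distributions are expected
        to override it.
        """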
raise NotImplementedError
| 22.230769 | 77 | 0.754325 | [
"MIT"
] | NeilGirdhar/efax | efax/_src/samplable.py | 289 | Python |
from equinox.models import Model,cleanup
import glm
from random import random
from .glutils import bindIndicesToBuffer, storeDataInVBO,createVAO,unbindVAO
class Terrain(Model):
def __init__(self, n_vertex):
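        # A flat 2x2 quad on the XZ plane (y = 0), split into two triangles by
        # `indices`; note that the `n_vertex` argument is currently unused.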
self.vertices = (
-1.0, 0.0, 1.0,
-1.0, 0.0, -1.0,
1.0, 0.0, -1.0,
1.0, 0.0, 1.0,
)
self.normals = (
0.0, 1.0, 0.0,
0.0, 1.0, 0.0,
0.0, 1.0, 0.0,
0.0, 1.0, 0.0
)
self.indices = (
0,1,2,
2,3,0
)
| 17.735294 | 76 | 0.434494 | [
"MIT"
] | ProfAndreaPollini/equinox | equinox/models/terrain.py | 603 | Python |
from datetime import datetime
import itertools
import os
import random
import string
from _signal import SIGINT
from contextlib import contextmanager
from functools import partial
from itertools import permutations, combinations
from shutil import copyfile
from sys import executable
from time import sleep, perf_counter
from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union, Callable
import base58
import pytest
from indy.pool import set_protocol_version
from common.serializers.serialization import invalid_index_serializer
from crypto.bls.bls_factory import BlsFactoryCrypto
from plenum.common.event_bus import ExternalBus, InternalBus
from plenum.common.member.member import Member
from plenum.common.member.steward import Steward
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from plenum.common.timer import QueueTimer, TimerService
from plenum.config import Max3PCBatchWait
from psutil import Popen
import json
import asyncio
from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \
multi_sign_request
from indy.error import ErrorCode, IndyError
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.common.constants import DOMAIN_LEDGER_ID, OP_FIELD_NAME, REPLY, REQNACK, REJECT, \
CURRENT_PROTOCOL_VERSION, STEWARD, VALIDATOR, TRUSTEE, DATA, BLS_KEY, BLS_KEY_PROOF
from plenum.common.exceptions import RequestNackedException, RequestRejectedException, CommonSdkIOException, \
PoolLedgerTimeoutException
from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit
from plenum.common.txn_util import get_req_id, get_from, get_payload_data
from plenum.common.types import f, OPERATION
from plenum.common.util import getNoInstances, get_utc_epoch
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.request import Request
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.node import Node
from plenum.test import waits
from plenum.test.constants import BUY
from plenum.test.msgs import randomMsg
from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \
getAllMsgReceivedForNode
from plenum.test.test_node import TestNode, TestReplica, \
getPrimaryReplica, getNonPrimaryReplicas
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventuallyAll, eventually
from stp_core.loop.looper import Looper
from stp_core.network.util import checkPortAvailable
logger = getlogger()
# noinspection PyUnresolvedReferences
def ordinal(n):
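    # Suffix lookup trick: slicing "tsnrhtdd" with stride 4 yields "th", "st",
    # "nd" or "rd"; the start index is 1/2/3 for n ending in 1/2/3 (except the
    # teens 11-13) and 0 ("th") otherwise, e.g. ordinal(1) == '1st',
    # ordinal(12) == '12th', ordinal(23) == '23rd'.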
return "%d%s" % (
n, "tsnrhtdd"[(n / 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def random_string(length: int) -> str:
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def send_reqs_batches_and_get_suff_replies(
looper: Looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
num_reqs: int,
num_batches=1,
**kwargs):
# This method assumes that `num_reqs` <= num_batches*MaxbatchSize
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs)
else:
requests = []
for _ in range(num_batches - 1):
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs // num_batches))
rem = num_reqs % num_batches
if rem == 0:
rem = num_reqs // num_batches
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, rem))
return requests
# noinspection PyIncorrectDocstring
def checkResponseCorrectnessFromNodes(receivedMsgs: Iterable, reqId: int,
fValue: int) -> bool:
"""
the client must get at least :math:`f+1` responses
"""
msgs = [(msg[f.RESULT.nm][f.REQ_ID.nm], msg[f.RESULT.nm][f.IDENTIFIER.nm])
for msg in getRepliesFromClientInbox(receivedMsgs, reqId)]
groupedMsgs = {}
for tpl in msgs:
groupedMsgs[tpl] = groupedMsgs.get(tpl, 0) + 1
assert max(groupedMsgs.values()) >= fValue + 1
def getRepliesFromClientInbox(inbox, reqId) -> list:
return list({_: msg for msg, _ in inbox if
msg[OP_FIELD_NAME] == REPLY and msg[f.RESULT.nm]
[f.REQ_ID.nm] == reqId}.values())
def checkLastClientReqForNode(node: TestNode, expectedRequest: Request):
recvRequest = getLastClientReqReceivedForNode(node)
assert recvRequest
assert expectedRequest.as_dict == recvRequest.as_dict
# noinspection PyIncorrectDocstring
def assertLength(collection: Iterable[Any], expectedLength: int):
assert len(
collection) == expectedLength, "Observed length was {} but " \
"expected length was {}". \
format(len(collection), expectedLength)
def assertEquality(observed: Any, expected: Any, details=None):
assert observed == expected, "Observed value was {} but expected value " \
"was {}, details: {}".format(observed, expected, details)
def randomOperation():
return {
"type": BUY,
"amount": random.randint(10, 100000)
}
def random_requests(count):
return [randomOperation() for _ in range(count)]
def random_request_objects(count, protocol_version):
req_dicts = random_requests(count)
return [Request(operation=op, protocolVersion=protocol_version) for op in req_dicts]
def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
txn = request.operation
txn.update(reply)
return txn
async def msgAll(nodes):
# test sending messages from every node to every other node
# TODO split send and check so that the messages can be sent concurrently
for p in permutations(nodes, 2):
await sendMessageAndCheckDelivery(p[0], p[1])
def sendMessage(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to another
:param nodes:
:param sender: sender
:param reciever: recepient
:param msg: optional message - by default random one generated
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
async def sendMessageAndCheckDelivery(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
Sends message from one node to another and checks that it was delivered
:param sender: sender
:param reciever: recepient
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
await eventually(checkMessageReceived, msg, reciever, method,
retryWait=.1,
timeout=timeout,
ratchetSteps=10)
def sendMessageToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to all others
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:return:
"""
for node in nodes:
if node != sender:
sendMessage(sender, node, msg)
async def sendMessageAndCheckDeliveryToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
Sends message from one node to all other and checks that it was delivered
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
len(nodes))
for node in nodes:
if node != sender:
await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
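            # NOTE: despite the docstring, delivery is only verified for the
            # first non-sender node; the loop exits right after.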
break
def checkMessageReceived(msg, receiver, method: str = None):
allMsgs = getAllMsgReceivedForNode(receiver, method)
assert msg in allMsgs
def addNodeBack(node_set,
looper: Looper,
node: Node,
tconf,
tdir) -> TestNode:
config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restartedNode = TestNode(node.name,
config_helper=config_helper,
config=tconf,
ha=node.nodestack.ha,
cliha=node.clientstack.ha)
node_set.append(restartedNode)
looper.add(restartedNode)
return restartedNode
def checkPropagateReqCountOfNode(node: TestNode, digest: str):
assert digest in node.requests
assert node.quorums.propagate.is_reached(
len(node.requests[digest].propagates))
def requestReturnedToNode(node: TestNode, key: str,
instId: int):
params = getAllArgs(node, node.processOrdered)
# Skipping the view no and time from each ordered request
recvdOrderedReqs = [
(p['ordered'].instId, p['ordered'].valid_reqIdr[0]) for p in params]
expected = (instId, key)
return expected in recvdOrderedReqs
def checkRequestReturnedToNode(node: TestNode, key: str,
instId: int):
assert requestReturnedToNode(node, key, instId)
def checkRequestNotReturnedToNode(node: TestNode, key: str,
instId: int):
assert not requestReturnedToNode(node, key, instId)
def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
instances = range(getNoInstances(len(txnPoolNodeSet)))
for node, inst_id in itertools.product(txnPoolNodeSet, instances):
checkRequestNotReturnedToNode(node,
request.key,
inst_id)
def checkPrePrepareReqSent(replica: TestReplica, req: Request):
prePreparesSent = getAllArgs(replica._ordering_service,
replica._ordering_service.send_pre_prepare)
assert (req.digest,) in \
[p["ppReq"].reqIdr for p in prePreparesSent]
def checkPrePrepareReqRecvd(replicas: Iterable[TestReplica],
expectedRequest: PrePrepare):
for replica in replicas:
params = getAllArgs(replica._ordering_service, replica._ordering_service._can_process_pre_prepare)
assert expectedRequest.reqIdr in [p['pre_prepare'].reqIdr for p in params]
def checkPrepareReqSent(replica: TestReplica, key: str,
view_no: int):
paramsList = getAllArgs(replica._ordering_service, replica._ordering_service._can_prepare)
rv = getAllReturnVals(replica._ordering_service,
replica._ordering_service._can_prepare)
args = [p["ppReq"].reqIdr for p in paramsList if p["ppReq"].viewNo == view_no]
assert (key,) in args
idx = args.index((key,))
assert rv[idx]
def checkSufficientPrepareReqRecvd(replica: TestReplica, viewNo: int,
ppSeqNo: int):
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.prepares
assert len(replica._ordering_service.prepares[key][1]) >= replica.quorums.prepare.value
def checkSufficientCommitReqRecvd(replicas: Iterable[TestReplica], viewNo: int,
ppSeqNo: int):
for replica in replicas:
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.commits
received = len(replica._ordering_service.commits[key][1])
minimum = replica.quorums.commit.value
assert received > minimum
def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
"""
Checks if all the given nodes have the expected view no
:param nodes: The nodes to check for
:param expectedViewNo: the view no that the nodes are expected to have
:return:
"""
viewNos = set()
for node in nodes:
logger.debug("{}'s view no is {}".format(node, node.master_replica.viewNo))
viewNos.add(node.master_replica.viewNo)
assert len(viewNos) == 1, 'Expected 1, but got {}. ' \
'ViewNos: {}'.format(len(viewNos), [(n.name, n.master_replica.viewNo) for n in nodes])
vNo, = viewNos
if expectedViewNo is not None:
assert vNo >= expectedViewNo, \
'Expected at least {}, but got {}'.format(expectedViewNo, vNo)
return vNo
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
customTimeout=None):
"""
Waits for nodes to come to same view.
Raises exception when time is out
"""
timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
return looper.run(eventually(checkViewNoForNodes,
txnPoolNodeSet,
expectedViewNo,
timeout=timeout))
def getNodeSuspicions(node: TestNode, code: int = None):
params = getAllArgs(node, TestNode.reportSuspiciousNode)
if params and code is not None:
params = [param for param in params
if 'code' in param and param['code'] == code]
return params
def checkDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
last = p.spylog.getLastParams(p.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def checkMasterReplicaDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
stasher = p.master_replica.stasher
last = stasher.spylog.getLastParams(stasher.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def countDiscarded(processor, reasonPat):
c = 0
for entry in processor.spylog.getAll(processor.discard):
        if 'reason' in entry.params and (
                (isinstance(entry.params['reason'], str)
                 and reasonPat in entry.params['reason'])
                or reasonPat in str(entry.params['reason'])):
            c += 1
return c
def filterNodeSet(nodeSet, exclude: List[Union[str, Node]]):
"""
Return a set of nodes with the nodes in exclude removed.
:param nodeSet: the set of nodes
:param exclude: the list of nodes or node names to exclude
:return: the filtered nodeSet
"""
return [n for n in nodeSet
if n not in
[nodeSet[x] if isinstance(x, str) else x for x in exclude]]
def whitelistNode(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistNode(toWhitelist, *codes)
def whitelistClient(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistClient(toWhitelist, *codes)
def assertExp(condition):
assert condition
def assert_eq(actual, expected):
assert actual == expected
def assert_in(value, collection):
assert value in collection
def assertFunc(func):
assert func()
def checkLedgerEquality(ledger1, ledger2):
assertLength(ledger1, ledger2.size)
assertEquality(ledger1.root_hash, ledger2.root_hash)
assertEquality(ledger1.uncommitted_root_hash, ledger2.uncommitted_root_hash)
def checkAllLedgersEqual(*ledgers):
for l1, l2 in combinations(ledgers, 2):
checkLedgerEquality(l1, l2)
def checkStateEquality(state1, state2):
if state1 is None:
return state2 is None
assertEquality(state1.as_dict, state2.as_dict)
assertEquality(state1.committedHeadHash, state2.committedHeadHash)
assertEquality(state1.committedHead, state2.committedHead)
def check_seqno_db_equality(db1, db2):
if db1._keyValueStorage._db is None or db2._keyValueStorage._db is None:
return False
assert db1.size == db2.size, \
"{} != {}".format(db1.size, db2.size)
assert {bytes(k): bytes(v) for k, v in db1._keyValueStorage.iterator()} == \
{bytes(k): bytes(v) for k, v in db2._keyValueStorage.iterator()}
def check_primaries_equality(node1, node2):
assert node1.primaries == node2.primaries, \
"{} != {}, Node1: {}; Node2: {}".format(node1.primaries, node2.primaries, node1, node2)
def check_last_ordered_3pc(node1, node2):
master_replica_1 = node1.master_replica
master_replica_2 = node2.master_replica
assert master_replica_1.last_ordered_3pc == master_replica_2.last_ordered_3pc, \
"{} != {} Node1: {}, Node2: {}".format(master_replica_1.last_ordered_3pc,
master_replica_2.last_ordered_3pc,
node1, node2)
return master_replica_1.last_ordered_3pc
def check_last_ordered_3pc_backup(node1, node2):
assert len(node1.replicas) == len(node2.replicas)
for i in range(1, len(node1.replicas)):
replica1 = node1.replicas[i]
replica2 = node2.replicas[i]
assert replica1.last_ordered_3pc == replica2.last_ordered_3pc, \
"{}: {} != {}: {}".format(replica1, replica1.last_ordered_3pc,
replica2, replica2.last_ordered_3pc)
def check_view_no(node1, node2):
assert node1.master_replica.viewNo == node2.master_replica.viewNo, \
"{} != {}".format(node1.master_replica.viewNo, node2.master_replica.viewNo)
def check_last_ordered_3pc_on_all_replicas(nodes, last_ordered_3pc):
for n in nodes:
for r in n.replicas.values():
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}, Replica: {}".format(r.last_ordered_3pc,
last_ordered_3pc, r)
def check_last_ordered_3pc_on_master(nodes, last_ordered_3pc):
for n in nodes:
assert n.master_replica.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(n.master_replica.last_ordered_3pc,
last_ordered_3pc)
def check_last_ordered_3pc_on_backup(nodes, last_ordered_3pc):
for n in nodes:
for i, r in n.replicas.items():
if i != 0:
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(r.last_ordered_3pc,
last_ordered_3pc)
def randomText(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
def mockGetInstalledDistributions(packages):
ret = []
for pkg in packages:
obj = type('', (), {})()
obj.key = pkg
ret.append(obj)
return ret
def mockImportModule(moduleName):
obj = type(moduleName, (), {})()
obj.send_message = lambda *args: None
return obj
def initDirWithGenesisTxns(
dirName,
tconf,
tdirWithPoolTxns=None,
tdirWithDomainTxns=None,
new_pool_txn_file=None,
new_domain_txn_file=None):
os.makedirs(dirName, exist_ok=True)
if tdirWithPoolTxns:
new_pool_txn_file = new_pool_txn_file or tconf.poolTransactionsFile
copyfile(
os.path.join(
tdirWithPoolTxns, genesis_txn_file(
tconf.poolTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_pool_txn_file)))
if tdirWithDomainTxns:
new_domain_txn_file = new_domain_txn_file or tconf.domainTransactionsFile
copyfile(
os.path.join(
tdirWithDomainTxns, genesis_txn_file(
tconf.domainTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_domain_txn_file)))
def stopNodes(nodes: List[TestNode], looper=None, ensurePortsFreedUp=True):
if ensurePortsFreedUp:
assert looper, 'Need a looper to make sure ports are freed up'
for node in nodes:
node.stop()
if ensurePortsFreedUp:
ports = [[n.nodestack.ha[1], n.clientstack.ha[1]] for n in nodes]
waitUntilPortIsAvailable(looper, ports)
def waitUntilPortIsAvailable(looper, ports, timeout=5):
ports = itertools.chain(*ports)
def chk():
for port in ports:
checkPortAvailable(("", port))
looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def run_script(script, *args):
s = os.path.join(os.path.dirname(__file__), '../../scripts/' + script)
command = [executable, s]
command.extend(args)
    with Popen(command) as p:
sleep(4)
p.send_signal(SIGINT)
p.wait(timeout=1)
assert p.poll() == 0, 'script failed'
def viewNoForNodes(nodes):
viewNos = {node.viewNo for node in nodes}
assert 1 == len(viewNos)
return next(iter(viewNos))
def primaryNodeNameForInstance(nodes, instanceId):
primaryNames = {node.replicas[instanceId].primaryName for node in nodes}
assert 1 == len(primaryNames)
primaryReplicaName = next(iter(primaryNames))
return primaryReplicaName[:-2]
def nodeByName(nodes, name):
for node in nodes:
if node.name == name:
return node
raise Exception("Node with the name '{}' has not been found.".format(name))
def send_pre_prepare(view_no, pp_seq_no, nodes,
state_root=None, txn_root=None):
pre_prepare = PrePrepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
["requests digest"],
0,
"random digest",
DOMAIN_LEDGER_ID,
state_root or '0' * 44,
txn_root or '0' * 44,
0,
True
)
primary_node = getPrimaryReplica(nodes).node
non_primary_nodes = set(nodes) - {primary_node}
sendMessageToAll(nodes, primary_node, pre_prepare)
for non_primary_node in non_primary_nodes:
sendMessageToAll(nodes, non_primary_node, pre_prepare)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
prepare = Prepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root or '0' * 44,
txn_root or '0' * 44
)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, prepare)
def send_commit(view_no, pp_seq_no, nodes):
commit = Commit(
0,
view_no,
pp_seq_no)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, commit)
def get_key_from_req(req: dict):
return Request(identifier=req[f.IDENTIFIER.nm],
reqId=req[f.REQ_ID.nm],
operation=req[OPERATION],
protocolVersion=req[f.PROTOCOL_VERSION.nm],
signature=req.get(f.SIG.nm),
taaAcceptance=req.get(f.TAA_ACCEPTANCE)
).key
def chk_all_funcs(looper, funcs, acceptable_fails=0, retry_wait=None,
timeout=None, override_eventually_timeout=False):
# TODO: Move this logic to eventuallyAll
def chk():
fails = 0
last_ex = None
for func in funcs:
try:
func()
except Exception as ex:
fails += 1
if fails >= acceptable_fails:
logger.debug('Too many fails, the last one: {}'.format(repr(ex)))
last_ex = ex
assert fails <= acceptable_fails, '{} out of {} failed. Last exception:' \
' {}'.format(fails, len(funcs), last_ex)
kwargs = {}
if retry_wait:
kwargs['retryWait'] = retry_wait
if timeout:
kwargs['timeout'] = timeout
if override_eventually_timeout:
kwargs['override_timeout_limit'] = override_eventually_timeout
looper.run(eventually(chk, **kwargs))
def check_request_ordered(node, request: Request):
# it's ok to iterate through all txns since this is a test
for seq_no, txn in node.domainLedger.getAllTxn():
if get_req_id(txn) is None:
continue
if get_from(txn) is None:
continue
if get_req_id(txn) != request.reqId:
continue
if get_from(txn) != request.identifier:
continue
return True
raise ValueError('{} request not ordered by node {}'.format(request, node.name))
def wait_for_requests_ordered(looper, nodes, requests):
node_count = len(nodes)
timeout_per_request = waits.expectedTransactionExecutionTime(node_count)
total_timeout = (1 + len(requests) / 10) * timeout_per_request
coros = [partial(check_request_ordered,
node,
request)
for (node, request) in list(itertools.product(nodes, requests))]
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=total_timeout))
def create_new_test_node(test_node_class, node_config_helper_class, name, conf,
tdir, plugin_paths, bootstrap_cls=None,
node_ha=None, client_ha=None):
config_helper = node_config_helper_class(name, conf, chroot=tdir)
return test_node_class(name,
config_helper=config_helper,
config=conf,
pluginPaths=plugin_paths,
ha=node_ha,
cliha=client_ha,
bootstrap_cls=bootstrap_cls)
# ####### SDK
def sdk_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=None, **kwargs):
# Question: Why this method is called sdk_gen_request? It does not use
# the indy-sdk
return Request(operation=operation, reqId=random.randint(10, 1000000000),
protocolVersion=protocol_version, identifier=identifier,
**kwargs)
def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did):
_, new_steward_did = sdk_wallet_new_steward
node_ip = '{}.{}.{}.{}'.format(
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240))
data = {
'alias': node_alias,
'client_port': 50001,
'node_port': 50002,
'node_ip': node_ip,
'client_ip': node_ip,
'services': []
}
req = looper.loop.run_until_complete(
build_node_request(new_steward_did, node_did, json.dumps(data)))
return Request(**json.loads(req))
def sdk_random_request_objects(count, protocol_version, identifier=None,
**kwargs):
ops = random_requests(count)
return [sdk_gen_request(op, protocol_version=protocol_version,
identifier=identifier, **kwargs) for op in ops]
def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req.as_dict) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence):
reqs_str = [json.dumps(req.as_dict) for req in reqs]
for sdk_wallet in sdk_wallets:
wallet_h, did = sdk_wallet
reqs_str = [looper.loop.run_until_complete(multi_sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs_str
def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multisign_request_object(looper, sdk_wallet, req):
wh, did = sdk_wallet
return looper.loop.run_until_complete(multi_sign_request(wh, did, req))
def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wh, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(multi_sign_request(wh, did, req_str))
return json.loads(resp)
def sdk_signed_random_requests(looper, sdk_wallet, count):
_, did = sdk_wallet
reqs_obj = sdk_random_request_objects(count, identifier=did,
protocol_version=CURRENT_PROTOCOL_VERSION)
return sdk_sign_request_objects(looper, sdk_wallet, reqs_obj)
def sdk_send_signed_requests(pool_h, signed_reqs: Sequence):
return [(json.loads(req),
asyncio.ensure_future(submit_request(pool_h, req)))
for req in signed_reqs]
def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int):
reqs = sdk_signed_random_requests(looper, sdk_wallet, count)
return sdk_send_signed_requests(pool_h, reqs)
def sdk_send_random_request(looper, pool_h, sdk_wallet):
rets = sdk_send_random_requests(looper, pool_h, sdk_wallet, 1)
return rets[0]
def sdk_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
reqs = [sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did) for _ in range(count)]
return [sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req) for req in reqs]
def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
req_gens = [
lambda: sdk_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]),
lambda: sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did),
]
res = []
for i in range(count):
req = req_gens[i % len(req_gens)]()
res.append(sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req))
looper.runFor(0.1) # Give nodes some time to start ordering, so that requests are really alternating
return res
def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req):
wallet_handle, sender_did = sdk_wallet
return json.loads(req), asyncio.ensure_future(
sign_and_submit_request(pool_handle, wallet_handle, sender_did, req))
def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj):
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_sign_and_submit_op(looper, pool_handle, sdk_wallet, op):
_, did = sdk_wallet
req_obj = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=did)
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
req_json, resp_task = sdk_req_resp
    # TODO: change the timeout evaluation logic once the SDK allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
try:
resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
resp = json.loads(resp)
except IndyError as e:
resp = e.error_code
except TimeoutError as e:
resp = ErrorCode.PoolLedgerTimeout
return req_json, resp
# TODO: Check places where sdk_get_replies is used without sdk_check_reply.
# We need to be sure that the test behaviour doesn't require checking response
# validity.
def sdk_get_replies(looper, sdk_req_resp: Sequence, timeout=None):
resp_tasks = [resp for _, resp in sdk_req_resp]
    # TODO: change the timeout evaluation logic once the SDK allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
def get_res(task, done_list):
if task in done_list:
try:
resp = json.loads(task.result())
except IndyError as e:
resp = e.error_code
else:
resp = ErrorCode.PoolLedgerTimeout
return resp
done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout))
if pending:
for task in pending:
task.cancel()
ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp]
return ret
def sdk_check_reply(req_res):
req, res = req_res
if isinstance(res, ErrorCode):
if res == ErrorCode.PoolLedgerTimeout:
raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}'
.format(req))
else:
raise CommonSdkIOException('Got an error with code {} for request {}'
.format(res, req))
if not isinstance(res, dict):
raise CommonSdkIOException("Unexpected response format {}".format(res))
def _parse_op(res_dict):
if res_dict['op'] == REQNACK:
raise RequestNackedException('ReqNack of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if res_dict['op'] == REJECT:
raise RequestRejectedException('Reject of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if 'op' in res:
_parse_op(res)
else:
for resps in res.values():
if isinstance(resps, str):
_parse_op(json.loads(resps))
elif isinstance(resps, dict):
_parse_op(resps)
else:
raise CommonSdkIOException("Unexpected response format {}".format(res))
def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None):
rets = []
for req_res in sdk_get_replies(looper, sdk_req_resp, timeout):
sdk_check_reply(req_res)
rets.append(req_res)
return rets
def sdk_eval_timeout(req_count: int, node_count: int,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
timeout_per_request += add_delay_to_timeout
    # Here we try to account for the execution timeout of N requests:
    # total_timeout should satisfy
    # timeout_per_request < total_timeout < timeout_per_request * N.
    # We cannot simply take (timeout_per_request * N) because it is too large
    # (for timeout_per_request=5 and N=10 that would be total_timeout=50 sec),
    # so let's start with a simple formula:
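    # e.g. with timeout_per_request=5 and req_count=10 this gives (1 + 10/10) * 5 = 10 sec.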
return (1 + req_count / 10) * timeout_per_request
def sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
if not timeout:
timeout = sdk_eval_timeout(len(signed_reqs), len(txnPoolNodeSet))
results = sdk_send_signed_requests(pool_h, signed_reqs)
sdk_replies = sdk_get_replies(looper, results, timeout=timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,
override_timeout_limit=False, total_timeout=None):
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)
if not total_timeout:
total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet),
customTimeoutPerReq=customTimeoutPerReq,
add_delay_to_timeout=add_delay_to_timeout)
sdk_replies = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, **kwargs):
# This method assumes that `num_reqs` <= num_batches*MaxbatchSize
    if num_reqs < num_batches:
        raise ValueError(
            'sdk_send_batches_of_random_and_check requires num_reqs >= num_batches '
            '(each batch must contain at least one request)')
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_replies = []
for _ in range(num_batches - 1):
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_batch, **kwargs))
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_last_batch, **kwargs))
return sdk_replies
def sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, timeout=Max3PCBatchWait):
    if num_reqs < num_batches:
        raise ValueError(
            'sdk_send_batches_of_random requires num_reqs >= num_batches '
            '(each batch must contain at least one request)')
if num_batches == 1:
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs)
looper.runFor(timeout)
return sdk_reqs
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_reqs = []
for _ in range(num_batches - 1):
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_batch))
looper.runFor(timeout)
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_last_batch))
looper.runFor(timeout)
return sdk_reqs
def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wallet_h, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(sign_request(wallet_h, did, req_str))
return json.loads(resp)
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
instances = range(getNoInstances(len(nodeSet)))
coros = []
for node, inst_id in itertools.product(nodeSet, instances):
c = partial(checkRequestNotReturnedToNode,
node=node,
identifier=request['identifier'],
reqId=request['reqId'],
instId=inst_id
)
coros.append(c)
timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
def sdk_json_to_request_object(json_req):
return Request(identifier=json_req.get('identifier', None),
reqId=json_req['reqId'],
operation=json_req['operation'],
signature=json_req['signature'] if 'signature' in json_req else None,
protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None,
taaAcceptance=json_req.get('taaAcceptance', None))
def sdk_json_couples_to_request_list(json_couples):
req_list = []
for json_couple in json_couples:
req_list.append(sdk_json_to_request_object(json_couple[0]))
return req_list
def sdk_get_bad_response(looper, reqs, exception, message):
with pytest.raises(exception) as e:
sdk_get_and_check_replies(looper, reqs)
assert message in e._excinfo[1].args[0]
def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
looper.loop.run_until_complete(set_protocol_version(version))
# Context managers to be used with tconf fixture
@contextmanager
def perf_monitor_disabled(tconf):
old_unsafe = tconf.unsafe.copy()
tconf.unsafe.add("disable_view_change")
yield tconf
tconf.unsafe = old_unsafe
@contextmanager
def view_change_timeout(tconf, vc_timeout, propose_timeout=None):
old_view_change_timeout = tconf.NEW_VIEW_TIMEOUT
old_propose_timeout = tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT
old_propagate_request_delay = tconf.PROPAGATE_REQUEST_DELAY
tconf.NEW_VIEW_TIMEOUT = vc_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = vc_timeout if propose_timeout is None else propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = 0
yield tconf
tconf.NEW_VIEW_TIMEOUT = old_view_change_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = old_propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = old_propagate_request_delay
@contextmanager
def max_3pc_batch_limits(tconf, size, wait=10000):
old_size = tconf.Max3PCBatchSize
old_wait = tconf.Max3PCBatchWait
tconf.Max3PCBatchSize = size
tconf.Max3PCBatchWait = wait
yield tconf
tconf.Max3PCBatchSize = old_size
tconf.Max3PCBatchWait = old_wait
@contextmanager
def freshness(tconf, enabled, timeout):
old_update_state = tconf.UPDATE_STATE_FRESHNESS
old_timeout = tconf.STATE_FRESHNESS_UPDATE_INTERVAL
tconf.UPDATE_STATE_FRESHNESS = enabled
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = timeout
yield tconf
tconf.UPDATE_STATE_FRESHNESS = old_update_state
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_timeout
@contextmanager
def primary_disconnection_time(tconf, value):
old_tolarate_disconnection = tconf.ToleratePrimaryDisconnection
tconf.ToleratePrimaryDisconnection = value
yield tconf
tconf.ToleratePrimaryDisconnection = old_tolarate_disconnection
@contextmanager
def acc_monitor(tconf, acc_monitor_enabled=True, acc_monitor_timeout=3, acc_monitor_delta=0):
old_timeout = tconf.ACC_MONITOR_TIMEOUT
old_delta = tconf.ACC_MONITOR_TXN_DELTA_K
old_acc_monitor_enabled = tconf.ACC_MONITOR_ENABLED
tconf.ACC_MONITOR_TIMEOUT = acc_monitor_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = acc_monitor_delta
tconf.ACC_MONITOR_ENABLED = acc_monitor_enabled
yield tconf
tconf.ACC_MONITOR_TIMEOUT = old_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = old_delta
tconf.ACC_MONITOR_ENABLED = old_acc_monitor_enabled
def create_pre_prepare_params(state_root,
ledger_id=DOMAIN_LEDGER_ID,
txn_root=None,
timestamp=None,
bls_multi_sig=None,
view_no=0,
pool_state_root=None,
pp_seq_no=0,
inst_id=0,
audit_txn_root=None,
reqs=None,
bls_multi_sigs=None):
if timestamp is None:
timestamp = get_utc_epoch()
req_idrs = [req.key for req in reqs] if reqs is not None else [random_string(32)]
digest = OrderingService.generate_pp_digest(req_idrs, view_no, timestamp)
params = [inst_id,
view_no,
pp_seq_no,
timestamp,
req_idrs,
init_discarded(0),
digest,
ledger_id,
state_root,
txn_root or '1' * 32,
0,
True,
pool_state_root or generate_state_root(),
audit_txn_root or generate_state_root()]
if bls_multi_sig:
# Pass None for backward compatibility
params.append(None)
params.append([bls_multi_sig.as_list()])
elif bls_multi_sigs is not None:
# Pass None for backward compatibility
params.append(None)
params.append([sig.as_list() for sig in bls_multi_sigs])
return params
def create_pre_prepare_no_bls(state_root, view_no=0, pool_state_root=None, pp_seq_no=0, inst_id=0, audit_txn_root=None):
params = create_pre_prepare_params(state_root=state_root,
view_no=view_no,
pool_state_root=pool_state_root,
pp_seq_no=pp_seq_no,
inst_id=inst_id,
audit_txn_root=audit_txn_root)
return PrePrepare(*params)
def create_commit_params(view_no, pp_seq_no, inst_id=0):
return [inst_id, view_no, pp_seq_no]
def create_commit_no_bls_sig(req_key, inst_id=0):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no, inst_id=inst_id)
return Commit(*params)
def create_commit_with_bls_sig(req_key, bls_sig):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({DOMAIN_LEDGER_ID: bls_sig})
return Commit(*params)
def create_commit_with_bls_sigs(req_key, bls_sig, lid):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({str(lid): bls_sig})
return Commit(*params)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
params = bls_bft.update_commit(params, pre_prepare)
return Commit(*params)
def create_prepare_params(view_no, pp_seq_no, state_root, inst_id=0):
return [inst_id,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root,
'1' * 32]
def create_prepare_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo,
pre_prepare.ppTime,
pre_prepare.digest,
pre_prepare.stateRootHash,
pre_prepare.txnRootHash,
pre_prepare.auditTxnRootHash]
return Prepare(*params)
def create_commit_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo]
return Commit(*params)
def create_prepare(req_key, state_root, inst_id=0):
view_no, pp_seq_no = req_key
params = create_prepare_params(view_no, pp_seq_no, state_root, inst_id=inst_id)
return Prepare(*params)
def generate_state_root():
return base58.b58encode(os.urandom(32)).decode("utf-8")
def init_discarded(value=None):
"""init discarded field with value and return message like representation"""
discarded = []
if value:
discarded.append(value)
return invalid_index_serializer.serialize(discarded, toBytes=False)
def incoming_3pc_msgs_count(nodes_count: int = 4) -> int:
pre_prepare = 1 # Message from Primary
prepares = nodes_count - 2 # Messages from all nodes exclude primary and self node
commits = nodes_count - 1 # Messages from all nodes exclude self node
# The primary node receives the same number of messages. Doesn't get pre-prepare,
# but gets one more prepare
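    # e.g. for a 4-node pool: 1 pre-prepare + 2 prepares + 3 commits = 6 incoming messages.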
return pre_prepare + prepares + commits
def check_missing_pre_prepares(nodes, count):
assert all(count <= len(replica._ordering_service.prePreparesPendingPrevPP)
for replica in getNonPrimaryReplicas(nodes, instId=0))
class MockTimestamp:
def __init__(self, value=datetime.utcnow()):
self.value = value
def __call__(self):
return self.value
class MockTimer(QueueTimer):
def __init__(self, start_time: int = 0):
self._ts = MockTimestamp(start_time)
QueueTimer.__init__(self, self._ts)
def set_time(self, value):
"""
Update time and run scheduled callbacks afterwards
"""
self._ts.value = value
self._log_time()
self.service()
def sleep(self, seconds):
"""
Simulate sleeping for given amount of seconds, and run scheduled callbacks afterwards
"""
self.set_time(self._ts.value + seconds)
def advance(self):
"""
Advance time to next scheduled callback and run that callback
"""
if not self._events:
return
event = self._pop_event()
self._ts.value = event.timestamp
self._log_time()
event.callback()
def advance_until(self, value):
"""
Advance time in steps until required value running scheduled callbacks in process
"""
while self._events and self._next_timestamp() <= value:
self.advance()
self._ts.value = value
def run_for(self, seconds):
"""
Simulate running for given amount of seconds, running scheduled callbacks at required timestamps
"""
self.advance_until(self._ts.value + seconds)
def wait_for(self, condition: Callable[[], bool], timeout: Optional = None, max_iterations: int = 10000):
"""
Advance time in steps until condition is reached, running scheduled callbacks in process
Throws TimeoutError if fail to reach condition (under required timeout if defined)
"""
counter = 0
deadline = self._ts.value + timeout if timeout else None
while self._events and not condition() and counter < max_iterations:
if deadline and self._next_timestamp() > deadline:
raise TimeoutError("Failed to reach condition in required time, {} iterations passed".format(counter))
self.advance()
counter += 1
if not condition():
if not self._events:
raise TimeoutError("Condition will be never reached, {} iterations passed".format(counter))
else:
raise TimeoutError("Failed to reach condition in {} iterations".format(max_iterations))
def run_to_completion(self, max_iterations: int = 10000):
"""
Advance time in steps until nothing is scheduled
"""
counter = 0
while self._events and counter < max_iterations:
self.advance()
counter += 1
if self._events:
raise TimeoutError("Failed to complete in {} iterations".format(max_iterations))
def _log_time(self):
# TODO: Probably better solution would be to replace real time in logs with virtual?
logger.info("Virtual time: {}".format(self._ts.value))
class TestStopwatch:
def __init__(self, timer: Optional[TimerService] = None):
self._get_current_time = timer.get_current_time if timer else perf_counter
self._start_time = self._get_current_time()
def start(self):
self._start_time = self._get_current_time()
def has_elapsed(self, expected_delay: float, tolerance: float = 0.1) -> bool:
elapsed = self._get_current_time() - self._start_time
return abs(expected_delay - elapsed) <= expected_delay * tolerance
class TestInternalBus(InternalBus):
def __init__(self):
super().__init__()
self.sent_messages = []
def send(self, message: Any, *args):
self.sent_messages.append(message)
super().send(message, *args)
class MockNetwork(ExternalBus):
def __init__(self):
super().__init__(self._send_message)
self.sent_messages = []
def _send_message(self, msg: Any, dst: ExternalBus.Destination):
self.sent_messages.append((msg, dst))
def connect(self, name: str):
self.update_connecteds(self.connecteds.union({name}))
def disconnect(self, name: str):
self.update_connecteds(self.connecteds.difference({name}))
def get_handler_by_type_wm(write_manager, h_type):
for h_l in write_manager.request_handlers.values():
for h in h_l:
if isinstance(h, h_type):
return h
def create_pool_txn_data(node_names: List[str],
crypto_factory: BlsFactoryCrypto,
get_free_port: Callable[[], int],
nodes_with_bls: Optional[int] = None):
nodeCount = len(node_names)
data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
for i, node_name in zip(range(1, nodeCount + 1), node_names):
data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
steward_name = 'Steward' + str(i)
data['seeds'][steward_name] = steward_name + '0' * (32 - len(steward_name))
n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
s_idr = DidSigner(seed=data['seeds'][steward_name].encode())
data['txns'].append(
Member.nym_txn(nym=s_idr.identifier,
verkey=s_idr.verkey,
role=STEWARD,
name=steward_name,
seq_no=i)
)
node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
node_name=node_name,
nym=n_idr,
ip='127.0.0.1',
node_port=get_free_port(),
client_port=get_free_port(),
client_ip='127.0.0.1',
services=[VALIDATOR],
seq_no=i)
if nodes_with_bls is None or i <= nodes_with_bls:
_, bls_key, bls_key_proof = crypto_factory.generate_bls_keys(
seed=data['seeds'][node_name])
get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
data['nodesWithBls'][node_name] = True
data['txns'].append(node_txn)
# Add 4 Trustees
for i in range(4):
trustee_name = 'Trs' + str(i)
data['seeds'][trustee_name] = trustee_name + '0' * (
32 - len(trustee_name))
t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
data['txns'].append(
Member.nym_txn(nym=t_sgnr.identifier,
verkey=t_sgnr.verkey,
role=TRUSTEE,
name=trustee_name)
)
more_data_seeds = \
{
"Alice": "99999999999999999999999999999999",
"Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"John": "dddddddddddddddddddddddddddddddd",
"Les": "ffffffffffffffffffffffffffffffff"
}
more_data_users = []
for more_name, more_seed in more_data_seeds.items():
signer = DidSigner(seed=more_seed.encode())
more_data_users.append(
Member.nym_txn(nym=signer.identifier,
verkey=signer.verkey,
name=more_name,
creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC")
)
data['txns'].extend(more_data_users)
data['seeds'].update(more_data_seeds)
return data
def get_pp_seq_no(nodes: list, inst_id=0) -> int:
los = set([n.replicas._replicas[inst_id].last_ordered_3pc[1] for n in nodes])
assert len(los) == 1
return los.pop()
| 36.212698 | 120 | 0.649215 | [
"Apache-2.0"
] | AYCH-Inc/aych.hyper.tolerant | plenum/test/helper.py | 57,035 | Python |
#!/usr/bin/env python2
from math import pi, cos, sin, atan2, acos, sqrt, pow, radians, asin
from math_calc import *
from service_router import readPos
class LegConsts(object):
''' Class object to store characteristics of each leg '''
def __init__(self, x_off, y_off, z_off, ang_off, leg_nr):
self.x_off = x_off # X offset from body origin to first servo (mm)
self.y_off = y_off # Y offset from body origin to first servo (mm)
self.z_off = z_off # Z offset from body origin to first servo (mm)
self.ang_off = ang_off # Angular offset from body origin to first servo (mm)
self.f_ang_off = radians(13.33) # Angular offset of Femur
self.t_ang_off = radians(-25.90) # Angular offset of Tibia
self.c_len = 66.50 # Link length of Coxa (mm)
self.f_len = 144.40 # Link length of Femur (mm)
self.t_len = 287 # Link length of Tibia (mm)
self.leg_nr = leg_nr # Leg Number
class Kinematics(object):
''' Class object to compute various types of kinematics data for AntBot '''
# Origin to coxa: x_off, y_off, z_off, ang_off, name
leg1 = LegConsts(70.5, 122.225, -14.9, - pi / 3, "Leg 1")
leg2 = LegConsts(-70.5, 122.225, -14.9, -2 * pi / 3, "Leg 2")
leg3 = LegConsts(141.33, 0, -14.9, 0, "Leg 3")
leg4 = LegConsts(-141.33, 0, -14.9, pi, "Leg 4")
leg5 = LegConsts(70.5, -122.225, -14.9, pi / 3, "Leg 5")
leg6 = LegConsts(-70.5, -122.225, -14.9, 2 * pi / 3, "Leg 6")
leg_list = [leg1, leg2, leg3, leg4, leg5, leg6]
################
# Public methods
################
def doFkine(self, all_positions):
''' Function: computes forward kinematics
Parameter: all_positions: list with 18 values of servo positions in steps from ID1 to ID18
Return: ee_xyz: list of x,y,z coordinates for all 6 legs
servoPos: servo positions in radians
'''
servoPos = self.step_to_rad(all_positions)
ee_xyz = []
j = 0
for i in xrange(0, 16, 3):
ee_xyz.extend(self.calc_fkine(servoPos[i:i + 3], self.leg_list[j]))
j += 1
return ee_xyz, servoPos
def doIkine(self, all_positions, x, y, z, body_orient=None, leg=None, auto=None):
''' Function: computes inverse kinematics
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
x,y,z: desired change in x,y,z coordinates (same for all legs)
body_orient: list of 3 integers meaning alpha,beta,gamma rotation in degrees
leg: list with integers meaning leg numbers to compute inverse for them only
Return: list of 18 integers with servo steps
'''
ee_xyz, servoPos = self.doFkine(all_positions)
thetas = []
j = 0
if isinstance(leg, int):
leg = [leg]
elif isinstance(leg, tuple):
leg = list(leg)
        if isinstance(body_orient, tuple):
body_orient = list(body_orient)
if body_orient:
# Optional parameter. Compute inverse with body orientation
body_orient = [radians(d) for d in body_orient]
alpha_rad, beta_rad, gama_rad = body_orient[0], body_orient[1], body_orient[2]
            # Keep the original x, y, z on the right-hand side so the later
            # lines do not pick up already-rotated values (same pattern as
            # calc_rot_displacement below).
            x0, y0, z0 = x, y, z
            x = (cos(gama_rad) * sin(beta_rad) * z0 + sin(gama_rad) * sin(beta_rad) * y0 + x0 * cos(beta_rad)) \
                * cos(alpha_rad) - sin(alpha_rad) * (cos(gama_rad) * y0 - sin(gama_rad) * z0)
            y = (cos(gama_rad) * sin(beta_rad) * z0 + sin(gama_rad) * sin(beta_rad) * y0 + x0 * cos(beta_rad)) \
                * sin(alpha_rad) + cos(alpha_rad) * (cos(gama_rad) * y0 - sin(gama_rad) * z0)
            z = -sin(beta_rad) * x0 + cos(beta_rad) * sin(gama_rad) * y0 + cos(beta_rad) * cos(gama_rad) * z0
if leg:
# Optional parameter. Compute inverse for a specific leg/s.
for i in range(len(leg)):
j = leg[i] - 1
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[j * 3:j * 3 + 3], self.leg_list[j]))
else:
# Compute inverse for all legs if not leg specified.
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
def doIkineRotationEuler(self, all_positions, alpha_rad, beta_rad, gama_rad, dist_x, dist_y, dist_z):
''' Function: computes inverse kinematics and body rotation (Parallel kinematics)
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        alpha_rad, beta_rad, gama_rad: desired body rotation (Euler angles) in radians
                        dist_x, dist_y, dist_z: desired body translation along x, y, z (mm)
            Return: list of 18 integers with servo steps
'''
final_eexyz, ee_xyz = self.calc_rot_matrix(all_positions, alpha_rad, beta_rad, gama_rad)
thetas = []
j = 0
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(final_eexyz[i] - dist_x, final_eexyz[i + 1] - dist_y, final_eexyz[i + 2] - dist_z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
def printForward(self, all_positions):
''' Function: Prints x,y,z coordinates of each leg
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
'''
ee_list, theta_list = self.doFkine(all_positions)
RoundedCoords = ['%.4f' % elem for elem in ee_list]
print ""
print "X,Y,Z coordinates of Leg end-points: "
print " " + str(["X ", " Y ", " Z "])
print "Leg 1: " + str(RoundedCoords[0:3])
print "Leg 2: " + str(RoundedCoords[3:6])
print "Leg 3: " + str(RoundedCoords[6:9])
print "Leg 4: " + str(RoundedCoords[9:12])
print "Leg 5: " + str(RoundedCoords[12:15])
print "Leg 6: " + str(RoundedCoords[15:18])
print ""
def printInverse(self, all_positions, x, y, z):
''' Function: Prints servo positions, in radians, needed to reach the position
Parameters: theta_list: 18 servo positions in radians.
'''
theta_list = self.doIkine(all_positions, x, y, z)
RoundedThetas = ['%.4f' % elem for elem in theta_list]
print ""
print "Theta angles of each servo:"
print " " + str(["Coxa ", "Femur ", "Tibia"])
print "Leg 1: " + str(RoundedThetas[0:3])
print "Leg 2: " + str(RoundedThetas[3:6])
print "Leg 3: " + str(RoundedThetas[6:9])
print "Leg 4: " + str(RoundedThetas[9:12])
print "Leg 5: " + str(RoundedThetas[12:15])
print "Leg 6: " + str(RoundedThetas[15:18])
print ""
def printKinematics(self, all_positions, x, y, z):
self.printForward(all_positions)
self.printInverse(all_positions, x, y, z)
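    # Minimal usage sketch (assuming readPos() returns the current 18 servo
    # positions in steps, as it is used in check_stabilty below):
    #     K = Kinematics()
    #     ee_xyz, servo_rad = K.doFkine(readPos())
    #     steps = K.doIkine(readPos(), 10, 0, -20)  # shift all feet by x=+10mm, z=-20mm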
#################
# Private methods
#################
def calc_fkine(self, servoPos, leg):
theta1 = servoPos[0] - leg.ang_off
theta2 = servoPos[1] + leg.f_ang_off
theta3 = servoPos[2] + leg.t_ang_off
ee_z = leg.f_len * sin(theta2) + leg.t_len * sin(theta3 + theta2) + leg.z_off
ee_x = leg.x_off + cos(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
ee_y = leg.y_off + sin(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
return [ee_x, ee_y, ee_z]
def calc_ikine(self, x, y, z, ee_xyz, leg, auto=None):
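        # Standard 3-DOF leg IK: theta1 comes from the top view via atan2, the
        # remaining femur/tibia angles solve a planar two-link problem with the
        # law of cosines (t3) and atan2 terms; f_ang_off/t_ang_off compensate
        # the physical mounting offsets of femur and tibia.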
init_X = ee_xyz[0]
init_Y = ee_xyz[1]
init_Z = ee_xyz[2]
X = init_X + (x) - leg.x_off
Y = init_Y + (y) - leg.y_off
Z = init_Z + (z) - leg.z_off
theta1 = atan2(Y, X) + leg.ang_off
if theta1 < -pi:
theta1 += 2 * pi
if theta1 > pi:
theta1 -= 2 * pi
new_x = cos(leg.ang_off) * X - sin(leg.ang_off) * Y
new_y = sin(leg.ang_off) * X + cos(leg.ang_off) * Y
final_x = cos(theta1) * new_x + sin(theta1) * new_y - leg.c_len
s = sqrt(pow(final_x, 2) + pow(Z, 2))
try:
t3_term = (-pow(s, 2) + pow(leg.f_len, 2) + pow(leg.t_len, 2)) / (2 * leg.f_len * leg.t_len)
t3 = pi - acos(t3_term)
except ValueError:
print "Cannot compute acos(", t3_term, ") for ", leg.leg_nr
if auto is None:
if t3_term < 0:
t3 = pi - acos(-0.99)
else:
t3 = pi - acos(0.99)
else:
return -1
theta3 = -t3 - leg.t_ang_off
theta2 = -(-atan2(Z, final_x) - atan2(leg.t_len * sin(t3), leg.f_len + leg.t_len * cos(t3)) + leg.f_ang_off)
if auto is not None:
if (theta2 > 1.8 or theta2 < -1.8) or (theta3 < -2.2 or theta3 > 2.2):
return -1
return [theta1, theta2, theta3]
def calc_rot_displacement(self, alpha_rad, beta_rad, gama_rad, ee_xyz):
pre_x = ee_xyz[0]
pre_y = ee_xyz[1]
pre_z = ee_xyz[2]
r_term1 = (cos(gama_rad) * sin(beta_rad) * pre_z + sin(gama_rad) * sin(beta_rad) * pre_y + pre_x * cos(beta_rad))
r_term2 = (cos(gama_rad) * pre_y - sin(gama_rad) * pre_z)
r_x = r_term1 * cos(alpha_rad) - r_term2 * sin(alpha_rad) - pre_x
r_y = r_term1 * sin(alpha_rad) + r_term2 * cos(alpha_rad) - pre_y
r_z = - sin(beta_rad) * pre_x + cos(beta_rad) * sin(gama_rad) * pre_y + cos(beta_rad) * cos(gama_rad) * pre_z - pre_z
return [r_x, r_y, r_z]
def calc_rot_matrix(self, all_positions, alpha_rad, beta_rad, gama_rad):
ee_xyz, servoPos = self.doFkine(all_positions)
rot_val_list = []
for i in xrange(0, 16, 3):
rot_val_list.extend(self.calc_rot_displacement(alpha_rad, beta_rad, gama_rad, ee_xyz[i:i + 3]))
return rot_val_list, ee_xyz
def rad_to_step(self, pos_rads):
return [i / pi * 2048 + 2048 for i in pos_rads]
def step_to_rad(self, pos_steps):
return [(((x / 2047.5) - 1) * pi) for x in pos_steps]
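    # Clarifying note (added here, assuming Dynamixel-style 12-bit servos where a
    # full turn spans 4096 steps centred on 2048): rad_to_step(0) == 2048,
    # rad_to_step(pi / 2) == 3072, and step_to_rad(2048) is ~0 up to the
    # half-step offset (2047.5) used above.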
def make_poligonCorners(self, all_positions, leg_list):
        if isinstance(leg_list, int):
leg_list = [leg_list]
xyz_polygon = []
ee_xyz, servoPos = self.doFkine(all_positions)
newEe_xyz = [ee_xyz[0], ee_xyz[1], ee_xyz[2], ee_xyz[3], ee_xyz[4], ee_xyz[5],
ee_xyz[9], ee_xyz[10], ee_xyz[11], ee_xyz[15], ee_xyz[16], ee_xyz[17],
ee_xyz[12], ee_xyz[13], ee_xyz[14], ee_xyz[6], ee_xyz[7], ee_xyz[8]]
for i in range(len(leg_list)):
j = leg_list[i] - 1
xyz_polygon.extend((newEe_xyz[j * 3:j * 3 + 3]))
return xyz_polygon
def make_polygonLines(self, leg_list, ee_xyz):
print("leglistLins", leg_list)
line = []
        for i in range(len(ee_xyz) // 3):
            j = i - 1
            line.extend([ee_xyz[3 * j + 3] - ee_xyz[3 * j],
                         ee_xyz[3 * j + 4] - ee_xyz[3 * j + 1],
                         ee_xyz[3 * j + 5] - ee_xyz[3 * j + 2]])
return line
def check_stabilty(self, t_poly=None):
ee_xyz, servoPos = self.doFkine(readPos())
tac = [False, True, False, True, True, False]
leg_list = []
for i in range(len(tac)):
if tac[i] is True:
leg_list.extend([i + 1])
        poly_lines = self.make_polygonLines(leg_list, ee_xyz)
        print("lines", poly_lines)
        if tac[1] is True and tac[2] is True and tac[5] is True:
# gamma, beta = 10,20 #self.get_orientation(tac)
# n = [0,-sin(beta),cos(beta)]
print("im not here")
P1 = [ee_xyz[3], ee_xyz[4], 1]
P2 = [ee_xyz[6], ee_xyz[7], 1]
P3 = [ee_xyz[15], ee_xyz[16], 1]
print(P1, P2, P3)
elif tac[0] is True and tac[3] is True and tac[4] is True:
print("im here")
P1 = [ee_xyz[0], ee_xyz[1], 1]
P3 = [ee_xyz[9], ee_xyz[10], 1]
P2 = [ee_xyz[12], ee_xyz[13], 1]
print(P1, P2, P3)
k = 1 # dotProduct(n,P1)
x = 0
y = 1
z = 2
lambda_1 = ((P2[x] * P3[y] - P2[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_2 = -((P1[x] * P3[y] - P1[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_3 = ((P1[x] * P2[y] - P1[y] * P2[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        inside = False
        if lambda_1 > 0.1 and lambda_2 > 0.1 and lambda_3 > 0.1:
if lambda_1 < 0.9 and lambda_2 < 0.9 and lambda_3 < 0.9:
if lambda_1 + lambda_2 + lambda_3 == 1:
inside = True
side1 = subtract(P1, P2)
side2 = subtract(P3, P2)
side3 = subtract(P1, P3)
G = [0, 0, 1]
P2_G = subtract(G, P2)
P3_G = subtract(G, P3)
margin_s1 = sqrt(pow(dotProduct(P2_G, unit_vec(side1)), 2) + dotProduct(P2_G, P2_G))
margin_s2 = sqrt(pow(dotProduct(P2_G, unit_vec(side2)), 2) + dotProduct(P2_G, P2_G))
margin_s3 = sqrt(pow(dotProduct(P3_G, unit_vec(side3)), 2) + dotProduct(P3_G, P3_G))
stability_margin = min(margin_s1, margin_s2, margin_s3)
print(stability_margin, inside)
return stability_margin, inside
def get_orientation(self, leg_list):
ee_xyz, servoPos = self.doFkine(readPos())
p1 = ee_xyz[3 * (leg_list[0] - 1):3 * (leg_list[0] - 1) + 3]
p2 = ee_xyz[3 * (leg_list[1] - 1):3 * (leg_list[1] - 1) + 3]
p3 = ee_xyz[3 * (leg_list[2] - 1):3 * (leg_list[2] - 1) + 3]
p21 = subtract(p2, p1)
p23 = subtract(p2, p3)
normz = crossProduct(p21, p23)
beta = atan2(normz[0], normz[2]) * 180 / pi
gamma = -atan2(normz[1], normz[2]) * 180 / pi
return gamma, beta
def calc_translationStairs(self, riser, climbed_stairs_front, climbed_stairs_rear):
# gamma, beta = self.get_orientation([1,5,6])
ee_xyz, servopos = self.doFkine(readPos())
dist_y = abs(ee_xyz[1] - ee_xyz[13])
riser_diff = (climbed_stairs_front - climbed_stairs_rear) * riser
omega = asin(riser_diff / dist_y) * 180 / pi
AB = -ee_xyz[14] + 30
AC = AB / cos(omega * pi / 180)
BC = AC * sin(omega * pi / 180)
BE = sqrt(pow(ee_xyz[12], 2) + pow(ee_xyz[11], 2)) - 141.33
CE = BE - BC
CD = BC * CE / AC
if AC + CD <= riser_diff:
trans_z_g = riser_diff - AC - CD + 10
translation_z = trans_z_g * cos(omega * pi / 180)
translation_y = trans_z_g * sin(omega * pi / 180)
else:
translation_z = 0
translation_y = 0
return [translation_z, translation_y]
| 47.048048 | 203 | 0.547456 | [
"MIT"
] | JevgenijsGalaktionovs/AntBot | dns_main/src/kinematics.py | 15,667 | Python |
from PIL import Image
import tempfile
import cv2
import imutils
import numpy as np
def set_image_dpi_ppi(file_path):
im = Image.open(file_path)
length_x, width_y = im.size
factor = float(length_x/width_y)
size = int(600), int(600/factor)
im_resized = im.resize(size, Image.ANTIALIAS)
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
im_resized.save(temp_filename, dpi=(800, 800))
return temp_filename
def set_text_region(file_path):
img = cv2.imread(file_path)
height = img.shape[0]
width = img.shape[1]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
ret, binary = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(width/2), 5))
element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(width/3), 2))
dilation = cv2.dilate(binary, element2, iterations=1)
erosion = cv2.erode(dilation, element1, iterations=1)
dilation2 = cv2.dilate(erosion, element2, iterations=2)
region = []
contours, hierarchy = cv2.findContours(dilation2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if (area < height*width/6):
continue
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
        box_height = abs(box[0][1] - box[2][1])
        box_width = abs(box[0][0] - box[2][0])
        if (box_height > box_width * 1.3):
continue
region.append(box)
return img, region
def set_sign_board_region(file_path):
image = cv2.imread(file_path)
height = image.shape[0]
width = image.shape[1]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 25, 15, 15)
thresh = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)[1]
output = image.copy()
cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
region = []
for c in cnts:
epsilon = 0.02 * cv2.arcLength(c, True)
c = cv2.approxPolyDP(c, epsilon, True)
area = cv2.contourArea(c)
if area < int(height*width/3):
continue
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(output,(x,y),(x+w,y+h),(0,255,0),2)
region.append((x,y,x+w,y+h))
return output, region
def add_margin(file_path, top, right, bottom, left, color):
image = Image.open(file_path)
width, height = image.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(image.mode, (new_width, new_height), color)
result.paste(image, (left, top))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
result.save(temp_filename, dpi=(800, 800))
return temp_filename
def process_text(file_path):
image = cv2.imread(file_path)
height = image.shape[0]
width = image.shape[1]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 25, 15, 15)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
cv2.imwrite(temp_filename, thresh)
return temp_filename
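# Illustrative pipeline (a sketch added here, not part of the original module):
# assuming an RGB photo of a sign, the helpers above are typically chained
# before handing the result to an OCR engine, e.g.
#   resized = set_image_dpi_ppi('sign.jpg')
#   padded = add_margin(resized, 20, 20, 20, 20, (255, 255, 255))
#   cleaned = process_text(padded)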
| 36.347368 | 93 | 0.663481 | [
"MIT"
] | tonthatnam/japanese_ocr | tess/utilities/image_process.py | 3,453 | Python |
'''
Module containing python objects matching the ESGF database tables.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
ROLE_USER = 'user'
ROLE_PUBLISHER = 'publisher'
ROLE_ADMIN = 'admin'
ROLE_SUPERUSER = 'super'
class ESGFUser(Base):
""" Class that represents the 'esgf_security.user' table in the ESGF database."""
__tablename__ = 'user'
#__table_args__ = { 'autoload':True, 'schema':'esgf_security' }
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
firstname = Column(String)
middlename = Column(String)
lastname = Column(String)
email = Column(String)
username = Column(String)
password = Column(String)
dn = Column(String)
openid = Column(String)
organization = Column(String)
organization_type = Column(String)
city = Column(String)
state = Column(String)
country = Column(String)
status_code = Column(Integer)
verification_token = Column(String)
notification_code = Column(Integer)
class ESGFGroup(Base):
""" Class that represents the 'esgf_secitity.group' table in the ESGF database."""
__tablename__ = 'group'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
visible = Column(Boolean)
automatic_approval = Column(Boolean)
class ESGFRole(Base):
""" Class that represents the 'esgf_security.role' table in the ESGF database."""
__tablename__ = 'role'
__table_args__ = { 'schema':'esgf_security' }
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
class ESGFPermission(Base):
""" Class that represents the 'esgf_security.permission' table in the ESGF database."""
__tablename__ = 'permission'
__table_args__ = { 'schema':'esgf_security' }
user_id = Column(Integer, ForeignKey('esgf_security.user.id'), primary_key=True)
group_id = Column(Integer, ForeignKey('esgf_security.group.id'), primary_key=True)
role_id = Column(Integer, ForeignKey('esgf_security.role.id'), primary_key=True)
approved = Column(Boolean)
user = relationship("ESGFUser")
group = relationship("ESGFGroup")
role = relationship("ESGFRole")
| 30.7625 | 91 | 0.697278 | [
"BSD-3-Clause"
] | ESGF/COG | cog/plugins/esgf/objects.py | 2,461 | Python |
from classes.fixed_scheduler import FixedScheduler
from classes.concretes.sql_mixin import SqlMixin
from sqlalchemy import Column, create_engine, Table
from sqlalchemy.types import Float
from sqlalchemy.orm import registry, Session
import attr
registry = registry()
@registry.mapped
@attr.s(auto_attribs=True)
class MyClass:
__table__ = Table(
"my_class",
registry.metadata,
Column('time', Float, primary_key=True)
)
time: float
class MyScheduler(SqlMixin, FixedScheduler):
def before_write(self, timestamp: float):
return MyClass(time=timestamp)
if __name__ == "__main__":
engine = create_engine("sqlite:///sanity.sqlite")
registry.metadata.create_all(engine)
with Session(engine) as session:
with session.begin():
scheduler = MyScheduler(1000000, MyClass.time, session)
result = scheduler.check_and_insert()
print(result)
            pass
 | 26.333333 | 67 | 0.704641 | [
"MIT"
] | LiteralGenie/AutomateStuff | fancytimers/tests/sanity.py | 948 | Python |
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
def __init__(
self,
venv: vec_env.VecEnv,
expert_data: Union[Iterable[Mapping], types.Transitions],
expert_batch_size: int,
gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
discrim=None,
*,
discrim_kwargs: Optional[Mapping] = None,
**kwargs,
):
"""Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL.
"""
discrim_kwargs = discrim_kwargs or {}
        if discrim is None:
discrim = discrim_nets.DiscrimNetGAIL(
venv.observation_space,
venv.action_space,
discrim_net=ActObsCNN,
**discrim_kwargs,
)
logging.info("using CNN GAIL")
super().__init__(
venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
)
| 28.963636 | 90 | 0.662272 | [
"MIT"
] | aj96/InfoGAIL | cnn_modules/cnn_gail.py | 1,593 | Python |
# -*- coding: utf-8 -*-
#
# Review Heatmap Add-on for Anki
# Copyright (C) 2016-2019 Glutanimate <https://glutanimate.com>
#
# This file was automatically generated by Anki Add-on Builder v0.1.4
# It is subject to the same licensing terms as the rest of the program
# (see the LICENSE file which accompanies this program).
#
# WARNING! All changes made in this file will be lost!
"""
Initializes generated Qt forms/resources
"""
__all__ = [
"options",
"contrib"
]
from . import options
from . import contrib
| 22.608696 | 70 | 0.709615 | [
"MIT"
] | kb1900/Anki-Addons | review_heatmap/gui/forms/anki21/__init__.py | 520 | Python |
""" Orlov Module : workspace module fixture. """
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
""" Workspace Factory Fixture.
Yields:
directory(Workspace): Workspace Created.
"""
logger.debug('Setup of test structure.')
# create screenshot directory
if request.config.getoption('workspace'):
result_dir = request.config.getoption('workspace')
else:
if not os.path.exists('result'):
logger.debug('Creating results folder to store results')
os.mkdir('result')
result_dir = os.path.join(os.getcwd(), 'result')
logger.debug('Created folder %s', result_dir)
yield Workspace(result_dir)
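# Illustrative example (not part of the original module): a test consuming the
# fixture could look like the sketch below; `mkdir` is assumed to be one of the
# helpers exposed by Workspace.
#
#   def test_creates_screenshot_dir(workspace):
#       assert workspace.mkdir('screenshots')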
| 27.4 | 68 | 0.678832 | [
"MIT"
] | coppelia517/orlov | orlov/libs/workspace/fixture.py | 822 | Python |
import dask
import dask.array as da
import numpy as np
import numpy.testing as npt
import pytest
import sklearn
import sklearn.linear_model
import sklearn.metrics
from dask.array.utils import assert_eq
import dask_ml.metrics
import dask_ml.wrappers
def test_pairwise_distances(X_blobs):
centers = X_blobs[::100].compute()
result = dask_ml.metrics.pairwise_distances(X_blobs, centers)
expected = sklearn.metrics.pairwise_distances(X_blobs.compute(), centers)
assert_eq(result, expected, atol=1e-4)
def test_pairwise_distances_argmin_min(X_blobs):
centers = X_blobs[::100].compute()
# X_blobs has 500 rows per block.
# Ensure 500 rows in the scikit-learn version too.
working_memory = float(80 * 500) / 2 ** 20
ctx = sklearn.config_context(working_memory=working_memory)
with ctx:
a_, b_ = sklearn.metrics.pairwise_distances_argmin_min(
X_blobs.compute(), centers
)
a, b = dask_ml.metrics.pairwise_distances_argmin_min(X_blobs, centers)
a, b = dask.compute(a, b)
npt.assert_array_equal(a, a_)
npt.assert_array_equal(b, b_)
def test_euclidean_distances():
X = da.random.uniform(size=(100, 4), chunks=50)
Y = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, Y)
b = sklearn.metrics.euclidean_distances(X, Y)
assert_eq(a, b)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
a = dask_ml.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
assert_eq(a, b)
y_norm_squared = (Y ** 2).sum(axis=1).compute()[np.newaxis, :]
a = dask_ml.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
b = sklearn.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
assert_eq(a, b)
def test_euclidean_distances_same():
X = da.random.uniform(size=(100, 4), chunks=50)
a = dask_ml.metrics.euclidean_distances(X, X)
b = sklearn.metrics.euclidean_distances(X, X)
assert_eq(a, b, atol=1e-4)
a = dask_ml.metrics.euclidean_distances(X)
b = sklearn.metrics.euclidean_distances(X)
assert_eq(a, b, atol=1e-4)
x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
assert_eq(X, X, Y_norm_squared=x_norm_squared, atol=1e-4)
@pytest.mark.parametrize("kernel", ["linear", "polynomial", "rbf", "sigmoid"])
def test_pairwise_kernels(kernel):
X = da.random.uniform(size=(100, 4), chunks=(50, 4))
a = dask_ml.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
b = sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
r1 = a(X)
r2 = b(X.compute())
assert isinstance(X, da.Array)
assert_eq(r1, r2)
@pytest.mark.parametrize("sample_weight", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("labels", [[0, 1], [0, 1, 3], [1, 0]])
@pytest.mark.parametrize("daskify", [True, False])
def test_log_loss(labels, normalize, sample_weight, daskify):
n = 100
c = 25
y_true = np.random.choice(labels, size=n)
y_pred = np.random.uniform(size=(n, len(labels)))
y_pred /= y_pred.sum(1, keepdims=True)
if sample_weight:
sample_weight = np.random.uniform(size=n)
sample_weight /= sample_weight.sum()
dsample_weight = da.from_array(sample_weight, chunks=c)
else:
sample_weight = None
dsample_weight = None
if daskify:
dy_true = da.from_array(y_true, chunks=c)
dy_pred = da.from_array(y_pred, chunks=c)
else:
dy_true = y_true
dy_pred = y_pred
(dsample_weight,) = dask.compute(dsample_weight)
a = sklearn.metrics.log_loss(
y_true, y_pred, normalize=normalize, sample_weight=sample_weight
)
b = dask_ml.metrics.log_loss(
dy_true,
dy_pred,
labels=labels,
normalize=normalize,
sample_weight=dsample_weight,
)
assert_eq(a, b)
@pytest.mark.parametrize(
"yhat",
[
da.from_array(np.array([0.25, 0.25, 0.75, 0.75]), chunks=2),
da.from_array(np.array([0, 0, 1, 1]), chunks=2),
da.from_array(
np.array([[0.75, 0.25], [0.75, 0.25], [0.25, 0.75], [0.25, 0.75]]), chunks=2
),
],
)
def test_log_loss_shape(yhat):
y = da.from_array(np.array([0, 0, 1, 1]), chunks=2)
labels = [0, 1]
a = sklearn.metrics.log_loss(y, yhat)
b = dask_ml.metrics.log_loss(y, yhat, labels=labels)
assert_eq(a, b)
@pytest.mark.parametrize("y", [[0, 1, 1, 0], [0, 1, 2, 0]])
def test_log_loss_scoring(y):
# a_scorer = sklearn.metrics.get_scorer('neg_log_loss')
# b_scorer = dask_ml.metrics.get_scorer('neg_log_loss')
X = da.random.uniform(size=(4, 2), chunks=2)
labels = np.unique(y)
y = da.from_array(np.array(y), chunks=2)
a_scorer = sklearn.metrics.make_scorer(
sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
b_scorer = sklearn.metrics.make_scorer(
dask_ml.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
labels=labels,
)
clf = dask_ml.wrappers.ParallelPostFit(
sklearn.linear_model.LogisticRegression(
n_jobs=1, solver="lbfgs", multi_class="auto"
)
)
clf.fit(*dask.compute(X, y))
result = b_scorer(clf, X, y)
expected = a_scorer(clf, *dask.compute(X, y))
assert_eq(result, expected)
| 31.141243 | 88 | 0.662917 | [
"BSD-3-Clause"
] | Alilarian/dask-ml | tests/metrics/test_metrics.py | 5,512 | Python |
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import logging
import tensorflow as tf
from tensorflow.contrib.rnn import MultiRNNCell, LSTMStateTuple
from tensorflow.python.framework import dtypes, tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from ludwig.models.modules.fully_connected_modules import fc_layer
from ludwig.models.modules.initializer_modules import get_initializer
from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.utils.tf_utils import sequence_length_3D, sequence_length_2D
def get_cell_fun(cell_type):
if cell_type == 'rnn':
cell_fn = tf.nn.rnn_cell.BasicRNNCell
elif cell_type == 'lstm':
# allows for optional peephole connections and cell clipping
cell_fn = tf.nn.rnn_cell.LSTMCell
elif cell_type == 'lstm_block':
# Faster version of basic LSTM
cell_fn = tf.contrib.rnn.LSTMBlockCell
elif cell_type == 'lstm_ln':
cell_fn = tf.contrib.rnn.LayerNormBasicLSTMCell
elif cell_type == 'lstm_cudnn':
cell_fn = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell
elif cell_type == 'gru':
cell_fn = tf.nn.rnn_cell.GRUCell
elif cell_type == 'gru_block':
# Faster version of GRU (25% faster in my tests)
cell_fn = tf.contrib.rnn.GRUBlockCell
elif cell_type == 'gru_cudnn':
# Faster version of GRU (25% faster in my tests)
cell_fn = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell
else:
cell_fn = tf.nn.rnn_cell.BasicRNNCell
return cell_fn
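# For example, get_cell_fun('gru_block') resolves to tf.contrib.rnn.GRUBlockCell,
# while an unrecognized cell_type falls back to tf.nn.rnn_cell.BasicRNNCell.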
class Projection(tf.layers.Layer):
def __init__(self, projection_weights, projection_biases, name=None,
**kwargs):
super(Projection, self).__init__(name=name, **kwargs)
self.projection_weights = projection_weights
self.projection_biases = projection_biases
def call(self, inputs, **kwargs):
inputs_shape = inputs.shape.as_list()
weights_shape = self.projection_weights.shape.as_list()
assert inputs_shape[-1] == weights_shape[0]
inputs = tf.reshape(inputs, [-1, inputs_shape[-1]])
outputs = tf.matmul(inputs, self.projection_weights)
if self.projection_biases is not None:
outputs = tf.nn.bias_add(outputs, self.projection_biases)
outputs_shape = inputs_shape
outputs_shape[0] = -1 # batch_size
outputs_shape[-1] = weights_shape[1]
outputs = tf.reshape(outputs, outputs_shape)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = input_shape
output_shape[-1] = self.projection_biases.shape.as_list()[0]
# output_shape = [input_shape[0], self.projection_biases.shape.as_list()[0]]
return tensor_shape.TensorShape(output_shape)
class BasicDecoderOutput(
collections.namedtuple('BasicDecoderOutput',
('rnn_output', 'sample_id', 'projection_input'))):
pass
class BasicDecoder(tf.contrib.seq2seq.BasicDecoder):
def _projection_input_size(self):
return self._cell.output_size
@property
def output_size(self):
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=self._helper.sample_ids_shape,
projection_input=self._projection_input_size())
@property
def output_dtype(self):
dtype = nest.flatten(self._initial_state)[0].dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
self._helper.sample_ids_dtype,
nest.map_structure(lambda _: dtype, self._projection_input_size()))
def step(self, time, inputs, state, name=None):
with ops.name_scope(name, 'BasicDecoderStep', (time, inputs, state)):
cell_outputs, cell_state = self._cell(inputs, state)
projection_inputs = cell_outputs # get projection_inputs to compute sampled_softmax_cross_entropy_loss
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
outputs = BasicDecoderOutput(cell_outputs, sample_ids,
projection_inputs)
return (outputs, next_state, next_inputs, finished)
class TimeseriesTrainingHelper(tf.contrib.seq2seq.TrainingHelper):
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, 'TrainingHelperSample', [time, outputs]):
return tf.zeros(tf.shape(outputs)[:-1], dtype=dtypes.int32)
class RecurrentStack:
def __init__(
self,
state_size=256,
cell_type='rnn',
num_layers=1,
bidirectional=False,
dropout=False,
regularize=True,
reduce_output='last',
**kwargs
):
self.state_size = state_size
self.cell_type = cell_type
self.num_layers = num_layers
self.bidirectional = bidirectional
self.dropout = dropout
self.regularize = regularize
self.reduce_output = reduce_output
def __call__(
self,
input_sequence,
regularizer,
dropout_rate,
is_training=True
):
if not self.regularize:
regularizer = None
# Calculate the length of input_sequence and the batch size
sequence_length = sequence_length_3D(input_sequence)
# RNN cell
cell_fn = get_cell_fun(self.cell_type)
# initial state
# init_state = tf.get_variable(
# 'init_state',
# [1, state_size],
# initializer=tf.constant_initializer(0.0),
# )
# init_state = tf.tile(init_state, [batch_size, 1])
# main RNN operation
with tf.variable_scope('rnn_stack', reuse=tf.AUTO_REUSE,
regularizer=regularizer) as vs:
if self.bidirectional:
# forward direction cell
fw_cell = lambda state_size: cell_fn(state_size)
bw_cell = lambda state_size: cell_fn(state_size)
fw_cells = [fw_cell(self.state_size) for _ in
range(self.num_layers)]
bw_cells = [bw_cell(self.state_size) for _ in
range(self.num_layers)]
rnn_outputs, final_state_fw, final_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw=fw_cells,
cells_bw=bw_cells,
dtype=tf.float32,
sequence_length=sequence_length,
inputs=input_sequence
)
else:
cell = lambda state_size: cell_fn(state_size)
cells = MultiRNNCell(
[cell(self.state_size) for _ in range(self.num_layers)],
state_is_tuple=True)
rnn_outputs, final_state = tf.nn.dynamic_rnn(
cells,
input_sequence,
sequence_length=sequence_length,
dtype=tf.float32)
# initial_state=init_state)
for v in tf.global_variables():
if v.name.startswith(vs.name):
logging.debug(' {}: {}'.format(v.name, v))
logging.debug(' rnn_outputs: {0}'.format(rnn_outputs))
rnn_output = reduce_sequence(rnn_outputs, self.reduce_output)
logging.debug(' reduced_rnn_output: {0}'.format(rnn_output))
# dropout
if self.dropout and dropout_rate is not None:
rnn_output = tf.layers.dropout(
rnn_output,
rate=dropout_rate,
training=is_training
)
logging.debug(' dropout_rnn: {0}'.format(rnn_output))
return rnn_output, rnn_output.shape.as_list()[-1]
def recurrent_decoder(encoder_outputs, targets, max_sequence_length, vocab_size,
cell_type='rnn', state_size=256, embedding_size=50,
num_layers=1,
attention_mechanism=None, beam_width=1, projection=True,
tied_target_embeddings=True, embeddings=None,
initializer=None, regularizer=None,
is_timeseries=False):
with tf.variable_scope('rnn_decoder', reuse=tf.AUTO_REUSE,
regularizer=regularizer):
# ================ Setup ================
if beam_width > 1 and is_timeseries:
raise ValueError('Invalid beam_width: {}'.format(beam_width))
GO_SYMBOL = vocab_size
END_SYMBOL = 0
batch_size = tf.shape(encoder_outputs)[0]
# ================ Projection ================
# Project the encoder outputs to the size of the decoder state
encoder_outputs_size = encoder_outputs.shape[-1]
if projection and encoder_outputs_size != state_size:
with tf.variable_scope('projection'):
encoder_output_rank = len(encoder_outputs.shape)
if encoder_output_rank > 2:
sequence_length = tf.shape(encoder_outputs)[1]
encoder_outputs = tf.reshape(encoder_outputs,
[-1, encoder_outputs_size])
encoder_outputs = fc_layer(encoder_outputs,
encoder_outputs.shape[-1],
state_size,
activation=None,
initializer=initializer)
encoder_outputs = tf.reshape(encoder_outputs,
[-1, sequence_length,
state_size])
else:
encoder_outputs = fc_layer(encoder_outputs,
encoder_outputs.shape[-1],
state_size,
activation=None,
initializer=initializer)
# ================ Targets sequence ================
# Calculate the length of inputs and the batch size
with tf.variable_scope('sequence'):
targets_sequence_length = sequence_length_2D(targets)
start_tokens = tf.tile([GO_SYMBOL], [batch_size])
end_tokens = tf.tile([END_SYMBOL], [batch_size])
if is_timeseries:
start_tokens = tf.cast(start_tokens, tf.float32)
end_tokens = tf.cast(end_tokens, tf.float32)
targets_with_go = tf.concat([
tf.expand_dims(start_tokens, 1),
targets,
tf.expand_dims(end_tokens, 1)], 1)
logging.debug(' targets_with_go: {0}'.format(targets_with_go))
targets_sequence_length_with_eos = targets_sequence_length + 1 # the EOS symbol is 0 so it's not increasing the real length of the sequence
# ================ Embeddings ================
if is_timeseries:
targets_embedded = tf.expand_dims(targets_with_go, -1)
targets_embeddings = None
else:
with tf.variable_scope('embedding'):
if embeddings is not None:
embedding_size = embeddings.shape.as_list()[-1]
if tied_target_embeddings:
state_size = embedding_size
elif tied_target_embeddings:
embedding_size = state_size
if embeddings is not None:
embedding_go = tf.get_variable('embedding_GO',
initializer=tf.random_uniform(
[1, embedding_size],
-1.0, 1.0))
targets_embeddings = tf.concat([embeddings, embedding_go],
axis=0)
else:
initializer_obj = get_initializer(initializer)
targets_embeddings = tf.get_variable(
'embeddings',
initializer=initializer_obj(
[vocab_size + 1, embedding_size]),
regularizer=regularizer
)
logging.debug(
' targets_embeddings: {0}'.format(targets_embeddings))
targets_embedded = tf.nn.embedding_lookup(targets_embeddings,
targets_with_go,
name='decoder_input_embeddings')
logging.debug(' targets_embedded: {0}'.format(targets_embedded))
# ================ Class prediction ================
if tied_target_embeddings:
class_weights = tf.transpose(targets_embeddings)
else:
initializer_obj = get_initializer(initializer)
class_weights = tf.get_variable(
'class_weights',
initializer=initializer_obj([state_size, vocab_size + 1]),
regularizer=regularizer
)
logging.debug(' class_weights: {0}'.format(class_weights))
class_biases = tf.get_variable('class_biases', [vocab_size + 1])
logging.debug(' class_biases: {0}'.format(class_biases))
projection_layer = Projection(class_weights, class_biases)
# ================ RNN ================
initial_state = encoder_outputs
with tf.variable_scope('rnn_cells') as vs:
# Cell
cell_fun = get_cell_fun(cell_type)
if num_layers == 1:
cell = cell_fun(state_size)
if cell_type.startswith('lstm'):
initial_state = LSTMStateTuple(c=initial_state,
h=initial_state)
elif num_layers > 1:
cell = MultiRNNCell(
[cell_fun(state_size) for _ in range(num_layers)],
state_is_tuple=True)
if cell_type.startswith('lstm'):
initial_state = LSTMStateTuple(c=initial_state,
h=initial_state)
initial_state = tuple([initial_state] * num_layers)
else:
                raise ValueError('num_layers in recurrent decoder: {}. '
                                 'Number of layers in a recurrent decoder cannot be <= 0'.format(
num_layers))
# Attention
if attention_mechanism is not None:
if attention_mechanism == 'bahdanau':
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units=state_size, memory=encoder_outputs,
memory_sequence_length=sequence_length_3D(
encoder_outputs))
elif attention_mechanism == 'luong':
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units=state_size, memory=encoder_outputs,
memory_sequence_length=sequence_length_3D(
encoder_outputs))
else:
raise ValueError(
'Attention mechanism {} not supported'.format(
attention_mechanism))
cell = tf.contrib.seq2seq.AttentionWrapper(
cell, attention_mechanism, attention_layer_size=state_size)
initial_state = cell.zero_state(dtype=tf.float32,
batch_size=batch_size)
for v in tf.global_variables():
if v.name.startswith(vs.name):
logging.debug(' {}: {}'.format(v.name, v))
# ================ Decoding ================
def decode(initial_state, cell, helper, beam_width=1,
projection_layer=None):
# The decoder itself
if beam_width > 1:
# Tile inputs for beam search decoder
beam_initial_state = tf.contrib.seq2seq.tile_batch(
initial_state, beam_width)
decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell=cell,
embedding=targets_embeddings,
start_tokens=start_tokens,
end_token=END_SYMBOL,
initial_state=beam_initial_state,
beam_width=beam_width,
output_layer=projection_layer)
else:
decoder = BasicDecoder(
cell=cell, helper=helper,
initial_state=initial_state,
output_layer=projection_layer)
# The decoding operation
outputs = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder,
output_time_major=False,
impute_finished=False if beam_width > 1 else True,
maximum_iterations=max_sequence_length
)
return outputs
# ================ Decoding helpers ================
if is_timeseries:
train_helper = TimeseriesTrainingHelper(
inputs=targets_embedded,
sequence_length=targets_sequence_length_with_eos)
final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
initial_state,
cell,
train_helper,
projection_layer=projection_layer)
eval_logits = final_outputs_pred.rnn_output
train_logits = final_outputs_pred.projection_input
predictions_sequence = tf.reshape(eval_logits, [batch_size, -1])
predictions_sequence_length_with_eos = final_sequence_lengths_pred
else:
train_helper = tf.contrib.seq2seq.TrainingHelper(
inputs=targets_embedded,
sequence_length=targets_sequence_length_with_eos)
final_outputs_train, final_state_train, final_sequence_lengths_train, = decode(
initial_state,
cell,
train_helper,
projection_layer=projection_layer)
eval_logits = final_outputs_train.rnn_output
train_logits = final_outputs_train.projection_input
# train_predictions = final_outputs_train.sample_id
pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=targets_embeddings,
start_tokens=start_tokens,
end_token=END_SYMBOL)
final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
initial_state,
cell,
pred_helper,
beam_width,
projection_layer=projection_layer)
if beam_width > 1:
predictions_sequence = final_outputs_pred.beam_search_decoder_output.predicted_ids[
:, :, 0]
# final_outputs_pred..predicted_ids[:,:,0] would work too, but it contains -1s for padding
predictions_sequence_scores = final_outputs_pred.beam_search_decoder_output.scores[
:, :, 0]
predictions_sequence_length_with_eos = final_sequence_lengths_pred[
:, 0]
else:
predictions_sequence = final_outputs_pred.sample_id
predictions_sequence_scores = final_outputs_pred.rnn_output
predictions_sequence_length_with_eos = final_sequence_lengths_pred
logging.debug(' train_logits: {0}'.format(train_logits))
logging.debug(' eval_logits: {0}'.format(eval_logits))
logging.debug(' predictions_sequence: {0}'.format(predictions_sequence))
logging.debug(' predictions_sequence_scores: {0}'.format(
predictions_sequence_scores))
return predictions_sequence, predictions_sequence_scores, predictions_sequence_length_with_eos, \
targets_sequence_length_with_eos, eval_logits, train_logits, class_weights, class_biases
| 45.805383 | 153 | 0.557991 | [
"Apache-2.0"
] | rajputakhil/ludwig | ludwig/models/modules/recurrent_modules.py | 22,124 | Python |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCheckmate(RPackage):
"""Tests and assertions to perform frequent argument checks.
A substantial part of the package was written in C to
minimize any worries about execution time overhead."""
homepage = "https://cloud.r-project.org/package=checkmate"
url = "https://cloud.r-project.org/src/contrib/checkmate_1.8.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/checkmate"
version('1.9.4', sha256='faa25754b757fe483b876f5d07b73f76f69a1baa971420892fadec4af4bbad21')
version('1.8.4', sha256='6f948883e5a885a1c409d997f0c782e754a549227ec3c8eb18318deceb38f8f6')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
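# Illustrative usage (not part of the recipe): `spack install [email protected]`
# builds this package together with its R and r-backports dependencies.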
| 41.434783 | 95 | 0.735572 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 0t1s1/spack | var/spack/repos/builtin/packages/r-checkmate/package.py | 953 | Python |
import os
import random
import torch
from torch.nn import(
Module,Linear,LayerNorm
)
import math
from .AutoEncoder import Encoder
class DeltaT(Module):
def __init__(self):
super().__init__()
self.reset_seed()
self.elem = math.prod(Encoder().output_size)
self.input_size = (1,self.elem)
self.output_size = (1,1)
## Model layers
self.dense1 = Linear(self.elem,512)
self.norm1= LayerNorm(512)
self.dense2 = Linear(512,256)
self.norm2 = LayerNorm(256)
self.dense3 = Linear(256,1)
def forward(self,x1,x2):
#x1,x2 = x1.unsqueeze(1),x2.unsqueeze(1)
#x = torch.cat([x1,x2],dim=1)
x = x1 - x2
x = torch.relu(self.norm1(self.dense1(x)))
x = x.view(x.size(0),-1)
x = torch.relu(self.norm2(self.dense2(x)))
x = torch.relu(self.dense3(x))
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if __name__ == '__main__':
from torchsummaryX import summary
model = DeltaT()
dummy = torch.randn(model.input_size)
    print(summary(model,dummy,dummy))
 | 27.26087 | 52 | 0.605263 | [
"MIT"
] | Geson-anko/JARVIS3 | _Sensation0/DeltaTime.py | 1,254 | Python |
# Time: O(k * log(min(n, m, k))), where n is the size of num1, and m is the size of num2.
# Space: O(min(n, m, k))
# You are given two integer arrays nums1
# and nums2 sorted in ascending order and an integer k.
#
# Define a pair (u,v) which consists of one element
# from the first array and one element from the second array.
#
# Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums.
#
# Example 1:
# Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3
#
# Return: [1,2],[1,4],[1,6]
#
# The first 3 pairs are returned from the sequence:
# [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
# Example 2:
# Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2
#
# Return: [1,1],[1,1]
#
# The first 2 pairs are returned from the sequence:
# [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
# Example 3:
# Given nums1 = [1,2], nums2 = [3], k = 3
#
# Return: [1,3],[2,3]
#
# All possible pairs are returned from the sequence:
# [1,3],[2,3]
from heapq import heappush, heappop
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
pairs = []
if len(nums1) > len(nums2):
tmp = self.kSmallestPairs(nums2, nums1, k)
for pair in tmp:
pairs.append([pair[1], pair[0]])
return pairs
min_heap = []
def push(i, j):
if i < len(nums1) and j < len(nums2):
heappush(min_heap, [nums1[i] + nums2[j], i, j])
push(0, 0)
while min_heap and len(pairs) < k:
_, i, j = heappop(min_heap)
pairs.append([nums1[i], nums2[j]])
push(i, j + 1)
if j == 0:
push(i + 1, 0) # at most queue min(n, m) space
return pairs
# time: O(mn * log k)
# space: O(k)
from heapq import nsmallest
from itertools import product
class Solution2(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
return nsmallest(k, product(nums1, nums2), key=sum)
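# Quick check (an added illustrative sketch, not part of the original
# solutions): both implementations should agree on Example 1 above.
if __name__ == "__main__":
    nums1, nums2, k = [1, 7, 11], [2, 4, 6], 3
    print(Solution().kSmallestPairs(nums1, nums2, k))   # [[1, 2], [1, 4], [1, 6]]
    print(Solution2().kSmallestPairs(nums1, nums2, k))  # [(1, 2), (1, 4), (1, 6)]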
| 27.54321 | 90 | 0.53922 | [
"MIT"
] | RideGreg/LeetCode | Python/find-k-pairs-with-smallest-sums.py | 2,231 | Python |
"""
Gets concordance for keywords and groups by word.
"""
from defoe import query_utils
from defoe.alto.query_utils import get_page_matches
def do_query(archives, config_file=None, logger=None, context=None):
"""
Gets concordance for keywords and groups by word.
config_file must be the path to a configuration file with a list
of the keywords to search for, one per line.
Both keywords and words in documents are normalized, by removing
all non-'a-z|A-Z' characters.
Returns result of form:
{
<WORD>:
[
{
"title": <TITLE>,
"place": <PLACE>,
"publisher": <PUBLISHER>,
"page_number": <PAGE_NUMBER>,
"content": <PAGE_CONTENT>,
"year": <YEAR>,
"document_id": <DOCUMENT_ID>,
"filename": <FILENAME>
},
...
],
<WORD>:
...
}
:param archives: RDD of defoe.alto.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
:return: information on documents in which keywords occur grouped
by word
:rtype: dict
"""
keywords = query_utils.get_normalized_keywords(config_file)
# [document, ...]
documents = archives.flatMap(
lambda archive: [document for document in list(archive)]
)
# [(year, document, page, word), ...]
filtered_words = documents.flatMap(
lambda document: get_page_matches(document, keywords)
)
# [(year, document, page, word), ...]
# =>
# [(word, {"title": title, ...}), ...]
matching_docs = filtered_words.map(
lambda year_document_page_word: (
year_document_page_word[3],
{
"title": year_document_page_word[1].title,
"place": year_document_page_word[1].place,
"publisher": year_document_page_word[1].publisher,
"page_number": year_document_page_word[2].code,
"content": year_document_page_word[2].content,
"year": year_document_page_word[0],
"document_id": year_document_page_word[1].code,
"filename": year_document_page_word[1].archive.filename,
},
)
)
# [(word, {"title": title, ...}), ...]
# =>
# [(word, [{"title": title, ...], {...}), ...)]
result = (
matching_docs.groupByKey()
.map(lambda year_context: (year_context[0], list(year_context[1])))
.collect()
)
return result
| 31 | 75 | 0.547686 | [
"MIT"
] | kallewesterling/defoe | defoe/alto/queries/keyword_concordance_by_word.py | 2,852 | Python |
"""
Demonstrate differences between __str__() and __reper__().
"""
class neither:
pass
class stronly:
def __str__(self):
return "STR"
class repronly:
def __repr__(self):
return "REPR"
class both(stronly, repronly):
pass
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return self.name
def __repr__(self):
return "Person({0.name!r}, {0.age!r})".format(self) | 18.961538 | 59 | 0.600406 | [
"MIT"
] | ceeblet/OST_PythonCertificationTrack | Python3/Python3_Lesson09/src/reprmagic.py | 493 | Python |
#!/usr/bin/env python3
"""
"""
import socket
device_ca_server_prefix = f'{socket.gethostname()}_dio_controller:'
from caproto.threading.client import Context
ctx = Context()
ca_name = device_ca_server_prefix
pv_names = ['dio',
'bit0_indicator',
'bit0',
'bit0_enable',
'bit1_indicator',
'bit1',
'bit1_enable',
'bit2_indicator',
'bit2',
'bit2_enable',
'bit3_indicator',
'bit3',
'bit3_enable']
pvs = {}
for item in pv_names:
pvs[item], = ctx.get_pvs(f'{ca_name}{item}',)
if __name__ == '__main__':
pass
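# Illustrative usage (a sketch added here, not part of the original script): once
# the PVs above have connected, individual bits can be polled or toggled, e.g.
#   print(pvs['dio'].read().data)
#   pvs['bit0'].write([1])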
| 23.677419 | 68 | 0.491826 | [
"BSD-3-Clause"
] | vstadnytskyi/icarus-nmr | icarus_nmr/scripts/digital_controller_terminal_client.py | 734 | Python |
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
# Bind the access URL 127.0.0.1:5000/user
@app.route("/user", methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if username == "user" and password == "password":
return redirect("http://www.baidu.com")
else:
message = "Failed Login"
return render_template('login.html', message=message)
return render_template('login.html')
if __name__ == '__main__':
app.run()
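# Illustrative only (not part of the original demo): with the server running,
# the two routes can be exercised from another shell, e.g.
#   curl http://127.0.0.1:5000/hello/World
#   curl -X POST -d "username=user&password=password" http://127.0.0.1:5000/user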
| 24.235294 | 65 | 0.654126 | [
"MIT"
] | archfool/NLP | demo_flask.py | 836 | Python |
#
# io_fits.py -- Module wrapper for loading FITS files.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
There are two possible choices for a python FITS file reading package
compatible with Ginga: astropy/pyfits and fitsio. Both are based on
the CFITSIO library, although it seems that astropy's version has
changed quite a bit from the original, while fitsio is still tracking
the current version.
To force the use of one, do:
from ginga.util import io_fits
io_fits.use('package')
(replace 'package' with one of {'astropy', 'fitsio'}) before you load
any images. Otherwise Ginga will try to pick one for you.
"""
import numpy
fits_configured = False
fitsLoaderClass = None
have_pyfits = False
have_fitsio = False
class FITSError(Exception):
pass
def use(fitspkg, raise_err=True):
global fits_configured, fitsLoaderClass, \
have_pyfits, pyfits, \
have_fitsio, fitsio
if fitspkg == 'astropy':
try:
from astropy.io import fits as pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError:
try:
# maybe they have a standalone version of pyfits?
import pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
elif fitspkg == 'fitsio':
try:
import fitsio
have_fitsio = True
fitsLoaderClass = FitsioFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
return False
class BaseFitsFileHandler(object):
pass
class PyFitsFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(PyFitsFileHandler, self).__init__()
if not have_pyfits:
raise FITSError("Need astropy or pyfits module installed to use this file handler")
self.logger = logger
self.kind = 'pyfits'
def fromHDU(self, hdu, ahdr):
header = hdu.header
if hasattr(header, 'cards'):
#newer astropy.io.fits don't have ascardlist
for card in header.cards:
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
else:
for card in header.ascardlist():
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.data
if len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
else:
# Drill down to 2D data slice
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = pyfits.open(filepath, 'readonly')
# this seems to be necessary now for some fits files...
try:
fits_f.verify('fix')
        except Exception as e:
            raise FITSError("Error loading fits file '%s': %s" % (
                filepath, str(e)))
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
if hdu.data == None:
# compressed FITS file or non-pixel data hdu?
continue
if not isinstance(hdu.data, numpy.ndarray):
# We need to open a numpy array
continue
#print "data type is %s" % hdu.data.dtype.kind
# Looks good, let's try it
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header, **kwdargs):
fits_f = self.create_fits(data, header)
fits_f.writeto(path, **kwdargs)
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
class FitsioFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(FitsioFileHandler, self).__init__()
if not have_fitsio:
raise FITSError("Need fitsio module installed to use this file handler")
self.logger = logger
self.kind = 'fitsio'
def fromHDU(self, hdu, ahdr):
header = hdu.read_header()
for d in header.records():
bnch = ahdr.__setitem__(d['name'], d['value'])
bnch.comment = d['comment']
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.read()
if len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
else:
# Drill down to 2D data slice
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = fitsio.FITS(filepath)
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
info = hdu.get_info()
if not ('ndims' in info) or (info['ndims'] == 0):
# compressed FITS file or non-pixel data hdu?
continue
#print "data type is %s" % hdu.data.dtype.kind
# Looks good, let's try it
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header):
fits_f = fitsio.FITS(path, 'rw')
fits_f = self.create_fits(data, header)
fits_f.writeto(path, output_verify='fix')
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
def get_path(fileSpec):
path = fileSpec
if fileSpec.startswith('file://'):
path = fileSpec[7:]
# TODO: handle web references by fetching the file
return path
# default
fitsLoaderClass = PyFitsFileHandler
# try to use them in this order
# astropy is faster
for name in ('astropy', 'fitsio'):
if use(name, raise_err=True):
break
def get_fitsloader(kind=None, logger=None):
return fitsLoaderClass(logger)
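# Example (an illustrative sketch, not part of the original module): pick a
# loader and read a file. `ahdr` is assumed to be Ginga's header container,
# whose __setitem__ returns a bunch with a writable `comment` attribute.
#
#   import logging
#   loader = get_fitsloader(logger=logging.getLogger('io_fits'))
#   # data, naxispath = loader.load_file('/path/to/image.fits', ahdr)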
#END
| 30.494662 | 95 | 0.566227 | [
"BSD-3-Clause"
] | Rbeaty88/ginga | ginga/util/io_fits.py | 8,569 | Python |
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: onyx_qos
author: "Anas Badaha (@anasb)"
short_description: Configures QoS
description:
- This module provides declarative management of Onyx QoS configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
interfaces:
description:
- list of interfaces name.
required: true
trust:
description:
- trust type.
choices: ['L2', 'L3', 'both']
default: L2
rewrite_pcp:
description:
- rewrite with type pcp.
choices: ['enabled', 'disabled']
default: disabled
rewrite_dscp:
description:
- rewrite with type dscp.
choices: ['enabled', 'disabled']
default: disabled
'''
EXAMPLES = """
- name: Configure QoS
  onyx_qos:
interfaces:
- Mpo7
- Mpo7
trust: L3
rewrite_pcp: disabled
rewrite_dscp: enabled
- name: Configure QoS
  onyx_qos:
interfaces:
- Eth1/1
- Eth1/2
trust: both
rewrite_pcp: disabled
rewrite_dscp: enabled
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 qos trust L3
- interface mlag-port-channel 7 qos trust L3
- interface port-channel 1 qos trust L3
- interface mlag-port-channel 7 qos trust L2
- interface mlag-port-channel 7 qos rewrite dscp
- interface ethernet 1/16 qos rewrite pcp
- interface ethernet 1/1 no qos rewrite pcp
"""
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import show_cmd
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxQosModule(BaseOnyxModule):
TRUST_CMD = "interface {0} {1} qos trust {2}"
NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
REWRITE_PCP = "pcp"
REWRITE_DSCP = "dscp"
IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
IF_PO_REGEX = re.compile(r"^Po(\d+)$")
MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
IF_TYPE_ETH = "ethernet"
PORT_CHANNEL = "port-channel"
MLAG_PORT_CHANNEL = "mlag-port-channel"
IF_TYPE_MAP = {
IF_TYPE_ETH: IF_ETH_REGEX,
PORT_CHANNEL: IF_PO_REGEX,
MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
}
def init_module(self):
""" initialize module
"""
element_spec = dict(
interfaces=dict(type='list', required=True),
trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def _get_interface_type(self, if_name):
if_type = None
if_id = None
for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
match = interface_regex.match(if_name)
if match:
if_type = interface_type
if_id = match.group(1)
break
return if_type, if_id
def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
interface_qos_config = interface_qos_config[0].get(interface)
trust = interface_qos_config[0].get("Trust mode")
rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
def _show_interface_qos(self, if_type, interface):
cmd = "show qos interface {0} {1}".format(if_type, interface)
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
for interface in self._required_config.get("interfaces"):
if_type, if_id = self._get_interface_type(interface)
if not if_id:
self._module.fail_json(
msg='unsupported interface: {0}'.format(interface))
interface_qos_config = self._show_interface_qos(if_type, if_id)
if interface_qos_config is not None:
self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id)
else:
self._module.fail_json(
msg='Interface {0} does not exist on switch'.format(interface))
def generate_commands(self):
trust = self._required_config.get("trust")
rewrite_pcp = self._required_config.get("rewrite_pcp")
rewrite_dscp = self._required_config.get("rewrite_dscp")
for interface in self._required_config.get("interfaces"):
ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface)
self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust)
self._add_interface_rewrite_cmds(if_type, if_id, interface,
rewrite_pcp, rewrite_dscp)
def _get_current_rewrite_config(self, interface):
current_interface_qos_config = self._current_config.get(interface)
current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp')
current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp')
if_type = current_interface_qos_config.get("if_type")
if_id = current_interface_qos_config.get("if_id")
current_trust = current_interface_qos_config.get('trust')
return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id
def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if trust == "L3" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "L2" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "both" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id))
elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id))
elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite):
if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_pcp"] = "disabled"
elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_dscp"] = "disabled"
def main():
""" main entry point for module execution
"""
OnyxQosModule.main()
if __name__ == '__main__':
main()
| 39.844828 | 115 | 0.682172 | [
"MIT"
] | DiptoChakrabarty/nexus | venv/lib/python3.7/site-packages/ansible_collections/mellanox/onyx/plugins/modules/onyx_qos.py | 9,244 | Python |
from abstractclasses import solver, solver_model
"""
The Nash equilibrium solver takes a payoff matrix from game theory,
then it solves for a Nash equilibrium, if one exists.
"""
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM SOLVER CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_solver(solver):
def format_payoff_matrix(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> str:
"""
This is a helper function that turns a payoff matrix and available
strategies into ASCII art of a payoff matrix
"""
ret = "\t Player 1\n"
ret += "\t " + player_1_strategies[0] + " "
for j in range(1, len(payoff_matrix[0])):
ret += player_1_strategies[j] + " "
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += "Player 2 " + str(player_2_strategies[0]) + " |"
for j in range(len(payoff_matrix[0])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[0][j][0], payoff_matrix[0][j][1]
)
+ "|"
)
ret += "\n"
for i in range(1, len(payoff_matrix)):
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += (
"\t "
+ player_2_strategies[i]
+ " |"
+ "{:>5g}, {:<5g}".format(
payoff_matrix[i][0][0], payoff_matrix[i][0][1]
)
+ "|"
)
for j in range(1, len(payoff_matrix[i])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[i][j][0], payoff_matrix[i][j][1]
)
+ "|"
)
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
return ret
def prompt_inputs(self) -> None:
player_1_strategies = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
]
player_2_strategies = [
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
num_strategies_1 = self.prompt_integer(
"Please enter the number of strategies for player 1 (2-13) > ",
2,
13,
)
num_strategies_2 = self.prompt_integer(
"Please enter the number of strategies for player 2 (2-13) > ",
2,
13,
)
player_1_strategies = player_1_strategies[:num_strategies_1]
player_2_strategies = player_2_strategies[:num_strategies_2]
payoff_matrix = [
[(0, 0) for i in range(num_strategies_1)]
for j in range(num_strategies_2)
]
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
for i in range(num_strategies_2):
for j in range(num_strategies_1):
player_1_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(1)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
player_2_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(2)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
payoff_matrix[i][j] = (player_2_payoff, player_1_payoff)
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
# Set inputs
self.inputs["payoff_matrix"] = payoff_matrix
self.inputs["player_1_strategies"] = player_1_strategies
self.inputs["player_2_strategies"] = player_2_strategies
self.inputs["format_payoff_matrix"] = self.format_payoff_matrix
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM MODEL CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_model(solver_model):
def __init__(self, **inputs) -> None:
super().__init__(**inputs)
self.format_payoff_matrix = self.inputs["format_payoff_matrix"]
def solve(self) -> None:
payoff_matrix = self.inputs["payoff_matrix"]
player_1_strategies = self.inputs["player_1_strategies"]
player_2_strategies = self.inputs["player_2_strategies"]
self.ans, self.work = self.nash(
payoff_matrix, player_1_strategies, player_2_strategies
)
def nash(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> tuple:
"""
Takes a payoff matrix from game theory and the available strategies for
both players. Solves for the Nash equilibrium
"""
work = ""
no_dominant_exists = False
while not no_dominant_exists and not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
is_break = False
for i in range(len(payoff_matrix)):
for j in range(len(payoff_matrix)):
if (
i != j
and i < len(payoff_matrix)
and j < len(payoff_matrix)
):
is_greater = False
for k in range(len(payoff_matrix[0])):
if float(payoff_matrix[i][k][0]) >= float(
payoff_matrix[j][k][0]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 2's Strategy "
+ str(player_2_strategies[j])
+ " dominates strategy "
+ str(player_2_strategies[i])
+ "\n"
)
payoff_matrix.pop(i)
player_2_strategies.pop(i)
is_break = True
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
break
if is_break:
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
is_break = False
for i in range(len(payoff_matrix[0])):
for j in range(len(payoff_matrix[0])):
if (
i != j
and i < len(payoff_matrix[0])
and j < len(payoff_matrix[0])
):
is_greater = False
for k in range(len(payoff_matrix)):
if float(payoff_matrix[k][i][1]) >= float(
payoff_matrix[k][j][1]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 1's Strategy "
+ str(player_1_strategies[j])
+ " dominates strategy "
+ str(player_1_strategies[i])
+ "\n"
)
for index in range(len(payoff_matrix)):
payoff_matrix[index].pop(i)
player_1_strategies.pop(i)
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
is_break = True
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
if is_break:
no_dominant_exists = False
if not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
ans = (
"There is no Nash equilibrium, since at least one player has"
+ " multiple viable strategies.\n"
)
work += ans
work += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
else:
ans = (
"This is the Nash equilibrium of the entered payoff matrix,"
+ " calculated by eliminating dominanted strategies.\n"
)
ans += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
work += ans
return ans, work
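# Hedged usage sketch (illustration only; the solver/solver_model base classes live
# in the local `abstractclasses` module, so the exact wiring below is an assumption):
#
#   s = nash_equilibrium_solver()
#   s.prompt_inputs()                           # interactively builds the payoff matrix
#   m = nash_equilibrium_model(**s.inputs)
#   m.solve()                                   # iterated elimination of dominated strategies
#   print(m.work)                               # step-by-step elimination log
#   print(m.ans)                                # remaining cell(s) or "no equilibrium" message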
| 35.006623 | 79 | 0.404748 | [
"MIT"
] | benedictvs/FOCS-Calculator | modules/nashequilibrium.py | 10,956 | Python |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
default=BATCH_START,
help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
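# Example invocation (hypothetical values, shown only to illustrate the options
# registered above):
#   pytest --ic-type=nginx-plus-ingress --image=nginx-plus-ingress:edge \
#          --service=nodeport --node-ip=10.0.0.10 --kubeconfig=$HOME/.kube/config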
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
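# Hedged illustration (not part of this conftest): a test opts into the skipping
# logic above simply by carrying the corresponding marker, e.g.
#
#   @pytest.mark.skip_for_nginx_oss
#   def test_plus_only_feature(kube_apis):
#       ...
#
# With --ic-type=nginx-ingress such a test is marked as skipped at collection time.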
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
| 32.091463 | 123 | 0.620179 | [
"Apache-2.0"
] | 84flix/kubernetes-ingress | tests/conftest.py | 5,263 | Python |
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from dailymed.models import Set, Spl, InactiveIngredient
from dailymed.serializers import SplSerializer
import json
from pathlib import Path
SPL_URL = reverse('spl-list')
PRODUCT_URL = reverse('product-list')
PACKAGE_URL = reverse('package-list')
class PublicApiTest(TestCase):
"""Test public daily med API"""
def setUp(self):
self.client = APIClient()
"""Creates sample data for database"""
cwd = Path(__file__).parent.absolute()
with open(f'{cwd}/test.json', 'r') as f:
default = json.load(f)
for data in default['results']:
set_id = data.pop('set_id')
products_data = data.pop('products')
set_obj = Set.objects.create(id=set_id)
spl_obj = set_obj.spls.create(**data)
for product_data in products_data:
product_data.pop('name')
packages_data = product_data.pop('packages')
if 'inactive_ingredients' in product_data:
inactive_ingredients_data = product_data\
.pop('inactive_ingredients')
inactive_ingredients_list = []
for inactive_ingredient_data in inactive_ingredients_data:
try:
ingredient = InactiveIngredient.objects.get(
**inactive_ingredient_data
)
inactive_ingredients_list.append(ingredient)
except Exception:
ingredient = InactiveIngredient.objects.create(
**inactive_ingredient_data
)
inactive_ingredients_list.append(ingredient)
product_obj = spl_obj.products.create(**product_data)
product_obj.inactive_ingredients\
.add(*inactive_ingredients_list)
for package_data in packages_data:
product_obj.packages.create(**package_data)
def test_retrieve_spls(self):
"""Test retrieving spls"""
res = self.client.get(
SPL_URL,
format='json'
)
serializer = SplSerializer(Spl.objects.filter(), many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, res.data['results'])
def test_retrieve_spls_filter_by_set(self):
"""Test retrieving a spl by set filter"""
set_id = Set.objects.first()
res = self.client.get(
SPL_URL,
{'set_id': set_id.id},
format='json')
serializer = SplSerializer(
Spl.objects.filter(set__id=set_id.id), many=True
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, res.data['results'])
def test_retrieve_spls_filter_by_inactive_ing(self):
"""Test retrieving a spl by inactive ingredient filter"""
inactive_ing = 'alcohol'
res = self.client.get(
SPL_URL,
{'inactive_ingredient_name': inactive_ing},
format='json')
serializer = SplSerializer(
Spl.objects.filter(
products__inactive_ingredients__name__icontains=inactive_ing)
.distinct(),
many=True
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, res.data['results'])
def test_retrieve_spls_filter_by_schedule(self):
"""Test retrieving spls by schedule filter"""
schedule = 'CIV'
res = self.client.get(
SPL_URL,
{'schedule': schedule},
format='json')
serializer = SplSerializer(Spl.objects.filter(
products__schedule=schedule).distinct(),
many=True
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, res.data['results'])
def test_retrieve_spls_filter_by_drug_name(self):
"""Test retrieving spls by drug name filter"""
name = 'Ciprofloxacin'
res = self.client.get(
SPL_URL,
{'product_name': name},
format='json')
serializer = SplSerializer(Spl.objects.filter(
products__name=name).distinct(),
many=True
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, res.data['results'])
def test_retrieve_spls_filter_by_complex(self):
"""Test retrieving spls filtered by set & inactive ingredient"""
set_id = 'b88efb93-f1d1-4606-a669-6896f432a27f'
inactive_ing = 'alcohol'
res = self.client.get(
SPL_URL,
{'set_id': set_id,
'inactive_ingredient_name': inactive_ing},
format='json'
)
serializer = SplSerializer(
Spl.objects.filter(
products__inactive_ingredients__name__icontains=inactive_ing,
set__id=set_id)
.distinct(),
many=True
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data['results']), 1)
self.assertEqual(serializer.data, res.data['results'])
| 33.478788 | 78 | 0.589971 | [
"MIT"
] | coderxio/dailymed-api | api/dailymed/tests/test_api.py | 5,524 | Python |
import os
os.chdir("./export")
from reader.csv_mod import CsvReader
from reader.sarif_mod import SarifReader
from reader.server_mod import RestfulReader
from export.export import Exporter
def generate(args):
project_name = args.name
    sarif_list = args.sarif
    if sarif_list is None:
        sarif_list = []
    json_list = args.json
    if json_list is None:
        json_list = []
    csv_list = args.csv
    if csv_list is None:
        csv_list = []
proj_data = []
sarif_reader = SarifReader()
for f in sarif_list:
sarif_reader.read(f)
sarif_data = sarif_reader.get_data()
proj_data.extend(sarif_data['data'])
csv_reader = CsvReader()
for f in csv_list:
csv_reader.read(f)
csv_data = csv_reader.get_data()
proj_data.extend(csv_data['data'])
restful_reader = RestfulReader()
for rid in json_list:
restful_reader.read(rid)
restful_data = restful_reader.get_data()
proj_data.extend(restful_data['data'])
reporter = Exporter()
reporter.setData(proj_data)
return reporter.build(project_name)
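# Hedged usage sketch (illustration only): `generate` expects an argparse-style
# namespace exposing `name`, `sarif`, `json` and `csv`, e.g.
#
#   from argparse import Namespace
#   generate(Namespace(name="socat",
#                      sarif=["/path/to/result.sarif"],
#                      json=None,
#                      csv=None))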
#r = SarifReader()
#r.read('/home/heersin/blackhole/codeql/result.sarif')
#print(os.getcwd())
#project_name = "socat"
#pdf_factory = Exporter()
#pdf_factory.setData(r.get_data())
#pdf_factory.build(project_name) | 23.157895 | 54 | 0.681818 | [
"MIT"
] | Heersin/codeql_packer | codql-report/generator.py | 1,320 | Python |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the extention functions
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
from ...fluid.layers.sequence_lod import sequence_mask
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Tensor, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.nn.functional as F
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
data1 = F.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
"""
inputs = {'Input': [input]}
attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
| 42.785714 | 148 | 0.497663 | [
"Apache-2.0"
] | wangna11BD/Paddle | python/paddle/nn/functional/extension.py | 5,990 | Python |
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import argparse
import logging
import os
from config import Params
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import numpy as np
from SimCSE import SimCSE
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument("train_file", type=str, help="train text file")
# parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model")
# parser.add_argument("--model_out", type=str, default="./finder_model", help="model output path")
parser.add_argument("--num_proc", type=int, default=1, help="dataset process thread num")
parser.add_argument("--max_length", type=int, default=64, help="sentence max length")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--epochs", type=int, default=101, help="epochs")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--tao", type=float, default=0.05, help="temperature")
parser.add_argument("--device", type=str, default="cuda", help="device")
parser.add_argument("--display_interval", type=int, default=500, help="display interval")
parser.add_argument("--save_interval", type=int, default=10, help="save interval")
parser.add_argument("--pool_type", type=str, default="pooler", help="pool_type")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate")
args = parser.parse_args()
return args
def read_data(args):
with open(Params.dialogues_file, 'r') as f:
sentences = f.readlines()
dl = DataLoader(sentences,
batch_size=args.batch_size)
return dl
def duplicate_batch(batch, tokenizer, args):
    '''
    Duplicate every sentence in the batch so that each sentence appears twice in a row.
    '''
new_batch = []
for sentence in batch:
new_batch.append(sentence)
new_batch.append(sentence)
    batch_encoding = tokenizer(new_batch,
padding=True,
truncation=True,
max_length=args.max_length,
return_tensors='pt')
return batch_encoding
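# e.g. ["I like tea", "It rains"] -> ["I like tea", "I like tea", "It rains", "It rains"]
# before tokenization; rows 2k and 2k+1 of the encoded batch are the same sentence and
# become a SimCSE positive pair through the model's dropout.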
def compute_loss(y_pred, tao=0.05, device="cuda"):
idxs = torch.arange(0, y_pred.shape[0], device=device)
y_true = idxs + 1 - idxs % 2 * 2
similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
similarities = similarities / tao
loss = F.cross_entropy(similarities, y_true)
return torch.mean(loss)
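# Worked example of the label construction above: with a batch of 4 rows (2 sentences,
# each duplicated), idxs = [0, 1, 2, 3] and y_true = idxs + 1 - idxs % 2 * 2 = [1, 0, 3, 2],
# i.e. every row's target is its duplicate. Subtracting 1e12 on the diagonal prevents a
# row from matching itself, and dividing by `tao` sharpens the softmax over cosine similarities.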
def train(args):
tokenizer = AutoTokenizer.from_pretrained(Params.pretrained_model_path)
dl = read_data(args)
model = SimCSE(Params.pretrained_model_path, args.pool_type, args.dropout_rate).to(args.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
model.train()
batch_idx = 0
min_loss = 10000000
for epoch_idx in range(args.epochs):
epoch_losses = []
for data in tqdm(dl):
batch_idx += 1
new_batch_data = duplicate_batch(data, tokenizer, args)
pred = model(input_ids=new_batch_data["input_ids"].to(args.device),
attention_mask=new_batch_data["attention_mask"].to(args.device),
token_type_ids=new_batch_data["token_type_ids"].to(args.device))
loss = compute_loss(pred, args.tao, args.device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = loss.item()
epoch_losses.append(loss)
if batch_idx % args.display_interval == 0:
logging.info(f"epoch: {epoch_idx}, batch_idx: {batch_idx}, loss: {loss:>10f}")
avg_epoch_loss = np.mean(epoch_losses)
if avg_epoch_loss < min_loss:
min_loss = avg_epoch_loss
torch.save({
'epoch': epoch_idx,
'model_state_dict': model.state_dict(),
'loss': avg_epoch_loss
}, Params.simcse_model_path)
def main():
args = parse_args()
train(args)
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| 38.35 | 124 | 0.651673 | [
"Apache-2.0"
] | Macielyoung/sentence_representation_matching | simcse/train_unsup.py | 4,614 | Python |
from mgt.datamanagers.remi.dictionary_generator import DictionaryGenerator
from mgt.models.transformer_model import TransformerModel
"""
Example showing how to save and load a model.
"""
dictionary = DictionaryGenerator.create_dictionary();
model = TransformerModel(dictionary)
model.save_checkpoint("test_model")
model2 = TransformerModel.load_checkpoint("test_model")
| 31 | 74 | 0.833333 | [
"MIT"
] | wingedsheep/music-generation-toolbox | example/save_and_load_model.py | 372 | Python |
#---------------------------------------------------------------
# ALGORITHM DEMO : TOPOLOGICAL SORT
#---------------------------------------------------------------
# Topological Sort is an algorithm that finds a linear ordering of the nodes of a dependency graph (a DAG)
# Concept
# https://blog.techbridge.cc/2020/05/10/leetcode-topological-sort/
# https://alrightchiu.github.io/SecondRound/graph-li-yong-dfsxun-zhao-dagde-topological-sorttuo-pu-pai-xu.html
# V0
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
# step 1) maintain a stack, save "ordering" nodes in it (and return in final step)
# step 2) init visited as [False]*self.V (all nodes are NOT visited yet)
# step 3) iterate over all vertices in graph, if not visited, then run topologicalSortUtil
# step 4) return result (stack)
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
# for build graph
def addEdge(self, u, v):
self.graph[u].append(v)
def topologicalSortUtil(self, v, visited, stack):
visited[v] = True
### NOTE this !!! (self.graph[v])
for k in self.graph[v]:
if visited[k] == False:
self.topologicalSortUtil(k, visited, stack)
# stack.insert(0,v) # instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1])
"""
### NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v)
"""
stack.insert(0,v)
def topologicalSort(self):
visited = [False] * self.V
stack = []
### NOTE this !!! (range(self.V))
for v in range(self.V):
            # call topologicalSortUtil only if visited[v] == False (the vertex is not visited yet)
if visited[v] == False:
self.topologicalSortUtil(v, visited, stack)
# return the result in inverse order
return stack[::-1]
### TEST
{"A": 0, "B":1, "C":2, "D": 3}
v = 4
g = Graph(v)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(2, 3)
g.addEdge(3, 1)
print (g.graph)
# ans should be TableB, TableD, TableC, TableA.
r = g.topologicalSort()
print (r)
# V0'
from collections import defaultdict
class Graph:
def __init__(self, v):
self.graph = defaultdict(list)
self.v = v
def addEdge(self, a, b):
self.graph[a].append(b)
def topologicalSortUtil(self, x, visited, stack):
# V1
if visited[x]:
return
for k in self.graph[x]:
self.topologicalSortUtil(k, visited, stack)
visited[x] = True
stack.insert(0, x)
# V2
# visited[v] = True
# ### NOTE this !!! (self.graph[v])
# for k in self.graph[v]:
# if visited[k] == False:
# self.topologicalSortUtil(k, visited, stack)
# # stack.insert(0,v) # instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1])
# """
# ### NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v)
# """
# stack.insert(0,v)
def topologicalSort(self):
visited = [False] * self.v
stack = []
for x in range(self.v):
if not visited[x]:
self.topologicalSortUtil(x, visited, stack)
print ("stack = " + str(stack))
return stack[::-1]
# V0''
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
from collections import defaultdict
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
# for testing (build graph)
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# V1
# https://www.geeksforgeeks.org/topological-sorting/
# Python program to print topological sorting of a DAG
from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list) # dictionary containing adjacency List
self.V = vertices # No. of vertices
# function to add an edge to graph
def addEdge(self, u, v):
self.graph[u].append(v)
# A recursive function used by topologicalSort
def topologicalSortUtil(self, v, visited, stack):
# Mark the current node as visited.
visited[v] = True
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Push current vertex to stack which stores result
#stack.append(v)
stack.insert(0,v)
# The function to do Topological Sort. It uses recursive
# topologicalSortUtil()
def topologicalSort(self):
# Mark all the vertices as not visited
visited = [False]*self.V
stack = []
# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Print contents of the stack
print(stack[::-1]) # return list in reverse order
# TEST
# Driver Code
# g = Graph(6)
# g.addEdge(5, 2)
# g.addEdge(5, 0)
# g.addEdge(4, 0)
# g.addEdge(4, 1)
# g.addEdge(2, 3)
# g.addEdge(3, 1)
#
# print ("Following is a Topological Sort of the given graph")
#
# # Function Call
# g.topologicalSort()
# V1
# https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py
"""Topological Sort."""
# a
# / \
# b c
# / \
# d e
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
# NOTE: this variant is a plain module-level function (matching the commented TEST
# call below); it expects module-level `edges` and `vertices` as in the example above.
def topological_sort(start, visited, sort):
"""Perform topological sort on a directed acyclic graph."""
current = start
# add current to visited
visited.append(current)
neighbors = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
sort = topological_sort(neighbor, visited, sort)
# if all neighbors visited add current to sort
sort.append(current)
# if all vertices haven't been visited select a new one to visit
if len(visited) != len(vertices):
for vertice in vertices:
if vertice not in visited:
sort = topological_sort(vertice, visited, sort)
# return sort
return sort
# TEST
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
# sort = topological_sort("a", [], [])
# print(sort)
# V1'
# http://www.runoob.com/python3/python-topological-sorting.html
class Graph:
from collections import defaultdict
def __init__(self,vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self,u,v):
self.graph[u].append(v)
def topologicalSortUtil(self,v,visited,stack):
visited[v] = True
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
stack.insert(0,v)
def topologicalSort(self):
visited = [False]*self.V
stack =[]
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i,visited,stack)
print (stack)
# TEST
# g= Graph(6)
# g.addEdge(5, 2);
# g.addEdge(5, 0);
# g.addEdge(4, 0);
# g.addEdge(4, 1);
# g.addEdge(2, 3);
# g.addEdge(3, 1);
# print ("output of Topological Sort ")
# g.topologicalSort()
# [5, 4, 2, 3, 1, 0]
# V2
# https://zhuanlan.zhihu.com/p/69858335
def topoSort(graph):
in_degrees = dict((u,0) for u in graph) # init (value with 0)
num = len(in_degrees)
for u in graph:
for v in graph[u]:
in_degrees[v] += 1
Q = [u for u in in_degrees if in_degrees[u] == 0]
Seq = []
while Q:
u = Q.pop()
Seq.append(u)
for v in graph[u]:
in_degrees[v] -= 1
if in_degrees[v] == 0:
Q.append(v)
if len(Seq) == num:
return Seq
else:
return None
# TEST
# G = {
# 'a':'bf',
# 'b':'cdf',
# 'c':'d',
# 'd':'ef',
# 'e':'f',
# 'f':''
# }
# print(topoSort(G))
# ['a', 'b', 'c', 'd', 'e', 'f']
# V3
# https://www.educative.io/courses/grokking-the-coding-interview/m25rBmwLV00
from collections import deque
def topological_sort(vertices, edges):
sortedOrder = []
if vertices <= 0:
return sortedOrder
# a. Initialize the graph
inDegree = {i: 0 for i in range(vertices)} # count of incoming edges
graph = {i: [] for i in range(vertices)} # adjacency list graph
# b. Build the graph
for edge in edges:
parent, child = edge[0], edge[1]
graph[parent].append(child) # put the child into it's parent's list
inDegree[child] += 1 # increment child's inDegree
# c. Find all sources i.e., all vertices with 0 in-degrees
sources = deque()
for key in inDegree:
if inDegree[key] == 0:
sources.append(key)
# d. For each source, add it to the sortedOrder and subtract one from all of its children's in-degrees
# if a child's in-degree becomes zero, add it to the sources queue
while sources:
vertex = sources.popleft()
sortedOrder.append(vertex)
for child in graph[vertex]: # get the node's children to decrement their in-degrees
inDegree[child] -= 1
if inDegree[child] == 0:
sources.append(child)
# topological sort is not possible as the graph has a cycle
if len(sortedOrder) != vertices:
return []
return sortedOrder
# TEST
# def main():
# print("Topological sort: " +
# str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]])))
# print("Topological sort: " +
# str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]])))
# print("Topological sort: " +
# str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))
#main() | 30.322222 | 146 | 0.563576 | [
"Unlicense"
] | yennanliu/Python_basics | algorithm/python/topological_sort.py | 10,916 | Python |
#
# CSS
#
PIPELINE_CSS = {
'search': {
'source_filenames': (
'crashstats/css/lib/flatpickr.dark.min.css',
'supersearch/css/search.less',
),
'output_filename': 'css/search.min.css',
},
'select2': {
'source_filenames': (
'crashstats/js/lib/select2/select2.css',
),
'output_filename': 'css/select2.min.css',
},
'jquery_ui': {
'source_filenames': (
'crashstats/css/lib/jquery-ui.css',
'crashstats/css/lib/jquery-ui.structure.css',
'crashstats/css/lib/jquery-ui.theme.css',
),
'output_filename': 'css/jquery-ui.min.css',
},
'accordion': {
'source_filenames': (
'crashstats/css/accordion.less',
),
'output_filename': 'css/accordion.min.css',
},
'metricsgraphics': {
'source_filenames': (
'crashstats/css/lib/metricsgraphics.css',
'crashstats/css/metricsgraphics_custom.css',
),
'output_filename': 'css/metricsgraphics.min.css',
},
'crashstats_base': {
'source_filenames': (
'crashstats/css/screen.less',
'status/css/status.less',
),
'output_filename': 'css/crashstats-base.min.css',
},
'api_documentation': {
'source_filenames': (
'api/css/documentation.css',
),
'output_filename': 'css/api-documentation.min.css',
},
'crashes_per_day': {
'source_filenames': (
'crashstats/css/crashes_per_day.less',
),
'output_filename': 'css/crashes-per-day.min.css',
},
'crontabber_state': {
'source_filenames': (
'crashstats/css/crontabber_state.css',
),
'output_filename': 'css/crontabber-state.min.css',
},
'documentation': {
'source_filenames': (
'documentation/css/documentation.less',
'documentation/css/jsonview.custom.less',
),
'output_filename': 'css/documentation.min.css',
},
'report_index': {
'source_filenames': (
'crashstats/css/report_index.css',
),
'output_filename': 'css/report-index.min.css',
},
'report_pending': {
'source_filenames': (
'crashstats/css/report_pending.less',
),
'output_filename': 'css/report-pending.min.css',
},
'api_tokens': {
'source_filenames': (
'manage/css/api_tokens.css',
),
'output_filename': 'css/api-tokens.min.css',
},
'manage:home': {
'source_filenames': (
'crashstats/css/lib/font-awesome/css/font-awesome.css',
'crashstats/css/fonts.less',
'manage/css/home.less',
),
'output_filename': 'css/manage-home.min.css',
},
'manage:supersearch_fields': {
'source_filenames': (
'manage/css/supersearch_fields.less',
),
'output_filename': 'css/manage-supersearch-fields.min.css',
},
'manage:status_message': {
'source_filenames': (
'manage/css/status_message.css',
),
'output_filename': 'css/manage-status-message.min.css',
},
'profile': {
'source_filenames': (
'profile/css/profile.css',
),
'output_filename': 'css/profile.min.css',
},
'signature_report': {
'source_filenames': (
'signature/css/signature_report.less',
),
'output_filename': 'css/signature-report.min.css',
},
'symbols': {
'source_filenames': (
'symbols/css/home.css',
),
'output_filename': 'css/symbols.min.css',
},
'tokens': {
'source_filenames': (
'tokens/css/home.css',
),
'output_filename': 'css/tokens.min.css',
},
'topcrashers': {
'source_filenames': (
'topcrashers/css/topcrashers.less',
),
'output_filename': 'css/topcrashers.min.css',
},
'tablesorter': {
'source_filenames': (
'tablesorter/css/theme.default.min.css',
),
'output_filename': 'js/tablesorter.min.css',
},
}
#
# JavaScript
#
PIPELINE_JS = {
'pagination': {
'source_filenames': (
'manage/js/pagination_utils.js',
),
'output_filename': 'js/pagination.min.js',
},
'date_filters': {
'source_filenames': (
'crashstats/js/lib/flatpickr.min.js',
'supersearch/js/socorro/date_filters.js',
),
'output_filename': 'js/date-filters.min.js',
},
'dynamic_form': {
'source_filenames': (
'supersearch/js/lib/dynamic_form.js',
),
'output_filename': 'js/dynamic-form.min.js',
},
'bugzilla': {
'source_filenames': (
'crashstats/js/socorro/bugzilla.js',
),
'output_filename': 'js/bugzilla.min.js',
},
'd3': {
'source_filenames': (
'crashstats/js/lib/d3.min.js',
),
'output_filename': 'js/d3.min.js',
},
'jquery_ui': {
'source_filenames': (
'crashstats/js/jquery/plugins/jquery-ui.js',
),
'output_filename': 'js/jquery-ui.min.js',
},
'accordion': {
'source_filenames': (
'crashstats/js/lib/accordions.js',
),
'output_filename': 'js/accordion.min.js',
},
'correlation': {
'source_filenames': (
'crashstats/js/polyfill/fetch.js',
'crashstats/js/polyfill/es6-promise.auto.min.js',
'crashstats/js/lib/sha1.js',
'crashstats/js/socorro/correlation.js',
),
'output_filename': 'js/correlation.min.js',
},
'metricsgraphics': {
'source_filenames': (
'crashstats/js/lib/metricsgraphics.min.js',
),
'output_filename': 'js/metricsgraphics.min.js',
},
'select2': {
'source_filenames': (
'crashstats/js/lib/select2/select2.js',
),
'output_filename': 'js/select2.min.js',
},
'tablesorter': {
'source_filenames': (
'tablesorter/js/jquery.tablesorter.js',
),
'output_filename': 'js/jquery-tablesorter.min.js',
},
'socorro_utils': {
'source_filenames': (
'crashstats/js/socorro/utils.js',
),
'output_filename': 'js/socorro-utils.min.js',
},
'topcrashers': {
'source_filenames': (
'topcrashers/js/topcrashers.js',
),
'output_filename': 'js/topcrashers.min.js',
},
'crashstats_base': {
'source_filenames': (
'crashstats/js/jquery/jquery-2.0.3.min.js',
'crashstats/js/jquery/plugins/jquery.cookies.2.2.0.js',
'crashstats/js/lib/qs.js',
'crashstats/js/lib/moment.min.js',
'crashstats/js/socorro/timeutils.js',
'crashstats/js/socorro/oauth2.js',
'crashstats/js/socorro/nav.js',
'crashstats/js/socorro/analytics.js',
),
'output_filename': 'js/crashstats-base.min.js',
},
'api_documentation': {
'source_filenames': (
'api/js/lib/filesize.min.js',
'api/js/testdrive.js'
),
'output_filename': 'js/api-documentation.min.js',
},
'crashes_per_day': {
'source_filenames': (
'crashstats/js/socorro/crashes_per_day.js',
),
'output_filename': 'js/crashes-per-day.min.js',
},
'crontabber_state': {
'source_filenames': (
'crashstats/js/underscore-min.js',
'crashstats/js/lib/sankey.js',
'crashstats/js/socorro/crontabber_state.js',
),
'output_filename': 'js/crontabber-state.min.js',
},
'documentation': {
'source_filenames': (
'documentation/js/lib/jquery.jsonview.js',
'documentation/js/documentation.js',
),
'output_filename': 'js/documentation.min.js',
},
'exploitability_report': {
'source_filenames': (
'crashstats/js/socorro/exploitability_report.js',
),
'output_filename': 'js/exploitability-report.min.js',
},
'home': {
'source_filenames': (
'home/js/home.js',
),
'output_filename': 'js/home.min.js',
},
'report_index': {
'source_filenames': (
'crashstats/js/socorro/report.js',
'crashstats/js/socorro/reprocessing.js',
),
'output_filename': 'js/report-index.min.js',
},
'report_pending': {
'source_filenames': (
'crashstats/js/socorro/pending.js',
),
'output_filename': 'js/report-pending.min.js',
},
'api_tokens': {
'source_filenames': (
'manage/js/api_tokens.js',
),
'output_filename': 'js/api-tokens.min.js',
},
'manage:events': {
'source_filenames': (
'manage/js/events.js',
),
'output_filename': 'js/manage-events.min.js',
},
'manage:graphics_devices': {
'source_filenames': (
'manage/js/graphics_devices.js',
),
'output_filename': 'js/manage-graphics-devices.min.js',
},
'manage:groups': {
'source_filenames': (
'manage/js/groups.js',
),
'output_filename': 'js/manage-groups.min.js',
},
'manage:supersearch_field': {
'source_filenames': (
'manage/js/supersearch_field.js',
),
'output_filename': 'js/manage-supersearch-field.min.js',
},
'manage:supersearch_fields': {
'source_filenames': (
'manage/js/supersearch_fields.js',
),
'output_filename': 'js/manage-supersearch-fields.min.js',
},
'manage:symbols_uploads': {
'source_filenames': (
'manage/js/symbols-uploads.js',
),
'output_filename': 'js/manage-symbols-uploads.min.js',
},
'manage:users': {
'source_filenames': (
'manage/js/users.js',
),
'output_filename': 'js/manage-users.min.js',
},
'signature_report': {
'source_filenames': (
'signature/js/signature_report.js',
'signature/js/signature_tab.js',
'signature/js/signature_tab_summary.js',
'signature/js/signature_tab_graphs.js',
'signature/js/signature_tab_reports.js',
'signature/js/signature_tab_aggregations.js',
'signature/js/signature_tab_comments.js',
'signature/js/signature_tab_correlations.js',
'signature/js/signature_tab_bugzilla.js',
'signature/js/signature_tab_graph.js',
'signature/js/signature_panel.js',
),
'output_filename': 'js/signature-report.min.js',
},
'search_custom': {
'source_filenames': (
'supersearch/js/lib/ace/ace.js',
'supersearch/js/lib/ace/theme-monokai.js',
'supersearch/js/lib/ace/mode-json.js',
'supersearch/js/socorro/search_custom.js',
),
'output_filename': 'js/search-custom.min.js',
},
'search': {
'source_filenames': (
'supersearch/js/socorro/search.js',
),
'output_filename': 'js/search.min.js',
},
'tokens': {
'source_filenames': (
'tokens/js/home.js',
),
'output_filename': 'js/tokens.min.js',
},
'error': {
'source_filenames': (
'js/error.js',
),
'output_filename': 'js/error.min.js',
},
'google_analytics': {
'source_filenames': (
'crashstats/js/socorro/google_analytics.js',
),
'output_filename': 'js/google-analytics.min.js',
},
}
# These are sanity checks, primarily for developers. They check that
# you haven't accidentally made a string a tuple with an excess comma,
# that there are no underscores in the bundle name, and that the
# bundle file extension is either .js or .css.
# We also check, but only warn, if a file is re-used in a different bundle.
# That's because you might want to consider not including that file in the
# bundle and instead break it out so it can be re-used on its own.
_used = {}
for config in PIPELINE_JS, PIPELINE_CSS: # NOQA
_trouble = set()
for k, v in config.items():
assert isinstance(k, basestring), k
out = v['output_filename']
assert isinstance(v['source_filenames'], tuple), v
assert isinstance(out, basestring), v
assert not out.split('/')[-1].startswith('.'), k
assert '_' not in out
assert out.endswith('.min.css') or out.endswith('.min.js')
for asset_file in v['source_filenames']:
if asset_file in _used:
# Consider using warnings.warn here instead
print '{:<52} in {:<20} already in {}'.format(
asset_file,
k,
_used[asset_file]
)
_trouble.add(asset_file)
_used[asset_file] = k
for asset_file in _trouble:
print "REPEATED", asset_file
found_in = []
sets = []
for k, v in config.items():
if asset_file in v['source_filenames']:
found_in.append(k)
sets.append(set(list(v['source_filenames'])))
print "FOUND IN", found_in
print "ALWAYS TOGETHER WITH", set.intersection(*sets)
break
| 30.81448 | 75 | 0.544347 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Krispy2009/socorro | webapp-django/crashstats/settings/bundles.py | 13,620 | Python |
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from flowtron_plotting_utils import plot_alignment_to_numpy
from flowtron_plotting_utils import plot_gate_outputs_to_numpy
class FlowtronLogger(SummaryWriter):
def __init__(self, logdir):
super(FlowtronLogger, self).__init__(logdir)
def log_training(self, loss, learning_rate, iteration):
self.add_scalar("training/loss", loss, iteration)
self.add_scalar("learning_rate", learning_rate, iteration)
def log_validation(self, loss, loss_nll, loss_gate, attns, gate_pred,
gate_out, iteration):
self.add_scalar("validation/loss", loss, iteration)
self.add_scalar("validation/loss_nll", loss_nll, iteration)
self.add_scalar("validation/loss_gate", loss_gate, iteration)
        # randomly pick one sample from the batch to visualize
idx = random.randint(0, len(gate_out) - 1)
for i in range(len(attns)):
self.add_image(
'attention_weights_{}'.format(i),
plot_alignment_to_numpy(attns[i][idx].data.cpu().numpy().T),
iteration,
dataformats='HWC')
if gate_pred is not None:
gate_pred = gate_pred.transpose(0, 1)[:, :, 0]
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_out[idx].data.cpu().numpy(),
torch.sigmoid(gate_pred[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
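# Hedged usage sketch (not part of the original file; the log directory and argument
# values are made up for illustration):
#
#   logger = FlowtronLogger("logs/flowtron_run")
#   logger.log_training(loss=0.42, learning_rate=1e-4, iteration=100)
#   # gate_pred may be None when the model does not predict a gate:
#   logger.log_validation(loss, loss_nll, loss_gate, attns, gate_pred, gate_out, iteration)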
| 39.179487 | 76 | 0.630236 | [
"Apache-2.0"
] | hit-thusz-RookieCJ/MyFLowtron | flowtron_logger.py | 1,548 | Python |
# Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import pytest
import numpy as np
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_text_detection(test_app_asyncio, mock_detection_image):
response = await test_app_asyncio.post("/detection", files={'file': mock_detection_image})
assert response.status_code == 200
json_response = response.json()
gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339
    # Check that IoU with GT is reasonable
assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]
pred_boxes = np.array([elt['box'] for elt in json_response])
iou_mat = box_iou(gt_boxes, pred_boxes)
gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]
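# Minimal self-contained sketch of the matching logic used above (illustration only,
# never called by the test): pairwise IoU is computed for every GT/prediction pair and
# the Hungarian algorithm keeps the assignment that maximizes total IoU. It only uses
# numpy/scipy, which are already imported in this module; box layout is assumed to be
# [xmin, ymin, xmax, ymax] in relative coordinates, as in the test above.
def _toy_iou_matching(gt, preds, iou_threshold=0.8):
    lt = np.maximum(gt[:, None, :2], preds[None, :, :2])       # top-left corners of intersections
    rb = np.minimum(gt[:, None, 2:], preds[None, :, 2:])       # bottom-right corners of intersections
    inter = np.clip(rb - lt, 0, None).prod(axis=2)
    union = (gt[:, 2:] - gt[:, :2]).prod(axis=1)[:, None] \
        + (preds[:, 2:] - preds[:, :2]).prod(axis=1)[None, :] - inter
    iou_mat = inter / union
    gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)       # maximize total IoU
    return iou_mat[gt_idxs, pred_idxs] >= iou_threshold        # boolean mask of kept matches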
| 39.466667 | 98 | 0.714527 | [
"Apache-2.0"
] | fmobrj/doctr | api/tests/routes/test_detection.py | 1,184 | Python |
from .eLABJournalObject import *
import json
import pandas as pd
import numbers
class SampleSerie(eLABJournalObject):
def __init__(self, api, data):
"""
        Internal use only: initialize sample series
"""
if ((data is not None) & (type(data) == dict) &
("name" in data.keys())
):
super().__init__(api, data, "seriesID", str(data["name"]))
else:
raise Exception("no (valid) sampleSerie data")
def barcode(self):
"""
Get the barcode.
"""
if "barcode" in self.data():
barcode = self.data()["barcode"]
return(barcode)
return None
def samples(self):
"""
        Get a dict with the samples for this sample series.
The sampleID is used as a key, the value is a sample object.
"""
sample_list = []
if "samples" in self.data():
samplesData = self.data()["samples"]
if isinstance(samplesData, list):
for sampleItem in samplesData:
if isinstance(sampleItem,dict) & ("sampleID" in sampleItem):
sample_list.append(sampleItem["sampleID"])
elif isinstance(sampleItem,numbers.Integral) | isinstance(sampleItem,str):
sample_list.append(sampleItem)
return(self._eLABJournalObject__api.sample(sample_list))
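# Hedged usage sketch (not part of the original module): given a SampleSerie instance
# `serie` obtained through the eLABJournal api wrapper, typical calls are
#   serie.barcode()   # -> barcode value, or None when not present
#   serie.samples()   # -> samples of this series, resolved via api.sample(...)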
| 32.888889 | 94 | 0.538514 | [
"Apache-2.0"
] | matthijsbrouwer/elabjournal-python | elabjournal/elabjournal/SampleSerie.py | 1,480 | Python |
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
        :param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
                 is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
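    # Hedged usage sketch (not part of the driver itself): a separate ROS node
    # could call the "take_snapshot" service advertised in the constructor
    # roughly like this; the client node name and output path are illustrative
    # assumptions.
    #
    #   import rospy
    #   from axis_camera.srv import TakeSnapshot
    #   rospy.init_node("axis_snapshot_client")
    #   rospy.wait_for_service("take_snapshot")
    #   take_snapshot = rospy.ServiceProxy("take_snapshot", TakeSnapshot)
    #   response = take_snapshot()
    #   with open("/tmp/axis_snapshot.jpg", "wb") as f:
    #       f.write(response.image.data)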
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
        :param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
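    # Illustrative example: sanitize_bool("0", "use_color") returns False,
    # while a plain bool("0") would return True because "0" is a non-empty
    # string -- which is exactly the case handled above.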
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
        :param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
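    # Illustrative example (using the default 11:12 pixel ratio set above):
    #   VideoResolution(704, 576).get_resolution(use_square_pixels=True)
    #   returns (768, 576), since ceil(704 * 12/11) == 768 and the height
    #   conversion ratio is 1.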
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
if __name__ == "__main__":
main()
| 42.135177 | 120 | 0.653737 | [
"BSD-3-Clause"
] | MarcoStb1993/axis_camera | nodes/axis.py | 27,430 | Python |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CakeGallery.created'
db.add_column(u'cakegallery_cakegallery', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
# Adding field 'CakeGallery.updated'
db.add_column(u'cakegallery_cakegallery', 'updated',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CakeGallery.created'
db.delete_column(u'cakegallery_cakegallery', 'created')
# Deleting field 'CakeGallery.updated'
db.delete_column(u'cakegallery_cakegallery', 'updated')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cakegallery.cakecategory': {
'Meta': {'object_name': 'CakeCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'cakegallery.cakegallery': {
'Meta': {'object_name': 'CakeGallery'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'category'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategory'", 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakeimage': {
'Meta': {'object_name': 'CakeImage'},
'add_watermark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'for_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gallery'", 'null': 'True', 'to': u"orm['cakegallery.CakeGallery']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'image_alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ip_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'cakegallery.cakesubcategory': {
'Meta': {'object_name': 'CakeSubCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategories'", 'to': u"orm['cakegallery.CakeCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cakegallery'] | 75.920635 | 188 | 0.57234 | [
"BSD-3-Clause"
] | TorinAsakura/cooking | povary/apps/cakegallery/migrations/0014_auto__add_field_cakegallery_created__add_field_cakegallery_updated.py | 9,566 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-24 05:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('utils', '0011_auto_20160822_1127'),
]
operations = [
migrations.CreateModel(
name='River',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(unique=True)),
('added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterField(
model_name='channel',
name='river',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rivers', to='utils.River'),
),
]
| 30.7 | 145 | 0.605863 | [
"Apache-2.0"
] | Upande/MaMaSe | apps/utils/migrations/0012_auto_20160824_0543.py | 921 | Python |
import time
import pytest
from tools import utils, constants
PARAMS = ['--connections', '500']
# TODO parameterize test
@pytest.mark.baker
@pytest.mark.multinode
@pytest.mark.slow
@pytest.mark.incremental
class TestManyBakers:
"""Run 5 bakers and num nodes, wait and check logs"""
def test_init(self, sandbox):
for i in range(10):
sandbox.add_node(i, params=PARAMS)
utils.activate_alpha(sandbox.client(0))
for i in range(5):
sandbox.add_baker(i, f'bootstrap{i + 1}',
proto=constants.ALPHA_DEAMON)
def test_wait(self):
time.sleep(5)
def test_check_logs(self, sandbox):
if not sandbox.log_dir:
pytest.skip()
assert sandbox.logs
error_pattern = r"canceled|crashed"
assert utils.check_logs(sandbox.logs, error_pattern)
| 24.828571 | 60 | 0.638665 | [
"MIT"
] | blockchain-analysis-study/my-tezos | tests_python/tests/test_many_bakers.py | 869 | Python |
"""
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
logger.info('Initializing telnet connection')
# Iac Do Linemode
connection.send(IAC + DO + LINEMODE)
# Suppress Go Ahead. (This seems important for Putty to do correct echoing.)
# This will allow bi-directional operation.
connection.send(IAC + WILL + SUPPRESS_GO_AHEAD)
# Iac sb
connection.send(IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE)
# IAC Will Echo
connection.send(IAC + WILL + ECHO)
# Negotiate window size
connection.send(IAC + DO + NAWS)
class _ConnectionStdout(object):
"""
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
"""
def __init__(self, connection, encoding):
self._encoding = encoding
self._connection = connection
self._buffer = []
def write(self, data):
assert isinstance(data, text_type)
self._buffer.append(data.encode(self._encoding))
self.flush()
def flush(self):
try:
self._connection.send(b''.join(self._buffer))
except socket.error as e:
logger.error("Couldn't send data over socket: %s" % e)
self._buffer = []
class TelnetConnection(object):
"""
Class that represents one Telnet connection.
"""
def __init__(self, conn, addr, application, server, encoding):
assert isinstance(addr, tuple) # (addr, port) tuple
assert isinstance(application, TelnetApplication)
assert isinstance(server, TelnetServer)
assert isinstance(encoding, text_type) # e.g. 'utf-8'
self.conn = conn
self.addr = addr
self.application = application
self.closed = False
self.handling_command = True
self.server = server
self.encoding = encoding
self.callback = None # Function that handles the CLI result.
# Create "Output" object.
self.size = Size(rows=40, columns=79)
# Initialize.
_initialize_telnet(conn)
# Create output.
def get_size():
return self.size
self.stdout = _ConnectionStdout(conn, encoding=encoding)
self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
# Create an eventloop (adaptor) for the CommandLineInterface.
self.eventloop = _TelnetEventLoopInterface(server)
# Set default CommandLineInterface.
self.set_application(create_prompt_application())
# Call client_connected
application.client_connected(self)
# Draw for the first time.
self.handling_command = False
self.cli._redraw()
def set_application(self, app, callback=None):
"""
        Set the ``Application`` instance for this connection.
        (This can be replaced at any time.)
        :param app: Application instance.
        :param callback: Callable that takes the result of the CLI.
"""
assert isinstance(app, Application)
assert callback is None or callable(callback)
self.cli = CommandLineInterface(
application=app,
eventloop=self.eventloop,
output=self.vt100_output)
self.callback = callback
# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
# Input decoder for stdin. (Required when working with multibyte
# characters, like chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()] # nonlocal
# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True
def data_received(data):
""" TelnetProtocolParser 'data_received' callback """
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
def feed(self, data):
"""
Handler for incoming data. (Called by TelnetServer.)
"""
assert isinstance(data, binary_type)
self.parser.feed(data)
# Render again.
self.cli._redraw()
# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
# Control-D or Control-C was pressed.
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
# Handle CLI command
self._handle_command(return_value)
def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
            # Reset state and draw again. (If the connection is still open --
            # the application could have called TelnetConnection.close().)
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is send back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def close(self):
"""
Close the connection.
"""
self.application.client_leaving(self)
self.conn.close()
self.closed = True
class _TelnetEventLoopInterface(EventLoop):
"""
Eventloop object to be assigned to `CommandLineInterface`.
"""
def __init__(self, server):
self._server = server
def close(self):
" Ignore. "
def stop(self):
" Ignore. "
def run_in_executor(self, callback):
self._server.run_in_executor(callback)
def call_from_executor(self, callback, _max_postpone_until=None):
self._server.call_from_executor(callback)
def add_reader(self, fd, callback):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
class TelnetServer(object):
"""
Telnet server implementation.
"""
def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
assert isinstance(host, text_type)
assert isinstance(port, int)
assert isinstance(application, TelnetApplication)
assert isinstance(encoding, text_type)
self.host = host
self.port = port
self.application = application
self.encoding = encoding
self.connections = set()
self._calls_from_executor = []
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
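        # Writing a single byte to this pipe (see call_from_executor below)
        # wakes up the select() call in run(), so callbacks scheduled from
        # worker threads get executed in the main event loop -- the classic
        # self-pipe trick.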
@classmethod
def create_socket(cls, host, port):
# Create and bind socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(4)
return s
def run_in_executor(self, callback):
threading.Thread(target=callback).start()
def call_from_executor(self, callback):
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Removed closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close()
def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr)
def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection)
| 32.598039 | 98 | 0.60015 | [
"BSD-3-Clause"
] | sainjusajan/django-oscar | oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | 13,300 | Python |
# model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=1,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=True,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
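# Hedged usage note (not part of the original config): with an mmdetection
# checkout this file is passed to the standard training entry point, e.g.
# `python tools/train.py configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py`;
# the exact script path and flags depend on the mmdetection version in use.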
| 30.126866 | 87 | 0.628685 | [
"Apache-2.0"
] | mrsempress/mmdetection | configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py | 4,037 | Python |
#!/usr/bin/env python
"""
This now uses the imshow command instead of pcolor which *is much
faster*
"""
from __future__ import division, print_function
import numpy as np
from matplotlib.pyplot import *
from matplotlib.collections import LineCollection
import matplotlib.cbook as cbook
# I use if 1 to break up the different regions of code visually
if 1: # load the data
# data are 256x256 16 bit integers
dfile = cbook.get_sample_data('s1045.ima.gz')
im = np.fromstring(dfile.read(), np.uint16).astype(float)
im.shape = 256, 256
if 1: # plot the MRI in pcolor
subplot(221)
imshow(im, cmap=cm.gray)
axis('off')
if 1: # plot the histogram of MRI intensity
subplot(222)
im = np.ravel(im)
im = im[np.nonzero(im)] # ignore the background
im = im/(2.0**15) # normalize
hist(im, 100)
xticks([-1, -.5, 0, .5, 1])
yticks([])
xlabel('intensity')
ylabel('MRI density')
if 1: # plot the EEG
# load the data
    numSamples, numRows = 800, 4
eegfile = cbook.get_sample_data('eeg.dat', asfileobj=False)
print('loading eeg %s' % eegfile)
data = np.fromstring(open(eegfile, 'rb').read(), float)
data.shape = numSamples, numRows
t = 10.0 * np.arange(numSamples, dtype=float)/numSamples
ticklocs = []
ax = subplot(212)
xlim(0,10)
xticks(np.arange(10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7 # Crowd them a bit.
y0 = dmin
y1 = (numRows-1) * dr + dmax
ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
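    # Each EEG trace shares the same time axis; the per-row vertical offsets
    # above shift the four channels apart so they stack without overlapping.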
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
# set the yticks to use axes coords on the y axis
ax.set_yticks(ticklocs)
ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
xlabel('time (s)')
show()
| 26.037975 | 71 | 0.618376 | [
"MIT",
"BSD-3-Clause"
] | epgauss/matplotlib | examples/pylab_examples/mri_with_eeg.py | 2,057 | Python |
import pandas as pd
kraken_rank_dictionary = {
'P': 'phylum',
'C': 'class',
'O': 'order',
'F': 'family',
'G': 'genus',
'S': 'species'
}
greengenes_rank_dict = {
'k__': 'kingdom',
'p__': 'phylum',
'c__': 'class',
'o__': 'order',
'f__': 'family',
'g__': 'genus',
's__': 'species'
}
kraken_columns = ['PERCENTAGE', 'lca_read_count', 'read_count', 'rank',
'@@TAXID', 'TAXNAME']
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
    # TODO finish docs
    """Convert a summary of all ranks from Kraken2 into rank-wise profiles
    similar to the CAMI-SIM output.
    Parameters
    ----------
    all_rank_summary
        Path to the tab-separated Kraken2 report covering all ranks.
    output_rank_summaries
        Output file paths, one per entry in `ranks`.
    ranks
        Rank names to extract (e.g. 'phylum', 'genus'), in the same order as
        `output_rank_summaries`.
    Returns
    -------
    None. One tab-separated profile file is written per requested rank.
    """
# TODO COULD be split into two format functions: one to reformat,
# and one to split on rank
# TODO give error for invalid rank value
all_ranks = pd.read_csv(all_rank_summary, sep='\t')
all_ranks.columns = kraken_columns
# TODO for kraken is it okay to just take the first part (drop the number)
all_ranks['rank'] = all_ranks['rank'].str[0]
all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
all_ranks['RANK'] = [kraken_rank_dictionary[key] for key in
all_ranks['rank']]
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
def metaphlan2_transformer(all_rank_summary, output_rank_summaries, ranks):
all_ranks = pd.read_csv(all_rank_summary, sep='\t', skiprows=3)
def last_entry(x): return x.split('|')[-1]
all_ranks['last_clade'] = all_ranks['#clade_name'].map(last_entry)
all_ranks['@@TAXID'] = all_ranks['NCBI_tax_id'].map(last_entry)
all_ranks['RANK'] = all_ranks['last_clade'].map(
lambda x: greengenes_rank_dict[x[:3]])
all_ranks['TAXNAME'] = all_ranks['last_clade'].map(lambda x: x[3:])
all_ranks['PERCENTAGE'] = all_ranks['relative_abundance']
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
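# Hedged usage sketch (file names below are hypothetical, not part of this
# module): both transformers take a single all-rank report and split it into
# one CAMI-style, tab-separated profile per requested rank, e.g.
#
#   kraken2_transformer(
#       "sample1.kraken2.report",
#       ["sample1.phylum.profile", "sample1.genus.profile"],
#       ["phylum", "genus"],
#   )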
| 33.863014 | 78 | 0.642799 | [
"MIT"
] | qiyunzhu/taxa-assign-benchmarking | benchutils/transformers.py | 2,472 | Python |
from .randt import RandomizationTools
def setup(bot):
bot.add_cog(RandomizationTools(bot))
| 16.166667 | 40 | 0.773196 | [
"MIT"
] | Simonx22/FalcomBot-cogs | randt/__init__.py | 97 | Python |
"""empty message
Revision ID: f6d196dc5629
Revises: fd5076041bff
Create Date: 2019-04-06 22:25:32.133764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6d196dc5629'
down_revision = 'fd5076041bff'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
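        # Three-step pattern for adding a NOT NULL column to a populated table:
        # add it as nullable, backfill existing rows, then tighten to NOT NULL.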
op.add_column('users', sa.Column('admin', sa.Boolean(), nullable=True))
op.execute('UPDATE users SET admin=False')
op.alter_column('users', 'admin', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'admin')
# ### end Alembic commands ###
| 24.16129 | 75 | 0.688919 | [
"MIT"
] | YA-androidapp/vuejs-flask-docker | services/backend/migrations/versions/f6d196dc5629_.py | 749 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO:
    * needs to check if required modules are installed (or preferably developed)
    * needs to be able to ignore plugins that the user doesn't care about
Super Setup
PREREQ:
git config --global push.default current
export CODE_DIR=~/code
mkdir $CODE_DIR
cd $CODE_DIR
git clone https://github.com/WildbookOrg/ibeis.git
cd ibeis
python super_setup.py --bootstrap
OR (if in virtual environment)
python super_setup.py --bootstrap --nosudo
OR
./_scripts/bootstrap.py
THEN
./_scripts/__install_prereqs__.sh
THEN
./super_setup.py --build --develop
./super_setup.py --build --develop
./super_setup.py --status
    # If on current branch copy so super setup isn't overwritten as we go
python -c "import utool as ut; ut.copy('super_setup.py', '_ibeis_setup.py')"
# Status
python _ibeis_setup.py -y --gg "git status"
python _ibeis_setup.py -y --gg "git branch"
# Setup Next
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout -b next"
#python _ibeis_setup.py -y --gg "git checkout next"
#python _ibeis_setup.py -y --gg "git push -u origin next"
#python _ibeis_setup.py -y --gg "git push remote origin/next"
####python _ibeis_setup.py -y --gg "git merge master"
#python _ibeis_setup.py -y --gg "git checkout ^HEAD"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# -- MERGE topic -> next
##python _ibeis_setup.py -y --gg "git checkout topic"
##python _ibeis_setup.py -y --gg "git checkout next"
##python _ibeis_setup.py -y --gg "git merge topic"
# -- MERGE next -> master
python _ibeis_setup.py -y --gg "git checkout master"
python _ibeis_setup.py -y --gg "git merge next"
# -- SAFER MERGE topic -> next
python super_setup.py --checkout next
python super_setup.py --newlocalbranch merge_next_joncrall_dev_branch
python super_setup.py --merge joncrall_dev_branch
./run_tests.py
python super_setup.py --checkout next
python super_setup.py --merge merge_next_joncrall_dev_branch
# Push
python _ibeis_setup.py -y --gg "git push"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# MAKE A NEW BRANCH
python super_setup.py --newbranch joncrall_dev_branch
python super_setup.py --checkout joncrall_dev_branch
python super_setup.py --checkout next
python super_setup.py --newbranch jdb
python super_setup.py --checkout jdb
GitReferences:
http://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
FIXME:
graph-viz
pydot
ibeis_cnn
Theano
    Lasagne
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import dirname, realpath
import platform
import sys
import os
#-----------------
# SYSTEM ENTRY POINT, NO UTOOL, BARE PYTHON
#-----------------
USAGE = ('''
--- USAGE ---
IBEIS (IMAGE ANALYSIS) SUPER SETUP
This script is meant to help setup, install, and update the developer
environment for IBEIS Image Analysis.
****
# Step 1 Initial Development Prereqs:
The first thing is to ensure you have a baseline development environment (gcc,
fortran, cmake, blas, git, pip, etc...). This should work well for apt-get,
yum, and macports package managers. It is possible to use Windows MinGW, but
it is not well supported.
The following command outputs the commands to install these prereq packages.
python super_setup.py --bootstrap
****
# Step 2 - utool
Just running the script will download and install utool --- a utility library
used in all aspects of the system.
python super_setup.py
****
# Step 3 - Download / Update Image Analysis Packages
Running the script again once utool is installed will ensure the rest of the
repositories are cloned onto your machine in the directory above this one, or
in a custom location set by your $CODE_DIR environment variable. Running with
the pull command will update the packages as well.
python super_setup.py pull
Note: if you have wildme credentials you can run this to set up git
python super_setup.py pull --move-wildme-ssh
****
# Step 3.5 - Grab and Build Extern libraries with scripts
python super_setup.py --opencv
python super_setup.py --hesaff
python super_setup.py --flann
python super_setup.py --dcnn
python super_setup.py --pydarknet
python super_setup.py --pyqt
python super_setup.py --pyrf
****
# Step 4 - Build C++ components.
Some submodules require C++ libraries. Build them using the following command.
python super_setup.py build
****
# Step 5 - Install the system.
Register these packages with the Python environment.
# Install external modules
python super_setup.py --develop
# Install the ibeis module
pip install -e .
--- /USAGE ---
''')
def define_argparse():
""" todo, find a way to use this effectively """
import argparse
parser = argparse.ArgumentParser(description='IBEIS super setup')
# parser.add_argument('command', help='command to run')
def add_flag(group, name, help=None):
group.add_argument(name.replace('--', ''), action='store_true',
default=False, help=help)
# subparsers = parser.add_subparsers()
# subparsers.add_parser('pull', help='pulls IBEIS repos')
# subparsers.add_parser('ensure', help='ensures checkouts of IBEIS repos')
# sub = subparsers.add_parser('move-wildme', help='changes to the wildme repos')
# sub.add_argument('--fmt', dest='fmt', action='store',
# choices=['ssh', 'https'], help='url type')
# # Setup options for parser_a
# # Add nargs="*" for zero or more other commands
# parser.add_argument('extra', nargs = "*", help = 'Other commands')
# parser.add_argument('command', action='store_true', default=False,
# help='outputs commands to install prereqs')
g1 = parser.add_argument_group('setup')
add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
add_flag(g1, 'ensure', help='ensures that all repos are checked out')
add_flag(g1, 'build', help='builds python packages')
add_flag(g1, 'develop', help='installs packages in developer mode')
add_flag(g1, 'dcnn', help='setup dcnn packages')
g4 = parser.add_argument_group('maintenance')
add_flag(g4, 'pull', help='pulls all IBIES repos')
g3 = parser.add_argument_group('extern')
add_flag(g3, 'no_qt')
add_flag(g3, 'no_gui')
add_flag(g3, 'ignore_opencv')
g2 = parser.add_argument_group('utils')
add_flag(g2, 'move_wildme',
help='changes to the wildme repos')
args = parser.parse_args()
return args
# args = define_argparse()
# print('args = %r' % (args,))
# sys.exit(1)
def get_plat_specifier():
"""
Standard platform specifier used by distutils
"""
import setuptools # NOQA
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier
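# Illustrative example (exact value depends on the machine): on 64-bit Linux
# under Python 2.7 the specifier above comes out roughly as
# '.linux-x86_64-2.7', with '-pydebug' appended on debug builds.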
def import_module_from_fpath(module_fpath):
""" imports module from a file path """
import platform
from os.path import basename, splitext
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
else:
raise AssertionError('invalid python version')
return module
def bootstrap(WIN32):
if WIN32:
# need to preinstall parse
win32bootstrap_fpath = os.path.abspath('_scripts/win32bootstrap.py')
win32bootstrap = import_module_from_fpath(win32bootstrap_fpath)
win32bootstrap.bootstrap_sysreq()
else:
#import bootstrap
bootstrap_fpath = os.path.abspath('_scripts/bootstrap.py')
bootstrap = import_module_from_fpath(bootstrap_fpath)
#sys.path.append(os.path.abspath('_scripts'))
bootstrap.bootstrap_sysreq()
sys.exit(0)
#################
# ENSURING UTOOL
#################
def syscmd(cmdstr):
print('RUN> ' + cmdstr)
os.system(cmdstr)
def in_virtual_env():
print('sys.real_prefix=%r' % (getattr(sys, 'real_prefix', None),))
print('sys.base_prefix=%r' % (getattr(sys, 'base_prefix', None),))
print('sys.prefix=%r' % (getattr(sys, 'prefix', None),))
in_venv = False
if hasattr(sys, 'real_prefix'):
# For virtualenv module
in_venv = True
elif hasattr(sys, 'base_prefix'):
# For venv module
in_venv = sys.base_prefix != sys.prefix
return in_venv
def ensure_utool(CODE_DIR, pythoncmd):
WIN32 = sys.platform.startswith('win32')
#UTOOL_BRANCH = ' -b <branch> <remote_repo>'
UTOOL_BRANCH = 'next'
UTOOL_REPO = 'https://github.com/WildbookOrg/utool.git'
print('WARNING: utool is not found')
print('Attempting to get utool. Enter (y) to continue')
if '-y' in sys.argv:
ans = 'y'
else:
try:
ans = input('Enter y to continue. Anything else to exit...\n')
except:
ans = raw_input('Enter y to continue. Anything else to exit...\n') # NOQA
if ans != 'y':
print('Please install utool to continue')
sys.exit(0)
cwdpath = os.path.realpath(os.getcwd())
usr_code_dir = os.path.expanduser(CODE_DIR)
os.chdir(usr_code_dir)
print("user code dir = %r" % usr_code_dir)
print('cloning utool')
if not os.path.exists('utool'):
syscmd('git clone ' + UTOOL_REPO + ' -b ' + UTOOL_BRANCH)
os.chdir('utool')
print('pulling utool')
syscmd('git pull')
print('installing utool for development')
cmdstr = '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd)
# TODO: use pip instead
# cmdstr = '{pythoncmd} -m pip install .'.format(pythoncmd=pythoncmd)
if not WIN32 and not in_virtual_env():
cmdstr = 'sudo ' + cmdstr
syscmd(cmdstr)
os.chdir(cwdpath)
# sys.path.append(usr_code_dir)
print('Please rerun super_setup.py')
print(' '.join(sys.argv))
sys.exit(1)
#-----------------
# UTOOL PYTHON
#-----------------
def initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3):
import utool as ut
WITH_CNN = True
#WITH_TPL = True
WITH_QT = not ut.get_argflag('--no-qt')
WITH_GUI = not ut.get_argflag('--no-gui')
WITH_CUSTOM_TPL = True
WITH_PLUGINS = True
#-----------
# IBEIS project repos
#-----------
# if True:
# jon_repo_base = 'https://github.com/WildbookOrg'
# jason_repo_base = 'https://github.com/WildbookOrg'
# else:
# jon_repo_base = 'https://github.com/wildme'
# jason_repo_base = 'https://github.com/wildme'
ibeis_rman = ut.RepoManager([
'https://github.com/WildbookOrg/utool.git',
# 'https://github.com/WildbookOrg/sandbox_utools.git',
'https://github.com/WildbookOrg/vtool.git',
'https://github.com/WildbookOrg/dtool.git',
'https://github.com/Erotemic/ubelt.git',
'https://github.com/WildbookOrg/detecttools.git',
], CODE_DIR, label='core', pythoncmd=pythoncmd)
tpl_rman = ut.RepoManager([], CODE_DIR, label='tpl', pythoncmd=pythoncmd)
if not GET_ARGFLAG('--ignore-opencv'):
cv_repo = ut.Repo('https://github.com/Itseez/opencv.git', CODE_DIR, modname='cv2')
tpl_rman.add_repo(cv_repo)
if WITH_GUI:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/plottool.git',
])
if WITH_QT:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/guitool.git',
])
tpl_rman.add_repo(ut.Repo(modname=('PyQt4', 'PyQt5', 'PyQt')))
if WITH_CUSTOM_TPL:
flann_repo = ut.Repo('https://github.com/WildbookOrg/flann.git', CODE_DIR, modname='pyflann')
ibeis_rman.add_repo(flann_repo)
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/hesaff.git',
])
if WITH_CNN:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis_cnn.git',
'https://github.com/WildbookOrg/pydarknet.git',
'https://gitlab.com/bluemellophone/lightnet.git',
'https://gitlab.com/bluemellophone/brambox.git',
])
# NEW CNN Dependencies
tpl_rman.add_repos([
'https://github.com/pytorch/pytorch.git',
])
# if GET_ARGFLAG('--libgpuarray'):
tpl_rman.add_repos([
'https://github.com/Theano/libgpuarray.git',
])
# CNN Dependencies
tpl_rman.add_repos([
'https://github.com/Theano/Theano.git',
# 'https://github.com/lisa-lab/pylearn2.git',
'https://github.com/Lasagne/Lasagne.git',
])
if WITH_PLUGINS:
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis-flukematch-module.git',
'https://github.com/WildbookOrg/ibeis-curvrank-module.git',
'https://github.com/WildbookOrg/ibeis-deepsense-module.git',
'https://github.com/WildbookOrg/ibeis-finfindr-module.git',
'https://github.com/WildbookOrg/ibeis-kaggle7-module.git',
'https://github.com/WildbookOrg/pyrf.git',
])
if False:
        # Deprecated
ibeis_rman.add_repos([
#'https://github.com/WildbookOrg/pybing.git',
#'https://github.com/aweinstock314/cyth.git',
#'https://github.com/hjweide/pygist',
])
# Add main repo (Must be checked last due to dependency issues)
ibeis_rman.add_repos([
'https://github.com/WildbookOrg/ibeis.git',
])
#-----------
# Custom third party build/install scripts
#-----------
define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3)
return tpl_rman, ibeis_rman
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
"""
export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True
python -c "import pydot; print(pydot.__file__)"
python -c "import pydot; print(pydot.__version__)"
python -c "import pydot; print(pydot.find_graphviz())"
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
python -c "import theano; print(theano.__file__)"
# python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import lasagne; print(lasagne.__file__)"
python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"
python -c "import detecttools; print(detecttools.__file__)"
# http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
pip install vext.pyqt5
sudo apt-get install pyqt5-dev
sudo apt-get install python3-pyqt5
python
python -c "import sip; print('[test] Python can import sip')"
python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"
ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
"""
import utool as ut
major = str(sys.version_info.major)
minor = str(sys.version_info.minor)
majorminor = [major, minor]
pyoff = '2' if sys.version_info.major == 3 else '3'
pyon = majorminor[0]
plat_spec = get_plat_specifier()
# build_dname = 'build' + ''.join(majorminor)
build_dname = 'cmake_builds/build' + plat_spec
script_fmtdict = {
'pyexe' : sys.executable,
'pyversion' : 'python' + '.'.join(majorminor),
'pypkg_var' : 'PYTHON' + pyon + '_PACKAGES_PATH',
'build_dname' : build_dname,
'pyoff' : pyoff,
'pyon' : pyon,
'cv_pyon_var' : 'BUILD_opencv_python' + pyon,
'cv_pyoff_var' : 'BUILD_opencv_python' + pyoff,
'plat_spec' : plat_spec,
'source_dpath' : '../..',
'libext' : ut.get_lib_ext(),
}
if os.environ.get('VIRTUAL_ENV', '') == '':
if sys.platform.startswith('darwin'):
local_prefix = '/opt/local'
else:
local_prefix = '/usr/local'
else:
local_prefix = os.environ['VIRTUAL_ENV']
    opencv_dir = os.path.join(local_prefix, 'share', 'OpenCV')
if not os.path.exists(opencv_dir):
if not ut.get_argflag('--opencv'):
opencv_dir = ''
print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
print('Running this script with --opencv will build and install it there')
# define bash variables for different combinations of python distros and
# virtual environments
python_bash_setup = ut.codeblock(
r'''
# STARTBLOCK bash
if [[ "$VIRTUAL_ENV" == "" ]]; then
            # The case where we are installing system-wide
            # It is recommended that a virtual environment is used instead
export PYTHON_EXECUTABLE=$(which {pyversion})
            if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac system info
export LOCAL_PREFIX=/opt/local
export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
else
# Linux system info
export LOCAL_PREFIX=/usr/local
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO="sudo"
fi
# No windows support here
else
            # The preferred case where we are in a virtual environment
export PYTHON_EXECUTABLE=$(which python)
# export LOCAL_PREFIX=$VIRTUAL_ENV/local
export LOCAL_PREFIX=$VIRTUAL_ENV
export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages
export PYTHON_PACKAGES_PATH=${pypkg_var}
export _SUDO=""
fi
echo "LOCAL_PREFIX = $LOCAL_PREFIX"
echo "{pypkg_var} = ${pypkg_var}"
# ENDBLOCK bash
'''
).format(**script_fmtdict)
script_fmtdict['python_bash_setup'] = python_bash_setup
#===================
# PYFLANN SETUP SCRIPTS
#===================
ibeis_rman['pyflann'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd {repo_dir}
mkdir -p {build_dname}
cd {build_dname}
cmake -G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE="Release" \
-DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
-DBUILD_EXAMPLES=Off \
-DBUILD_TESTS=Off \
-DBUILD_PYTHON_BINDINGS=On \
-DBUILD_MATLAB_BINDINGS=Off \
-DBUILD_CUDA_LIB=Off\
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\
{source_dpath}
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict)
)
ibeis_rman['pyflann'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
# The pyflann source lives here
cd {repo_dir}/src/python
# Need to run build to move the libs to the build directory
python setup.py build
# Use pip to editable install
pip install -e {repo_dir}/src/python
# Old way of doing it
# But the setup script is generated during build
# python {repo_dir}/build/src/python/setup.py develop
python -c "import pyflann; print(pyflann.__file__)" --verb-flann
python -c "import pyflann; print(pyflann)" --verb-flann
# ENDBLOCK bash
''').format(repo_dir=ibeis_rman['pyflann'].dpath)
)
#===================
# HESAFF
#===================
ibeis_rman['hesaff'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/hesaff
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
        if [[ "$OSTYPE" == "darwin"* ]]; then
cmake -G "Unix Makefiles" \
-DCMAKE_OSX_ARCHITECTURES=x86_64 \
-DCMAKE_C_COMPILER=clang2 \
-DCMAKE_CXX_COMPILER=clang2++ \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
else
cmake -G "Unix Makefiles" \
-DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
$OPENCV_ARGS \
{source_dpath}
fi
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
#make VERBOSE=1
cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# PYDARKNET
#===================
ibeis_rman['pydarknet'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pydarknet
mkdir -p {build_dname}
cd {build_dname}
if [[ "$(which nvcc)" == "" ]]; then
export CMAKE_CUDA=Off
else
export CMAKE_CUDA=On
fi
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
        if [[ "$OSTYPE" == "darwin"* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pydarknet
cp -v lib*{libext} {source_dpath}/pydarknet
# cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# PYRF
#===================
ibeis_rman['pyrf'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/pyrf
mkdir -p {build_dname}
cd {build_dname}
# only specify an explicit opencv directory if we know one exists
if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
else
OPENCV_ARGS=""
fi
echo 'Configuring with cmake'
        if [[ "$OSTYPE" == "darwin"* ]]; then
export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
else
export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
fi
echo "CONFIG = $CONFIG"
cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
#################################
echo 'Building with make'
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS -w
#################################
export MAKE_EXITCODE=$?
echo "MAKE_EXITCODE=$MAKE_EXITCODE"
# Move the compiled library into the source folder
if [[ $MAKE_EXITCODE == 0 ]]; then
echo 'Moving the shared library'
# cp -v lib* ../pyrf
cp -v lib*{libext} {source_dpath}/pyrf
# cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}
fi
# ENDBLOCK
''').format(**script_fmtdict))
#===================
# OPENCV SETUP SCRIPTS
#===================
"""
./super_setup.py --dump-scripts
"""
tpl_rman['cv2'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
# Checkout opencv core
cd $CODE_DIR
# export REPO_DIR=$CODE_DIR/opencv
export REPO_DIR={repo_dpath}
# git clone https://github.com/Itseez/opencv.git
cd $REPO_DIR
# Checkout opencv extras
git clone https://github.com/Itseez/opencv_contrib.git
# cd opencv_contrib
# git pull
# cd ..
# git pull
mkdir -p $REPO_DIR/{build_dname}
cd $REPO_DIR/{build_dname}
cmake -G "Unix Makefiles" \
-D WITH_OPENMP=ON \
-D CMAKE_BUILD_TYPE=RELEASE \
-D {cv_pyoff_var}=Off \
-D {cv_pyon_var}=On \
-D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \
-D {pypkg_var}=${pypkg_var} \
-D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
-D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \
-D WITH_CUDA=Off \
-D BUILD_opencv_dnn=Off \
-D BUILD_opencv_dnn_modern=Off \
-D WITH_VTK=Off \
-D WITH_CUDA=Off \
-D WITH_MATLAB=Off \
$REPO_DIR
# -D WITH_OPENCL=Off \
# -D BUILD_opencv_face=Off \
# -D BUILD_opencv_objdetect=Off \
# -D BUILD_opencv_video=Off \
# -D BUILD_opencv_videoio=Off \
# -D BUILD_opencv_videostab=Off \
# -D BUILD_opencv_ximgproc=Off \
# -D BUILD_opencv_xobjdetect=Off \
# -D BUILD_opencv_xphoto=Off \
# -D BUILD_opencv_datasets=Off \
# -D CXX_FLAGS="-std=c++11" \ %TODO
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath),
**script_fmtdict))
tpl_rman['cv2'].add_script('install', ut.codeblock(
r'''
# STARTBLOCK bash
{python_bash_setup}
cd $CODE_DIR/opencv/{build_dname}
$_SUDO make install
# Hack because cv2 does not want to be installed for some reason
# cp lib/cv2.so $PYTHON_PACKAGES_PATH
# Seems to work now that local is removed from prefix
# cp -v lib/cv2.so $PYTHON_PACKAGES_PATH
        # Test to make sure things are working
python -c "import numpy; print(numpy.__file__)"
python -c "import numpy; print(numpy.__version__)"
python -c "import cv2; print(cv2.__version__)"
python -c "import cv2; print(cv2.__file__)"
#python -c "import vtool"
# Check if we have contrib modules
python -c "import cv2; print(cv2.xfeatures2d)"
# ENDBLOCK
''').format(**script_fmtdict))
# if GET_ARGFLAG('--libgpuarray'):
tpl_rman['libgpuarray'].add_script('build', ut.codeblock(
r'''
# STARTBLOCK bash
# Ensure the repo was checked out
if [ ! -d {repo_dpath} ]; then
git clone https://github.com/Theano/libgpuarray.git {repo_dpath}
fi
{python_bash_setup}
cd {repo_dpath}
# need a specific version of libgpuarray
git checkout tags/v0.6.2 -b v0.6.2
mkdir -p {repo_dpath}/{build_dname}
cd {repo_dpath}/{build_dname}
# First build the C library
cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX
export NCPUS=$(grep -c ^processor /proc/cpuinfo)
make -j$NCPUS
$_SUDO make install
        # Now build the python library
cd {repo_dpath}
python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include
python setup.py build
# python setup.py install
$_SUDO pip install -e {repo_dpath}
# DEVICE="<test device>" python -c "import pygpu;pygpu.test()"
# DEVICE="gpu0" python -c "import pygpu;pygpu.test()"
cd ~
$_SUDO pip install nose
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
# pip uninstall pygpu
# ENDBLOCK
''').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath),
**script_fmtdict))
#===================
# PYQT SETUP SCRIPTS
#===================
if ut.in_virtual_env():
try:
fmtdict = {
'sys_dist_packages': ut.get_global_dist_packages_dir(),
'venv_site_packages': ut.get_site_packages_dir(),
'pyqt' : 'PyQt4' if PY2 else 'PyQt5',
# Need the PyQT5 SVG module for IPython to work properly
'debian-python-qt' : (
'python-qt4' if PY2 else
'qt5-default python3-pyqt5 debian-python-qt-svg'),
'pip-python-qt' : 'python-qt4' if PY2 else 'python-qt5'
}
# sys_dist_packages = ut.get_global_dist_packages_dir()
# sys_pyqt_dir = sys_dist_packages + '/{pyqt}'
# Allows us to use a system qt install in a virtual environment.
system_to_venv = ut.codeblock(
r'''
# STARTBLOCK bash
# Creates a symlink to the global PyQt in a virtual env
export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"
export VENV_DIST_PACKAGES="{venv_site_packages}"
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
echo "have qt"
ls $GLOBAL_DIST_PACKAGES/{pyqt}
ls $VENV_DIST_PACKAGES/{pyqt}
else
# Ensure PyQt is installed first (FIXME make this work for non-debian systems)
sudo apt-get install {debian-python-qt}
# pip install {pip-python-qt}
fi
if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    # Install system pyqt packages to the virtual environment via symlink
ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}
ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/
ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/
else
echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"
fi
echo "testing"
python -c "import {pyqt}; print({pyqt})"
# ENDBLOCK bash
''').format(**fmtdict)
# TODO: add custom build alternative
tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
except NotImplementedError:
pass
#-----------
# Verify TPL Dependencies
#-----------
def GET_ARGFLAG(arg, *args, **kwargs):
import utool as ut
return arg.lstrip('--') in sys.argv or ut.get_argflag(arg, *args, **kwargs)
def move_wildme(ibeis_rman, fmt):
wildme_user = 'WildbookOrg'
wildme_remote = 'wildme'
for repo in ibeis_rman.repos:
try:
gitrepo = repo.as_gitpython()
except Exception:
repo.change_url_format(fmt)
print('repo {!r} does not exist yet'.format(repo))
continue
wildme_url = repo._new_remote_url(host='github.com', user=wildme_user, fmt=fmt)
remotes = repo.remotes
message = 'Checking %s for move to wildme' % (repo,)
print(message)
incorrect_version = repo._ensure_remote_exists(wildme_remote, wildme_url)
if 'origin' in remotes:
try:
origin = remotes['origin']
origin_protocol = origin['url'].split(':')[0]
origin_user = origin['username']
if origin_user != wildme_user or origin_protocol != fmt or incorrect_version:
if origin_user not in remotes:
# first add a remote that is the original origin
origin_url = origin['url']
print(' * Create remote %r: %r' % (origin_user, origin_url,))
gitrepo.create_remote(origin_user, origin_url)
# change origin to use wildme url
gitorigin = gitrepo.remote('origin')
print(' * Change origin url to %r' % (wildme_url,))
gitorigin.set_url(wildme_url)
            except Exception:
print('\tWARNING: COULD NOT MIGRATE REPO = %r' % (repo, ))
repo.change_url_format(fmt)
def execute_commands(tpl_rman, ibeis_rman):
import utool as ut
GET_ARGVAL = ut.get_argval
ut.init_catch_ctrl_c()
if 0:
print('Version Check Source:')
for repo in tpl_rman.repos:
print('python -c "import {0}; print({0}.__file__)"'.format(repo.modname))
print('python -c "import {0}; print({0}.__version__)"'.format(repo.modname))
#-----------
# Execute Commands on Core Repos
#-----------
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()
print('ibeis_rman = %r' % (ibeis_rman,))
wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG('--move-wildme-ssh')
wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG('--move-wildme-http')
if wildme_ssh_flags or wildme_https_flags:
fmt = 'ssh' if wildme_ssh_flags else 'https'
move_wildme(ibeis_rman, fmt)
# Commands on global git repos
if GET_ARGFLAG('--status'):
ibeis_rman.issue('git status')
sys.exit(0)
ibeis_rman.ensure()
if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
ut.ensuredir(dpath)
dumps = [
(tpl_rman, 'cv2', 'build'),
(tpl_rman, 'cv2', 'install'),
(ibeis_rman, 'flann', 'build'),
(ibeis_rman, 'flann', 'install'),
(ibeis_rman, 'hesaff', 'build'),
(tpl_rman, 'PyQt', 'system_to_venv'),
(tpl_rman, 'libgpuarray', 'build'),
]
for rman, mod, sname in dumps:
from os.path import join
# if mod not in rman:
# print('mod=%r not available in rman=%r' % (mod, rman))
# continue
script = rman[mod].get_script(sname).text
suffix = get_plat_specifier()
sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
ut.write_to(sh_fpath, script)
if GET_ARGFLAG('--requirements'):
ut.cmd('pip install -r requirements.txt')
# HACKED IN SCRIPTS WHILE IM STILL FIGURING OUT TPL DEPS
if GET_ARGFLAG('--opencv'):
# There is now a pypi for opencv! Yay
# ut.cmd('pip install opencv-python')
# Bummer, but we need opencv source for pyhessaff
# we should just make a wheel for pyhessaff
cv_repo = tpl_rman['cv2']
cv_repo.clone()
script = cv_repo.get_script('build')
script.exec_()
cv_repo = tpl_rman['cv2']
script = cv_repo.get_script('install')
script.exec_()
if GET_ARGFLAG('--flann'):
script = ibeis_rman['flann'].get_script('build')
script.exec_()
script = ibeis_rman['flann'].get_script('install')
script.exec_()
if GET_ARGFLAG('--pyqt'):
script = tpl_rman['PyQt'].get_script('system_to_venv')
script.exec_()
if GET_ARGFLAG('--hesaff'):
script = ibeis_rman['hesaff'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pydarknet'):
script = ibeis_rman['pydarknet'].get_script('build')
script.exec_()
if GET_ARGFLAG('--pyrf'):
script = ibeis_rman['pyrf'].get_script('build')
script.exec_()
if GET_ARGFLAG('--torch'):
        # Theano and lasagne code should be moved to pytorch
tpl_rman['pytorch'].clone(recursive=True)
tpl_rman['pytorch'].issue('git submodule update --init')
        tpl_rman['pytorch'].issue('python setup.py install')
tpl_rman['pytorch'].issue('pip install torchvision')
# tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup install')
# tpl_rman['pytorch'].issue('pip install -e .')
if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
tpl_rman['libgpuarray'].clone()
script = tpl_rman['libgpuarray'].get_script('build')
script.exec_()
if GET_ARGFLAG('--dcnn'):
tpl_rman['theano'].clone()
# tpl_rman['pylearn2'].clone()
tpl_rman['lasagne'].clone()
tpl_rman['theano'].issue('pip install -e .')
# tpl_rman['pylearn2'].issue('pip install -e .')
tpl_rman['lasagne'].issue('pip install -e .')
# tpl_rman['pylearn2'].python_develop()
# tpl_rman['theano'].python_develop()
# tpl_rman['lasagne'].python_develop()
#_===
if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
missing_dynlib = tpl_rman.check_cpp_build()
missing_dynlib += ibeis_rman.check_cpp_build()
missing_install = tpl_rman.check_installed()
missing_install += ibeis_rman.check_installed()
problems = []
problems += ibeis_rman.check_importable()
problems += tpl_rman.check_importable()
if GET_ARGFLAG('--fix'):
print('Trying to fix problems')
for repo in missing_dynlib:
repo.custom_build()
for repo, recommended_fix in problems:
print('Trying to fix repo = %r' % (repo,))
print(' * recommended_fix = %r' % (recommended_fix,))
if recommended_fix == 'rebuild':
repo.custom_build()
print('Can currently only fix one module at a time. Please re-run')
sys.exit(1)
else:
print('Not sure how to fix %r' % (repo,))
if GET_ARGFLAG('--pull'):
ibeis_rman.issue('git pull')
if GET_ARGFLAG('--build'):
# Build tpl repos
# tpl_rman.custom_build()
# ibeis_rman.custom_build()
# Build only IBEIS repos with setup.py
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))
# Like install, but better if you are developing
if GET_ARGFLAG('--develop'):
_rman = ibeis_rman.only_with_pysetup()
# # _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
# # sudo=not ut.in_virtual_env())
_rman.issue('{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd),
sudo=not ut.in_virtual_env())
if GET_ARGFLAG('--clean'):
_rman = ibeis_rman.only_with_pysetup()
_rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--install'):
        print("WARNING: Don't use install if you are a developer. Use develop instead.")
        _rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))
if GET_ARGFLAG('--push'):
ibeis_rman.issue('git push')
if GET_ARGFLAG('--branch'):
ibeis_rman.issue('git branch')
sys.exit(0)
if GET_ARGFLAG('--tag-status'):
ibeis_rman.issue('git tag')
# Tag everything
tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
if tag_name is not None:
ibeis_rman.issue('git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.format(**locals()))
ibeis_rman.issue('git push --tags')
if GET_ARGFLAG('--bext'):
ibeis_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(pythoncmd=pythoncmd))
commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
if commit_msg is not None:
ibeis_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))
# Change Branch
branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
if branch_name is not None:
try:
ibeis_rman.issue('git checkout "{branch_name}"'.format(**locals()))
except Exception:
print('ERROR: Could not checkout branch: %r' % (branch_name, ))
# Creates new branches
newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
if newbranch_name is not None:
#rman.issue('git stash"'.format(**locals()))
ibeis_rman.issue('git checkout -b "{newbranch_name}"'.format(**locals()))
ibeis_rman.issue('git push --set-upstream origin {newbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
# Creates new branches
newlocalbranch_name = GET_ARGVAL('--newlocalbranch', type_=str, default=None)
if newlocalbranch_name is not None:
#rman.issue('git stash"'.format(**locals()))
ibeis_rman.issue('git checkout -b "{newlocalbranch_name}"'.format(**locals()))
#rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
#rman.issue('git stash pop"'.format(**locals()))
# Creates new branches
mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
if mergebranch_name is not None:
ibeis_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))
# Change ownership
if GET_ARGFLAG('--serverchmod'):
ibeis_rman.issue('chmod -R 755 *')
if GET_ARGFLAG('--chown'):
# Fixes problems where repos are checked out as root
username = os.environ.get('USERNAME', ut.get_argval('--username'))
if username is None:
username = os.environ.get('USER', None)
if username is None:
raise AssertionError('cannot find username in commandline or environment vars')
usergroup = username
ibeis_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
sudo=True)
upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
if upstream_branch is not None:
# git 2.0
ibeis_rman.issue('git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'.format(**locals()))
upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
if upstream_push is not None:
ibeis_rman.issue('git push --set-upstream origin {upstream_push}'.format(**locals()))
if GET_ARGFLAG('--test'):
failures = []
for repo_dpath in ibeis_rman.repo_dirs:
# ut.getp_
mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
only_packages=True)
modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
print('Checking modules = %r' % (modname_list,))
for modname in modname_list:
try:
ut.import_modname(modname)
print(modname + ' success')
except ImportError as ex:
failures += [modname]
print(modname + ' failure')
print('failures = %s' % (ut.repr3(failures),))
if False:
try:
from six.moves import input
except ImportError:
input = raw_input # NOQA
# General global git command
gg_cmd = GET_ARGVAL('--gg', None) # global command
if gg_cmd is not None:
ans = 'yes' if GET_ARGFLAG('-y') else input('Are you sure you want to run: %r on all directories? ' % (gg_cmd,))
if ans == 'yes':
ibeis_rman.issue(gg_cmd)
def is_running_as_root():
"""
References:
http://stackoverflow.com/questions/5721529/running-python-script-as-root
http://stackoverflow.com/questions/2806897/checking-script-has-root
"""
return os.getenv('USER') == 'root'
def get_sysinfo(verbose=0):
if verbose:
print('USER = %r' % os.getenv("USER"))
if is_running_as_root():
print('Do not run super_setup.py as root')
sys.exit(1)
WIN32 = sys.platform.startswith('win32')
if verbose:
print('[super_setup] __IBEIS_SUPER_SETUP__')
if 'CODE_DIR' in os.environ:
CODE_DIR = os.environ.get('CODE_DIR')
else:
CODE_DIR = dirname(dirname(realpath(__file__))) # Home is where the .. is. # '~/code'
if verbose:
print('[super_setup] code_dir: %r' % CODE_DIR)
(DISTRO, DISTRO_VERSION, DISTRO_TAG) = platform.dist()
python_version = platform.python_version()
PY2 = python_version.startswith('2.7')
PY3 = python_version.startswith('3')
# '--py3' in sys.argv
# assert PY3 or
# 'IBEIS currently supports python 2.7, Instead got python=%r. use --py3 to override' % python_version
pythoncmd = sys.executable
# if PY2:
# pythoncmd = 'python' if WIN32 else 'python2.7'
# elif PY3:
# pythoncmd = 'python3'
return CODE_DIR, pythoncmd, WIN32, PY2, PY3
def main():
print('''
IBEIS Image Analysis (IA)
____ _ _ ___ ____ ____ ____ ____ ___ _ _ ___
[__ | | |__] |___ |__/ [__ |___ | | | |__]
___] |__| | |___ | \ ___] |___ | |__| |
Use --help to show usage
''')
show_usage = len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']
if show_usage:
print(USAGE)
CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo(verbose=1)
try:
import cv2 # NOQA
except ImportError:
print('Need to install OpenCV')
print('python super_setup.py --opencv')
try:
import pyflann # NOQA
except ImportError:
print('Need to install FLANN')
print('python super_setup.py --flann')
try:
import theano, lasagne # NOQA
except ImportError:
print('Need to install Theano/Lasagne/Pylearn2')
print('python super_setup.py --dcnn')
except ValueError as ex:
print(repr(ex))
print('Probably need libgpu array')
print('python super_setup.py --libgpuarray')
try:
try:
import PyQt4 # NOQA
except ImportError:
import PyQt5 # NOQA
except ImportError:
print('Need to install PyQt')
print('python super_setup.py --pyqt')
if '--bootstrap' in sys.argv or 'bootstrap' in sys.argv:
bootstrap(WIN32)
try:
# HACK IN A WAY TO ENSURE UTOOL
print('Checking utool')
import utool as ut # NOQA
except Exception:
ensure_utool(CODE_DIR, pythoncmd)
tpl_rman, ibeis_rman = initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3)
execute_commands(tpl_rman, ibeis_rman)
if __name__ == '__main__':
main()
| 34.715412 | 166 | 0.604485 | ["Apache-2.0"] | brmscheiner/ibeis | super_setup.py | 48,428 | Python |
#!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <[email protected]> et al.
# SPDX-License-Identifier: Apache-2.0
import logging
from ngsildclient.api.client import Client, Vendor
from .common import mocked_connected
logger = logging.getLogger(__name__)
def test_api_is_connected(requests_mock):
requests_mock.get("http://localhost:1026/ngsi-ld/v1/entities", status_code=200)
client = Client()
assert client.is_connected()
def test_api_guess_broker(mocked_connected, requests_mock):
requests_mock.get(
"http://localhost:1026/version",
status_code=200,
json={"orionld version": "post-v0.8.1"},
)
client = Client()
vendor, version = client.guess_vendor()
logger.info(f"{vendor=}")
assert vendor == Vendor.ORIONLD
assert version == "post-v0.8.1"
| 28.108108 | 83 | 0.721154 | ["Apache-2.0"] | Orange-OpenSource/python-orion-client | tests/test_client.py | 1,040 | Python |
async def m001_initial(db):
"""
    Initial charges table.
"""
await db.execute(
"""
CREATE TABLE IF NOT EXISTS charges (
id TEXT NOT NULL PRIMARY KEY,
user TEXT,
description TEXT,
onchainwallet TEXT,
onchainaddress TEXT,
lnbitswallet TEXT,
payment_request TEXT,
payment_hash TEXT,
webhook TEXT,
completelink TEXT,
completelinktext TEXT,
time INTEGER,
amount INTEGER,
balance INTEGER DEFAULT 0,
timestamp TIMESTAMP NOT NULL DEFAULT (strftime('%s', 'now'))
);
"""
)
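# Usage sketch (assumption, not part of the original file): the extension's
# migration runner is expected to await these m00N_* coroutines in order,
# each with an open database handle, e.g.:
#
#     await m001_initial(db)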
| 25.518519 | 72 | 0.510885 | ["MIT"] | bliotti/lnbits | lnbits/extensions/satspay/migrations.py | 689 | Python |
#!flask/bin/python
from app import app
from config import DEBUG_MODE
if __name__ == '__main__':
app.run(debug=DEBUG_MODE)
| 16 | 29 | 0.742188 | ["MIT"] | siketh/TRBlog | run.py | 128 | Python |
#pylint: skip-file
from setuptools import setup, find_packages
from pypandoc import convert
def convert_md(filename):
return convert(filename, 'rst')
setup(name='nonstandard',
version='0.9.3',
description="Obsolete; see package *experimental*.",
long_description = convert_md('README.md'),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Interpreters',
],
url='https://github.com/aroberge/nonstandard',
author='André Roberge',
author_email='[email protected]',
license='MIT',
packages=find_packages(exclude=['dist', 'build', 'tools']),
zip_safe=False)
| 29.192308 | 63 | 0.661397 | ["MIT"] | aroberge/nonstandard | setup.py | 760 | Python |
from django import template
from django.db import models
register = template.Library()
try:
''.rsplit
def rsplit(s, delim, maxsplit):
return s.rsplit(delim, maxsplit)
except AttributeError:
def rsplit(s, delim, maxsplit):
"""
Return a list of the words of the string s, scanning s
from the end. To all intents and purposes, the resulting
list of words is the same as returned by split(), except
when the optional third argument maxsplit is explicitly
specified and nonzero. When maxsplit is nonzero, at most
maxsplit number of splits - the rightmost ones - occur,
and the remainder of the string is returned as the first
element of the list (thus, the list will have at most
maxsplit+1 elements). New in version 2.4.
>>> rsplit('foo.bar.baz', '.', 0)
['foo.bar.baz']
>>> rsplit('foo.bar.baz', '.', 1)
['foo.bar', 'baz']
>>> rsplit('foo.bar.baz', '.', 2)
['foo', 'bar', 'baz']
>>> rsplit('foo.bar.baz', '.', 99)
['foo', 'bar', 'baz']
"""
assert maxsplit >= 0
if maxsplit == 0: return [s]
# the following lines perform the function, but inefficiently.
# This may be adequate for compatibility purposes
items = s.split(delim)
if maxsplit < len(items):
items[:-maxsplit] = [delim.join(items[:-maxsplit])]
return items
class FilterAdminApplistNode(template.Node):
def __init__(self, listname, varname):
self.listname = listname
self.varname = varname
def render(self, context):
all_apps = {}
for app in models.get_apps():
name = len(rsplit(app.__name__, '.', 0))>1 and rsplit(app.__name__, '.', 0)[-2] or app.__name__
all_apps[name] = app.__name__
filtered_app_list = []
for entry in context[self.listname]:
app = all_apps.get(entry['name'].lower(),'')
if not app.startswith('satchmo_'):
filtered_app_list.append(entry)
context[self.varname] = filtered_app_list
return ''
def filter_admin_app_list(parser, token):
"""Filters the list of installed apps returned by
django.contrib.admin.templatetags.adminapplist,
excluding apps installed by satchmo.
"""
tokens = token.contents.split()
if len(tokens) < 4:
        raise template.TemplateSyntaxError("'%s' tag requires two arguments" % tokens[0])
if tokens[2] != 'as':
        raise template.TemplateSyntaxError("Second argument to '%s' tag must be 'as'" % tokens[0])
return FilterAdminApplistNode(tokens[1], tokens[3])
register.tag('filter_admin_app_list', filter_admin_app_list)
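# Usage sketch (not part of the original module; the template and variable
# names below are illustrative assumptions): in a Django admin template the
# tag registered above would be loaded and applied like this:
#
#     {% load satchmo_adminapplist %}
#     {% filter_admin_app_list app_list as filtered_app_list %}
#     {% for app in filtered_app_list %}{{ app.name }}{% endfor %}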
| 37.931507 | 107 | 0.611051 | ["BSD-3-Clause"] | dokterbob/satchmo | satchmo/apps/satchmo_store/shop/templatetags/satchmo_adminapplist.py | 2,769 | Python |
# Copyright (c) 2010-2019 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.packaging.manifest import Manifest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
from ..record import Record
return Record
class TestRecord:
def test_ctor(self, Record, Number, Text, Index):
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = n + s + x
field = Record(_fields=fields)
xml = tostring(field.to_tree())
expected = """
<r>
<n v="1"/>
<n v="25"/>
<s v="2014-03-24"/>
<x v="0"/>
<x v="0"/>
<x v="0"/>
</r>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Record, Number, Text, Index):
src = """
<r>
<n v="1"/>
<x v="0"/>
<s v="2014-03-24"/>
<x v="0"/>
<n v="25"/>
<x v="0"/>
</r>
"""
node = fromstring(src)
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = [
Number(v=1),
Index(),
Text(v="2014-03-24"),
Index(),
Number(v=25),
Index(),
]
field = Record.from_tree(node)
assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
from ..record import RecordList
return RecordList
class TestRecordList:
def test_ctor(self, RecordList):
cache = RecordList()
xml = tostring(cache.to_tree())
expected = """
<pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
count="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, RecordList):
src = """
<pivotCacheRecords count="0" />
"""
node = fromstring(src)
cache = RecordList.from_tree(node)
assert cache == RecordList()
def test_write(self, RecordList):
out = BytesIO()
archive = ZipFile(out, mode="w")
manifest = Manifest()
records = RecordList()
xml = tostring(records.to_tree())
records._write(archive, manifest)
manifest.append(records)
assert archive.namelist() == [records.path[1:]]
assert manifest.find(records.mime_type)
| 23.814159 | 92 | 0.528056 | ["MIT"] | albertqee/openpyxl-3.x | openpyxl/pivot/tests/test_record.py | 2,691 | Python |
## Calculate feature importance, but focus on "meta-features" which are categorized by
## rules from different perspectives: orders, directions, powers.
## for "comprehensive methods"
from util_relaimpo import *
from util_ca import *
from util import loadNpy
def mainCA(x_name, y_name, divided_by = "", feature_names = []):
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
# INFO
print("Dataset", x_name, y_name)
print("Method: ", "CA")
print("Divided by", divided_by)
# make dataframe
if feature_names: xdf = pd.DataFrame(data=X, columns=feature_names)
else: xdf = pd.DataFrame(data=X)
# divide X
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
# if power, only use the first four terms
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature = bootstrappingCA(x_list, Y)
result_df = caResultDf(coef_boot, comb_feature)
printBootResultCA(result_df)
def mainDA(x_name, y_name, divided_by = "", feature_names = []):
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
# INFO
print("Dataset", x_name, y_name)
print("Method: ", "DA")
print("Divided by", divided_by)
# make dataframe
if feature_names:
xdf = pd.DataFrame(data=X, columns=feature_names)
else:
xdf = pd.DataFrame(data=X)
# divide X
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
# if power, only use the first four terms
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature, r2_mean, r2_ci, da_data, ave_data = bootstrappingDA(x_list, Y)
da_df = daResultDf(da_data, ave_data, r2_mean, comb_feature, feature_name=feature_names)
printBootResultCA(da_df)
if __name__ == '__main__':
# da or ca
x_prefix = ["HM", "MMA"]
y_suffix = ["MPS95", "MPSCC95", "CSDM"]
x_main = "{}_X_ang_vel.npy"
y_main = "{}_{}.npy"
divided_list = ["order", "direction", "power"]
for ys in y_suffix:
for xp in x_prefix:
for divide in divided_list:
x_name = x_main.format(xp)
y_name = y_main.format(xp, ys)
mainCA(x_name,y_name,divide,feature_names)
                mainDA(x_name,y_name,divide,feature_names)
| 37.546875 | 92 | 0.651685 | ["MIT"] | terryli710/MPS_regression | feature_importance_v4.py | 2,403 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.mgmt.containerregistry.v2018_09_01.models import (
PasswordName,
WebhookStatus,
WebhookAction,
PolicyStatus,
RunStatus,
TaskStatus,
BaseImageTriggerType
)
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
BuildTaskStatus,
OsType,
BuildStatus,
BaseImageTriggerType as BuildBaseImageTriggerType
)
from azure.cli.core.commands.parameters import (
resource_group_name_type,
get_location_type,
tags_type,
deployment_name_type,
get_resource_name_completion_list,
quotes,
get_three_state_flag,
get_enum_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._constants import (
STORAGE_RESOURCE_TYPE,
REGISTRY_RESOURCE_TYPE,
WEBHOOK_RESOURCE_TYPE,
REPLICATION_RESOURCE_TYPE,
BUILD_TASK_RESOURCE_TYPE,
BUILD_STEP_RESOURCE_TYPE,
TASK_RESOURCE_TYPE,
CLASSIC_REGISTRY_SKU,
MANAGED_REGISTRY_SKU,
)
from ._validators import (
validate_headers,
validate_build_arg,
validate_secret_build_arg,
validate_arg,
validate_secret_arg,
validate_set,
validate_set_secret
)
image_by_tag_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag'."
)
image_by_tag_or_digest_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag' or digest in the format 'name@digest'."
)
def load_arguments(self, _): # pylint: disable=too-many-statements
with self.argument_context('acr') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', arg_type=tags_type)
c.argument('registry_name', options_list=['--name', '-n'], help='The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`', completer=get_resource_name_completion_list(REGISTRY_RESOURCE_TYPE), configured_default='acr')
c.argument('storage_account_name', help='Provide the name of an existing storage account if you\'re recreating a container registry over a previous registry created storage account. Only applicable to Classic SKU.', completer=get_resource_name_completion_list(STORAGE_RESOURCE_TYPE))
c.argument('sku', help='The SKU of the container registry', arg_type=get_enum_type(MANAGED_REGISTRY_SKU + CLASSIC_REGISTRY_SKU))
c.argument('admin_enabled', help='Indicates whether the admin user is enabled', arg_type=get_three_state_flag())
c.argument('password_name', help='The name of password to regenerate', arg_type=get_enum_type(PasswordName))
c.argument('username', options_list=['--username', '-u'], help='The username used to log into a container registry')
c.argument('password', options_list=['--password', '-p'], help='The password used to log into a container registry')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('image_names', arg_type=image_by_tag_type, action='append')
c.argument('timeout', type=int, help='The timeout in seconds.')
        c.argument('docker_file_path', options_list=['--file', '-f'], help="The relative path of the docker file to the source code root folder.")
c.argument('no_logs', help="Do not show logs after successfully queuing the build.", action='store_true')
c.argument('no_wait', help="Do not wait for the run to complete and return immediately after queuing the run.", action='store_true')
c.argument('no_format', help="Indicates whether the logs should be displayed in raw format", action='store_true')
c.argument('os_type', options_list=['--os'], help='The operating system type required for the build.', arg_type=get_enum_type(OsType))
with self.argument_context('acr import') as c:
c.argument('source', help="The source identifier in the format '[registry.azurecr.io/]repository[:tag]' or '[registry.azurecr.io/]repository@digest'.")
c.argument('source_registry', options_list=['--registry', '-r'], help='The source container registry can be name, login server or resource ID of the source registry.')
c.argument('target_tags', arg_type=image_by_tag_type, action='append')
c.argument('repository', help='The repository name to do a manifest-only copy for images.', action='append')
c.argument('force', help='Overwrite the existing tag of the image to be imported.', action='store_true')
with self.argument_context('acr config content-trust') as c:
c.argument('status', help="Indicates whether content-trust is enabled or disabled.", arg_type=get_enum_type(PolicyStatus))
with self.argument_context('acr repository') as c:
c.argument('repository', help="The name of the repository.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('top', type=int, help='Limit the number of items in the results.')
c.argument('orderby', help='Order the items in the results. Default to alphabetical order of names.', arg_type=get_enum_type(['time_asc', 'time_desc']))
c.argument('detail', help='Show detailed information.', action='store_true')
c.argument('delete_enabled', help='Indicates whether delete operation is allowed.', arg_type=get_three_state_flag())
c.argument('list_enabled', help='Indicates whether this item shows in list operation results.', arg_type=get_three_state_flag())
c.argument('read_enabled', help='Indicates whether read operation is allowed.', arg_type=get_three_state_flag())
c.argument('write_enabled', help='Indicates whether write or delete operation is allowed.', arg_type=get_three_state_flag())
with self.argument_context('acr repository delete') as c:
c.argument('manifest', nargs='?', required=False, const='', default=None, help=argparse.SUPPRESS)
c.argument('tag', help=argparse.SUPPRESS)
with self.argument_context('acr repository untag') as c:
c.argument('image', arg_type=image_by_tag_type)
with self.argument_context('acr create') as c:
c.argument('registry_name', completer=None)
c.argument('deployment_name', arg_type=deployment_name_type, validator=None)
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
with self.argument_context('acr check-name') as c:
c.argument('registry_name', completer=None)
with self.argument_context('acr webhook') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('webhook_name', options_list=['--name', '-n'], help='The name of the webhook', completer=get_resource_name_completion_list(WEBHOOK_RESOURCE_TYPE))
c.argument('uri', help='The service URI for the webhook to post notifications.')
c.argument('headers', nargs='+', help="Space-separated custom headers in 'key[=value]' format that will be added to the webhook notifications. Use {} to clear existing headers.".format(quotes), validator=validate_headers)
c.argument('actions', nargs='+', help='Space-separated list of actions that trigger the webhook to post notifications.', arg_type=get_enum_type(WebhookAction))
c.argument('status', help='Indicates whether the webhook is enabled.', arg_type=get_enum_type(WebhookStatus))
c.argument('scope', help="The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means events for all repositories.")
with self.argument_context('acr webhook create') as c:
c.argument('webhook_name', completer=None)
with self.argument_context('acr replication') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('replication_name', options_list=['--name', '-n'], help='The name of the replication.', completer=get_resource_name_completion_list(REPLICATION_RESOURCE_TYPE))
with self.argument_context('acr replication create') as c:
c.argument('replication_name', help='The name of the replication. Default to the location name.', completer=None)
with self.argument_context('acr run') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('file', options_list=['--file', '-f'], help="The task template/definition file path relative to the source context.")
c.argument('values', help="The task values file path relative to the source context.")
c.argument('set_value', options_list=['--set'], help="Value in 'name[=value]' format.", action='append', validator=validate_set)
with self.argument_context('acr build') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", action='store_true')
c.argument('arg', options_list=['--build-arg'], help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', options_list=['--secret-build-arg'], help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
with self.argument_context('acr build-task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
# build task parameters
c.argument('build_task_name', options_list=['--name', '-n'], help='The name of the build task.', completer=get_resource_name_completion_list(BUILD_TASK_RESOURCE_TYPE))
c.argument('alias', help='The alternative name for build task. Default to the build task name.')
c.argument('status', help='The current status of build task.', arg_type=get_enum_type(BuildTaskStatus))
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the build.')
c.argument('repository_url', options_list=['--context', '-c'], help="The full URL to the source code repository.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('with_secure_properties', help="Indicates whether the secure properties of a build task should be returned.", action='store_true')
# build step parameters
c.argument('step_name', help='The name of the build step.', completer=get_resource_name_completion_list(BUILD_STEP_RESOURCE_TYPE))
c.argument('branch', help="The source control branch name.")
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('base_image_trigger', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BuildBaseImageTriggerType))
# build parameters
c.argument('top', help='Limit the number of latest builds in the results.')
c.argument('build_id', help='The unique build identifier.')
c.argument('build_status', help='The current status of build.', arg_type=get_enum_type(BuildStatus))
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_archive', help='Indicates whether the build should be archived.', arg_type=get_three_state_flag())
c.argument('build_arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_build_arg)
c.argument('secret_build_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_build_arg)
with self.argument_context('acr task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('task_name', options_list=['--name', '-n'], help='The name of the task.', completer=get_resource_name_completion_list(TASK_RESOURCE_TYPE))
c.argument('status', help='The current status of task.', arg_type=get_enum_type(TaskStatus))
c.argument('with_secure_properties', help="Indicates whether the secure properties of a task should be returned.", action='store_true')
# DockerBuildStep, FileTaskStep parameters
        c.argument('file', options_list=['--file', '-f'], help="The relative path of the task/docker file to the source code root folder. Task files must be suffixed with '.yaml'.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('values', help="The task values/parameters file path relative to the source context.")
# common to DockerBuildStep, FileTaskStep and RunTaskStep
c.argument('context_path', options_list=['--context', '-c'], help="The full URL to the source code repository (Requires '.git' suffix for a github repo).")
c.argument('arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
c.argument('set_value', options_list=['--set'], help="Task value in 'name[=value]' format.", action='append', validator=validate_set)
c.argument('set_secret', help="Secret task value in 'name[=value]' format.", action='append', validator=validate_set_secret)
# Source Trigger parameters
c.argument('source_trigger_name', help="The name of the source trigger.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('branch', help="The source control branch name.")
c.argument('base_image_trigger_name', help="The name of the base image trigger.")
c.argument('base_image_trigger_enabled', help="Indicates whether the base image trigger is enabled.", arg_type=get_three_state_flag())
c.argument('base_image_trigger_type', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BaseImageTriggerType))
# Run related parameters
c.argument('top', help='Limit the number of latest runs in the results.')
c.argument('run_id', help='The unique run identifier.')
c.argument('run_status', help='The current status of run.', arg_type=get_enum_type(RunStatus))
c.argument('no_archive', help='Indicates whether the run should be archived.', arg_type=get_three_state_flag())
# Run agent parameters
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the run.')
with self.argument_context('acr task create') as c:
c.argument('task_name', completer=None)
with self.argument_context('acr build-task create') as c:
c.argument('build_task_name', completer=None)
with self.argument_context('acr helm') as c:
c.argument('resource_group_name', help=argparse.SUPPRESS)
c.argument('repository', help=argparse.SUPPRESS)
c.argument('version', help='The helm chart version.')
with self.argument_context('acr helm show') as c:
c.positional('chart', help='The helm chart name.')
with self.argument_context('acr helm delete') as c:
c.positional('chart', help='The helm chart name.')
c.argument('prov', help='Only delete the provenance file.', action='store_true')
with self.argument_context('acr helm push') as c:
c.positional('chart_package', help="The helm chart package.", completer=FilesCompleter())
c.argument('force', help='Overwrite the existing chart package.', action='store_true')
| 71.495935 | 301 | 0.712702 | ["MIT"] | AndrewLane/azure-cli | src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_params.py | 17,588 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'sample'
copyright = '2020, Sample Author'
author = 'Sample Author'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
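# For example, autodoc support could be enabled like this (illustration only —
# no extensions are enabled in this sample configuration):
#
# extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]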
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 35.711538 | 79 | 0.661282 | ["MIT"] | keathmilligan/flask-jwt-refresh | docs/conf.py | 1,857 | Python |
print("Enter 1st number")
n1 = input()
print("Enter 2nd number")
n2 = input()
print("Sum of Both = ", int(n1) + int(n2))
print("Sum of Both = ", int(n1) + int(n2)) | 23.428571 | 42 | 0.609756 | [
"MIT"
] | codewithsandy/Python-Basic-Exp | 03 Variable/cal.py | 164 | Python |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from blueapps.account import views
app_name = 'account'
urlpatterns = [
url(r'^login_success/$', views.login_success, name="login_success"),
url(r'^login_page/$', views.login_page, name="login_page"),
url(r'^send_code/$', views.send_code_view, name="send_code")
]
| 25.846154 | 72 | 0.693452 | [
"MIT"
] | wangzishuo111/bk_prometheus | blueapps/account/urls.py | 336 | Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
from six import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
def entangle(func):
def decorated(func, *args, **kw):
output = func(*args, **kw)
            dst = StringIO()  # six.StringIO is the class itself, not a module
result = pisa.CreatePDF(
                StringIO(output),
dst
)
if not result.err:
cherrypy.response.headers["Content-Type"] = content_type
if filename:
cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
output = dst.getvalue()
return output
return decorated
return weak_signature_decorator(entangle)
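# Hedged usage sketch (not part of the original module; the class and filename are
# illustrative only): `to_pdf` wraps a controller method that returns an HTML
# string, responds with the pisa-converted PDF, and sets the Content-Type /
# Content-Disposition headers configured above.
#
#     class Report(object):
#         @to_pdf(filename="report.pdf")
#         def index(self):
#             return "<html><body><h1>Report</h1></body></html>"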
topdf = to_pdf
| 32.4 | 105 | 0.663237 | [
"Apache-2.0"
] | trib3/xhtml2pdf | xhtml2pdf/turbogears.py | 1,458 | Python |
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
"""
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
"""
Checks courseraprogramming's connectivity to the coursera.org API servers
"""
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
    except Exception:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
    except Exception:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet or args.quiet == 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
def display_auth_cache(args):
'''
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
'''
oauth2_instance = oauth2.build_oauth2(args)
if not args.quiet or args.quiet == 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print("Auth token: %s" % token)
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print("Auth token expires in: %s seconds." % expires_in)
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print("Refresh token: %s" % refresh)
else:
print("No refresh token found.")
def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the configure subcommand. (authentication / etc.)
parser_config = subparsers.add_parser(
'configure',
help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
# Local subsubcommand of the grade subcommand
parser_check_auth = config_subparsers.add_parser(
'check-auth',
help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser(
'display-auth-cache',
help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument(
'--no-truncate',
action='store_true',
help='Do not truncate the keys [DANGER!!]')
return parser_config
| 33.208333 | 78 | 0.658971 | [
"Apache-2.0"
] | andres-zartab/courseraprogramming | courseraprogramming/commands/config.py | 3,985 | Python |
#-*-coding:utf-8-*-
import numpy as np
import cv2
import gc
from tqdm import tqdm
def watershed(opencv_image):
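    """
    Return a float64 mask with the same height/width as the input that is 255 on
    the `top_n_label` largest foreground components and 0 elsewhere.
    Despite the name, this uses binary thresholding plus connected-component
    labelling (cv2.connectedComponents) rather than cv2.watershed: the BGR input
    is converted to grayscale, pure-black pixels are remapped to 255 so they drop
    out, an inverse threshold at 225 marks the remaining dark pixels as
    foreground, and only the largest components are kept.
    """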
top_n_label = 2
gray = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2GRAY)
print('convert gray end')
gray[gray == 0] = 255
_, cvt_img = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
del(gray)
print('threshold end')
ret, markers = cv2.connectedComponents(cvt_img)
print('connectedComponents end')
label_dict = dict()
for i in tqdm(range(ret)):
if i == 0:
continue
label_dict[i] = len(markers[markers == i])
sort_label_list = sorted(label_dict.items(), key=lambda item: item[1], reverse=True)
print('label end')
result = np.zeros(markers.shape)
for ins in tqdm(sort_label_list[:top_n_label]):
result[markers == ins[0]] = 255
print(result.shape)
print('top n label end')
del(ret)
del(markers)
del(sort_label_list)
del(label_dict)
del(cvt_img)
return result | 21.446809 | 88 | 0.641865 | [
"MIT"
] | essential2189/Cell-Based-Model | preprocess/watershed.py | 1,008 | Python |
import torch
import torch.nn.functional as F
def spatial_argmax(logit):
weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)
return torch.stack(((weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),
(weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)
class CNNClassifier(torch.nn.Module):
class Block(torch.nn.Module):
def __init__(self, n_input, n_output, kernel_size=3, stride=2):
super().__init__()
self.c1 = torch.nn.Conv2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
stride=stride, bias=False)
self.c2 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
self.c3 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
self.b1 = torch.nn.BatchNorm2d(n_output)
self.b2 = torch.nn.BatchNorm2d(n_output)
self.b3 = torch.nn.BatchNorm2d(n_output)
self.skip = torch.nn.Conv2d(n_input, n_output, kernel_size=1, stride=stride)
def forward(self, x):
return F.relu(self.b3(self.c3(F.relu(self.b2(self.c2(F.relu(self.b1(self.c1(x)))))))) + self.skip(x))
def __init__(self, layers=[16, 32, 32, 32], n_output_channels=2, kernel_size=3):
super().__init__()
L = []
c = 3
for l in layers:
L.append(self.Block(c, l, kernel_size, 2))
c = l
self.network = torch.nn.Sequential(*L)
self.classifier = torch.nn.Linear(c, n_output_channels)
def forward(self, x):
z = self.network(x)
return self.classifier(z.mean(dim=[2, 3]))
class Planner_reg(torch.nn.Module):
def __init__(self, channels=[16, 32, 32, 32]):
super().__init__()
conv_block = lambda c, h: [torch.nn.BatchNorm2d(h), torch.nn.Conv2d(h, c, 5, 2, 2), torch.nn.ReLU(True)]
h, _conv = 3, []
for c in channels:
_conv += conv_block(c, h)
h = c
self._conv = torch.nn.Sequential(*_conv, torch.nn.Conv2d(h, 1, 1))
# self.classifier = torch.nn.Linear(h, 2)
# self.classifier = torch.nn.Conv2d(h, 1, 1)
def forward(self, img):
"""
        Predict the aim point in image coordinates, given a SuperTuxKart image.
@img: (B,3,96,128)
return (B,2)
"""
x = self._conv(img)
return spatial_argmax(x[:, 0])
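# Hedged sanity-check sketch (not part of the original file): builds the aim-point
# regressor and confirms the (B, 2) output documented above on a dummy batch.
def _planner_reg_shape_check(batch_size=4):
    net = Planner_reg()
    dummy = torch.zeros(batch_size, 3, 96, 128)  # SuperTuxKart-sized frames
    out = net(dummy)
    assert out.shape == (batch_size, 2)  # one (x, y) aim point per image, in [-1, 1]
    return out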
class FCN(torch.nn.Module):
class UpBlock(torch.nn.Module):
def __init__(self, n_input, n_output, kernel_size=3, stride=2):
super().__init__()
self.c1 = torch.nn.ConvTranspose2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
stride=stride, output_padding=1)
def forward(self, x):
return F.relu(self.c1(x))
def __init__(self, layers=[16, 32, 64, 128], n_output_channels=5, kernel_size=3, use_skip=True):
super().__init__()
self.input_mean = torch.Tensor([0.3521554, 0.30068502, 0.28527516])
self.input_std = torch.Tensor([0.18182722, 0.18656468, 0.15938024])
c = 3
self.use_skip = use_skip
self.n_conv = len(layers)
skip_layer_size = [3] + layers[:-1]
for i, l in enumerate(layers):
self.add_module('conv%d' % i, CNNClassifier.Block(c, l, kernel_size, 2))
c = l
for i, l in list(enumerate(layers))[::-1]:
self.add_module('upconv%d' % i, self.UpBlock(c, l, kernel_size, 2))
c = l
if self.use_skip:
c += skip_layer_size[i]
self.classifier = torch.nn.Conv2d(c, n_output_channels, 1)
def forward(self, x):
z = (x - self.input_mean[None, :, None, None].to(x.device)) / self.input_std[None, :, None, None].to(x.device)
up_activation = []
for i in range(self.n_conv):
# Add all the information required for skip connections
up_activation.append(z)
z = self._modules['conv%d'%i](z)
for i in reversed(range(self.n_conv)):
z = self._modules['upconv%d'%i](z)
# Fix the padding
z = z[:, :, :up_activation[i].size(2), :up_activation[i].size(3)]
# Add the skip connection
if self.use_skip:
z = torch.cat([z, up_activation[i]], dim=1)
return self.classifier(z)
model_factory = {
'cnn': CNNClassifier,
'fcn': FCN,
'planner_reg':Planner_reg
}
def save_model(model):
from torch import save
from os import path
for n, m in model_factory.items():
if isinstance(model, m):
return save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), '%s.th' % n))
raise ValueError("model type '%s' not supported!" % str(type(model)))
def load_model(model):
from torch import load
from os import path
r = model_factory[model]()
r.load_state_dict(load(path.join(path.dirname(path.abspath(__file__)), '%s.th' % model), map_location='cpu'))
return r
| 38.874074 | 120 | 0.59013 | [
"MIT"
] | aljubrmj/CS342-Final-Project | planner/regressor/models.py | 5,248 | Python |
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import chat.routing
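# Hedged sketch (not part of this file): `chat.routing.websocket_urlpatterns` used
# below is typically a list of URL patterns mapping websocket paths to a consumer;
# the path and consumer name here are assumptions for illustration only.
#
#     from django.urls import re_path
#     from . import consumers
#
#     websocket_urlpatterns = [
#         re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer),
#     ]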
application = ProtocolTypeRouter({
# Empty for now (http->django views is added by default)
'websocket': AuthMiddlewareStack(
URLRouter(
chat.routing.websocket_urlpatterns
)
),
}) | 26.615385 | 60 | 0.722543 | [
"MIT"
] | aanu1143/chat-app | chat_app/routing.py | 346 | Python |
config = {
"interfaces": {
"google.monitoring.v3.NotificationChannelService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"ListNotificationChannelDescriptors": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannelDescriptor": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"ListNotificationChannels": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"GetNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"UpdateNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"DeleteNotificationChannel": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| 38.847458 | 67 | 0.442845 | [
"Apache-2.0"
] | Random-Trees/google-cloud-python | monitoring/google/cloud/monitoring_v3/gapic/notification_channel_service_client_config.py | 2,292 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
try:
from moto import mock_cloudformation
except ImportError:
mock_cloudformation = None
@unittest.skipIf(mock_cloudformation is None, 'moto package not present')
class TestAWSCloudFormationHook(unittest.TestCase):
def setUp(self):
self.hook = AWSCloudFormationHook(aws_conn_id='aws_default')
def create_stack(self, stack_name):
timeout = 15
template_body = json.dumps(
{'Resources': {"myResource": {"Type": "emr", "Properties": {"myProperty": "myPropertyValue"}}}}
)
self.hook.create_stack(
stack_name=stack_name,
params={
'TimeoutInMinutes': timeout,
'TemplateBody': template_body,
'Parameters': [{'ParameterKey': 'myParam', 'ParameterValue': 'myParamValue'}],
},
)
@mock_cloudformation
def test_get_conn_returns_a_boto3_connection(self):
self.assertIsNotNone(self.hook.get_conn().describe_stacks())
@mock_cloudformation
def test_get_stack_status(self):
stack_name = 'my_test_get_stack_status_stack'
stack_status = self.hook.get_stack_status(stack_name=stack_name)
self.assertIsNone(stack_status)
self.create_stack(stack_name)
stack_status = self.hook.get_stack_status(stack_name=stack_name)
self.assertEqual(stack_status, 'CREATE_COMPLETE', 'Incorrect stack status returned.')
@mock_cloudformation
def test_create_stack(self):
stack_name = 'my_test_create_stack_stack'
self.create_stack(stack_name)
stacks = self.hook.get_conn().describe_stacks()['Stacks']
self.assertGreater(len(stacks), 0, 'CloudFormation should have stacks')
matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
self.assertEqual(len(matching_stacks), 1, f'stack with name {stack_name} should exist')
stack = matching_stacks[0]
self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE', 'Stack should be in status CREATE_COMPLETE')
@mock_cloudformation
def test_delete_stack(self):
stack_name = 'my_test_delete_stack_stack'
self.create_stack(stack_name)
self.hook.delete_stack(stack_name=stack_name)
stacks = self.hook.get_conn().describe_stacks()['Stacks']
matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
self.assertEqual(len(matching_stacks), 0, f'stack with name {stack_name} should not exist')
| 38.05618 | 110 | 0.708001 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 312day/airflow | tests/providers/amazon/aws/hooks/test_cloud_formation.py | 3,387 | Python |
"""sls.py
An implementation of the robust adaptive controller.
Both FIR SLS version with CVXPY and the common
Lyapunov relaxation.
"""
import numpy as np
import cvxpy as cvx
import utils
import logging
import math
import scipy.linalg
from abc import ABC, abstractmethod
from adaptive import AdaptiveMethod
class SLSInfeasibleException(Exception):
def __init__(self, msg=None):
super().__init__(msg)
def make_state_space_controller(Phi_x, Phi_u, n, p):
"""
Converts FIR transfer functions to a state
space realization of the dynamic controller,
mapping states to inputs.
"""
assert len(Phi_x.shape) == 2
assert len(Phi_u.shape) == 2
assert Phi_x.shape[1] == n
assert Phi_u.shape[1] == n
nT, _ = Phi_x.shape
pT, _ = Phi_u.shape
assert (nT % n) == 0
assert (pT % p) == 0
T = nT // n
assert T == (pT // p)
# See Theorem 2 of:
# https://nikolaimatni.github.io/papers/sls_state_space.pdf
Z = np.diag(np.ones(n*(T-2)), k=-n)
assert Z.shape == ((T-1)*n, (T-1)*n)
calI = np.zeros((n*(T-1), n))
calI[:n, :] = np.eye(n)
Rhat = np.hstack([Phi_x[n*k:n*(k+1), :] for k in range(1, T)])
Mhat = np.hstack([Phi_u[p*k:p*(k+1), :] for k in range(1, T)])
M1 = Phi_u[:p, :]
R1 = Phi_x[:n, :]
A = Z - calI.dot(Rhat)
B = -calI
C = M1.dot(Rhat) - Mhat
D = M1
return (A, B, C, D)
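# Hedged shape illustration (not part of the original file): the realization above
# is purely mechanical, so arbitrary (not necessarily feasible) FIR coefficients
# suffice to check the controller dimensions.
def _controller_shape_example(n=3, p=2, T=5):
    Phi_x = np.random.randn(T * n, n)
    Phi_u = np.random.randn(T * p, n)
    A, B, C, D = make_state_space_controller(Phi_x, Phi_u, n, p)
    assert A.shape == ((T - 1) * n, (T - 1) * n)
    assert B.shape == ((T - 1) * n, n)
    assert C.shape == (p, (T - 1) * n) and D.shape == (p, n)
    return A, B, C, D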
def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
"""
Gets the squared infinite horizon LQR cost for system
(A,B) in feedback with the controller defined by Phi_x
and Phi_u.
"""
n, p = B.shape
A_k, B_k, C_k, D_k = make_state_space_controller(Phi_x, Phi_u, n, p)
A_cl = np.block([
[A + B.dot(D_k), B.dot(C_k)],
[B_k, A_k]
])
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
C_cl = np.block([
[Q_sqrt, np.zeros((n, A_k.shape[0]))],
[R_sqrt.dot(D_k), R_sqrt.dot(C_k)]
])
B_cl = np.vstack((np.eye(n), np.zeros((A_k.shape[0], n))))
P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
return (sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))
def _assert_AB_consistent(A, B):
assert len(A.shape) == 2 and A.shape[0] == A.shape[1]
assert len(B.shape) == 2
assert A.shape[0] == B.shape[0]
def _assert_ABCD_consistent(A, B, C, D):
_assert_AB_consistent(A, B)
assert len(C.shape) == 2
assert len(D.shape) == 2
assert C.shape[1] == A.shape[0]
assert C.shape[0] == D.shape[0]
assert D.shape[1] == B.shape[1]
def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
"""Apply an LTI controller K = (A_k,B_k,C_k,D_k)
Roll the true system (A, B) forward with the SS realization of the LTI
controller given. horizon is the length of the trajectory, and
sigma_w is the stddev of the Gaussian process noise.
"""
if rng is None:
rng = np.random
_assert_AB_consistent(A, B)
A_k, B_k, C_k, D_k = K
_assert_ABCD_consistent(A_k, B_k, C_k, D_k)
state_dim, input_dim = B.shape
psi_dim = A_k.shape[0]
assert C_k.shape[0] == input_dim
assert B_k.shape[1] == state_dim
if x0 is None:
x0 = np.zeros((state_dim,))
if psi0 is None:
psi0 = np.zeros((psi_dim,))
assert x0.shape == (state_dim,)
assert psi0.shape == (psi_dim,)
process = sigma_w*rng.normal(size=(horizon, state_dim))
xt = np.array(x0)
psit = np.array(psi0)
states = np.zeros((horizon+1, state_dim))
inputs = np.zeros((horizon, input_dim))
controller_states = np.zeros((horizon+1, psi_dim))
states[0, :] = x0
controller_states[0, :] = psi0
for t in range(horizon):
psitp1 = A_k.dot(psit) + B_k.dot(xt)
ut = C_k.dot(psit) + D_k.dot(xt)
xtp1 = A.dot(xt) + B.dot(ut) + process[t]
inputs[t, :] = ut
states[t+1, :] = xtp1
controller_states[t+1, :] = psitp1
xt = xtp1
psit = psitp1
return states, inputs, controller_states
def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
"""
Solves the SLS synthesis problem for length T FIR filters
using CVXPY
"""
assert len(Q.shape) == 2 and Q.shape[0] == Q.shape[1]
assert len(R.shape) == 2 and R.shape[0] == R.shape[1]
assert len(Ahat.shape) == 2 and Ahat.shape[0] == Ahat.shape[1]
assert len(Bhat.shape) == 2 and Bhat.shape[0] == Ahat.shape[0]
assert Q.shape[0] == Ahat.shape[0]
assert R.shape[0] == Bhat.shape[1]
assert eps_A >= 0
assert eps_B >= 0
assert T >= 1
assert gamma > 0 and gamma < 1
assert alpha > 0 and alpha < 1
if logger is None:
logger = logging.getLogger(__name__)
n, p = Bhat.shape
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
# Phi_x = \sum_{k=1}^{T} Phi_x[k] z^{-k}
Phi_x = cvx.Variable(T*n, n, name="Phi_x")
# Phi_u = \sum_{k=1}^{T} Phi_u[k] z^{-k}
Phi_u = cvx.Variable(T*p, n, name="Phi_u")
# htwo_cost
htwo_cost = cvx.Variable(name="htwo_cost")
# subspace constraint:
# [zI - Ah, -Bh] * [Phi_x; Phi_u] = I
#
# Note that:
# z Phi_x = \sum_{k=0}^{T-1} Phi_x[k+1] z^{-k}
#
# This means that:
# 1) Phi_x[1] = I
# 2) Phi_x[k+1] = Ah*Phi_x[k] + Bh*Phi_u[k] for k=1, ..., T-1
# 3) Ah*Phi_x[T] + Bh*Phi_u[T] = 0
constr = []
constr.append(Phi_x[:n, :] == np.eye(n))
for k in range(T-1):
constr.append(Phi_x[n*(k+1):n*(k+1+1), :] == Ahat*Phi_x[n*k:n*(k+1), :] + Bhat*Phi_u[p*k:p*(k+1), :])
constr.append(Ahat*Phi_x[n*(T-1):, :] + Bhat*Phi_u[p*(T-1):, :] == 0)
# H2 constraint:
# By Parseval's identity, this is equal (up to constants) to
#
# frobenius_norm(
# [ Q_sqrt*Phi_x[1] ;
# ...
# Q_sqrt*Phi_x[T] ;
# R_sqrt*Phi_u[1] ;
# ...
# R_sqrt*Phi_u[T]
# ]
# ) <= htwo_cost
# TODO: what is the best way to implement this in cvxpy?
constr.append(
cvx.norm(
cvx.bmat(
[[Q_sqrt*Phi_x[n*k:n*(k+1), :]] for k in range(T)] +
[[R_sqrt*Phi_u[p*k:p*(k+1), :]] for k in range(T)]),
'fro') <= htwo_cost)
# H-infinity constraint
#
# We want to enforce ||H(z)||_inf <= gamma, where
#
# H(z) = \sum_{k=1}^{T} [ mult_x * Phi_x[k] ; mult_u * Phi_u[k] ] z^{-k}.
#
# Here, each of the FIR coefficients has size (n+p) x n. Since n+p>n, we enforce
# the constraint on the transpose system H^T(z). The LMI constraint
# for this comes from Theorem 5.8 of
# Positive trigonometric polynomials and signal processing applications (2007) by
# B. Dumitrescu.
#
# Here is a table to map the variable names in the text to this program
#
# Text Program Comment
# -------------------------------------------------------------
# p n Output dim
# m n+p Input dim
# n T FIR horizon
# p(n+1) n(T+1) SDP variable size
# p(n+1) x m n(T+1) x (n+p)
mult_x = eps_A/np.sqrt(alpha)
mult_u = eps_B/np.sqrt(1-alpha)
# Hbar has size (T+1)*n x (n+p)
Hbar = cvx.bmat(
[[np.zeros((n, n)), np.zeros((n, p))]] +
[[mult_x*Phi_x[n*k:n*(k+1), :].T, mult_u*Phi_u[p*k:p*(k+1), :].T] for k in range(T)])
Q = cvx.Semidef(n*(T+1), name="Q")
# Constraint (5.44)
# Case k==0: the block diag of Q has to sum to gamma^2 * eye(n)
gamma_sq = gamma ** 2
constr.append(
sum([Q[n*t:n*(t+1), n*t:n*(t+1)] for t in range(T+1)]) == gamma_sq*np.eye(n))
# Case k>0: the block off-diag of Q has to sum to zero
for k in range(1, T+1):
constr.append(
sum([Q[n*t:n*(t+1), n*(t+k):n*(t+1+k)] for t in range(T+1-k)]) == np.zeros((n, n)))
# Constraint (5.45)
constr.append(
cvx.bmat([
[Q, Hbar],
[Hbar.T, np.eye(n+p)]]) == cvx.Semidef(n*(T+1) + (n+p)))
prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
prob.solve(solver=cvx.SCS)
if prob.status == cvx.OPTIMAL:
logging.debug("successfully solved!")
Phi_x = np.array(Phi_x.value)
Phi_u = np.array(Phi_u.value)
return (True, prob.value, Phi_x, Phi_u)
else:
logging.debug("could not solve: {}".format(prob.status))
return (False, None, None, None)
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
"""
Solves the common Lyapunov relaxation to the robust
synthesis problem.
Taken from
lstd-lqr/blob/master/code/policy_iteration.ipynb
learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m
"""
if logger is None:
logger = logging.getLogger(__name__)
d, p = B.shape
X = cvx.Symmetric(d) # inverse Lyapunov function
Z = cvx.Variable(p, d) # -K*X
W_11 = cvx.Symmetric(d)
W_12 = cvx.Variable(d, p)
W_22 = cvx.Symmetric(p)
alph = cvx.Variable() # scalar for tuning the H_inf constraint
constraints = []
# H2 cost: trace(W)=H2 cost
mat1 = cvx.bmat([
[X, X, Z.T],
[X, W_11, W_12],
[Z, W_12.T, W_22]])
constraints.append(mat1 == cvx.Semidef(2*d + p))
# H_infinity constraint
mat2 = cvx.bmat([
[X-np.eye(d), (A*X+B*Z), np.zeros((d, d)), np.zeros((d, p))],
[(X*A.T+Z.T*B.T), X, eps_A*X, eps_B*Z.T],
[np.zeros((d, d)), eps_A*X, alph*(tau**2)*np.eye(d), np.zeros((d, p))],
[np.zeros((p, d)), eps_B*Z, np.zeros((p, d)), (1-alph)*(tau**2)*np.eye(p)]])
constraints.append(mat2 == cvx.Semidef(3*d + p))
# constrain alpha to be in [0,1]:
constraints.append(alph >= 0)
constraints.append(alph <= 1)
# Solve!
objective = cvx.Minimize(cvx.trace(Q*W_11) + cvx.trace(R*W_22))
prob = cvx.Problem(objective, constraints)
try:
obj = prob.solve(solver=cvx.MOSEK)
except cvx.SolverError:
logger.warn("SolverError encountered")
return (False, None, None, None)
if prob.status == cvx.OPTIMAL:
logging.debug("common_lyapunov: found optimal solution")
X_value = np.array(X.value)
P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
# NOTE: the K returned here is meant to be used
# as A + BK **NOT** A - BK
K_value = np.array(Z.value).dot(P_value)
return (True, obj, P_value, K_value)
else:
logging.debug("common_lyapunov: could not solve (status={})".format(prob.status))
return (False, None, None, None)
class SLS_Implementation(ABC):
@abstractmethod
def open(self):
"""
"""
pass
@abstractmethod
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
"""
"""
pass
class SLS_CVXPY(SLS_Implementation):
def open(self):
pass
def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
return sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger)
class SLS_FIRStrategy(AdaptiveMethod):
"""Adaptive control based on FIR truncated SLS
"""
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier,
truncation_length, actual_error_multiplier,
use_gamma=0.98, sls_impl=None):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
# TODO(stephentu):
# the truncation length should grow with time, but for now
# we keep it constant
# Additionally, gamma should be searched over as an optimization
        # variable. For now, we fix the value.
# Finally, the optimization problem should be modified
# to involve the variable V as in https://arxiv.org/abs/1805.09388
self._truncation_length = truncation_length
self._actual_error_multiplier = actual_error_multiplier
self._sls_impl = sls_impl if sls_impl is not None else SLS_CVXPY()
self._logger = logging.getLogger(__name__)
self._use_gamma = use_gamma
self._controller_state = None
def _get_logger(self):
return self._logger
def reset(self, rng):
super().reset(rng)
self._sls_impl.open()
self._midway_infeasible = 0
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
# if SLS is not feasible, we fallback to the current
# control policy if it exists, otherwise we throw an SLSInfeasibleException
if self._use_gamma is None:
# bisect for gamma
logger.info("_design_controller(epoch={}): bisecting for gamma".format(epoch_id))
INF = 1e12
def fn(gamma):
is_feasible, obj, _, _ = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma, alpha=0.5, logger=logger)
if not is_feasible:
return INF
else:
return 1/(1-gamma) * obj
disp_lvl = 3 if logger.isEnabledFor(logging.DEBUG) else 0
gamma_star, _, error_flag, _ = scipy.optimize.fminbound(fn, 0, 1 - 1e-5, xtol=1e-2, maxfun=20, full_output=True, disp=disp_lvl)
if error_flag:
logger.warn("_design_controller(epoch={}): maxfun exceeded during bisection, gamma_star={}".format(epoch_id, gamma_star))
logger.info("_design_controller(epoch={}): using gamma_star={}".format(epoch_id, gamma_star))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=gamma_star, alpha=0.5, logger=logger)
else:
assert self._use_gamma > 0 and self._use_gamma < 1
logger.info("_design_controller(epoch={}): using fixed gamma={}".format(epoch_id, self._use_gamma))
is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
effective_eps_A, effective_eps_B, self._truncation_length,
gamma=self._use_gamma, alpha=0.5, logger=logger)
if not is_feasible:
logger.info("_design_controller(epoch={}): SLS was not feasible...".format(epoch_id))
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._Phi_x = Phi_x
self._Phi_u = Phi_u
self._current_K = make_state_space_controller(Phi_x, Phi_u, self._n, self._p)
# compute the infinite horizon cost of this controller
Jnom = h2_squared_norm(self._A_star,
self._B_star,
self._Phi_x,
self._Phi_u,
self._Q,
self._R,
self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
A_k, B_k, C_k, D_k = self._current_K
psit = self._controller_state
if psit is None:
psit = np.zeros((A_k.shape[0],))
psitp1 = A_k.dot(psit) + B_k.dot(state)
ctrl_input = C_k.dot(psit) + D_k.dot(state)
self._controller_state = psitp1
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
class SLS_CommonLyapunovStrategy(AdaptiveMethod):
"""
Adaptive control based on common Lyapunov relaxation
of robust control problem
"""
def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
sigma_explore, reg, epoch_multiplier, actual_error_multiplier):
super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
self._sigma_explore = sigma_explore
self._reg = reg
self._epoch_multiplier = epoch_multiplier
self._actual_error_multiplier = actual_error_multiplier
self._logger = logging.getLogger(__name__)
self._midway_infeasible = 0
def reset(self, rng):
super().reset(rng)
self._midway_infeasible = 0
def _get_logger(self):
return self._logger
def _design_controller(self, states, inputs, transitions, rng):
logger = self._get_logger()
Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
effective_eps_A = self._actual_error_multiplier * eps_A
effective_eps_B = self._actual_error_multiplier * eps_B
epoch_id = self._epoch_idx + 1 if self._has_primed else 0
logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
is_feasible, _, _, K = sls_common_lyapunov(
Anom, Bnom, self._Q, self._R,
effective_eps_A, effective_eps_B, tau=0.999, logger=logger)
if not is_feasible:
try:
self._current_K
# keep current controller
assert self._current_K is not None
logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
self._midway_infeasible += 1
except AttributeError:
logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
raise SLSInfeasibleException()
else:
logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
self._current_K = K
# compute the infinite horizon cost of this controller
Jnom = utils.LQR_cost(self._A_star, self._B_star, self._current_K, self._Q, self._R, self._sigma_w)
return Anom, Bnom, Jnom
def _should_terminate_epoch(self):
if (self._iteration_within_epoch_idx >=
self._epoch_multiplier * (self._epoch_idx + 1)):
logger = self._get_logger()
logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
return True
else:
return False
def _get_input(self, state, rng):
rng = self._get_rng(rng)
ctrl_input = self._current_K.dot(state)
sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
return ctrl_input + explore_input
def _main():
import examples
A_star, B_star = examples.unstable_laplacian_dynamics()
# define costs
Q = 1e-3 * np.eye(3)
R = np.eye(3)
# initial controller
_, K_init = utils.dlqr(A_star, B_star, 1e-3*np.eye(3), np.eye(3))
rng = np.random
env = SLS_FIRStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
truncation_length=12,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
env = SLS_CommonLyapunovStrategy(Q=Q,
R=R,
A_star=A_star,
B_star=B_star,
sigma_w=1,
sigma_explore=0.1,
reg=1e-5,
epoch_multiplier=10,
actual_error_multiplier=1,
rls_lam=None)
env.reset(rng)
env.prime(250, K_init, 0.5, rng)
for idx in range(500):
env.step(rng)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
np.set_printoptions(linewidth=200)
_main()
| 33.166176 | 207 | 0.579479 | [
"MIT"
] | DuttaAbhigyan/robust-adaptive-lqr | python/sls.py | 22,553 | Python |
# partesanato/__init__.py
| 13.5 | 26 | 0.777778 | [
"MIT"
] | edgarbs1998/partesanato-server | src/partesanato/__init__.py | 27 | Python |
from .__main__ import *
| 12.5 | 24 | 0.72 | [
"MIT"
] | kokonut27/Vyxal | vyxal/__init__.py | 25 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
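def _parse_spec_examples():
    # Hedged illustration (not part of the original script): parse_spec yields a
    # 16-byte IPv6-mapped address plus a port, falling back to the default port.
    host4, port4 = parse_spec('1.2.3.4:8333', 17771)
    assert len(host4) == 16 and port4 == 8333
    host6, port6 = parse_spec('[2001:db8::1]', 17771)
    assert len(host6) == 16 and port6 == 17771
    return host4, host6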
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 17771)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 27771)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.52518 | 99 | 0.583752 | [
"MIT"
] | 777-project/777 | contrib/seeds/generate-seeds.py | 4,382 | Python |
"""
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
from __future__ import division
import math
import random
from itertools import count
from neat.config import ConfigParameter, DefaultClassConfig
from neat.math_util import mean
from neat.six_util import iteritems, itervalues
# TODO: Provide some sort of optional cross-species performance criteria, which
# are then used to control stagnation and possibly the mutation rate
# configuration. This scheme should be adaptive so that species do not evolve
# to become "cautious" and only make very slow progress.
class DefaultReproduction(DefaultClassConfig):
"""
Implements the default NEAT-python reproduction scheme:
explicit fitness sharing with fixed-time species stagnation.
"""
@classmethod
def parse_config(cls, param_dict):
return DefaultClassConfig(param_dict,
[ConfigParameter('elitism', int, 0),
ConfigParameter('survival_threshold', float, 0.2),
ConfigParameter('min_species_size', int, 2)])
def __init__(self, config, reporters, stagnation):
# pylint: disable=super-init-not-called
self.reproduction_config = config
self.reporters = reporters
self.genome_indexer = count(1)
self.stagnation = stagnation
self.ancestors = {}
def create_new(self, genome_type, genome_config, num_genomes):
new_genomes = {}
for i in range(num_genomes):
key = next(self.genome_indexer)
g = genome_type(key)
g.configure_new(genome_config)
new_genomes[key] = g
self.ancestors[key] = tuple()
return new_genomes
@staticmethod
def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
"""Compute the proper number of offspring per species (proportional to fitness)."""
af_sum = sum(adjusted_fitness)
spawn_amounts = []
for af, ps in zip(adjusted_fitness, previous_sizes):
if af_sum > 0:
s = max(min_species_size, af / af_sum * pop_size)
else:
s = min_species_size
d = (s - ps) * 0.5
c = int(round(d))
spawn = ps
if abs(c) > 0:
spawn += c
elif d > 0:
spawn += 1
elif d < 0:
spawn -= 1
spawn_amounts.append(spawn)
# Normalize the spawn amounts so that the next generation is roughly
# the population size requested by the user.
total_spawn = sum(spawn_amounts)
norm = pop_size / total_spawn
spawn_amounts = [max(min_species_size, int(round(n * norm))) for n in spawn_amounts]
return spawn_amounts
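    # Hedged illustration (not from the original source): compute_spawn is a pure
    # staticmethod, so it can be sanity-checked in isolation, e.g.
    #     DefaultReproduction.compute_spawn(
    #         adjusted_fitness=[0.75, 0.25], previous_sizes=[10, 10],
    #         pop_size=20, min_species_size=2)
    # returns [12, 8] under Python 3 rounding: the fitter species is allotted more
    # offspring while the normalized total stays close to pop_size.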
def reproduce(self, config, species, pop_size, generation):
"""
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
# TODO: I don't like this modification of the species and stagnation objects,
# because it requires internal knowledge of the objects.
# Filter out stagnated species, collect the set of non-stagnated
# species members, and compute their average adjusted fitness.
# The average adjusted fitness scheme (normalized to the interval
# [0, 1]) allows the use of negative fitness values without
# interfering with the shared fitness scheme.
all_fitnesses = []
remaining_species = []
for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
remaining_species.append(stag_s)
# The above comment was not quite what was happening - now getting fitnesses
# only from members of non-stagnated species.
# No species left.
if not remaining_species:
species.species = {}
return {} # was []
# Find minimum/maximum fitness across the entire population, for use in
# species adjusted fitness computation.
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
# Do not allow the fitness range to be zero, as we divide by it below.
# TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
fitness_range = max(1.0, max_fitness - min_fitness)
for afs in remaining_species:
# Compute adjusted fitness.
msf = mean([m.fitness for m in itervalues(afs.members)])
af = (msf - min_fitness) / fitness_range
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses) # type: float
self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
# Compute the number of new members for each species in the new generation.
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
# Isn't the effective min_species_size going to be max(min_species_size,
# self.reproduction_config.elitism)? That would probably produce more accurate tracking
        # of population sizes and relative fitnesses... doing so here. TODO: document.
min_species_size = max(min_species_size,self.reproduction_config.elitism)
# TODO: THIS PROBABLY CAUSES POPULATION TO DOUBLE. Is an array of 2s of len ~232 here but ~ 112 in original
# TODO: BECAUSE OF ADJUSTED_FITNESSES ALSO BEING 232 INSTEAD OF 112
# TODO: 232 is number of species.. so probably rather an effect of increased population, not the cause...
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
pop_size, min_species_size)
new_population = {}
species.species = {}
for spawn, s in zip(spawn_amounts, remaining_species):
# If elitism is enabled, each species always at least gets to retain its elites.
spawn = max(spawn, self.reproduction_config.elitism)
assert spawn > 0
# The species has at least one member for the next generation, so retain it.
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
# Sort members in order of descending fitness.
old_members.sort(reverse=True, key=lambda x: x[1].fitness)
# Transfer elites to new generation.
if self.reproduction_config.elitism > 0:
for i, m in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if spawn <= 0:
continue
# Only use the survival threshold fraction to use as parents for the next generation.
repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
len(old_members)))
# Use at least two parents no matter what the threshold fraction result is.
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
# Randomly choose parents and produce the number of offspring allotted to the species.
while spawn > 0:
spawn -= 1
parent1_id, parent1 = random.choice(old_members)
parent2_id, parent2 = random.choice(old_members)
# Note that if the parents are not distinct, crossover will produce a
# genetically identical clone of the parent (but with a different ID).
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population
| 43.598958 | 115 | 0.632899 | [
"Apache-2.0"
] | Osrip/Novelty_criticality_PyTorch-NEAT | neat_local/reproduction.py | 8,371 | Python |
import logging
from functools import reduce
from typing import Text, Set, Dict, Optional, List, Union, Any
import os
import rasa.shared.data
import rasa.shared.utils.io
from rasa.shared.core.domain import Domain
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.importers import utils
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.utils.common import mark_as_experimental_feature
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
logger = logging.getLogger(__name__)
class MultiProjectImporter(TrainingDataImporter):
def __init__(
self,
config_file: Text,
domain_path: Optional[Text] = None,
training_data_paths: Optional[Union[List[Text], Text]] = None,
project_directory: Optional[Text] = None,
):
self.config = rasa.shared.utils.io.read_model_configuration(config_file)
if domain_path:
self._domain_paths = [domain_path]
else:
self._domain_paths = []
self._story_paths = []
self._e2e_story_paths = []
self._nlu_paths = []
self._imports = []
self._additional_paths = training_data_paths or []
self._project_directory = project_directory or os.path.dirname(config_file)
self._init_from_dict(self.config, self._project_directory)
extra_nlu_files = rasa.shared.data.get_data_files(
training_data_paths, rasa.shared.data.is_nlu_file
)
extra_story_files = rasa.shared.data.get_data_files(
training_data_paths, YAMLStoryReader.is_stories_file
)
self._story_paths += extra_story_files
self._nlu_paths += extra_nlu_files
logger.debug(
"Selected projects: {}".format("".join([f"\n-{i}" for i in self._imports]))
)
mark_as_experimental_feature(feature_name="MultiProjectImporter")
def get_config_file_for_auto_config(self) -> Optional[Text]:
"""Returns config file path for auto-config only if there is a single one."""
return None
def _init_from_path(self, path: Text) -> None:
if os.path.isfile(path):
self._init_from_file(path)
elif os.path.isdir(path):
self._init_from_directory(path)
def _init_from_file(self, path: Text) -> None:
path = os.path.abspath(path)
if os.path.exists(path) and rasa.shared.data.is_config_file(path):
config = rasa.shared.utils.io.read_config_file(path)
parent_directory = os.path.dirname(path)
self._init_from_dict(config, parent_directory)
else:
rasa.shared.utils.io.raise_warning(
f"'{path}' does not exist or is not a valid config file."
)
def _init_from_dict(self, _dict: Dict[Text, Any], parent_directory: Text) -> None:
imports = _dict.get("imports") or []
imports = [os.path.join(parent_directory, i) for i in imports]
# clean out relative paths
imports = [os.path.abspath(i) for i in imports]
# remove duplication
import_candidates = []
for i in imports:
if i not in import_candidates and not self._is_explicitly_imported(i):
import_candidates.append(i)
self._imports.extend(import_candidates)
# import config files from paths which have not been processed so far
for p in import_candidates:
self._init_from_path(p)
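    # Hedged illustration (not from the source): the `imports` key consumed above
    # lists other project directories (or their config files) relative to the
    # importing config file; the paths below are assumptions for illustration only.
    #
    #     imports:
    #       - ../core_bot
    #       - ../faq_skill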
def _is_explicitly_imported(self, path: Text) -> bool:
return not self.no_skills_selected() and self.is_imported(path)
def _init_from_directory(self, path: Text) -> None:
for parent, _, files in os.walk(path, followlinks=True):
for file in files:
full_path = os.path.join(parent, file)
if not self.is_imported(full_path):
# Check next file
continue
if YAMLStoryReader.is_test_stories_file(full_path):
self._e2e_story_paths.append(full_path)
elif Domain.is_domain_file(full_path):
self._domain_paths.append(full_path)
elif rasa.shared.data.is_nlu_file(full_path):
self._nlu_paths.append(full_path)
elif YAMLStoryReader.is_stories_file(full_path):
self._story_paths.append(full_path)
elif rasa.shared.data.is_config_file(full_path):
self._init_from_file(full_path)
def no_skills_selected(self) -> bool:
return not self._imports
def training_paths(self) -> Set[Text]:
"""Returns the paths which should be searched for training data."""
# only include extra paths if they are not part of the current project directory
training_paths = {
i
for i in self._imports
if not self._project_directory or self._project_directory not in i
}
if self._project_directory:
training_paths.add(self._project_directory)
return training_paths
def is_imported(self, path: Text) -> bool:
"""
Checks whether a path is imported by a skill.
Args:
path: File or directory path which should be checked.
Returns:
`True` if path is imported by a skill, `False` if not.
"""
absolute_path = os.path.abspath(path)
return (
self.no_skills_selected()
or self._is_in_project_directory(absolute_path)
or self._is_in_additional_paths(absolute_path)
or self._is_in_imported_paths(absolute_path)
)
def _is_in_project_directory(self, path: Text) -> bool:
if os.path.isfile(path):
parent_directory = os.path.abspath(os.path.dirname(path))
return parent_directory == self._project_directory
else:
return path == self._project_directory
def _is_in_additional_paths(self, path: Text) -> bool:
included = path in self._additional_paths
if not included and os.path.isfile(path):
parent_directory = os.path.abspath(os.path.dirname(path))
included = parent_directory in self._additional_paths
return included
def _is_in_imported_paths(self, path: Text) -> bool:
return any(
[rasa.shared.utils.io.is_subdirectory(path, i) for i in self._imports]
)
def get_domain(self) -> Domain:
"""Retrieves model domain (see parent class for full docstring)."""
domains = [Domain.load(path) for path in self._domain_paths]
return reduce(
lambda merged, other: merged.merge(other), domains, Domain.empty()
)
def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph:
"""Retrieves training stories / rules (see parent class for full docstring)."""
return utils.story_graph_from_paths(
self._story_paths, self.get_domain(), exclusion_percentage
)
def get_conversation_tests(self) -> StoryGraph:
"""Retrieves conversation test stories (see parent class for full docstring)."""
return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain())
def get_config(self) -> Dict:
"""Retrieves model config (see parent class for full docstring)."""
return self.config
def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
"""Retrieves NLU training data (see parent class for full docstring)."""
return utils.training_data_from_paths(self._nlu_paths, language)
| 38.549505 | 88 | 0.651856 | [
"Apache-2.0"
] | mukulbalodi/rasa | rasa/shared/importers/multi_project.py | 7,787 | Python |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
This is typically what you want if you need to support Integer or
objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
:param fail_safe: If set to a function exceptions are ignored when pickling
and if a exception happens the function is called and the return value
is used as the value for the object that caused the error
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
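# Hedged illustration (not part of the original module): with ``keys=True`` a
# non-string dictionary key is preserved in the payload instead of being coerced
# through repr(); decoding with the matching ``keys=True`` (via jsonpickle.decode,
# assumed to be available from the top-level package) restores the original key.
def _example_encode_int_keys():
    return encode({1: 'one'}, keys=True)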
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
# maximum amount of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
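    # Added sketch (not part of the original source): with make_refs enabled,
    # a repeated or cyclic object is emitted as a py/id reference instead of
    # being flattened again, e.g. something along the lines of:
    #   lst = [1, 2]; lst.append(lst)
    #   encode(lst)  ->  '[1, 2, {"py/id": 0}]'   # exact id value is an assumption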
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
                # A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
| 35.005355 | 88 | 0.575548 | [
"BSD-3-Clause"
] | JHP4911/jsonpickle | jsonpickle/pickler.py | 26,149 | Python |
import os
import numpy as np
import tensorflow as tf
from utils.data_reader import H5DataLoader, H53DDataLoader
from utils.img_utils import imsave
from utils import ops
"""
This module builds a standard U-NET for semantic segmentation.
If you want a VAE built with PixelDCL, please see this code:
https://github.com/HongyangGao/UVAE
"""
class PixelDCN(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
self.def_params()
if not os.path.exists(conf.modeldir):
os.makedirs(conf.modeldir)
if not os.path.exists(conf.logdir):
os.makedirs(conf.logdir)
if not os.path.exists(conf.sampledir):
os.makedirs(conf.sampledir)
self.configure_networks()
self.train_summary = self.config_summary('train')
self.valid_summary = self.config_summary('valid')
def def_params(self):
self.data_format = 'NHWC'
if self.conf.data_type == '3D':
self.conv_size = (3, 3, 3)
self.pool_size = (2, 2, 2)
self.axis, self.channel_axis = (1, 2, 3), 4
self.input_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width, self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.depth, self.conf.height,
self.conf.width]
else:
self.conv_size = (3, 3)
self.pool_size = (2, 2)
self.axis, self.channel_axis = (1, 2), 3
self.input_shape = [
self.conf.batch, self.conf.height, self.conf.width,
self.conf.channel]
self.output_shape = [
self.conf.batch, self.conf.height, self.conf.width]
def configure_networks(self):
self.build_network()
optimizer = tf.train.AdamOptimizer(self.conf.learning_rate)
self.train_op = optimizer.minimize(self.loss_op, name='train_op')
tf.set_random_seed(self.conf.random_seed)
self.sess.run(tf.global_variables_initializer())
trainable_vars = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=0)
self.writer = tf.summary.FileWriter(self.conf.logdir, self.sess.graph)
def build_network(self):
self.inputs = tf.placeholder(
tf.float32, self.input_shape, name='inputs')
self.labels = tf.placeholder(
tf.int64, self.output_shape, name='labels')
self.predictions = self.inference(self.inputs)
self.cal_loss()
def cal_loss(self):
one_hot_labels = tf.one_hot(
self.labels, depth=self.conf.class_num,
axis=self.channel_axis, name='labels/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_labels, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_preds = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.labels, self.decoded_preds,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
# weights = tf.cast(
# tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
# tf.int32, name='m_iou/weights')
weights = tf.cast(
tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
tf.int64, name='m_iou/weights')
labels = tf.multiply(self.labels, weights, name='m_iou/mul')
self.m_iou, self.miou_op = tf.metrics.mean_iou(
self.labels, self.decoded_preds, self.conf.class_num,
weights, name='m_iou/m_ious')
def config_summary(self, name):
summarys = []
summarys.append(tf.summary.scalar(name+'/loss', self.loss_op))
summarys.append(tf.summary.scalar(name+'/accuracy', self.accuracy_op))
if name == 'valid' and self.conf.data_type == '2D':
summarys.append(
tf.summary.image(name+'/input', self.inputs, max_outputs=100))
summarys.append(
tf.summary.image(
name+'/annotation',
tf.cast(tf.expand_dims(self.labels, -1),
tf.float32), max_outputs=100))
summarys.append(
tf.summary.image(
name+'/prediction',
tf.cast(tf.expand_dims(self.decoded_preds, -1),
tf.float32), max_outputs=100))
summary = tf.summary.merge(summarys)
return summary
def inference(self, inputs):
outputs = inputs
down_outputs = []
for layer_index in range(self.conf.network_depth-1):
            is_first = layer_index == 0
name = 'down%s' % layer_index
outputs = self.build_down_block(
outputs, name, down_outputs, is_first)
outputs = self.build_bottom_block(outputs, 'bottom')
for layer_index in range(self.conf.network_depth-2, -1, -1):
            is_final = layer_index == 0
name = 'up%s' % layer_index
down_inputs = down_outputs[layer_index]
outputs = self.build_up_block(
outputs, down_inputs, name, is_final)
return outputs
def build_down_block(self, inputs, name, down_outputs, first=False):
out_num = self.conf.start_channel_num if first else 2 * \
inputs.shape[self.channel_axis].value
conv1 = ops.conv(inputs, out_num, self.conv_size,
name+'/conv1', self.conf.data_type)
conv2 = ops.conv(conv1, out_num, self.conv_size,
name+'/conv2', self.conf.data_type)
down_outputs.append(conv2)
pool = ops.pool(conv2, self.pool_size, name +
'/pool', self.conf.data_type)
return pool
def build_bottom_block(self, inputs, name):
out_num = inputs.shape[self.channel_axis].value
conv1 = ops.conv(
inputs, 2*out_num, self.conv_size, name+'/conv1',
self.conf.data_type)
conv2 = ops.conv(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
return conv2
def build_up_block(self, inputs, down_inputs, name, final=False):
out_num = inputs.shape[self.channel_axis].value
conv1 = self.deconv_func()(
inputs, out_num, self.conv_size, name+'/conv1',
self.conf.data_type, action=self.conf.action)
conv1 = tf.concat(
[conv1, down_inputs], self.channel_axis, name=name+'/concat')
conv2 = self.conv_func()(
conv1, out_num, self.conv_size, name+'/conv2', self.conf.data_type)
        out_num = self.conf.class_num if final else out_num // 2
conv3 = ops.conv(
conv2, out_num, self.conv_size, name+'/conv3', self.conf.data_type,
not final)
return conv3
def deconv_func(self):
return getattr(ops, self.conf.deconv_name)
def conv_func(self):
return getattr(ops, self.conf.conv_name)
def save_summary(self, summary, step):
print('---->summarizing', step)
self.writer.add_summary(summary, step)
def train(self):
if self.conf.reload_step > 0:
self.reload(self.conf.reload_step)
if self.conf.data_type == '2D':
train_reader = H5DataLoader(
self.conf.data_dir+self.conf.train_data)
valid_reader = H5DataLoader(
self.conf.data_dir+self.conf.valid_data)
else:
train_reader = H53DDataLoader(
self.conf.data_dir+self.conf.train_data, self.input_shape)
valid_reader = H53DDataLoader(
self.conf.data_dir+self.conf.valid_data, self.input_shape)
for epoch_num in range(self.conf.max_step+1):
if epoch_num and epoch_num % self.conf.test_interval == 0:
inputs, labels = valid_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, summary = self.sess.run(
[self.loss_op, self.valid_summary], feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
print('----testing loss', loss)
if epoch_num and epoch_num % self.conf.summary_interval == 0:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _, summary = self.sess.run(
[self.loss_op, self.train_op, self.train_summary],
feed_dict=feed_dict)
self.save_summary(summary, epoch_num+self.conf.reload_step)
else:
inputs, labels = train_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.labels: labels}
loss, _ = self.sess.run(
[self.loss_op, self.train_op], feed_dict=feed_dict)
print('----training loss', loss)
if epoch_num and epoch_num % self.conf.save_interval == 0:
self.save(epoch_num+self.conf.reload_step)
def test(self):
print('---->testing ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
self.sess.run(tf.local_variables_initializer())
count = 0
losses = []
accuracies = []
m_ious = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
loss, accuracy, m_iou, _ = self.sess.run(
[self.loss_op, self.accuracy_op, self.m_iou, self.miou_op],
feed_dict=feed_dict)
print('values----->', loss, accuracy, m_iou)
count += 1
losses.append(loss)
accuracies.append(accuracy)
m_ious.append(m_iou)
print('Loss: ', np.mean(losses))
print('Accuracy: ', np.mean(accuracies))
print('M_iou: ', m_ious[-1])
def predict(self):
print('---->predicting ', self.conf.test_step)
if self.conf.test_step > 0:
self.reload(self.conf.test_step)
else:
print("please set a reasonable test_step")
return
if self.conf.data_type == '2D':
test_reader = H5DataLoader(
self.conf.data_dir+self.conf.test_data, False)
else:
test_reader = H53DDataLoader(
self.conf.data_dir+self.conf.test_data, self.input_shape)
predictions = []
while True:
inputs, labels = test_reader.next_batch(self.conf.batch)
if inputs.shape[0] < self.conf.batch:
break
feed_dict = {self.inputs: inputs, self.labels: labels}
predictions.append(self.sess.run(
self.decoded_preds, feed_dict=feed_dict))
print('----->saving predictions')
for index, prediction in enumerate(predictions):
for i in range(prediction.shape[0]):
imsave(prediction[i], self.conf.sampledir +
str(index*prediction.shape[0]+i)+'.png')
def save(self, step):
print('---->saving', step)
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
self.saver.save(self.sess, checkpoint_path, global_step=step)
def reload(self, step):
checkpoint_path = os.path.join(
self.conf.modeldir, self.conf.model_name)
model_path = checkpoint_path+'-'+str(step)
if not os.path.exists(model_path+'.meta'):
print('------- no such checkpoint', model_path)
return
self.saver.restore(self.sess, model_path)
| 42.606061 | 79 | 0.585744 | [
"MIT"
] | HongyangGao/DilatedPixelCNN | network.py | 12,654 | Python |
#!/usr/bin/python
script = r"""
MD Dir1
MD Dir1\Dir2
CD Dir1\Dir2
MF file2.dat
MD Dir3
CD Dir3
MF file3.dat
MD Dir4
CD Dir4
MF file4.dat
MD Dir5
CD Dir5
MF file5.dat
CD C:
DELTREE Dir1
MD Dir2
CD Dir2
MF a.txt
MF b.txt
CD C:
MD Dir3
COPY Dir2 Dir3
"""
expected = r"""
C:
|_DIR2
| |_a.txt
| |_b.txt
|
|_DIR3
|_DIR2
|_a.txt
|_b.txt
"""
import test
test.run(script, expected)
| 8.955556 | 26 | 0.635236 | [
"BSD-2-Clause"
] | artemkin/sandbox | fme/tests/test_deltree_copy.py | 403 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from .. import tag
def schedule_conv2d_hwcn(outs):
"""Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
sch = tvm.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
"""Schedule conv2d_hwcn"""
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, "shared", [B])
WW = sch.cache_read(W, "shared", [B])
AL = sch.cache_read(AA, "local", [B])
WL = sch.cache_read(WW, "local", [B])
if B.op in sch.outputs:
Out = B
BL = sch.cache_write(Out, "local")
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope("local")
BL = B
tile = 8
num_thread = 8
block_factor = tile * num_thread
step = 8
vthread = 2
block_x = tvm.thread_axis("blockIdx.x")
block_y = tvm.thread_axis("blockIdx.y")
block_z = tvm.thread_axis("blockIdx.z")
thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
thread_y = tvm.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = tvm.thread_axis((0, vthread), "vthread", name="vx")
thread_yz = tvm.thread_axis((0, vthread), "vthread", name="vy")
hi, wi, fi, ni = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
by, fi = sch[Out].split(fi, factor=block_factor)
bx, ni = sch[Out].split(ni, factor=block_factor)
tyz, fi = sch[Out].split(fi, nparts=vthread)
txz, ni = sch[Out].split(ni, nparts=vthread)
ty, fi = sch[Out].split(fi, nparts=num_thread)
tx, ni = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
# Schedule BL local write
sch[BL].compute_at(sch[Out], tx)
yi, xi, fi, ni = sch[BL].op.axis
ry, rx, rc = sch[BL].op.reduce_axis
rco, rci = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = sch[AA].op.axis
ty, ci = sch[AA].split(ci, nparts=num_thread)
tx, ni = sch[AA].split(ni, nparts=num_thread)
_, ni = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
# Schedule for W's shared memory load
yi, xi, ci, fi = sch[WW].op.axis
ty, ci = sch[WW].split(ci, nparts=num_thread)
tx, fi = sch[WW].split(fi, nparts=num_thread)
_, fi = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
"""Traverse operators from computation graph"""
if tag.is_broadcast(operator.tag):
if operator not in sch.outputs:
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
elif operator.tag == 'conv2d_hwcn':
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if isinstance(W.op, tvm.tensor.ComputeOp) and 'dilate' in W.op.tag:
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch
| 37.957746 | 79 | 0.601299 | [
"Apache-2.0"
] | CortexFoundation/tvm-cvm | topi/python/topi/cuda/conv2d_hwcn.py | 5,390 | Python |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import copy
import dict_utils
import file_extract
from file_extract import AutoParser
import numbers
import operator
import optparse
import os
import re
import six
import string
import sys
import StringIO
def get_uleb128_byte_size(value):
byte_size = 1
while value >= 0x80:
byte_size += 1
value >>= 7
return byte_size
def get_uleb128p1_byte_size(value):
return get_uleb128_byte_size(value + 1)
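# Added examples (not in the original source):
#   get_uleb128_byte_size(0x7f)  -> 1    (fits in a single 7-bit group)
#   get_uleb128_byte_size(0x80)  -> 2
#   get_uleb128p1_byte_size(-1)  -> 1    (encodes value + 1 == 0)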
# ----------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------
MAGIC = "dex\n"
ENDIAN_CONSTANT = 0x12345678
REVERSE_ENDIAN_CONSTANT = 0x78563412
NO_INDEX = 0xffffffff
INT4_MIN = -8
INT4_MAX = 7
INT8_MIN = -128
INT8_MAX = 127
INT16_MIN = -32768
INT16_MAX = 32767
INT24_MIN = -8388608
INT24_MAX = 8388607
INT32_MIN = -2147483648
INT32_MAX = 2147483647
UINT4_MAX = 15
UINT8_MAX = 255
UINT16_MAX = 65535
UINT32_MAX = 4294967295
# ----------------------------------------------------------------------
# access_flags definitions
# ----------------------------------------------------------------------
ACC_PUBLIC = 0x1
ACC_PRIVATE = 0x2
ACC_PROTECTED = 0x4
ACC_STATIC = 0x8
ACC_FINAL = 0x10
ACC_SYNCHRONIZED = 0x20
ACC_VOLATILE = 0x40
ACC_BRIDGE = 0x40
ACC_TRANSIENT = 0x80
ACC_VARARGS = 0x80
ACC_NATIVE = 0x100
ACC_INTERFACE = 0x200
ACC_ABSTRACT = 0x400
ACC_STRICT = 0x800
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
ACC_CONSTRUCTOR = 0x10000
ACC_DECLARED_SYNCHRONIZED = 0x20000
# ----------------------------------------------------------------------
# Value formats
# ----------------------------------------------------------------------
VALUE_BYTE = 0x00
VALUE_SHORT = 0x02
VALUE_CHAR = 0x03
VALUE_INT = 0x04
VALUE_LONG = 0x06
VALUE_FLOAT = 0x10
VALUE_DOUBLE = 0x11
VALUE_METHOD_TYPE = 0x15
VALUE_METHOD_HANDLE = 0x16
VALUE_STRING = 0x17
VALUE_TYPE = 0x18
VALUE_FIELD = 0x19
VALUE_METHOD = 0x1a
VALUE_ENUM = 0x1b
VALUE_ARRAY = 0x1c
VALUE_ANNOTATION = 0x1d
VALUE_NULL = 0x1e
VALUE_BOOLEAN = 0x1f
class ValueFormat(dict_utils.Enum):
enum = {
'VALUE_BYTE': VALUE_BYTE,
'VALUE_SHORT': VALUE_SHORT,
'VALUE_CHAR': VALUE_CHAR,
'VALUE_INT': VALUE_INT,
'VALUE_LONG': VALUE_LONG,
'VALUE_FLOAT': VALUE_FLOAT,
'VALUE_DOUBLE': VALUE_DOUBLE,
'VALUE_METHOD_TYPE': VALUE_METHOD_TYPE,
'VALUE_METHOD_HANDLE': VALUE_METHOD_HANDLE,
'VALUE_STRING': VALUE_STRING,
'VALUE_TYPE': VALUE_TYPE,
'VALUE_FIELD': VALUE_FIELD,
'VALUE_METHOD': VALUE_METHOD,
'VALUE_ENUM': VALUE_ENUM,
'VALUE_ARRAY': VALUE_ARRAY,
'VALUE_ANNOTATION': VALUE_ANNOTATION,
'VALUE_NULL': VALUE_NULL,
'VALUE_BOOLEAN': VALUE_BOOLEAN,
}
def __init__(self, data):
dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# ----------------------------------------------------------------------
# Type Codes
# ----------------------------------------------------------------------
TYPE_HEADER_ITEM = 0x0000 # size = 0x70
TYPE_STRING_ID_ITEM = 0x0001 # size = 0x04
TYPE_TYPE_ID_ITEM = 0x0002 # size = 0x04
TYPE_PROTO_ID_ITEM = 0x0003 # size = 0x0c
TYPE_FIELD_ID_ITEM = 0x0004 # size = 0x08
TYPE_METHOD_ID_ITEM = 0x0005 # size = 0x08
TYPE_CLASS_DEF_ITEM = 0x0006 # size = 0x20
TYPE_CALL_SITE_ID_ITEM = 0x0007 # size = 0x04
TYPE_METHOD_HANDLE_ITEM = 0x0008 # size = 0x08
TYPE_MAP_LIST = 0x1000 # size = 4 + (item.size * 12)
TYPE_TYPE_LIST = 0x1001 # size = 4 + (item.size * 2)
TYPE_ANNOTATION_SET_REF_LIST = 0x1002 # size = 4 + (item.size * 4)
TYPE_ANNOTATION_SET_ITEM = 0x1003 # size = 4 + (item.size * 4)
TYPE_CLASS_DATA_ITEM = 0x2000
TYPE_CODE_ITEM = 0x2001
TYPE_STRING_DATA_ITEM = 0x2002
TYPE_DEBUG_INFO_ITEM = 0x2003
TYPE_ANNOTATION_ITEM = 0x2004
TYPE_ENCODED_ARRAY_ITEM = 0x2005
TYPE_ANNOTATIONS_DIRECTORY_ITEM = 0x2006
class TypeCode(dict_utils.Enum):
enum = {
'TYPE_HEADER_ITEM': TYPE_HEADER_ITEM,
'TYPE_STRING_ID_ITEM': TYPE_STRING_ID_ITEM,
'TYPE_TYPE_ID_ITEM': TYPE_TYPE_ID_ITEM,
'TYPE_PROTO_ID_ITEM': TYPE_PROTO_ID_ITEM,
'TYPE_FIELD_ID_ITEM': TYPE_FIELD_ID_ITEM,
'TYPE_METHOD_ID_ITEM': TYPE_METHOD_ID_ITEM,
'TYPE_CLASS_DEF_ITEM': TYPE_CLASS_DEF_ITEM,
'TYPE_CALL_SITE_ID_ITEM': TYPE_CALL_SITE_ID_ITEM,
'TYPE_METHOD_HANDLE_ITEM': TYPE_METHOD_HANDLE_ITEM,
'TYPE_MAP_LIST': TYPE_MAP_LIST,
'TYPE_TYPE_LIST': TYPE_TYPE_LIST,
'TYPE_ANNOTATION_SET_REF_LIST': TYPE_ANNOTATION_SET_REF_LIST,
'TYPE_ANNOTATION_SET_ITEM': TYPE_ANNOTATION_SET_ITEM,
'TYPE_CLASS_DATA_ITEM': TYPE_CLASS_DATA_ITEM,
'TYPE_CODE_ITEM': TYPE_CODE_ITEM,
'TYPE_STRING_DATA_ITEM': TYPE_STRING_DATA_ITEM,
'TYPE_DEBUG_INFO_ITEM': TYPE_DEBUG_INFO_ITEM,
'TYPE_ANNOTATION_ITEM': TYPE_ANNOTATION_ITEM,
'TYPE_ENCODED_ARRAY_ITEM': TYPE_ENCODED_ARRAY_ITEM,
'TYPE_ANNOTATIONS_DIRECTORY_ITEM': TYPE_ANNOTATIONS_DIRECTORY_ITEM,
}
def __init__(self, data):
dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
def dump(self, prefix=None, f=sys.stdout, print_name=True,
parent_path=None):
f.write(str(self))
# ----------------------------------------------------------------------
# Method Handle Type Codes
# ----------------------------------------------------------------------
METHOD_HANDLE_TYPE_STATIC_PUT = 0x00
METHOD_HANDLE_TYPE_STATIC_GET = 0x01
METHOD_HANDLE_TYPE_INSTANCE_PUT = 0x02
METHOD_HANDLE_TYPE_INSTANCE_GET = 0x03
METHOD_HANDLE_TYPE_INVOKE_STATIC = 0x04
METHOD_HANDLE_TYPE_INVOKE_INSTANCE = 0x05
class MethodHandleTypeCode(dict_utils.Enum):
enum = {
'METHOD_HANDLE_TYPE_STATIC_PUT': METHOD_HANDLE_TYPE_STATIC_PUT,
'METHOD_HANDLE_TYPE_STATIC_GET': METHOD_HANDLE_TYPE_STATIC_GET,
'METHOD_HANDLE_TYPE_INSTANCE_PUT': METHOD_HANDLE_TYPE_INSTANCE_PUT,
'METHOD_HANDLE_TYPE_INSTANCE_GET': METHOD_HANDLE_TYPE_INSTANCE_GET,
'METHOD_HANDLE_TYPE_INVOKE_STATIC': METHOD_HANDLE_TYPE_INVOKE_STATIC,
'METHOD_HANDLE_TYPE_INVOKE_INSTANCE':
METHOD_HANDLE_TYPE_INVOKE_INSTANCE,
}
def __init__(self, data):
dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + ' '
def escape(c):
global PRINTABLE
if c in PRINTABLE:
return c
c = ord(c)
if c <= 0xff:
return '\\x' + '%02.2x' % (c)
    elif c <= 0xffff:
return '\\u' + '%04.4x' % (c)
else:
return '\\U' + '%08.8x' % (c)
def print_string(s, f):
f.write('"')
f.write(''.join(escape(c) for c in s))
f.write('"')
def print_version(version, f):
if len(version) == 3:
f.write("%u.%u.%u" % (version[0], version[1], version[2]))
def print_hex_bytes(data, f):
for byte in data:
f.write("%2.2x" % (byte))
def print_endian(value, f):
f.write("%#8.8x" % (value))
if value == ENDIAN_CONSTANT:
f.write(" (ENDIAN_CONSTANT)")
elif value == REVERSE_ENDIAN_CONSTANT:
f.write(" (REVERSE_ENDIAN_CONSTANT)")
def is_zero(value):
if value == 0:
return None
return 'value should be zero, bit is %s' % (str(value))
def is_dex_magic(magic):
if magic == MAGIC:
return None
return 'value should be %s but is %s' % (MAGIC, magic)
def hex_escape(s):
return ''.join(escape(c) for c in s)
# ----------------------------------------------------------------------
# encoded_field
# ----------------------------------------------------------------------
class encoded_field(AutoParser):
items = [
{'type': 'uleb', 'name': 'field_idx', 'format': '%u'},
{'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
@classmethod
def fixup_indexes(cls, items):
for i in range(1, len(items)):
items[i].field_idx += items[i - 1].field_idx
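            # Added note: field_idx values are delta-encoded in the dex file,
            # so e.g. stored deltas [3, 1, 2] become absolute indexes
            # [3, 4, 6] after this fixup.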
@classmethod
def get_table_header(self):
return 'FIELD FLAGS\n'
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# encoded_method
# ----------------------------------------------------------------------
class encoded_method(AutoParser):
items = [
{'type': 'uleb', 'name': 'method_idx', 'format': '%u'},
{'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
{'type': 'uleb', 'name': 'code_off', 'format': '0x%8.8x'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
@classmethod
def fixup_indexes(cls, items):
for i in range(1, len(items)):
items[i].method_idx += items[i - 1].method_idx
@classmethod
def get_table_header(self):
return 'METHOD FLAGS\n'
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# class_data_item
# ----------------------------------------------------------------------
class class_data_item(AutoParser):
items = [
{'type': 'uleb', 'name': 'static_fields_size'},
{'type': 'uleb', 'name': 'instance_fields_size'},
{'type': 'uleb', 'name': 'direct_methods_size'},
{'type': 'uleb', 'name': 'virtual_methods_size'},
{'class': encoded_field, 'name': 'static_fields',
'attr_count': 'static_fields_size', 'flat': True},
{'class': encoded_field, 'name': 'instance_fields',
'attr_count': 'instance_fields_size', 'flat': True},
{'class': encoded_method, 'name': 'direct_methods',
'attr_count': 'direct_methods_size', 'flat': True},
{'class': encoded_method, 'name': 'virtual_methods',
'attr_count': 'virtual_methods_size', 'flat': True},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
encoded_field.fixup_indexes(self.static_fields)
encoded_field.fixup_indexes(self.instance_fields)
encoded_method.fixup_indexes(self.direct_methods)
encoded_method.fixup_indexes(self.virtual_methods)
@classmethod
def create_empty(cls):
data = file_extract.FileExtract(StringIO.StringIO('\0\0\0\0'), '=')
return class_data_item(data)
# ----------------------------------------------------------------------
# class_def_item
# ----------------------------------------------------------------------
class class_def_item(AutoParser):
items = [
{'type': 'u32', 'name': 'class_idx', 'align': 4},
{'type': 'u32', 'name': 'access_flags'},
{'type': 'u32', 'name': 'superclass_idx'},
{'type': 'u32', 'name': 'interfaces_off'},
{'type': 'u32', 'name': 'source_file_idx'},
{'type': 'u32', 'name': 'annotations_off'},
{'type': 'u32', 'name': 'class_data_off'},
{'type': 'u32', 'name': 'static_values_off'},
{'class': class_data_item, 'name': 'class_data',
'attr_offset': 'class_data_off',
'condition': lambda item, data: item.class_data_off != 0,
'dump': False,
'default': class_data_item.create_empty()},
]
def __init__(self, data, context):
AutoParser.__init__(self, self.items, data, context)
@classmethod
def get_table_header(self):
return ('CLASS ACCESS SUPERCLASS INTERFACES SOURCE'
' ANNOTATION CLASS_DATA STATIC_VALUES\n')
def get_dump_flat(self):
return True
def find_encoded_method_by_code_off(self, code_off):
for encoded_method in self.class_data.direct_methods:
if encoded_method.code_off == code_off:
return encoded_method
for encoded_method in self.class_data.virtual_methods:
if encoded_method.code_off == code_off:
return encoded_method
return None
# ----------------------------------------------------------------------
# try_item
# ----------------------------------------------------------------------
class try_item(AutoParser):
items = [
{'type': 'u32', 'name': 'start_addr'},
{'type': 'u16', 'name': 'insn_count'},
{'type': 'u16', 'name': 'handler_off'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# encoded_type_addr_pair
# ----------------------------------------------------------------------
class encoded_type_addr_pair(AutoParser):
items = [
{'type': 'uleb', 'name': 'type_idx', 'format': '%#8.8x'},
{'type': 'uleb', 'name': 'addr', 'format': '%#8.8x'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# encoded_catch_handler
# ----------------------------------------------------------------------
class encoded_catch_handler(AutoParser):
items = [
{'type': 'sleb', 'name': 'size'},
{'class': encoded_type_addr_pair, 'name': 'handlers',
'attr_count': 'size', 'attr_count_fixup': abs},
{'type': 'uleb', 'name': 'catch_all_addr', 'default': 0,
'condition': lambda item, data: item.size <= 0},
]
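    # Added note: per the dex format, a non-positive `size` means the typed
    # handlers (abs(size) of them) are followed by a catch-all address, which
    # is why `catch_all_addr` above is conditional on size <= 0.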
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# encoded_catch_handler_list
# ----------------------------------------------------------------------
class encoded_catch_handler_list(AutoParser):
items = [
{'type': 'uleb', 'name': 'size'},
{'class': encoded_catch_handler, 'name': 'list', 'attr_count': 'size'}
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_flat(self):
return True
def print_instructions(insns, prefix, flat, f):
f.write('\n')
code_units = CodeUnits(insns)
dex_inst = DexInstruction()
while code_units.index_is_valid():
dex_inst.decode(code_units)
if prefix:
f.write(prefix)
f.write(' ')
dex_inst.dump()
DBG_END_SEQUENCE = 0x00
DBG_ADVANCE_PC = 0x01
DBG_ADVANCE_LINE = 0x02
DBG_START_LOCAL = 0x03
DBG_START_LOCAL_EXTENDED = 0x04
DBG_END_LOCAL = 0x05
DBG_RESTART_LOCAL = 0x06
DBG_SET_PROLOGUE_END = 0x07
DBG_SET_EPILOGUE_BEGIN = 0x08
DBG_SET_FILE = 0x09
DBG_FIRST_SPECIAL = 0x0a
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
class DBG(dict_utils.Enum):
enum = {
'DBG_END_SEQUENCE': DBG_END_SEQUENCE,
'DBG_ADVANCE_PC': DBG_ADVANCE_PC,
'DBG_ADVANCE_LINE': DBG_ADVANCE_LINE,
'DBG_START_LOCAL': DBG_START_LOCAL,
'DBG_START_LOCAL_EXTENDED': DBG_START_LOCAL_EXTENDED,
'DBG_END_LOCAL': DBG_END_LOCAL,
'DBG_RESTART_LOCAL': DBG_RESTART_LOCAL,
'DBG_SET_PROLOGUE_END': DBG_SET_PROLOGUE_END,
'DBG_SET_EPILOGUE_BEGIN': DBG_SET_EPILOGUE_BEGIN,
'DBG_SET_FILE': DBG_SET_FILE
}
def __init__(self, data):
dict_utils.Enum.__init__(self, data.get_uint8(), self.enum)
def dump(self, prefix=None, f=sys.stdout, print_name=True,
parent_path=None):
f.write(str(self))
class debug_info_op(AutoParser):
items = [
{'class': DBG, 'name': 'op'},
{'switch': 'op', 'cases': {
DBG_ADVANCE_PC: [
{'type': 'uleb', 'name': 'addr_offset'}
],
DBG_ADVANCE_LINE: [
{'type': 'sleb', 'name': 'line_offset'},
],
DBG_START_LOCAL: [
{'type': 'uleb', 'name': 'register_num'},
{'type': 'ulebp1', 'name': 'name_idx'},
{'type': 'ulebp1', 'name': 'type_idx'},
],
DBG_START_LOCAL_EXTENDED: [
{'type': 'uleb', 'name': 'register_num'},
{'type': 'ulebp1', 'name': 'name_idx'},
{'type': 'ulebp1', 'name': 'type_idx'},
{'type': 'ulebp1', 'name': 'sig_idx'},
],
DBG_END_LOCAL: [
{'type': 'uleb', 'name': 'register_num'}
],
DBG_RESTART_LOCAL: [
{'type': 'uleb', 'name': 'register_num'}
],
DBG_SET_FILE: [
{'type': 'ulebp1', 'name': 'name_idx'}
],
'default': []
}
}
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
if self.op >= DBG_FIRST_SPECIAL:
adjusted_opcode = int(self.op) - DBG_FIRST_SPECIAL
line_offset = DBG_LINE_BASE + (adjusted_opcode % DBG_LINE_RANGE)
            addr_offset = (adjusted_opcode // DBG_LINE_RANGE)
setattr(self, 'line_offset', line_offset)
setattr(self, 'addr_offset', addr_offset)
setattr(self, 'byte_size', data.tell() - self.get_offset())
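    # Worked example (added commentary): for a special opcode 0x1c,
    #   adjusted_opcode = 0x1c - DBG_FIRST_SPECIAL = 18
    #   line_offset     = DBG_LINE_BASE + (18 % DBG_LINE_RANGE) = -4 + 3 = -1
    #   addr_offset     = 18 // DBG_LINE_RANGE = 1 code unit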
def get_dump_flat(self):
return True
def get_byte_size(self):
return self.byte_size
def dump_opcode(self, f=sys.stdout):
f.write(str(self.op))
if self.op == DBG_ADVANCE_PC:
f.write('(%u)' % self.addr_offset)
elif self.op == DBG_ADVANCE_LINE:
f.write('(%u)' % self.line_offset)
elif self.op == DBG_START_LOCAL:
f.write('(register_num=%u, name_idx=' % self.register_num)
if self.name_idx < 0:
f.write('NO_INDEX')
else:
f.write('%u' % (self.name_idx))
f.write(', type_idx=')
if self.type_idx < 0:
f.write('NO_INDEX)')
else:
f.write('%u)' % (self.type_idx))
elif self.op == DBG_START_LOCAL_EXTENDED:
f.write('(register_num=%u, name_idx=' % self.register_num)
if self.name_idx < 0:
f.write('NO_INDEX')
else:
f.write('%u' % (self.name_idx))
f.write(', type_idx=')
if self.type_idx < 0:
f.write('NO_INDEX')
else:
f.write('%u' % (self.type_idx))
f.write(', sig_idx=')
            if self.sig_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.sig_idx))
elif self.op == DBG_END_LOCAL or self.op == DBG_RESTART_LOCAL:
f.write('(register_num=%u)' % self.register_num)
elif self.op == DBG_SET_FILE:
f.write('(name_idx=%u)' % self.name_idx)
elif self.op >= DBG_FIRST_SPECIAL:
f.write(' (addr_offset=%u, line_offset=%i)' %
(self.addr_offset, self.line_offset))
class debug_info_item(AutoParser):
items = [
{'type': 'uleb', 'name': 'line_start'},
{'type': 'uleb', 'name': 'parameters_size'},
{'type': 'ulebp1', 'name': 'parameter_names',
'attr_count': 'parameters_size'},
]
class row(object):
def __init__(self):
self.address = 0
self.line = 1
self.source_file = -1
self.prologue_end = False
self.epilogue_begin = False
def dump(self, f=sys.stdout):
f.write('0x%4.4x %5u %5u ' %
(self.address, self.line, self.source_file))
if self.prologue_end or self.epilogue_begin:
if self.prologue_end:
f.write('P ')
else:
f.write(' ')
if self.epilogue_begin:
f.write('E')
f.write('\n')
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
self.data = data
self.ops = None
self.line_table = None
self.debug_info_offset = data.tell()
def check_encoding(self, dex_method, f=sys.stdout):
bytes_saved = 0
ops = self.get_ops()
if len(ops) == 1:
op = ops[0]
if op.op == DBG_END_SEQUENCE:
bytes_saved += (get_uleb128_byte_size(self.line_start) +
get_uleb128p1_byte_size(self.parameters_size))
for parameter_name in self.parameter_names:
bytes_saved += get_uleb128p1_byte_size(parameter_name)
bytes_saved += 1
f.write('warning: %s debug info contains only a single ' % (
dex_method.get_qualified_name()))
f.write('%s, all debug info can be removed ' % (op.op))
f.write('(%u bytes)\n' % (bytes_saved))
return bytes_saved
        # Dex files built for release don't need any of the following
        # debug info ops.
for op in ops:
size = op.get_byte_size()
if op.op == DBG_SET_PROLOGUE_END:
f.write('warning: %s %s can be removed (%u byte)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
elif op.op == DBG_SET_EPILOGUE_BEGIN:
f.write('warning: %s %s can be removed (%u byte)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
elif op.op == DBG_START_LOCAL:
f.write('warning: %s %s can be removed (%u bytes)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
elif op.op == DBG_START_LOCAL_EXTENDED:
f.write('warning: %s %s can be removed (%u bytes)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
elif op.op == DBG_END_LOCAL:
f.write('warning: %s %s can be removed (%u bytes)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
elif op.op == DBG_RESTART_LOCAL:
f.write('warning: %s %s can be removed (%u bytes)\n' % (
dex_method.get_qualified_name(), op.op, size))
bytes_saved += size
return bytes_saved
    def get_line_table(self):
        if self.line_table is None:
            ops = self.get_ops()
            self.line_table = list()
            row = debug_info_item.row()
            for op in ops:
                if op.op == DBG_END_SEQUENCE:
                    break
                if op.op == DBG_ADVANCE_PC:
                    row.address += op.addr_offset
                elif op.op == DBG_ADVANCE_LINE:
                    row.line += op.line_offset
                elif op.op == DBG_START_LOCAL:
                    pass
                elif op.op == DBG_START_LOCAL_EXTENDED:
                    pass
                elif op.op == DBG_END_LOCAL:
                    pass
                elif op.op == DBG_RESTART_LOCAL:
                    pass
                elif op.op == DBG_SET_PROLOGUE_END:
                    row.prologue_end = True
                elif op.op == DBG_SET_EPILOGUE_BEGIN:
                    row.epilogue_begin = True
                elif op.op == DBG_SET_FILE:
                    row.source_file = op.name_idx
                else:
                    # Special opcodes advance both the line and the address
                    # and emit a new row in the line table.
                    row.line += op.line_offset
                    row.address += op.addr_offset
                    self.line_table.append(copy.copy(row))
                    row.prologue_end = False
                    row.epilogue_begin = False
        return self.line_table
def get_ops(self):
if self.ops is None:
data = self.data
data.push_offset_and_seek(self.debug_info_offset)
self.ops = list()
while True:
op = debug_info_op(data)
self.ops.append(op)
if op.op == DBG_END_SEQUENCE:
break
data.pop_offset_and_seek()
return self.ops
def dump_debug_info(self, f=sys.stdout, prefix=None):
ops = self.get_ops()
for op in ops:
if prefix:
f.write(prefix)
f.write(' ')
op.dump_opcode(f=f)
f.write('\n')
# ----------------------------------------------------------------------
# code_item
# ----------------------------------------------------------------------
class code_item(AutoParser):
items = [
{'type': 'u16', 'name': 'registers_size', 'align': 4},
{'type': 'u16', 'name': 'ins_size'},
{'type': 'u16', 'name': 'outs_size'},
{'type': 'u16', 'name': 'tries_size'},
{'type': 'u32', 'name': 'debug_info_off'},
{'type': 'u32', 'name': 'insns_size', 'format': '%u'},
{'type': 'u16', 'name': 'insns',
'attr_count': 'insns_size', 'dump_list': print_instructions},
{'type': 'u16', 'condition': lambda item,
data: item.tries_size != 0 and item.insns_size & 1},
{'class': try_item, 'name': 'tries', 'attr_count': 'tries_size',
'condition': lambda item, data: item.tries_size != 0,
'default': None},
{'class': encoded_catch_handler_list, 'name': 'handlers',
'condition': lambda item, data: item.tries_size != 0,
'default': None}
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
self.debug_info = None
self.data = data
        # Convert insns from a list to a tuple to avoid mutation and also to
# allow self.insns to be hashed.
self.insns = tuple(self.insns)
def get_debug_info(self):
if self.debug_info is None and self.debug_info_off > 0:
data = self.data
data.push_offset_and_seek(self.debug_info_off)
self.debug_info = debug_info_item(data)
data.pop_offset_and_seek()
return self.debug_info
class encoded_value:
def __init__(self, data):
arg_type = data.get_uint8()
value_arg = arg_type >> 5
value_type = arg_type & 0x1f
self.value_type = ValueFormat(value_type)
self.value = None
size = value_arg + 1
if value_type == VALUE_BYTE:
if value_arg != 0:
raise ValueError(
'VALUE_BYTE value_arg != 0 (%u)' % (value_arg))
self.value = data.get_sint8()
elif value_type == VALUE_SHORT:
self.value = data.get_sint_size(size)
elif value_type == VALUE_CHAR:
self.value = data.get_uint_size(size)
elif value_type == VALUE_INT:
self.value = data.get_sint_size(size)
elif value_type == VALUE_LONG:
self.value = data.get_sint_size(size)
elif value_type == VALUE_FLOAT:
raise ValueError('VALUE_FLOAT not supported yet')
elif value_type == VALUE_DOUBLE:
raise ValueError('VALUE_DOUBLE not supported yet')
elif value_type == VALUE_METHOD_TYPE:
self.value = data.get_uint_size(size)
elif value_type == VALUE_METHOD_HANDLE:
self.value = data.get_uint_size(size)
elif value_type == VALUE_STRING:
self.value = data.get_uint_size(size)
elif value_type == VALUE_TYPE:
self.value = data.get_uint_size(size)
elif value_type == VALUE_FIELD:
self.value = data.get_uint_size(size)
elif value_type == VALUE_METHOD:
self.value = data.get_uint_size(size)
elif value_type == VALUE_ENUM:
self.value = data.get_uint_size(size)
elif value_type == VALUE_ARRAY:
if value_arg != 0:
raise ValueError(
'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
raise ValueError('VALUE_ARRAY not supported yet')
# encoded_array: an array of values, in the format specified by
# "encoded_array format". The size of the value is implicit in
# the encoding.
elif value_type == VALUE_ANNOTATION:
if value_arg != 0:
raise ValueError(
'VALUE_ANNOTATION value_arg != 0 (%u)' % (value_arg))
# encoded_annotation: a sub-annotation, in the format specified by
# "encoded_annotation format" below. The size of the value is
# implicit in the encoding.
elif value_type == VALUE_NULL:
if value_arg != 0:
raise ValueError(
                    'VALUE_NULL value_arg != 0 (%u)' % (value_arg))
self.value = 0
        elif value_type == VALUE_BOOLEAN:
            # Per the dex format, the boolean value is carried in value_arg
            # itself and has no additional payload bytes.
            self.value = value_arg != 0
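    # Worked example (added commentary): a leading byte of 0x44 decodes as
    # value_arg = 0x44 >> 5 = 2 and value_type = 0x44 & 0x1f = VALUE_INT,
    # i.e. a (value_arg + 1) = 3 byte sign-extended integer follows.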
# ----------------------------------------------------------------------
# encoded_array
# ----------------------------------------------------------------------
class encoded_array(AutoParser):
items = [
{'type': 'uleb', 'name': 'size'},
{'class': encoded_value, 'name': 'values', 'attr_count': 'size'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
class encoded_array_item(AutoParser):
items = [
{'class': encoded_array, 'name': 'value'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# field_id_item
# ----------------------------------------------------------------------
class field_id_item(AutoParser):
items = [
{'type': 'u16', 'name': 'class_idx', 'align': 4},
{'type': 'u16', 'name': 'type_idx'},
{'type': 'u32', 'name': 'name_idx'},
]
def __init__(self, data, context):
AutoParser.__init__(self, self.items, data, context)
@classmethod
def get_table_header(self):
return 'CLASS TYPE NAME\n'
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# header_item
# ----------------------------------------------------------------------
class header_item(AutoParser):
items = [
{'type': 'cstr[4]', 'name': 'magic', 'validate': is_dex_magic},
{'type': 'u8[3]', 'name': 'version', 'dump': print_version},
{'type': 'u8', 'validate': is_zero}, # NULL byte
{'type': 'u32', 'name': 'checksum'},
{'type': 'u8[20]', 'name': 'signature', 'dump': print_hex_bytes},
{'type': 'u32', 'name': 'file_size'},
{'type': 'u32', 'name': 'header_size'},
        {'type': 'u32', 'name': 'endian_tag', 'dump': print_endian},
{'type': 'u32', 'name': 'link_size'},
{'type': 'u32', 'name': 'link_off'},
{'type': 'u32', 'name': 'map_off'},
{'type': 'u32', 'name': 'string_ids_size'},
{'type': 'u32', 'name': 'string_ids_off'},
{'type': 'u32', 'name': 'type_ids_size'},
{'type': 'u32', 'name': 'type_ids_off'},
{'type': 'u32', 'name': 'proto_ids_size'},
{'type': 'u32', 'name': 'proto_ids_off'},
{'type': 'u32', 'name': 'field_ids_size'},
{'type': 'u32', 'name': 'field_ids_off'},
{'type': 'u32', 'name': 'method_ids_size'},
{'type': 'u32', 'name': 'method_ids_off'},
{'type': 'u32', 'name': 'class_defs_size'},
{'type': 'u32', 'name': 'class_defs_off'},
{'type': 'u32', 'name': 'data_size'},
{'type': 'u32', 'name': 'data_off'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_header(self):
return 'DEX header:'
# ----------------------------------------------------------------------
# map_item
# ----------------------------------------------------------------------
class map_item(AutoParser):
items = [
{'class': TypeCode, 'name': 'type',
'dump_width': TypeCode.max_width()},
{'type': 'u16'},
{'type': 'u32', 'name': 'size'},
{'type': 'u32', 'name': 'offset'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_list_header_lines(self):
return [' TYPE SIZE OFFSET\n']
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# map_list
# ----------------------------------------------------------------------
class map_list(AutoParser):
items = [
{'type': 'u32', 'name': 'size', 'align': 4, 'dump': False},
{'class': map_item, 'name': 'list', 'attr_count': 'size',
'flat': True},
]
def get_dump_header(self):
return 'map_list:'
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_handle_item
# ----------------------------------------------------------------------
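# A method handle (DEX version 038 and later): the handle kind, the field
# or method it refers to, and two unused u16 padding fields.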
class method_handle_item(AutoParser):
items = [
{'class': MethodHandleTypeCode, 'name': 'method_handle_type',
'align': 4},
{'type': 'u16'},
{'type': 'u16', 'name': 'field_or_method_id'},
{'type': 'u16'},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_id_item
# ----------------------------------------------------------------------
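# A method_id_item identifies one method: class_idx (u16 into type_ids),
# proto_idx (u16 into proto_ids) and name_idx (u32 into string_ids).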
class method_id_item(AutoParser):
items = [
{'type': 'u16', 'name': 'class_idx', 'align': 4},
{'type': 'u16', 'name': 'proto_idx'},
{'type': 'u32', 'name': 'name_idx'},
]
def __init__(self, data, context):
AutoParser.__init__(self, self.items, data, context)
@classmethod
    def get_table_header(cls):
return 'CLASS PROTO NAME\n'
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# proto_id_item
# ----------------------------------------------------------------------
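# A method prototype: shorty_idx (string_ids), return_type_idx (type_ids)
# and parameters_off, which points at a type_list of parameter types or is
# zero for a method that takes no parameters.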
class proto_id_item(AutoParser):
items = [
{'type': 'u32', 'name': 'shorty_idx', 'align': 4},
{'type': 'u32', 'name': 'return_type_idx'},
{'type': 'u32', 'name': 'parameters_off'},
]
def __init__(self, data, context):
AutoParser.__init__(self, self.items, data, context)
self.parameters = None
def get_dump_flat(self):
return True
@classmethod
    def get_table_header(cls):
return 'SHORTY_IDX RETURN PARAMETERS\n'
def get_parameters(self):
if self.parameters_off != 0 and self.parameters is None:
# Get the data from our dex.File object
data = self.context.data
data.push_offset_and_seek(self.parameters_off)
self.parameters = type_list(data)
data.pop_offset_and_seek()
return self.parameters
# ----------------------------------------------------------------------
# string_data_item
# ----------------------------------------------------------------------
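# A string payload: the ULEB128 length in UTF-16 code units followed by the
# MUTF-8 encoded bytes of the string.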
class string_data_item(AutoParser):
items = [
{'type': 'uleb', 'name': 'utf16_size', 'format': '%3u'},
{'type': 'cstr', 'name': 'data', 'dump': print_string},
]
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
def get_dump_flat(self):
return True
# ----------------------------------------------------------------------
# type_list
# ----------------------------------------------------------------------
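# A 4-byte aligned list of u16 type_ids indexes prefixed by its length,
# used for interface lists and method parameter lists.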
class type_list(AutoParser):
items = [
{'type': 'u32', 'name': 'size', 'align': 4},
{'type': 'u16', 'name': 'list', 'attr_count': 'size'},
]
def get_dump_header(self):
return 'type_list:'
def __init__(self, data):
AutoParser.__init__(self, self.items, data)
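# A proguard mapping file lists each class on an unindented line of the
# form "original.Class -> obfuscated.Class:" followed by indented member
# lines of the form "original member -> obfuscated". Progard stores the
# reverse (obfuscated -> original) mapping so that names seen in the DEX
# file can be translated back.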
class Progard:
'''Parses a proguard map file and does name lookups.'''
def __init__(self, path):
self.path = path
self.classes_dict = {}
class_dict = None
        regex = re.compile(r'\s+([0-9]+:[0-9]+:)?(.*) -> (.*)$')
with open(path, 'r') as f:
for line in f:
line = line.rstrip('\n')
if line:
if line[0].isspace():
match = regex.match(line)
if match:
old = match.group(2)
new = match.group(3)
# print('other old = "%s"' % (old))
# print('other new = "%s"' % (new))
class_dict[new] = old
else:
(old, new) = line.split(' -> ')
# print('class old = "%s"' % (old))
# print('class new = "%s"' % (new))
class_dict = {}
self.classes_dict[new] = (old, class_dict)
def lookup_class(self, new_class):
'''Translate a new class name to the old class name.'''
if new_class in self.classes_dict:
(old_class, class_dict) = self.classes_dict[new_class]
if old_class is not None:
return old_class
return None
def lookup_method(self, new_class, new_method):
        '''Translate a new class name and a new method name into the old
        method name.'''
if new_class in self.classes_dict:
(old_class, class_dict) = self.classes_dict[new_class]
if new_method in class_dict:
return class_dict[new_method]
return None
class DexMethod:
'''Encapsulates a method within a DEX file.'''
def __init__(self, dex_class, encoded_method, is_virtual):
self.dex_class = dex_class
self.encoded_method = encoded_method
self.method_id = None
self.is_virtual = is_virtual
self.code_item = None
self.insns = None
self.name_in_file = None
self.name = None
def get_qualified_name(self):
class_name = self.get_class().get_name()
method_name = self.get_name()
if class_name[-1] != ';':
return class_name + ':' + method_name
else:
return class_name + method_name
def get_method_id(self):
'''Get the method_id_item for this method.'''
if self.method_id is None:
self.method_id = self.get_dex().get_method_id(self.encoded_method)
return self.method_id
def get_method_index(self):
'''Get the method index into the method_ids array in the DEX file.'''
return self.encoded_method.method_idx
def get_code_offset(self):
'''Get the code offset for this method.'''
return self.encoded_method.code_off
def get_code_item_index(self):
'''Get the index into the code_items array in the dex file for the
code for this method, or -1 if there is no code for this method.'''
code_item = self.get_code_item()
if code_item:
return self.get_dex().get_code_item_index_from_code_off(
code_item.get_offset())
return -1
def get_dex(self):
return self.dex_class.get_dex()
def get_name_in_file(self):
'''Returns the name of the method as it is known in the current DEX
file (no proguard remapping)'''
if self.name_in_file is None:
self.name_in_file = self.get_dex().get_string(
self.get_method_id().name_idx)
return self.name_in_file
def get_name(self):
if self.name is None:
cls_mangled = self.get_class().get_mangled_name()
name_in_file = self.get_name_in_file()
if cls_mangled and name_in_file:
self.name = self.get_dex().demangle_class_method_name(
cls_mangled, name_in_file)
if self.name is None:
self.name = name_in_file
return self.name
def get_class(self):
return self.dex_class
def get_code_item(self):
if self.code_item is None:
if self.encoded_method.code_off != 0:
self.code_item = self.get_dex().find_code_item(
self.encoded_method.code_off)
return self.code_item
def get_code_byte_size(self):
code_item = self.get_code_item()
if code_item:
return len(code_item.insns) * 2
return 0
def get_instructions(self):
if self.insns is None:
self.insns = []
code_item = self.get_code_item()
if code_item:
code_units = CodeUnits(code_item.insns)
while code_units.index_is_valid():
insn = DexInstruction()
insn.decode(code_units)
self.insns.append(insn)
return self.insns
def dump(self, dump_code=True, dump_debug_info=True, f=sys.stdout):
if self.is_virtual:
method_type = 'virtual'
else:
method_type = 'direct'
dex = self.get_dex()
f.write('method: (%s) %s%s\n' %
(method_type, self.get_class().get_name(), self.get_name()))
code_item_idx = dex.get_code_item_index_from_code_off(
self.encoded_method.code_off)
self.encoded_method.dump(f=f, prefix=' encoded_method.', flat=False)
method_id = dex.get_method_id(self.encoded_method.method_idx)
if method_id:
method_id.dump(f=f, prefix=' method_id.', flat=False)
proto_id = dex.get_proto_id(method_id.proto_idx)
if proto_id:
proto_id.dump(f=f, prefix=' proto_id.', flat=False)
f.write('\n')
if dump_code:
if code_item_idx >= 0:
code_item = dex.get_code_items()[code_item_idx]
f.write(' code_item[%u] @ %#8.8x:\n' % (code_item_idx,
code_item.get_offset()))
code_item.dump(f=f, prefix=' ')
if dump_debug_info:
self.dump_debug_info(f=f, prefix=' ')
def dump_code(self, f=sys.stdout):
insns = self.get_instructions()
for insn in insns:
insn.dump(f=f)
def get_debug_info(self):
code_item = self.get_code_item()
if code_item:
return code_item.get_debug_info()
return None
def dump_debug_info(self, f=sys.stdout, prefix=None):
debug_info = self.get_debug_info()
if prefix:
f.write(prefix)
if debug_info:
f.write('debug info @ %#8.8x:\n' % (debug_info.get_offset()))
debug_info.dump_debug_info(f=f, prefix=prefix)
f.write('\n')
else:
f.write('no debug info\n')
def check_debug_info_encoding(self):
debug_info = self.get_debug_info()
if debug_info:
return debug_info.check_encoding(self)
class DexClass:
'''Encapsulates a class within a DEX file.'''
def __init__(self, dex, class_def):
self.dex = dex
self.class_def = class_def
self.methods = None
self.num_direct_methods = 0
self.mangled = None
self.demangled = None
def dump(self, f=sys.stdout):
f.write('\nclass: %s\n' % (self.get_name()))
dex = self.get_dex()
class_def_offset = self.class_def.get_offset()
class_def_idx = dex.get_class_def_index_from_offset(class_def_offset)
f.write(' class_def[%u] @ %#8.8x:\n' % (class_def_idx,
class_def_offset))
self.class_def.dump(f=f, flat=False, prefix=' ')
f.write(' class_data_item @ %#8.8x:\n' % (
self.class_def.class_data.get_offset()))
self.class_def.class_data.dump(f=f, flat=False, prefix=' ')
f.write('\n')
def get_type_index(self):
'''Get type ID index (class_idx) for this class.'''
return self.class_def.class_idx
def is_abstract(self):
return (self.class_def.access_flags & ACC_ABSTRACT) != 0
def get_mangled_name(self):
if self.mangled is None:
dex = self.get_dex()
self.mangled = dex.get_typename(self.class_def.class_idx)
return self.mangled
def get_name(self):
'''Get the demangled name for a class if we have a proguard file or
return the mangled name if we don't have a proguard file.'''
if self.demangled is None:
mangled = self.get_mangled_name()
if mangled:
self.demangled = self.get_dex().demangle_class_name(mangled)
if self.demangled is None:
self.demangled = mangled
return self.demangled
def get_dex(self):
return self.dex
def get_methods(self):
if self.methods is None:
self.methods = []
self.num_direct_methods = len(
self.class_def.class_data.direct_methods)
for encoded_method in self.class_def.class_data.direct_methods:
self.methods.append(DexMethod(self, encoded_method, False))
for encoded_method in self.class_def.class_data.virtual_methods:
self.methods.append(DexMethod(self, encoded_method, True))
return self.methods
def demangle_classname(mangled):
if (mangled and len(mangled) > 2 and mangled[0] == 'L' and
mangled[-1] == ';'):
return mangled[1:-1].replace('/', '.') + ':'
# Already demangled
return mangled
def mangle_classname(demangled):
if (demangled and len(demangled) > 2 and
(demangled[0] != 'L' or demangled[-1] != ';')):
return 'L' + demangled.replace('.', '/') + ';'
    # Already mangled
return demangled
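# Typical use of this module is to construct a File and then query or dump
# it. A minimal sketch (the path is hypothetical, and the options argument
# of the dump_* methods is unused by dump_header, so None suffices here):
#
#   dex_file = File('classes.dex', None)  # no proguard map
#   dex_file.dump_header(None)
#   for cls in dex_file.get_classes():
#       for method in cls.get_methods():
#           print(method.get_qualified_name())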
class File:
    '''Represents a DEX (Dalvik Executable) file'''
def __init__(self, path, proguard_path):
self.path = path
self.proguard = None
if proguard_path and os.path.exists(proguard_path):
self.proguard = Progard(proguard_path)
self.data = file_extract.FileExtract(open(self.path), '=', 4)
self.header = header_item(self.data)
self.map_list = None
self.string_ids = None
self.type_ids = None
self.proto_ids = None
self.field_ids = None
self.method_ids = None
self.class_defs = None
self.classes = None
self.call_site_ids = None
self.method_handle_items = None
self.code_items = None
self.code_off_to_code_item_idx = {}
self.strings = None
self.call_sites = None
self.dex_classes = {}
def demangle_class_name(self, cls_mangled):
        '''Given a mangled type name as it would appear in a DEX file like
        "LX/JxK;", return the demangled version if we have a proguard file
        that maps it, otherwise return None.'''
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_class(cls_demangled)
return None
def demangle_class_method_name(self, cls_mangled, method_name):
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_method(cls_demangled, method_name)
return None
def get_map_list(self):
if self.map_list is None:
self.data.push_offset_and_seek(self.header.map_off)
self.map_list = map_list(self.data)
self.data.pop_offset_and_seek()
return self.map_list
def get_map_tuple(self, type_code):
map_list = self.get_map_list()
for item in map_list.list:
if item.type.get_enum_value() == type_code:
return (item.size, item.offset)
return (0, 0)
def find_class(self, class_ref):
class_idx = class_ref
if isinstance(class_ref, six.string_types):
# Make sure the string is in 'L' <classname-with-slashes> ';'
class_mangled = mangle_classname(class_ref)
class_str_idx = self.find_string_idx(class_mangled)
if class_str_idx >= 0:
class_idx = self.find_type_idx(class_str_idx)
if isinstance(class_idx, numbers.Integral):
classes = self.get_classes()
for cls in classes:
if cls.class_def.class_idx == class_idx:
return cls
return None
def find_string_idx(self, match_s):
strings = self.get_strings()
for (i, s) in enumerate(strings):
if match_s == s.data:
return i
return -1
def get_string(self, index):
strings = self.get_strings()
if index < len(strings):
return strings[index].data
return None
def get_typename(self, type_id):
types = self.get_type_ids()
if type_id < len(types):
return self.get_string(types[type_id])
return None
def get_string_ids(self):
if self.string_ids is None:
self.string_ids = list()
self.data.push_offset_and_seek(self.header.string_ids_off)
for i in range(self.header.string_ids_size):
self.string_ids.append(self.data.get_uint32())
self.data.pop_offset_and_seek()
return self.string_ids
def get_type_ids(self):
if self.type_ids is None:
self.type_ids = list()
self.data.push_offset_and_seek(self.header.type_ids_off)
for i in range(self.header.type_ids_size):
self.type_ids.append(self.data.get_uint32())
self.data.pop_offset_and_seek()
return self.type_ids
def get_proto_ids(self):
if self.proto_ids is None:
self.proto_ids = list()
self.data.push_offset_and_seek(self.header.proto_ids_off)
for i in range(self.header.proto_ids_size):
self.proto_ids.append(proto_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.proto_ids
def get_proto_id(self, proto_idx):
proto_ids = self.get_proto_ids()
if proto_idx >= 0 and proto_idx < len(proto_ids):
return proto_ids[proto_idx]
return None
def get_proto_shorty(self, proto_idx):
id = self.get_proto_id(proto_idx)
return self.get_string(id.shorty_idx)
def get_field_ids(self):
if self.field_ids is None:
self.field_ids = list()
self.data.push_offset_and_seek(self.header.field_ids_off)
for i in range(self.header.field_ids_size):
self.field_ids.append(field_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.field_ids
def get_method_ids(self):
if self.method_ids is None:
self.method_ids = list()
self.data.push_offset_and_seek(self.header.method_ids_off)
for i in range(self.header.method_ids_size):
self.method_ids.append(method_id_item(self.data, self))
self.data.pop_offset_and_seek()
return self.method_ids
def find_method_ids(self, method_name, class_ref=None):
dex_class = None
if class_ref is not None:
dex_class = self.find_class(class_ref)
matches = list() # Return a list of matching methods
method_ids = self.get_method_ids()
if not method_ids:
return matches
name_idx = self.find_string_idx(method_name)
        if name_idx < 0:
return matches
for method_id in method_ids:
if method_id.name_idx == name_idx:
if dex_class:
if method_id.class_idx != dex_class.class_def.class_idx:
continue
matches.append(method_id)
return matches
def find_method_id_by_code_offset(self, code_off):
class_defs = self.get_class_defs()
for class_def in class_defs:
method_id = class_def.find_encoded_method_by_code_off(code_off)
if method_id:
return method_id
return None
def get_method_id(self, method_ref):
'''method_ref can be one of:
        - an encoded_method object
        - an integer method index'''
method_ids = self.get_method_ids()
if method_ids:
if isinstance(method_ref, encoded_method):
if method_ref.method_idx < len(method_ids):
return method_ids[method_ref.method_idx]
elif isinstance(method_ref, numbers.Integral):
if method_ref < len(method_ids):
return method_ids[method_ref]
else:
raise ValueError('invalid method_ref type %s' %
(type(method_ref)))
return None
# def get_call_site(self, idx):
# call_site_ids = self.get_call_site_ids()
# if idx >= len(call_site_ids):
# return None
# if self.call_sites[idx] is None:
# self.data.push_offset_and_seek(call_site_ids[idx])
# self.call_sites[idx] = call_site_item(self.data)
# self.data.pop_offset_and_seek()
# return self.call_sites[idx]
def get_call_site_ids(self):
if self.call_site_ids is None:
self.call_site_ids = list()
self.call_sites = list()
(size, offset) = self.get_map_tuple(TYPE_CALL_SITE_ID_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.call_site_ids.append(self.data.get_uint32())
self.call_sites.append(None)
self.data.pop_offset_and_seek()
return self.call_site_ids
def get_method_handle_items(self):
if self.method_handle_items is None:
self.method_handle_items = list()
(size, offset) = self.get_map_tuple(TYPE_METHOD_HANDLE_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.method_handle_items.append(method_handle_item(self.data))
self.data.pop_offset_and_seek()
return self.method_handle_items
def get_code_items(self):
if self.code_items is None:
self.code_items = list()
(size, offset) = self.get_map_tuple(TYPE_CODE_ITEM)
self.data.push_offset_and_seek(offset)
for i in range(size):
self.data.align_to(4)
item = code_item(self.data)
self.code_items.append(item)
self.code_off_to_code_item_idx[item.get_offset()] = i
self.data.pop_offset_and_seek()
return self.code_items
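    # Groups code_items that share identical bytecode and dumps each group
    # of methods, which helps spot code that could be deduplicated.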
def report_code_duplication(self):
code_to_code_items = {}
code_items = self.get_code_items()
if code_items:
for code_item in code_items:
key = code_item.insns
if key in code_to_code_items:
code_to_code_items[key].append(code_item)
else:
code_to_code_items[key] = [code_item]
for key in code_to_code_items:
code_items = code_to_code_items[key]
if len(code_items) > 1:
print('-' * 72)
print('The following methods have the same code:')
for code_item in code_items:
method = self.find_method_from_code_off(
code_item.get_offset())
if method.is_virtual:
print('virtual', end=' ')
else:
print('direct', end=' ')
print(method.get_qualified_name())
# Dump the code once for all methods
method.dump_code()
def get_class_def_index_from_offset(self, class_def_offset):
class_defs = self.get_class_defs()
for (i, class_def) in enumerate(class_defs):
if class_def.get_offset() == class_def_offset:
return i
return -1
def get_code_item_index_from_code_off(self, code_off):
# Make sure the code items are created
self.get_code_items()
if code_off in self.code_off_to_code_item_idx:
return self.code_off_to_code_item_idx[code_off]
return -1
def find_code_item(self, code_off):
code_item_idx = self.get_code_item_index_from_code_off(code_off)
if code_item_idx >= 0:
return self.get_code_items()[code_item_idx]
else:
raise ValueError('invalid code item offset %#8.8x' % code_off)
def find_method_from_code_off(self, code_off):
if code_off == 0:
return None
for cls in self.get_classes():
for method in cls.get_methods():
if method.get_code_offset() == code_off:
return method
return None
def get_class_defs(self):
if self.class_defs is None:
self.class_defs = list()
self.data.push_offset_and_seek(self.header.class_defs_off)
for i in range(self.header.class_defs_size):
class_def = class_def_item(self.data, self)
self.class_defs.append(class_def)
self.data.pop_offset_and_seek()
return self.class_defs
def get_classes(self):
if self.classes is None:
self.classes = list()
class_defs = self.get_class_defs()
for class_def in class_defs:
dex_class = DexClass(self, class_def)
self.classes.append(dex_class)
return self.classes
def get_strings(self):
if self.strings is None:
self.strings = list()
for string_id_item in self.get_string_ids():
self.data.push_offset_and_seek(string_id_item)
self.strings.append(string_data_item(self.data))
self.data.pop_offset_and_seek()
return self.strings
def dump_header(self, options, f=sys.stdout):
self.header.dump(f=f)
def dump_map_list(self, options, f=sys.stdout):
self.get_map_list().dump(f=f)
f.write('\n')
def dump_string_ids(self, options, f=sys.stdout):
string_ids = self.get_string_ids()
if string_ids:
f.write('string_ids:\n')
for (i, item) in enumerate(self.get_strings()):
f.write('[%3u] %#8.8x ( ' % (i, string_ids[i]))
item.dump(f=f)
f.write(')\n')
def dump_type_ids(self, options, f=sys.stdout):
type_ids = self.get_type_ids()
if type_ids:
f.write('\ntype_ids:\n DESCRIPTOR_IDX\n')
for (i, item) in enumerate(type_ids):
f.write('[%3u] %#8.8x ("%s")\n' %
(i, item, self.get_string(item)))
def find_type_idx(self, class_str_idx):
types = self.get_type_ids()
i = bisect.bisect_left(types, class_str_idx)
if i != len(types) and types[i] == class_str_idx:
return i
return -1
def find_class_def_by_type_index(self, class_idx):
class_defs = self.get_class_defs()
for class_def in class_defs:
if class_def.class_idx == class_idx:
return class_def
return None
def dump_proto_ids(self, options, f=sys.stdout):
proto_ids = self.get_proto_ids()
if proto_ids:
f.write('\nproto_ids:\n')
f.write(' ' * (5 + 1))
f.write(proto_id_item.get_table_header())
for (i, item) in enumerate(proto_ids):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
shorty = self.get_string(item.shorty_idx)
ret = self.get_string(item.return_type_idx)
f.write(' ("%s", "%s"' % (shorty, ret))
parameters = item.get_parameters()
if parameters:
f.write(', (')
                    for (param_idx, type_id) in enumerate(parameters.list):
                        if param_idx > 0:
f.write(', ')
f.write(self.get_string(type_id))
f.write(')')
else:
f.write(', ()')
f.write(')\n')
def dump_field_ids(self, options, f=sys.stdout):
field_ids = self.get_field_ids()
if field_ids:
f.write('\nfield_ids:\n')
f.write(' ' * (5 + 1))
f.write(field_id_item.get_table_header())
for (i, item) in enumerate(field_ids):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
f.write(' ("%s", "%s", "%s")\n' % (
self.get_typename(item.class_idx),
self.get_typename(item.type_idx),
self.get_string(item.name_idx)))
def dump_method_ids(self, options, f=sys.stdout):
method_ids = self.get_method_ids()
if method_ids:
f.write('\nmethod_ids:\n')
f.write(' ' * (5 + 1))
f.write(method_id_item.get_table_header())
for (i, item) in enumerate(method_ids):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
f.write(' ("%s", "%s", "%s")\n' % (
self.get_typename(item.class_idx),
self.get_proto_shorty(item.proto_idx),
self.get_string(item.name_idx)))
def dump_class_defs(self, options, f=sys.stdout):
class_defs = self.get_class_defs()
if class_defs:
f.write('\nclass_defs:\n')
f.write(' ' * (5 + 1))
f.write(class_def_item.get_table_header())
for (i, item) in enumerate(class_defs):
f.write('[%3u] ' % (i))
item.dump(f=f, print_name=False)
f.write(' ("%s")' % (self.get_typename(item.class_idx)))
f.write('\n')
def dump_call_site_ids(self, options, f=sys.stdout):
call_site_ids = self.get_call_site_ids()
if call_site_ids:
f.write('\ncall_site_ids:\n')
f.write(' ' * (5 + 1))
for (i, item) in enumerate(call_site_ids):
f.write('[%3u] %#8.8x\n' % (i, item))
def dump_method_handle_items(self, options, f=sys.stdout):
method_handle_items = self.get_method_handle_items()
if method_handle_items:
f.write('\nmethod_handle_items:\n')
f.write(' ' * (5 + 1))
for (i, item) in enumerate(method_handle_items):
f.write('[%3u] ' % (i))
item.dump(f=f)
f.write('\n')
def dump_code(self, options, f=sys.stdout):
classes = self.get_classes()
if classes:
for cls in classes:
if cls.is_abstract():
continue
cls.dump(f=f)
methods = cls.get_methods()
dc = options.dump_code or options.dump_all
ddi = options.debug or options.dump_all
for method in methods:
                    if dc:
method.dump(f=f, dump_code=dc, dump_debug_info=ddi)
f.write('\n')
def dump_code_items(self, options, f=sys.stdout):
code_items = self.get_code_items()
if code_items:
for (i, code_item) in enumerate(code_items):
f.write('code_item[%u]:\n' % (i))
code_item.dump(f=f)
def dump(self, options, f=sys.stdout):
self.dump_header(options, f)
f.write('\n')
self.dump_map_list(options, f)
self.dump_string_ids(options, f)
self.dump_type_ids(options, f)
self.dump_proto_ids(options, f)
self.dump_field_ids(options, f)
self.dump_method_ids(options, f)
self.dump_class_defs(options, f)
self.dump_call_site_ids(options, f)
self.dump_method_handle_items(options, f)
self.dump_code(options, f)
self.dump_code_items(options, f)
def sign_extending(value, bit_width):
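    # e.g. sign_extending(0xF, 4) == -1 and sign_extending(0x7, 4) == 7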
# is the highest bit (sign) set? (x>>(b-1)) would be faster
if value & (1 << (bit_width - 1)):
return value - (1 << bit_width) # 2s complement
return value
def get_signed_hex_offset_as_str(signed_offset, width):
if signed_offset < 0:
s = '-'
offset = abs(signed_offset)
else:
s = '+'
offset = signed_offset
if width == 2:
s += '%2.2x' % (offset & 0xff)
elif width == 4:
s += '%4.4x' % (offset & 0xffff)
elif width == 8:
s += '%8.8x' % (offset & 0xffffffff)
else:
raise ValueError("only sizes of 2 4 or 8 are supported")
return s
class Opcode(object):
def __init__(self, inst):
self.inst = inst
def check_encoding(self, f=sys.stdout):
'''Verify that this instruction can't be encoded more efficiently'''
return 0 # Return zero to indicate we can't save any bytes
def new_encoding(self, f=sys.stdout):
'''Look for bytes we can save by making new opcodes that are encoded
as unsigned, or other optimizations'''
return 0 # Return zero to indicate we can't save any bytes
def get_op(self):
return self.inst.get_op()
def get_name(self):
op = self.get_op()
return self.ops[op]
def get_num_code_units(self):
return self.num_code_units
def regs_are_sequential(self):
if len(self.regs) <= 1:
return True
prev_reg = self.regs[0]
for i in range(1, len(self.regs)):
curr_reg = self.regs[i]
if prev_reg + 1 != curr_reg:
return False
return True
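# Each OpcodeXX class below models one Dalvik instruction format.  It is
# constructed with the DexInstruction being decoded plus the remaining code
# units, and provides dump() along with optional check_encoding() /
# new_encoding() hooks that report how many bytes a tighter (or hypothetical
# new) encoding would save.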
class Opcode00(Opcode):
ops = {0x00: 'nop'}
num_code_units = 1
max_regs = 0
extra_data = 'x'
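    # The AA byte ("nature") selects between a real nop (0) and the three
    # pseudo-instruction payloads: packed-switch (1), sparse-switch (2) and
    # fill-array-data (3).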
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.nature = inst.get_AA()
if self.nature == 0:
pass # NOP
elif self.nature == 1:
self.size = code_units.get_code_unit()
self.first_key = code_units.get_int()
self.targets = list()
for i in range(self.size):
self.targets.append(code_units.get_int())
elif self.nature == 2:
self.size = code_units.get_code_unit()
self.keys = list()
self.targets = list()
for i in range(self.size):
self.keys.append(code_units.get_int())
for i in range(self.size):
self.targets.append(code_units.get_int())
elif self.nature == 3:
self.element_width = code_units.get_code_unit()
self.size = code_units.get_uint()
num_code_units = int((self.size * self.element_width + 1) / 2)
encoder = file_extract.FileEncode(StringIO.StringIO(), 'little', 4)
for i in range(num_code_units):
encoder.put_uint16(code_units.get_code_unit())
encoder.seek(0)
self.data = encoder.file.getvalue()
else:
raise ValueError("add support for NOP nature %u" % (self.nature))
def get_name(self):
if self.nature == 0:
return self.ops[0]
elif self.nature == 1:
return 'packed-switch-payload'
elif self.nature == 2:
return 'sparse-switch-payload'
elif self.nature == 3:
return 'fill-array-data-payload'
else:
raise ValueError("add support for NOP nature %u" % (self.nature))
def get_num_code_units(self):
if self.nature == 0:
return 1
elif self.nature == 1:
op_count = 1
size_count = 1
first_key_count = 2
keys_count = self.size * 2
return op_count + size_count + first_key_count + keys_count
elif self.nature == 2:
op_count = 1
size_count = 1
keys_and_targets_count = self.size * 4
return op_count + size_count + keys_and_targets_count
        elif self.nature == 3:
            # Per the DEX spec the payload occupies ident (1) + element_width
            # (1) + size (2) code units plus the data rounded up to whole
            # 16-bit code units.
            return 4 + (self.size * self.element_width + 1) // 2
else:
raise ValueError("add support for NOP nature %u" % (self.nature))
def dump(self, f=sys.stdout):
if self.nature == 0:
f.write('%s' % (self.get_name()))
elif self.nature == 1:
f.write('packed-switch-payload\n')
f.write('INDEX KEY TARGET\n===== --------- ---------\n')
for (i, target) in enumerate(self.targets):
f.write('[%3u] %+8.8x %+8.8x\n' %
(i, self.first_key + i, target))
elif self.nature == 2:
f.write('sparse-switch-payload\n')
f.write('INDEX KEY TARGET\n===== --------- ---------\n')
for (i, key) in enumerate(self.keys):
f.write('[%3u] %+8.8x %+8.8x\n' % (i, key, self.targets[i]))
elif self.nature == 3:
f.write('fill-array-data-payload (elem_width = %u, size = %u)\n' %
(self.element_width, self.size))
file_extract.dump_memory(0, self.data, self.element_width, f)
def emulate(self, emulator):
pass
class Opcode01(Opcode):
ops = {0x01: 'move'}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode02(Opcode):
ops = {0x02: 'move/from16'}
num_code_units = 2
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst[1])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move/from16" can be encoded as a "move"')
f.write(' more efficiently as its registers are both <= %u\n' %
(UINT4_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode03(Opcode):
ops = {0x03: 'move/16'}
num_code_units = 3
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst[1])
self.regs.append(inst[2])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move/16" can be encoded as a "move"')
f.write(' more efficiently as its registers are both <= %u\n' %
(UINT4_MAX))
return 4
if self.regs[0] <= UINT8_MAX:
f.write('warning: "move/16" can be encoded as a "move/from16"')
f.write(' more efficiently as its first register is <= %u\n' %
(UINT8_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode04(Opcode):
ops = {0x04: 'move-wide'}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode05(Opcode):
ops = {0x05: 'move-wide/from16'}
num_code_units = 2
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst[1])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move-wide/from16" can be encoded as a ')
f.write('"move-wide" more efficiently as its registers are ')
f.write('both <= %u\n' % (UINT4_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode06(Opcode):
ops = {0x06: 'move-wide/16'}
num_code_units = 3
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst[1])
self.regs.append(inst[2])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move-wide/16" can be encoded as a "move-wide" ')
f.write('more efficiently as its registers are both <= %u\n' %
(UINT4_MAX))
return 4
if self.regs[0] <= UINT8_MAX:
f.write('warning: "move-wide/16" can be encoded as a ')
f.write('"move-wide/from16" more efficiently as its first ')
f.write('register is <= %u\n' % (UINT8_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode07(Opcode):
ops = {0x07: 'move-object'}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode08(Opcode):
    ops = {0x08: 'move-object/from16'}
num_code_units = 2
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst[1])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move-object/from16" can be encoded as a ')
f.write('"move-object" more efficiently as its registers are ')
f.write('both <= %u\n' % (UINT4_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode09(Opcode):
ops = {0x09: 'move-object/16'}
num_code_units = 3
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst[1])
self.regs.append(inst[2])
def check_encoding(self, f=sys.stdout):
if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
f.write('warning: "move-object/16" can be encoded as a ')
f.write('"move-object" more efficiently as its registers ')
f.write('are both <= %u\n' % (UINT4_MAX))
return 4
if self.regs[0] <= UINT8_MAX:
f.write('warning: "move-object/16" can be encoded as a ')
f.write('"move-object/from16" more efficiently as its first ')
f.write('register is <= %u\n' % (UINT8_MAX))
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode0A_0D(Opcode):
ops = {
0x0a: 'move-result',
0x0b: 'move-result-wide',
0x0c: 'move-result-object',
0x0d: 'move-exception'
}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode0E(Opcode):
ops = {0x0e: 'return-void'}
num_code_units = 1
max_regs = 0
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
def dump(self, f=sys.stdout):
f.write('%s' % (self.get_name()))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode0F(Opcode):
ops = {0x0f: 'return'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode10(Opcode):
ops = {0x10: 'return-wide'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode11(Opcode):
ops = {0x11: 'return-object'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode12(Opcode):
ops = {0x12: 'const/4'}
num_code_units = 1
max_regs = 1
extra_data = 'n'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_A()
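        # The literal is the high nibble of the first code unit, sign
        # extended from 4 bits.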
self.imm = sign_extending(inst[0] >> 12, 4)
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode13(Opcode):
ops = {0x13: 'const/16'}
num_code_units = 2
max_regs = 1
extra_data = 's'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = sign_extending(inst[1], 16)
def check_encoding(self, f=sys.stdout):
if (self.reg <= UINT4_MAX and INT4_MIN <= self.imm and
self.imm <= INT4_MAX):
f.write('warning: "const/16" can be encoded as a "const/4" more ')
f.write('efficiently as its register is <= %u and ' % (UINT4_MAX))
f.write('(%i <= %i <= %i)\n' % (INT4_MIN, self.imm, INT4_MAX))
return 2
return 0
def new_encoding(self, f=sys.stdout):
if (self.reg <= UINT4_MAX and self.imm > INT4_MAX and
self.imm <= (INT4_MAX + UINT4_MAX)):
f.write('"const/16" could be encoded as a new "const/u4" stores ')
f.write('a 4 bit unsigned offset from +8 for a constant range ')
f.write('of [8-24):\n')
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode14(Opcode):
ops = {0x14: 'const'}
num_code_units = 3
max_regs = 1
extra_data = 'i'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = inst.get_uint32(1)
def check_encoding(self, f=sys.stdout):
if (self.reg <= UINT8_MAX and INT16_MIN <= self.imm and
self.imm <= INT16_MAX):
f.write('warning: "const" can be encoded as a "const/16" more ')
f.write('efficiently as its register is < %u ' % (UINT8_MAX))
f.write('and (%i <= %i <= %i)\n' % (INT16_MIN, self.imm,
INT16_MAX))
return 2
return 0
def new_encoding(self, f=sys.stdout):
if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
f.write('"const" could be encoded as a new "const/u16" stores a ')
f.write('16 bit unsigned offset from 32768 instead of a 16 bit ')
f.write('signed value\n')
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode15(Opcode):
ops = {0x15: 'const/high16'}
num_code_units = 2
max_regs = 1
extra_data = 'h'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = inst[1] << 16
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode16(Opcode):
ops = {0x16: 'const-wide/16'}
num_code_units = 2
max_regs = 1
extra_data = 's'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = sign_extending(inst[1], 16)
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode17(Opcode):
ops = {0x17: 'const-wide/32'}
num_code_units = 3
max_regs = 1
extra_data = 'i'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = inst.get_sint32(1)
def check_encoding(self, f=sys.stdout):
if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
f.write('warning: "const-wide/32" can be encoded as a ')
f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
return 2
return 0
def new_encoding(self, f=sys.stdout):
if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
f.write('"const-wide/32" could be encoded as a new ')
f.write('"const-wide/u16" stores a 16 bit unsigned offset from ')
f.write('32768 instead of a 16 bit signed value\n')
return 2
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode18(Opcode):
ops = {0x18: 'const-wide/64'}
num_code_units = 5
max_regs = 1
extra_data = 'l'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = inst.get_uint64(1)
def check_encoding(self, f=sys.stdout):
if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
f.write('warning: "const-wide/64" can be encoded as a ')
f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
(INT16_MIN, self.imm, INT16_MAX))
return 6
if INT32_MIN <= self.imm and self.imm <= INT32_MAX:
f.write('warning: "const-wide/64" can be encoded as a ')
f.write('"const-wide/32" more efficiently as (%i <= %i <= %i)\n' %
(INT32_MIN, self.imm, INT32_MAX))
return 4
return 0
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode19(Opcode):
ops = {0x19: 'const-wide/high16'}
num_code_units = 2
max_regs = 1
extra_data = 'h'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.imm = sign_extending(inst[1], 16) << 48
def dump(self, f=sys.stdout):
f.write('%s v%u, #int %i // #%#x' %
(self.get_name(), self.reg, self.imm, self.imm))
def emulate(self, emulator):
emulator.write_register(self.reg, self.imm)
class Opcode1A(Opcode):
ops = {0x1a: 'const-string'}
num_code_units = 2
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.string_idx = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, string@%4.4x' %
(self.get_name(), self.reg, self.string_idx))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode1B(Opcode):
ops = {0x1b: 'const-string/jumbo'}
num_code_units = 3
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.string_idx = inst.get_uint32(1)
def dump(self, f=sys.stdout):
f.write('%s v%u, string@%8.8x' %
(self.get_name(), self.reg, self.string_idx))
def check_encoding(self, f=sys.stdout):
        if self.string_idx <= UINT16_MAX:
f.write('warning: "const-string/jumbo" can be encoded as a ')
f.write('"const-string" more efficiently as its offset is ')
f.write('<= UINT16_MAX\n')
return 2
return 0
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode1C(Opcode):
ops = {0x1c: 'const-class'}
num_code_units = 2
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.type = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode1D(Opcode):
ops = {0x1d: 'monitor-enter'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode1E(Opcode):
ops = {0x1e: 'monitor-exit'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode1F(Opcode):
ops = {0x1f: 'check-cast'}
num_code_units = 2
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.type = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode20(Opcode):
ops = {0x20: 'instance-of'}
num_code_units = 2
max_regs = 2
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
self.type = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u, type@%4.4x' %
(self.get_name(), self.regs[0], self.regs[1], self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode21(Opcode):
ops = {0x21: 'array-length'}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode22(Opcode):
ops = {0x22: 'new-instance'}
num_code_units = 2
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.type = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode23(Opcode):
ops = {0x23: 'new-array'}
num_code_units = 2
max_regs = 2
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
self.type = inst[1]
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u, type@%4.4x' %
(self.get_name(), self.regs[0], self.regs[1], self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode24(Opcode):
ops = {0x24: 'filled-new-array'}
num_code_units = 3
max_regs = 5
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
arg_count = inst[0] >> 12
self.type = inst[1]
self.regs = list()
regs = inst[2] | ((inst[0] << 8) & 0xf0000)
for i in range(arg_count):
self.regs.append(regs & 0xf)
regs >>= 4
def dump(self, f=sys.stdout):
f.write("%s {" % (self.get_name()))
first = True
for reg in self.regs:
if not first:
f.write(', ')
f.write("v%u" % (reg))
first = False
f.write("} type@%4.4x" % (self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode25(Opcode):
    ops = {0x25: 'filled-new-array/range'}
num_code_units = 3
max_regs = 'r'
extra_data = 'c'
format = '3rc'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
arg_count = inst.get_AA()
self.type = inst[1]
first_reg = inst[2]
self.regs = list()
for i in range(arg_count):
self.regs.append(first_reg + i)
def dump(self, f=sys.stdout):
f.write("%s {" % (self.get_name()))
first = True
for reg in self.regs:
if not first:
f.write(', ')
f.write("v%u" % (reg))
first = False
f.write("} type@%4.4x" % (self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode26(Opcode):
ops = {0x26: 'fill-array-data'}
num_code_units = 3
max_regs = 1
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.signed_offset = inst.get_sint32(1)
def dump(self, f=sys.stdout):
f.write('%s v%u, %8.8x // %s' % (self.get_name(), self.reg,
self.inst.code_unit_idx + self.signed_offset,
get_signed_hex_offset_as_str(self.signed_offset, 8)))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode27(Opcode):
ops = {0x27: 'throw'}
num_code_units = 1
max_regs = 1
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
def dump(self, f=sys.stdout):
f.write('%s v%u' % (self.get_name(), self.reg))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode28(Opcode):
ops = {0x28: 'goto'}
num_code_units = 1
max_regs = 0
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.signed_offset = inst.get_signed_AA()
def check_encoding(self, f=sys.stdout):
if self.signed_offset == 0:
f.write('error: "goto" has a zero offset (invalid encoding)\n')
return 0
def dump(self, f=sys.stdout):
f.write('%s %4.4x // %+i' % (self.get_name(),
self.inst.code_unit_idx + self.signed_offset,
self.signed_offset))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode29(Opcode):
ops = {0x29: 'goto/16'}
num_code_units = 2
max_regs = 0
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.signed_offset = sign_extending(inst[1], 16)
def dump(self, f=sys.stdout):
f.write('%s %4.4x // %+i' % (self.get_name(),
self.inst.code_unit_idx + self.signed_offset,
self.signed_offset))
def check_encoding(self, f=sys.stdout):
if self.signed_offset == 0:
f.write(
'error: "goto/16" has a zero offset (invalid encoding)\n')
elif INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
f.write('warning: "goto/16" can be encoded as a "goto" more ')
f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
return 2
return 0
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode2A(Opcode):
ops = {0x2A: 'goto/32'}
num_code_units = 3
max_regs = 0
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.signed_offset = inst.get_sint32(1)
def dump(self, f=sys.stdout):
f.write('%s %4.4x // %+i' % (self.get_name(),
self.inst.code_unit_idx + self.signed_offset,
self.signed_offset))
def check_encoding(self, f=sys.stdout):
if self.signed_offset == 0:
return 0
if INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
f.write('warning: "goto/32" can be encoded as a "goto" more ')
f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
return 2
if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
f.write('warning: "goto/32" can be encoded as a "goto/16" more ')
f.write('efficiently since (INT16_MIN <= offset <= INT16_MAX)\n')
return 4
return 0
def new_encoding(self, f=sys.stdout):
if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
return 0
if INT24_MIN <= self.signed_offset and self.signed_offset <= INT24_MAX:
f.write('"goto/32" could be encoded as a new "goto/16" where ')
f.write('that opcode uses the extra 8 bits in the first code ')
f.write('unit to provide a 24 bit branch range\n')
return 2
return 0
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode2B(Opcode):
ops = {0x2b: 'packed-switch'}
num_code_units = 3
max_regs = 1
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.branch = inst.get_uint32(1)
def dump(self, f=sys.stdout):
f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
self.inst.get_code_unit_index() + self.branch, self.branch))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode2C(Opcode):
ops = {0x2c: 'sparse-switch'}
num_code_units = 3
max_regs = 1
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.branch = inst.get_uint32(1)
def dump(self, f=sys.stdout):
f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
self.inst.get_code_unit_index() + self.branch, self.branch))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode2D_31(Opcode):
ops = {
0x2d: 'cmpl-float (lt bias)',
0x2e: 'cmpg-float (gt bias)',
0x2f: 'cmpl-double (lt bias)',
0x30: 'cmpg-double (gt bias)',
0x31: 'cmp-long',
}
num_code_units = 2
max_regs = 3
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst.get_uint8_lo(1))
self.regs.append(inst.get_uint8_hi(1))
def dump(self, f=sys.stdout):
f.write("%s v%u, v%u, v%u" %
(self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode32_37(Opcode):
ops = {
0x32: 'if-eq',
0x33: 'if-ne',
0x34: 'if-lt',
0x35: 'if-ge',
0x36: 'if-gt',
0x37: 'if-le',
}
num_code_units = 2
max_regs = 2
extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
self.signed_offset = sign_extending(inst[1], 16)
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u, %4.4x // %i' % (self.get_name(), self.regs[0],
self.regs[1], self.inst.code_unit_idx + self.signed_offset,
self.signed_offset))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode38_3D(Opcode):
ops = {
0x38: 'if-eqz',
0x39: 'if-nez',
0x3a: 'if-ltz',
0x3b: 'if-gez',
0x3c: 'if-gtz',
0x3d: 'if-lez',
}
num_code_units = 2
max_regs = 1
    extra_data = 't'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.signed_offset = sign_extending(inst[1], 16)
def dump(self, f=sys.stdout):
f.write('%s v%u, %4.4x // %s' % (self.get_name(), self.reg,
self.signed_offset + self.inst.code_unit_idx,
get_signed_hex_offset_as_str(self.signed_offset, 4)))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode44_51(Opcode):
ops = {
0x44: 'aget',
0x45: 'aget-wide',
0x46: 'aget-object',
0x47: 'aget-boolean',
0x48: 'aget-byte',
0x49: 'aget-char',
0x4a: 'aget-short',
0x4b: 'aput',
0x4c: 'aput-wide',
0x4d: 'aput-object',
0x4e: 'aput-boolean',
0x4f: 'aput-byte',
0x50: 'aput-char',
0x51: 'aput-short',
}
num_code_units = 2
max_regs = 3
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst.get_uint8_lo(1))
self.regs.append(inst.get_uint8_hi(1))
def dump(self, f=sys.stdout):
f.write("%s v%u, v%u, v%u" %
(self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode52_5f(Opcode):
ops = {
0x52: 'iget',
0x53: 'iget-wide',
0x54: 'iget-object',
0x55: 'iget-boolean',
0x56: 'iget-byte',
0x57: 'iget-char',
0x58: 'iget-short',
0x59: 'iput',
0x5a: 'iput-wide',
0x5b: 'iput-object',
0x5c: 'iput-boolean',
0x5d: 'iput-byte',
0x5e: 'iput-char',
0x5f: 'iput-short',
}
num_code_units = 2
max_regs = 2
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
self.field = inst[1]
def dump(self, f=sys.stdout):
f.write("%s v%u, v%u, field@%4.4x" %
(self.get_name(), self.regs[0], self.regs[1], self.field))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode60_6d(Opcode):
ops = {
0x60: 'sget',
0x61: 'sget-wide',
0x62: 'sget-object',
0x63: 'sget-boolean',
0x64: 'sget-byte',
0x65: 'sget-char',
0x66: 'sget-short',
0x67: 'sput',
0x68: 'sput-wide',
0x69: 'sput-object',
0x6a: 'sput-boolean',
0x6b: 'sput-byte',
0x6c: 'sput-char',
0x6d: 'sput-short',
}
num_code_units = 2
max_regs = 1
extra_data = 'c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.reg = inst.get_AA()
self.field = inst.get_uint16(1)
def dump(self, f=sys.stdout):
f.write("%s v%u, field@%4.4x" %
(self.get_name(), self.reg, self.field))
def emulate(self, emulator):
raise ValueError('emulate not supported')
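# Counters updated by Opcode6E_72.new_encoding() to tally how many invoke
# instructions could, or could not, use the hypothetical */min-range
# encoding it describes.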
can_use_new_encoding = 0
cant_use_new_encoding = 0
class Opcode6E_72(Opcode):
ops = {
0x6e: 'invoke-virtual',
0x6f: 'invoke-super',
0x70: 'invoke-direct',
0x71: 'invoke-static',
0x72: 'invoke-interface',
}
num_code_units = 3
max_regs = 5
extra_data = 'c'
format = '35c'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
arg_count = inst[0] >> 12
self.method_idx = inst[1]
self.regs = list()
regs = inst[2] | ((inst[0] << 8) & 0xf0000)
for i in range(arg_count):
self.regs.append(regs & 0xf)
regs >>= 4
def dump(self, f=sys.stdout):
f.write("%s {" % (self.get_name()))
first = True
for reg in self.regs:
if not first:
f.write(', ')
f.write("v%u" % (reg))
first = False
f.write("} method@%4.4x" % (self.method_idx))
def new_encoding(self, f=sys.stdout):
if (self.regs_are_sequential() and
(len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
len(self.regs) <= UINT4_MAX):
global can_use_new_encoding
can_use_new_encoding += 1
name = self.get_name()
f.write('"%s" can be encoded as "%s/min-range" ' % (name, name))
f.write('where the first register is contained in the first ')
f.write('opcode\n')
return 2
global cant_use_new_encoding
cant_use_new_encoding += 1
return 0
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode74_78(Opcode):
ops = {
0x74: 'invoke-virtual/range',
0x75: 'invoke-super/range',
0x76: 'invoke-direct/range',
0x77: 'invoke-static/range',
0x78: 'invoke-interface/range',
}
num_code_units = 3
max_regs = 'r'
extra_data = 'c'
format = '3rc'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
arg_count = inst.get_AA()
self.method_idx = inst[1]
first_reg = inst[2]
self.regs = list()
for i in range(arg_count):
self.regs.append(first_reg + i)
def dump(self, f=sys.stdout):
f.write("%s {" % (self.get_name()))
first = True
for reg in self.regs:
if not first:
f.write(', ')
f.write("v%u" % (reg))
first = False
f.write("} method@%4.4x" % (self.method_idx))
def new_encoding(self, f=sys.stdout):
if (self.regs_are_sequential() and
(len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
len(self.regs) <= UINT4_MAX):
name = self.get_name()
f.write('"%s" can be encoded as a "%s/min-range" ' % (name, name))
f.write('where the first register is contained in the first ')
f.write('opcode\n')
return 2
return 0
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode7B_8F(Opcode):
ops = {
0x7b: 'neg-int',
0x7c: 'not-int',
0x7d: 'neg-long',
0x7e: 'not-long',
0x7f: 'neg-float',
0x80: 'neg-double',
0x81: 'int-to-long',
0x82: 'int-to-float',
0x83: 'int-to-double',
0x84: 'long-to-int',
0x85: 'long-to-float',
0x86: 'long-to-double',
0x87: 'float-to-int',
0x88: 'float-to-long',
0x89: 'float-to-double',
0x8a: 'double-to-int',
0x8b: 'double-to-long',
0x8c: 'double-to-float',
0x8d: 'int-to-byte',
0x8e: 'int-to-char',
0x8f: 'int-to-short',
}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class Opcode90_AF(Opcode):
ops = {
0x90: 'add-int',
0x91: 'sub-int',
0x92: 'mul-int',
0x93: 'div-int',
0x94: 'rem-int',
0x95: 'and-int',
0x96: 'or-int',
0x97: 'xor-int',
0x98: 'shl-int',
0x99: 'shr-int',
0x9a: 'ushr-int',
0x9b: 'add-long',
0x9c: 'sub-long',
0x9d: 'mul-long',
0x9e: 'div-long',
0x9f: 'rem-long',
0xa0: 'and-long',
0xa1: 'or-long',
0xa2: 'xor-long',
0xa3: 'shl-long',
0xa4: 'shr-long',
0xa5: 'ushr-long',
0xa6: 'add-float',
0xa7: 'sub-float',
0xa8: 'mul-float',
0xa9: 'div-float',
0xaa: 'rem-float',
0xab: 'add-double',
0xac: 'sub-double',
0xad: 'mul-double',
0xae: 'div-double',
0xaf: 'rem-double',
}
num_code_units = 2
max_regs = 3
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst.get_uint8_lo(1))
self.regs.append(inst.get_uint8_hi(1))
def dump(self, f=sys.stdout):
f.write("%s v%u, v%u, v%u" %
(self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
def opIsCommutative(self):
'''Return True if the operation is commutative'''
op = self.get_op()
return (op == 0x90 or # add-int
op == 0x92 or # mul-int
op == 0x95 or # and-int
op == 0x96 or # or-int
op == 0x97 or # xor-int
op == 0x9b or # add-long
op == 0x9d or # mul-long
op == 0xa0 or # and-long
op == 0xa1 or # or-long
op == 0xa2 or # xor-long
op == 0xa6 or # add-float
op == 0xa8 or # mul-float
op == 0xab or # add-double
op == 0xad) # mul-double
def check_encoding(self, f=sys.stdout):
vAA = self.regs[0]
vBB = self.regs[1]
vCC = self.regs[2]
if vAA == vBB and vAA <= UINT4_MAX and vCC <= UINT4_MAX:
name = self.get_name()
f.write('warning: "%s" can be encoded more efficiently ' % (name))
f.write('as "%s/2addr v%u, v%u"\n' % (name, vAA, vCC))
return 2
if (vAA == vCC and vAA <= UINT4_MAX and vBB <= UINT4_MAX and
self.opIsCommutative()):
name = self.get_name()
f.write('warning: "%s" is commutative and can be ' % (name))
f.write('encoded more efficiently as "%s/2addr v%u, v%u"\n' %
(name, vAA, vBB))
return 2
return 0 # Return zero to indicate we can't save any bytes
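    # Example of the check above (illustrative): "add-int v1, v1, v2" fits
    # "add-int/2addr v1, v2"; because add-int is commutative, "add-int v1, v2, v1"
    # qualifies as well. Each rewrite drops one 16-bit code unit (2 bytes).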
def emulate(self, emulator):
raise ValueError('emulate not supported')
class OpcodeB0_CF(Opcode):
ops = {
0xb0: 'add-int/2addr',
0xb1: 'sub-int/2addr',
0xb2: 'mul-int/2addr',
0xb3: 'div-int/2addr',
0xb4: 'rem-int/2addr',
0xb5: 'and-int/2addr',
0xb6: 'or-int/2addr',
0xb7: 'xor-int/2addr',
0xb8: 'shl-int/2addr',
0xb9: 'shr-int/2addr',
0xba: 'ushr-int/2addr',
0xbb: 'add-long/2addr',
0xbc: 'sub-long/2addr',
0xbd: 'mul-long/2addr',
0xbe: 'div-long/2addr',
0xbf: 'rem-long/2addr',
0xc0: 'and-long/2addr',
0xc1: 'or-long/2addr',
0xc2: 'xor-long/2addr',
0xc3: 'shl-long/2addr',
0xc4: 'shr-long/2addr',
0xc5: 'ushr-long/2addr',
0xc6: 'add-float/2addr',
0xc7: 'sub-float/2addr',
0xc8: 'mul-float/2addr',
0xc9: 'div-float/2addr',
0xca: 'rem-float/2addr',
0xcb: 'add-double/2addr',
0xcc: 'sub-double/2addr',
0xcd: 'mul-double/2addr',
0xce: 'div-double/2addr',
        0xcf: 'rem-double/2addr',
}
num_code_units = 1
max_regs = 2
extra_data = 'x'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class OpcodeD0_D7(Opcode):
ops = {
0xd0: 'add-int/lit16',
0xd1: 'rsub-int/lit16',
0xd2: 'mul-int/lit16',
0xd3: 'div-int/lit16',
0xd4: 'rem-int/lit16',
0xd5: 'and-int/lit16',
0xd6: 'or-int/lit16',
0xd7: 'xor-int/lit16',
}
num_code_units = 2
max_regs = 2
extra_data = 's'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_A())
self.regs.append(inst.get_B())
self.imm = sign_extending(inst[1], 16)
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
self.regs[0], self.regs[1], self.imm, self.imm))
def emulate(self, emulator):
        # Simplified emulation: only the immediate is written to the destination
        # register; the source operand and the opcode's arithmetic are not modeled.
        emulator.write_register(self.regs[0], self.imm)
class OpcodeD8_E2(Opcode):
ops = {
0xd8: 'add-int/lit8',
0xd9: 'rsub-int/lit8',
0xda: 'mul-int/lit8',
0xdb: 'div-int/lit8',
0xdc: 'rem-int/lit8',
0xdd: 'and-int/lit8',
0xde: 'or-int/lit8',
0xdf: 'xor-int/lit8',
0xe0: 'shl-int/lit8',
0xe1: 'shr-int/lit8',
0xe2: 'ushr-int/lit8',
}
num_code_units = 2
max_regs = 2
extra_data = 'b'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
self.regs = list()
self.regs.append(inst.get_AA())
self.regs.append(inst.get_uint8_lo(1))
self.imm = sign_extending(inst.get_uint8_hi(1), 8)
def dump(self, f=sys.stdout):
f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
self.regs[0], self.regs[1], self.imm, self.imm))
def emulate(self, emulator):
        # Simplified emulation: only the immediate is written to the destination
        # register; the source operand and the opcode's arithmetic are not modeled.
        emulator.write_register(self.regs[0], self.imm)
class OpcodeFA(Opcode):
ops = {0xfa: 'invoke-polymorphic'}
num_code_units = 4
max_regs = 5
extra_data = 'cc'
def __init__(self, inst, code_units):
Opcode.__init__(self, inst)
raise ValueError('debug this when we find one of these')
arg_count = inst[0] >> 12
self.method_ref_idx = inst[1]
self.method_hdl_ref = inst[2]
self.regs = list()
regs = inst[3] | ((inst[0] << 8) & 0xf0000)
self.proto = inst[4]
for i in range(arg_count):
self.regs.append(regs & 0xf)
regs >>= 4
def dump(self, f=sys.stdout):
f.write("%s {" % (self.get_name()))
first = True
for reg in self.regs:
if not first:
f.write(', ')
f.write("v%u" % (reg))
first = False
f.write("} type@%4.4x" % (self.type))
def emulate(self, emulator):
raise ValueError('emulate not supported')
class CodeUnits(Opcode):
def __init__(self, code_units):
self.code_units = code_units
self.idx = 0
def index_is_valid(self):
return self.idx < len(self.code_units)
def get_index(self):
return self.idx
def peek_code_unit(self, idx):
return self.code_units[idx]
def get_int(self):
return sign_extending(self.get_uint(), 32)
def get_uint(self):
return self.get_code_unit() | (self.get_code_unit() << 16)
def get_code_unit(self):
idx = self.idx
self.idx += 1
return self.code_units[idx]
def swap16(u):
return ((u >> 8) & 0x00ff) | ((u << 8) & 0xff00)
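# Code units are stored little-endian, so swap16 is used when printing them in
# file byte order, e.g. swap16(0x1001) == 0x0110.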
class DexInstruction(object):
opcode_defs = list()
@classmethod
def initialize(cls):
opcode_classes = [
Opcode00,
Opcode01,
Opcode02,
Opcode03,
Opcode04,
Opcode05,
Opcode06,
Opcode07,
Opcode08,
Opcode09,
Opcode0A_0D,
Opcode0E,
Opcode0F,
Opcode10,
Opcode11,
Opcode12,
Opcode13,
Opcode14,
Opcode15,
Opcode16,
Opcode17,
Opcode18,
Opcode19,
Opcode1A,
Opcode1B,
Opcode1C,
Opcode1D,
Opcode1E,
Opcode1F,
Opcode20,
Opcode21,
Opcode22,
Opcode23,
Opcode24,
Opcode25,
Opcode26,
Opcode27,
Opcode28,
Opcode29,
Opcode2A,
Opcode2B,
Opcode2C,
Opcode2D_31,
Opcode32_37,
Opcode38_3D,
Opcode44_51,
Opcode52_5f,
Opcode60_6d,
Opcode6E_72,
Opcode74_78,
Opcode7B_8F,
Opcode90_AF,
OpcodeB0_CF,
OpcodeD0_D7,
OpcodeD8_E2,
OpcodeFA,
]
for i in range(256):
cls.opcode_defs.append(None)
for opcode_class in opcode_classes:
for op in opcode_class.ops:
if cls.opcode_defs[op] is None:
cls.opcode_defs[op] = opcode_class
else:
raise ValueError("registering the same opcode twice: "
"%#2.2x in %s" % (op, str(opcode_class)))
def dump(self, f=sys.stdout, suffix='\n'):
f.write('%4.4x:' % (self.code_unit_idx))
for code_unit in self.code_units:
f.write(' %4.4x' % (swap16(code_unit)))
num_code_units = len(self.code_units)
if num_code_units < 5:
pad = 5 - num_code_units
for i in range(pad):
f.write(' ')
f.write(' ')
self.instruction.dump(f=f)
if suffix:
f.write(suffix)
def __init__(self):
self.code_unit_idx = -1
self.code_units = None
def check_encoding(self, f=sys.stdout):
bytes_saved = self.instruction.check_encoding(f)
if bytes_saved:
self.dump(f)
return bytes_saved
def new_encoding(self, f=sys.stdout):
bytes_saved = self.instruction.new_encoding(f)
if bytes_saved:
self.dump(f)
return bytes_saved
def get_code_unit_index(self):
return self.code_unit_idx
def decode(self, code_units):
self.code_unit_idx = code_units.get_index()
self.code_units = list()
self.code_units.append(code_units.get_code_unit())
op = self.get_op()
opcode_class = self.opcode_defs[op]
if opcode_class is None:
raise ValueError("unsupported opcode %#4.4x" % (swap16(self[0])))
for i in range(1, opcode_class.num_code_units):
self.code_units.append(code_units.get_code_unit())
self.instruction = opcode_class(self, code_units)
def get_name(self):
return self.instruction.get_name()
def get_num_code_units(self):
return self.instruction.get_num_code_units()
def get_op(self):
'''Return the 1 byte op field that tells us what instruction this is'''
return self.code_units[0] & 0xff
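    # Field layout of a 16-bit code unit (illustrative): for 0xBA01, get_op() is
    # 0x01, get_A() is 0xA (bits 8-11), get_B() is 0xB (bits 12-15), and
    # get_AA() is 0xBA (bits 8-15).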
def get_A(self):
'''Get the 4 bit value of A'''
return (self.code_units[0] >> 8) & 0xf
def get_B(self):
'''Get the 4 bit value of B'''
return (self.code_units[0] >> 12) & 0xf
def get_AA(self):
'''Get the 8 bit value of AA from the byte next to the Op'''
return self.get_uint8_hi(0)
def get_signed_AA(self):
return sign_extending(self.get_AA(), 8)
def get_uint8_lo(self, idx):
return self.code_units[idx] & 0xff
def get_sint8_lo(self, idx):
        return sign_extending(self.get_uint8_lo(idx), 8)
def get_uint8_hi(self, idx):
return (self.code_units[idx] >> 8) & 0xff
def get_sint8_hi(self, idx):
        return sign_extending(self.get_uint8_hi(idx), 8)
def get_uint16(self, idx):
return self.code_units[idx]
def get_sint16(self, idx):
        return sign_extending(self.get_uint16(idx), 16)
def get_uint32(self, idx):
return self.code_units[idx + 1] << 16 | self.code_units[idx]
def get_sint32(self, idx):
return sign_extending(self.get_uint32(idx), 32)
def get_uint64(self, idx):
return (self.code_units[idx + 3] << 48 |
self.code_units[idx + 2] << 32 |
self.code_units[idx + 1] << 16 |
self.code_units[idx])
def get_sint64(self, idx):
return sign_extending(self.get_uint64(idx), 64)
def __len__(self):
'''Overload the length operator to give out the number of code units'''
return len(self.code_units)
def __getitem__(self, key):
'''Overload the [] operator to give out code units'''
return self.code_units[key]
def emulate(self, emulator):
self.instruction.emulate(emulator)
DexInstruction.initialize()
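# Minimal usage sketch (illustrative; assumes op 0x01 is a single-code-unit
# opcode handled by Opcode01, which is defined earlier in this file):
#   units = CodeUnits([0x1001])
#   inst = DexInstruction()
#   inst.decode(units)
#   inst.dump()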
def get_percentage(part, total):
return (float(part) / float(total)) * 100.0
def print_code_stats(size, total_size, file_size):
code_savings = get_percentage(size, total_size)
file_savings = get_percentage(size, file_size)
print('error: %u of %u code bytes (%u file bytes) ' % (size, total_size,
file_size), end='')
print('could be saved by encoding opcodes more efficiently ', end='')
print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
file_savings))
def print_debug_stats(size, file_size):
file_savings = get_percentage(size, file_size)
print('error: %u debug info bytes of %u file ' % (size, file_size), end='')
print('bytes could be saved by encoding debug info more ', end='')
print('efficiently (%2.2f%% file savings).\n' % (file_savings))
def print_encoding_stats(size, total_size, file_size):
code_savings = get_percentage(size, total_size)
file_savings = get_percentage(size, file_size)
    print('%u of %u code bytes could be saved ' % (size, total_size), end='')
    print('by encoding opcodes more efficiently ', end='')
print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
file_savings))
class DexEmulator(object):
def __init__(self):
self.registers = dict()
self.pc = 0
def read_register(self, reg):
if reg in self.registers:
return self.registers[reg]
raise ValueError("reading register with no value")
def write_register(self, reg, value):
self.registers[reg] = value
def emulate(self, uint16_array):
pass
def main():
usage = 'Usage: dex.py [options] [dex file(s)]'
parser = optparse.OptionParser(
usage=usage,
description='A script that parses DEX files.')
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
parser.add_option('-C', '--color',
action='store_true',
dest='color',
help='Enable colorized output',
default=False)
parser.add_option('-a', '--all',
action='store_true',
dest='dump_all',
help='Dump all DEX sections.',
default=False)
parser.add_option('-H', '--header',
action='store_true',
dest='dump_header',
help='Dump the DEX file header.',
default=False)
parser.add_option('--map-list',
action='store_true',
dest='dump_map_list',
help='Dump the DEX map list info.',
default=False)
parser.add_option('-s', '--strings',
action='store_true',
dest='dump_strings',
help='Dump the DEX strings.',
default=False)
parser.add_option('-t', '--types',
action='store_true',
dest='dump_types',
help='Dump the DEX types.',
default=False)
parser.add_option('-p', '--protos',
action='store_true',
dest='dump_protos',
help='Dump the DEX protos.',
default=False)
parser.add_option('-f', '--fields',
action='store_true',
dest='dump_fields',
help='Dump the DEX fields.',
default=False)
parser.add_option('-m', '--methods',
action='store_true',
dest='dump_methods',
help='Dump the DEX methods.',
default=False)
parser.add_option('--method-handles',
action='store_true',
dest='dump_method_handles',
help='Dump the DEX method handles.',
default=False)
parser.add_option('--classes',
action='store_true',
dest='dump_classes',
help='Dump the DEX classes.',
default=False)
parser.add_option('--class',
dest='class_filter',
help='Find a class by name. ' +
'Accepts `Lpath/to/Class;` or `path.to.Class`',
default=None)
parser.add_option('--method',
dest='method_filter',
help='Find a method by name. Must be used with --class',
default=None)
parser.add_option('--call-sites',
action='store_true',
dest='dump_call_sites',
help='Dump the DEX call sites.',
default=False)
parser.add_option('--code',
action='store_true',
dest='dump_code',
help='Dump the DEX code in all class methods.',
default=False)
parser.add_option('--code-items',
action='store_true',
dest='dump_code_items',
help='Dump the DEX code items.',
default=False)
parser.add_option('--code-duplication',
action='store_true',
dest='code_duplication',
help=('Dump any methods in the DEX file that have the '
'same instructions.'),
default=False)
parser.add_option('--debug',
action='store_true',
dest='debug',
help='Dump the DEX debug info.',
default=False)
parser.add_option('-d', '--disassemble',
action='store_true',
dest='dump_disassembly',
help='Dump the DEX code items instructions.',
default=False)
parser.add_option('--stats',
action='store_true',
dest='dump_stats',
help='Dump the DEX opcode statistics.',
default=False)
parser.add_option('--check-encoding',
action='store_true',
dest='check_encoding',
help='Verify opcodes are efficiently encoded.',
default=False)
parser.add_option('--new-encoding',
action='store_true',
dest='new_encoding',
help='Report byte savings from potential new encodings.',
default=False)
parser.add_option('--proguard',
dest='proguard',
                      help='Specify a proguard file to use for demangling.',
default=None)
(options, files) = parser.parse_args()
total_code_bytes_inefficiently_encoded = 0
total_debug_info_bytes_inefficiently_encoded = 0
total_new_code_bytes_inefficiently_encoded = 0
total_opcode_byte_size = 0
total_file_size = 0
op_name_to_size = {}
string_counts = {}
i = 0
if len(files) == 0:
print('No input files. {}'.format(usage))
return
for (i, path) in enumerate(files):
if os.path.splitext(path)[1] == '.apk':
print('error: dex.py operates on dex files, please unpack your apk')
return
print('Dex file: %s' % (path))
file_size = os.path.getsize(path)
total_file_size += file_size
dex = File(path, options.proguard)
if options.class_filter:
dex_class = dex.find_class(options.class_filter)
if dex_class:
if options.method_filter is None:
dex_class.dump()
for method in dex_class.get_methods():
method_name = method.get_name()
if options.method_filter:
if options.method_filter != method_name:
continue
method.dump()
else:
print('error: class definition not found for "%s"' % (
options.class_filter))
if options.dump_header or options.dump_all:
dex.dump_header(options)
print('')
if options.dump_map_list or options.dump_all:
dex.dump_map_list(options)
if options.dump_strings or options.dump_all:
dex.dump_string_ids(options)
if options.dump_types or options.dump_all:
dex.dump_type_ids(options)
if options.dump_protos or options.dump_all:
dex.dump_proto_ids(options)
if options.dump_fields or options.dump_all:
dex.dump_field_ids(options)
if options.dump_methods or options.dump_all:
dex.dump_method_ids(options)
if options.dump_classes or options.dump_all:
dex.dump_class_defs(options)
if options.dump_call_sites or options.dump_all:
dex.dump_call_site_ids(options)
if options.dump_method_handles or options.dump_all:
dex.dump_method_handle_items(options)
if options.dump_code or options.debug or options.dump_all:
dex.dump_code(options)
if options.dump_code_items:
dex.dump_code_items(options)
if (options.dump_disassembly or options.dump_stats or
options.check_encoding or options.new_encoding):
if options.dump_stats:
for string_item in dex.get_strings():
if string_item.data not in string_counts:
string_counts[string_item.data] = 0
string_counts[string_item.data] += 1
code_bytes_inefficiently_encoded = 0
debug_info_bytes_inefficiently_encoded = 0
new_code_bytes_inefficiently_encoded = 0
file_opcodes_byte_size = 0
classes = dex.get_classes()
used_code_item_indexes = list()
for cls in classes:
methods = cls.get_methods()
for method in methods:
if options.dump_disassembly or options.debug:
method.dump(
f=sys.stdout, dump_code=options.dump_disassembly,
dump_debug_info=options.debug)
opcodes_bytes_size = method.get_code_byte_size()
file_opcodes_byte_size += opcodes_bytes_size
total_opcode_byte_size += opcodes_bytes_size
if (options.dump_stats or options.check_encoding or
options.new_encoding):
for dex_inst in method.get_instructions():
if options.dump_stats:
op_name = dex_inst.get_name()
size = dex_inst.get_num_code_units() * 2
if op_name not in op_name_to_size:
op_name_to_size[op_name] = 0
op_name_to_size[op_name] += size
if options.check_encoding:
code_bytes_inefficiently_encoded += (
dex_inst.check_encoding())
if options.new_encoding:
new_code_bytes_inefficiently_encoded += (
dex_inst.new_encoding())
if options.check_encoding:
code_item_idx = method.get_code_item_index()
if code_item_idx >= 0:
used_code_item_indexes.append(code_item_idx)
debug_info = method.get_debug_info()
if debug_info:
debug_info_bytes_inefficiently_encoded += (
method.check_debug_info_encoding())
if options.check_encoding:
efficiently_encoded = True
if code_bytes_inefficiently_encoded > 0:
efficiently_encoded = False
total_code_bytes_inefficiently_encoded += (
code_bytes_inefficiently_encoded)
print_code_stats(code_bytes_inefficiently_encoded,
file_opcodes_byte_size, file_size)
if debug_info_bytes_inefficiently_encoded > 0:
efficiently_encoded = False
total_debug_info_bytes_inefficiently_encoded += (
debug_info_bytes_inefficiently_encoded)
print_debug_stats(debug_info_bytes_inefficiently_encoded,
file_size)
# Verify that all code items are used.
used_code_item_indexes.sort()
prev_ci_idx = 0
for ci_idx in used_code_item_indexes:
                    if ci_idx > prev_ci_idx + 1:
efficiently_encoded = False
for idx in range(prev_ci_idx + 1, ci_idx):
print('code_item[%u] is not used and its '
'code_item can be removed' % (idx))
prev_ci_idx = ci_idx
if efficiently_encoded:
print('file is efficiently encoded.')
if options.new_encoding:
if new_code_bytes_inefficiently_encoded > 0:
total_new_code_bytes_inefficiently_encoded += (
new_code_bytes_inefficiently_encoded)
print_encoding_stats(new_code_bytes_inefficiently_encoded,
file_opcodes_byte_size, file_size)
else:
print('file is efficiently encoded.')
if options.code_duplication:
dex.report_code_duplication()
if options.dump_stats:
duped_strings_byte_size = 0
for s in string_counts:
count = string_counts[s]
if count > 1:
s_len = len(s)
duped_strings_byte_size += (count - 1) * \
s_len + get_uleb128_byte_size(s_len)
if duped_strings_byte_size > 0:
print('%u bytes in duplicated strings across dex files.' % (
duped_strings_byte_size))
print('BYTESIZE %AGE OPCODE')
print('======== ===== =================================')
sorted_x = sorted(op_name_to_size.items(),
key=operator.itemgetter(1))
for (op_name, byte_size) in sorted_x:
percentage = get_percentage(byte_size, total_opcode_byte_size)
print('%-8u %5.2f %s' % (byte_size, percentage, op_name))
print('-------- ----- ---------------------------------')
print('%-8u 100.0' % (total_opcode_byte_size))
if i > 0:
if options.check_encoding:
if total_code_bytes_inefficiently_encoded > 0:
print_code_stats(total_code_bytes_inefficiently_encoded,
total_opcode_byte_size, total_file_size)
if total_debug_info_bytes_inefficiently_encoded > 0:
efficiently_encoded = False
print_debug_stats(total_debug_info_bytes_inefficiently_encoded,
total_file_size)
if options.new_encoding:
invoke_kind_percentage = get_percentage(
can_use_new_encoding,
can_use_new_encoding + cant_use_new_encoding)
            print('%u invoke-kind opcodes could use new encoding, ' % (
                can_use_new_encoding), end='')
print('%u could not (%2.2f%%)' % (cant_use_new_encoding,
invoke_kind_percentage))
if total_new_code_bytes_inefficiently_encoded > 0:
print_encoding_stats(
total_new_code_bytes_inefficiently_encoded,
total_opcode_byte_size, total_file_size)
if __name__ == '__main__':
main()
| 33.531599 | 80 | 0.546804 | [
"MIT"
] | gdawg/redex | tools/python/dex.py | 137,949 | Python |
#!/usr/bin/env python
"""Tests for `calvestbr` package."""
import unittest
from calvestbr import calvestbr
class TestCalvestbr(unittest.TestCase):
"""Tests for `calvestbr` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
| 18.045455 | 46 | 0.632242 | [
"MIT"
] | IsaacHiguchi/calvestbr | tests/test_calvestbr.py | 397 | Python |
# Copyright 2018 Samuel Payne [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
STUDY_IDS_MAP = {
"pdcbrca": {
"acetylome": "PDC000239", # Prospective Breast BI Acetylome
"phosphoproteome": "PDC000121", # Prospective BRCA Phosphoproteome S039-2
"proteome": "PDC000120", # Prospective BRCA Proteome S039-1
},
"pdcccrcc": {
"phosphoproteome": "PDC000128", # CPTAC CCRCC Discovery Study - Phosphoproteme S044-2
"proteome": "PDC000127", # CPTAC CCRCC Discovery Study - Proteome S044-1
},
"pdccoad": {
"phosphoproteome": "PDC000117", # Prospective COAD Phosphoproteome S037-3
"proteome": "PDC000116", # Prospective COAD Proteome S037-2
},
"pdcgbm": {
"acetylome": "PDC000245", # CPTAC GBM Discovery Study - Acetylome
"phosphoproteome": "PDC000205", # CPTAC GBM Discovery Study - Phosphoproteome
"proteome": "PDC000204", # CPTAC GBM Discovery Study - Proteome
},
"pdchnscc": {
"phosphoproteome": "PDC000222", # CPTAC HNSCC Discovery Study - Phosphoproteome
"proteome": "PDC000221", # CPTAC HNSCC Discovery Study - Proteome
},
"pdclscc": {
"acetylome": "PDC000233", # CPTAC LSCC Discovery Study - Acetylome
"phosphoproteome": "PDC000232", # CPTAC LSCC Discovery Study - Phosphoproteome
"proteome": "PDC000234", # CPTAC LSCC Discovery Study - Proteome
"ubiquitylome": "PDC000237", # CPTAC LSCC Discovery Study - Ubiquitylome
},
"pdcluad": {
"acetylome": "PDC000224", # CPTAC LUAD Discovery Study - Acetylome
"phosphoproteome": "PDC000149", # CPTAC LUAD Discovery Study - Phosphoproteome
"proteome": "PDC000153", # CPTAC LUAD Discovery Study - Proteome
},
"pdcov": {
"phosphoproteome": "PDC000119", # Prospective OV Phosphoproteome S038-3
"proteome": "PDC000118", # Prospective OV Proteome S038-2
},
"pdcpdac": {
"proteome": "PDC000270", # CPTAC PDAC Discovery Study - Proteome
"phosphoproteome": "PDC000271", # CPTAC PDAC Discovery Study - Phosphoproteome
},
"pdcucec": {
"acetylome": "PDC000226", # CPTAC UCEC Discovery Study - Acetylome
"phosphoproteome": "PDC000126", # UCEC Discovery - Phosphoproteome S043-2
"proteome": "PDC000125", # UCEC Discovery - Proteome S043-1
},
}
def download(dataset, version="latest", redownload=False):
dataset = dataset.lower()
if dataset.startswith("pdc"):
box_token = get_box_token()
if dataset != 'pdcbrca': # pdcbrca is the only dataset that doesn't need a mapping file for PDC
mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token) # download helper file for mapping aliquots to patient IDs
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics and mapping:
return True
else:
return False
else: # pdcbrca only needs omics
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics:
return True
else:
return False
elif dataset.startswith("pancan") or dataset == "all":
box_token = get_box_token()
if dataset == "pancanbrca":
sources = BRCA_SOURCES
elif dataset == "pancanccrcc":
sources = CCRCC_SOURCES
elif dataset == "pancancoad":
sources = COAD_SOURCES
elif dataset == "pancangbm":
sources = GBM_SOURCES
elif dataset == "pancanhnscc":
sources = HNSCC_SOURCES
elif dataset == "pancanlscc":
sources = LSCC_SOURCES
elif dataset == "pancanluad":
sources = LUAD_SOURCES
elif dataset == "pancanov":
sources = OV_SOURCES
elif dataset == "pancanucec":
sources = UCEC_SOURCES
elif dataset == "pancanpdac":
sources = PDAC_SOURCES
elif dataset == "all":
sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
else:
raise InvalidParameterError(f"{dataset} is not a valid dataset.")
overall_success = True
for source in sources:
if source.startswith("pdc"):
single_success = download(source, version=version, redownload=redownload)
else:
single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
if not single_success:
overall_success = False
return overall_success
else:
return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
def download_pdc_id(pdc_id, _download_msg=True):
"""Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
"""
if _download_msg:
clin_msg = f"Downloading clinical table for {pdc_id}..."
print(clin_msg, end="\r")
# Download the clinical table
clin = _download_study_clin(pdc_id).\
set_index("case_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(clin_msg), end="\r")
bio_msg = f"Downloading biospecimenPerStudy table for {pdc_id}..."
print(bio_msg, end="\r")
    # Get the biospecimenPerStudy table, which has both patient IDs and aliquot IDs
bio = _download_study_biospecimen(pdc_id).\
set_index("aliquot_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(bio_msg), end="\r")
quant_msg = f"Downloading quantitative table for {pdc_id}..."
print(quant_msg, end="\r")
# Get the quantitative data table
quant = _download_study_quant(pdc_id)
if _download_msg:
print(" " * len(quant_msg), end="\r")
format_msg = f"Formatting tables for {pdc_id}..."
print(format_msg, end="\r")
# Join the patient IDs from the biospecimenPerStudy table into the quant table
quant = quant.\
assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]).\
drop(columns=quant.columns[0]).\
set_index("aliquot_submitter_id").\
sort_index()
quant = bio.\
join(quant, how="inner").\
reset_index().\
set_index(["case_submitter_id", "aliquot_submitter_id"]).\
sort_index()
# Clear message
if _download_msg:
print(" " * len(format_msg), end="\r")
return clin, quant
def list_pdc_datasets():
for dataset in STUDY_IDS_MAP.keys():
print(f"Pdc{dataset[3:].title()}:")
for data_type in STUDY_IDS_MAP[dataset].keys():
print(f"\t{data_type}: {STUDY_IDS_MAP[dataset][data_type]}")
# Helper functions
def _pdc_download(dataset, version, redownload):
"""Download data for the specified cancer type from the PDC."""
dataset = str.lower(dataset)
if dataset == "pdcall":
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
            if not _pdc_download(dataset, version, redownload):
overall_result = False
return overall_result
if not dataset.startswith("pdc"):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if dataset not in STUDY_IDS_MAP.keys():
raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")
dataset_ids = STUDY_IDS_MAP[dataset]
# Get the directory to where to store the data, and see if it exists
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f"data_{dataset}")
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, "index.txt")
# Check that they also have the index
if not os.path.isfile(index_path):
redownload = True
else:
# The PDC doesn't have a versioning scheme for the tables they serve, so originally we just called it version 0.0 but later decided it would be better to call it 1.0. So, check if theirs is called 0.0; if so, replace it with 1.0.
with open(index_path, "r") as index_file:
first_line = index_file.readline()
if first_line.startswith("#0.0"):
redownload=True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
os.mkdir(data_dir)
# We'll combine all the clinical tables in case there are differences
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
# Print an update
download_msg = f"Downloading {dataset} {data_type} files..."
print(download_msg, end="\r")
# Get the clinical and quantitative tables for the study ID
clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)
# Print a new update
print(" " * len(download_msg), end="\r")
save_msg = f"Saving {dataset} {data_type} files..."
print(save_msg, end="\r")
# Append the clinical dataframe
        master_clin = pd.concat([master_clin, clin])
# Save the quantitative table
quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")
# Erase update
print(" " * len(save_msg), end="\r")
# Print an update
save_msg = f"Saving {dataset} clinical file..."
print(save_msg, end="\r")
# Drop any duplicated rows in combined clinical table, then save it too
master_clin = master_clin.drop_duplicates(keep="first")
master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")
# Write a dummy index with just version numbers
index_path = os.path.join(cancer_dir, "index.txt")
with open(index_path, "w") as index_file:
index_file.write("#1.0\n")
# Erase update
print(" " * len(save_msg), end="\r")
return True
def _download_study_clin(pdc_study_id):
"""Download PDC clinical data for a particular study."""
clinical_query = '''
query {
clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
}
}
'''
result_json = _query_pdc(clinical_query)
result_df = pd.\
DataFrame(result_json["data"]["clinicalPerStudy"])
return result_df
def _download_study_biospecimen(pdc_study_id):
"""Download PDC biospecimen data for a particular study."""
biospecimen_query = '''
query {
biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
aliquot_submitter_id
case_submitter_id
}
}
'''
result_json = _query_pdc(biospecimen_query)
result_df = pd.\
DataFrame(result_json["data"]["biospecimenPerStudy"])
return result_df
def _download_study_quant(pdc_study_id):
"""Download PDC quantitative data for a particular study."""
proteome_query = '''
query {
quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
}
'''
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json["data"]["quantDataMatrix"])
if result_df.shape[1] != 0:
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")
return result_df
def _query_pdc(query):
"""Send a GraphQL query to the PDC and return the results."""
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
except requests.RequestException: # Parent class for all exceptions in the requests module
raise NoInternetError("Insufficient internet. Check your internet connection.") from None
return response.json()
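# Example query (illustrative; field names taken from _check_ids_match below):
#   _query_pdc('query { study (pdc_study_id: "PDC000120" acceptDUA: true) '
#              '{ pdc_study_id, study_submitter_id } }')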
def _check_ids_match(ids_map):
"""Check that the ids in the download function's STUDY_IDS_MAP match up."""
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data["pdc_study_id"]
study_submitter_id = data["study_submitter_id"]
query = '''
query {
study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
pdc_study_id,
study_submitter_id
}
}
'''
idres = _query_pdc(query)
server_psi = idres["data"]["study"][0]["pdc_study_id"]
server_ssi = idres["data"]["study"][0]["study_submitter_id"]
assert server_psi == pdc_study_id
assert server_ssi == study_submitter_id
print(f"{server_psi} == {pdc_study_id}")
print(f"{server_ssi} == {study_submitter_id}")
print()
| 38.894988 | 241 | 0.666196 | [
"Apache-2.0"
] | PayneLab/cptac | cptac/pancan/file_download.py | 16,297 | Python |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(capabilities={"marionette": False}, firefox_binary="C:/Program Files/Mozilla Firefox/firefox.exe")
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
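    # Example usage (illustrative; the base_url value is hypothetical):
    #   app = Application(browser="chrome", base_url="http://localhost/addressbook/")
    #   app.open_home_page()
    #   app.destroy()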
    def is_valid(self):
        try:
            self.wd.current_url
            return True
        except Exception:
            return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
    def destroy(self):
self.wd.quit() | 30.470588 | 138 | 0.619691 | [
"Apache-2.0"
] | oksanacps/python_for_testing | fixture/application.py | 1,036 | Python |
from django.urls import path
from . import views
app_name = "chat"
urlpatterns = [
path('', views.home, name='home'),
path('post/', views.post, name='post'),
path('messages/', views.messages, name='messages'),
path('upload/', views.upload, name='views.upload'),
]
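# Reversing these routes (illustrative; assumes this URLconf is included at the
# project root): reverse("chat:messages") -> "/messages/".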
| 23.5 | 55 | 0.641844 | [
"MIT"
] | MdIbu/ibu_chat | chat/urls.py | 282 | Python |
#!/usr/bin/env python
import os
import logging
import requests
import json
import configparser
import sys
import time
import re
from os.path import dirname
from config import (
instanceA_url, instanceA_key, instanceA_path, instanceA_profile,
instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id,
instanceA_language_id, instanceA_language, instanceA_quality_match,
instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist,
instanceB_url, instanceB_key, instanceB_path, instanceB_profile,
instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id,
instanceB_language_id, instanceB_language, instanceB_quality_match,
instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist,
content_id_key, logger, is_sonarr, is_radarr, is_lidarr,
get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path,
is_in_docker, instance_sync_interval_seconds,
sync_bidirectionally, auto_search, skip_missing, monitor_new_content,
api_version, is_test_run, sync_monitor
)
def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
"""gets details of a content item"""
global monitor_new_content, auto_search
images = content.get('images')
for image in images:
image['url'] = '{0}{1}'.format(instance_url, image.get('url'))
monitored = content.get('monitored')
if monitor_new_content is not None:
monitored = True if monitor_new_content else False
payload = {
content_id_key: content.get(content_id_key),
'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')),
'monitored': monitored,
'rootFolderPath': instance_path,
'images': images,
}
add_options = content.get('addOptions', {})
search_missing = True if auto_search else False
if is_sonarr:
payload['title'] = content.get('title')
payload['titleSlug'] = content.get('titleSlug')
payload['seasons'] = content.get('seasons')
payload['year'] = content.get('year')
payload['tvRageId'] = content.get('tvRageId')
payload['seasonFolder'] = content.get('seasonFolder')
payload['languageProfileId'] = instance_language_id if instance_language_id else content.get(
'languageProfileId')
payload['tags'] = content.get('tags')
payload['seriesType'] = content.get('seriesType')
payload['useSceneNumbering'] = content.get('useSceneNumbering')
payload['addOptions'] = {
**add_options,
**{'searchForMissingEpisodes': search_missing}
}
elif is_radarr:
payload['title'] = content.get('title')
payload['year'] = content.get('year')
payload['tmdbId'] = content.get('tmdbId')
payload['titleSlug'] = content.get('titleSlug')
payload['addOptions'] = {
**add_options,
**{'searchForMovie': search_missing}
}
elif is_lidarr:
payload['artistName'] = content.get('artistName')
payload['albumFolder'] = content.get('albumFolder')
payload['metadataProfileId'] = content.get('metadataProfileId')
payload['addOptions'] = {
**add_options,
**{
"monitored": monitored,
"searchForMissingAlbums": search_missing
}
}
logger.debug(payload)
return payload
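# Illustrative payload built above for a Radarr movie (values are hypothetical,
# and content_id_key is assumed to be 'tmdbId' for Radarr):
#   {'tmdbId': 603, 'qualityProfileId': 1, 'monitored': True,
#    'rootFolderPath': '/movies', 'images': [...], 'title': 'The Matrix',
#    'year': 1999, 'titleSlug': 'the-matrix-603',
#    'addOptions': {'searchForMovie': False}}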
def get_quality_profiles(instance_session, instance_url, instance_key):
instance_profile_url = get_profile_path(instance_url, instance_key)
profiles_response = instance_session.get(instance_profile_url)
if profiles_response.status_code != 200:
logger.error(f'Could not get profile id from {instance_profile_url}')
exit_system()
instance_profiles = None
try:
instance_profiles = profiles_response.json()
return instance_profiles
except:
logger.error(f'Could not decode profile id from {instance_profile_url}')
exit_system()
def get_profile_from_id(instance_session, instance_url, instance_key, instance_profile, instance_name=''):
instance_profiles = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key)
profile = next((item for item in instance_profiles if item["name"].lower() == instance_profile.lower()), False)
if not profile:
logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile))
exit_system()
instance_profile_id = profile.get('id')
logger.debug(f'found profile_id (instance{instance_name}) "{instance_profile_id}" from profile "{instance_profile}"')
return instance_profile_id
def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''):
instance_tag_url = get_tag_path(instance_url, instance_key)
tag_response = instance_session.get(instance_tag_url)
if tag_response.status_code != 200:
logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr')
exit_system()
instance_tags = None
try:
instance_tags = tag_response.json()
except:
logger.error(f'Could not decode tag id from {instance_tag_url}')
exit_system()
tag_ids = []
for item in instance_tags:
for instance_item in instance_tag:
if item.get('label').lower() == instance_item.lower():
tag_ids.append(item)
if not tag_ids:
logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tags}')
exit_system()
instance_tag_ids = [tag.get('id') for tag in tag_ids]
logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}')
if instance_tag_ids is None:
logger.error(f'tag_id is None for instance {instance_name} and tag {instance_tag}')
exit_system()
return instance_tag_ids
def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''):
instance_language_url = get_language_path(instance_url, instance_key)
language_response = instance_session.get(instance_language_url)
if language_response.status_code != 200:
logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3')
exit_system()
instance_languages = None
try:
instance_languages = language_response.json()
except:
logger.error(f'Could not decode language id from {instance_language_url}')
exit_system()
instance_languages = instance_languages[0]['languages']
language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == instance_language.lower()), False)
if not language:
logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}')
exit_system()
instance_language_id = language.get('language', {}).get('id')
logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}')
if instance_language_id is None:
logger.error(f'language_id is None for instance {instance_name} and language {instance_language}')
exit_system()
return instance_language_id
def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds,
instanceB_path, instanceB_profile_id, instanceA_profile_filter_id,
instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match,
instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents):
global is_radarr, is_sonarr, is_test_run, sync_monitor
search_ids = []
# if given instance A profile id then we want to filter out content without that id
if instanceA_profile_filter_id:
logging.info(f'only filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}')
# for each content id in instance A, check if it needs to be synced to instance B
for content in instanceA_contents:
content_not_synced = content[content_id_key] not in instanceB_contentIds
        # only skip already-synced items if we aren't syncing monitoring as well
if content_not_synced or sync_monitor:
title = content.get('title') or content.get('artistName')
instance_path = instanceB_path or dirname(content.get('path'))
# if skipping missing files, we want to skip any that don't have files
if is_radarr and skip_missing:
content_has_file = content.get('hasFile')
if not content_has_file:
logging.debug(f'Skipping content {title} - file missing')
continue
            # if given a profile filter id, skip content whose quality profile doesn't match it
if instanceA_profile_filter_id:
quality_profile_id = content.get('qualityProfileId')
if instanceA_profile_filter_id != quality_profile_id:
logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}')
continue
            # if given a quality filter, skip content whose quality in instance A doesn't match yet
if is_radarr and instanceA_quality_match:
content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '')
if content_quality and not re.match(instanceA_quality_match, content_quality):
logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}')
continue
# if given tag filter then filter by tag - (Sonarr/Radarr v3 only)
if (is_sonarr or is_radarr) and instanceA_tag_filter_id:
content_tag_ids = content.get('tags')
if not (set(content_tag_ids) & set(instanceA_tag_filter_id)):
logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}')
continue
            # if a blacklist is given, don't sync matching slugs/ids
if instanceA_blacklist:
title_slug = content.get('titleSlug') or content.get('foreignArtistId')
if title_slug in instanceA_blacklist:
logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}')
continue
content_id = str(content.get('id'))
if content_id in instanceA_blacklist:
logging.debug(f'Skipping content {title} - blacklist ID: {content_id}')
continue
# generate content from instance A to sync into instance B
formatted_content = get_content_details(
content=dict(content),
instance_path=instance_path,
instance_profile_id=instanceB_profile_id,
instance_url=instanceB_url,
instance_language_id=instanceB_language_id,
)
instanceB_content_url = get_content_path(instanceB_url, instanceB_key)
if is_test_run:
logging.info('content title "{0}" synced successfully (test only)'.format(title))
elif content_not_synced:
# sync content if not synced
logging.info(f'syncing content title "{title}"')
sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content)
# check response and save content id for searching later on if success
if sync_response.status_code != 201 and sync_response.status_code != 200:
logger.error(f'server sync error for {title} - response: {sync_response.text}')
else:
try:
search_ids.append(int(sync_response.json()['id']))
except:
logger.error(f'Could not decode sync response from {instanceB_content_url}')
logging.info('content title "{0}" synced successfully'.format(title))
elif sync_monitor:
# else if is already synced and we want to sync monitoring then sync that now
# find matching content from instance B to check monitored status
matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents))
if(len(matching_content_instanceB) == 1):
matching_content_instanceB = matching_content_instanceB[0]
# if we found a content match from instance B, then check monitored status - if different then sync from A to B
if matching_content_instanceB['monitored'] != content['monitored']:
matching_content_instanceB['monitored'] = content['monitored']
instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id'))
sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB)
# check response and save content id for searching later on if success
if sync_response.status_code != 202:
logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}')
else:
try:
search_ids.append(int(sync_response.json()['id']))
except:
logger.error(f'Could not decode sync response from {instanceB_content_url}')
logging.info('content title "{0}" monitoring synced successfully'.format(title))
logging.info(f'{len(search_ids)} contents synced successfully')
def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''):
instance_contentIds = []
instance_content_url = get_content_path(instance_url, instance_key)
instance_contents = instance_session.get(instance_content_url)
if instance_contents.status_code != 200:
logger.error('instance{} server error - response {}'.format(instance_name, instance_contents.status_code))
exit_system()
else:
try:
instance_contents = instance_contents.json()
except:
logger.error(f'Could not decode contents from {instance_content_url}')
exit_system()
for content_to_sync in instance_contents:
instance_contentIds.append(content_to_sync[content_id_key])
logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name))
return instance_contents, instance_contentIds
def check_status(instance_session, instance_url, instance_key, instance_name=''):
global api_version
instance_status_url = get_status_path(instance_url, instance_key)
error_message = f'Could not connect to instance{instance_name}: {instance_status_url}'
status_response = None
try:
status_response = instance_session.get(instance_status_url)
if status_response.status_code != 200:
logger.error(error_message)
exit_system()
except:
logger.error(error_message)
exit_system()
if status_response is None:
logger.error(error_message)
exit_system()
else:
try:
status_response = status_response.json()
except Exception as error:
if not isinstance(status_response, dict):
logger.error(
f"Could not retrieve status for {instance_status_url}: {status_response} - {error}")
exit_system()
if(status_response.get('error')):
logger.error(f"{instance_status_url} error {status_response.get('error')}")
exit_system()
logger.debug(f"{instance_status_url} version {status_response.get('version')}")
return status_response
def sync_content():
global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, instanceA_blacklist, instanceB_blacklist
# get sessions
instanceA_session = requests.Session()
instanceA_session.trust_env = False
instanceB_session = requests.Session()
instanceB_session.trust_env = False
# if given a profile instead of a profile id then try to find the profile id
if not instanceA_profile_id and instanceA_profile:
instanceA_profile_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A')
if not instanceB_profile_id and instanceB_profile:
instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B')
logger.debug({
'instanceA_profile_id': instanceA_profile_id,
'instanceA_profile': instanceA_profile,
'instanceB_profile_id': instanceB_profile_id,
'instanceB_profile': instanceB_profile,
})
# do the same for profile id filters if they exist
if not instanceA_profile_filter_id and instanceA_profile_filter:
instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A')
if not instanceB_profile_filter_id and instanceB_profile_filter:
instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B')
logger.debug({
'instanceAprofile_filter_id': instanceA_profile_filter_id,
'instanceAprofile_filter': instanceA_profile_filter,
'instanceBprofile_filter_id': instanceB_profile_filter_id,
'instanceBprofile_filter': instanceB_profile_filter,
})
# do the same for tag id filters if they exist - (only Sonarr)
if is_sonarr or is_radarr:
if not instanceA_tag_filter_id and instanceA_tag_filter:
instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A')
if not instanceB_tag_filter_id and instanceB_tag_filter:
            instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_tag_filter, 'B')
logger.debug({
            'instanceA_tag_filter_id': instanceA_tag_filter_id,
            'instanceA_tag_filter': instanceA_tag_filter,
'instanceB_tag_filter_id': instanceB_tag_filter_id,
'instanceB_tag_filter': instanceB_tag_filter,
})
# if given language instead of language id then try to find the lanaguage id - (only Sonarr v3)
if is_sonarr:
if not instanceA_language_id and instanceA_language:
instanceA_language_id = get_language_from_id(
instance_session=instanceA_session,
instance_url=instanceA_url,
instance_key=instanceA_key,
instance_language=instanceA_language,
instance_name='A'
)
if not instanceB_language_id and instanceB_language:
instanceB_language_id = get_language_from_id(
instance_session=instanceB_session,
instance_url=instanceB_url,
instance_key=instanceB_key,
instance_language=instanceB_language,
instance_name='B'
)
logger.debug({
'instanceA_language_id': instanceA_language_id,
'instanceA_language': instanceA_language,
'instanceB_language_id': instanceB_language_id,
'instanceB_language': instanceB_language,
'is_sonarr': is_sonarr,
'api_version': api_version,
})
# get contents to compare
instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A')
instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B')
logger.info('syncing content from instance A to instance B')
sync_servers(
instanceA_contents=instanceA_contents,
instanceB_contents=instanceB_contents,
instanceB_contentIds=instanceB_contentIds,
instanceB_language_id=instanceB_language_id,
instanceB_path=instanceB_path,
instanceB_profile_id=instanceB_profile_id,
instanceB_session=instanceB_session,
instanceB_url=instanceB_url,
instanceA_profile_filter_id=instanceA_profile_filter_id,
instanceB_key=instanceB_key,
instanceA_quality_match=instanceA_quality_match,
instanceA_tag_filter_id=instanceA_tag_filter_id,
instanceA_blacklist=instanceA_blacklist
)
# if given bidirectional flag then sync from instance B to instance A
if sync_bidirectionally:
logger.info('syncing content from instance B to instance A')
sync_servers(
instanceA_contents=instanceB_contents,
instanceB_contents=instanceA_contents,
instanceB_contentIds=instanceA_contentIds,
instanceB_language_id=instanceA_language_id,
instanceB_path=instanceA_path,
instanceB_profile_id=instanceA_profile_id,
instanceB_session=instanceA_session,
instanceB_url=instanceA_url,
instanceA_profile_filter_id=instanceB_profile_filter_id,
instanceB_key=instanceA_key,
instanceA_quality_match=instanceB_quality_match,
instanceA_tag_filter_id=instanceB_tag_filter_id,
instanceA_blacklist=instanceB_blacklist
)
########################################################################################################################
def exit_system():
"""we dont want to exit if in docker"""
if is_in_docker:
raise Exception
else:
sys.exit(0)
if is_in_docker:
logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds))
sync_content()
if is_in_docker:
while True:
try:
time.sleep(instance_sync_interval_seconds)
sync_content()
except Exception as inst:
d = inst
| 46.085657 | 515 | 0.684504 | ["MIT"] | markschrik/syncarr | index.py | 23,135 | Python |
import logging
import os
import queue
import requests
import time
from threading import Thread
cri_sock = os.getenv("KIP_CRI_SOCK", "unix:///var/run/containerd/containerd.sock")
cri_client = os.getenv("KIP_CRI_CLI", False)
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")
POLICY_IF_NOT_PRESENT = "IfNotPresent"
POLICY_ALWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)
if cri_client and cri_client in ('Yes', 'yes', 'True', 'true'):
from docker.errors import NotFound
from cri_api.channel import Channel
from cri_api.images import Images
from cri_api.exceptions import ImageServiceException as APIError
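    # DockerMocker/CriClient below adapt the CRI images API to the small subset of the
    # docker-py client interface used in this script (docker_client.images.get / .pull).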
class DockerMocker:
def __init__(self, cli):
self.cli=cli
def get(self, img_name):
ret=self.cli.get_image(img_name)
if ret is None:
raise NotFound
else:
return ret
def pull(self, img_name):
try:
self.cli.pull_image(img_name)
except APIError as err:
if "failed to resolve image" in str(err):
raise NotFound(err)
else:
raise APIError(err)
class CriClient:
def __init__(self, cri_sock):
self.channel=Channel(cri_sock)
self.cli=Images(self.channel)
self.images=DockerMocker(self.cli)
docker_client = CriClient(cri_sock)
else:
from docker.client import DockerClient
from docker.errors import APIError
from docker.errors import NotFound
docker_client = DockerClient.from_env()
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')
def get_kernelspecs():
"""Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs"""
end_point = '{}/api/kernelspecs'.format(gateway_host)
logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
resp = requests.get(end_point)
if not resp.ok:
raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
return resp.json()
def fetch_image_names():
"""Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.
For process-proxy kernelspecs, the image names are contained in the config stanza - which
resides in the process-proxy stanza located in the metadata.
"""
kspecs = None
try:
kspecs_response = get_kernelspecs()
kspecs = kspecs_response.get('kernelspecs')
except Exception as ex:
logger.error("Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}".format(ex))
finally:
if kspecs is None:
return False
# Locate the configured images within the kernelspecs and add to set for duplicate management
images = set()
for key in kspecs.keys():
metadata = kspecs.get(key).get('spec').get('metadata')
if metadata is not None:
process_proxy = metadata.get('process_proxy')
if process_proxy is not None:
config = process_proxy.get('config')
if config is not None:
image_name = config.get('image_name')
if image_name is not None:
images.add(image_name)
executor_image_name = config.get('executor_image_name')
if executor_image_name is not None:
images.add(executor_image_name)
# Add the image names to the name queue
for image_name in images:
name_queue.put_nowait(image_name)
return True
def pull_image(image_name):
"""Pulls the image.
If the policy is `IfNotPresent` the set of pulled image names is
checked and, if present, the method returns. Otherwise, the pull attempt is made
and the set of pulled images is updated, when successful.
Since NotFound exceptions are tolerated, we trap for only that exception and let
the caller handle others.
"""
if policy == POLICY_IF_NOT_PRESENT:
if image_name in pulled_images:
# Image has been pulled, but make sure it still exists. If it doesn't exist
# let this drop through to actual pull
logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".
format(image_name, policy))
try:
t1 = time.time()
docker_client.images.get(image_name)
t2 = time.time()
logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
return
except NotFound:
pulled_images.remove(image_name)
logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))
logger.debug("Pulling image '{}'...".format(image_name))
try:
t1 = time.time()
docker_client.images.pull(image_name)
t2 = time.time()
pulled_images.add(image_name)
logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, t2 - t1))
except NotFound:
logger.warning("Image '{}' was not found!".format(image_name))
def puller():
"""Thread-based puller.
Gets image name from the queue and attempts to pull the image. Any issues, except
for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the
retries have been exceeded, the queue task is marked as done.
"""
while True:
image_name = name_queue.get()
if image_name is None:
break
i = 0
while i < num_retries:
try:
pull_image(image_name)
break
except APIError as ex:
i += 1
if i < num_retries:
logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".
format(i, image_name, ex))
else:
logger.error("Attempt {} to pull image '{}' failed with exception: {}".
format(i, image_name, ex))
name_queue.task_done()
if __name__ == "__main__":
logger = logging.getLogger('kernel_image_puller')
logger.setLevel(log_level)
# Determine pull policy.
pulled_images = set()
if policy not in policies:
logger.warning("Invalid pull policy detected in KIP_PULL_POLICY: '{}'. Using policy '{}'.".
format(policy, POLICY_IF_NOT_PRESENT))
policy = POLICY_IF_NOT_PRESENT
logger.info("Starting Kernel Image Puller with the following parameters:")
logger.info("KIP_GATEWAY_HOST: {}".format(gateway_host))
logger.info("KIP_CRI_CLI: {}".format(cri_client))
logger.info("KIP_CRI_SOCK: {}".format(cri_sock))
logger.info("KIP_INTERVAL: {} secs".format(interval))
logger.info("KIP_NUM_PULLERS: {}".format(num_pullers))
logger.info("KIP_NUM_RETRIES: {}".format(num_retries))
logger.info("KIP_PULL_POLICY: {}".format(policy))
logger.info("KIP_LOG_LEVEL: {}\n".format(log_level))
# Create an empty queue and start the puller threads. The number of puller threads is configurable.
name_queue = queue.Queue()
threads = []
for i in range(num_pullers):
t = Thread(target=puller, name="t{}".format(i + 1))
t.start()
threads.append(t)
# Fetch the image names, then wait for name queue to drain. Once drained, or if there were issues
# fetching the image names, wait the interval number of seconds and perform the operation again.
wait_interval = 5 # Start with 5 seconds to ensure EG service gets started...
time.sleep(wait_interval)
while True:
fetched = fetch_image_names()
if fetched:
wait_interval = interval # Once we have fetched kernelspecs, update wait_interval
name_queue.join()
logger.info("Images pulled. Sleeping {} seconds...\n".format(wait_interval))
else:
logger.info("Sleeping {} seconds to fetch image names...\n".format(wait_interval))
time.sleep(wait_interval)
| 37.057018 | 120 | 0.640549 | ["BSD-3-Clause"] | dummys/kernel-image-puller | kernel_image_puller.py | 8,449 | Python |
import socket
import timeit
import numpy as np
from PIL import Image
from datetime import datetime
import os
import sys
from collections import OrderedDict
sys.path.append('./')
# PyTorch includes
import torch
from torch.autograd import Variable
from torchvision import transforms
import cv2
# Custom includes
from networks import deeplab_xception_transfer, graph
from dataloaders import custom_transforms as tr
#
import argparse
import torch.nn.functional as F
label_colours = [(0,0,0)
, (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
, (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
def flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def flip_cihp(tail_list):
    '''
    Swap the channels of the left/right paired CIHP labels (arms, legs, shoes) so that
    predictions made on a horizontally flipped image can be merged with the original ones.
    :param tail_list: tail_list size is n_class x h x w
    :return: tensor of the same shape with the left/right label channels swapped
    '''
# tail_list = tail_list[0]
tail_list_rev = [None] * 20
for xx in range(14):
tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
tail_list_rev[14] = tail_list[15].unsqueeze(0)
tail_list_rev[15] = tail_list[14].unsqueeze(0)
tail_list_rev[16] = tail_list[17].unsqueeze(0)
tail_list_rev[17] = tail_list[16].unsqueeze(0)
tail_list_rev[18] = tail_list[19].unsqueeze(0)
tail_list_rev[19] = tail_list[18].unsqueeze(0)
return torch.cat(tail_list_rev,dim=0)
def decode_labels(mask, num_images=1, num_classes=20):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w = mask.shape
assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (
n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_, j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
def read_img(img_path):
    _img = Image.open(img_path).convert('RGB')  # returns an RGB PIL image
return _img
def img_transform(img, transform=None):
sample = {'image': img, 'label': 0}
sample = transform(sample)
return sample
def get_img_paths(imgs_dir):
img_paths = []
for dirpath, dirnames, filenames in os.walk(imgs_dir):
for filename in [f for f in filenames if f.endswith('.png') or f.endswith('.PNG') or f.endswith('.jpg') or f.endswith('.JPG') or f.endswith('.jpeg') or f.endswith('.JPEG')]:
img_paths.append(os.path.join(dirpath,filename))
img_paths.sort()
return img_paths
def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
'''
:param net:
:param img_path:
:param output_path:
:return:
'''
# adj
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
# multi-scale
scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
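    # each scale is evaluated twice (original and horizontally flipped input);
    # the resulting logits are averaged across all scales further below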
img = read_img(img_path)
testloader_list = []
testloader_flip_list = []
for pv in scale_list:
composed_transforms_ts = transforms.Compose([
tr.Scale_only_img(pv),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
composed_transforms_ts_flip = transforms.Compose([
tr.Scale_only_img(pv),
tr.HorizontalFlip_only_img(),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
testloader_list.append(img_transform(img, composed_transforms_ts))
# print(img_transform(img, composed_transforms_ts))
testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
# print(testloader_list)
start_time = timeit.default_timer()
# One testing epoch
net.eval()
# 1 0.5 0.75 1.25 1.5 1.75 ; flip:
for iii, sample_batched in enumerate(zip(testloader_list, testloader_flip_list)):
inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
inputs = inputs.unsqueeze(0)
inputs_f = inputs_f.unsqueeze(0)
inputs = torch.cat((inputs, inputs_f), dim=0)
if iii == 0:
_, _, h, w = inputs.size()
# assert inputs.size() == inputs_f.size()
# Forward pass of the mini-batch
inputs = Variable(inputs, requires_grad=False)
with torch.no_grad():
            if use_gpu:
inputs = inputs.cuda()
# outputs = net.forward(inputs)
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
outputs = outputs.unsqueeze(0)
if iii > 0:
outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = outputs_final + outputs
else:
outputs_final = outputs.clone()
################ plot pic
predictions = torch.max(outputs_final, 1)[1]
results = predictions.cpu().numpy()
vis_res = decode_labels(results)
parsing_im = Image.fromarray(vis_res[0])
parsing_im.save(output_path+'/{}.png'.format(output_name))
#we don't need the gray image
#cv2.imwrite(output_path+'/{}_gray.png'.format(output_name), results[0, :, :])
end_time = timeit.default_timer()
print('time used for the multi-scale image inference' + ' is :' + str(end_time - start_time))
if __name__ == '__main__':
'''argparse begin'''
parser = argparse.ArgumentParser()
# parser.add_argument('--loadmodel',default=None,type=str)
parser.add_argument('--loadmodel', default='', type=str)
parser.add_argument('--imgs_dir', default='', type=str)
parser.add_argument('--output_dir', default='', type=str)
parser.add_argument('--use_gpu', default=1, type=int)
opts = parser.parse_args()
net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20,
hidden_layers=128,
source_classes=7, )
if not opts.loadmodel == '':
x = torch.load(opts.loadmodel)
net.load_source_model(x)
print('load model:', opts.loadmodel)
else:
print('no model load !!!!!!!!')
raise RuntimeError('No model!!!!')
if opts.use_gpu >0 :
net.cuda()
use_gpu = True
else:
use_gpu = False
raise RuntimeError('must use the gpu!!!!')
img_paths = get_img_paths(opts.imgs_dir)
for idx, path in enumerate(img_paths):
filename = os.path.splitext(os.path.basename(path))[0]
output_name = filename +"_seg"
inference(net=net, img_path=path, output_path=opts.output_dir , output_name=output_name, use_gpu=use_gpu)
| 36.417431 | 181 | 0.623504 | ["MIT"] | ericwang0701/Graphonomy | exp/inference/inference_dir.py | 7,939 | Python |
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
url_base = "https://slack.com/api/"
primary_key = "id"
page_size = 100
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
# Slack uses a cursor-based pagination strategy.
# Extract the cursor from the response if it exists and return it in a format that can be used to update request parameters
json_response = response.json()
next_cursor = json_response.get("response_metadata", {}).get("next_cursor")
if next_cursor:
return {"cursor": next_cursor}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = {"limit": self.page_size}
if next_page_token:
params.update(**next_page_token)
return params
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[MutableMapping]:
json_response = response.json()
yield from json_response.get(self.data_field, [])
def backoff_time(self, response: requests.Response) -> Optional[float]:
# This method is called if we run into the rate limit. Slack puts the retry time in the `Retry-After` response header so we
# we return that value. If the response is anything other than a 429 (e.g: 5XX) fall back on default retry behavior.
# https://api.slack.com/docs/rate-limits#web
if response.status_code == 429:
return int(response.headers.get("Retry-After", 0))
@property
@abstractmethod
def data_field(self) -> str:
"""The name of the field in the response which contains the data"""
class Channels(SlackStream):
data_field = "channels"
def path(self, **kwargs) -> str:
return "conversations.list"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(**kwargs)
params["types"] = "public_channel"
return params
class ChannelMembers(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "conversations.members"
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params["channel"] = stream_slice["channel_id"]
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for member_id in super().parse_response(response, **kwargs):
# Slack just returns raw IDs as a string, so we want to put them in a "join table" format
yield {"member_id": member_id, "channel_id": stream_slice["channel_id"]}
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel_record in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel_id": channel_record["id"]}
class Users(SlackStream):
data_field = "members"
def path(self, **kwargs) -> str:
return "users.list"
# Incremental Streams
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
"""
    Yields the beginning and ending timestamps of each interval (one day by default) between the start date and now.
    Each yielded value is a pendulum.Period.
"""
now = pendulum.now()
# Each stream_slice contains the beginning and ending timestamp for a 24 hour period
while start_date <= now:
end_date = start_date + interval
yield pendulum.period(start_date, end_date)
start_date = end_date
class IncrementalMessageStream(SlackStream, ABC):
data_field = "messages"
cursor_field = "float_ts"
primary_key = ["channel_id", "ts"]
def __init__(self, default_start_date: DateTime, **kwargs):
self._start_ts = default_start_date.timestamp()
super().__init__(**kwargs)
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
params.update(**stream_slice)
return params
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
for record in super().parse_response(response, **kwargs):
record[self.primary_key[0]] = stream_slice.get("channel", "")
record[self.cursor_field] = float(record[self.primary_key[1]])
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current_stream_state = current_stream_state or {}
current_stream_state[self.cursor_field] = max(
latest_record[self.cursor_field], current_stream_state.get(self.cursor_field, self._start_ts)
)
return current_stream_state
class ChannelMessages(IncrementalMessageStream):
def path(self, **kwargs) -> str:
return "conversations.history"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
stream_state = stream_state or {}
start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
for period in chunk_date_range(start_date):
yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}
def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
# Channel is provided when reading threads
if "channel" in stream_slice:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
else:
# if channel is not provided, then get channels and read accordingly
channels = Channels(authenticator=self.authenticator)
for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
stream_slice["channel"] = channel_record["id"]
yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
def __init__(self, lookback_window: Mapping[str, int], **kwargs):
self.messages_lookback_window = lookback_window
super().__init__(**kwargs)
def path(self, **kwargs) -> str:
return "conversations.replies"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
"""
The logic for incrementally syncing threads is not very obvious, so buckle up.
To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
way to guarantee that a thread deep in the past didn't receive a new message.
A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
the logic simple to reason about.
Good luck.
"""
stream_state = stream_state or {}
channels_stream = Channels(authenticator=self.authenticator)
if self.cursor_field in stream_state:
# Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date
# found in the state minus 7 days to pick up any new messages in threads.
# If there is state always use lookback
messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
else:
# If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date
messages_start_date = pendulum.from_timestamp(self._start_ts)
messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
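        # For every day-sized chunk since the effective start date, list the parent messages in every
        # channel and emit one slice per parent (channel + ts) so conversations.replies can fetch its thread.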
for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
self.logger.info(f"Syncing replies {message_chunk}")
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
message_chunk["channel"] = channel["id"]
for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
yield {"channel": channel["id"], self.cursor_field: message[self.primary_key]}
class JoinChannelsStream(HttpStream):
"""
This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
Its responses should only be logged for debugging reasons, not read as records.
"""
url_base = "https://slack.com/api/"
http_method = "POST"
primary_key = "id"
def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
return [{"message": f"Successfully joined channel: {stream_slice['channel_name']}"}]
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None # No pagination
def path(self, **kwargs) -> str:
return "conversations.join"
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
channels_stream = Channels(authenticator=self.authenticator)
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
yield {"channel": channel["id"], "channel_name": channel["name"]}
def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
return {"channel": stream_slice["channel"]}
class SourceSlack(AbstractSource):
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
slack_client = WebClient(token=config["api_token"])
users = slack_client.users_list(limit=1).get("members", [])
if len(users) > 0:
return True, None
else:
return False, "There are no users in the given Slack instance"
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
authenticator = TokenAuthenticator(config["api_token"])
default_start_date = pendulum.parse(config["start_date"])
threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
streams = [
Channels(authenticator=authenticator),
ChannelMembers(authenticator=authenticator),
ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
Threads(authenticator=authenticator, default_start_date=default_start_date, lookback_window=threads_lookback_window),
Users(authenticator=authenticator),
]
# To sync data from channels, the bot backed by this token needs to join all those channels. This operation is idempotent.
if config["join_channels"]:
logger = AirbyteLogger()
logger.info("joining Slack channels")
join_channels_stream = JoinChannelsStream(authenticator=authenticator)
for stream_slice in join_channels_stream.stream_slices():
for message in join_channels_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
logger.info(message["message"])
return streams
| 47.822742 | 150 | 0.699979 | ["MIT"] | AetherUnbound/airbyte | airbyte-integrations/connectors/source-slack/source_slack/source.py | 14,299 | Python |
from typing import List
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
if len(words) <= 1:
return True
self.dic = {}
for i, char in enumerate(order):
self.dic[char] = i
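        # the words are sorted iff every adjacent pair is non-decreasing under the alien order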
for i in range(1, len(words)):
if self.cmp(words[i], words[i-1]) == -1:
return False
return True
def cmp(self, word1, word2):
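        # three-way comparison under the alien order: 1 if word1 > word2, -1 if word1 < word2, 0 if equal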
for i in range(min(len(word1), len(word2))):
if self.dic[word1[i]] > self.dic[word2[i]]:
return 1
if self.dic[word1[i]] < self.dic[word2[i]]:
return -1
if len(word1) > len(word2):
return 1
if len(word1) < len(word2):
return -1
return 0
| 31.5 | 66 | 0.478836 | ["MIT"] | QinganZhao/LXXtCode | LeetCode/953. Verifying an Alien Dictionary.py | 756 | Python |
# -*- coding: utf-8 -*-
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
        extra_packages.append('apt-get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
if len(extra_packages) > 1:
return '\n '.join(extra_packages)
return extra_packages[0]
def _generate_cluster_spec(config, kubernetes=False):
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
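    # kubernetes=True builds the spec for a one-off "driver" pod that re-runs this module with the
    # serialized config; otherwise the pod simply starts a plain dask worker (WORKER_COMM).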
if kubernetes:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub(r'[\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {
'metadata': metadata,
'spec': {
'restartPolicy': 'Never',
'containers': [{
'args': ['-c', run_commands],
'command': ['tini', '-g', '--', '/bin/sh'],
'image': worker_config.get('image', 'daskdev/dask:latest'),
'name': 'dask-worker',
'resources': worker_config.get('resources', {})
}]
}
}
return spec
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
def run_on_kubernetes(config, namespace='default'):
"""Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace were the pod will be created.
"""
# read local config
load_kube_config()
c = Configuration()
Configuration.set_default(c)
# create client and create pod on default namespace
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.')
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
help='Namespace were the pod will be created.')
return parser
def main():
# Parse args
parser = _get_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
# Logger setup
log_level = (3 - args.verbose) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
logging.basicConfig(level=log_level, format=fmt)
with open(args.config) as config_file:
if args.config.endswith('yaml') or args.config.endswith('yml'):
config = yaml.safe_load(config_file)
else:
config = json.load(config_file)
if args.create_pod:
run_on_kubernetes(config, args.namespace)
else:
results = run_dask_function(config)
if results is not None:
print(tabulate.tabulate(
results,
tablefmt='github',
headers=results.columns
))
if __name__ == '__main__':
main()
| 29.314815 | 97 | 0.637271 | ["MIT"] | HDI-Project/BTB | benchmark/btb_benchmark/kubernetes.py | 7,915 | Python |
"""
Django settings for webapp2 project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-jtp=j6oy)@&t#9l$zv#1iavkq#l-#9f$*z97d@623=nzeo@pgm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webapp2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapp2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.983871 | 91 | 0.701117 | ["MIT"] | ndavilo/webapp2 | webapp2/settings.py | 3,222 | Python |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Crown Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
import argparse
import configparser
import logging
import os
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Run the fuzz targets with all inputs from the seed_dir once.''',
)
parser.add_argument(
"-l",
"--loglevel",
dest="loglevel",
default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
'--valgrind',
action='store_true',
help='If true, run fuzzing binaries under the valgrind memory error detector',
)
parser.add_argument(
'-x',
'--exclude',
help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'--par',
'-j',
type=int,
default=4,
help='How many targets to merge or execute in parallel.',
)
parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
parser.add_argument(
'target',
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
parser.add_argument(
'--m_dir',
help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
)
parser.add_argument(
'-g',
'--generate',
action='store_true',
help='Create new corpus seeds (or extend the existing ones) by running'
' the given targets for a finite number of times. Outputs them to'
' the passed seed_dir.'
)
args = parser.parse_args()
# Set up logging
logging.basicConfig(
format='%(message)s',
level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
)
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
if not config["components"].getboolean("ENABLE_FUZZ"):
logging.error("Must have fuzz targets built")
sys.exit(1)
# Build list of tests
test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))
if not test_list_all:
logging.error("No fuzz targets found")
sys.exit(1)
logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
if test_list_error:
logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
if args.exclude:
for excluded_target in args.exclude.split(","):
if excluded_target not in test_list_selection:
logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
continue
test_list_selection.remove(excluded_target)
test_list_selection.sort()
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
if not args.generate:
test_list_seedless = []
for t in test_list_selection:
corpus_path = os.path.join(args.seed_dir, t)
if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
test_list_seedless.append(t)
test_list_seedless.sort()
if test_list_seedless:
logging.info(
"Fuzzing harnesses lacking a seed corpus: {}".format(
" ".join(test_list_seedless)
)
)
logging.info("Please consider adding a fuzz seed corpus at https://github.com/crown-core/qa-assets")
try:
help_output = subprocess.run(
args=[
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
).stderr
if "libFuzzer" not in help_output:
logging.error("Must be built with libFuzzer")
sys.exit(1)
except subprocess.TimeoutExpired:
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
if args.generate:
return generate_corpus_seeds(
fuzz_pool=fuzz_pool,
build_dir=config["environment"]["BUILDDIR"],
seed_dir=args.seed_dir,
targets=test_list_selection,
)
if args.m_dir:
merge_inputs(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
merge_dir=args.m_dir,
)
return
run_once(
fuzz_pool=fuzz_pool,
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
use_valgrind=args.valgrind,
)
def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
"""Generates new corpus seeds.
Run {targets} without input, and outputs the generated corpus seeds to
{seed_dir}.
"""
logging.info("Generating corpus seeds to {}".format(seed_dir))
def job(command):
logging.debug("Running '{}'\n".format(" ".join(command)))
logging.debug("Command '{}' output:\n'{}'\n".format(
' '.join(command),
subprocess.run(command, check=True, stderr=subprocess.PIPE,
universal_newlines=True).stderr
))
futures = []
for target in targets:
target_seed_dir = os.path.join(seed_dir, target)
os.makedirs(target_seed_dir, exist_ok=True)
command = [
os.path.join(build_dir, "src", "test", "fuzz", target),
"-runs=100000",
target_seed_dir,
]
futures.append(fuzz_pool.submit(job, command))
for future in as_completed(futures):
future.result()
def merge_inputs(*, fuzz_pool, corpus, test_list, build_dir, merge_dir):
logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
jobs = []
for t in test_list:
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-merge=1',
'-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
os.path.join(corpus, t),
os.path.join(merge_dir, t),
]
os.makedirs(os.path.join(corpus, t), exist_ok=True)
os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
def job(t, args):
output = 'Run {} with args {}\n'.format(t, " ".join(args))
output += subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
logging.debug(output)
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
future.result()
def run_once(*, fuzz_pool, corpus, test_list, build_dir, use_valgrind):
jobs = []
for t in test_list:
corpus_path = os.path.join(corpus, t)
os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
corpus_path,
]
if use_valgrind:
args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
def job(t, args):
output = 'Run {} with args {}'.format(t, args)
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output += result.stderr
return output, result
jobs.append(fuzz_pool.submit(job, t, args))
for future in as_completed(jobs):
output, result = future.result()
logging.debug(output)
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode))
sys.exit(1)
def parse_test_list(makefile):
with open(makefile, encoding='utf-8') as makefile_test:
test_list_all = []
read_targets = False
for line in makefile_test.readlines():
line = line.strip().replace('test/fuzz/', '').replace(' \\', '')
if read_targets:
if not line:
break
test_list_all.append(line)
continue
if line == 'FUZZ_TARGETS =':
read_targets = True
return test_list_all
if __name__ == '__main__':
main()
| 35.053191 | 180 | 0.600202 | ["MIT"] | BlockMechanic/crown | test/fuzz/test_runner.py | 9,885 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
@pulumi.output_type
class GetEventCategoriesResult:
"""
A collection of values returned by getEventCategories.
"""
def __init__(__self__, event_categories=None, id=None, source_type=None):
if event_categories and not isinstance(event_categories, list):
raise TypeError("Expected argument 'event_categories' to be a list")
pulumi.set(__self__, "event_categories", event_categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> List[str]:
"""
A list of the event categories.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventCategoriesResult(
event_categories=self.event_categories,
id=self.id,
source_type=self.source_type)
def get_event_categories(source_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
"""
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
"""
__args__ = dict()
__args__['sourceType'] = source_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(
event_categories=__ret__.event_categories,
id=__ret__.id,
source_type=__ret__.source_type)
| 33.867925 | 200 | 0.688858 | ["ECL-2.0", "Apache-2.0"] | mdop-wh/pulumi-aws | sdk/python/pulumi_aws/rds/get_event_categories.py | 3,590 | Python |
# Copyright 2018, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def closureTest1():
# Assign, but the value is not supposed to be used by the function, instead the later
# update is effective.
d = 1
def subby():
return d
d = 22222*2222
return subby()
def closureTest2():
# Using a closure variable that is not initialized at the time it is closured should
# work as well.
def subby():
return d
d = 2222*2222
return subby()
def closureTest3():
def subby():
return undefined_global # @UndefinedVariable
try:
return subby()
except NameError:
return 88
d = 1
def scopeTest4():
try:
return d
d = 1
except UnboundLocalError as e:
return repr(e)
print("Test closure where value is overwritten:", closureTest1())
print("Test closure where value is assigned only late:", closureTest2())
print("Test function where closured value is never assigned:", closureTest3())
print("Scope test where UnboundLocalError is expected:", scopeTest4())
def function():
pass
class ClosureLocalizerClass:
print("Function before assigned in a class:", function)
function = 1
print("Function after it was assigned in class:", function)
ClosureLocalizerClass()
def ClosureLocalizerFunction():
try:
function = function
print("Function didn't give unbound local error")
except UnboundLocalError as e:
print("Function gave unbound local error when accessing function before assignment:", repr(e))
ClosureLocalizerFunction()
class X:
def __init__(self, x):
self.x = x
def changingClosure():
print("Changing a closure taken value after it was taken.")
a = 1
def closureTaker():
return X(a)
x = closureTaker()
a=2
print("Closure value first time:", x.x)
x = closureTaker()
print("Closure value second time:", x.x)
changingClosure()
| 23.902655 | 102 | 0.675676 | ["Apache-2.0"] | 4O4/Nuitka | tests/basics/LateClosureAssignment.py | 2,701 | Python |
# SPDX-FileCopyrightText: Copyright (c) 2021 Martin Stephens
#
# SPDX-License-Identifier: MIT
"""These tests are run with a sensor connected to confirm that the correct
responses are received from the sensor.
The try/except clauses and the if __name__ == "__main__" guard allow the code to be
run with pytest on a Raspberry Pi or as a stand-alone file copied into main.py
on a CircuitPython board. To run on a board also copy 'biffobear_as3935.py' to
the lib folder.
"""
# Many Pylnt conventions are broken for the sake of test readability
# Others fail because Pylint doesn't understand Pytest.
# Therefore skip this file.
# pylint: skip-file
import time
try:
import pytest # If this works, we're on a Raspberry Pi
import os
from CircuitPython_AS3935 import biffobear_as3935 as as3935
# try:
# sensor_attached = os.environ["SENSOR_ATTACHED"]
# except (KeyError, AttributeError):
pytestmark = pytest.mark.skip(reason="No as3935 board connected.")
print("hello world")
except ImportError:
# Deduce that pytest didn't import, so we are running on a board
import biffobear_as3935 as as3935
import board
device = None
def setup_module():
# Returns an instance of the AS3935 driver
global device
# Look for I2C connected sensor
try:
print("Setting up I2C connection...")
i2c = board.I2C()
try:
interrupt = board.D25
except AttributeError:
interrupt = board.D7
device = as3935.AS3935_I2C(i2c, interrupt_pin=interrupt)
except ValueError:
print("No I2C connection found.")
print("Setting up SPI connection...")
spi = board.SPI()
try:
cs = board.D24
interrupt = board.D25
except AttributeError:
cs = board.D5
interrupt = board.D7
device = as3935.AS3935(spi, cs, interrupt_pin=interrupt)
def teardown_module():
# Reset the chip between runs for consistent test results
device.reset()
def test_indoor_outdoor():
assert device.indoor is True # Chip default
device.indoor = False
assert device.indoor is False
def test_power_down():
assert device.power_down is False # Chip default
device.power_down = True
assert device.power_down is True
device.power_down = False
assert device.power_down is False
def test_noise_floor_level():
assert device.noise_floor_limit == 0x02 # Chip default
# Test possible values
for level in range(8):
device.noise_floor_limit = level
assert device.noise_floor_limit == level
def test_watchdog():
assert device.watchdog == 0x02 # Chip default
# Test possible values
for level in range(11):
device.watchdog = level
assert device.watchdog == level
def test_spike_rejection():
assert device.spike_threshold == 0x02 # Chip default
# Test possible values
for level in range(12):
device.spike_threshold = level
assert device.spike_threshold == level
def test_disturber_mask():
assert device.disturber_mask is False # Chip default
device.disturber_mask = True
assert device.disturber_mask is True
def test_strike_count_threshold():
assert device.strike_count_threshold == 1
# Test possible values
for level in (1, 5, 9, 16):
device.strike_count_threshold = level
assert device.strike_count_threshold == level
def test_freq_divisor():
assert device.freq_divisor == 16 # Chip default
# Test possible values
for divisor in (16, 32, 64, 128):
device.freq_divisor = divisor
assert device.freq_divisor == divisor
def test_output_antenna_freq():
assert device.output_antenna_freq is False
device.output_antenna_freq = True
assert device.output_antenna_freq is True
def test_output_srco():
assert device.output_srco is False # Chip default
device.output_srco = True
assert device.output_srco is True
def test_output_trco():
assert device.output_trco is False # Chip default
device.output_trco = True
assert device.output_trco is True
def test_tuning_capacitance():
assert device.tuning_capacitance == 0 # Chip default
# Test possible values
for capacitance in range(0, 128, 8):
device.tuning_capacitance = capacitance
assert device.tuning_capacitance == capacitance
def test_reset():
# Set a none default value
device.freq_divisor = 32
assert device.freq_divisor == 32
device.reset()
# Confirm that is reset to default
assert device.freq_divisor == 16 # Chip default
def test_commands_which_do_not_change_readable_values():
# Call to see if an exception is raised
device.clear_stats()
device.calibrate_clocks()
def test_registers_with_unpredictable_states():
# Just read them to see if an error occurs since value depends on presence of lightning.
device.energy
device.distance
device.interrupt_status
def test_read_interrupt_pin():
# The state of the pin is unknown, so just read it error free.
device.interrupt_set
if __name__ == "__main__":
print("setup...")
setup_module()
device.reset()
print("test_indoor_outdoor...")
test_indoor_outdoor()
print("power_down...")
test_power_down()
print("noise_floor_level...")
test_noise_floor_level()
print("watchdog...")
test_watchdog()
print("spike_rejection...")
test_spike_rejection()
print("strike_count_threshold...")
test_strike_count_threshold()
print("disturber_mask...")
test_disturber_mask()
print("freq_divisor...")
test_freq_divisor()
print("output_antenna_freq...")
test_output_antenna_freq()
print("output_srco...")
test_output_srco()
print("output_trco...")
test_output_trco()
print("tuning_capacitance...")
test_tuning_capacitance()
print("reset...")
test_reset()
print("commands_which_do_not_change_readable_values...")
test_commands_which_do_not_change_readable_values()
print("registers_with_unpredictable_states...")
test_registers_with_unpredictable_states()
print("Interrupt pin...")
test_read_interrupt_pin()
print("teardown...")
teardown_module()
print("Tests complete.")
| 28.237668 | 92 | 0.697157 | [
"MIT",
"MIT-0",
"Unlicense"
] | BiffoBear/CircuitPython-AS3935 | tests/test_board_responses.py | 6,297 | Python |
import json
from banal import ensure_list
from functools import lru_cache
from pantomime.types import JSON
from requests.exceptions import TooManyRedirects
from opensanctions.core import Dataset
from opensanctions import helpers as h
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
SDN = Dataset.require("us_ofac_sdn")
@lru_cache(maxsize=None)
def deref_url(context, url):
try:
res = context.http.get(url, stream=True)
return res.url
except TooManyRedirects:
return url
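# parse_result() pops every field it maps from the raw record, so any keys that are
# not yet handled are surfaced by the context.pprint() call at the end of the function.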
def parse_result(context, result):
type_ = result.pop("type", None)
schema = context.lookup_value("type", type_)
if schema is None:
context.log.error("Unknown result type", type=type_)
return
entity = context.make(schema)
entity.id = context.make_slug(result.pop("id"))
entity_number = result.pop("entity_number", None)
if entity_number is not None:
assert int(entity_number)
entity.id = SDN.make_slug(entity_number)
name = result.pop("name", None)
name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
entity.add("name", name)
for alias in ensure_list(result.pop("alt_names", "")):
entity.add("alias", alias.split("; "))
entity.add("notes", result.pop("remarks", None))
entity.add("country", result.pop("country", None))
if entity.schema.is_a("Person"):
entity.add("position", result.pop("title", None))
entity.add("nationality", result.pop("nationalities", None))
entity.add("nationality", result.pop("citizenships", None))
for dob in result.pop("dates_of_birth", []):
entity.add("birthDate", h.parse_date(dob, FORMATS))
entity.add("birthPlace", result.pop("places_of_birth", None))
elif entity.schema.is_a("Vessel"):
entity.add("flag", result.pop("vessel_flag", None))
entity.add("callSign", result.pop("call_sign", None))
entity.add("type", result.pop("vessel_type", None))
grt = result.pop("gross_registered_tonnage", None)
entity.add("grossRegisteredTonnage", grt)
gt = result.pop("gross_tonnage", None)
entity.add("tonnage", gt)
# TODO: make adjacent owner entity
result.pop("vessel_owner", None)
assert result.pop("title", None) is None
assert not len(result.pop("nationalities", []))
assert not len(result.pop("citizenships", []))
assert not len(result.pop("dates_of_birth", []))
assert not len(result.pop("places_of_birth", []))
for address in result.pop("addresses", []):
obj = h.make_address(
context,
street=address.get("address"),
city=address.get("city"),
postal_code=address.get("postal_code"),
region=address.get("state"),
country=address.get("country"),
)
h.apply_address(context, entity, obj)
for ident in result.pop("ids", []):
country = ident.pop("country")
entity.add("country", country)
h.apply_feature(
context,
entity,
ident.pop("type"),
ident.pop("number"),
country=country,
date_formats=FORMATS,
start_date=ident.pop("issue_date", None),
end_date=ident.pop("expiration_date", None),
)
sanction = context.make("Sanction")
sanction.id = context.make_id(entity.id, "Sanction")
sanction.add("entity", entity)
sanction.add("program", result.pop("programs", []))
sanction.add("status", result.pop("license_policy", []))
sanction.add("reason", result.pop("license_requirement", []))
sanction.add("reason", result.pop("federal_register_notice", None))
sanction.add("startDate", result.pop("start_date", None))
sanction.add("endDate", result.pop("end_date", None))
sanction.add("country", "us")
sanction.add("authority", result.pop("source", None))
# TODO: deref
source_url = deref_url(context, result.pop("source_information_url"))
sanction.add("sourceUrl", source_url)
result.pop("source_list_url")
# TODO: what is this?
result.pop("standard_order", None)
context.emit(sanction)
context.emit(entity, target=True, unique=True)
if len(result):
context.pprint(result)
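# Crawler entry point: fetch the source JSON, register it as a resource, then emit
# one entity (and an accompanying Sanction) per result.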
def crawl(context):
path = context.fetch_resource("source.json", context.dataset.data.url)
context.export_resource(path, JSON, title=context.SOURCE_TITLE)
with open(path, "r") as file:
data = json.load(file)
for result in data.get("results"):
parse_result(context, result)
| 36.078125 | 81 | 0.637289 | [
"MIT"
] | pudo/opensanctions | opensanctions/crawlers/us_trade_csl.py | 4,618 | Python |
import argparse
import json
import os
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
# imports the model in model.py by name
from model import BinaryClassifier
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# set to eval mode, could use no_grad
model.to(device).eval()
print("Done loading model.")
return model
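# Note: model_fn(model_dir) is the hook the SageMaker PyTorch serving container calls to
# load the artifacts (model.pth and model_info.pth) saved by the training code below.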
# Gets training data in batches from the train.csv file
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_x = torch.from_numpy(train_data.drop([0], axis=1).values).float()
train_ds = torch.utils.data.TensorDataset(train_x, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
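# Expected train.csv layout (inferred from the loader above): column 0 holds the binary
# label and the remaining columns hold the feature values, e.g.
#     1,0.39,0.66,0.02
#     0,0.02,0.31,0.00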
# Provided training function
def train(model, train_loader, epochs, criterion, optimizer, device):
"""
This is the training method that is called by the PyTorch training script. The parameters
passed are as follows:
model - The PyTorch model that we wish to train.
train_loader - The PyTorch DataLoader that should be used during training.
epochs - The total number of epochs to train for.
criterion - The loss function used for training.
optimizer - The optimizer to use during training.
device - Where the model and data should be loaded (gpu or cpu).
"""
# training loop is provided
for epoch in range(1, epochs + 1):
model.train() # Make sure that the model is in training mode.
total_loss = 0
for batch in train_loader:
# get data
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
# get predictions from model
y_pred = model(batch_x)
# perform backprop
loss = criterion(y_pred, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("Epoch: {}, Loss: {}".format(epoch, total_loss / len(train_loader)))
## TODO: Complete the main code
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments
# when this script is executed, during a training job
# Here we set up an argument parser to easily access the parameters
parser = argparse.ArgumentParser()
# SageMaker parameters, like the directories for training data and saving models; set automatically
# Do not need to change
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
# Training Parameters, given
parser.add_argument('--batch-size', type=int, default=10, metavar='N',
help='input batch size for training (default: 10)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
## TODO: Add args for the three model parameters: input_features, hidden_dim, output_dim
# Model Parameters
parser.add_argument('--input_features', type=int, default=2, metavar='IN',
help='number of input features to model (default: 2)')
parser.add_argument('--hidden_dim', type=int, default=10, metavar='H',
help='hidden dim of model (default: 10)')
parser.add_argument('--output_dim', type=int, default=1, metavar='OUT',
help='output dim of model (default: 1)')
# args holds all passed-in arguments
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
# Load the training data.
train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
## --- Your code here --- ##
## TODO: Build the model by passing in the input params
# To get params from the parser, call args.argument_name, e.g. args.epochs or args.hidden_dim
# Don't forget to move your model to the device with .to(device) to use the GPU, if appropriate
model = BinaryClassifier(args.input_features, args.hidden_dim, args.output_dim).to(device)
## TODO: Define an optimizer and loss function for training
optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.BCELoss()
# Trains the model (given line of code, which calls the above training function)
train(model, train_loader, args.epochs, criterion, optimizer, device)
## TODO: complete model_info by adding the three model argument names; the first is given
# Keep the keys of this dictionary as they are
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'input_features': args.input_features,
'hidden_dim': args.hidden_dim,
'output_dim': args.output_dim,
}
torch.save(model_info, f)
## --- End of your code --- ##
# Save the model parameters
model_path = os.path.join(args.model_dir, 'model.pth')
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
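# Local smoke-test sketch (not part of the SageMaker training job; values are examples).
# SageMaker injects SM_MODEL_DIR, SM_OUTPUT_DATA_DIR and SM_CHANNEL_TRAIN when it runs
# this script; to exercise it outside SageMaker, set them yourself, e.g.:
#
#     SM_MODEL_DIR=./model SM_OUTPUT_DATA_DIR=./output SM_CHANNEL_TRAIN=./data \
#         python train.py --epochs 2 --batch-size 10 --input_features 2 --hidden_dim 10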
| 38.166667 | 110 | 0.657732 | [
"MIT"
] | ngocpc/Project_Plagiarism_Detection | Project_Plagiarism_Detection/source_pytorch/train.py | 6,641 | Python |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Optional, Union, cast
import tensorflow as tf
from merlin_standard_lib import Schema, Tag
from ..features.continuous import ContinuousFeatures
from ..features.embedding import EmbeddingFeatures
from ..tabular.base import TabularBlock
from .base import Block, BlockType
class ExpandDimsAndToTabular(tf.keras.layers.Lambda):
def __init__(self, **kwargs):
super().__init__(lambda x: dict(continuous=x), **kwargs)
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class DLRMBlock(Block):
def __init__(
self,
continuous_features: Union[List[str], Schema, Optional[TabularBlock]],
embedding_layer: EmbeddingFeatures,
bottom_mlp: BlockType,
top_mlp: Optional[BlockType] = None,
interaction_layer: Optional[tf.keras.layers.Layer] = None,
**kwargs
):
super().__init__(**kwargs)
_continuous_features: Optional[TabularBlock]
if isinstance(continuous_features, Schema):
_continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
cast(Schema, continuous_features), aggregation="concat"
),
)
elif isinstance(continuous_features, list):
_continuous_features = ContinuousFeatures.from_features(
continuous_features, aggregation="concat"
)
else:
_continuous_features = cast(Optional[TabularBlock], continuous_features)
if _continuous_features:
continuous_embedding = _continuous_features >> bottom_mlp >> ExpandDimsAndToTabular()
continuous_embedding.block_name = "ContinuousEmbedding"
self.stack_features = embedding_layer.merge(continuous_embedding, aggregation="stack")
else:
embedding_layer.set_aggregation("stack")
self.stack_features = embedding_layer
# self.stack_features = tabular.MergeTabular(embedding_layer, continuous_embedding,
# aggregation_registry="stack")
# self.stack_features = embedding_layer + continuous_embedding
# self.stack_features.aggregation_registry = "stack"
from ..layers import DotProductInteraction
self.interaction_layer = interaction_layer or DotProductInteraction()
self.top_mlp = top_mlp
@classmethod
def from_schema(
cls, schema: Schema, bottom_mlp: BlockType, top_mlp: Optional[BlockType] = None, **kwargs
):
embedding_layer = EmbeddingFeatures.from_schema(
schema.select_by_tag(Tag.CATEGORICAL),
infer_embedding_sizes=False,
embedding_dim_default=bottom_mlp.layers[-1].units,
)
if not embedding_layer:
raise ValueError("embedding_layer must be set.")
continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
schema.select_by_tag(Tag.CONTINUOUS), aggregation="concat"
),
)
return cls(continuous_features, embedding_layer, bottom_mlp, top_mlp=top_mlp, **kwargs)
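# --- Usage sketch (not part of the original class; helper names are assumptions) ---
# Assuming an MLP block helper is available in this package, a DLRM body could be
# assembled roughly like this:
#
#     bottom_mlp = MLPBlock([128, 64])   # last layer (64) sets the embedding dim
#     top_mlp = MLPBlock([64, 32])
#     dlrm = DLRMBlock.from_schema(schema, bottom_mlp, top_mlp=top_mlp)
#
# from_schema() builds the categorical embeddings from the schema and pins their
# dimension to the bottom MLP's last layer, which the dot-product interaction requires.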
def call(self, inputs, **kwargs):
stacked = self.stack_features(inputs)
interactions = self.interaction_layer(stacked)
return interactions if not self.top_mlp else self.top_mlp(interactions)
| 37.185185 | 98 | 0.680777 | [
"Apache-2.0"
] | Jwmc999/Transformers4Rec | transformers4rec/tf/block/dlrm.py | 4,016 | Python |
class SqlDataQualityQueries:
establisment_company_relation_check = ("""
-- Looks for establishment registrations that have no related company record.
-- For a fully loaded database this must return a total of zero (establishment + company).
SELECT count(e.basiccnpj) as total_without_relation from open_data.fact_establishment e
LEFT JOIN open_data.dim_company c ON c.basiccnpj = e.basiccnpj
WHERE c.basiccnpj is null;
""", "== 1", "== 0") | 58 | 109 | 0.732759 | [
"MIT"
] | paulo3011/opendatafrombrasil | source/python/airflow/runtime/plugins/helpers/sql_data_quality_queries.py | 464 | Python |
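# Illustrative sketch (not part of the repository above) of one way the check tuples in
# SqlDataQualityQueries could be consumed. The tuple is read here as
# (query, expected-row-count check, expected-value check); `hook` is assumed to behave
# like an Airflow PostgresHook exposing get_records(sql).
import operator

_OPS = {"==": operator.eq, "!=": operator.ne, ">": operator.gt, ">=": operator.ge, "<": operator.lt, "<=": operator.le}


def check_passes(actual, check):
    """Evaluate a check string such as '== 0' against an actual value."""
    symbol, expected = check.split()
    return _OPS[symbol](float(actual), float(expected))


def run_quality_check(hook, check):
    """Run one (sql, rows_check, value_check) tuple and raise if either check fails."""
    sql, rows_check, value_check = check
    records = hook.get_records(sql)
    if not check_passes(len(records), rows_check):
        raise ValueError(f"row count {len(records)} failed check '{rows_check}'")
    if not check_passes(records[0][0], value_check):
        raise ValueError(f"value {records[0][0]} failed check '{value_check}'")


# Example (hypothetical hook object):
#     run_quality_check(postgres_hook, SqlDataQualityQueries.establisment_company_relation_check)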