| max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
tasks/prequential_drift_evaluator.py | JanSurft/tornado | 103 | 11105267 | """
The Tornado Framework
By <NAME>
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import copy
import random
import numpy
from pympler import asizeof
from archiver.archiver import Archiver
from evaluators.classifier_evaluator import PredictionEvaluator
from evaluators.detector_evaluator import DriftDetectionEvaluator
from plotter.performance_plotter import *
from filters.attribute_handlers import *
from streams.readers.arff_reader import *
class PrequentialDriftEvaluator:
"""This class lets one run a classifier with a drift detector against a data stream,
and evaluate it prequentially over time. Also, one is able to measure the detection
false positive as well as false negative rates."""
def __init__(self, learner, drift_detector, attributes, attributes_scheme,
actual_drift_points, drift_acceptance_interval, project, memory_check_step=-1):
self.learner = learner
self.drift_detector = drift_detector
self.__instance_counter = 0
self.__num_rubbish = 0
self.__learner_error_rate_array = []
self.__learner_memory_usage = []
self.__learner_runtime = []
self.__actual_drift_points = actual_drift_points
self.__drift_acceptance_interval = drift_acceptance_interval
self.__located_drift_points = []
self.__drift_points_boolean = []
self.__drift_detection_memory_usage = []
self.__drift_detection_runtime = []
self.__attributes = attributes
self.__numeric_attribute_scheme = attributes_scheme['numeric']
self.__nominal_attribute_scheme = attributes_scheme['nominal']
self.__project_path = project.get_path()
self.__project_name = project.get_name()
self.__memory_check_step = memory_check_step
def run(self, stream, random_seed=1):
random.seed(random_seed)
for record in stream:
self.__instance_counter += 1
percentage = (self.__instance_counter / len(stream)) * 100
print("%0.2f" % percentage + "% of instances are prequentially processed!", end="\r")
            if "?" in record:
self.__num_rubbish += 1
continue
# ---------------------
# Data Transformation
# ---------------------
r = copy.copy(record)
for k in range(0, len(r) - 1):
if self.learner.LEARNER_CATEGORY == TornadoDic.NOM_CLASSIFIER and self.__attributes[k].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
r[k] = Discretizer.find_bin(r[k], self.__nominal_attribute_scheme[k])
elif self.learner.LEARNER_CATEGORY == TornadoDic.NUM_CLASSIFIER and self.__attributes[k].TYPE == TornadoDic.NOMINAL_ATTRIBUTE:
r[k] = NominalToNumericTransformer.map_attribute_value(r[k], self.__numeric_attribute_scheme[k])
# NORMALIZING NUMERIC DATA
if self.learner.LEARNER_CATEGORY == TornadoDic.NUM_CLASSIFIER:
r[0:len(r) - 1] = Normalizer.normalize(r[0:len(r) - 1], self.__numeric_attribute_scheme)
# ----------------------
# Prequential Learning
# ----------------------
if self.learner.is_ready():
real_class = r[len(r) - 1]
predicted_class = self.learner.do_testing(r)
prediction_status = True
if real_class != predicted_class:
prediction_status = False
# -----------------------
# Drift Detected?
# -----------------------
warning_status, drift_status = self.drift_detector.detect(prediction_status)
if drift_status:
self.__drift_points_boolean.append(1)
self.__located_drift_points.append(self.__instance_counter)
print("\n ->>> " + self.learner.LEARNER_NAME.title() + " faced a drift at instance " +
str(self.__instance_counter) + ".")
print("%0.2f" % percentage, " of instances are prequentially processed!", end="\r")
learner_error_rate = PredictionEvaluator.calculate(TornadoDic.ERROR_RATE,
self.learner.get_global_confusion_matrix())
self.__learner_error_rate_array.append(round(learner_error_rate, 4))
self.__learner_memory_usage.append(asizeof.asizeof(self.learner, limit=20))
self.__learner_runtime.append(self.learner.get_running_time())
self.__drift_detection_memory_usage.append(asizeof.asizeof(self.drift_detector, limit=20))
self.__drift_detection_runtime.append(self.drift_detector.RUNTIME)
self.learner.reset()
self.drift_detector.reset()
continue
if self.learner.LEARNER_TYPE == TornadoDic.TRAINABLE:
self.learner.do_training(r)
else:
self.learner.do_loading(r)
else:
if self.learner.LEARNER_TYPE == TornadoDic.TRAINABLE:
self.learner.do_training(r)
else:
self.learner.do_loading(r)
self.learner.set_ready()
self.learner.update_confusion_matrix(r[len(r) - 1], r[len(r) - 1])
learner_error_rate = PredictionEvaluator.calculate(TornadoDic.ERROR_RATE,
self.learner.get_confusion_matrix())
learner_error_rate = round(learner_error_rate, 4)
self.__learner_error_rate_array.append(learner_error_rate)
if self.__memory_check_step != -1:
if self.__instance_counter % self.__memory_check_step == 0:
self.__drift_detection_memory_usage.append(asizeof.asizeof(self.drift_detector, limit=20))
self.__drift_points_boolean.append(0)
print("\n" + "The stream is completely processed.")
self.__store_stats()
self.__plot()
print("\n\r" + "THE END!")
print("\a")
def __store_stats(self):
learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)
detector_name = self.drift_detector.DETECTOR_NAME
detector_setting = self.drift_detector.get_settings()
file_name = learner_name + "_" + detector_name + "." + detector_setting[0]
st_wr = open(self.__project_path + file_name.lower() + ".txt", "w")
lrn_error_rate = PredictionEvaluator.calculate_error_rate(self.learner.get_global_confusion_matrix())
dl, tp, fp, fn = DriftDetectionEvaluator.calculate_dl_tp_fp_fn(self.__located_drift_points,
self.__actual_drift_points,
self.__drift_acceptance_interval)
if len(self.__located_drift_points) != 0:
# learner stats
lrn_mem = numpy.mean(self.__learner_memory_usage)
lrn_ave_runtime = numpy.mean(self.__learner_runtime)
lrn_total_runtime = self.learner.get_total_running_time()
# ddm stats
ddm_mem = numpy.mean(self.__drift_detection_memory_usage)
ddm_avg_runtime = numpy.mean(self.__drift_detection_runtime)
ddm_total_runtime = self.drift_detector.TOTAL_RUNTIME
else:
lrn_mem = asizeof.asizeof(self.learner, limit=20)
lrn_ave_runtime = self.learner.get_total_running_time()
lrn_total_runtime = lrn_ave_runtime
ddm_mem = asizeof.asizeof(self.drift_detector, limit=20)
ddm_avg_runtime = self.drift_detector.TOTAL_RUNTIME
ddm_total_runtime = ddm_avg_runtime
stats = learner_name + " + " + detector_name + ": " + "\n\t" + \
"Classifier Error-rate: " + "%0.2f" % (100 * lrn_error_rate) + "%" + "," + "\n\t" + \
"Classifier Average Memory Usage (bytes): " + "%0.2f" % lrn_mem + "," + "\n\t" + \
"Classifier Average Runtime (ms): " + "%0.2f" % lrn_ave_runtime + "," + "\n\t" + \
"Classifier Total Runtime (ms): " + "%0.2f" % lrn_total_runtime + "," + "\n\t" + \
"Detection Delay: " + "%0.2f" % dl + " TP: " + str(tp) + " FP: " + str(fp) + " FN: " + str(fn) + "," + "\n\t" + \
"Average Detection Memory Usage (bytes): " + "%0.2f" % ddm_mem + "," + "\n\t" + \
"Average Detection Runtime (ms): " + "%0.2f" % ddm_avg_runtime + "," + "\n\t" + \
"Total Detection Runtime (ms): " + "%0.2f" % ddm_total_runtime + "," + "\n\t" + \
"Drift Points detected: " + str(self.__located_drift_points)
print(stats)
st_wr.write(stats)
st_wr.close()
def __plot(self):
learner_name = TornadoDic.get_short_names(self.learner.LEARNER_NAME)
detector_name = self.drift_detector.DETECTOR_NAME
detector_setting = self.drift_detector.get_settings()
file_name = learner_name + "_" + detector_name + "." + detector_setting[0]
up_range = numpy.max(self.__learner_error_rate_array)
up_range = 1 if up_range > 0.75 else round(up_range, 1) + 0.25
pair_name = learner_name + ' + ' + detector_name + "(" + detector_setting[1] + ")"
Plotter.plot_single(pair_name, self.__learner_error_rate_array, "Error-rate",
self.__project_name, self.__project_path, file_name, [0, up_range], 'upper right', 200)
Archiver.archive_single(pair_name, self.__learner_error_rate_array,
self.__project_path, self.__project_name, 'Error-rate')
Plotter.plot_single_ddm_points(pair_name, self.__drift_points_boolean,
self.__project_name, self.__project_path, file_name)
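# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The constructor and run() calls
# follow the signatures defined above; `my_learner`, `my_detector`,
# `my_project` and the ARFF/attribute-scheme helpers stand in for the
# corresponding Tornado framework objects and are not defined in this file.
#
#   labels, attributes, stream_records = ARFFReader.read("synthetic_stream.arff")
#   attributes_scheme = AttributeScheme.get_scheme(attributes, stream_records)
#   evaluator = PrequentialDriftEvaluator(my_learner, my_detector,
#                                         attributes, attributes_scheme,
#                                         actual_drift_points=[20000, 40000],
#                                         drift_acceptance_interval=250,
#                                         project=my_project)
#   evaluator.run(stream_records, random_seed=1)
# ---------------------------------------------------------------------------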
|
environment/settings.py | claytonbrown/lambda-refarch-iotbackend | 181 | 11105277 | """
Modify these values to match your configuration
"""
# AWS IoT endpoint settings
HOST_NAME = "ahrxvb36afpwx-ats.iot.eu-west-1.amazonaws.com" # replace with your AWS IoT endpoint for your region
HOST_PORT = 8883 # leave this as-is
# thing certs & keys
PRIVATE_KEY = "certs/private.pem.key" # replace with your private key name
DEVICE_CERT = "certs/certificate.pem.crt" # replace with your certificate name
ROOT_CERT = "certs/root-ca.pem"
# device & message settings
BATTERY_DISCHARGE_RANGE = (1, 3) # tuple that stores the possible discharge rates of the battery
# RANDOM_INTEGER_RANGE = (1,10) # tuple that stores the possible range of your sensor reading
QOS_LEVEL = 0 # AWS IoT supports QoS levels 0 & 1 for MQTT sessions
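# ---------------------------------------------------------------------------
# Minimal sketch of how a device script might consume these settings with the
# AWS IoT Device SDK (AWSIoTPythonSDK); illustrative only. The client id,
# topic name and payload below are placeholders, not part of this project.
#
#   from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
#   import settings
#
#   client = AWSIoTMQTTClient("example-device")
#   client.configureEndpoint(settings.HOST_NAME, settings.HOST_PORT)
#   client.configureCredentials(settings.ROOT_CERT, settings.PRIVATE_KEY,
#                               settings.DEVICE_CERT)
#   client.connect()
#   client.publish("device/telemetry", '{"battery": 97}', settings.QOS_LEVEL)
# ---------------------------------------------------------------------------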
|
motpy/utils.py | HaydenAI/motpy | 337 | 11105281 | import importlib.util
from motpy import Track
def ensure_packages_installed(packages, stop_if_some_missing: bool = True):
some_missing = False
for package in packages:
spec = importlib.util.find_spec(package)
if spec is None:
some_missing = True
print(f'package {package} is not installed')
if some_missing and stop_if_some_missing:
print('Please install required python packages to run this script')
exit(1)
def track_to_string(track: Track) -> str:
score = track.score if track.score is not None else -1
return f'ID: {track.id[:8]} | S: {score:.1f} | C: {track.class_id}'
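# A small usage sketch of the helper above (not part of the original module);
# the package names checked here are illustrative.
if __name__ == '__main__':
    # Prints a message and exits with status 1 if any listed package is missing.
    ensure_packages_installed(['numpy', 'cv2'])
    print('all required packages are installed')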
|
django/contrib/postgres/serializers.py | ni-ning/django | 61,676 | 11105296 | from django.db.migrations.serializer import BaseSerializer
class RangeSerializer(BaseSerializer):
def serialize(self):
module = self.value.__class__.__module__
# Ranges are implemented in psycopg2._range but the public import
# location is psycopg2.extras.
module = 'psycopg2.extras' if module == 'psycopg2._range' else module
return '%s.%r' % (module, self.value), {'import %s' % module}
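# Illustration (assumed output; the exact repr depends on the installed
# psycopg2 version): serializing a psycopg2 NumericRange is expected to yield
# a pair roughly like
#   RangeSerializer(NumericRange(1, 10)).serialize()
#   -> ("psycopg2.extras.NumericRange(1, 10, '[)')", {'import psycopg2.extras'})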
|
maro/rl/scheduling/__init__.py | yangboz/maro | 598 | 11105342 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .scheduler import Scheduler
from .simple_parameter_scheduler import LinearParameterScheduler, TwoPhaseLinearParameterScheduler
__all__ = [
"Scheduler",
"LinearParameterScheduler",
"TwoPhaseLinearParameterScheduler"
]
|
tests/issues/test_issue_289.py | pasqoc/heterocl | 236 | 11105349 | import heterocl as hcl
import numpy as np
def test_slice_op():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: A[x][8:0] + A[x][16:8])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = (np_A & 0xFF) + ((np_A >> 8) & 0xFF)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
|
tfimm/train/problems/classification.py | hyenal/tensorflow-image-models | 154 | 11105361 | import shutil
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import Any
import numpy as np
import tensorflow as tf
from ..interface import ProblemBase
from ..registry import cfg_serializable, get_class
@dataclass
class ClassificationConfig:
model: Any
model_class: str
optimizer: Any
optimizer_class: str = "OptimizerFactory"
# We treat binary classification problems as a special case, because for binary
# problems the model can return just one logit, which is the logit for class 1.
# The logit for class 0 is implicitly set to 0.0.
binary_loss: bool = False
# We apply weight decay by summing the squares of all trainable variables and
# multiplying them by `weight_decay`. We are ignoring Keras weight regularizers
# and the automatically generated model losses.
weight_decay: float = 0.0
mixed_precision: bool = False
# When saving the model we may want to use a different dtype for model inputs. E.g.,
# for images, `uint8` is a natural choice. In particular if the saved model is
# deployed via TF serving, `uint8` input reduces the network payload, even though
# the first thing the model does is cast everything to `float32`.
save_input_dtype: str = "float32"
@cfg_serializable
class ClassificationProblem(ProblemBase):
cfg_class = ClassificationConfig
def __init__(self, cfg: ClassificationConfig, timekeeping):
self.cfg = cfg
self.timekeeping = timekeeping
# Setting global state before building model
if cfg.mixed_precision:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Building the model
model, preprocess = get_class(cfg.model_class)(cfg=cfg.model)()
self.model = model
self.preprocess = preprocess
# Training metrics
self.avg_ce_loss = tf.keras.metrics.Mean(dtype=tf.float32)
self.avg_reg_loss = tf.keras.metrics.Mean(dtype=tf.float32)
self.avg_loss = tf.keras.metrics.Mean(dtype=tf.float32)
self.avg_acc = tf.keras.metrics.Accuracy(dtype=tf.float32)
# Optimizer
self.optimizer = get_class(cfg.optimizer_class)(
cfg=cfg.optimizer,
timekeeping=timekeeping,
mixed_precision=cfg.mixed_precision,
)()
def ckpt_variables(self, model_only: bool = False):
"""Return dictionary with all variables that need to be added to checkpoint."""
variables = {"model": self.model}
if not model_only:
variables["avg_ce_loss"] = self.avg_ce_loss
variables["avg_reg_loss"] = self.avg_reg_loss
variables["avg_loss"] = self.avg_loss
variables["avg_acc"] = self.avg_acc
variables["optimizer"] = self.optimizer
return variables
def start_epoch(self):
"""Called at the beginning of an epoch. Used to reset moving averages."""
self.avg_ce_loss.reset_states()
self.avg_reg_loss.reset_states()
self.avg_loss.reset_states()
self.avg_acc.reset_states()
def train_step(self, data, it):
"""Perform one step of training."""
img, labels = data
ce_loss, reg_loss, preds = self.train_step_inner(img, labels)
self.avg_ce_loss.update_state(ce_loss)
self.avg_reg_loss.update_state(reg_loss)
self.avg_loss.update_state(reg_loss + ce_loss)
self.avg_acc.update_state(preds, labels)
logs = {
"train/ce_loss": self.avg_ce_loss.result().numpy(),
"train/reg_loss": self.avg_reg_loss.result().numpy(),
"train/loss": self.avg_loss.result().numpy(),
"train/acc": self.avg_acc.result().numpy(),
}
return logs["train/loss"], logs
@tf.function
def train_step_inner(self, img, labels):
img = self.preprocess(img)
with tf.GradientTape() as tape:
logits = self.logits(img, training=True)
# Regardless of mixed precision or not, we compute the loss in float32
logits = tf.cast(logits, tf.float32)
ce_loss = self.softmax_loss(logits, labels)
# Weight decay
# TODO: Exclude certain variables from weight decay based on model cfg
reg_loss = 0.0
if self.cfg.weight_decay != 0.0:
for weight in self.model.trainable_variables:
reg_loss += tf.reduce_sum(tf.square(weight))
reg_loss *= self.cfg.weight_decay
# Total loss
loss = ce_loss + reg_loss
if self.cfg.mixed_precision:
loss = self.optimizer.get_scaled_loss(loss)
grads = tape.gradient(loss, self.model.trainable_variables)
if self.cfg.mixed_precision:
grads = self.optimizer.get_unscaled_gradients(grads)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
preds = self.predict(logits)
return ce_loss, reg_loss, preds
def logits(self, img, training):
logits = self.model(img, training=training)
logits = tf.cast(logits, tf.float32)
return logits
def predict(self, logits):
if self.cfg.binary_loss:
logits = tf.concat([tf.zeros_like(logits), logits], axis=-1)
preds = tf.argmax(logits, axis=-1)
return preds
def softmax_loss(self, logits, labels):
if self.cfg.binary_loss:
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(labels, tf.float32), logits=logits[:, 0]
)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits
)
loss = tf.reduce_mean(loss)
return loss
def logits_and_labels(self, dataset):
"""
Returns logits and labels from dataset. The dataset must be finite.
Returns:
np.ndarrays for logits and labels of shape (N, nb_classes) and (N,)
"""
def _inference(_img):
_img = self.preprocess(_img)
_logits = self.model(_img, training=False)
_logits = tf.cast(_logits, tf.float32)
if self.cfg.binary_loss:
# In the binary case the model can return logits only for class 1.
# The logit for class 0 is assumed to be 0.0.
_logits = _logits[..., 0]
_logits = tf.stack([tf.zeros_like(_logits), _logits], axis=-1)
# Normalize logits to have sum=0. While not necessary to compute
# accuracy, this becomes important if we want to compare decision
# thresholds across epochs and training runs.
_logits = _logits - tf.reduce_mean(_logits, axis=-1, keepdims=True)
return _logits
labels, logits = [], []
for img_batch, labels_batch in dataset:
logits_batch = _inference(img_batch)
logits.append(logits_batch.numpy())
labels.append(labels_batch.numpy())
labels = np.concatenate(labels, axis=0)
logits = np.concatenate(logits, axis=0)
return logits, labels
def validation(self, dataset):
"""
Function performs validation on a dataset and returns a dictionary of metrics.
"""
logits, labels = self.logits_and_labels(dataset)
preds = tf.argmax(logits, axis=-1).numpy()
# This is work in progress. For now, we measure only accuracy. Later, we should
# add top-5 accuracy, etc.
acc = np.sum(preds == labels) / len(labels)
logs = {"val/acc": acc}
return logs["val/acc"], logs
def save_model(self, save_dir):
"""Save models ready for inference."""
save_dir = Path(save_dir)
# We need to set policy to float32 for saving, otherwise we save models that
# perform inference with float16, which is extremely slow on CPUs
old_policy = tf.keras.mixed_precision.global_policy()
tf.keras.mixed_precision.set_global_policy("float32")
# After changing the policy, we need to create a new model using the policy
model_factory = get_class(self.cfg.model_class)(cfg=self.cfg.model)
save_model, save_preprocess = model_factory()
save_model.set_weights(self.model.get_weights())
# Now build the full inference model including preprocessing and logit layer
inputs = tf.keras.layers.Input(
shape=model_factory.tf_input_shape,
batch_size=None,
dtype=self.cfg.save_input_dtype,
name="input",
)
img = tf.cast(inputs, tf.float32)
img = save_preprocess(img)
logits = save_model(img, training=False)
if self.cfg.binary_loss:
# In the binary case the model can return logits only for class 1.
# The logit for class 0 is assumed to be 0.0.
logits = logits[..., 0]
logits = tf.stack([tf.zeros_like(logits), logits], axis=-1)
# Normalize logits to have sum=0.
logits = logits - tf.reduce_mean(logits, axis=-1, keepdims=True)
# So output layer has the right name
logits = tf.keras.layers.Activation("linear", name="logits")(logits)
inference_model = tf.keras.Model(inputs, logits)
model_dir = save_dir / "model"
with tempfile.TemporaryDirectory() as tmpdir:
# If `save_dir` points to a network file system or S3FS, sometimes TF saving
# can be very slow. It is faster to save to a temporary directory first and
# copying data across.
local_dir = Path(tmpdir) / "model"
tf.saved_model.save(inference_model, str(local_dir))
# TODO: Add support for S3 paths here...
shutil.copytree(str(local_dir), str(model_dir), dirs_exist_ok=True)
# Restore original float policy
tf.keras.mixed_precision.set_global_policy(old_policy)
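# ---------------------------------------------------------------------------
# Small self-contained check (not part of the original module) illustrating the
# binary-logit convention used above: a single logit z for class 1 with an
# implicit 0.0 logit for class 0 yields the same loss as a two-class softmax
# over [0, z]. Because of the relative imports, this module is normally run as
# part of the package rather than directly as a script.
if __name__ == "__main__":
    z = tf.constant([[1.7], [-0.3]], dtype=tf.float32)  # one logit per example
    y = tf.constant([1, 0], dtype=tf.int32)  # binary labels
    sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.cast(y, tf.float32), logits=z[:, 0]
    )
    two_class_logits = tf.concat([tf.zeros_like(z), z], axis=-1)
    softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=two_class_logits
    )
    print(np.allclose(sigmoid_loss.numpy(), softmax_loss.numpy()))  # True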
|
src/masonite/validation/__init__.py | cercos/masonite | 1,816 | 11105400 | from .RuleEnclosure import RuleEnclosure
from .MessageBag import MessageBag
from .Validator import (
BaseValidation,
ValidationFactory,
Validator,
accepted,
active_domain,
after_today,
before_today,
confirmed,
contains,
date,
different,
distinct,
does_not,
email,
equals,
exists,
file,
greater_than,
image,
in_range,
ip,
is_future,
is_list,
is_in,
is_past,
isnt,
json,
length,
less_than,
none,
numeric,
phone,
postal_code,
regex,
required,
required_if,
required_with,
string,
strong,
timezone,
truthy,
uuid,
video,
when,
)
|
reviewboard/reviews/urls.py | amalik2/reviewboard | 921 | 11105409 | from __future__ import unicode_literals
from django.conf.urls import include, url
from reviewboard.reviews import views
download_diff_urls = [
url(r'^orig/$',
views.DownloadDiffFileView.as_view(
file_type=views.DownloadDiffFileView.TYPE_ORIG),
name='download-orig-file'),
url(r'^new/$',
views.DownloadDiffFileView.as_view(
file_type=views.DownloadDiffFileView.TYPE_MODIFIED),
name='download-modified-file'),
]
diff_fragment_urls = [
url(r'^$', views.ReviewsDiffFragmentView.as_view(),
name='view-diff-fragment'),
url(r'^patch-error-bundle/$',
views.ReviewsDownloadPatchErrorBundleView.as_view(),
name='patch-error-bundle'),
]
diffviewer_revision_urls = [
url(r'^$',
views.ReviewsDiffViewerView.as_view(),
name="view-diff-revision"),
url(r'^raw/$',
views.DownloadRawDiffView.as_view(),
name='raw-diff-revision'),
url(r'^fragment/(?P<filediff_id>\d+)/(?:chunk/(?P<chunk_index>\d+)/)?',
include(diff_fragment_urls)),
url(r'^download/(?P<filediff_id>\d+)/',
include(download_diff_urls)),
]
diffviewer_interdiff_urls = [
url(r'^$',
views.ReviewsDiffViewerView.as_view(),
name="view-interdiff"),
url(r'^fragment/(?P<filediff_id>\d+)(?:-(?P<interfilediff_id>\d+))?/'
r'(?:chunk/(?P<chunk_index>\d+)/)?',
include(diff_fragment_urls)),
]
diffviewer_urls = [
url(r'^$', views.ReviewsDiffViewerView.as_view(), name='view-diff'),
url(r'^raw/$', views.DownloadRawDiffView.as_view(), name='raw-diff'),
url(r'^(?P<revision>\d+)/',
include(diffviewer_revision_urls)),
url(r'^(?P<revision>\d+)-(?P<interdiff_revision>\d+)/',
include(diffviewer_interdiff_urls)),
]
bugs_urls = [
url(r'^$', views.BugURLRedirectView.as_view(), name='bug_url'),
url(r'^infobox/$', views.BugInfoboxView.as_view(), name='bug_infobox'),
]
review_request_urls = [
# Review request detail
url(r'^$',
views.ReviewRequestDetailView.as_view(),
name='review-request-detail'),
url(r'^_updates/$',
views.ReviewRequestUpdatesView.as_view(),
name='review-request-updates'),
# Review request diffs
url(r'^diff/', include(diffviewer_urls)),
# Fragments
url(r'^_fragments/diff-comments/(?P<comment_ids>[\d,]+)/$',
views.CommentDiffFragmentsView.as_view(),
name='diff-comment-fragments'),
# File attachments
url(r'^file/(?P<file_attachment_id>\d+)/$',
views.ReviewFileAttachmentView.as_view(),
name='file-attachment'),
url(r'^file/(?P<file_attachment_diff_id>\d+)'
r'-(?P<file_attachment_id>\d+)/$',
views.ReviewFileAttachmentView.as_view(),
name='file-attachment'),
# Screenshots
url(r'^s/(?P<screenshot_id>\d+)/$',
views.ReviewScreenshotView.as_view(),
name='screenshot'),
# Bugs
url(r'^bugs/(?P<bug_id>[\w\.-]+)/', include(bugs_urls)),
# E-mail previews
url(r'^preview-email/(?P<message_format>text|html)/$',
views.PreviewReviewRequestEmailView.as_view(),
name='preview-review-request-email'),
url(r'^changes/(?P<changedesc_id>\d+)/preview-email/'
r'(?P<message_format>text|html)/$',
views.PreviewReviewRequestEmailView.as_view(),
name='preview-review-request-email'),
url(r'^reviews/(?P<review_id>\d+)/preview-email/'
r'(?P<message_format>text|html)/$',
views.PreviewReviewEmailView.as_view(),
name='preview-review-email'),
url(r'^reviews/(?P<review_id>\d+)/replies/(?P<reply_id>\d+)/'
r'preview-email/(?P<message_format>text|html)/$',
views.PreviewReplyEmailView.as_view(),
name='preview-review-reply-email'),
# Review Request infobox
url(r'^infobox/$',
views.ReviewRequestInfoboxView.as_view(),
name='review-request-infobox'),
]
urlpatterns = [
url(r'^new/$',
views.NewReviewRequestView.as_view(),
name='new-review-request'),
url(r'^(?P<review_request_id>\d+)/',
include(review_request_urls)),
]
|
webserver_example/webserver_example.py | iamontresor/unity-experiment-framework | 119 | 11105427 | from flask import Flask, request
from flask_httpauth import HTTPBasicAuth
from flask_cors import CORS
from werkzeug.security import generate_password_hash, check_password_hash
import os
# where data will be stored
OUTPUT_DIR = 'example_output'
# generate username/passwords
users = {
"susan": generate_password_hash("<PASSWORD>"),
}
# create the flask application
app = Flask(__name__)
# for username/password support
auth = HTTPBasicAuth()
# for Cross Origin Resource Sharing (required for WebGL builds)
# read more here: https://docs.unity3d.com/Manual/webgl-networking.html
CORS(app)
@app.route('/form', methods=['POST'])
@auth.login_required
def form():
"""
POST request handler that accepts the data coming in and saves it to disk.
"""
filepath = request.form["filepath"]
data = request.form["data"]
fullpath = os.path.join(OUTPUT_DIR, filepath)
directory, filename = os.path.split(fullpath)
os.makedirs(directory, exist_ok=True)
try:
with open(fullpath, 'w+') as f:
f.write(data)
print(f"Wrote data to {fullpath}.")
return app.response_class(status=200)
except:
return app.response_class(status=500)
@auth.verify_password
def verify_password(username, password):
if username in users and \
check_password_hash(users.get(username), password):
return username
@app.route('/')
@auth.login_required
def index():
"""
Basic Hello World at the index.
"""
return "Hello, {}!".format(auth.current_user())
if __name__ == '__main__':
app.run()
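# ---------------------------------------------------------------------------
# Example client call for the /form endpoint above (illustrative; run from a
# separate process while the server is up). The username matches the `users`
# dict; the password is whatever was hashed there and is elided in this file.
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:5000/form",
#       data={"filepath": "session_1/trial_1.csv", "data": "a,b,c\n1,2,3\n"},
#       auth=("susan", "<PASSWORD>"),
#   )
#   print(resp.status_code)  # 200 on success
# ---------------------------------------------------------------------------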
|
app/spider/reply/test_reply_task.py | kenmingwang/ASoulCnki | 384 | 11105446 | import get_reply_data
if __name__ == '__main__':
reply_param_tuples = [(1, 459141852, 0, 490521832557537540)]
get_reply_data.task(reply_param_tuples)
|
using_dl_to_annotate_protein_universe/hmm_baseline/phmmer.py | DionysisChristopoulos/google-research | 23,901 | 11105499 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Run PHMMer and get predictions vs actual family membership.
Given fasta files of unaligned amino acid sequences in a training file,
and fasta files of unaligned amino acid sequences in a test directory,
compute a prediction for the class membership of each sequence in the test
directory. These fasta files are generated by generate_hmmer_files.py.
The way that prediction of classes is determined is by a process similar to
1-nearest-neighbors.
PHMMer can be installed by running apt-get install hmmer.
The HMMER manual can be found at
http://eddylab.org/software/hmmer3/3.1b2/Userguide.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
from absl import logging
from Bio.SeqIO import FastaIO
import hmmer_utils
import parallel
import pfam_utils
import tensorflow.compat.v1 as tf
# Optimize the speed of running phmmer.
_BLOCK_SIZE = 131
_CPUS = 9
_THREADS = 8
def run_phmmer_for_query(train_sequence_file,
list_of_protein_name_and_sequence):
"""Return output of phmmer binary of one query against all sequences.
Args:
train_sequence_file: string. Filename of fasta file of training sequences.
list_of_protein_name_and_sequence: list of tuples of (protein_name,
sequence).
The protein_name: string. Of the form `sequence_name`_`family_accession`,
like OLF1_CHICK/41-290_PF00001.20.
The sequence: string. Amino acid sequence corresponding to protein_name.
Returns:
List of HMMEROutputs. Output of running the binary phmmer.
"""
protein_name_and_sequence_as_fasta_list = [
'>{}\n{}'.format(protein_name, sequence)
for protein_name, sequence in list_of_protein_name_and_sequence
]
protein_name_and_sequence_formatted = '\n'.join(
protein_name_and_sequence_as_fasta_list)
# Here we discard the normal stdout of phmmer with -o /dev/null, as we don't
# use it. Then redirect the normal file output from phmmer to /dev/stdout to
# avoid making a file and reading it back in for parsing. This allows us to
# immediately treat it as a python string.
# Use the stdin option for phmmer by using the filename '-'. Then, use
# p.communicate to send the process the input fasta strings.
p = subprocess.Popen(
(('phmmer --tblout /dev/stdout -o /dev/null -E 10.0 --cpu={cpus} - '
'{train_sequence_file}').format(
cpus=_CPUS, train_sequence_file=train_sequence_file)).split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(protein_name_and_sequence_formatted)
if stderr:
logging.warning('phmmer returned an error for sequences %s: %s',
protein_name_and_sequence_formatted, stderr)
all_query_identifiers = set(
protein_name for protein_name, _ in list_of_protein_name_and_sequence)
return hmmer_utils.parse_phmmer_output(
stdout, query_identifiers=all_query_identifiers)
def write_phmmer_predictions(train_sequence_file, test_sequence_file,
parsed_output):
"""Write prediction csv file for all files in test_sequence_dir.
The csv content is:
sequence_name,true_label,predicted_label
Where sequence_name is the uniprot identifier, including domain indices,
and true and predicted label are pfam family accession ids.
Args:
train_sequence_file: string. Filename of fasta file of unaligned training
sequences.
test_sequence_file: string. Fasta files of unaligned test sequences.
parsed_output: string. csv file for parsed phmmer outputs.
"""
logging.info('Writing predictions to %s', parsed_output)
with tf.io.gfile.GFile(test_sequence_file, 'r') as input_file:
batched_fasta_iterable = pfam_utils.batch_iterable(
FastaIO.SimpleFastaParser(input_file), _BLOCK_SIZE)
input_dict_to_phmmer_function = [
dict(
train_sequence_file=train_sequence_file,
list_of_protein_name_and_sequence=list_of_protein_name_and_sequence)
for list_of_protein_name_and_sequence in batched_fasta_iterable
]
results = parallel.RunInParallel(
run_phmmer_for_query,
input_dict_to_phmmer_function,
_THREADS,
cancel_futures=True)
with tf.io.gfile.GFile(parsed_output, 'w') as parsed_output_file:
for phmmer_query_result in results:
for phmmer_output in phmmer_query_result:
parsed_output_file.write(phmmer_output.format_as_csv() + '\n')
|
sam_convnet_drawer/examples/LeNet.py | b2220333/convnet-drawer | 580 | 11105503 | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from convnet_drawer import Model, Conv2D, MaxPooling2D, Flatten, Dense
from pptx_util import save_model_to_pptx
import config
def main():
config.inter_layer_margin = 65
config.channel_scale = 4 / 5
model = Model(input_shape=(32, 32, 1))
model.add(Conv2D(6, (5, 5), (1, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(16, (5, 5)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(10))
model.save_fig(os.path.splitext(os.path.basename(__file__))[0] + ".svg")
save_model_to_pptx(model, os.path.splitext(os.path.basename(__file__))[0] + ".pptx")
if __name__ == '__main__':
main()
|
deep_learning/train_val_model.py | gautard/pystatsml | 123 | 11105537 | import numpy as np
import torch
import time
import copy
# NOTE: the function below references a global `device`; it is assumed to be
# defined by the surrounding material, so a standard definition is added here
# to keep the snippet self-contained.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train_val_model(model, criterion, optimizer, dataloaders, num_epochs=25,
scheduler=None, log_interval=None):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
    # Store losses and accuracies across epochs
losses, accuracies = dict(train=[], val=[]), dict(train=[], val=[])
for epoch in range(num_epochs):
if log_interval is not None and epoch % log_interval == 0:
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
nsamples = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
nsamples += inputs.shape[0]
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if scheduler is not None and phase == 'train':
scheduler.step()
#nsamples = dataloaders[phase].dataset.data.shape[0]
epoch_loss = running_loss / nsamples
epoch_acc = running_corrects.double() / nsamples
losses[phase].append(epoch_loss)
accuracies[phase].append(epoch_acc)
if log_interval is not None and epoch % log_interval == 0:
print('{} Loss: {:.4f} Acc: {:.2f}%'.format(
phase, epoch_loss, 100 * epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if log_interval is not None and epoch % log_interval == 0:
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:.2f}%'.format(100 * best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, losses, accuracies
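# ---------------------------------------------------------------------------
# Minimal synthetic-data usage sketch (not part of the original material); it
# only exercises train_val_model() above and the module-level `device`.
if __name__ == '__main__':
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset
    X = torch.randn(256, 20)
    y = (X[:, 0] > 0).long()  # simple separable labels
    ds = TensorDataset(X, y)
    dataloaders = dict(train=DataLoader(ds, batch_size=32, shuffle=True),
                       val=DataLoader(ds, batch_size=32))
    model = nn.Sequential(nn.Linear(20, 16), nn.ReLU(), nn.Linear(16, 2)).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    model, losses, accuracies = train_val_model(model, criterion, optimizer,
                                                dataloaders, num_epochs=5,
                                                log_interval=1)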
|
tests/structures/test_with.py | akubera/batavia | 1,256 | 11105560 | from unittest import expectedFailure
from ..utils import TranspileTestCase
class WithTests(TranspileTestCase):
def test_enter_exit(self):
self.assertCodeExecution("""
class mgr:
def __enter__(self):
print('__enter__')
def __exit__(self, exc_type, val, traceback):
print('__exit__')
with mgr():
print('inside')
""")
def test_return_from_enter(self):
self.assertCodeExecution("""
class mgr:
def __enter__(self):
return 42
def __exit__(self, exc, val, tb):
print('cleaning up')
with mgr() as x:
print(x)
""")
def test_raise(self):
self.assertCodeExecution("""
class mgr:
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
print('cleaning up')
try:
with mgr():
print('inside')
raise Exception('oops')
print('done')
except Exception as e:
print(type(e), e)
""")
def test_suppress_exception(self):
self.assertCodeExecution("""
class mgr:
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
print('supress')
return True
with mgr():
raise KeyError(42)
print('raised')
print('done')
""")
# There is no traceback class in batavia
@expectedFailure
def test_exit_args(self):
self.assertCodeExecution("""
class mgr:
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
print('exc', type(exc), exc)
print('val', type(val), val)
print('tb', type(tb), tb)
return True
with mgr():
pass
with mgr():
raise Exception
""")
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GLES1/OES/single_precision.py | ShujaKhalid/deep-rl | 210 | 11105575 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_OES_single_precision'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_OES_single_precision',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLclampf)
def glClearDepthfOES(depth):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glClipPlanefOES(plane,equation):pass
@_f
@_p.types(None,_cs.GLclampf,_cs.GLclampf)
def glDepthRangefOES(n,f):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glFrustumfOES(l,r,b,t,n,f):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glGetClipPlanefOES(plane,equation):pass
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glOrthofOES(l,r,b,t,n,f):pass
|
recipes/Python/310791_skeletal/recipe-310791.py | tdiprima/code | 2,023 | 11105629 | import compiler.visitor
class NotImplementedException(Exception): pass
class VisitorSkeleton(compiler.visitor.ASTVisitor):
def visitAdd(self, node):
# Add attributes
# left left operand
# right right operand
raise NotImplementedException('visitAdd')
def visitAnd(self, node):
# And attributes
# nodes list of operands
raise NotImplementedException('visitAnd')
def visitAssAttr(self, node):
# AssAttr attributes
# expr expression on the left-hand side of the dot
# attrname the attribute name, a string
# flags XXX
raise NotImplementedException('visitAssAttr')
def visitAssList(self, node):
# AssList attributes
# nodes list of list elements being assigned to
raise NotImplementedException('visitAssList')
def visitAssName(self, node):
# AssName attributes
# name name being assigned to
# flags XXX
raise NotImplementedException('visitAssName')
def visitAssTuple(self, node):
# AssTuple attributes
# nodes list of tuple elements being assigned to
raise NotImplementedException('visitAssTuple')
def visitAssert(self, node):
# Assert attributes
# test the expression to be tested
# fail the value of the <tt class="exception">AssertionError</tt>
raise NotImplementedException('visitAssert')
def visitAssign(self, node):
# Assign attributes
# nodes a list of assignment targets, one per equal sign
# expr the value being assigned
raise NotImplementedException('visitAssign')
def visitAugAssign(self, node):
# AugAssign attributes
# node
# op
# expr
raise NotImplementedException('visitAugAssign')
def visitBackquote(self, node):
# Backquote attributes
# expr
raise NotImplementedException('visitBackquote')
def visitBitand(self, node):
# Bitand attributes
# nodes
raise NotImplementedException('visitBitand')
def visitBitor(self, node):
# Bitor attributes
# nodes
raise NotImplementedException('visitBitor')
def visitBitxor(self, node):
# Bitxor attributes
# nodes
raise NotImplementedException('visitBitxor')
def visitBreak(self, node):
# Break attributes
raise NotImplementedException('visitBreak')
def visitCallFunc(self, node):
# CallFunc attributes
# node expression for the callee
# args a list of arguments
# star_args the extended *-arg value
# dstar_args the extended **-arg value
raise NotImplementedException('visitCallFunc')
def visitClass(self, node):
# Class attributes
# name the name of the class, a string
# bases a list of base classes
# doc doc string, a string or <code>None</code>
# code the body of the class statement
raise NotImplementedException('visitClass')
def visitCompare(self, node):
# Compare attributes
# expr
# ops
raise NotImplementedException('visitCompare')
def visitConst(self, node):
# Const attributes
# value
raise NotImplementedException('visitConst')
def visitContinue(self, node):
# Continue attributes
raise NotImplementedException('visitContinue')
def visitDict(self, node):
# Dict attributes
# items
raise NotImplementedException('visitDict')
def visitDiscard(self, node):
# Discard attributes
# expr
raise NotImplementedException('visitDiscard')
def visitDiv(self, node):
# Div attributes
# left
# right
raise NotImplementedException('visitDiv')
def visitEllipsis(self, node):
# Ellipsis attributes
raise NotImplementedException('visitEllipsis')
def visitExec(self, node):
# Exec attributes
# expr
# locals
# globals
raise NotImplementedException('visitExec')
def visitFor(self, node):
# For attributes
# assign
# list
# body
# else_
raise NotImplementedException('visitFor')
def visitFrom(self, node):
# From attributes
# modname
# names
raise NotImplementedException('visitFrom')
def visitFunction(self, node):
# Function attributes
# name name used in def, a string
# argnames list of argument names, as strings
# defaults list of default values
# flags xxx
# doc doc string, a string or <code>None</code>
# code the body of the function
raise NotImplementedException('visitFunction')
def visitGetattr(self, node):
# Getattr attributes
# expr
# attrname
raise NotImplementedException('visitGetattr')
def visitGlobal(self, node):
# Global attributes
# names
raise NotImplementedException('visitGlobal')
def visitIf(self, node):
# If attributes
# tests
# else_
raise NotImplementedException('visitIf')
def visitImport(self, node):
# Import attributes
# names
raise NotImplementedException('visitImport')
def visitInvert(self, node):
# Invert attributes
# expr
raise NotImplementedException('visitInvert')
def visitKeyword(self, node):
# Keyword attributes
# name
# expr
raise NotImplementedException('visitKeyword')
def visitLambda(self, node):
# Lambda attributes
# argnames
# defaults
# flags
# code
raise NotImplementedException('visitLambda')
def visitLeftShift(self, node):
# LeftShift attributes
# left
# right
raise NotImplementedException('visitLeftShift')
def visitList(self, node):
# List attributes
# nodes
raise NotImplementedException('visitList')
def visitListComp(self, node):
# ListComp attributes
# expr
# quals
raise NotImplementedException('visitListComp')
def visitListCompFor(self, node):
# ListCompFor attributes
# assign
# list
# ifs
raise NotImplementedException('visitListCompFor')
def visitListCompIf(self, node):
# ListCompIf attributes
# test
raise NotImplementedException('visitListCompIf')
def visitMod(self, node):
# Mod attributes
# left
# right
raise NotImplementedException('visitMod')
def visitModule(self, node):
# Module attributes
# doc doc string, a string or <code>None</code>
# node body of the module, a <tt class="class">Stmt</tt>
raise NotImplementedException('visitModule')
def visitMul(self, node):
# Mul attributes
# left
# right
raise NotImplementedException('visitMul')
def visitName(self, node):
# Name attributes
# name
raise NotImplementedException('visitName')
def visitNot(self, node):
# Not attributes
# expr
raise NotImplementedException('visitNot')
def visitOr(self, node):
# Or attributes
# nodes
raise NotImplementedException('visitOr')
def visitPass(self, node):
# Pass attributes
raise NotImplementedException('visitPass')
def visitPower(self, node):
# Power attributes
# left
# right
raise NotImplementedException('visitPower')
def visitPrint(self, node):
# Print attributes
# nodes
# dest
raise NotImplementedException('visitPrint')
def visitPrintnl(self, node):
# Printnl attributes
# nodes
# dest
raise NotImplementedException('visitPrintnl')
def visitRaise(self, node):
# Raise attributes
# expr1
# expr2
# expr3
raise NotImplementedException('visitRaise')
def visitReturn(self, node):
# Return attributes
# value
raise NotImplementedException('visitReturn')
def visitRightShift(self, node):
# RightShift attributes
# left
# right
raise NotImplementedException('visitRightShift')
def visitSlice(self, node):
# Slice attributes
# expr
# flags
# lower
# upper
raise NotImplementedException('visitSlice')
def visitSliceobj(self, node):
# Sliceobj attributes
# nodes list of statements
raise NotImplementedException('visitSliceobj')
def visitStmt(self, node):
# Stmt attributes
# nodes
raise NotImplementedException('visitStmt')
def visitSub(self, node):
# Sub attributes
# left
# right
raise NotImplementedException('visitSub')
def visitSubscript(self, node):
# Subscript attributes
# expr
# flags
# subs
raise NotImplementedException('visitSubscript')
def visitTryExcept(self, node):
# TryExcept attributes
# body
# handlers
# else_
raise NotImplementedException('visitTryExcept')
def visitTryFinally(self, node):
# TryFinally attributes
# body
# final
raise NotImplementedException('visitTryFinally')
def visitTuple(self, node):
# Tuple attributes
# nodes
raise NotImplementedException('visitTuple')
def visitUnaryAdd(self, node):
# UnaryAdd attributes
# expr
raise NotImplementedException('visitUnaryAdd')
def visitUnarySub(self, node):
# UnarySub attributes
# expr
raise NotImplementedException('visitUnarySub')
def visitWhile(self, node):
# While attributes
# test
# body
# else_
raise NotImplementedException('visitWhile')
def visitYield(self, node):
# Yield attributes
# value
raise NotImplementedException('visitYield')
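# ---------------------------------------------------------------------------
# Usage sketch (Python 2 only; the `compiler` module was removed in Python 3):
# subclass VisitorSkeleton, replace the `raise` in the visitXxx methods you
# care about with real handling, then walk a parsed tree.
#
#   tree = compiler.parse(open("some_module.py").read())
#   compiler.walk(tree, MyVisitor())  # MyVisitor fills in the needed visitXxx
# ---------------------------------------------------------------------------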
|
Bin/slice_psd.py | anastasia2607/DeloresDev | 765 | 11105671 | #!/usr/bin/python
import os
import sys
from PIL import Image
from optparse import OptionParser
from psd_tools import PSDImage
# Globals
source_size = None
emited_files = {}
def visit_layer(psd, layer, options):
global source_size, emited_files
name=layer.name
if name.endswith(".png") and not name.startswith("-"):
if name in emited_files:
print("ERROR layer "+name+" appears in the .psd twice and was already emitted.")
return False
emited_files[name] = True
if layer.is_group():
bounds = None
width = None
height = None
for child in layer:
if child.name == "@bounds":
if child.kind == "shape":
bounds = (
int(round(child.vector_mask.bbox[0] * psd.width)),
int(round(child.vector_mask.bbox[1] * psd.height)),
int(round(child.vector_mask.bbox[2] * psd.width)),
int(round(child.vector_mask.bbox[3] * psd.height)),
)
width = bounds[2]-bounds[0]
height = bounds[3]-bounds[1]
elif child.kind == "solidcolorfill":
bounds = child.bbox
bounds = ( bounds[0], bounds[1], bounds[2], bounds[3] )
width = bounds[2]-bounds[0]
height = bounds[3]-bounds[1]
else:
bounds = child.bbox
# Adjustments are due to psd-tools not capturing the bounds correctly. Is this true anymore?
bounds = ( bounds[0]+1, bounds[1]+1, bounds[2]-1, bounds[3]-1 )
width = child.width-2
height = child.height-2
child.visible = False
layer.visible = True
image = layer.composite()
if bounds != None:
if options.verbose:
print("@bounds: "+str(width)+","+str(height))
new_image = Image.new("RGBA", (width, height))
image_bounds = layer.bbox
try:
new_x = image_bounds[0]-bounds[0]
new_y = image_bounds[1]-bounds[1]
new_image.paste(image, (new_x, new_y))
except:
pass
if not options.quiet:
print("Saving "+name+" @bounds ("+str(width)+","+str(height)+")")
try:
new_image.save(options.image_folder+"/"+name)
except:
print("ERROR saving "+name)
return False
else:
if not options.quiet:
print("Saving "+name)
try:
image.save(options.image_folder+"/"+name)
except:
print("ERROR composing "+name)
return False
else:
layer.visible = True
image = layer.composite()
if not options.quiet:
print("Saving "+name)
try:
image.save(options.image_folder+"/"+name)
except:
print("ERROR saving "+name)
return False
if layer.is_group():
layer.visible = True
for child in layer:
visit_layer(psd, child, options)
def slicePSD(psd_filename, options):
global source_size, emited_files
try:
psd = PSDImage.open(psd_filename)
except:
print("Error reading "+psd_filename)
return False
source_size = psd.size
emited_files = {}
for layer in psd:
if visit_layer(psd, layer, options) == False:
return False
return True
def main(args):
parser = OptionParser(usage="%prog [options]")
parser.add_option("--quiet", action="store_true", dest="quiet", default=False, help="Quiet flag")
parser.add_option("--images", action="store", dest="image_folder", default="./", help="Folder to place images into")
parser.add_option("--verbose", action="store_true", dest="verbose", default=False, help="Verbose output")
(options, args) = parser.parse_args(args)
if len(args) == 0:
print("Missing .psd file. Did you mean to call munge_psd.py?")
sys.exit(1)
if (options.quiet):
options.verbose = False
if slicePSD(args[0], options) == False:
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
|
toolkit/third_party/apiclient/ext/django_orm.py | suraj-testing2/Flowers_Toilet | 790 | 11105695 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'VARCHAR'
def to_python(self, value):
if value is None:
return None
if isinstance(value, apiclient.oauth.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'VARCHAR'
def to_python(self, value):
print "In to_python", value
if value is None:
return None
if isinstance(value, apiclient.oauth.FlowThreeLegged):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
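# Illustrative only: a Django model storing credentials with the field above
# (the model and field names below are placeholders, not part of this module).
#
#   from django.db import models
#   from apiclient.ext.django_orm import OAuthCredentialsField
#
#   class StoredCredentials(models.Model):
#       user_id = models.CharField(max_length=200)
#       credentials = OAuthCredentialsField(null=True)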
|
transpyle/cpp/parser.py | EraYaN/transpyle | 107 | 11105706 | """Parsing C++."""
import logging
import pathlib
import platform
import tempfile
import xml.etree.ElementTree as ET
import argunparse
from ..general import Parser
from ..general.tools import run_tool
_LOG = logging.getLogger(__name__)
CASTXML_PATH = pathlib.Path('castxml')
def run_castxml(input_path: pathlib.Path, output_path: pathlib.Path, gcc: bool = False):
"""Run CastXML with given arguments."""
args = ['-std=c++17', '-fcolor-diagnostics', input_path]
kwargs = {}
if gcc:
kwargs['castxml-gccxml'] = True
else:
kwargs['castxml-output=1'] = True
if platform.system() == 'Linux':
kwargs['castxml-cc-gnu'] = 'g++'
elif platform.system() == 'Darwin':
kwargs['castxml-cc-gnu'] = 'clang++'
kwargs['o'] = str(output_path)
return run_tool(CASTXML_PATH, args, kwargs,
argunparser=argunparse.ArgumentUnparser(opt_value=' '))
class CppParser(Parser):
"""C++ parser using CastXML."""
def _parse_scope(self, code, path=None):
output_path = None
with tempfile.NamedTemporaryFile(delete=False) as temporary_file:
output_path = pathlib.Path(temporary_file.name)
_ = run_castxml(path, output_path, gcc=False)
with open(str(output_path)) as output_file:
output = output_file.read()
output_path.unlink()
return ET.fromstring(output)
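# ---------------------------------------------------------------------------
# Minimal sketch of driving the CastXML wrapper above directly; it assumes the
# `castxml` binary is on PATH and that example.hpp is an existing header file.
#
#   header = pathlib.Path('example.hpp')
#   xml_out = pathlib.Path('example.castxml.xml')
#   run_castxml(header, xml_out, gcc=False)
#   print(ET.parse(str(xml_out)).getroot().tag)
# ---------------------------------------------------------------------------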
|
unittests/smstest.py | low456high/Skype4Py | 199 | 11105722 | import unittest
import skype4pytest
from Skype4Py.sms import *
class SmsMessageTest(skype4pytest.TestCase):
def setUpObject(self):
self.obj = SmsMessage(self.skype, '1234')
# Methods
# =======
def testDelete(self):
self.api.enqueue('DELETE SMS 1234')
self.obj.Delete()
self.failUnless(self.api.is_empty())
def testMarkAsSeen(self):
self.api.enqueue('SET SMS 1234 SEEN',
'SMS 1234 STATUS READ')
self.obj.MarkAsSeen()
self.failUnless(self.api.is_empty())
def testSend(self):
self.api.enqueue('ALTER SMS 1234 SEND')
self.obj.Send()
self.failUnless(self.api.is_empty())
# Properties
# ==========
def testBody(self):
# Readable, Writable, Type: unicode
self.api.enqueue('GET SMS 1234 BODY',
'SMS 1234 BODY eggs')
t = self.obj.Body
self.assertInstance(t, unicode)
self.assertEqual(t, 'eggs')
self.failUnless(self.api.is_empty())
self.api.enqueue('SET SMS 1234 BODY eggs',
'SMS 1234 BODY eggs')
self.obj.Body = 'eggs'
self.failUnless(self.api.is_empty())
def testChunks(self):
# Readable, Type: SmsChunkCollection
self.api.enqueue('GET SMS 1234 CHUNKING',
'SMS 1234 CHUNKING 2 30')
t = self.obj.Chunks
self.assertInstance(t, SmsChunkCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testDatetime(self):
# Readable, Type: datetime
from datetime import datetime
from time import time
now = time()
self.api.enqueue('GET SMS 1234 TIMESTAMP',
'SMS 1234 TIMESTAMP %f' % now)
t = self.obj.Datetime
self.assertInstance(t, datetime)
self.assertEqual(t, datetime.fromtimestamp(now))
self.failUnless(self.api.is_empty())
def testFailureReason(self):
# Readable, Type: str
self.api.enqueue('GET SMS 1234 FAILUREREASON',
'SMS 1234 FAILUREREASON eggs')
t = self.obj.FailureReason
self.assertInstance(t, str)
self.assertEqual(t, 'eggs')
self.failUnless(self.api.is_empty())
def testId(self):
# Readable, Type: int
t = self.obj.Id
self.assertInstance(t, int)
self.assertEqual(t, 1234)
def testIsFailedUnseen(self):
# Readable, Type: bool
self.api.enqueue('GET SMS 1234 IS_FAILED_UNSEEN',
'SMS 1234 IS_FAILED_UNSEEN TRUE')
t = self.obj.IsFailedUnseen
self.assertInstance(t, bool)
self.assertEqual(t, True)
self.failUnless(self.api.is_empty())
def testPrice(self):
# Readable, Type: int
self.api.enqueue('GET SMS 1234 PRICE',
'SMS 1234 PRICE 123')
t = self.obj.Price
self.assertInstance(t, int)
self.assertEqual(t, 123)
self.failUnless(self.api.is_empty())
def testPriceCurrency(self):
# Readable, Type: unicode
self.api.enqueue('GET SMS 1234 PRICE_CURRENCY',
'SMS 1234 PRICE_CURRENCY EUR')
t = self.obj.PriceCurrency
self.assertInstance(t, unicode)
self.assertEqual(t, 'EUR')
self.failUnless(self.api.is_empty())
def testPricePrecision(self):
# Readable, Type: int
self.api.enqueue('GET SMS 1234 PRICE_PRECISION',
'SMS 1234 PRICE_PRECISION 3')
t = self.obj.PricePrecision
self.assertInstance(t, int)
self.assertEqual(t, 3)
self.failUnless(self.api.is_empty())
def testPriceToText(self):
# Readable, Type: unicode
self.api.enqueue('GET SMS 1234 PRICE_CURRENCY',
'SMS 1234 PRICE_CURRENCY EUR')
self.api.enqueue('GET SMS 1234 PRICE',
'SMS 1234 PRICE 123')
self.api.enqueue('GET SMS 1234 PRICE_PRECISION',
'SMS 1234 PRICE_PRECISION 3')
t = self.obj.PriceToText
self.assertInstance(t, unicode)
self.assertEqual(t, 'EUR 0.123')
self.failUnless(self.api.is_empty())
def testPriceValue(self):
# Readable, Type: float
self.api.enqueue('GET SMS 1234 PRICE',
'SMS 1234 PRICE 123')
self.api.enqueue('GET SMS 1234 PRICE_PRECISION',
'SMS 1234 PRICE_PRECISION 3')
t = self.obj.PriceValue
self.assertInstance(t, float)
self.assertEqual(t, 0.123)
self.failUnless(self.api.is_empty())
def testReplyToNumber(self):
# Readable, Writable, Type: str
self.api.enqueue('GET SMS 1234 REPLY_TO_NUMBER',
'SMS 1234 REPLY_TO_NUMBER eggs')
t = self.obj.ReplyToNumber
self.assertInstance(t, str)
self.assertEqual(t, 'eggs')
self.failUnless(self.api.is_empty())
self.api.enqueue('SET SMS 1234 REPLY_TO_NUMBER eggs',
'SMS 1234 REPLY_TO_NUMBER eggs')
self.obj.ReplyToNumber = 'eggs'
self.failUnless(self.api.is_empty())
def testSeen(self):
# Writable, Type: bool
from warnings import simplefilter
self.api.enqueue('SET SMS 1234 SEEN',
'SMS 1234 STATUS READ')
simplefilter('ignore')
try:
self.obj.Seen = True
finally:
simplefilter('default')
self.failUnless(self.api.is_empty())
def testStatus(self):
# Readable, Type: str
self.api.enqueue('GET SMS 1234 STATUS',
'SMS 1234 STATUS RECEIVED')
t = self.obj.Status
self.assertInstance(t, str)
self.assertEqual(t, 'RECEIVED')
self.failUnless(self.api.is_empty())
def testTargetNumbers(self):
# Readable, Writable, Type: tuple of str
self.api.enqueue('GET SMS 1234 TARGET_NUMBERS',
'SMS 1234 TARGET_NUMBERS +3712345678, +3723456789')
t = self.obj.TargetNumbers
self.assertInstance(t, tuple)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
self.api.enqueue('SET SMS 1234 TARGET_NUMBERS +3787654321',
'SMS 1234 TARGET_NUMBERS +3787654321')
self.obj.TargetNumbers = ('+3787654321',)
self.failUnless(self.api.is_empty())
def testTargets(self):
# Readable, Type: SmsTargetCollection
self.api.enqueue('GET SMS 1234 TARGET_NUMBERS',
'SMS 1234 TARGET_NUMBERS +3712345678, +3723456789')
t = self.obj.Targets
self.assertInstance(t, SmsTargetCollection)
self.assertEqual(len(t), 2)
self.failUnless(self.api.is_empty())
def testTimestamp(self):
# Readable, Type: float
self.api.enqueue('GET SMS 1234 TIMESTAMP',
'SMS 1234 TIMESTAMP 123.4')
t = self.obj.Timestamp
self.assertInstance(t, float)
self.assertEqual(t, 123.4)
self.failUnless(self.api.is_empty())
def testType(self):
# Readable, Type: str
self.api.enqueue('GET SMS 1234 TYPE',
'SMS 1234 TYPE INCOMING')
t = self.obj.Type
self.assertInstance(t, str)
self.assertEqual(t, 'INCOMING')
self.failUnless(self.api.is_empty())
class SmsChunkTest(skype4pytest.TestCase):
def setUpObject(self):
self.obj = SmsChunk(SmsMessage(self.skype, '1234'), 1)
# Properties
# ==========
def testCharactersLeft(self):
# Readable, Type: int
self.api.enqueue('GET SMS 1234 CHUNKING',
'SMS 1234 CHUNKING 2 30')
t = self.obj.CharactersLeft
self.assertInstance(t, int)
self.assertEqual(t, 30)
self.failUnless(self.api.is_empty())
def testId(self):
# Readable, Type: int
t = self.obj.Id
self.assertInstance(t, int)
self.assertEqual(t, 1)
def testMessage(self):
# Readable, Type: SmsMessage
t = self.obj.Message
self.assertInstance(t, SmsMessage)
self.assertEqual(t.Id, 1234)
def testText(self):
# Readable, Type: unicode
self.api.enqueue('GET SMS 1234 CHUNK 1',
'SMS 1234 CHUNK 1 eggs')
t = self.obj.Text
self.assertInstance(t, unicode)
self.assertEqual(t, 'eggs')
self.failUnless(self.api.is_empty())
class SmsTargetTest(skype4pytest.TestCase):
def setUpObject(self):
self.obj = SmsTarget(SmsMessage(self.skype, '1234'), '+3712345678')
# Properties
# ==========
def testMessage(self):
# Readable, Type: SmsMessage
t = self.obj.Message
self.assertInstance(t, SmsMessage)
self.assertEqual(t.Id, 1234)
def testNumber(self):
# Readable, Type: str
t = self.obj.Number
self.assertInstance(t, str)
self.assertEqual(t, '+3712345678')
def testStatus(self):
# Readable, Type: str
self.api.enqueue('GET SMS 1234 TARGET_STATUSES',
'SMS 1234 TARGET_STATUSES +3723456789=TARGET_NOT_ROUTABLE, +3712345678=TARGET_ACCEPTABLE')
t = self.obj.Status
self.assertInstance(t, str)
self.assertEqual(t, 'TARGET_ACCEPTABLE')
self.failUnless(self.api.is_empty())
def suite():
return unittest.TestSuite([
unittest.defaultTestLoader.loadTestsFromTestCase(SmsMessageTest),
unittest.defaultTestLoader.loadTestsFromTestCase(SmsChunkTest),
unittest.defaultTestLoader.loadTestsFromTestCase(SmsTargetTest),
])
if __name__ == '__main__':
unittest.main()
|
Chapter07/loadcsv-fail.py | VinushaVemuri/learn | 185 | 11105728 | from faker import Faker
import csv
output=open('/home/paulcrickard/peoplepipeline/people.csv','w')
fake=Faker()
header=['name','age','street','city','state','zip','lng','lat']
mywriter=csv.writer(output)
mywriter.writerow(header)
for r in range(1000):
mywriter.writerow([fake.name(),fake.random_int(min=1, max=100, step=1), fake.street_address(), fake.city(),fake.state(),fake.zipcode(),fake.longitude(),fake.latitude()])
output.close()
print('{"status":"complete"}')
|
tests/utils/functions/test_singleobj_returndims.py | goncalogteixeira/pyswarns | 959 | 11105773 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import standard library
from collections import namedtuple
# Import modules
import numpy as np
import pytest
# Import from pyswarms
from pyswarms.utils.functions import single_obj as fx
def test_ackley_output_size(common_minima, targetdim):
"""Tests ackley output size."""
assert fx.ackley(common_minima).shape == targetdim
def test_beale_output_size(common_minima, targetdim):
"""Tests beale output size."""
assert fx.beale(common_minima).shape == targetdim
def test_booth_output_size(common_minima, targetdim):
"""Test booth output size."""
assert fx.booth(common_minima).shape == targetdim
def test_bukin6_output_size(common_minima2, targetdim):
"""Test bukin6 output size."""
assert fx.bukin6([-10, 0] * common_minima2).shape == targetdim
def test_crossintray_output_size(common_minima2, targetdim):
"""Test crossintray output size."""
assert fx.crossintray([-10, 0] * common_minima2).shape == targetdim
def test_easom_output_size(common_minima2, targetdim):
"""Test easom output size."""
assert fx.easom([-10, 0] * common_minima2).shape == targetdim
def test_eggholder_output_size(common_minima2, targetdim):
"""Test eggholder output size."""
assert fx.eggholder([-10, 0] * common_minima2).shape == targetdim
def test_goldstein_output_size(common_minima, targetdim):
"""Test goldstein output size."""
assert fx.goldstein(common_minima).shape == targetdim
def test_himmelblau_output_size(common_minima, targetdim):
"""Test himmelblau output size."""
assert fx.himmelblau(common_minima).shape == targetdim
def test_holdertable_output_size(common_minima, targetdim):
"""Test holdertable output size."""
assert fx.holdertable(common_minima).shape == targetdim
def test_levi_output_size(common_minima, targetdim):
"""Test levi output size."""
assert fx.levi(common_minima).shape == targetdim
def test_rastrigin_output_size(common_minima, targetdim):
"""Tests rastrigin output size."""
assert fx.rastrigin(common_minima).shape == targetdim
def test_rosenbrock_output_size(common_minima, targetdim):
"""Tests rosenbrock output size."""
assert fx.rosenbrock(common_minima).shape == targetdim
def test_schaffer2_output_size(common_minima, targetdim):
"""Test schaffer2 output size."""
assert fx.schaffer2(common_minima).shape == targetdim
def test_sphere_output_size(common_minima, targetdim):
"""Tests sphere output size."""
assert fx.sphere(common_minima).shape == targetdim
def test_threehump_output_size(common_minima, targetdim):
"""Test threehump output size."""
assert fx.threehump(common_minima).shape == targetdim
|
Configuration/ProcessModifiers/python/run3_ecalclustering_cff.py | ckamtsikis/cmssw | 852 | 11105775 |
import FWCore.ParameterSet.Config as cms
# This modifier is for ECAL Run3 clustering studies
run3_ecalclustering = cms.Modifier()
|
test-framework/test-suites/integration/tests/list/test_list_vm.py | sammeidinger/stack | 123 | 11105791 | import pytest
import json
from pathlib import Path
class TestListVM:
"""
Tests for the list vm command
"""
def test_list_vm_all(self, add_hypervisor, add_vm_multiple, host):
list_result = host.run(f'stack list vm output-format=json')
assert list_result.rc == 0
assert json.loads(list_result.stdout) == [
{
'virtual machine': 'vm-backend-0-3',
'hypervisor': 'hypervisor-0-1',
'memory': 2048,
'cpu': 1,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-4',
'hypervisor': 'hypervisor-0-1',
'memory': 2048,
'cpu': 2,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-5',
'hypervisor': 'hypervisor-0-2',
'memory': 3072,
'cpu': 3,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-6',
'hypervisor': 'hypervisor-0-2',
'memory': 4096,
'cpu': 4,
'pending deletion': False
}
]
def test_list_vm_single(self, add_hypervisor, add_vm_multiple, host):
list_result = host.run(f'stack list vm vm-backend-0-3 output-format=json')
assert list_result.rc == 0
assert json.loads(list_result.stdout) == [
{
'virtual machine': 'vm-backend-0-3',
'hypervisor': 'hypervisor-0-1',
'memory': 2048,
'cpu': 1,
'pending deletion': False
}
]
def test_list_vm_multiple(self, add_hypervisor, add_vm_multiple, host):
list_result = host.run(f'stack list vm vm-backend-0-3 vm-backend-0-5 vm-backend-0-6 output-format=json')
assert list_result.rc == 0
assert json.loads(list_result.stdout) == [
{
'virtual machine': 'vm-backend-0-3',
'hypervisor': 'hypervisor-0-1',
'memory': 2048,
'cpu': 1,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-5',
'hypervisor': 'hypervisor-0-2',
'memory': 3072,
'cpu': 3,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-6',
'hypervisor': 'hypervisor-0-2',
'memory': 4096,
'cpu': 4,
'pending deletion': False
}
]
def test_list_vm_hypervisor(self, add_hypervisor, add_vm_multiple, host):
list_result = host.run(f'stack list vm vm-backend-0-3 vm-backend-0-5 vm-backend-0-6 hypervisor=hypervisor-0-2 output-format=json')
assert list_result.rc == 0
assert json.loads(list_result.stdout) == [
{
'virtual machine': 'vm-backend-0-5',
'hypervisor': 'hypervisor-0-2',
'memory': 3072,
'cpu': 3,
'pending deletion': False
},
{
'virtual machine': 'vm-backend-0-6',
'hypervisor': 'hypervisor-0-2',
'memory': 4096,
'cpu': 4,
'pending deletion': False
}
]
def test_list_vm_expanded(self, add_hypervisor, add_vm_multiple, host):
list_result = host.run(f'stack list vm vm-backend-0-3 vm-backend-0-5 vm-backend-0-6 expanded=y output-format=json')
assert list_result.rc == 0
assert json.loads(list_result.stdout) == [
{
'virtual machine': 'vm-backend-0-3',
'hypervisor': 'hypervisor-0-1',
'memory': 2048,
'cpu': 1,
'pending deletion': False,
'status': 'Connection failed to hypervisor'
},
{
'virtual machine': 'vm-backend-0-5',
'hypervisor': 'hypervisor-0-2',
'memory': 3072,
'cpu': 3,
'pending deletion': False,
'status': 'Connection failed to hypervisor'
},
{
'virtual machine': 'vm-backend-0-6',
'hypervisor': 'hypervisor-0-2',
'memory': 4096,
'cpu': 4,
'pending deletion': False,
'status': 'Connection failed to hypervisor'
}
]
BAD_LIST_VM_DATA = [
('backend-0-0', 'not a valid virtual machine'),
('hypervisor=backend-0-0', 'not a valid hypervisor'),
('vm-backend-0-1 hypervisor=hypervisor-0-3', 'cannot resolve host'),
('fake-backend-0-0', 'cannot resolve host'),
('hypervisor=hypervisor-0-3', 'cannot resolve host')
]
@pytest.mark.parametrize('params, msg', BAD_LIST_VM_DATA)
def test_list_vm_bad(self, add_hypervisor, add_vm_multiple, add_host, host, params, msg):
list_result = host.run(f'stack list vm {params}')
assert list_result.rc != 0 and msg in list_result.stderr
|
modelvshuman/helper/human_categories.py | TizianThieringer/model-vs-human | 158 | 11105831 | #!/usr/bin/env python
"""human_categories.py
Code to define the class that deals with the specifics
of the 16 categories used in Robert's human and DNN
experiments.
"""
import numpy as np
import os
from . import wordnet_functions as wf
def compute_imagenet_indices_for_category(category):
"""Return list of ImageNet indices that correspond to category.
'category' is part of the 16 classes.
"""
assert category in get_human_object_recognition_categories()
categories = HumanCategories()
indices = []
for i in range(0, 1000):
WNID = wf.get_WNID_from_index(i)
if categories.get_human_category_from_WNID(WNID) == category:
indices.append(i)
return indices
def get_human_object_recognition_categories():
"""Return the 16 categories that are used for the human experiment.
To be more precise, return the categories that Robert uses in his
object recognition experiment.
"""
return sorted(["knife", "keyboard", "elephant", "bicycle", "airplane",
"clock", "oven", "chair", "bear", "boat", "cat",
"bottle", "truck", "car", "bird", "dog"])
def get_num_human_categories():
"""Return number of categories used in the object recogn. experiment."""
return len(get_human_object_recognition_categories())
class HumanCategories(object):
#Note: Some WNIDs may not be part of the ilsvrc2012 database.
# Those WNIDs were generated with:
# wordnet_functions.get_ilsvrc2012_training_WNID("knife") etc.
# Since this takes some time, they were collected here for
# a massive speed-up in computation time.
# and then Robert manually removed those who are in the
# excluded categories (documented in vision-model-DNN/human_experiments/
# /documentation/category_information.ods)
knife = ['n03041632']
keyboard = ['n03085013', 'n04505470']
elephant = ['n02504013', 'n02504458']
bicycle = ['n02835271', 'n03792782']
airplane = ['n02690373', 'n03955296', 'n13861050',
'n13941806']
clock = ['n02708093', 'n03196217', 'n04548280']
oven = ['n03259401', 'n04111414', 'n04111531']
chair = ['n02791124', 'n03376595', 'n04099969',
'n00605023', 'n04429376']
bear = ['n02132136', 'n02133161', 'n02134084',
'n02134418']
boat = ['n02951358', 'n03344393', 'n03662601',
'n04273569', 'n04612373', 'n04612504']
cat = ["n02122878", "n02123045", "n02123159",
"n02126465", "n02123394", "n02123597",
"n02124075", "n02125311"]
bottle = ['n02823428', 'n03937543', 'n03983396',
'n04557648', 'n04560804', 'n04579145',
'n04591713']
truck = ['n03345487', 'n03417042', 'n03770679',
'n03796401', 'n00319176', 'n01016201',
'n03930630', 'n03930777', 'n05061003',
'n06547832', 'n10432053', 'n03977966',
'n04461696', 'n04467665']
car = ['n02814533', 'n03100240', 'n03100346',
'n13419325', 'n04285008']
bird = ['n01321123', 'n01514859', 'n01792640',
'n07646067', 'n01530575', 'n01531178', 'n01532829',
'n01534433', 'n01537544', 'n01558993', 'n01562265',
'n01560419', 'n01582220', 'n10281276', 'n01592084',
'n01601694', 'n01614925', 'n01616318', 'n01622779',
'n01795545', 'n01796340', 'n01797886', 'n01798484',
'n01817953', 'n01818515', 'n01819313', 'n01820546',
'n01824575', 'n01828970', 'n01829413', 'n01833805',
'n01843065', 'n01843383', 'n01855032', 'n01855672',
'n07646821', 'n01860187', 'n02002556', 'n02002724',
'n02006656', 'n02007558', 'n02009229', 'n02009912',
'n02011460', 'n02013706', 'n02017213', 'n02018207',
'n02018795', 'n02025239', 'n02027492', 'n02028035',
'n02033041', 'n02037110', 'n02051845', 'n02056570']
dog = ['n02085782', 'n02085936', 'n02086079',
'n02086240', 'n02086646', 'n02086910', 'n02087046',
'n02087394', 'n02088094', 'n02088238', 'n02088364',
'n02088466', 'n02088632', 'n02089078', 'n02089867',
'n02089973', 'n02090379', 'n02090622', 'n02090721',
'n02091032', 'n02091134', 'n02091244', 'n02091467',
'n02091635', 'n02091831', 'n02092002', 'n02092339',
'n02093256', 'n02093428', 'n02093647', 'n02093754',
'n02093859', 'n02093991', 'n02094114', 'n02094258',
'n02094433', 'n02095314', 'n02095570', 'n02095889',
'n02096051', 'n02096294', 'n02096437', 'n02096585',
'n02097047', 'n02097130', 'n02097209', 'n02097298',
'n02097474', 'n02097658', 'n02098105', 'n02098286',
'n02099267', 'n02099429', 'n02099601', 'n02099712',
'n02099849', 'n02100236', 'n02100583', 'n02100735',
'n02100877', 'n02101006', 'n02101388', 'n02101556',
'n02102040', 'n02102177', 'n02102318', 'n02102480',
'n02102973', 'n02104029', 'n02104365', 'n02105056',
'n02105162', 'n02105251', 'n02105505', 'n02105641',
'n02105855', 'n02106030', 'n02106166', 'n02106382',
'n02106550', 'n02106662', 'n02107142', 'n02107312',
'n02107574', 'n02107683', 'n02107908', 'n02108000',
'n02108422', 'n02108551', 'n02108915', 'n02109047',
'n02109525', 'n02109961', 'n02110063', 'n02110185',
'n02110627', 'n02110806', 'n02110958', 'n02111129',
'n02111277', 'n08825211', 'n02111500', 'n02112018',
'n02112350', 'n02112706', 'n02113023', 'n02113624',
'n02113712', 'n02113799', 'n02113978']
airplane_indices = [404]
bear_indices = [294, 295, 296, 297]
bicycle_indices = [444, 671]
bird_indices = [8, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 22, 23,
24, 80, 81, 82, 83, 87, 88, 89, 90, 91, 92, 93,
94, 95, 96, 98, 99, 100, 127, 128, 129, 130, 131,
132, 133, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 145]
boat_indices = [472, 554, 625, 814, 914]
bottle_indices = [440, 720, 737, 898, 899, 901, 907]
car_indices = [436, 511, 817]
cat_indices = [281, 282, 283, 284, 285, 286]
chair_indices = [423, 559, 765, 857]
clock_indices = [409, 530, 892]
dog_indices = [152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
203, 205, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 228, 229, 230, 231, 232, 233, 234,
235, 236, 237, 238, 239, 240, 241, 243, 244, 245,
246, 247, 248, 249, 250, 252, 253, 254, 255, 256,
257, 259, 261, 262, 263, 265, 266, 267, 268]
elephant_indices = [385, 386]
keyboard_indices = [508, 878]
knife_indices = [499]
oven_indices = [766]
truck_indices = [555, 569, 656, 675, 717, 734, 864, 867]
def get_human_category_from_WNID(self, wnid):
"""Return the MS COCO category for a given WNID.
Returns None if wnid is not part of the 16 human categories.
parameters:
- wnid: a string containing the wnid of an image, e.g. 'n03658185'
"""
categories = get_human_object_recognition_categories()
for c in categories:
attr = getattr(self, c)
if wnid in attr:
return c
return None
def get_imagenet_indices_for_category(self, category):
"""Return ImageNet indices that correspond to an entry-level category.
Returns error if 'category' is not part of the 16 human categories.
parameters:
- category: a string, e.g. "dog" or "knife"
"""
assert category in get_human_object_recognition_categories()
return getattr(self, category+"_indices")
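# Editor's note: a brief usage sketch (not part of the original module), using
# only names defined above; the example WNID is taken from the `dog` list.
#
#     categories = HumanCategories()
#     categories.get_human_category_from_WNID("n02085782")   # -> "dog"
#     categories.get_imagenet_indices_for_category("dog")    # -> HumanCategories.dog_indices
#     get_num_human_categories()                              # -> 16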
|
test/unit/network/gremlin/test_gremlin_path_pattern.py | Sam-Martin/graph-notebook | 378 | 11105843 |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import unittest
from gremlin_python.structure.graph import Path
from graph_notebook.network.gremlin.GremlinNetwork import GremlinNetwork, PathPattern
class TestAddResultsPathPattern(unittest.TestCase):
def test_add_all_V_pattern(self):
pattern = [PathPattern.V, PathPattern.V, PathPattern.V]
path = Path([], ['SEA', 'DFW', 'AUS'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(3, len(gn.graph.nodes))
self.assertEqual(2, len(gn.graph.edges))
def test_add_v_and_inV_pattern(self):
pattern = [PathPattern.V, PathPattern.IN_V, PathPattern.V]
path = Path([], ['SEA', 'DFW', 'AUS'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
for tup in gn.graph.edges:
self.assertEqual(tup[1], 'DFW') # assert that DFW is the incoming vertex for both edges.
self.assertEqual(3, len(gn.graph.nodes))
self.assertEqual(2, len(gn.graph.edges))
def test_add_v_and_outV_pattern(self):
pattern = [PathPattern.V, PathPattern.OUT_V, PathPattern.V]
path = Path([], ['SEA', 'DFW', 'AUS'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
for tup in gn.graph.edges:
self.assertEqual(tup[0], 'DFW') # assert that DFW is the incoming vertex for both edges.
self.assertEqual(3, len(gn.graph.nodes))
self.assertEqual(2, len(gn.graph.edges))
def test_add_v_outV_inV_pattern(self):
pattern = [PathPattern.V, PathPattern.OUT_V, PathPattern.IN_V]
path = Path([], ['SEA', 'DFW', 'AUS'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(3, len(gn.graph.nodes))
self.assertEqual(2, len(gn.graph.edges))
edges = gn.graph.out_edges('DFW')
self.assertEqual(2, len(edges))
def test_add_v_inV_outV_pattern(self):
pattern = [PathPattern.V, PathPattern.IN_V, PathPattern.OUT_V]
path = Path([], ['SEA', 'DFW', 'AUS'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(3, len(gn.graph.nodes))
self.assertEqual(2, len(gn.graph.edges))
edges = gn.graph.in_edges('DFW')
self.assertEqual(2, len(edges))
def test_add_v_inV_outV_longer_path(self):
pattern = [PathPattern.V, PathPattern.IN_V, PathPattern.OUT_V]
path = Path([], ['SEA', 'DFW', 'AUS', 'LAX', 'JFK'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(5, len(gn.graph.nodes))
self.assertEqual(4, len(gn.graph.edges))
dfw_edges = gn.graph.in_edges('DFW')
self.assertEqual(2, len(dfw_edges))
lax_edges = gn.graph.in_edges('LAX')
self.assertEqual(1, len(lax_edges))
jfk_edges = gn.graph.in_edges('JFK')
self.assertEqual(1, len(jfk_edges))
def test_add_v_e_v_path(self):
pattern = [PathPattern.V, PathPattern.E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
def test_add_v_inE_v_path(self):
pattern = [PathPattern.V, PathPattern.E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
def test_add_v_outE_path(self):
pattern = [PathPattern.V, PathPattern.OUT_E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
def test_add_v_inE_path(self):
pattern = [PathPattern.V, PathPattern.IN_E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('DFW', 'SEA', 'route')])
def test_add_inV_E_V_path(self):
pattern = [PathPattern.IN_V, PathPattern.E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('DFW', 'SEA', 'route')])
def test_add_outV_E_V_path(self):
pattern = [PathPattern.OUT_V, PathPattern.E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
def test_add_outV_E_inV_path(self):
pattern = [PathPattern.OUT_V, PathPattern.E, PathPattern.IN_V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
def test_add_V_inE_V_path(self):
pattern = [PathPattern.V, PathPattern.IN_E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('DFW', 'SEA', 'route')])
def test_add_V_outE_V_path(self):
pattern = [PathPattern.V, PathPattern.OUT_E, PathPattern.V]
path = Path([], ['SEA', 'route', 'DFW'])
gn = GremlinNetwork()
gn.add_results_with_pattern([path], pattern)
self.assertEqual(2, len(gn.graph.nodes))
self.assertEqual(1, len(gn.graph.edges))
self.assertIsNotNone(gn.graph.edges[('SEA', 'DFW', 'route')])
|
coco_synthetic/split_train_test.py | psaboia/RGB-N | 117 | 11105856 |
import networkx as nx
import numpy as np
import os
from glob import glob
import sys
import skimage.io as io
import pdb
def contain_node(Graph_list,node):
for g in Graph_list:
if g.has_node(node):
return True
return False
data_dir='../../dataset/filter_tamper' #FIXME
ext='Tp*'
dataDir='../../dataset' #FIXME
dataType='train2014' #COCO2014 train directory
#cls=['person','tv','airplane','dog','bench','train','kite','bed','refrigerator','bowl']
cls=['person','airplane','dog','train','bed','refrigerator']
filenames=glob(os.path.join(data_dir,ext))
G=nx.Graph()
print(len(filenames))
for file in filenames:
content=os.path.splitext(os.path.basename(file))[0].split("_")
if content[-1] in cls:
target_name=content[1]
source_name=content[2]
G.add_edge(target_name,source_name)
train = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)[0:950]
test=sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)[950:]
with open('train_filter.txt','w') as f:
for file in filenames:
        content=os.path.splitext(os.path.basename(file))[0].split("_")
if content[-1] in cls:
target_name=content[1]
source_name=content[2]
if target_name!= source_name and contain_node(train,target_name) and contain_node(train,source_name):
x1=float(content[3])
y1=float(content[4])
x2=float(content[5])
y2=float(content[6])
source_img=io.imread(os.path.join(dataDir,dataType,'COCO_train2014_{:012d}.jpg'.format(int(source_name))))
target_img=io.imread(os.path.join(dataDir,dataType,'COCO_train2014_{:012d}.jpg'.format(int(target_name))))
s_w,s_h = source_img.shape[:2]
t_w,t_h = target_img.shape[:2]
f.write('%s %.5f %.5f %.5f %.5f\n' % (file,x1*s_h/t_h,y1*s_w/t_w,x2*s_h/t_h,y2*s_w/t_w) )
with open('test_filter.txt','w') as f:
for file in filenames:
content=os.path.splitext(os.path.basename(file))[0].split("_")
if content[-1] in cls:
target_name=content[1]
source_name=content[2]
if target_name!= source_name and contain_node(test,target_name) and contain_node(test,source_name):
x1=float(content[3])
y1=float(content[4])
x2=float(content[5])
y2=float(content[6])
source_img=io.imread(os.path.join(dataDir,dataType,'COCO_train2014_{:012d}.jpg'.format(int(source_name))))
target_img=io.imread(os.path.join(dataDir,dataType,'COCO_train2014_{:012d}.jpg'.format(int(target_name))))
s_w,s_h = source_img.shape[:2]
t_w,t_h = target_img.shape[:2]
f.write('%s %.5f %.5f %.5f %.5f\n' % (file,x1*s_h/t_h,y1*s_w/t_w,x2*s_h/t_h,y2*s_w/t_w))
|
tests/test_serializers.py | sidarun88/django_private_chat2 | 150 | 11105869 |
from django.test import TestCase
from django_private_chat2.models import DialogsModel, MessageModel, UploadedFile
from django_private_chat2.serializers import serialize_message_model, serialize_dialog_model, serialize_file_model
from .factories import DialogsModelFactory, MessageModelFactory, UserFactory, faker
class SerializerTests(TestCase):
def setUp(self):
self.sender = UserFactory.create()
self.recipient = UserFactory.create()
self.message = MessageModel.objects.create(sender=self.sender, recipient=self.recipient,
text="testText", read=True)
self.dialog = DialogsModel.objects.filter(user1=self.sender, user2=self.recipient).first()
self.file = UploadedFile.objects.create(uploaded_by=self.sender, file="LICENSE")
def test_serialize_file_model(self):
serialized = serialize_file_model(self.file)
o = {
"id": str(self.file.id),
"url": self.file.file.url,
"size": self.file.file.size,
"name": "LICENSE"
}
self.assertEqual(serialized, o)
def test_serialize_message_with_file(self):
msg = MessageModel.objects.create(sender=self.sender, recipient=self.recipient, file=self.file, read=True)
serialized = serialize_message_model(msg, self.sender.pk)
o = {
"id": msg.id,
"text": '',
"sent": int(msg.created.timestamp()),
"edited": int(msg.modified.timestamp()),
"read": True,
"file": serialize_file_model(self.file),
"sender": str(self.sender.pk),
"recipient": str(self.recipient.pk),
"out": True,
"sender_username": self.sender.username
}
self.assertEqual(serialized, o)
def test_serialize_message_model(self):
serialized = serialize_message_model(self.message, self.sender.pk)
o = {
"id": self.message.id,
"text": "testText",
"sent": int(self.message.created.timestamp()),
"edited": int(self.message.modified.timestamp()),
"read": True,
"file": None,
"sender": str(self.sender.pk),
"recipient": str(self.recipient.pk),
"out": True,
"sender_username": self.sender.username
}
self.assertEqual(serialized, o)
def test_serialize_dialog_model(self):
serialized = serialize_dialog_model(self.dialog, self.sender.pk)
o = {
"id": self.dialog.id,
"created": int(self.dialog.created.timestamp()),
"modified": int(self.dialog.modified.timestamp()),
"other_user_id": str(self.recipient.id),
"unread_count": 0,
"username": self.recipient.username,
"last_message": serialize_message_model(self.message, self.sender.pk)
}
self.assertEqual(serialized, o)
def tearDown(self):
pass
|
statannotations/_GroupsPositions.py | orena1/statannotations | 110 | 11105951 | import numpy as np
from statannotations.utils import get_closest
class _GroupsPositions:
def __init__(self, plotter, group_names):
self._plotter = plotter
self._hue_names = self._plotter.hue_names
if self._hue_names is not None:
nb_hues = len(self._hue_names)
if nb_hues == 1:
raise ValueError(
"Using hues with only one hue is not supported.")
self.hue_offsets = self._plotter.hue_offsets
self._axis_units = self.hue_offsets[1] - self.hue_offsets[0]
self._groups_positions = {
np.round(self.get_group_axis_position(group_name), 1): group_name
for group_name in group_names
}
self._groups_positions_list = sorted(self._groups_positions.keys())
if self._hue_names is None:
self._axis_units = ((max(list(self._groups_positions.keys())) + 1)
/ len(self._groups_positions))
self._axis_ranges = {
(pos - self._axis_units / 2,
pos + self._axis_units / 2,
pos): group_name
for pos, group_name in self._groups_positions.items()}
@property
def axis_positions(self):
return self._groups_positions
@property
def axis_units(self):
return self._axis_units
def get_axis_pos_location(self, pos):
"""
Finds the x-axis location of a categorical variable
"""
for axis_range in self._axis_ranges:
if (pos >= axis_range[0]) & (pos <= axis_range[1]):
return axis_range[2]
def get_group_axis_position(self, group):
"""
group_name can be either a name "cat" or a tuple ("cat", "hue")
"""
if self._plotter.plot_hues is None:
cat = group
hue_offset = 0
else:
cat = group[0]
hue_level = group[1]
hue_offset = self._plotter.hue_offsets[
self._plotter.hue_names.index(hue_level)]
group_pos = self._plotter.group_names.index(cat) + hue_offset
return group_pos
def find_closest(self, pos):
return get_closest(list(self._groups_positions_list), pos)
|
tars/deployment/models/actions.py | js882829/tars | 371 | 11105961 | from django.db import models
from deployments import TarsDeployment
from roll_engine.models import DeploymentAction
class TarsDeploymentAction(DeploymentAction):
deployment = models.ForeignKey(
TarsDeployment, related_name='actions', db_constraint=False, null=True)
finish_time = models.DateTimeField(auto_now_add=True, null=True, blank=True)
class Meta:
db_table = 'deployment_actions'
def __unicode__(self):
return self.action
|
ml_logger/ml_logger_tests/conftest.py | mcx/ml_logger | 107 | 11106008 | LOCAL_TEST_DIR = '/tmp/ml-logger-debug'
def pytest_addoption(parser):
parser.addoption('--logdir', action='store', default=LOCAL_TEST_DIR,
help="The logging path for the test.")
|
deephyper/ensemble/_base_ensemble.py | felixeperez/deephyper | 185 | 11106041 | import abc
import json
import os
import traceback
import ray
import tensorflow as tf
class BaseEnsemble(abc.ABC):
"""Base class for ensembles, every new ensemble algorithms needs to extend this class.
Args:
model_dir (str): Path to directory containing saved Keras models in .h5 format.
loss (callable): a callable taking (y_true, y_pred) as input.
size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
verbose (bool, optional): Verbose mode. Defaults to True.
ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
"""
def __init__(
self,
model_dir,
loss,
size=5,
verbose=True,
ray_address="",
num_cpus=1,
num_gpus=None,
batch_size=32,
):
self.model_dir = os.path.abspath(model_dir)
self.loss = loss
self.members_files = []
self.size = size
self.verbose = verbose
self.ray_address = ray_address
self.num_cpus = num_cpus
self.num_gpus = num_gpus
self.batch_size = batch_size
if not(ray.is_initialized()):
ray.init(address=self.ray_address)
def __repr__(self) -> str:
out = ""
out += f"Model Dir: {self.model_dir}\n"
out += f"Members files: {self.members_files}\n"
out += f"Ensemble size: {len(self.members_files)}/{self.size}\n"
return out
def _list_files_in_model_dir(self):
return [f for f in os.listdir(self.model_dir) if f[-2:] == "h5"]
@abc.abstractmethod
def fit(self, X, y):
"""Fit the current algorithm to the provided data.
Args:
X (array): The input data.
y (array): The output data.
Returns:
BaseEnsemble: The current fitted instance.
"""
@abc.abstractmethod
def predict(self, X):
"""Execute an inference of the ensemble for the provided data.
Args:
X (array): An array of input data.
Returns:
array: The prediction.
"""
@abc.abstractmethod
def evaluate(self, X, y, metrics=None):
"""Compute metrics based on the provided data.
Args:
X (array): An array of input data.
y (array): An array of true output data.
metrics (callable, optional): A metric. Defaults to None.
"""
def load_members_files(self, file: str = "ensemble.json") -> None:
"""Load the members composing an ensemble.
Args:
file (str, optional): Path of JSON file containing the ensemble members. All members needs to be accessible in ``model_dir``. Defaults to "ensemble.json".
"""
with open(file, "r") as f:
self.members_files = json.load(f)
def save_members_files(self, file: str = "ensemble.json") -> None:
"""Save the list of file names of the members of the ensemble in a JSON file.
Args:
file (str, optional): Path JSON file where the file names are saved. Defaults to "ensemble.json".
"""
with open(file, "w") as f:
json.dump(self.members_files, f)
def load(self, file: str) -> None:
"""Load an ensemble from a save.
Args:
file (str): Path to the save of the ensemble.
"""
self.load_members_files(file)
def save(self, file: str=None) -> None:
"""Save an ensemble.
Args:
file (str): Path to the save of the ensemble.
"""
self.save_members_files(file)
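# Editor's sketch (not part of DeepHyper): a minimal concrete subclass showing
# which abstract methods a new ensemble must provide. The member selection and
# averaging below are illustrative only.
class _NaiveAverageEnsemble(BaseEnsemble):
    """Toy ensemble: keeps the first `size` saved models and averages their outputs."""

    def fit(self, X, y):
        # a real implementation would score candidate members on (X, y)
        self.members_files = self._list_files_in_model_dir()[: self.size]
        return self

    def predict(self, X):
        import numpy as np
        models = [
            tf.keras.models.load_model(os.path.join(self.model_dir, f), compile=False)
            for f in self.members_files
        ]
        return np.mean(
            [m.predict(X, batch_size=self.batch_size) for m in models], axis=0
        )

    def evaluate(self, X, y, metrics=None):
        y_pred = self.predict(X)
        return {"loss": float(tf.reduce_mean(self.loss(y, y_pred)).numpy())}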
|
tests/unit/repo/plots/test_diff.py | lucasalavapena/dvc | 9,136 | 11106063 | import pytest
from dvc.repo.plots.diff import _revisions
@pytest.mark.parametrize(
"arg_revisions,is_dirty,expected_revisions",
[
([], False, ["workspace"]),
([], True, ["HEAD", "workspace"]),
(["v1", "v2", "workspace"], False, ["v1", "v2", "workspace"]),
(["v1", "v2", "workspace"], True, ["v1", "v2", "workspace"]),
],
)
def test_revisions(mocker, arg_revisions, is_dirty, expected_revisions):
mock_scm = mocker.Mock()
mock_scm.configure_mock(
**{"is_dirty.return_value": is_dirty, "get_ref.return_value": None}
)
mock_repo = mocker.Mock(scm=mock_scm)
assert _revisions(mock_repo, arg_revisions, False) == expected_revisions
@pytest.mark.parametrize(
"arg_revisions,baseline,expected_revisions",
[
(["v1"], "v0", ["v1", "v0"]),
(["v1"], None, ["v1", "workspace"]),
(["v1", "v2"], "v0", ["v1", "v2"]),
(["v1", "v2"], None, ["v1", "v2"]),
],
)
def test_revisions_experiment(
mocker, arg_revisions, baseline, expected_revisions
):
mock_scm = mocker.Mock()
mock_scm.configure_mock(
**{"is_dirty.return_value": False, "get_ref.return_value": None}
)
mock_experiments = mocker.Mock()
mock_experiments.configure_mock(**{"get_baseline.return_value": baseline})
mock_repo = mocker.Mock(scm=mock_scm, experiments=mock_experiments)
assert _revisions(mock_repo, arg_revisions, True) == expected_revisions
|
alipay/aop/api/domain/KoubeiCateringPosDishgroupSyncModel.py | snowxmas/alipay-sdk-python-all | 213 | 11106081 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PosDishGroupModel import PosDishGroupModel
class KoubeiCateringPosDishgroupSyncModel(object):
def __init__(self):
self._pos_dish_group_model = None
@property
def pos_dish_group_model(self):
return self._pos_dish_group_model
@pos_dish_group_model.setter
def pos_dish_group_model(self, value):
if isinstance(value, PosDishGroupModel):
self._pos_dish_group_model = value
else:
self._pos_dish_group_model = PosDishGroupModel.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.pos_dish_group_model:
if hasattr(self.pos_dish_group_model, 'to_alipay_dict'):
params['pos_dish_group_model'] = self.pos_dish_group_model.to_alipay_dict()
else:
params['pos_dish_group_model'] = self.pos_dish_group_model
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiCateringPosDishgroupSyncModel()
if 'pos_dish_group_model' in d:
o.pos_dish_group_model = d['pos_dish_group_model']
return o
|
Python3/97.py | rakhi2001/ecom7 | 854 | 11106083 |
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
if not s1 and not s2 and not s3:
return True
if not s1:
return True if s2 == s3 else False
if not s2:
return True if s1 == s3 else False
length1 = len(s1)
length2 = len(s2)
length3 = len(s3)
if length1 + length2 != length3:
return False
memo = [[-1] * length2 for row in range(length1)]
def helper(idx1, idx2, idx3):
#print("idx1, idx2 are ",idx1, idx2)
if idx1 == length1 and idx2 == length2:
#print("True and return previous level")
return True
if idx1 < length1 and idx2 < length2 and memo[idx1][idx2] >= 0:
return memo[idx1][idx2]
if idx1 == length1:
if s3[idx3] == s2[idx2]:
ans = helper(idx1, idx2+1, idx3+1)
memo[idx1-1][idx2] = ans
#print("condition: end s1 and s2 == s3", memo)
return ans
else:
#print("condition: end s1 and no match!", memo)
return False
if idx2 == length2:
if s3[idx3] == s1[idx1]:
ans = helper(idx1+1, idx2, idx3+1)
memo[idx1][idx2-1] = ans
#print("condition: end s2 and s1 == s3", memo)
return ans
else:
#print("condition: end s2 and no match!", memo)
return False
if s3[idx3] == s1[idx1] and s3[idx3] == s2[idx2]:
ans = helper(idx1+1, idx2, idx3+1) or helper(idx1, idx2+1, idx3+1)
memo[idx1][idx2] = ans
#print("condition: s1 == s3 and s2 == s3", memo)
return ans
elif s3[idx3] == s1[idx1]:
ans = helper(idx1+1, idx2, idx3+1)
memo[idx1][idx2] = ans
#print("condition: s1 == s3", memo)
return ans
elif s3[idx3] == s2[idx2]:
ans = helper(idx1, idx2+1, idx3+1)
memo[idx1][idx2] = ans
#print("condition: s2 == s3", memo)
return ans
else:
#print("condition: no match!", memo)
return False
return helper(0,0,0)
__________________________________________________________________________________________________
sample 13184 kb submission
class Solution:
def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
l1 = len(s1)
l2 = len(s2)
l3 = len(s3)
if l1 == 0: return s2==s3
if l2 == 0: return s1==s3
if l1+l2 != l3: return False
dp = [[None]*(l2+1) for _ in range(l1+1)]
dp[0][0] = True
return self.isInterleave_dp(s1,s2,s3,dp)
def isInterleave_dp(self,s1,s2,s3,dp):
l1 = len(s1)
l2 = len(s2)
assert(dp[l1][l2] == None)
dp[l1][l2] = False
if l1 > 0:
# (l1-1, l2) branch
if dp[l1-1][l2] == None:
dp[l1-1][l2] = self.isInterleave_dp(s1[:-1],s2,s3[:-1],dp)
if dp[l1-1][l2] == True and s1[-1] == s3[-1]:
dp[l1][l2] = True
if l2 > 0:
# (l1, l2-1) branch
if dp[l1][l2-1] == None:
dp[l1][l2-1] = self.isInterleave_dp(s1,s2[:-1],s3[:-1],dp)
if dp[l1][l2-1] == True and s2[-1] == s3[-1]:
dp[l1][l2] = True
# print("%d %d %d" % (l1,l2,dp[l1][l2]))
return dp[l1][l2]
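# Editor's note: quick sanity checks for either solution above, e.g.
#   Solution().isInterleave("aabcc", "dbbca", "aadbbcbcac")  # -> True
#   Solution().isInterleave("aabcc", "dbbca", "aadbbbaccc")  # -> False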
__________________________________________________________________________________________________
|
corrector/bert_modeling/create_data.py | fire717/OCR-Corrector | 367 | 11106091 | import numpy as np
import pickle
import os
import argparse
def cut_line(sentence):
sent = ''
delimiter = ['。', ';', '?', '!']
for i, c in enumerate(sentence):
sent += c
if ((c in delimiter) and (sentence[min(len(sentence)-1, i + 1)] not in ['」', '”', '’'])) or i == len(sentence)-1:
yield sent
sent = ''
def cut_line2(sentence):
sent = ''
for i, c in enumerate(sentence):
sent += c
if c == ',':
flag = True
for j in range(i+1, min(len(sentence)-1, i+6)):
if sentence[j] == ',' or j == len(sentence)-1:
flag = False
if (flag and len(sent) > 20) or i == len(sentence)-1:
yield sent[:-1] + '。'
sent = ''
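# Editor's note: small illustration of the splitting helpers above (not in the
# original file):
#   list(cut_line('今天天气很好。我们去公园吧!'))
#   -> ['今天天气很好。', '我们去公园吧!']
# cut_line2 instead breaks long comma-separated sentences into chunks of
# roughly 20+ characters, replacing each chunk's trailing ',' with '。'.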
def make_docs(wrong, correct):
w_res = []
if ('。' in wrong[:-1]) or (';' in wrong[:-1]) or ('?' in wrong[:-1]) or ('!' in wrong[:-1]):
for w_sent in cut_line(wrong):
w_res.append(w_sent + '\n')
# wrong_file.write(w_sent + '\n')
elif len(wrong) > 100:
for w_sent in cut_line2(wrong):
w_res.append(w_sent + '\n')
# wrong_file.write(w_sent + '\n')
else:
w_res.append(wrong + '\n')
# wrong_file.write(wrong + '\n')
# wrong_file.write('\n')
c_res = []
if ('。' in correct[:-1]) or (';' in correct[:-1]) or ('?' in correct[:-1]) or ('!' in correct[:-1]):
for c_sent in cut_line(correct):
c_res.append(c_sent + '\n')
# correct_file.write(c_sent + '\n')
elif len(wrong) > 100:
for c_sent in cut_line2(correct):
c_res.append(c_sent + '\n')
# correct_file.write(c_sent + '\n')
else:
c_res.append(correct + '\n')
# correct_file.write(correct + '\n')
if len(w_res) != len(c_res):
w_res = [wrong + '\n']
c_res = [correct + '\n']
for w_r, c_r in zip(w_res, c_res):
if not len(w_r.strip()) == len(c_r.strip()):
print(w_r)
print(len(w_r.strip()))
print(c_r)
print(len(c_r.strip()))
exit()
for l in w_res:
wrong_file.write(l)
wrong_file.write('\n')
for l in c_res:
correct_file.write(l)
correct_file.write('\n')
def main(fname, output_dir):
confusions = {}
for line in open(fname, 'r', encoding='utf-8'):
num, wrong, correct = line.strip().split('\t')
wrong = wrong.strip()
correct = correct.strip()
for w, c in zip(wrong, correct):
if w!=c:
if w + c not in confusions:
confusions[w + c] = 0
confusions[w + c] += 1
# if len(wrong) != len(correct):
# print(wrong)
# print(correct)
# exit()
assert len(wrong) == len(correct)
num = int(num)
make_docs(wrong, correct)
if wrong != correct:
make_docs(correct, correct)
poses = [pos for pos, (w, c) in enumerate(zip(wrong, correct)) if w != c]
num = len(poses)
if num >= 2:
if len(poses) != num:
print(wrong)
print(correct)
exit()
assert len(poses) == num
for i in range(1, num):
selected_poses = [poses[k] for k in np.random.choice(num, i, replace=False)]
fake_wrong = list(wrong)
for p in selected_poses:
fake_wrong[p] = correct[p]
fake_wrong = ''.join(fake_wrong)
assert len(fake_wrong) == len(correct)
assert fake_wrong != correct
make_docs(fake_wrong, correct)
# take the top frequency of confusions about the each character.
top_confusions = {}
for k in confusions:
if k[0] not in top_confusions:
top_confusions[k[0]] = confusions[k]
elif top_confusions[k[0]] < confusions[k]:
top_confusions[k[0]] = confusions[k]
confusions_top = sorted(list(top_confusions.keys()), key=lambda x: top_confusions[x], reverse=True)
correct_count = {}
for line_c, line_w in zip(open(os.path.join(args.output, 'correct.txt'), 'r', encoding='utf-8'), open(os.path.join(args.output, 'wrong.txt'), 'r', encoding='utf-8')):
if line_c.strip():
wrong, correct = line_w.strip(), line_c.strip()
wrong = wrong.strip()
correct = correct.strip()
for w, c in zip(wrong, correct):
if w==c and w in top_confusions:
if w not in correct_count:
correct_count[w] = 0
correct_count[w] += 1
proportions = {}
for k in correct_count:
assert correct_count[k] != 0
proportions[k] = min(top_confusions[k] / correct_count[k], 1.0)
print('confusion statistics:')
for i in range(min(len(confusions_top), 20)):
if confusions_top[i] in correct_count:
correct_occurs = correct_count[confusions_top[i]]
proportions_num = proportions[confusions_top[i]]
else:
correct_occurs = 0
proportions_num = 'NaN'
print(f'most frequent confusion pair for {confusions_top[i]} occurs {top_confusions[confusions_top[i]]} times,'
f' correct ones occur {correct_occurs} times, mask probability should be {proportions_num}')
pickle.dump(proportions, open(os.path.join(args.output, 'mask_probability.sav'), 'wb'))
# print('top confusions:')
# for i in range(20):
# print(f'{top_confusions[i]} occurs {confusions[confusions_top[i]]} times')
# main()
def parse_args():
usage = '\n1. create wrong.txt, correct.txt and mask_probability.sav by:\n' \
'python create_data.py -f /path/to/train.txt\n' \
'\n' \
'\n2. specify output dir by:\n' \
'python create_data.py -f /path/to/train.txt -o /path/to/dir/\n' \
'\n'
parser = argparse.ArgumentParser(
description='A module for FASPell - Fast, Adaptable, Simple, Powerful Chinese Spell Checker', usage=usage)
parser.add_argument('--file', '-f', type=str, default=None,
help='original training data.')
parser.add_argument('--output', '-o', type=str, default='',
help='output a file a dir; default is current directory.')
# parser.add_argument('--verbose', '-v', action="store_true", default=False,
# help='to show details of spell checking sentences under mode s')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
correct_file = open(os.path.join(args.output,'correct.txt'), 'w', encoding='utf-8')
wrong_file = open(os.path.join(args.output,'wrong.txt'), 'w', encoding='utf-8')
main(args.file, args.output)
|
orgutil/temp.py | Sinamore/orgextended | 120 | 11106102 |
import sublime
import sublime_plugin
import re
from pathlib import Path
import os
import fnmatch
import logging
import sys
import traceback
import tempfile
from shutil import copyfile
log = logging.getLogger(__name__)
def CreateTempFile(source, suffix=".temp",start=None, end=None):
filename = None
tmp = tempfile.NamedTemporaryFile(delete=False,suffix=suffix)
try:
if(start):
tmp.write((start + "\n").encode("utf-8"))
tmp.write(source.encode('utf-8'))
if(end):
tmp.write(("\n" + end).encode("utf-8"))
filename = tmp.name
tmp.close()
except:
res = traceback.format_exc()
log.debug("Failed to create temp file: " + str(tmp.name) + "\n" + str(res))
pass
return filename
def CreateTempFileFromRegion(view, region,suffix=".temp",start=None, end=None):
content = view.substr(region)
return CreateTempFile(content,suffix,start,end)
def GetViewFileAs(view,extension):
sourcepath = os.path.dirname(view.file_name())
return os.path.join(sourcepath,os.path.splitext(os.path.basename(view.file_name()))[0] + extension)
def CopyTempToViewDir(view,tempfile,asfile):
sourcepath = os.path.dirname(view.file_name())
destFile = os.path.join(sourcepath,asfile)
copyfile(tempfile, destFile)
return destFile
|
lib/main.py | Rehzende/project-dev-kpis | 113 | 11106105 | import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import json
import time
import argparse
from util import logger
from prometheus_client import start_http_server
import inventory
import project_time_metrics
import project_contributors
import project_composition
import project_issue_transition
from api_server import ApiServer
API_SERVER_PORT = int(os.environ.get('API_SERVER_PORT', '80'))
def configure(configuration_filepath):
with open(configuration_filepath) as projects_file:
project_confs = json.load(projects_file)['projects']
project_synonym_mappings = dict([
(s, t['project_name'])
for t in project_confs
for s in t['project_name_synonyms']
])
assert(project_confs is not None)
assert(project_synonym_mappings is not {})
return project_confs, project_synonym_mappings
def schedule(minutes, task, health_check_server):
while True:
try:
tic = time.time()
task()
duration = time.time() - tic
sleep_time = max(60 * minutes - int(duration), 1)
logger.info("sleeping %d seconds" % sleep_time)
time.sleep(max(sleep_time, 0))
health_check_server.observe_health(True)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
logger.exception(e)
health_check_server.observe_health(False)
def main():
parser = argparse.ArgumentParser(prog='project-dev-kpis', description='Metrics server for project-dev-kpis.')
parser.add_argument('--projects-config', dest='projects_config', help='projects configuration file')
args = parser.parse_args(sys.argv[1:])
project_confs, project_synonym_mappings = configure(args.projects_config)
def metrics_task():
for conf in project_confs:
if conf['transition_parent_issues']:
project_issue_transition.transition_stories_with_wip_subtasks_to_wip(conf)
project_issue_transition.transition_epics_with_wip_issues_to_wip(conf)
project_issue_transition.transition_epics_with_resolved_issues_to_resolved(conf)
project_time_metrics.monitor_project_time_metrics(conf)
project_composition.monitor_project_composition(conf)
project_contributors.monitor_project_contributors(conf)
inventory.monitor_inventory_metrics(project_synonym_mappings)
api = ApiServer(
max_sequential_errors=2,
confs=project_confs,
port=API_SERVER_PORT
)
try:
api.start()
# prometheus server
start_http_server(8080)
schedule(minutes=5, task=metrics_task, health_check_server=api)
except (KeyboardInterrupt, SystemExit) as e:
api.stop()
if __name__ == "__main__":
main()
|
desktop/core/ext-py/nose-1.3.7/unit_tests/test_suite.py | kokosing/hue | 5,079 | 11106126 |
from nose.config import Config
from nose import case
from nose.suite import LazySuite, ContextSuite, ContextSuiteFactory, \
ContextList
import imp
import sys
import unittest
from mock import ResultProxyFactory, ResultProxy
class TestLazySuite(unittest.TestCase):
def setUp(self):
class TC(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
self.TC = TC
def test_test_generator(self):
TC = self.TC
tests = [TC('test_one'), TC('test_two')]
def gen_tests():
for test in tests:
yield test
suite = LazySuite(gen_tests)
self.assertEqual(list([test for test in suite]), tests)
def test_lazy_and_nonlazy(self):
TC = self.TC
tests = [TC('test_one'), TC('test_two')]
def gen_tests():
for test in tests:
yield test
nonlazy = LazySuite(tests)
lazy = LazySuite(gen_tests)
assert lazy
assert nonlazy
lazytests = []
nonlazytests = []
for t in lazy:
print "lazy %s" % t
lazytests.append(t)
for t in nonlazy:
print "nonlazy %s" % t
nonlazytests.append(t)
slazy = map(str, lazytests)
snonlazy = map(str, nonlazytests)
assert slazy == snonlazy, \
"Lazy and Nonlazy produced different test lists (%s vs %s)" \
% (slazy, snonlazy)
def test_lazy_nonzero(self):
"""__nonzero__ works correctly for lazy suites"""
TC = self.TC
tests = [TC('test_one'), TC('test_two')]
def gen_tests():
for test in tests:
yield test
lazy = LazySuite(gen_tests)
assert lazy
assert lazy
assert lazy
count = 0
for test in lazy:
print test
assert test
count += 1
self.assertEqual(count, 2, "Expected 2 tests, got %s" % count)
assert lazy
def gen_tests_empty():
for test in []:
yield test
return
empty = LazySuite(gen_tests_empty)
assert not empty
for test in empty:
assert False, "Loaded a test from empty suite: %s" % test
class TestContextSuite(unittest.TestCase):
def setUp(self):
class TC(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
self.TC = TC
def test_tests_are_wrapped(self):
"""Tests in a context suite are wrapped"""
suite = ContextSuite(
[self.TC('test_one'), self.TC('test_two')])
for test in suite:
assert isinstance(test.test, self.TC)
def test_nested_context_suites(self):
"""Nested suites don't re-wrap"""
suite = ContextSuite(
[self.TC('test_one'), self.TC('test_two')])
suite2 = ContextSuite(suite)
suite3 = ContextSuite([suite2])
# suite3 is [suite2]
tests = [t for t in suite3]
assert isinstance(tests[0], ContextSuite)
# suite2 is [suite]
tests = [t for t in tests[0]]
assert isinstance(tests[0], ContextSuite)
# suite is full of wrapped tests
tests = [t for t in tests[0]]
cases = filter(lambda t: isinstance(t, case.Test), tests)
assert cases
assert len(cases) == len(tests)
# sub-suites knows they have a context
#assert suite.context is None
#assert suite2.context is suite
#assert suite3.context is suite2
def test_context_fixtures_called(self):
class P:
was_setup = False
was_torndown = False
def setup(self):
self.was_setup = True
def teardown(self):
self.was_torndown = True
context = P()
suite = ContextSuite(
[self.TC('test_one'), self.TC('test_two')],
context=context)
res = unittest.TestResult()
suite(res)
assert not res.errors, res.errors
assert not res.failures, res.failures
assert context.was_setup
assert context.was_torndown
def test_context_fixtures_for_ancestors(self):
top = imp.new_module('top')
top.bot = imp.new_module('top.bot')
top.bot.end = imp.new_module('top.bot.end')
sys.modules['top'] = top
sys.modules['top.bot'] = top.bot
sys.modules['top.bot.end'] = top.bot.end
class TC(unittest.TestCase):
def runTest(self):
pass
top.bot.TC = TC
TC.__module__ = 'top.bot'
# suite with just TC test
# this suite should call top and top.bot setup
csf = ContextSuiteFactory()
suite = csf(ContextList([TC()], context=top.bot))
suite.setUp()
assert top in csf.was_setup, "Ancestor not set up"
assert top.bot in csf.was_setup, "Context not set up"
suite.has_run = True
suite.tearDown()
assert top in csf.was_torndown, "Ancestor not torn down"
assert top.bot in csf.was_torndown, "Context not torn down"
# wrapped suites
# the outer suite sets up its context, the inner
# its context only, without re-setting up the outer context
csf = ContextSuiteFactory()
inner_suite = csf(ContextList([TC()], context=top.bot))
suite = csf(ContextList(inner_suite, context=top))
suite.setUp()
assert top in csf.was_setup
assert not top.bot in csf.was_setup
inner_suite.setUp()
assert top in csf.was_setup
assert top.bot in csf.was_setup
assert csf.was_setup[top] is suite
assert csf.was_setup[top.bot] is inner_suite
def test_context_fixtures_setup_fails(self):
class P:
was_setup = False
was_torndown = False
def setup(self):
self.was_setup = True
assert False, "Setup failed"
def teardown(self):
self.was_torndown = True
context = P()
suite = ContextSuite(
[self.TC('test_one'), self.TC('test_two')],
context=context)
res = unittest.TestResult()
suite(res)
assert not res.failures, res.failures
assert res.errors, res.errors
assert context.was_setup
assert not context.was_torndown
assert res.testsRun == 0, \
"Expected to run no tests but ran %s" % res.testsRun
def test_context_fixtures_no_tests_no_setup(self):
class P:
was_setup = False
was_torndown = False
def setup(self):
self.was_setup = True
def teardown(self):
self.was_torndown = True
context = P()
suite = ContextSuite([], context=context)
res = unittest.TestResult()
suite(res)
assert not res.failures, res.failures
assert not res.errors, res.errors
assert not context.was_setup
assert not context.was_torndown
assert res.testsRun == 0, \
"Expected to run no tests but ran %s" % res.testsRun
def test_result_proxy_used(self):
class TC(unittest.TestCase):
def runTest(self):
raise Exception("error")
ResultProxy.called[:] = []
res = unittest.TestResult()
config = Config()
suite = ContextSuite([TC()], resultProxy=ResultProxyFactory())
suite(res)
calls = [ c[0] for c in ResultProxy.called ]
self.assertEqual(calls, ['beforeTest', 'startTest',
'addError', 'stopTest', 'afterTest'])
class TestContextSuiteFactory(unittest.TestCase):
def test_ancestry(self):
top = imp.new_module('top')
top.bot = imp.new_module('top.bot')
top.bot.end = imp.new_module('top.bot.end')
sys.modules['top'] = top
sys.modules['top.bot'] = top.bot
sys.modules['top.bot.end'] = top.bot.end
class P:
pass
top.bot.P = P
P.__module__ = 'top.bot'
csf = ContextSuiteFactory()
P_ancestors = list([a for a in csf.ancestry(P)])
self.assertEqual(P_ancestors, [top.bot, top])
end_ancestors = list([a for a in csf.ancestry(top.bot.end)])
self.assertEqual(end_ancestors, [top.bot, top])
bot_ancestors = list([a for a in csf.ancestry(top.bot)])
self.assertEqual(bot_ancestors, [top])
top_ancestors = list([a for a in csf.ancestry(top)])
self.assertEqual(top_ancestors, [])
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
# class TC(unittest.TestCase):
# def runTest(self):
# raise Exception("error")
# ResultProxy.called[:] = []
# res = unittest.TestResult()
# config = Config()
|
crafters/nlp/TikaExtractor/tests/test_tikaextractor.py | Harshdeep1996/jina-hub | 106 | 11106140 | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
from .. import TikaExtractor
def input_bytes():
with open('cats_are_awesome.pdf', 'rb') as pdf:
i_bytes = pdf.read()
return i_bytes
@pytest.mark.parametrize('uri, buffer', [
(np.stack(['cats_are_awesome.pdf', 'cats_are_awesome.pdf']), [None, None]),
([None, None], np.stack([input_bytes(), input_bytes()]))
])
def test_extraction(uri, buffer):
tika_extractor = TikaExtractor()
crafted_docs = tika_extractor.craft(uri, buffer)
assert len(crafted_docs) == 2
for crafted_doc in crafted_docs:
assert len(crafted_doc['text']) > 20
@pytest.mark.parametrize('uri, buffer', [
('cats_are_awesome.pdf', None),
(None, input_bytes())
])
def test_extraction_single(uri, buffer):
tika_extractor = TikaExtractor()
crafted_doc = tika_extractor.craft(uri, buffer)
assert len(crafted_doc['text']) > 20
@pytest.mark.parametrize('uri, buffer', [
('cats_are_awesome.pdf', None),
(None, input_bytes())
])
def test_extraction_single_kwargs(uri, buffer):
tika_extractor = TikaExtractor()
crafted_doc = tika_extractor.craft(uri=uri, buffer=buffer)
assert len(crafted_doc['text']) > 20
|
chainercv/links/model/mobilenet/__init__.py | Manny27nyc/chainercv | 1,600 | 11106145 |
from chainercv.links.model.mobilenet.expanded_conv_2d import ExpandedConv2D # NOQA
from chainercv.links.model.mobilenet.mobilenet_v2 import MobileNetV2 # NOQA
from chainercv.links.model.mobilenet.tf_conv_2d_bn_activ import TFConv2DBNActiv # NOQA
from chainercv.links.model.mobilenet.tf_convolution_2d import TFConvolution2D # NOQA
|
util/iso.py | Dennisbonke/toaruos | 429 | 11106159 |
#!/usr/bin/env python3
"""
Tool for creating ISO 9660 CD images.
"""
import array
import struct
class Structure(object):
assert_size = -1
def __init__(self):
self.data = {}
for field in self.fields:
if len(field) > 2:
f, s, d = field
self.data[s] = d
else:
f, s = field
if f.endswith('s'):
self.data[s] = b""
else:
self.data[s] = 0
if self.assert_size != -1:
assert(len(self) == self.assert_size)
def __len__(self):
return sum([struct.calcsize(f[0]) for f in self.fields])
def read(self, data, offset):
def read_struct(fmt,buf,offset):
out, = struct.unpack_from(fmt,buf,offset)
return out, offset + struct.calcsize(fmt)
o = offset
for field in self.fields:
if len(field) > 2:
f, s, _ = field
else:
f, s = field
self.data[s], o = read_struct(f, data, o)
return o
def write(self, data, offset):
def write_struct(fmt, buf, offset, value):
struct.pack_into(fmt, buf, offset, value)
return offset + struct.calcsize(fmt)
o = offset
for field in self.fields:
if len(field) > 2:
f, s, _ = field
else:
f, s = field
o = write_struct(f,data,o,self.data[s])
return o
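# Editor's sketch (not from the original file): subclasses describe their binary
# layout through `fields`; the names and values below are illustrative only.
#
#     class ExampleHeader(Structure):
#         assert_size = 8
#         fields = (
#             ('<I', 'magic', 0xCAFEBABE),
#             ('<H', 'version', 1),
#             ('<H', 'flags'),
#         )
#
#     hdr = ExampleHeader()        # defaults (or 0 / b"") are filled into hdr.data
#     buf = bytearray(len(hdr))    # 8 bytes, matching assert_size
#     hdr.write(buf, 0)            # pack the values into buf
#     hdr.read(buf, 0)             # unpack them back into hdr.data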
def read_struct(fmt,buf,offset):
out, = struct.unpack_from(fmt,buf,offset)
return out, offset + struct.calcsize(fmt)
class FAT(object):
def __init__(self, iso, offset):
self.iso = iso
self.offset = offset
self.bytespersector, _ = read_struct('H', self.iso.data, offset + 11)
self.sectorspercluster, _ = read_struct('B', self.iso.data, offset + 13)
self.reservedsectors, _ = read_struct('H', self.iso.data, offset + 14)
self.numberoffats, _ = read_struct('B', self.iso.data, offset + 16)
self.numberofdirs, _ = read_struct('H', self.iso.data, offset + 17)
self.fatsize, _ = read_struct('H', self.iso.data, offset + 22)
self.root_dir_sectors = (self.numberofdirs * 32 + (self.bytespersector - 1)) // self.bytespersector
self.first_data_sector = self.reservedsectors + (self.numberoffats * self.fatsize) + self.root_dir_sectors
self.root_sector= self.first_data_sector - self.root_dir_sectors
self.root = FATDirectory(self, self.offset + self.root_sector * self.bytespersector)
def get_offset(self, cluster):
return self.offset + ((cluster - 2) * self.sectorspercluster + self.first_data_sector) * self.bytespersector
def get_file(self, path):
units = path.split('/')
units = units[1:]
me = self.root
out = None
for i in units:
for fatfile in me.list():
if fatfile.readable_name() == i:
me = fatfile.to_dir()
out = fatfile
break
else:
return None
return out
class FATDirectory(object):
def __init__(self, fat, offset):
self.fat = fat
self.offset = offset
def list(self):
o = self.offset
while 1:
out = FATFile(self.fat, o)
if out.name != '\0\0\0\0\0\0\0\0':
yield out
else:
break
o += out.size
class FATFile(object):
def __init__(self, fat, offset):
self.fat = fat
self.offset = offset
self.magic_long = None
self.size = 0
self.long_name = ''
o = self.offset
self.actual_offset = o
self.attrib, _ = read_struct('B',self.fat.iso.data,o+11)
while (self.attrib & 0x0F) == 0x0F:
# Long file name entry
tmp = read_struct('10s',self.fat.iso.data,o+1)[0]
tmp += read_struct('12s',self.fat.iso.data,o+14)[0]
tmp += read_struct('4s',self.fat.iso.data,o+28)[0]
            tmp = "".join([chr(x) for x in tmp[::2] if x != 0xFF]).strip('\x00')
self.long_name = tmp + self.long_name
self.size += 32
o = self.offset + self.size
self.actual_offset = o
self.attrib, _ = read_struct('B',self.fat.iso.data,o+11)
o = self.offset + self.size
self.name, o = read_struct('8s',self.fat.iso.data,o)
self.ext, o = read_struct('3s',self.fat.iso.data,o)
self.attrib, o = read_struct('B',self.fat.iso.data,o)
self.userattrib, o = read_struct('B',self.fat.iso.data,o)
self.undelete, o = read_struct('b',self.fat.iso.data,o)
self.createtime, o = read_struct('H',self.fat.iso.data,o)
self.createdate, o = read_struct('H',self.fat.iso.data,o)
self.accessdate, o = read_struct('H',self.fat.iso.data,o)
self.clusterhi, o = read_struct('H',self.fat.iso.data,o)
self.modifiedti, o = read_struct('H',self.fat.iso.data,o)
self.modifiedda, o = read_struct('H',self.fat.iso.data,o)
self.clusterlow, o = read_struct('H',self.fat.iso.data,o)
self.filesize, o = read_struct('I',self.fat.iso.data,o)
self.name = self.name.decode('ascii')
self.ext = self.ext.decode('ascii')
self.size += 32
self.cluster = (self.clusterhi << 16) + self.clusterlow
def is_dir(self):
return bool(self.attrib & 0x10)
def is_long(self):
return bool((self.attrib & 0x0F) == 0x0F)
def to_dir(self):
return FATDirectory(self.fat, self.fat.get_offset(self.cluster))
def get_offset(self):
return self.fat.get_offset(self.cluster)
def readable_name(self):
if self.long_name:
return self.long_name
if self.ext.strip():
return (self.name.strip() + '.' + self.ext.strip()).lower()
else:
return self.name.strip().lower()
def make_time():
data = array.array('b',b'\0'*17)
struct.pack_into(
'4s2s2s2s2s2s2sb',
data, 0,
b'2018', b'11', b'14', # Year, Month, Day
b'12', b'00', b'00', # Hour, Minute, Second
b'00', # Hundreths
0, # Offset
)
return bytes(data)
def make_date():
data = array.array('b',b'\0'*7)
struct.pack_into(
'BBBBBBb',
data, 0,
118, 11, 14,
12, 0, 0,
0,
)
return bytes(data)
class ISOBootRecord(Structure):
assert_size = 2048
fields = (
('B', 'type_code', 0),
('5s', 'cd001', b'CD001'),
('B', 'version', 1),
('32s', 'boot_system_identifier'),
('32s', 'boot_identifier'),
('1977s', 'boot_record_data'),
)
class ISOElToritoBootRecord(ISOBootRecord):
assert_size = 2048
fields = (
('B', 'type_code', 0),
('5s', 'cd001', b'CD001'),
('B', 'version', 1),
('32s', 'boot_system_identifier',b'EL TORITO SPECIFICATION'),
('32s', 'boot_identifier'),
('<I', 'catalog_lba'),
('1973s', 'boot_record_data'),
)
def set_catalog(self, catalog_lba):
self.data['catalog_lba'] = catalog_lba
class ISOPrimaryVolumeDescriptor(Structure):
assert_size = 2048
fields = (
('B', 'type_code', 1),
('5s', 'cd001', b'CD001'),
('B', 'version', 1),
('B', 'unused_0', 0),
('32s', 'system_id', b' '*32),
('32s', 'volume_id', b'ToaruOS Boot CD'.ljust(32)),
('8s', 'unused_1', b'\0'*8),
('<I', 'volume_space_lsb'),
('>I', 'volume_space_msb'),
('32s', 'unused_2', b'\0'*32),
('<H', 'volume_set_size_lsb', 1),
('>H', 'volume_set_size_msb', 1),
('<H', 'volume_sequence_lsb', 1),
('>H', 'volume_sequence_msb', 1),
('<H', 'logical_block_size_lsb', 2048),
('>H', 'logical_block_size_msb', 2048),
('<I', 'path_table_size_lsb'),
('>I', 'path_table_size_msb'),
('<I', 'type_l_table_lsb'),
('<I', 'optional_type_l_table_lsb'),
('>I', 'type_m_table_msb'),
('>I', 'optional_type_m_table_msb'),
('34s', 'root_entry_data'),
('128s', 'volume_set_identifier', b' '*128),
('128s', 'publisher_identifier', b' '*128),
('128s', 'data_preparer_identifier', b' '*128),
('128s', 'application_identifier',b' '*128),
('38s', 'copyright_file_identifier',b' '*38),
('36s', 'abstract_file_identifier',b' '*36),
('37s', 'bibliographic_file_identifier',b' '*37),
('17s', 'volume_creation_time',make_time()),
('17s', 'volume_modification_time',make_time()),
('17s', 'volume_expiration_time',make_time()),
('17s', 'volume_effective_time',make_time()),
('B', 'file_structure_version'),
('B', 'unused_3', 0),
('512s', 'application_data'),
('653s', 'reserved', b'\0'*653),
)
class ISOVolumeDescriptorSetTerminator(Structure):
assert_size = 2048
fields = (
('B', 'type_code', 0xFF),
('5s', 'cd001', b'CD001'),
('B', 'version', 1),
('2041s', 'unused', b'\0'*2041)
)
class ISODirectoryEntry(Structure):
assert_size = 33
fields = (
('B', 'length'),
('B', 'ext_length'),
('<I', 'extent_start_lsb'),
('>I', 'extent_start_msb'),
('<I', 'extent_length_lsb'),
('>I', 'extent_length_msb'),
('7s', 'record_date', make_date()),
('B', 'flags'),
('B', 'interleave_units'),
('B', 'interleave_gap'),
('<H', 'volume_seq_lsb'),
('>H', 'volume_seq_msb'),
('B', 'name_len'),
)
def set_name(self, name):
self.data['name_len'] = len(name)
self.name = name
self.data['length'] = self.assert_size + len(self.name)
if self.data['length'] % 2:
self.data['length'] += 1
def set_extent(self, start, length):
self.data['extent_start_lsb'] = start
self.data['extent_start_msb'] = start
self.data['extent_length_lsb'] = length
self.data['extent_length_msb'] = length
def write(self, data, offset):
o = super(ISODirectoryEntry,self).write(data,offset)
struct.pack_into(str(len(self.name))+'s', data, o, self.name.encode('utf-8'))
return offset + self.data['length']
class ArbitraryData(object):
def __init__(self, path=None, size=None):
if path:
with open(path,'rb') as f:
tmp = f.read()
self.data = array.array('b',tmp)
elif size:
self.data = array.array('b',b'\0'*size)
else:
raise ValueError("Expected one of path or size to be set.")
self.size = len(self.data.tobytes())
self.actual_size = self.size
while (self.size % 2048):
self.size += 1
def write(self, data, offset):
struct.pack_into(str(self.size) + 's', data, offset, self.data.tobytes())
return offset + self.size
def make_entry():
return b'\0'*34
class ISO9660(object):
def __init__(self, from_file=None):
self.primary_volume_descriptor = ISOPrimaryVolumeDescriptor()
self.boot_record = ISOElToritoBootRecord()
self.volume_descriptor_set_terminator = ISOVolumeDescriptorSetTerminator()
self.el_torito_catalog = ElToritoCatalog()
self.allocate = 0x13
if from_file:
# Only for a file we produced.
with open(from_file, 'rb') as f:
tmp = f.read()
data = array.array('b', tmp)
self.primary_volume_descriptor.read(data, 0x10 * 2048)
self.boot_record.read(data, 0x11 * 2048)
self.volume_descriptor_set_terminator.read(data, 0x12 * 2048)
self.el_torito_catalog.read(data, self.boot_record.data['catalog_lba'] * 2048)
else:
# Root directory
self.root = ISODirectoryEntry()
self.root.data['flags'] = 0x02 # Directory
self.root.set_name(' ')
self.root_data = ArbitraryData(size=2048)
self.root_data.sector_offset = self.allocate_space(1)
self.root.set_extent(self.root_data.sector_offset,self.root_data.size)
# Dummy entries
t = ISODirectoryEntry()
t.set_name('')
o = t.write(self.root_data.data, 0)
t = ISODirectoryEntry()
t.set_name('\1')
o = t.write(self.root_data.data, o)
# Kernel
self.kernel_data = ArbitraryData(path='fatbase/kernel')
self.kernel_data.sector_offset = self.allocate_space(self.kernel_data.size // 2048)
self.kernel_entry = ISODirectoryEntry()
self.kernel_entry.set_name('KERNEL.')
self.kernel_entry.set_extent(self.kernel_data.sector_offset, self.kernel_data.actual_size)
o = self.kernel_entry.write(self.root_data.data, o)
# Ramdisk
self.ramdisk_data = ArbitraryData(path='fatbase/ramdisk.img')
self.ramdisk_data.sector_offset = self.allocate_space(self.ramdisk_data.size // 2048)
self.ramdisk_entry = ISODirectoryEntry()
self.ramdisk_entry.set_name('RAMDISK.IMG')
self.ramdisk_entry.set_extent(self.ramdisk_data.sector_offset, self.ramdisk_data.actual_size)
o = self.ramdisk_entry.write(self.root_data.data, o)
# Modules directory
self.mods_data = ArbitraryData(size=(2048*2)) # Just in case
self.mods_data.sector_offset = self.allocate_space(self.mods_data.size // 2048)
self.mods_entry = ISODirectoryEntry()
self.mods_entry.data['flags'] = 0x02
self.mods_entry.set_name('MOD')
self.mods_entry.set_extent(self.mods_data.sector_offset, self.mods_data.actual_size)
o = self.mods_entry.write(self.root_data.data, o)
self.payloads = []
# Modules themselves
t = ISODirectoryEntry()
t.set_name('')
o = t.write(self.mods_data.data, 0)
t = ISODirectoryEntry()
t.set_name('\1')
o = t.write(self.mods_data.data, o)
for mod_file in [
'fatbase/mod/ac97.ko',
'fatbase/mod/ata.ko',
'fatbase/mod/ataold.ko',
'fatbase/mod/debug_sh.ko',
'fatbase/mod/dospart.ko',
'fatbase/mod/e1000.ko',
'fatbase/mod/ext2.ko',
'fatbase/mod/hda.ko',
'fatbase/mod/iso9660.ko',
'fatbase/mod/lfbvideo.ko',
'fatbase/mod/net.ko',
'fatbase/mod/packetfs.ko',
'fatbase/mod/pcnet.ko',
'fatbase/mod/pcspkr.ko',
'fatbase/mod/portio.ko',
'fatbase/mod/procfs.ko',
'fatbase/mod/ps2kbd.ko',
'fatbase/mod/ps2mouse.ko',
'fatbase/mod/random.ko',
'fatbase/mod/rtl.ko',
'fatbase/mod/serial.ko',
'fatbase/mod/snd.ko',
'fatbase/mod/tmpfs.ko',
'fatbase/mod/usbuhci.ko',
'fatbase/mod/vbox.ko',
'fatbase/mod/vgadbg.ko',
'fatbase/mod/vgalog.ko',
'fatbase/mod/vidset.ko',
'fatbase/mod/vmware.ko',
'fatbase/mod/xtest.ko',
'fatbase/mod/zero.ko',
'fatbase/mod/tarfs.ko',
]:
payload = ArbitraryData(path=mod_file)
payload.sector_offset = self.allocate_space(payload.size // 2048)
entry = ISODirectoryEntry()
entry.set_name(mod_file.replace('fatbase/mod/','').upper())
entry.set_extent(payload.sector_offset, payload.actual_size)
o = entry.write(self.mods_data.data, o)
self.payloads.append(payload)
# Set up the boot catalog and records
self.el_torito_catalog.sector_offset = self.allocate_space(1)
self.boot_record.set_catalog(self.el_torito_catalog.sector_offset)
self.boot_payload = ArbitraryData(path='cdrom/boot.sys')
self.boot_payload.sector_offset = self.allocate_space(self.boot_payload.size // 2048)
self.el_torito_catalog.initial_entry.data['sector_count'] = self.boot_payload.size // 512
self.el_torito_catalog.initial_entry.data['load_rba'] = self.boot_payload.sector_offset
#self.el_torito_catalog.section.data['sector_count'] = 0 # Expected to be 0 or 1 for "until end of CD"
#self.el_torito_catalog.section.data['load_rba'] = self.fat_payload.sector_offset
self.primary_volume_descriptor.data['root_entry_data'] = make_entry()
def allocate_space(self, sectors):
out = self.allocate
self.allocate += sectors
return out
def write(self, file_name):
with open(file_name, 'wb') as f:
data = array.array('b',b'\0'*(2048*self.allocate))
self.primary_volume_descriptor.write(data,0x10 * 2048)
self.root.write(data,0x10*2048 + 156)
self.boot_record.write(data,0x11 * 2048)
self.mods_data.write(data, self.mods_data.sector_offset * 2048)
self.root_data.write(data,self.root_data.sector_offset * 2048)
self.volume_descriptor_set_terminator.write(data,0x12 * 2048)
self.el_torito_catalog.write(data,self.el_torito_catalog.sector_offset * 2048)
self.boot_payload.write(data,self.boot_payload.sector_offset * 2048)
self.kernel_data.write(data,self.kernel_data.sector_offset * 2048)
self.ramdisk_data.write(data,self.ramdisk_data.sector_offset * 2048)
#self.fat_payload.write(data,self.fat_payload.sector_offset * 2048)
for payload in self.payloads:
payload.write(data,payload.sector_offset * 2048)
data.tofile(f)
class ElToritoValidationEntry(Structure):
assert_size = 0x20
fields = (
('B','header_id',1),
('B','platform_id',0),
('<H','reserved_0'),
('24s','id_str',b'\0'*24),
('<H','checksum',0x55aa),
('B','key_55',0x55),
('B','key_aa',0xaa),
)
class ElToritoInitialEntry(Structure):
assert_size = 0x20
fields = (
('B','bootable',0x88),
('B','media_type'),
('<H','load_segment'),
('B','system_type'),
('B','unused_0'),
('<H','sector_count'),
('<I','load_rba'),
('20s','unused_1',b'\0'*20),
)
class ElToritoSectionHeader(Structure):
assert_size = 0x20
fields = (
('B','header_id',0x91),
('B','platform_id',0xEF),
('<H','sections',1),
('28s','id_str',b'\0'*28)
)
class ElToritoSectionEntry(Structure):
assert_size = 0x20
fields = (
('B','bootable',0x88),
('B','media_type'),
('<H','load_segment'),
('B','system_type'),
('B','unused_0'),
('<H','sector_count'),
('<I','load_rba'),
('B','selection_criteria'),
('19s','vendor'),
)
class ElToritoCatalog(object):
def __init__(self):
self.validation_entry = ElToritoValidationEntry()
self.initial_entry = ElToritoInitialEntry()
self.section_header = ElToritoSectionHeader()
self.section = ElToritoSectionEntry()
def read(self, data, offset):
o = offset
o = self.validation_entry.read(data, o)
o = self.initial_entry.read(data, o)
o = self.section_header.read(data, o)
o = self.section.read(data, o)
def write(self, data, offset):
o = offset
o = self.validation_entry.write(data, o)
o = self.initial_entry.write(data, o)
o = self.section_header.write(data, o)
o = self.section.write(data, o)
iso = ISO9660()
#print(iso.el_torito_catalog.validation_entry.data)
#print(iso.el_torito_catalog.initial_entry.data)
#print(iso.el_torito_catalog.section_header.data)
#print(iso.el_torito_catalog.section.data)
iso.write('test.iso')
|
sknetwork/utils/tests/test_ward.py | altana-tech/scikit-network | 457 | 11106179 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on October 2019
@author: <NAME> <<EMAIL>>
"""
import unittest
import numpy as np
from sknetwork.utils import WardDense
class TestWard(unittest.TestCase):
    def test_ward(self):
x = np.random.randn(10, 3)
ward = WardDense()
dendrogram = ward.fit_transform(x)
self.assertEqual(dendrogram.shape, (x.shape[0] - 1, 4))
|
docs/src/util/toml.py | andrevidela/elba | 182 | 11106234 | #!/usr/bin/env python
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
using, DelegatingLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator, Literal, Whitespace
class TOMLLexer(RegexLexer):
"""
Lexer for TOML, a simple language for config files
"""
name = 'TOML'
aliases = ['toml']
filenames = ['*.toml']
tokens = {
'root': [
# Basics, comments, strings
(r'\s+', Text),
(r'#.*?$', Comment.Single),
(r'"(\\\\|\\"|[^"])*"', String),
(r'(true|false)$', Keyword.Constant),
            (r'[a-zA-Z_][a-zA-Z0-9_\-]*', Name),
# Datetime
(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', Number.Integer),
# Numbers
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'\-?\d+', Number.Integer),
# Punctuation
(r'[]{}:(),;[]', Punctuation),
(r'\.', Punctuation),
# Operators
(r'=', Operator)
]
}
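# Illustrative usage sketch (not part of the original module): highlight a small
# TOML snippet with this lexer. Assumes pygments is installed; the sample text
# below is made up for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'title = "example"\nenabled = true\ncount = 3\n'
    print(highlight(sample, TOMLLexer(), TerminalFormatter()))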
|
src/python/Somatic/Strelka.py | nh13/hap.py | 315 | 11106251 | # coding=utf-8
#
# Copyright (c) 2010-2015 Illumina, Inc.
# All rights reserved.
#
# This file is distributed under the simplified BSD license.
# The full text can be found here (and in LICENSE.txt in the root folder of
# this distribution):
#
# https://github.com/Illumina/licenses/blob/master/Simplified-BSD-License.txt
import pandas
import logging
from Tools.vcfextract import vcfExtract, extractHeaders
def extractStrelkaSNVFeatures(vcfname, tag, avg_depth=None):
""" Return a data frame with features collected from the given VCF, tagged by given type
:param vcfname: name of the VCF file
:param tag: type of variants
:param avg_depth: average chromosome depths from BAM file
"""
features = ["CHROM", "POS", "REF", "ALT", "FILTER",
"I.NT", "I.SOMATIC", "I.QSS_NT",
"I.VQSR", "I.EVS", "I.EVSF", "I.SomaticEVS",
"I.SGT", "I.MQ", "I.MQ0",
"I.SNVSB", "I.ReadPosRankSum",
"S.1.SDP", "S.2.SDP",
"S.1.FDP", "S.2.FDP",
"S.1.DP", "S.2.DP",
"S.1.AU", "S.2.AU",
"S.1.CU", "S.2.CU",
"S.1.GU", "S.2.GU",
"S.1.TU", "S.2.TU"]
cols = ["CHROM", "POS", "REF", "ALT",
"NT", "NT_REF", "QSS_NT", "FILTER", "SomaticEVS", "EVS", "VQSR",
"N_FDP_RATE", "T_FDP_RATE", "N_SDP_RATE", "T_SDP_RATE",
"N_DP", "T_DP", "N_DP_RATE", "T_DP_RATE",
"N_AF", "T_AF",
"MQ", "MQ0",
"SNVSB",
"ReadPosRankSum", "tag"]
vcfheaders = list(extractHeaders(vcfname))
evs_featurenames = {}
for l in vcfheaders:
if '##snv_scoring_features' in l:
try:
xl = str(l).split('=', 1)
xl = xl[1].split(",")
for i, n in enumerate(xl):
evs_featurenames[i] = n
cols.append("E." + n)
logging.info("Scoring feature %i : %s" % (i, n))
except:
logging.warn("Could not parse scoring feature names from Strelka output")
records = []
if not avg_depth:
avg_depth = {}
for l in vcfheaders:
x = str(l).lower()
x = x.replace("##meandepth_", "##maxdepth_")
x = x.replace("##depth_", "##maxdepth_")
if '##maxdepth_' in x:
p, _, l = l.partition("_")
xl = str(l).split('=')
xchr = xl[0]
avg_depth[xchr] = float(xl[1])
logging.info("%s depth from VCF header is %f" % (xchr, avg_depth[xchr]))
has_warned = {}
for vr in vcfExtract(vcfname, features):
rec = {}
for i, ff in enumerate(features):
rec[ff] = vr[i]
# read VQSR value, if it's not present, set to -1 (old versions of Strelka)
try:
rec["I.VQSR"] = float(rec["I.VQSR"])
except:
rec["I.VQSR"] = -1.0
# read EVS value, if it's not present, set to -1 (old versions of Strelka)
if "I.SomaticEVS" in rec:
try:
rec["I.EVS"] = float(rec["I.SomaticEVS"])
except:
rec["I.EVS"] = -1.0
else:
try:
rec["I.EVS"] = float(rec["I.EVS"])
except:
rec["I.EVS"] = -1.0
# fix missing features
for q in ["I.QSS_NT", "I.MQ", "I.MQ0",
"I.SNVSB", "I.ReadPosRankSum", "S.1.SDP", "S.2.SDP",
"S.1.FDP", "S.2.FDP",
"S.1.DP", "S.2.DP",
"S.1.AU", "S.2.AU",
"S.1.CU", "S.2.CU",
"S.1.GU", "S.2.GU",
"S.1.TU", "S.2.TU"]:
if q not in rec or rec[q] is None:
rec[q] = 0
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
rec["tag"] = tag
NT = rec["I.NT"]
NT_is_ref = int(NT == "ref")
QSS_NT = int(rec["I.QSS_NT"])
try:
MQ = float(rec["I.MQ"])
except:
MQ = None
try:
MQ_ZERO = float(rec["I.MQ0"])
except:
MQ_ZERO = None
n_FDP = float(rec["S.1.FDP"])
t_FDP = float(rec["S.2.FDP"])
n_SDP = float(rec["S.1.SDP"])
t_SDP = float(rec["S.2.SDP"])
n_DP = float(rec["S.1.DP"])
t_DP = float(rec["S.2.DP"])
n_FDP_ratio = n_FDP / n_DP if n_DP != 0 else 0
t_FDP_ratio = t_FDP / t_DP if t_DP != 0 else 0
n_SDP_ratio = n_SDP / (n_DP + n_SDP) if (n_DP + n_SDP) != 0 else 0
t_SDP_ratio = t_SDP / (t_DP + t_SDP) if (t_DP + t_SDP) != 0 else 0
n_DP_ratio = 0
t_DP_ratio = 0
if avg_depth:
try:
n_DP_ratio = n_DP / float(avg_depth[rec["CHROM"]])
t_DP_ratio = t_DP / float(avg_depth[rec["CHROM"]])
except:
if not rec["CHROM"] in has_warned:
logging.warn("Cannot normalize depths on %s" % rec["CHROM"])
has_warned[rec["CHROM"]] = True
elif "DPnorm" not in has_warned:
logging.warn("Cannot normalize depths.")
has_warned["DPnorm"] = True
# Ref and alt allele counts for tier1 and tier2
allele_ref = rec["REF"]
try:
t_allele_ref_counts = map(float, rec['S.2.' + allele_ref + 'U'])
except:
t_allele_ref_counts = [0, 0]
alleles_alt = rec["ALT"]
try:
t_allele_alt_counts = [0, 0]
for a in alleles_alt:
for i in range(2):
t_allele_alt_counts[i] += float(rec['S.2.' + a + 'U'][i])
except:
t_allele_alt_counts = [0, 0]
# Compute the tier1 and tier2 alt allele rates.
if t_allele_alt_counts[0] + t_allele_ref_counts[0] == 0:
t_tier1_allele_rate = 0
else:
t_tier1_allele_rate = t_allele_alt_counts[0] / float(t_allele_alt_counts[0] + t_allele_ref_counts[0])
try:
n_allele_ref_counts = map(float, rec['S.1.' + allele_ref + 'U'])
except:
n_allele_ref_counts = [0, 0]
alleles_alt = rec["ALT"]
try:
n_allele_alt_counts = [0, 0]
for a in alleles_alt:
for i in range(2):
n_allele_alt_counts[i] += float(rec['S.1.' + a + 'U'][i])
except:
n_allele_alt_counts = [0, 0]
# Compute the tier1 and tier2 alt allele rates.
if n_allele_alt_counts[0] + n_allele_ref_counts[0] == 0:
n_tier1_allele_rate = 0
else:
n_tier1_allele_rate = n_allele_alt_counts[0] / float(n_allele_alt_counts[0] + n_allele_ref_counts[0])
try:
snvsb = rec["I.SNVSB"]
except:
snvsb = 0
try:
rprs = rec["I.ReadPosRankSum"]
except:
rprs = 0
# Gather the computed data into a dict
qrec = {
"CHROM": rec["CHROM"],
"POS": int(rec["POS"]),
"REF": rec["REF"],
"ALT": ",".join(rec["ALT"]),
"FILTER": ",".join(rec["FILTER"]),
"NT": NT,
"NT_REF": NT_is_ref,
"QSS_NT": QSS_NT,
"VQSR": rec["I.VQSR"],
"EVS": rec["I.EVS"],
"N_FDP_RATE": n_FDP_ratio,
"T_FDP_RATE": t_FDP_ratio,
"N_SDP_RATE": n_SDP_ratio,
"T_SDP_RATE": t_SDP_ratio,
"N_DP": n_DP,
"T_DP": t_DP,
"N_DP_RATE": n_DP_ratio,
"T_DP_RATE": t_DP_ratio,
"N_AF": n_tier1_allele_rate,
"T_AF": t_tier1_allele_rate,
"MQ": MQ,
"MQ0": MQ_ZERO,
"SNVSB": snvsb,
"ReadPosRankSum": rprs,
"tag": tag
}
# ESF features
try:
for i, v in enumerate(rec["I.EVSF"]):
if i in evs_featurenames:
try:
qrec["E." + evs_featurenames[i]] = float(v)
except:
# failure to parse
pass
except:
pass
for k, v in evs_featurenames.iteritems():
if not "E." + v in qrec:
qrec["E." + v] = 0
records.append(qrec)
if records:
df = pandas.DataFrame(records, columns=cols)
else:
df = pandas.DataFrame(columns=cols)
return df
def extractStrelkaIndelFeatures(vcfname, tag, avg_depth=None):
""" Return a data frame with features collected from the given VCF, tagged by given type
:param vcfname: name of the VCF file
:param tag: type of variants
:param avg_depth: average chromosome depths from BAM file
"""
features = ["CHROM", "POS", "REF", "ALT", "FILTER",
"I.NT", "I.SOMATIC", "I.QSI_NT", "I.EVS", "I.EVSF", "I.SomaticEVS",
"I.SGT", "I.RC", "I.RU",
"I.IC", "I.IHP",
"I.MQ", "I.MQ0",
"S.1.DP", "S.2.DP",
"S.1.TAR", "S.2.TAR",
"S.1.TIR", "S.2.TIR",
"S.1.TOR", "S.2.TOR",
"S.1.BCN50", "S.2.BCN50",
"S.1.FDP50", "S.2.FDP50",
]
cols = ["CHROM",
"POS",
"REF",
"ALT",
"LENGTH",
"INDELTYPE",
"FILTER",
"NT",
"NT_REF",
"EVS",
"QSI_NT",
"N_DP",
"T_DP",
"N_DP_RATE",
"T_DP_RATE",
"N_BCN",
"T_BCN",
"N_FDP",
"T_FDP",
"N_AF",
"T_AF",
"SGT",
"RC",
"RU",
"RU_LEN",
"IC",
"IHP",
"MQ",
"MQ0",
"tag"]
records = []
vcfheaders = list(extractHeaders(vcfname))
evs_featurenames = {}
for l in vcfheaders:
if '##indel_scoring_features' in l:
try:
xl = str(l).split('=', 1)
xl = xl[1].split(",")
for i, n in enumerate(xl):
evs_featurenames[i] = n
cols.append("E." + n)
logging.info("Scoring feature %i : %s" % (i, n))
except:
logging.warn("Could not parse scoring feature names from Strelka output")
if not avg_depth:
avg_depth = {}
for l in vcfheaders:
x = str(l).lower()
x = x.replace("##meandepth_", "##maxdepth_")
x = x.replace("##depth_", "##maxdepth_")
if '##maxdepth_' in x:
p, _, l = l.partition("_")
xl = str(l).split('=')
xchr = xl[0]
avg_depth[xchr] = float(xl[1])
logging.info("%s depth from VCF header is %f" % (xchr, avg_depth[xchr]))
has_warned = {}
for vr in vcfExtract(vcfname, features):
rec = {}
for i, ff in enumerate(features):
rec[ff] = vr[i]
rec["tag"] = tag
if "I.SomaticEVS" in rec:
try:
rec["I.EVS"] = float(rec["I.SomaticEVS"])
except:
rec["I.EVS"] = -1.0
else:
try:
rec["I.EVS"] = float(rec["I.EVS"])
except:
rec["I.EVS"] = -1.0
# fix missing features
for q in ["I.QSI_NT", "I.RC", "I.IC", "I.IHP",
"S.1.DP", "S.2.DP",
"S.1.BCN50", "S.2.BCN50",
"S.1.FDP50", "S.2.FDP50"]:
if q not in rec or rec[q] is None:
rec[q] = 0
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
for q in ["S.1.TAR", "S.2.TAR",
"S.1.TIR", "S.2.TIR",
"S.1.TOR", "S.2.TOR"]:
if q not in rec or rec[q] is None:
rec[q] = [0, 0]
if not ("feat:" + q) in has_warned:
logging.warn("Missing feature %s" % q)
has_warned["feat:" + q] = True
NT = rec["I.NT"]
NT_is_ref = int(NT == "ref")
QSI_NT = int(rec["I.QSI_NT"])
n_DP = float(rec["S.1.DP"])
t_DP = float(rec["S.2.DP"])
in_del = 0
max_len = len(rec["REF"])
min_len = len(rec["REF"])
for a in rec["ALT"]:
if len(a) > len(rec["REF"]):
in_del |= 1
else:
in_del |= 2
min_len = min(len(a), min_len)
max_len = max(len(a), max_len)
ilen = max_len - min_len
n_DP_ratio = 0
t_DP_ratio = 0
if avg_depth:
try:
n_DP_ratio = n_DP / float(avg_depth[rec["CHROM"]])
t_DP_ratio = t_DP / float(avg_depth[rec["CHROM"]])
except:
if not rec["CHROM"] in has_warned:
logging.warn("Cannot normalize depths on %s" % rec["CHROM"])
has_warned[rec["CHROM"]] = True
elif "DPnorm" not in has_warned:
logging.warn("Cannot normalize depths.")
has_warned["DPnorm"] = True
# extract observed AF from strelka counts. TIR = ALT; TAR = REF
try:
n_af = float(rec["S.1.TIR"][0]) / (float(rec["S.1.TIR"][0]) + float(rec["S.1.TAR"][0]))
except:
n_af = 0
try:
t_af = float(rec["S.2.TIR"][0]) / (float(rec["S.2.TIR"][0]) + float(rec["S.2.TAR"][0]))
except:
t_af = 0
# Gather the computed data into a dict
qrec = {
"CHROM": rec["CHROM"],
"POS": int(rec["POS"]),
"REF": rec["REF"],
"ALT": ",".join(rec["ALT"]),
"LENGTH": ilen,
"INDELTYPE": in_del,
"FILTER": ",".join(rec["FILTER"]),
"NT": NT,
"NT_REF": NT_is_ref,
"QSI_NT": QSI_NT,
"N_DP": n_DP,
"T_DP": t_DP,
"N_DP_RATE": n_DP_ratio,
"T_DP_RATE": t_DP_ratio,
"N_AF": n_af,
"T_AF": t_af,
"SGT": rec["I.SGT"],
"tag": tag
}
# fields with defaults
fields = [
{"n": "EVS", "s": "I.EVS", "def": 0, "t": float},
{"n": "VQSR", "s": "I.VQSR", "def": 0, "t": float},
{"n": "RC", "s": "I.RC", "def": 0, "t": int},
{"n": "RU", "s": "I.RU", "def": ""},
{"n": "RU_LEN", "s": "I.RU", "def": 0, "t": len},
{"n": "IC", "s": "I.IC", "def": 0, "t": int},
{"n": "IHP", "s": "I.IHP", "def": 0, "t": int},
{"n": "MQ", "s": "I.MQ", "def": 0.0, "t": float},
{"n": "MQ0", "s": "I.MQ0", "def": 0.0, "t": float},
{"n": "N_BCN", "s": "S.1.BCN50", "def": 0.0, "t": float},
{"n": "T_BCN", "s": "S.2.BCN50", "def": 0.0, "t": float},
{"n": "N_FDP", "s": "S.1.FDP50", "def": 0.0, "t": float},
{"n": "T_FDP", "s": "S.2.FDP50", "def": 0.0, "t": float},
]
for fd in fields:
try:
res = rec[fd["s"]]
if "t" in fd:
res = fd["t"](res)
except:
res = fd["def"]
qrec[fd["n"]] = res
# ESF features
try:
for i, v in enumerate(rec["I.EVSF"]):
if i in evs_featurenames:
try:
qrec["E." + evs_featurenames[i]] = float(v)
except:
# failure to parse
pass
except:
pass
for k, v in evs_featurenames.iteritems():
if not "E." + v in qrec:
qrec["E." + v] = 0
records.append(qrec)
if records:
df = pandas.DataFrame(records, columns=cols)
else:
df = pandas.DataFrame(columns=cols)
return df
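# Illustrative usage sketch (not part of the original module); the VCF file
# names and the tag below are placeholders:
#
#   snv_df = extractStrelkaSNVFeatures("somatic.snvs.vcf.gz", tag="TP")
#   indel_df = extractStrelkaIndelFeatures("somatic.indels.vcf.gz", tag="TP")
#   print(snv_df.shape, indel_df.shape)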
|
PythonVirtEnv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/add_code_to_python_process.py | zuhorski/EPL_Project | 349 | 11106263 | r'''
Copyright: Brainwy Software Ltda.
License: EPL.
=============
Works for Windows by using an executable that'll inject a dll to a process and call a function.
Note: https://github.com/fabioz/winappdbg is used just to determine if the target process is 32 or 64 bits.
Works for Linux relying on gdb.
Limitations:
============
Linux:
------
1. It is possible that ptrace is disabled: /etc/sysctl.d/10-ptrace.conf
Note that even enabling it in /etc/sysctl.d/10-ptrace.conf (i.e.: making the
ptrace_scope=0), it's possible that we need to run the application that'll use ptrace (or
gdb in this case) as root (so, we must sudo the python which'll run this module).
2. It currently doesn't work in debug builds (i.e.: python_d)
Other implementations:
- pyrasite.com:
GPL
Windows/linux (in Linux it also uses gdb to connect -- although specifics are different as we use a dll to execute
code with other threads stopped). Its Windows approach is more limited because it doesn't seem to deal properly with
Python 3 if threading is disabled.
- https://github.com/google/pyringe:
Apache v2.
Only linux/Python 2.
- http://pytools.codeplex.com:
Apache V2
Windows Only (but supports mixed mode debugging)
Our own code relies heavily on a part of it: http://pytools.codeplex.com/SourceControl/latest#Python/Product/PyDebugAttach/PyDebugAttach.cpp
to overcome some limitations of attaching and running code in the target python executable on Python 3.
See: attach.cpp
Linux: References if we wanted to use a pure-python debugger:
https://bitbucket.org/haypo/python-ptrace/
http://stackoverflow.com/questions/7841573/how-to-get-an-error-message-for-errno-value-in-python
Jugaad:
https://www.defcon.org/images/defcon-19/dc-19-presentations/Jakhar/DEFCON-19-Jakhar-Jugaad-Linux-Thread-Injection.pdf
https://github.com/aseemjakhar/jugaad
Something else (general and not Python related):
- http://www.codeproject.com/Articles/4610/Three-Ways-to-Inject-Your-Code-into-Another-Proces
Other references:
- https://github.com/haypo/faulthandler
- http://nedbatchelder.com/text/trace-function.html
- https://github.com/python-git/python/blob/master/Python/sysmodule.c (sys_settrace)
- https://github.com/python-git/python/blob/master/Python/ceval.c (PyEval_SetTrace)
- https://github.com/python-git/python/blob/master/Python/thread.c (PyThread_get_key_value)
To build the dlls needed on windows, visual studio express 13 was used (see compile_dll.bat)
See: attach_pydevd.py to attach the pydev debugger to a running python process.
'''
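# Illustrative usage (the pid is a placeholder; see main() at the end of this
# module for the actual argument handling). The code string avoids single
# quotes, which the run_python_code_* functions assert against:
#
#   python add_code_to_python_process.py <pid> "import os;print(os.getpid())"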
# Note: to work with nasm compiling asm to code and decompiling to see asm with shellcode:
# x:\nasm\nasm-2.07-win32\nasm-2.07\nasm.exe
# nasm.asm&x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe -b arch nasm
import ctypes
import os
import struct
import subprocess
import sys
import time
from contextlib import contextmanager
try:
TimeoutError = TimeoutError # @ReservedAssignment
except NameError:
class TimeoutError(RuntimeError): # @ReservedAssignment
pass
@contextmanager
def _create_win_event(name):
from winappdbg.win32.kernel32 import CreateEventA, WaitForSingleObject, CloseHandle
manual_reset = False # i.e.: after someone waits it, automatically set to False.
initial_state = False
if not isinstance(name, bytes):
name = name.encode('utf-8')
event = CreateEventA(None, manual_reset, initial_state, name)
if not event:
raise ctypes.WinError()
class _WinEvent(object):
def wait_for_event_set(self, timeout=None):
'''
:param timeout: in seconds
'''
if timeout is None:
timeout = 0xFFFFFFFF
else:
timeout = int(timeout * 1000)
ret = WaitForSingleObject(event, timeout)
if ret in (0, 0x80):
return True
elif ret == 0x102:
# Timed out
return False
else:
raise ctypes.WinError()
try:
yield _WinEvent()
finally:
CloseHandle(event)
def is_python_64bit():
return (struct.calcsize('P') == 8)
def is_mac():
import platform
return platform.system() == 'Darwin'
def run_python_code_windows(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
from winappdbg.process import Process
if not isinstance(python_code, bytes):
python_code = python_code.encode('utf-8')
process = Process(pid)
bits = process.get_bits()
is_64 = bits == 64
# Note: this restriction no longer applies (we create a process with the proper bitness from
# this process so that the attach works).
# if is_64 != is_python_64bit():
# raise RuntimeError("The architecture of the Python used to connect doesn't match the architecture of the target.\n"
# "Target 64 bits: %s\n"
# "Current Python 64 bits: %s" % (is_64, is_python_64bit()))
with _acquire_mutex('_pydevd_pid_attach_mutex_%s' % (pid,), 10):
print('--- Connecting to %s bits target (current process is: %s) ---' % (bits, 64 if is_python_64bit() else 32))
with _win_write_to_shared_named_memory(python_code, pid):
filedir = os.path.dirname(__file__)
if is_64:
suffix = 'amd64'
else:
suffix = 'x86'
target_executable = os.path.join(filedir, 'inject_dll_%s.exe' % suffix)
if not os.path.exists(target_executable):
raise RuntimeError('Could not find exe file to inject: %s' % target_executable)
name = 'attach_%s.dll' % suffix
target_dll = os.path.join(filedir, name)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
print('\n--- Injecting attach dll: %s into pid: %s ---' % (name, pid))
args = [target_executable, str(pid), target_dll]
subprocess.check_call(args)
# Now, if the first injection worked, go on to the second which will actually
# run the code.
name = 'run_code_on_dllmain_%s.dll' % suffix
target_dll = os.path.join(filedir, name)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
with _create_win_event('_pydevd_pid_event_%s' % (pid,)) as event:
print('\n--- Injecting run code dll: %s into pid: %s ---' % (name, pid))
args = [target_executable, str(pid), target_dll]
subprocess.check_call(args)
if not event.wait_for_event_set(10):
print('Timeout error: the attach may not have completed.')
print('--- Finished dll injection ---\n')
return 0
@contextmanager
def _acquire_mutex(mutex_name, timeout):
'''
Only one process may be attaching to a pid, so, create a system mutex
to make sure this holds in practice.
'''
from winappdbg.win32.kernel32 import CreateMutex, GetLastError, CloseHandle
from winappdbg.win32.defines import ERROR_ALREADY_EXISTS
initial_time = time.time()
while True:
mutex = CreateMutex(None, True, mutex_name)
acquired = GetLastError() != ERROR_ALREADY_EXISTS
if acquired:
break
if time.time() - initial_time > timeout:
raise TimeoutError('Unable to acquire mutex to make attach before timeout.')
time.sleep(.2)
try:
yield
finally:
CloseHandle(mutex)
@contextmanager
def _win_write_to_shared_named_memory(python_code, pid):
# Use the definitions from winappdbg when possible.
from winappdbg.win32 import defines
from winappdbg.win32.kernel32 import (
CreateFileMapping,
MapViewOfFile,
CloseHandle,
UnmapViewOfFile,
)
memmove = ctypes.cdll.msvcrt.memmove
memmove.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
defines.SIZE_T,
]
memmove.restype = ctypes.c_void_p
# Note: BUFSIZE must be the same from run_code_in_memory.hpp
BUFSIZE = 2048
assert isinstance(python_code, bytes)
assert len(python_code) > 0, 'Python code must not be empty.'
# Note: -1 so that we're sure we'll add a \0 to the end.
assert len(python_code) < BUFSIZE - 1, 'Python code must have at most %s bytes (found: %s)' % (BUFSIZE - 1, len(python_code))
python_code += b'\0' * (BUFSIZE - len(python_code))
assert python_code.endswith(b'\0')
INVALID_HANDLE_VALUE = -1
PAGE_READWRITE = 0x4
FILE_MAP_WRITE = 0x2
filemap = CreateFileMapping(
INVALID_HANDLE_VALUE, 0, PAGE_READWRITE, 0, BUFSIZE, u"__pydevd_pid_code_to_run__%s" % (pid,))
if filemap == INVALID_HANDLE_VALUE or filemap is None:
raise Exception("Failed to create named file mapping (ctypes: CreateFileMapping): %s" % (filemap,))
try:
view = MapViewOfFile(filemap, FILE_MAP_WRITE, 0, 0, 0)
if not view:
raise Exception("Failed to create view of named file mapping (ctypes: MapViewOfFile).")
try:
memmove(view, python_code, BUFSIZE)
yield
finally:
UnmapViewOfFile(view)
finally:
CloseHandle(filemap)
def run_python_code_linux(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'amd64'
arch = 'i386:x86-64'
else:
suffix = 'x86'
arch = 'i386'
print('Attaching with arch: %s' % (arch,))
target_dll = os.path.join(filedir, 'attach_linux_%s.so' % suffix)
target_dll = os.path.abspath(os.path.normpath(target_dll))
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'gdb',
'--nw', # no gui interface
'--nh', # no ~/.gdbinit
'--nx', # no .gdbinit
# '--quiet', # no version number on startup
'--pid',
str(pid),
'--batch',
# '--batch-silent',
]
cmd.extend(["--eval-command='set scheduler-locking off'"]) # If on we'll deadlock.
cmd.extend(["--eval-command='set architecture %s'" % arch])
cmd.extend([
"--eval-command='call (void*)dlopen(\"%s\", 2)'" % target_dll,
"--eval-command='call (int)DoAttach(%s, \"%s\", %s)'" % (
is_debug, python_code, show_debug_info)
])
# print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running gdb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
def find_helper_script(filedir, script_name):
target_filename = os.path.join(filedir, 'linux_and_mac', script_name)
target_filename = os.path.normpath(target_filename)
if not os.path.exists(target_filename):
raise RuntimeError('Could not find helper script: %s' % target_filename)
return target_filename
def run_python_code_mac(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'x86_64.dylib'
arch = 'i386:x86-64'
else:
suffix = 'x86.dylib'
arch = 'i386'
print('Attaching with arch: %s' % (arch,))
target_dll = os.path.join(filedir, 'attach_%s' % suffix)
target_dll = os.path.normpath(target_dll)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
lldb_prepare_file = find_helper_script(filedir, 'lldb_prepare.py')
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'lldb',
'--no-lldbinit', # Do not automatically parse any '.lldbinit' files.
# '--attach-pid',
# str(pid),
# '--arch',
# arch,
'--script-language',
'Python'
# '--batch-silent',
]
cmd.extend([
"-o 'process attach --pid %d'" % pid,
"-o 'command script import \"%s\"'" % (lldb_prepare_file,),
"-o 'load_lib_and_attach \"%s\" %s \"%s\" %s'" % (target_dll,
is_debug, python_code, show_debug_info),
])
cmd.extend([
"-o 'process detach'",
"-o 'script import os; os._exit(1)'",
])
# print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running lldb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
if sys.platform == 'win32':
run_python_code = run_python_code_windows
elif is_mac():
run_python_code = run_python_code_mac
else:
run_python_code = run_python_code_linux
def test():
print('Running with: %s' % (sys.executable,))
code = '''
import os, time, sys
print(os.getpid())
#from threading import Thread
#Thread(target=str).start()
if __name__ == '__main__':
while True:
time.sleep(.5)
sys.stdout.write('.\\n')
sys.stdout.flush()
'''
p = subprocess.Popen([sys.executable, '-u', '-c', code])
try:
code = 'print("It worked!")\n'
# Real code will be something as:
# code = '''import sys;sys.path.append(r'X:\winappdbg-code\examples'); import imported;'''
run_python_code(p.pid, python_code=code)
print('\nRun a 2nd time...\n')
run_python_code(p.pid, python_code=code)
time.sleep(3)
finally:
p.kill()
def main(args):
# Otherwise, assume the first parameter is the pid and anything else is code to be executed
# in the target process.
pid = int(args[0])
del args[0]
python_code = ';'.join(args)
# Note: on Linux the python code may not have a single quote char: '
run_python_code(pid, python_code)
if __name__ == '__main__':
args = sys.argv[1:]
if not args:
print('Expected pid and Python code to execute in target process.')
else:
if '--test' == args[0]:
test()
else:
main(args)
|
tests/unit/common/test_common_image.py | muminkoykiran/computervision-recipes | 7,899 | 11106264 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import numpy as np
from pathlib import Path
from utils_cv.common.image import (
im_width,
im_height,
im_width_height,
im2base64,
ims2strlist,
)
def test_im_width(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
assert (
im_width(im_path) == 499
), "Expected image width of 499, but got {}".format(im_width(im_path))
im = np.zeros((100, 50))
    assert im_width(im) == 50, "Expected image width of 50, but got {}".format(
im_width(im)
)
def test_im_height(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
assert (
im_height(im_path) == 665
    ), "Expected image height of 665, but got {}".format(im_height(im_path))
im = np.zeros((100, 50))
assert (
im_height(im) == 100
    ), "Expected image height of 100, but got {}".format(im_height(im))
def test_im_width_height(tiny_ic_data_path):
im_path = Path(tiny_ic_data_path) / "can" / "1.jpg"
w, h = im_width_height(im_path)
assert w == 499 and h == 665
im = np.zeros((100, 50))
w, h = im_width_height(im)
assert w == 50 and h == 100
def test_ims2strlist(tiny_ic_data_path):
""" Tests extraction of image content and conversion into string"""
im_list = [
os.path.join(tiny_ic_data_path, "can", "1.jpg"),
os.path.join(tiny_ic_data_path, "carton", "34.jpg"),
]
im_string_list = ims2strlist(im_list)
assert isinstance(im_string_list, list)
assert len(im_string_list) == len(im_list)
for im_str in im_string_list:
assert isinstance(im_str, str)
def test_im2base64(tiny_ic_data_path):
""" Tests extraction of image content and conversion into bytes"""
im_name = os.path.join(tiny_ic_data_path, "can", "1.jpg")
im_content = im2base64(im_name)
assert isinstance(im_content, bytes)
|
src/classifier/model_base/base.py | LeslieLeung/2c | 236 | 11106265 | #!/usr/bin/env python
"""
Created by howie.hu at 2021-04-08.
Description: model base classes
Changelog: all notable changes to this file will be documented
"""
from importlib import import_module
class ModelManager:
"""
    Model manager class
"""
_model_load_dict = {}
@classmethod
def get_model(cls, model_name: str, model_path: str, **kwargs):
"""get_model.
        Get the model that corresponds to the given configuration
:param model_name:
:type model_name: str
:param model_path:
:type model_path: str
:param kwargs:
"""
model_key = f"{model_name}_{model_path}"
if model_key not in cls._model_load_dict:
model_loader = import_module(
f"src.classifier.model_base.{model_name}_model_loader"
)
try:
cls._model_load_dict[model_key] = model_loader.get_model(
model_path, **kwargs
)
            except Exception as e:
                err_info = f"{model_name}: {model_path} failed to load"
                raise ValueError(err_info) from e
return cls._model_load_dict[model_key]
class ModelLoaderBase:
"""
    Model loader base class
"""
def __init__(self, model_path: str, **kwargs):
"""
        Initialize attributes
:param model_path:
:type model_path: str
:param kwargs:
"""
self.model_path = model_path
self.kwargs = kwargs
def get_model(self) -> dict:
"""
        Get the model
:return:
"""
raise NotImplementedError()
class ModelResponse:
"""
    Model response base class that defines some common attributes
"""
def __init__(self):
"""
        Initialize attributes
"""
self.model_name = ""
        # 0 = normal, 1 = abnormal
self.result = 0
self.probability = 0.0
        # Feature dictionary
        # Base keys:
        # - is_black: whether the text hits the blacklist
        # - is_white: whether the text hits the whitelist
        # - text: the input text
self.feature_dict = {}
def to_dict(self):
"""
        Return the response as a dict
:return:
"""
return {
"model_name": self.model_name,
"result": self.result,
"probability": self.probability,
"feature_dict": self.feature_dict,
}
class ModelPredictBase:
"""
    Model prediction base class that abstracts the prediction workflow
"""
def __init__(self, model_name: str, model_path: str, input_dict: dict):
"""__init__.
        Initialize attributes
        :param model_name: model name
        :type model_name: str
        :param model_path: model path
        :type model_path: str
        :param input_dict: input configuration
        :type input_dict: dict
"""
self.model_name = model_name
self.model_path = model_path
self.input_dict = input_dict
        # Model response object
self.model_resp: ModelResponse = ModelResponse()
self.model_resp.model_name = model_name
        # Custom content returned to the caller
self.model_resp.feature_dict = {}
def _load_model(self):
"""
        Load the model
"""
return ModelManager.get_model(self.model_name, self.model_path)
def process(self, **kwargs):
"""
        Processing hook that may be needed before the model starts predicting
:param kwargs:
:return:
"""
raise NotImplementedError
def predict(self):
"""
        Model prediction function; the single entry point of the prediction class
:return:
"""
raise NotImplementedError
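# Illustrative sketch (not part of the project): a minimal ModelPredictBase
# subclass showing the intended flow; the "dummy" names and the fixed result
# below are assumptions for demonstration only, e.g.
#   DummyModelPredict("dummy_model", "/path/to/model.bin", {"text": "hello"}).predict()
class DummyModelPredict(ModelPredictBase):
    def process(self, **kwargs):
        # Pull the raw text out of the input configuration
        return self.input_dict.get("text", "")
    def predict(self):
        text = self.process()
        self.model_resp.result = 0
        self.model_resp.probability = 1.0
        self.model_resp.feature_dict["text"] = text
        return self.model_resp.to_dict()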
|
Code/twitter-mining.py | sarah314/SpyPi | 211 | 11106270 | # <NAME>. 2017
import os
import tweepy
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
import string
import sys
import keys
import json
from colors import farben
class MyListener(StreamListener):
def on_data(self, data):
try:
filepath = '/home/pi/Mining-Data/miner-'+ wort +'.json'
with open(filepath, 'a') as f:
f.write(data)
f.write("\n")
print(data)
return True
except BaseException as e:
print("Error on_data: %s" % str(e))
time.sleep(5)
return True
def on_error(self, status):
print(status)
return True
def jumpback():
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
if __name__ == '__main__':
    wort = input(farben.AUF + "Please choose a string for which you want to collect data: " + farben.END)
jumpback()
    print(farben.AUF + "Collecting data..." + farben.END)
auth = OAuthHandler(keys.consumer_key, keys.consumer_secret)
auth.set_access_token(keys.access_token, keys.access_secret)
api = tweepy.API(auth)
twitter_stream = Stream(auth, MyListener())
twitter_stream.filter(track=['%s' % wort])
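# Illustrative note (not part of the original script): the imported `keys`
# module is expected to define the four credentials referenced above, e.g.:
#
#   consumer_key = "..."
#   consumer_secret = "..."
#   access_token = "..."
#   access_secret = "..."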
|
tests/test_hx.py | CalebBell/ht | 118 | 11106281 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import log, log10, exp, sqrt, tanh, factorial, isnan
from ht import *
import ht
import numpy as np
from fluids.numerics import assert_close, assert_close1d, assert_close2d, linspace, logspace
import pytest
from random import uniform, randint, seed, choice
seed(0)
def test_Ntubes_Perrys():
Nt_perry = [[Ntubes_Perrys(DBundle=1.184, Ntp=i, Do=.028, angle=j) for i in [1,2,4,6]] for j in [30, 45, 60, 90]]
Nt_values = [[1001, 973, 914, 886], [819, 803, 784, 769], [1001, 973, 914, 886], [819, 803, 784, 769]]
assert_close2d(Nt_perry, Nt_values)
# angle = 30 or 60 and ntubes = 1.5 raise exception
with pytest.raises(Exception):
Ntubes_Perrys(DBundle=1.184, Ntp=5, Do=.028, angle=30)
with pytest.raises(Exception):
Ntubes_Perrys(DBundle=1.184, Ntp=5, Do=.028, angle=45)
def test_Ntubes_Phadkeb():
    # For the 45 degree case, ten examples are given and known to be correct.
# Unfortunately no examples were given for any other case.
Ntubes_calc = [Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=i, angle=45.) for i in [1,2,4,6,8]]
assert_close1d(Ntubes_calc, [805, 782, 760, 698, 680])
Ntubes_calc = [Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.035, Ntp=i, angle=45.) for i in [1,2,4,6,8]]
assert_close1d(Ntubes_calc, [861, 838, 816, 750, 732])
# Extra tests
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=2, angle=30.)
assert N == 898
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=2, angle=60.)
assert N == 876
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=6, angle=60.)
assert N == 822
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=8, angle=60.)
assert N == 772
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.092, Ntp=8, angle=60.)
assert N == 88
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=8, angle=30.)
assert N == 788
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.04, Ntp=6, angle=30.)
assert N == 652
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=8, angle=90.)
assert N == 684
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=2, angle=90.)
assert N == 772
N = Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=6, angle=90.)
assert N == 712
# Big case
N = Ntubes_Phadkeb(DBundle=5, Do=.028, pitch=.036, Ntp=2, angle=90.)
assert N == 14842
# negative case
N = Ntubes_Phadkeb(DBundle=0.004750018463796297, Do=.001, pitch=.0015, Ntp=8, angle=60)
assert N == 0
# reverse case
# DBundle_for_Ntubes_Phadkeb(Ntubes=17546, Do=.001, pitch=.00125, Ntp=6, angle=45) 0.19052937784048926
with pytest.raises(Exception):
Ntubes_Phadkeb(DBundle=1.008, Do=.028, pitch=.036, Ntp=11, angle=45.)
# Test the case of too small for anything
assert 0 == Ntubes_Phadkeb(DBundle=.01, Do=.028, pitch=.036, Ntp=2, angle=45.)
def test_Ntubes_Phadkeb_fuzz():
seed(100)
D_main = 1E-3
for angle in [30, 45, 60, 90]:
for Ntp in [1, 2, 4, 6, 8]:
for pitch_ratio in [1.25, 1.31, 1.33, 1.375, 1.4, 1.42, 1.5]:
pitch = D_main*pitch_ratio
for _ in range(10):
DBundle = uniform(pitch*2, pitch*300)
N = Ntubes_Phadkeb(DBundle=DBundle, Do=D_main, pitch=pitch, Ntp=Ntp, angle=angle)
# Test the reverse correlation
D_main = 1E-2
for angle in [30, 45, 60, 90]:
for Ntp in [1, 2, 4, 6, 8]:
for pitch_ratio in [1.25, 1.31, 1.33, 1.375, 1.4, 1.42, 1.5]:
pitch = D_main*pitch_ratio
DBundle = uniform(pitch*5, pitch*300)
N = Ntubes_Phadkeb(DBundle=DBundle, Do=D_main, pitch=pitch, Ntp=Ntp, angle=angle)
if N > 2:
DBundle2 = DBundle_for_Ntubes_Phadkeb(Ntubes=N, Do=D_main, pitch=pitch, Ntp=Ntp, angle=angle)
N2 = Ntubes_Phadkeb(DBundle=DBundle2, Do=D_main, pitch=pitch, Ntp=Ntp, angle=angle)
assert N2 == N
@pytest.mark.slow
def test_Phadkeb_numbers():
# One pain point of this code is that it takes 880 kb to store the results
# in memory as a list
ht.hx._load_coeffs_Phadkeb()
from ht.hx import triangular_Ns, triangular_C1s, square_Ns, square_C1s
from math import floor, ceil
# Triangular Ns
# https://oeis.org/A003136
# Translated expression originally in Wolfram Mathematica
# nn = 14; Select[Union[Flatten[Table[x^2 + x*y + y^2, {x, 0, nn}, {y, 0, x}]]], # <= nn^2 &] (* <NAME>, Apr 18 2011 *)
nums = []
nn = 400 # Increase this to generate more numbers
for x in range(0, nn+1):
for y in range(0, x+1):
nums.append(x*x + x*y + y*y)
nums = sorted(list(set(nums)))
nn_square = nn*nn
nums = [i for i in nums if i < nn_square]
nums = nums[0:len(triangular_Ns)]
assert_close1d(nums, triangular_Ns)
# triangular C1s
# https://oeis.org/A038590 is the sequence, and it is the unique numbers in:
# https://oeis.org/A038589
# Translated from pari expression a(n)=1+6*sum(k=0, n\3, (n\(3*k+1))-(n\(3*k+2)))
# Tested with the online interpreter http://pari.math.u-bordeaux.fr/gp.html
    # This one is slow: it used to take 300+ seconds and now takes 50+ seconds
def a(n):
tot = 0
for k in range(0, int(ceil(n/3.))):
k3 = k*3.
tot += floor(n/(k3 + 1.)) - floor(n/(k3 + 2.))
return 1 + int(6*tot)
s = set()
len_triangular_C1s = len(triangular_C1s)
i = 0
while len(s) < len_triangular_C1s:
val = a(i)
s.update([val])
i += 1
ans2 = sorted(list(s))
assert np.all(ans2[0:len(triangular_C1s)] == triangular_C1s)
# square Ns
# https://oeis.org/A001481
# Quick and efficient
# Translated from Mathematica
    # upTo = 160; With[{max = Ceiling[Sqrt[upTo]]}, Select[Union[Total /@ (Tuples[Range[0, max], {2}]^2)], # <= upTo &]] (* <NAME>, Apr 22 2011 *)
# 10 loops, best of 3: 17.3 ms per loop
# Confirmed with SymPy
up_to = 100000
max_range = int(ceil(up_to**0.5))
squares = [i*i for i in range(max_range+1)]
seq = [i+j for i in squares for j in squares]
seq = [i for i in set(seq) if i < up_to] # optional
seq.sort() # on PyPy, the [i+j for i in squares for j in squares] look yields results in a different order
# so we need to sort the result
nums = seq[0:len(square_Ns)]
assert_close1d(nums, square_Ns)
# square C1s
# https://oeis.org/A057961 is the sequence, there is one mathematica expression
# but it needs SymPy or some hard work to be done
# It is also the uniqiue elements in https://oeis.org/A057655
# That has a convenient expression for pari, tested online and translated
# a(n)=1+4*sum(k=0, sqrtint(n), sqrtint(n-k^2) ); /* <NAME>, Oct 08 2012 */
# Currently 1.8 seconds
# No numerical issues up to 35000 (confirmed with SymPy to do the root, int)
def a2(n):
sqrtint = lambda i: int(i**0.5)
return 1 + 4*sum([sqrtint(n - k*k) for k in range(0, sqrtint(n) + 1)])
ans = set([a2(i) for i in range(35000)])
ans = sorted(list(ans))
nums = ans[0:len(square_C1s)]
assert_close1d(nums, square_C1s)
def test_Ntubes_HEDH():
Ntubes_HEDH_c = [Ntubes_HEDH(DBundle=1.200-.008*2, Do=.028, pitch=.036, angle=i) for i in [30, 45, 60, 90]]
assert_close1d(Ntubes_HEDH_c, [928, 804, 928, 804])
with pytest.raises(Exception):
        # unsupported angle
Ntubes_HEDH(DBundle=1.200-.008*2, Do=.028, pitch=.036, angle=20)
with pytest.raises(Exception):
        # unsupported angle
DBundle_for_Ntubes_HEDH(N=100, Do=.028, pitch=.036, angle=20)
# Fuzzing test
Do = 0.028
for angle in [30, 45, 60, 90]:
for pitch_ratio in [1.01, 1.1, 1.175, 1.25, 1.5, 1.75, 2]:
pitch = Do*pitch_ratio
for i in range(100):
N = int(uniform(10, 10000))
DBundle = DBundle_for_Ntubes_HEDH(N=N, Do=Do, pitch=pitch, angle=angle)
# If we don't increase the bundle by a hair, int() will round the wrong way
N_recalculated = Ntubes_HEDH(DBundle=DBundle*(1+1E-12), Do=Do, pitch=pitch, angle=angle)
assert N == N_recalculated
def test_Ntubes_VDI():
VDI_t = [[Ntubes_VDI(DBundle=1.184, Ntp=i, Do=.028, pitch=.036, angle=j) for i in [1,2,4,6,8]] for j in [30, 45, 60, 90]]
VDI_values = [[983, 966, 929, 914, 903], [832, 818, 790, 778, 769], [983, 966, 929, 914, 903], [832, 818, 790, 778, 769]]
assert_close2d(VDI_t, VDI_values)
with pytest.raises(Exception):
Ntubes_VDI(DBundle=1.184, Ntp=5, Do=.028, pitch=.036, angle=30)
with pytest.raises(Exception):
Ntubes_VDI(DBundle=1.184, Ntp=2, Do=.028, pitch=.036, angle=40)
D_VDI = [[D_for_Ntubes_VDI(N=970, Ntp=i, Do=0.00735, pitch=0.015, angle=j) for i in [1, 2, 4, 6, 8]] for j in [30, 60, 45, 90]]
D_VDI_values = [[0.489981989464919, 0.5003600119829544, 0.522287673753684, 0.5311570964003711, 0.5377131635291736], [0.489981989464919, 0.5003600119829544, 0.522287673753684, 0.5311570964003711, 0.5377131635291736], [0.5326653264480428, 0.5422270203444146, 0.5625250342473964, 0.5707695340997739, 0.5768755899087357], [0.5326653264480428, 0.5422270203444146, 0.5625250342473964, 0.5707695340997739, 0.5768755899087357]]
assert_close2d(D_VDI, D_VDI_values)
with pytest.raises(Exception):
D_for_Ntubes_VDI(N=970, Ntp=5., Do=0.00735, pitch=0.015, angle=30.)
with pytest.raises(Exception):
D_for_Ntubes_VDI(N=970, Ntp=2., Do=0.00735, pitch=0.015, angle=40.)
def test_Ntubes():
methods = ['Phadkeb', 'HEDH', 'VDI', 'Perry']
Ntubes_calc = [Ntubes(DBundle=1.2, Do=0.025, pitch=.025*1.25, Method=i) for i in methods]
assert Ntubes_calc == [1285, 1272, 1340, 1297]
assert_close(Ntubes(DBundle=1.2, Do=0.025, pitch=.025*1.25), 1285)
with pytest.raises(Exception):
Ntubes(DBundle=1.2, Do=0.025, pitch=.025*1.25, Method='failure')
D = size_bundle_from_tubecount(N=1285, Do=0.025, pitch=0.03125)
assert_close(D, 1.1985676402390355)
D = size_bundle_from_tubecount(N=1285, Do=0.025, pitch=0.03125, Method='HEDH')
assert_close(D, 1.205810838411941)
D = size_bundle_from_tubecount(N=1285, Do=0.025, pitch=0.03125, Method='VDI')
assert_close(D, 1.1749025890472795)
D = size_bundle_from_tubecount(N=13252, Do=.028, Ntp=2, angle=45, pitch=.028*1.25, Method='Perry')
assert_close(D, 3.598336054740235)
with pytest.raises(Exception):
size_bundle_from_tubecount(N=1285, Do=0.025, pitch=0.03125, Method='BADMETHOD')
def test_effectiveness_NTU():
# Counterflow
for i in range(20):
eff = uniform(0, 1)
Cr = uniform(0, 1)
units = NTU_from_effectiveness(effectiveness=eff, Cr=Cr, subtype='counterflow')
eff_calc = effectiveness_from_NTU(NTU=units, Cr=Cr, subtype='counterflow')
assert_close(eff, eff_calc)
# Case with Cr = 1
NTU = NTU_from_effectiveness(effectiveness=.9, Cr=1, subtype='counterflow')
assert_close(NTU, 9)
e = effectiveness_from_NTU(NTU=9, Cr=1, subtype='counterflow')
assert_close(e, 0.9)
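    # Quick check of the relation assumed above (standard e-NTU result, not ht
    # internals): for counterflow with Cr = 1, eps = NTU/(1 + NTU) and so
    # NTU = eps/(1 - eps); eps = 0.9 gives NTU = 9, matching the asserts.
    assert_close(9./(1. + 9.), 0.9)
    assert_close(0.9/(1. - 0.9), 9)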
# Parallel
for i in range(20):
Cr = uniform(0, 1)
eff = uniform(0, 1./(Cr + 1.)*(1-1E-7))
units = NTU_from_effectiveness(effectiveness=eff, Cr=Cr, subtype='parallel')
eff_calc = effectiveness_from_NTU(NTU=units, Cr=Cr, subtype='parallel')
assert_close(eff, eff_calc)
with pytest.raises(Exception):
Cr = 0.6
NTU_from_effectiveness(effectiveness=0.62500001, Cr=Cr, subtype='parallel')
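    # The parallel-flow ceiling assumed just above (standard limit, shown for
    # illustration): eps_max = 1/(1 + Cr) as NTU -> infinity; for Cr = 0.6 that is
    # 0.625, which is why an effectiveness of 0.62500001 cannot be reached.
    assert_close(1./(1. + 0.6), 0.625)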
# Crossflow, Cmin mixed, Cmax unmixed
for i in range(20):
Cr = uniform(0, 1)
eff = uniform(0, (1 - exp(-1/Cr))*(1-1E-7))
N = NTU_from_effectiveness(eff, Cr=Cr, subtype='crossflow, mixed Cmin')
eff_calc = effectiveness_from_NTU(N, Cr=Cr, subtype='crossflow, mixed Cmin')
assert_close(eff, eff_calc)
with pytest.raises(Exception):
Cr = 0.7
NTU_from_effectiveness(0.760348963559, Cr=Cr, subtype='crossflow, mixed Cmin')
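    # The 'crossflow, mixed Cmin' ceiling assumed above is eps_max = 1 - exp(-1/Cr);
    # for Cr = 0.7 that is about 0.7603489636, so the requested 0.760348963559 sits
    # essentially at the feasibility limit and cannot be solved.
    assert abs((1. - exp(-1./0.7)) - 0.760348963559) < 1e-6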
# Crossflow, Cmax mixed, Cmin unmixed
for i in range(20):
Cr = uniform(0, 1)
eff = uniform(0, (exp(Cr) - 1)*exp(-Cr)/Cr-1E-5)
N = NTU_from_effectiveness(eff, Cr=Cr, subtype='crossflow, mixed Cmax')
eff_calc = effectiveness_from_NTU(N, Cr=Cr, subtype='crossflow, mixed Cmax')
assert_close(eff, eff_calc)
with pytest.raises(Exception):
Cr = 0.7
eff = 0.7201638517265581
NTU_from_effectiveness(eff, Cr=Cr, subtype='crossflow, mixed Cmax')
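    # Similarly, the 'crossflow, mixed Cmax' ceiling assumed above is
    # eps_max = (1 - exp(-Cr))/Cr; for Cr = 0.7 that is about 0.7191638517, so the
    # requested 0.7201638517... lies roughly 0.001 past the feasible limit.
    assert abs((1. - exp(-0.7))/0.7 - 0.7191638517) < 1e-6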
# Crossflow, this one needed a closed-form solver
for i in range(100):
Cr = uniform(0, 1)
eff = uniform(0, 1)
N = NTU_from_effectiveness(eff, Cr=Cr, subtype='crossflow approximate')
eff_calc = effectiveness_from_NTU(N, Cr=Cr, subtype='crossflow approximate')
assert_close(eff, eff_calc, rtol=1E-6) # brenth differs in old Python versions, rtol is needed
# Shell and tube - this one doesn't have a nice effectiveness limit,
# and it depends on the number of shells
for i in range(20):
Cr = uniform(0, 1)
shells = randint(1, 10)
eff_max = (-((-Cr + sqrt(Cr**2 + 1) + 1)/(Cr + sqrt(Cr**2 + 1) - 1))**shells + 1)/(Cr - ((-Cr + sqrt(Cr**2 + 1) + 1)/(Cr + sqrt(Cr**2 + 1) - 1))**shells)
eff = uniform(0, eff_max-1E-5)
N = NTU_from_effectiveness(eff, Cr=Cr, subtype=str(shells)+'S&T')
eff_calc = effectiveness_from_NTU(N, Cr=Cr, subtype=str(shells)+'S&T')
assert_close(eff, eff_calc)
with pytest.raises(Exception):
NTU_from_effectiveness(.99, Cr=.7, subtype='5S&T')
# Easy tests
effectiveness = effectiveness_from_NTU(NTU=5, Cr=0.7, subtype='crossflow, mixed Cmin')
assert_close(effectiveness, 0.7497843941508544)
NTU = NTU_from_effectiveness(effectiveness=effectiveness, Cr=0.7, subtype='crossflow, mixed Cmin')
assert_close(NTU, 5)
eff = effectiveness_from_NTU(NTU=5, Cr=0.7, subtype='crossflow, mixed Cmax')
assert_close(eff, 0.7158099831204696)
NTU = NTU_from_effectiveness(eff, Cr=0.7, subtype='crossflow, mixed Cmax')
assert_close(5, NTU)
eff = effectiveness_from_NTU(NTU=5, Cr=0, subtype='boiler')
assert_close(eff, 0.9932620530009145)
NTU = NTU_from_effectiveness(eff, Cr=0, subtype='boiler')
assert_close(NTU, 5)
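    # The boiler/condenser case above (Cr = 0) reduces to the assumed standard
    # limit eps = 1 - exp(-NTU); NTU = 5 gives 0.993262053..., matching the assert.
    assert_close(1 - exp(-5), 0.9932620530009145)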
with pytest.raises(Exception):
effectiveness_from_NTU(NTU=5, Cr=1.01, subtype='crossflow, mixed Cmin')
with pytest.raises(Exception):
NTU_from_effectiveness(effectiveness=.2, Cr=1.01, subtype='crossflow, mixed Cmin')
# bad names
with pytest.raises(Exception):
NTU_from_effectiveness(.99, Cr=.7, subtype='FAIL')
with pytest.raises(Exception):
effectiveness_from_NTU(NTU=5, Cr=.5, subtype='FAIL')
# Crossflow analytical solution
eff = effectiveness_from_NTU(NTU=5, Cr=.7, subtype='crossflow')
assert_close(eff, 0.8444821799748551)
def crossflow_unmixed_sum_infinite(NTU, Cr):
def Pn(NTU, n):
tot = sum([(n+1.-j)/factorial(j)*NTU**(n+j) for j in range(1, n+1)])
return tot/factorial(n + 1)
tot = sum([Cr**n*Pn(NTU, n) for n in range(1, 150)])
return 1 - exp(-NTU) - exp(-(1+Cr)*NTU)*tot
eff_old = crossflow_unmixed_sum_infinite(5, .7)
assert_close(eff, eff_old)
# Crossflow analytical, this one needed a closed-form solver
for i in range(20):
Cr = uniform(0, 1)
eff = uniform(0, .9)
        # A good answer is not always obtainable at eff > 0.9 at very high NTU,
# because the integral term gets too close to 1 for floating point numbers
# to capture any more accuracy
# This is not likely to be a problem to users
N = NTU_from_effectiveness(eff, Cr=Cr, subtype='crossflow')
eff_calc = effectiveness_from_NTU(N, Cr=Cr, subtype='crossflow')
assert_close(eff, eff_calc, rtol=1E-6) # brenth differs in old Python versions, rtol is needed
def test_effectiveness_NTU_method():
ans_known = {'Q': 192850.0, 'Thi': 130, 'Cmax': 9672.0, 'Tho': 110.06100082712986, 'Cmin': 2755.0, 'NTU': 1.1040839095588, 'Tco': 85, 'Tci': 15, 'Cr': 0.2848428453267163, 'effectiveness': 0.6086956521739131, 'UA': 3041.751170834494}
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85, Tho=110.06100082712986)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85, Thi=130)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Thi=130, Tho=110.06100082712986, Tci=15)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Thi=130, Tho=110.06100082712986, Tco=85)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tco=85, Tho=110.06100082712986, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Thi=130, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tho=110.06100082712986, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tco=85, Thi=130, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85, Tho=110.06100082712986, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tco=85, Thi=130, Tho=110.06100082712986, UA=3041.751170834494)
[assert_close(ans_known[i], ans[i]) for i in ans_known.keys()]
with pytest.raises(Exception):
        # Test raising an error with only one set of stream information
effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Thi=130, Tho=110.06100082712986, UA=3041.751170834494)
with pytest.raises(Exception):
# Inconsistent hot and cold temperatures and heat capacity ratios
effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Thi=130, Tho=110.06100082712986, Tco=85, Tci=5)
with pytest.raises(Exception):
        # Calculate UA, but no cold side temperature information given
effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Thi=130, Tho=110.06100082712986)
with pytest.raises(Exception):
# Calculate UA, but no hot side temperature information given
effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85)
with pytest.raises(Exception):
# Calculate UA, but only two temperatures given
effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Thi=130)
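    # Cross-check sketch of the hard-coded numbers in ans_known using only the
    # basic energy-balance relations (assumed textbook definitions, no ht calls):
    Cmax_check = 5.2*1860. # hot stream capacity rate, 9672.0 W/K
    Cmin_check = 1.45*1900. # cold stream capacity rate, 2755.0 W/K
    Q_check = Cmin_check*(85 - 15) # 192850 W
    eff_check = Q_check/(Cmin_check*(130 - 15)) # Q/Qmax = 0.6086956...
    assert_close(Q_check, 192850.0)
    assert_close(eff_check, 0.6086956521739131)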
def test_F_LMTD_Fakheri():
'''Number of tube passes must be a multiple of 2N for correlation to work.
N can be 1.
Example from http://excelcalculations.blogspot.ca/2011/06/lmtd-correction-factor.html
    spreadsheet file, which is based on Bowman et al. (1940).
This matches for 3, 6, and 11 shell passes perfectly.
This also matches that from the sheet:
http://www.mhprofessional.com/getpage.php?c=0071624082_download.php&cat=113
'''
F_calc = F_LMTD_Fakheri(Tci=15, Tco=85, Thi=130, Tho=110, shells=1)
assert_close(F_calc, 0.9438358829645933)
# R = 1 check
F_calc = F_LMTD_Fakheri(Tci=15, Tco=35, Thi=130, Tho=110, shells=1)
assert_close(F_calc, 0.9925689447100824)
for i in range(1, 10):
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype=str(i)+'S&T', Tci=15, Tco=85, Thi=130)
dTlm = LMTD(Thi=130, Tho=110.06100082712986, Tci=15, Tco=85)
F_expect = ans['Q']/ans['UA']/dTlm
F_calc = F_LMTD_Fakheri(Tci=15, Tco=85, Thi=130, Tho=110.06100082712986, shells=i)
assert_close(F_expect, F_calc)
F_calc = F_LMTD_Fakheri(Thi=15, Tho=85, Tci=130, Tco=110.06100082712986, shells=i)
assert_close(F_expect, F_calc)
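    # For reference, the F factor checked above relates duty and UA through
    # F = Q/(UA*dTlm), with dTlm the counterflow log-mean temperature difference.
    # A quick sketch of that definition (standard form, illustrative only):
    from math import log as _log
    dT_hot_end = 130 - 85 # Thi - Tco
    dT_cold_end = 110.06100082712986 - 15 # Tho - Tci
    dTlm_sketch = (dT_cold_end - dT_hot_end)/_log(dT_cold_end/dT_hot_end)
    # the log mean always lies between the two terminal differences
    assert dT_hot_end < dTlm_sketch < dT_cold_end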
def test_temperature_effectiveness_basic():
    # Except for the crossflow mixed 1&2 cases, the values are taken from an example
    # and checked to match the e-NTU method. The approximate crossflow formula gives
    # a somewhat different result, presumably because the approximations differ.
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='counterflow')
assert_close(P1, 0.173382601503)
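    # Independent check of the counterflow value above using the standard P-NTU
    # relation (assumed): P1 = (1 - exp(-NTU1*(1 - R1)))/(1 - R1*exp(-NTU1*(1 - R1))).
    R1_s, NTU1_s = 3.5107078039927404, 0.29786672449248663
    x_s = exp(-NTU1_s*(1. - R1_s))
    assert abs((1. - x_s)/(1. - R1_s*x_s) - 0.173382601503) < 1e-6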
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='parallel')
assert_close(P1, 0.163852912049)
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='crossflow approximate')
assert_close(P1, 0.149974594007)
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='crossflow')
assert_close(P1, 0.1698702121873175)
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='crossflow, mixed 1')
assert_close(P1, 0.168678230894)
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='crossflow, mixed 2')
assert_close(P1, 0.16953790774)
P1 = temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='crossflow, mixed 1&2')
assert_close(P1, 0.168411216829)
with pytest.raises(Exception):
temperature_effectiveness_basic(R1=3.5107078039927404, NTU1=0.29786672449248663, subtype='FAIL')
# Formulas are in [1]_, [3]_, and [2]_.
def test_temperature_effectiveness_TEMA_J():
    # All three models are checked with Rosenhow and then Shah
# Formulas presented in Thulukkanam are with respect to the other side of the
# exchanger
P1 = temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=1)
assert_close(P1, 0.5699085193651295)
P1 = temperature_effectiveness_TEMA_J(R1=2., NTU1=1., Ntp=1) # R = 2 case
assert_close(P1, 0.3580830895954234)
P1 = temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=2)
assert_close(P1, 0.5688878232315694)
P1 = temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=4)
assert_close(P1, 0.5688711846568247)
with pytest.raises(Exception):
temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=3)
def test_temperature_effectiveness_TEMA_H():
P1 = temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
assert_close(P1, 0.5730728284905833)
P1 = temperature_effectiveness_TEMA_H(R1=2., NTU1=1., Ntp=1) # R = 2 case
assert_close(P1, 0.3640257049950876)
P1 = temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=2)
assert_close(P1, 0.5824437803128222)
P1 = temperature_effectiveness_TEMA_H(R1=4., NTU1=1., Ntp=2) # R = 4 case
assert_close(P1, 0.2366953352462191)
P1 = temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=2, optimal=False)
assert_close(P1, 0.5560057072310012)
P1 = temperature_effectiveness_TEMA_H(R1=4, NTU1=1., Ntp=2, optimal=False)
assert_close(P1, 0.19223481412807347) # R2 = 0.25
    # The 1 and 2 pass cases are by default checked with Rosenhow and Shah;
    # for the two-pass unoptimal case, the result is from Thulukkanam only.
    # The 2-pass optimal arrangement from Rosenhow and Shah is the same
    # as that of Thulukkanam, however, and is shown below.
m1 = .5
m2 = 1.2
Cp1 = 1800.
Cp2 = 2200.
UA = 500.
C1 = m1*Cp1
C2 = m2*Cp2
R1_orig = R1 = C1/C2
NTU1 = UA/C1
R2 = C2/C1
NTU2 = UA/C2
R1 = R2
NTU1 = NTU2
alpha = NTU1*(4*R1 + 1)/8.
beta = NTU1*(4*R1 - 1)/8.
D = (1 - exp(-alpha))/(4.*R1 + 1)
E = (1 - exp(-beta))/(4*R1 - 1)
H = (1 - exp(-2*beta))/(4.*R1 - 1)
G = (1-D)**2*(D**2 + E**2) + D**2*(1+E)**2
B = (1 + H)*(1 + E)**2
P1 = (1 - (1-D)**4/(B - 4.*R1*G))
P1 = P1/R1_orig
assert_close(P1, 0.40026600037802335)
with pytest.raises(Exception):
temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=5)
def test_temperature_effectiveness_TEMA_G():
    # Checked with Shah and Rosenhow; the formula was typed in and the other case works as well
P1 = temperature_effectiveness_TEMA_G(R1=1/3., NTU1=1., Ntp=1)
assert_close(P1, 0.5730149350867675)
    P1 = temperature_effectiveness_TEMA_G(R1=1/3., NTU1=1., Ntp=2) # TEST CASE
assert_close(P1, 0.5824238778134628)
# Ntp = 1, R=1 case
P1_Ntp_R1 = 0.8024466201983814
P1 = temperature_effectiveness_TEMA_G(R1=1., NTU1=7., Ntp=1) # R = 1 case
assert_close(P1, P1_Ntp_R1)
P1_near = temperature_effectiveness_TEMA_G(R1=1-1E-9, NTU1=7, Ntp=1)
assert_close(P1_near, P1_Ntp_R1)
# Ntp = 2, optimal, R=2 case
P1_Ntp_R1 = 0.4838424889135673
P1 = temperature_effectiveness_TEMA_G(R1=2., NTU1=7., Ntp=2) # R = 2 case
assert_close(P1, P1_Ntp_R1)
P1_near = temperature_effectiveness_TEMA_G(R1=2-1E-9, NTU1=7., Ntp=2)
assert_close(P1_near, P1_Ntp_R1)
# Ntp = 2, not optimal case, R1=0.5 case
P1 = temperature_effectiveness_TEMA_G(R1=1/3., NTU1=1., Ntp=2, optimal=False)
assert_close(P1, 0.5559883028569507)
P1_Ntp_R1 = 0.3182960796403764
P1 = temperature_effectiveness_TEMA_G(R1=2, NTU1=1., Ntp=2, optimal=False)
assert_close(P1, P1_Ntp_R1)
P1_near = temperature_effectiveness_TEMA_G(R1=2-1E-9, NTU1=1., Ntp=2, optimal=False)
assert_close(P1_near, P1_Ntp_R1)
with pytest.raises(Exception):
temperature_effectiveness_TEMA_G(R1=2., NTU1=7., Ntp=5)
# The optimal 2 pass case from Thulukkanam is checked with the following case
# to be the same as those in Rosenhow and Shah
# Believed working great.
m1 = .5
m2 = 1.2
Cp1 = 1800.
Cp2 = 2200.
UA = 500.
C1 = m1*Cp1
C2 = m2*Cp2
R1_orig = R1 = C1/C2
NTU1 = UA/C1
R2 = C2/C1
NTU2 = UA/C2
P1_good = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1, Ntp=2)
# Good G 2 pass case, working
R1 = R2
NTU1 = NTU2
beta = exp(-NTU1*(2*R1 - 1)/2.)
alpha = exp(-NTU1*(2*R1 + 1)/4.)
B = (4*R1 - beta*(2*R1 + 1))/(2*R1 - 1.)
A = -1*(1-alpha)**2/(R1 + 0.5)
P1 = (B - alpha**2)/(R1*(A + 2 + B/R1))
P1 = P1/R1_orig
assert_close(P1, P1_good)
def test_temperature_effectiveness_TEMA_E():
    # The 1 and 2 tube pass cases both match perfectly
eff = temperature_effectiveness_TEMA_E(R1=1/3., NTU1=1., Ntp=1)
assert_close(eff, 0.5870500654031314)
eff = temperature_effectiveness_TEMA_E(R1=1., NTU1=7., Ntp=1)
assert_close(eff, 0.875)
# Remaining E-shells, checked
eff = temperature_effectiveness_TEMA_E(R1=1/3., NTU1=1., Ntp=2)
assert_close(eff, 0.5689613217664634)
eff = temperature_effectiveness_TEMA_E(R1=1., NTU1=7., Ntp=2) # R = 1 case
assert_close(eff, 0.5857620762776082)
eff = temperature_effectiveness_TEMA_E(R1=1/3., NTU1=1., Ntp=2, optimal=False)
assert_close(eff, 0.5699085193651295) # unoptimal case
eff = temperature_effectiveness_TEMA_E(R1=2, NTU1=1., Ntp=2, optimal=False)
assert_close(eff, 0.3580830895954234)
eff = temperature_effectiveness_TEMA_E(R1=1/3., NTU1=1., Ntp=3)
assert_close(eff, 0.5708624888990603)
eff = temperature_effectiveness_TEMA_E(R1=1., NTU1=7., Ntp=3) # R = 1 case
assert_close(eff, 0.6366132064792461)
eff = temperature_effectiveness_TEMA_E(R1=3., NTU1=1., Ntp=3, optimal=False)
assert_close(eff, 0.276815590660033)
eff = temperature_effectiveness_TEMA_E(R1=1/3., NTU1=1., Ntp=4)
assert_close(eff, 0.56888933865756)
eff = temperature_effectiveness_TEMA_E(R1=1., NTU1=7., Ntp=4) # R = 1 case, even though it's no longer used
assert_close(eff, 0.5571628802075902)
with pytest.raises(Exception):
temperature_effectiveness_TEMA_E(R1=1., NTU1=7., Ntp=7)
# Compare the expression for 4 tube passes in two of the sources with that
# in the third.
R1 = 1/3.
NTU1 = 1
D = (4 + R1**2)**0.5
B = tanh(R1*NTU1/4.)
A = 1/tanh(D*NTU1/4.)
P1 = 4*(2*(1 + R1) + D*A + R1*B)**-1
assert_close(P1, 0.56888933865756)
def test_temperature_effectiveness_air_cooler():
# 1 pass-N rows case
R1 = 0.9090909090909091
NTU1 = 14.958251192851375
expected_P1s = [0.6568205178185993, 0.7589599992302802, 0.8064227529035781, 0.8330202134563712, 0.8491213831157698, 0.8594126317585193, 0.8662974164766494, 0.871087594489211, 0.8745345926002213, 0.8770877118478316, 0.8790262425246239, 0.8805299599498708, 0.8817182454510963, 0.8826726050451953, 0.8834500769975893, 0.8840914654885264, 0.8846265414931143, 0.88507741320138, 0.8854607616314836, 0.8857893552314147, 0.886073095973165, 0.8863197546874396, 0.8865354963468465, 0.8867252608860744, 0.8868930430686396]
P1s_calc = [temperature_effectiveness_air_cooler(R1=R1, NTU1=NTU1, rows=N, passes=1) for N in range(1, 26)]
assert_close1d(expected_P1s, P1s_calc)
# Compare the results of 1-N against the function without the annoying optimizations;
# may be helpful for debugging
def calc_N_1_orig(NTU1, R1, N):
NTU, R = NTU1, R1
K = 1 - exp(-NTU/N)
top = N*exp(N*K*R)
tot = 0
for i in range(1, N):
for j in range(0, i+1):
prod = factorial(i)/factorial(i-j)/factorial(j)
tot1 = prod*K**j*exp(-(i-j)*NTU/N)
tot2 = 0
for k in range(0, j+1):
tot2 += (N*K*R)**k/factorial(k)
tot += tot1*tot2
P = 1/R*(1 - (top/(1+tot))**-1)
return P
P1s_calc = [calc_N_1_orig(R1=R1, NTU1=NTU1, N=N) for N in range(1, 26)]
assert_close1d(expected_P1s, P1s_calc)
# N rows / N passes (N from 2 to 5) cases
R1, NTU1 = 1.1, .5
expected_P1s = [0.3254086785640332, 0.3267486216405819, 0.3272282999575143, 0.3274325680785421]
P1s_calc = [temperature_effectiveness_air_cooler(R1, NTU1, rows=N, passes=N) for N in range(2, 6)]
assert_close1d(expected_P1s, P1s_calc)
# 4 row / 2 pass special case
P1_calc = temperature_effectiveness_air_cooler(R1, NTU1, rows=4, passes=2)
assert_close(P1_calc, 0.32552127419957044)
# Tentative checking of the above has been done with hete.c for isolated cases
def test_temperature_effectiveness_air_cooler_coerce():
    # Simple test that calls with various numbers of rows and passes can be
    # domain-reduced without causing a recursion depth error.
    # Do not test the results for any specific answer, as they will ideally one day
    # be replaced with the exactly correct ones.
[temperature_effectiveness_air_cooler(.5, 2, rows=j, passes=i) for i in range(1, 10) for j in range(1, 10)]
@pytest.mark.mpmath
def test_P_NTU_method():
# Counterflow case
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='counterflow', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1i=130, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
# Parallel flow case
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='parallel', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1i=130, T2i=15, subtype='parallel')
assert_close(ans2['Q'], ans['Q'])
# Mixed Cmax/ 1
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1i=130, T2i=15, subtype='crossflow, mixed 1')
assert_close(ans2['Q'], ans['Q'])
# Mixed Cmin/2
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmin', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1i=130, T2i=15, subtype='crossflow, mixed 2')
assert_close(ans2['Q'], ans['Q'])
# Counterflow case but with all five different temperature input cases (both inlets known already done)
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='counterflow', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1o=110.06100082712986, T2o=85, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1i=130, T2o=85, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1o=110.06100082712986, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T2o=85, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=ans['UA'], T1o=110.06100082712986, T1i=130, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
# Only 1 temperature input
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, subtype='counterflow')
# Bad HX type input
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='BADTYPE')
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='E', Ntp=10)
    assert_close(ans['Q'], 32212.185563086336)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='G', Ntp=2)
assert_close(ans['Q'], 32224.88788570008)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='H', Ntp=2)
assert_close(ans['Q'], 32224.888572366734)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='J', Ntp=2)
assert_close(ans['Q'], 32212.185699719837)
# Plate tests
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='3/1')
assert_close(ans['Q'], 32214.179745602625)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='3/1', optimal=False)
assert_close(ans['Q'], 32210.4190840378)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='2/2')
assert_close(ans['Q'], 32229.120739501937)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='2/2', optimal=False)
assert_close(ans['Q'], 32203.721238671216)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='2/2c', optimal=False)
assert_close(ans['Q'], 32203.721238671216)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., UA=300, T1i=130, T2i=15, subtype='2/2p', optimal=False)
assert_close(ans['Q'], 32195.273806845064)
def test_P_NTU_method_backwards():
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='counterflow', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T2i=15, T2o=85, T1o=110.06100082712986, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
# # Parallel flow case
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='parallel', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1i=130, T2i=15, T1o=110.06100082712986, subtype='parallel')
assert_close(ans2['Q'], ans['Q'])
# # Mixed Cmax/ 1
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmax', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, T2i=15, subtype='crossflow, mixed 1')
assert_close(ans2['Q'], ans['Q'])
# # Mixed Cmin/2
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='crossflow, mixed Cmin', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, T2i=15, subtype='crossflow, mixed 2')
assert_close(ans2['Q'], ans['Q'])
# # Counterflow case but with all five different temperature input cases (both inlets known already done)
ans = effectiveness_NTU_method(mh=5.2, mc=1.45, Cph=1860., Cpc=1900, subtype='counterflow', Tci=15, Tco=85, Tho=110.06100082712986)
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1i=130, T1o=110.06100082712986, T2o=85, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, T2o=85, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1i=130, T1o=110.06100082712986, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T2o=85, T2i=15, T1o=110.06100082712986, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T2o=85, T1i=130, T2i=15, subtype='counterflow')
assert_close(ans2['Q'], ans['Q'])
# TEMA types
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1i=130, T1o=126.66954243557834, T2i=15, subtype='E', Ntp=10)
    assert_close(ans['Q'], 32212.185563086336)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.66822912678866, T1i=130, T2i=15, subtype='G', Ntp=2)
assert_close(ans['Q'], 32224.88788570008)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.66822905579335, T1i=130, T2i=15, subtype='H', Ntp=2)
assert_close(ans['Q'], 32224.888572366734)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.66954242145162, T1i=130, T2i=15, subtype='J', Ntp=2)
assert_close(ans['Q'], 32212.185699719837)
# Plate tests
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.6693362545903, T1i=130, T2i=15, subtype='3/1')
assert_close(ans['Q'], 32214.179745602625)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.66972507402421, T1i=130, T2i=15, subtype='3/1', optimal=False)
assert_close(ans['Q'], 32210.4190840378)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.66779148681742, T1i=130, T2i=15, subtype='2/2')
assert_close(ans['Q'], 32229.120739501937)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.67041757251124, T1i=130, T2i=15, subtype='2/2', optimal=False)
assert_close(ans['Q'], 32203.721238671216)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.67041757251124, T1i=130, T2i=15, subtype='2/2c', optimal=False)
assert_close(ans['Q'], 32203.721238671216)
ans = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=126.67129096289857, T1i=130, T2i=15, subtype='2/2p', optimal=False)
assert_close(ans['Q'], 32195.273806845064)
    # Case where the Q values computed for the two streams don't match
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T2o=85, T1i=170, T2i=15, subtype='counterflow')
    # No T specified on side 2
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, subtype='counterflow')
# No T specified on side 1
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T2o=85, T2i=15, subtype='counterflow')
# No T information at all
with pytest.raises(Exception):
ans2 = P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., subtype='counterflow')
# subtype not recognized
with pytest.raises(Exception):
P_NTU_method(m1=5.2, m2=1.45, Cp1=1860., Cp2=1900., T1o=110.06100082712986, T1i=130, T2i=15, subtype='NOTAREALTYPEOFHEATEXCHANGER')
def test_Pp():
from ht.hx import Pp, Pc
# randomly chosen test value
ans = Pp(5, .4)
assert_close(ans, 0.713634370024604)
# Test the limit works with a small difference
assert_close(Pp(2, -1), Pp(2, -1+1E-9))
# randomly chosen test value
assert_close(Pc(5, .7), 0.9206703686051108)
# Test the limit works with a small difference
assert_close(Pc(5, 1), Pc(5, 1-1E-8))
def test_temperature_effectiveness_plate():
R1 = 0.5
NTU1 = 1.5
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=1, counterflow=True)
assert_close(P1, 0.6907854082479168)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=1, counterflow=False)
assert_close(P1, 0.5964005169587571)
# 1 pass/2 pass
for b1 in [True, False]:
for b2 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=2, counterflow=b1, passes_counterflow=b2)
assert_close(P1, 0.6439306988115887)
# We can check we did the conversion right as follows:
NTU2 = NTU1*R1 #
R2 = 1./R1 # switch 2
P2 = P1*R1
P2_reversed = temperature_effectiveness_plate(R2, NTU2, Np1=2, Np2=1)
assert_close(P2, P2_reversed)
# in reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=1, counterflow=b1, passes_counterflow=b2)
assert_close(P1, 0.6505342399575915)
# 1 pass/3 pass, counterflow
for b1 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=3, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.6491132138517642)
# In reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=3, Np2=1, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.6565261377239298)
# 1 pass/3 pass, parallel
for b1 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=3, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6385443460862099)
# in reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=3, Np2=1, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6459675147406085)
# 1 pass/4 pass
for b1 in [True, False]:
for b2 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=1, Np2=4, counterflow=b1, passes_counterflow=b2)
assert_close(P1, 0.6438068496552443)
# In reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=4, Np2=1, counterflow=b1, passes_counterflow=b2)
assert_close(P1, 0.6515539888566283)
# Four different results for 4 passes
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
assert_close(P1, 0.5964005169587571)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
assert_close(P1, 0.6123845839665905)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=2, counterflow=True, passes_counterflow=False)
assert_close(P1, 0.6636659009073801)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=2, counterflow=True, passes_counterflow=True)
assert_close(P1, 0.6907854082479168)
# 2-3 counterflow
for b1 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=3, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.67478876724034)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=3, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6102922060616937)
# In reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=3, Np2=2, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.675522913050678)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=3, Np2=2, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6105764872072659)
# 2-4 counterflow
for b1 in [True, False]:
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=4, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.6777107269336475)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=2, Np2=4, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6048585344522575)
# In reverse
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=4, Np2=2, counterflow=True, passes_counterflow=b1)
assert_close(P1, 0.6786601861219819)
P1 = temperature_effectiveness_plate(R1, NTU1, Np1=4, Np2=2, counterflow=False, passes_counterflow=b1)
assert_close(P1, 0.6054166111196166)
with pytest.raises(Exception):
temperature_effectiveness_plate(R1=1/3., NTU1=1., Np1=3, Np2=3)
@pytest.mark.mpmath
def test_NTU_from_P_basic():
# Analytical result for counterflow
R1s = np.logspace(np.log10(2E-5), np.log10(1E2), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E2), 10000)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
# Not all of the guesses work forward; some overflow, some divide by 0
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='counterflow')
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='counterflow')
except (ValueError, OverflowError, ZeroDivisionError):
continue
# Again, multiple values of NTU1 can produce the same P1
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='counterflow')
assert_close(P1, P1_calc)
# Analytical result for parallel flow
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='parallel')
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='parallel')
except (ValueError, OverflowError, ZeroDivisionError):
continue
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='parallel')
assert_close(P1, P1_calc)
# Analytical result for 'crossflow, mixed 1'
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
# Not all of the guesses work forward; some overflow, some divide by 0
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow, mixed 1')
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='crossflow, mixed 1')
except (ValueError, OverflowError, ZeroDivisionError):
continue
# Again, multiple values of NTU1 can produce the same P1
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='crossflow, mixed 1')
assert_close(P1, P1_calc)
# Analytical result for 'crossflow, mixed 2'
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
# Not all of the guesses work forward; some overflow, some divide by 0
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow, mixed 2')
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='crossflow, mixed 2')
except (ValueError, OverflowError, ZeroDivisionError):
continue
# Again, multiple values of NTU1 can produce the same P1
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='crossflow, mixed 2')
assert_close(P1, P1_calc)
# Test 'crossflow, mixed 1&2':
R1s = np.logspace(np.log10(2E-5), np.log10(1E2), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E2), 10000)
seed(0)
tot = 0
for i in range(100):
R1 = choice(R1s)
NTU1 = choice(NTU1s)
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow, mixed 1&2')
try:
# Very rarely, the pade approximation will get a result too close to the infeasibility region and
# the solver cannot start as it is already outside the region
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='crossflow, mixed 1&2')
except:
continue
# May not get the original NTU1, but the found NTU1 needs to produce the same P1.
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='crossflow, mixed 1&2')
assert_close(P1, P1_calc)
tot +=1
assert tot == 100
# crossflow approximate - easy as 1 is always a possibility for any R
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow approximate')
NTU1_calc = NTU_from_P_basic(P1, R1, subtype='crossflow approximate')
        # We have to compare the recalculated P1 values, because for many values of NTU1,
# at the initial far guess of 10000 P1 = 1 and at the random NTU1 P1 is also 1
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='crossflow approximate')
# In python 2.6 and 3.3 the solver doesn't converge as well, so we need
# to add a little tolerance
assert_close(P1, P1_calc, rtol=5E-6)
# Crossflow approximate test case
R1 = .1
NTU1 = 2
P1_calc_orig = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow approximate')
P1_expect = 0.8408180737140558
assert_close(P1_calc_orig, P1_expect)
NTU1_backwards = NTU_from_P_basic(P1=P1_expect, R1=R1, subtype='crossflow approximate')
assert_close(NTU1, NTU1_backwards)
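    # The 'crossflow approximate' form assumed here is the classic approximation
    # P1 = 1 - exp(NTU1**0.22/R1*(exp(-R1*NTU1**0.78) - 1)); evaluating it at
    # R1 = 0.1, NTU1 = 2 should reproduce P1_expect above.
    P1_sketch = 1. - exp(NTU1**0.22/R1*(exp(-R1*NTU1**0.78) - 1.))
    assert abs(P1_sketch - P1_expect) < 1e-6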
    # Test crossflow - fails VERY OFTEN, should rely on crossflow approximate
NTU1 = 10
R1 = 0.5
P1 = temperature_effectiveness_basic(R1=R1, NTU1=NTU1, subtype='crossflow')
NTU1_calc = NTU_from_P_basic(P1, R1=R1, subtype='crossflow')
assert_close(NTU1, NTU1_calc)
# bad type of exchanger
with pytest.raises(Exception):
NTU_from_P_basic(P1=.975, R1=.1, subtype='BADTYPE')
@pytest.mark.mpmath
def test_NTU_from_P_E():
# not yet documented
# 1 tube pass AKA counterflow
R1s = np.logspace(np.log10(2E-5), np.log10(1E2), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E2), 10000)
    # Exactly the same as the counterflow basic case
tot = 0
seed(0)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
# Not all of the guesses work forward; some overflow, some divide by 0
P1 = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1, Ntp=1)
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_E(P1, R1, Ntp=1)
except (ValueError, OverflowError, ZeroDivisionError):
continue
# Again, multiple values of NTU1 can produce the same P1
P1_calc = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1_calc, Ntp=1)
assert_close(P1, P1_calc)
tot +=1
assert tot >= 85
# 2 tube passes (optimal arrangement) (analytical)
R1 = 1.1
NTU1 = 10
P1 = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1, Ntp=2, optimal=True)
P1_expect = 0.5576299522073297
assert_close(P1, P1_expect)
NTU1_calc = NTU_from_P_E(P1, R1, Ntp=2, optimal=True)
assert_close(NTU1_calc, NTU1)
# 2 tube pass (unoptimal)
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
# Not all of the guesses work forward; some overflow, some divide by 0
P1 = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1, Ntp=2, optimal=False)
# Backwards, it's the same divide by zero or log(negative number)
NTU1_calc = NTU_from_P_E(P1, R1, Ntp=2, optimal=False)
except (ValueError, OverflowError, ZeroDivisionError):
continue
# Again, multiple values of NTU1 can produce the same P1
try:
P1_calc = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1_calc, Ntp=2, optimal=False)
except (ZeroDivisionError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot >= 90
# At the default mpmath precision, the following will predict a value larger
# than one
bad_P1 = temperature_effectiveness_TEMA_E(R1=1E-8 , NTU1=19.60414246043446, Ntp=2, optimal=False)
assert_close(bad_P1, 1.0000000050247593)
    # 4, 6, 8, 10 and 12 tube passes
for Ntp in [4, 6, 8, 10, 12]:
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1, Ntp=Ntp)
NTU1_calc = NTU_from_P_E(P1, R1, Ntp=Ntp)
except ValueError:
# The case where with mpmath being used, the result is too high for
# the bounded solver to be able to solve it
continue
P1_calc = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1_calc, Ntp=Ntp)
assert_close(P1, P1_calc)
tot +=1
assert tot >= 70
# 3 pass optimal and not optimal
R1s = np.logspace(np.log10(2E-5), np.log10(1E1), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E1), 10000)
seed(0)
for optimal in [True, False]:
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1, Ntp=3, optimal=optimal)
NTU1_calc = NTU_from_P_E(P1, R1, Ntp=3, optimal=optimal)
except (ValueError):
# The case where with mpmath being used, the result is too high for
# the bounded solver to be able to solve it
continue
# Again, multiple values of NTU1 can produce the same P1
P1_calc = temperature_effectiveness_TEMA_E(R1=R1, NTU1=NTU1_calc, Ntp=3, optimal=optimal)
assert_close(P1, P1_calc)
tot +=1
assert tot >= 97
with pytest.raises(Exception):
NTU_from_P_E(P1=1, R1=1, Ntp=17)
@pytest.mark.mpmath
def test_NTU_from_P_H():
    # Within these limits everything is fine
R1s = np.logspace(np.log10(2E-5), np.log10(1E1), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(10), 10000)
seed(0)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1, Ntp=1)
NTU1_calc = NTU_from_P_H(P1, R1, Ntp=1)
P1_calc = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1_calc, Ntp=1)
assert_close(P1, P1_calc)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1, Ntp=2)
NTU1_calc = NTU_from_P_H(P1, R1, Ntp=2)
P1_calc = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1_calc, Ntp=2)
assert_close(P1, P1_calc)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1, Ntp=2, optimal=False)
NTU1_calc = NTU_from_P_H(P1, R1, Ntp=2, optimal=False)
P1_calc = temperature_effectiveness_TEMA_H(R1=R1, NTU1=NTU1_calc, Ntp=2, optimal=False)
assert_close(P1, P1_calc, rtol=1E-6)
with pytest.raises(Exception):
NTU_from_P_H(P1=0.573, R1=1/3., Ntp=101)
@pytest.mark.mpmath
def test_NTU_from_P_G():
# 1 tube pass, random point
R1 = 1.1
NTU1 = 2
P1_calc_orig = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1, Ntp=1)
P1_expect = 0.5868787117241955
assert_close(P1_calc_orig, P1_expect)
NTU1_backwards = NTU_from_P_G(P1=P1_expect, R1=R1, Ntp=1)
assert_close(NTU1, NTU1_backwards)
    # 2 tube passes, random point
R1 = 1.1
NTU1 = 2
P1_calc_orig = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1, Ntp=2)
P1_expect = 0.6110347802764724
assert_close(P1_calc_orig, P1_expect)
NTU1_backwards = NTU_from_P_G(P1=P1_expect, R1=R1, Ntp=2)
assert_close(NTU1, NTU1_backwards)
# 2 tube pass, not optimal
R1 = .1
NTU1 = 2
P1_calc_orig = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1, Ntp=2, optimal=False)
P1_expect = 0.8121969945075509
assert_close(P1_calc_orig, P1_expect)
NTU1_backwards = NTU_from_P_G(P1=P1_expect, R1=R1, Ntp=2, optimal=False)
assert_close(NTU1, NTU1_backwards)
# Run the gamut testing all the solvers
R1s = np.logspace(np.log10(2E-5), np.log10(1E2), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E2), 10000)
seed(0)
tot = 0
for Ntp, optimal in zip([1, 2, 2], [True, True, False]):
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1, Ntp=Ntp, optimal=optimal)
NTU1_calc = NTU_from_P_G(P1, R1, Ntp=Ntp, optimal=optimal)
P1_calc = temperature_effectiveness_TEMA_G(R1=R1, NTU1=NTU1_calc, Ntp=Ntp, optimal=optimal)
except (ValueError, OverflowError, ZeroDivisionError, RuntimeError) as e:
continue
if isnan(P1) or isnan(P1_calc):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot > 270
with pytest.raises(Exception):
NTU_from_P_G(P1=.573, R1=1/3., Ntp=10)
@pytest.mark.mpmath
def test_NTU_from_P_J():
# Run the gamut testing all the solvers
R1s = np.logspace(np.log10(2E-5), np.log10(1E2), 10000)
NTU1s = np.logspace(np.log10(1E-4), np.log10(1E2), 10000)
seed(0)
tot = 0
for Ntp in [1, 2, 4]:
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_TEMA_J(R1=R1, NTU1=NTU1, Ntp=Ntp)
NTU1_calc = NTU_from_P_J(P1, R1, Ntp=Ntp)
P1_calc = temperature_effectiveness_TEMA_J(R1=R1, NTU1=NTU1_calc, Ntp=Ntp)
except (ValueError, OverflowError, ZeroDivisionError, RuntimeError) as e:
continue
assert_close(P1, P1_calc)
tot +=1
assert tot > 270
# Actual individual understandable working test cases
# 1 tube pass
R1 = 1.1
NTU1 = 3
P1_calc_orig = temperature_effectiveness_TEMA_J(R1=R1, NTU1=NTU1, Ntp=1)
P1_expect = 0.5996529947927913
assert_close(P1_calc_orig, P1_expect)
NTU1_backwards = NTU_from_P_J(P1=P1_expect, R1=R1, Ntp=1)
assert_close(NTU1, NTU1_backwards)
# 2 tube passes
R1 = 1.1
NTU1 = 2.7363888898379249
P1_calc_orig = temperature_effectiveness_TEMA_J(R1=R1, NTU1=NTU1, Ntp=2)
P1_expect = 0.53635261090479802
assert_close(P1_calc_orig, P1_expect)
# The exact P1 is slightly higher than that calculated as the upper limit
# of the pade approximation, so we multiply it by a small fraction
NTU1_backwards = NTU_from_P_J(P1=P1_expect*(1-2E-9), R1=R1, Ntp=2)
assert_close(NTU1, NTU1_backwards, rtol=1E-3)
# Unfortunately the derivative is so large we can't compare it exactly
# 4 tube passes
R1 = 1.1
NTU1 = 2.8702676768833268
P1_calc_orig = temperature_effectiveness_TEMA_J(R1=R1, NTU1=NTU1, Ntp=4)
P1_expect = 0.53812561986477236
assert_close(P1_calc_orig, P1_expect)
# The exact P1 is slightly higher than that calculated as the upper limit
# of the pade approximation, so we multiply it by a small fraction
NTU1_backwards = NTU_from_P_J(P1=P1_expect*(1-1E-15), R1=R1, Ntp=4)
assert_close(NTU1, NTU1_backwards)
    # The derivative is very large but the pade approximation is really good, and it works
with pytest.raises(Exception):
# unsupported number of tube passes case
NTU_from_P_J(P1=.57, R1=1/3., Ntp=10)
def test_NTU_from_P_plate():
# 1 pass-1 pass counterflow
NTU1 = 3.5
R1 = 0.25
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=1)
assert_close(P1_calc, 0.944668125335067)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=1, Np2=1)
assert_close(NTU1, NTU1_calc)
with pytest.raises(Exception):
NTU_from_P_plate(P1=.10001, R1=10, Np1=1, Np2=1, counterflow=True)
# 1 pass-1 pass parallelflow
NTU1 = 3.5
R1 = 0.25
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=1, counterflow=False)
assert_close(P1_calc, 0.7899294862060529)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=1, Np2=1, counterflow=False)
assert_close(NTU1, NTU1_calc)
with pytest.raises(Exception):
NTU_from_P_plate(P1=.091, R1=10, Np1=1, Np2=1, counterflow=False)
# 1-2 True True
R1s = np.logspace(np.log10(2E-5), np.log10(10), 10000) # too high R1 causes overflows
NTU1s = np.logspace(np.log10(1E-4), np.log10(99), 10000)
tot = 0
seed(0)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=2)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=1, Np2=2)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=1, Np2=2)
except (OverflowError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot > 97
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=3)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=1, Np2=3)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=1, Np2=3)
except (OverflowError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot >= 99
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=3, counterflow=False)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=1, Np2=3, counterflow=False)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=1, Np2=3, counterflow=False)
except (OverflowError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot >= 99
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=1, Np2=4)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=1, Np2=4)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=1, Np2=4)
except (OverflowError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot >= 99
# 2-2 pass cases
# counterflow and not passes_counterflow
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=True, passes_counterflow=False)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=2, counterflow=True, passes_counterflow=False)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=2, counterflow=True, passes_counterflow=False)
except (OverflowError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot >= 99
# not counterflow and not passes_counterflow
# random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
assert_close(P1_calc, 0.5174719601105934)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
assert_close(NTU1, NTU1_calc)
# methodical test
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=2, counterflow=False, passes_counterflow=False)
except (ZeroDivisionError, ValueError):
continue
assert_close(P1, P1_calc)
tot +=1
assert tot > 85
# not counterflow and passes_counterflow
# random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
assert_close(P1_calc, 0.529647502598342)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
assert_close(NTU1, NTU1_calc)
# methodical
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=2, counterflow=False, passes_counterflow=True)
assert_close(P1, P1_calc)
# 2-2 counterflow and passes_counterflow
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=2, counterflow=True, passes_counterflow=True)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=2, counterflow=True, passes_counterflow=True)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=2, counterflow=True, passes_counterflow=True)
except (ValueError, ZeroDivisionError):
continue
tot +=1
assert_close(P1, P1_calc)
assert tot > 90
# 2-3 counterflow - random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=3, counterflow=True)
assert_close(P1_calc, 0.5696402802155714)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=3, counterflow=True)
assert_close(NTU1, NTU1_calc)
# 2-3 counterflow - methodical
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=3, counterflow=True)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=3, counterflow=True)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=3, counterflow=True)
except (ValueError, ZeroDivisionError):
continue
tot +=1
assert_close(P1, P1_calc, rtol=5E-4)
assert tot > 85
# 2-3 parallelflow - random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=3, counterflow=False)
assert_close(P1_calc, 0.5272339114328507)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=3, counterflow=False)
assert_close(NTU1, NTU1_calc)
# 2-3 parallelflow - methodical (all work for given range)
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=3, counterflow=False)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=3, counterflow=False)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=3, counterflow=False)
assert_close(P1, P1_calc)
# 2-4 counterflow - random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=4, counterflow=True)
assert_close(P1_calc, 0.5717083161054717)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=4, counterflow=True)
assert_close(NTU1, NTU1_calc)
# 2-4 counterflow - methodical
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=4, counterflow=True)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=4, counterflow=True)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=4, counterflow=True)
except (ValueError, ZeroDivisionError):
continue
tot +=1
assert_close(P1, P1_calc)
assert tot > 95
# 2-4 parallelflow - random example
NTU1 = 1.1
R1 = .6
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=4, counterflow=False)
assert_close(P1_calc, 0.5238412695944656)
NTU1_calc = NTU_from_P_plate(P1=P1_calc, R1=R1, Np1=2, Np2=4, counterflow=False)
assert_close(NTU1, NTU1_calc)
    # 2-4 parallelflow - methodical
tot = 0
for i in range(100):
R1 = float(choice(R1s))
NTU1 = float(choice(NTU1s))
try:
P1 = temperature_effectiveness_plate(R1=R1, NTU1=NTU1, Np1=2, Np2=4, counterflow=False)
NTU1_calc = NTU_from_P_plate(P1, R1, Np1=2, Np2=4, counterflow=False)
P1_calc = temperature_effectiveness_plate(R1=R1, NTU1=NTU1_calc, Np1=2, Np2=4, counterflow=False)
except (ValueError, ZeroDivisionError):
continue
tot +=1
assert_close(P1, P1_calc)
assert tot > 95
# Backwards, only one example in the tests
# No real point in being exhaustive
NTU1 = NTU_from_P_plate(P1=0.5743514352720835, R1=1/3., Np1=3, Np2=1)
assert_close(NTU1, 1)
# Bad number of plates
with pytest.raises(Exception):
NTU_from_P_plate(P1=0.5743, R1=1/3., Np1=3, Np2=13415151213)
def test_DBundle_min():
assert_close(DBundle_min(0.0254), 1)
assert_close(DBundle_min(0.005), .1)
assert_close(DBundle_min(0.014), .3)
assert_close(DBundle_min(0.015), .5)
assert_close(DBundle_min(.1), 1.5)
def test_shell_clearance():
assert_close(shell_clearance(DBundle=1.245), 0.0064)
assert_close(shell_clearance(DBundle=4), 0.011)
assert_close(shell_clearance(DBundle=.2), .0032)
assert_close(shell_clearance(DBundle=1.778), 0.0095)
assert_close(shell_clearance(DShell=1.245), 0.0064)
assert_close(shell_clearance(DShell=4), 0.011)
assert_close(shell_clearance(DShell=.2), .0032)
assert_close(shell_clearance(DShell=1.778), 0.0095)
with pytest.raises(Exception):
shell_clearance()
def test_L_unsupported_max():
assert_close(L_unsupported_max(Do=.0254, material='CS'), 1.88)
assert_close(L_unsupported_max(Do=.0253, material='CS'), 1.753)
assert_close(L_unsupported_max(Do=1E-5, material='CS'), 0.66)
assert_close(L_unsupported_max(Do=.00635, material='CS'), 0.66)
assert_close(L_unsupported_max(Do=.00635, material='aluminium'), 0.559)
with pytest.raises(Exception):
L_unsupported_max(Do=.0254, material='BADMATERIAL')
# Terribly pessimistic
assert_close(L_unsupported_max(Do=10, material='CS'), 3.175) |
protonfixes/gamefixes/307690.py | Sirmentio/protonfixes | 213 | 11106290 | """ Game fix for Sleeping Dogs: Definitive Edition
Note: It does not work with WINED3D.
After the game starts, fullscreen and resolution can be set from the game's display settings.
Sometimes the game crashes.
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Set virtual desktop
"""
# https://github.com/ValveSoftware/Proton/issues/872
util.protontricks('vd=1280x720')
|
anubis/scanners/shodan.py | sdfmmbi/Anubis | 972 | 11106294 | import socket
from json import dumps
import shodan
from anubis.utils.ColorPrint import ColorPrint
def search_shodan(self):
print("Searching Shodan.io for additional information")
try:
from anubis.API import SHODAN_KEY
except ImportError:
ColorPrint.red("Unable to import API keys - make sure API.py exists!")
return
api = shodan.Shodan(SHODAN_KEY)
for i in range(len(self.options["TARGET"])):
try:
results = api.host(socket.gethostbyname(self.options["TARGET"][i]))
if self.options["--verbose"]:
print(dumps(results, indent=2, sort_keys=True))
print('Server Location: ' + str(results['city']) + ", " + str(
results['country_code']) + ' - ' + str(results['postal_code']))
print("ISP or Hosting Company: %s" % str(results['isp']))
if results['os'] is not None:
print("Possible OS: %s" % str(results['os']))
except Exception as e:
self.handle_exception(e, "Error retrieving additional info")
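# --- Hedged usage sketch (not part of the original module) ---
# search_shodan is written as a scanner method: the only interface it needs
# from `self` is an `options` dict with "TARGET" and "--verbose" keys and a
# `handle_exception(exception, message)` callback. The stub below is our own
# illustration of that implicit interface, not Anubis code; actually running
# it also requires a valid SHODAN_KEY in anubis.API.
class _ScannerStub:
    def __init__(self, targets, verbose=False):
        self.options = {"TARGET": targets, "--verbose": verbose}
    def handle_exception(self, e, message):
        print("%s: %s" % (message, e))
# Example call (not executed here): search_shodan(_ScannerStub(["example.com"]))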
|
zeus/networks/tensorflow/detectors/faster_rcnn_trainer_callback.py | shaido987/vega | 240 | 11106302 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""CARS trainer."""
import logging
import tensorflow as tf
import tf_slim as slim
from object_detection.core import standard_fields as fields
from object_detection.utils import variables_helper
from zeus.common import ClassFactory, ClassType
from zeus.trainer.callbacks import Callback
from .tf_optimizer import TFOptimizer
@ClassFactory.register(ClassType.CALLBACK)
class FasterRCNNTrainerCallback(Callback):
"""A special callback for FasterRCNNTrainer."""
disable_callbacks = ["ModelStatistics"]
def model_fn(self, features, labels, mode):
"""Define Faster R-CNN model_fn used by TensorFlow Estimator."""
logging.info('Faster R-CNN model function action')
self.model = self.trainer.model
self.config = self.trainer.config
predict_result_dict = self.model(
features, labels, mode == tf.estimator.ModeKeys.TRAIN)
self.fine_tune_checkpoint_type = self.config.fine_tune_checkpoint_type
self.load_all_detection_checkpoint_vars = True
asg_map = self.model.restore_map(
fine_tune_checkpoint_type=self.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
self.load_all_detection_checkpoint_vars))
self.fine_tune_checkpoint = self.config.fine_tune_checkpoint
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
self.fine_tune_checkpoint,
include_global_step=False))
tf.train.init_from_checkpoint(self.fine_tune_checkpoint,
available_var_map)
losses_dict = self.model.loss(
predict_result_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
total_loss = tf.add_n(losses, name='total_loss')
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
self.optimizer, self.optimizer_summary_vars = TFOptimizer(
self.config.optimizer).get_real_optimizer(global_step)
trainable_variables = None
trainable_variables = slim.filter_variables(
tf.trainable_variables())
clip_gradients_value = None
summaries = None
train_op = slim.optimizers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=self.optimizer,
update_ops=self.model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
eval_metric_ops = None
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = self.valid_metrics(predict_result_dict, labels)
return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op,
eval_metric_ops=eval_metric_ops)
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/colorama/__init__.py | brianherrera/lumberyard | 1,738 | 11106331 |
# Copyright <NAME> 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.4.4'
|
examples/smoke/smoke.py | Frekby/glumpy | 1,074 | 11106360 |
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
# Porting of the Fluid demo by <NAME> (c) 2010
# Originals sources and explanation on http://prideout.net/blog/?p=58
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gloo, gl, data
# Constants
# -------------------------------------
CellSize = 1.25
ViewportWidth = 512
ViewportHeight = 512
GridWidth = 512
GridHeight = 512
SplatRadius = GridWidth / 8.0
AmbientTemperature = -1.0
ImpulseTemperature = 10.0
ImpulseDensity = 1.0
NumJacobiIterations = 40
TimeStep = 0.125
SmokeBuoyancy = 1.00
SmokeWeight = 0.05
GradientScale = 1.125 / CellSize
TemperatureDissipation = 0.99
VelocityDissipation = 0.99
DensityDissipation = 0.9995
ImpulsePosition = GridWidth/2, -int(SplatRadius/2)
PositionSlot = 0
window = app.Window(ViewportWidth, ViewportHeight)
class Surface(object):
def __init__(self, width, height, depth, interpolation=gl.GL_NEAREST):
self.texture = np.zeros((height,width,depth), np.float32).view(gloo.TextureFloat2D)
self.texture.interpolation = interpolation
self.framebuffer = gloo.FrameBuffer(color=self.texture)
self.clear()
def clear(self):
self.activate()
gl.glClearColor(0, 0, 0, 0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
self.deactivate()
def activate(self):
self.framebuffer.activate()
def deactivate(self):
self.framebuffer.deactivate()
class Slab(object):
def __init__(self, width, height, depth, interpolation=gl.GL_NEAREST):
self.Ping = Surface(width, height, depth, interpolation)
self.Pong = Surface(width, height, depth, interpolation)
def swap(self):
self.Ping, self.Pong = self.Pong, self.Ping
def Program(fragment):
program = gloo.Program("smoke.vert", fragment, count=4)
program['Position'] = [(-1,-1), (-1,+1), (+1,-1), (+1,+1)]
return program
Velocity = Slab(GridWidth, GridHeight, 2)
Density = Slab(GridWidth, GridHeight, 1, gl.GL_LINEAR)
Pressure = Slab(GridWidth, GridHeight, 1)
Temperature = Slab(GridWidth, GridHeight, 1, gl.GL_LINEAR)
Divergence = Surface(GridWidth, GridHeight, 3)
Obstacles = Surface(GridWidth, GridHeight, 3, gl.GL_LINEAR)
prog_gradient = Program("gradient.frag")
prog_jacobi = Program("jacobi.frag")
prog_advect = Program("advect.frag")
prog_divergence = Program("divergence.frag")
prog_fill = Program("fill.frag")
prog_splat = Program("splat.frag")
prog_buoyancy = Program("buoyancy.frag")
prog_visualize = Program("visualize.frag")
prog_advect["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_divergence["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_gradient["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_buoyancy["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_jacobi["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_fill["InverseSize"] = 1.0 / GridWidth, 1.0 / GridHeight
prog_advect["TimeStep"] = TimeStep
prog_jacobi["Alpha"] = -CellSize * CellSize
prog_jacobi["InverseBeta"] = 0.25
prog_gradient["GradientScale"] = GradientScale
prog_divergence["HalfInverseCellSize"] = 0.5 / CellSize
prog_splat["Radius"] = SplatRadius
prog_splat["Point"] = ImpulsePosition
prog_buoyancy["AmbientTemperature"] = AmbientTemperature
prog_buoyancy["TimeStep"] = TimeStep
prog_buoyancy["Sigma"] = SmokeBuoyancy
prog_buoyancy["Kappa"] = SmokeWeight
def Advect(velocity, source, obstacles, dest, dissipation):
prog_advect["Dissipation"] = dissipation
prog_advect["VelocityTexture"] = velocity.texture
prog_advect["SourceTexture"] = source.texture
prog_advect["Obstacles"] = obstacles.texture
dest.activate()
prog_advect.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
def Jacobi(pressure, divergence, obstacles, dest):
prog_jacobi["Pressure"] = pressure.texture
prog_jacobi["Divergence"] = divergence.texture
prog_jacobi["Obstacles"] = obstacles.texture
dest.activate()
prog_jacobi.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
def SubtractGradient(velocity, pressure, obstacles, dest):
prog_gradient["Velocity"] = velocity.texture
prog_gradient["Pressure"] = pressure.texture
prog_gradient["Obstacles"] = obstacles.texture
dest.activate()
prog_gradient.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
def ComputeDivergence(velocity, obstacles, dest):
prog_divergence["Obstacles"] = obstacles.texture
prog_divergence["Velocity"] = velocity.texture
dest.activate()
prog_divergence.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
def ApplyImpulse(dest, position, value):
prog_splat["FillColor"] = value,value,value
dest.activate()
gl.glEnable(gl.GL_BLEND)
prog_splat.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
gl.glDisable(gl.GL_BLEND)
def ApplyBuoyancy(velocity, temperature, density, dest):
prog_buoyancy["Density"] = density.texture
prog_buoyancy["Velocity"] = velocity.texture
prog_buoyancy["Temperature"] = temperature.texture
dest.activate()
prog_buoyancy.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
def ClearSurface(surface, v):
surface.activate()
gl.glClearColor(v, v, v, v)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
surface.deactivate()
def disc(shape=(256,256), center=(128,128), radius = 96):
def distance(x,y):
return np.sqrt((x-center[0])**2+(y-center[1])**2)
D = np.fromfunction(distance,shape)
return np.where(D<=radius,1.0,0.0).astype(np.float32)
def CreateObstacles(dest, width, height):
dest.activate()
gl.glViewport(0, 0, width, height)
gl.glClearColor(0, 0, 0, 0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
T = np.ones((height,width,3), np.float32).view(gloo.Texture2D)
T[+1:-1,+1:-1] = 0.0
T[...,0] += disc(shape = (GridHeight,GridWidth),
center = (GridHeight/2,GridWidth/2),
radius = 32)
T[...,2] += -2*disc(shape = (GridHeight,GridWidth),
center = (GridHeight/2,GridWidth/2),
radius = 32)
prog_fill["Sampler"] = T
prog_fill.draw(gl.GL_TRIANGLE_STRIP)
dest.deactivate()
@window.event
def on_init():
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glDisable(gl.GL_BLEND)
ClearSurface(Temperature.Ping, AmbientTemperature)
CreateObstacles(Obstacles, GridWidth, GridHeight)
@window.event
def on_draw(dt):
gl.glViewport(0, 0, GridWidth, GridHeight)
gl.glDisable(gl.GL_BLEND)
Advect(Velocity.Ping, Velocity.Ping, Obstacles, Velocity.Pong, VelocityDissipation)
Velocity.swap()
Advect(Velocity.Ping, Temperature.Ping, Obstacles, Temperature.Pong, TemperatureDissipation)
Temperature.swap()
Advect(Velocity.Ping, Density.Ping, Obstacles, Density.Pong, DensityDissipation)
Density.swap()
ApplyBuoyancy(Velocity.Ping, Temperature.Ping, Density.Ping, Velocity.Pong)
Velocity.swap()
ApplyImpulse(Temperature.Ping, ImpulsePosition, ImpulseTemperature)
ApplyImpulse(Density.Ping, ImpulsePosition, ImpulseDensity)
ComputeDivergence(Velocity.Ping, Obstacles, Divergence)
ClearSurface(Pressure.Ping, 0.0)
for i in range(NumJacobiIterations):
Jacobi(Pressure.Ping, Divergence, Obstacles, Pressure.Pong)
Pressure.swap()
SubtractGradient(Velocity.Ping, Pressure.Ping, Obstacles, Velocity.Pong)
Velocity.swap()
gl.glViewport(0,0,window.width,window.height)
gl.glClearColor(0, 0, 0, 1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
prog_visualize['u_data'] = Density.Ping.texture
prog_visualize['u_shape'] = Density.Ping.texture.shape[1], Density.Ping.texture.shape[0]
prog_visualize['u_kernel'] = data.get("spatial-filters.npy")
prog_visualize["Sampler"] = Density.Ping.texture
prog_visualize["FillColor"] = 0.95, 0.925, 1.00
prog_visualize["Scale"] = 1.0/window.width, 1.0/window.height
prog_visualize.draw(gl.GL_TRIANGLE_STRIP)
app.run()
|
examples/ordering.py | wyfo/apimodel | 118 | 11106362 | import json
from dataclasses import dataclass, field
from datetime import date
from apischema import order, serialize, serialized
@order({"trigram": order(-1)})
@dataclass
class User:
firstname: str
lastname: str
address: str = field(metadata=order(after="birthdate"))
birthdate: date = field()
@serialized
@property
def trigram(self) -> str:
return (self.firstname[0] + self.lastname[0] + self.lastname[-1]).lower()
@serialized(order=order(before=birthdate))
@property
def age(self) -> int:
age = date.today().year - self.birthdate.year
if age > 0 and (date.today().month, date.today().day) < (
self.birthdate.month,
self.birthdate.day,
):
age -= 1
return age
user = User("Harry", "Potter", "London", date(1980, 7, 31))
dump = """{
"trigram": "hpr",
"firstname": "Harry",
"lastname": "Potter",
"age": 41,
"birthdate": "1980-07-31",
"address": "London"
}"""
assert json.dumps(serialize(User, user), indent=4) == dump
|
packages/typescript/test/devmode_consumer/devmode_consumer.bzl | Aghassi/rules_nodejs | 645 | 11106375 |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of a rule that requires es2015 (devmode) inputs.
"""
load("@build_bazel_rules_nodejs//:providers.bzl", "JSNamedModuleInfo")
def _devmode_consumer(ctx):
sources_depsets = []
for dep in ctx.attr.deps:
if JSNamedModuleInfo in dep:
sources_depsets.append(dep[JSNamedModuleInfo].sources)
sources = depset(transitive = sources_depsets)
return [DefaultInfo(
files = sources,
runfiles = ctx.runfiles(transitive_files = sources),
)]
devmode_consumer = rule(
implementation = _devmode_consumer,
attrs = {
"deps": attr.label_list(),
},
)
|
Chapter07/06_cartpole.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 621 | 11106381 |
import gym
import ptan
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
HIDDEN_SIZE = 128
BATCH_SIZE = 16
TGT_NET_SYNC = 10
GAMMA = 0.9
REPLAY_SIZE = 1000
LR = 1e-3
EPS_DECAY = 0.99
class Net(nn.Module):
def __init__(self, obs_size, hidden_size, n_actions):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_actions)
)
def forward(self, x):
return self.net(x.float())
@torch.no_grad()
def unpack_batch(batch, net, gamma):
states = []
actions = []
rewards = []
done_masks = []
last_states = []
for exp in batch:
states.append(exp.state)
actions.append(exp.action)
rewards.append(exp.reward)
done_masks.append(exp.last_state is None)
if exp.last_state is None:
last_states.append(exp.state)
else:
last_states.append(exp.last_state)
states_v = torch.tensor(states)
actions_v = torch.tensor(actions)
rewards_v = torch.tensor(rewards)
last_states_v = torch.tensor(last_states)
last_state_q_v = net(last_states_v)
best_last_q_v = torch.max(last_state_q_v, dim=1)[0]
best_last_q_v[done_masks] = 0.0
return states_v, actions_v, best_last_q_v * gamma + rewards_v
if __name__ == "__main__":
env = gym.make("CartPole-v0")
obs_size = env.observation_space.shape[0]
n_actions = env.action_space.n
net = Net(obs_size, HIDDEN_SIZE, n_actions)
tgt_net = ptan.agent.TargetNet(net)
selector = ptan.actions.ArgmaxActionSelector()
selector = ptan.actions.EpsilonGreedyActionSelector(
epsilon=1, selector=selector)
agent = ptan.agent.DQNAgent(net, selector)
exp_source = ptan.experience.ExperienceSourceFirstLast(
env, agent, gamma=GAMMA)
buffer = ptan.experience.ExperienceReplayBuffer(
exp_source, buffer_size=REPLAY_SIZE)
optimizer = optim.Adam(net.parameters(), LR)
step = 0
episode = 0
solved = False
while True:
step += 1
buffer.populate(1)
for reward, steps in exp_source.pop_rewards_steps():
episode += 1
print("%d: episode %d done, reward=%.3f, epsilon=%.2f" % (
step, episode, reward, selector.epsilon))
solved = reward > 150
if solved:
print("Congrats!")
break
if len(buffer) < 2*BATCH_SIZE:
continue
batch = buffer.sample(BATCH_SIZE)
states_v, actions_v, tgt_q_v = unpack_batch(
batch, tgt_net.target_model, GAMMA)
optimizer.zero_grad()
q_v = net(states_v)
q_v = q_v.gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
loss_v = F.mse_loss(q_v, tgt_q_v)
loss_v.backward()
optimizer.step()
selector.epsilon *= EPS_DECAY
if step % TGT_NET_SYNC == 0:
tgt_net.sync()
|
rastervision_aws_batch/rastervision/aws_batch/__init__.py | theoway/raster-vision | 1,577 | 11106452 | # flake8: noqa
import rastervision.pipeline
from rastervision.aws_batch.aws_batch_runner import *
def register_plugin(registry):
registry.set_plugin_version('rastervision.aws_batch', 0)
registry.set_plugin_aliases('rastervision.aws_batch',
['rastervision2.aws_batch'])
registry.add_runner(AWS_BATCH, AWSBatchRunner)
registry.add_rv_config_schema(AWS_BATCH, [
'gpu_job_queue', 'gpu_job_def', 'cpu_job_queue', 'cpu_job_def',
'attempts'
])
|
trigger/twister2.py | jccardonar/trigger | 380 | 11106456 |
# -*- coding: utf-8 -*-
"""
Login and basic command-line interaction support using the Twisted asynchronous
I/O framework. The Trigger Twister is just like the Mersenne Twister, except
not at all.
"""
import fcntl
import os
import re
import signal
import struct
import sys
import tty
from copy import copy
from collections import deque
from crochet import wait_for, run_in_reactor, setup, EventLoop
setup()
from twisted.conch.ssh import session, common, transport
from twisted.conch.ssh.channel import SSHChannel
from twisted.conch.endpoints import (SSHCommandClientEndpoint,
_NewConnectionHelper,
_ExistingConnectionHelper,
_CommandTransport, TCP4ClientEndpoint,
connectProtocol,
_UserAuth,
_ConnectionReady)
from twisted.internet import defer, protocol, reactor, threads
from twisted.internet.defer import CancelledError
from twisted.internet.task import LoopingCall
from twisted.protocols.policies import TimeoutMixin
from twisted.python import log
from trigger.conf import settings
from trigger import tacacsrc, exceptions
from trigger.twister import is_awaiting_confirmation, has_ioslike_error, TriggerSSHUserAuth
from twisted.internet import reactor
@run_in_reactor
def generate_endpoint(device):
"""Generate Trigger endpoint for a given device.
The purpose of this function is to generate endpoint clients for use by a `~trigger.netdevices.NetDevice` object.
:param device: `~trigger.netdevices.NetDevice` object
"""
creds = tacacsrc.get_device_password(device.nodeName)
return TriggerSSHShellClientEndpointBase.newConnection(
reactor, creds.username, device, password=creds.password
)
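# --- Hedged usage sketch (not part of the original module) ---
# generate_endpoint is wrapped with crochet's @run_in_reactor, so a caller in
# ordinary blocking code receives an EventualResult and unwraps it with
# .wait(). The device lookup and the timeout value below are illustrative
# assumptions, not Trigger-mandated usage:
#
#     from trigger.netdevices import NetDevices
#     device = NetDevices().find('edge-router1')  # hypothetical device name
#     endpoint = generate_endpoint(device).wait(timeout=30)
#     # `endpoint` is a TriggerSSHShellClientEndpointBase ready for .connect()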
class SSHSessionAddress(object):
"""This object represents an endpoint's session details.
This object would typically be loaded as follows:
:Example:
    >>> sess = SSHSessionAddress(
    ...     server="192.168.127.12", username="cisco", command="")
    We pass an empty string for command, as Cisco devices typically do not support bash.
"""
def __init__(self, server, username, command):
self.server = server
self.username = username
self.command = command
class _TriggerShellChannel(SSHChannel):
"""This is the Trigger subclassed Channel object.
"""
name = b'session'
def __init__(self, creator, command, protocolFactory, commandConnected, incremental,
with_errors, prompt_pattern, timeout, command_interval):
SSHChannel.__init__(self)
self._creator = creator
self._protocolFactory = protocolFactory
self._command = command
self._commandConnected = commandConnected
self.incremental = incremental
self.with_errors = with_errors
self.prompt = prompt_pattern
self.timeout = timeout
self.command_interval = command_interval
self._reason = None
def openFailed(self, reason):
"""Channel failed handler."""
self._commandConnected.errback(reason)
def channelOpen(self, ignored):
"""Channel opened handler.
        Once the channel is opened, set up the terminal environment and signal
        the endpoint to load the shell subsystem.
"""
pr = session.packRequest_pty_req(os.environ['TERM'],
self._get_window_size(), '')
self.conn.sendRequest(self, 'pty-req', pr)
command = self.conn.sendRequest(
self, 'shell', '', wantReply=True)
# signal.signal(signal.SIGWINCH, self._window_resized)
command.addCallbacks(self._execSuccess, self._execFailure)
def _window_resized(self, *args):
"""Triggered when the terminal is rezied."""
win_size = self._get_window_size()
new_size = win_size[1], win_size[0], win_size[2], win_size[3]
self.conn.sendRequest(self, 'window-change',
struct.pack('!4L', *new_size))
def _get_window_size(self):
"""Measure the terminal."""
stdin_fileno = sys.stdin.fileno()
winsz = fcntl.ioctl(stdin_fileno, tty.TIOCGWINSZ, '12345678')
return struct.unpack('4H', winsz)
def _execFailure(self, reason):
"""Callback for when the exec command fails.
"""
self._commandConnected.errback(reason)
def _execSuccess(self, ignored):
"""Callback for when the exec command succees.
"""
self._protocol = self._protocolFactory.buildProtocol(
SSHSessionAddress(
self.conn.transport.transport.getPeer(),
self.conn.transport.creator.username,
self._command
))
self._bind_protocol_data()
self._protocol.makeConnection(self)
self._commandConnected.callback(self._protocol)
def _bind_protocol_data(self):
"""Helper method to bind protocol related attributes to the channel.
"""
# This was a string before, now it's a NetDevice.
self._protocol.device = self.conn.transport.creator.device or None
# FIXME(jathan): Is this potentially non-thread-safe?
self._protocol.startup_commands = copy(
self._protocol.device.startup_commands
)
self._protocol.incremental = self.incremental or None
self._protocol.prompt = self.prompt or None
self._protocol.with_errors = self.with_errors or None
self._protocol.timeout = self.timeout or None
self._protocol.command_interval = self.command_interval or None
def dataReceived(self, data):
"""Callback for when data is received.
Once data is received in the channel we defer to the protocol level dataReceived method.
"""
self._protocol.dataReceived(data)
# SSHChannel.dataReceived(self, data)
class _TriggerUserAuth(_UserAuth):
"""Perform user authentication over SSH."""
# The preferred order in which SSH authentication methods are tried.
preferredOrder = settings.SSH_AUTHENTICATION_ORDER
def getPassword(self, prompt=None):
"""Send along the password."""
log.msg('Performing password authentication', debug=True)
return defer.succeed(self.password)
def getGenericAnswers(self, name, information, prompts):
"""
Send along the password when authentication mechanism is not 'password'
This is most commonly the case with 'keyboard-interactive', which even
when configured within self.preferredOrder, does not work using default
getPassword() method.
"""
log.msg('Performing interactive authentication', debug=True)
log.msg('Prompts: %r' % prompts, debug=True)
        # The response must always be a sequence, and its length must match
        # that of the prompts list
response = [''] * len(prompts)
for idx, prompt_tuple in enumerate(prompts):
prompt, echo = prompt_tuple # e.g. [('Password: ', False)]
if 'assword' in prompt:
log.msg("Got password prompt: %r, sending password!" % prompt,
debug=True)
response[idx] = self.password
return defer.succeed(response)
def ssh_USERAUTH_FAILURE(self, packet):
"""
An almost exact duplicate of SSHUserAuthClient.ssh_USERAUTH_FAILURE
modified to forcefully disconnect. If we receive authentication
failures, instead of looping until the server boots us and performing a
sendDisconnect(), we raise a `~trigger.exceptions.LoginFailure` and
call loseConnection().
See the base docstring for the method signature.
"""
canContinue, partial = common.getNS(packet)
partial = ord(partial)
log.msg('Previous method: %r ' % self.lastAuth, debug=True)
# If the last method succeeded, track it. If network devices ever start
# doing second-factor authentication this might be useful.
if partial:
self.authenticatedWith.append(self.lastAuth)
# If it failed, track that too...
else:
log.msg('Previous method failed, skipping it...', debug=True)
self.authenticatedWith.append(self.lastAuth)
def orderByPreference(meth):
"""
Invoked once per authentication method in order to extract a
comparison key which is then used for sorting.
@param meth: the authentication method.
@type meth: C{str}
@return: the comparison key for C{meth}.
@rtype: C{int}
"""
if meth in self.preferredOrder:
return self.preferredOrder.index(meth)
else:
# put the element at the end of the list.
return len(self.preferredOrder)
canContinue = sorted([meth for meth in canContinue.split(',')
if meth not in self.authenticatedWith],
key=orderByPreference)
log.msg('Can continue with: %s' % canContinue)
log.msg('Already tried: %s' % self.authenticatedWith, debug=True)
return self._cbUserauthFailure(None, iter(canContinue))
def _cbUserauthFailure(self, result, iterator):
"""Callback for ssh_USERAUTH_FAILURE"""
if result:
return
try:
method = iterator.next()
except StopIteration:
msg = (
'No more authentication methods available.\n'
'Tried: %s\n'
'If not using ssh-agent w/ public key, make sure '
'SSH_AUTH_SOCK is not set and try again.\n'
% (self.preferredOrder,)
)
self.transport.factory.err = exceptions.LoginFailure(msg)
self.transport.loseConnection()
else:
d = defer.maybeDeferred(self.tryAuth, method)
d.addCallback(self._cbUserauthFailure, iterator)
return d
class _TriggerCommandTransport(_CommandTransport):
def connectionMade(self):
"""
Once the connection is up, set the ciphers but don't do anything else!
"""
self.currentEncryptions = transport.SSHCiphers(
'none', 'none', 'none', 'none'
)
self.currentEncryptions.setKeys('', '', '', '', '', '')
# FIXME(jathan): Make sure that this isn't causing a regression to:
# https://github.com/trigger/trigger/pull/198
def dataReceived(self, data):
"""
        Explicitly override version detection for edge cases where "SSH-"
isn't on the first line of incoming data.
"""
# Store incoming data in a local buffer until we've detected the
# presence of 'SSH-', then handover to default .dataReceived() for
# version banner processing.
if not hasattr(self, 'my_buf'):
self.my_buf = ''
self.my_buf = self.my_buf + data
preVersion = self.gotVersion
# One extra loop should be enough to get the banner to come through.
if not self.gotVersion and b'SSH-' not in self.my_buf:
return
# This call should populate the SSH version and carry on as usual.
_CommandTransport.dataReceived(self, data)
# We have now seen the SSH version in the banner.
# signal that the connection has been made successfully.
if self.gotVersion and not preVersion:
_CommandTransport.connectionMade(self)
def connectionSecure(self):
"""
When the connection is secure, start the authentication process.
"""
self._state = b'AUTHENTICATING'
command = _ConnectionReady(self.connectionReady)
self._userauth = _TriggerUserAuth(self.creator.username, command)
self._userauth.password = <PASSWORD>
if self.creator.keys:
self._userauth.keys = list(self.creator.keys)
if self.creator.agentEndpoint is not None:
d = self._userauth.connectToAgent(self.creator.agentEndpoint)
else:
d = defer.succeed(None)
def maybeGotAgent(ignored):
self.requestService(self._userauth)
d.addBoth(maybeGotAgent)
class _TriggerSessionTransport(_TriggerCommandTransport):
def verifyHostKey(self, hostKey, fingerprint):
hostname = self.creator.hostname
ip = self.transport.getPeer().host
self._state = b'SECURING'
return defer.succeed(1)
class _NewTriggerConnectionHelperBase(_NewConnectionHelper):
"""
Return object used for establishing an async session rather than executing
a single command.
"""
def __init__(self, reactor, device, port, username, keys, password,
agentEndpoint, knownHosts, ui):
self.reactor = reactor
self.device = device
self.hostname = device.nodeName
self.port = port
self.username = username
self.keys = keys
self.password = password
self.agentEndpoint = agentEndpoint
if knownHosts is None:
knownHosts = self._knownHosts()
self.knownHosts = knownHosts
self.ui = ui
def secureConnection(self):
protocol = _TriggerSessionTransport(self)
ready = protocol.connectionReady
sshClient = TCP4ClientEndpoint(self.reactor, self.hostname, self.port)
d = connectProtocol(sshClient, protocol)
d.addCallback(lambda ignored: ready)
return d
class TriggerEndpointClientFactory(protocol.Factory):
"""
Factory for all clients. Subclass me.
"""
def __init__(self, creds=None, init_commands=None):
self.creds = tacacsrc.validate_credentials(creds)
self.results = []
self.err = None
# Setup and run the initial commands
if init_commands is None:
init_commands = [] # We need this to be a list
self.init_commands = init_commands
log.msg('INITIAL COMMANDS: %r' % self.init_commands, debug=True)
self.initialized = False
def clientConnectionFailed(self, connector, reason):
"""Do this when the connection fails."""
log.msg('Client connection failed. Reason: %s' % reason)
self.d.errback(reason)
def clientConnectionLost(self, connector, reason):
"""Do this when the connection is lost."""
log.msg('Client connection lost. Reason: %s' % reason)
if self.err:
log.msg('Got err: %r' % self.err)
# log.err(self.err)
self.d.errback(self.err)
else:
log.msg('Got results: %r' % self.results)
self.d.callback(self.results)
def stopFactory(self):
# IF we're out of channels, shut it down!
log.msg('All done!')
def _init_commands(self, protocol):
"""
Execute any initial commands specified.
:param protocol: A Protocol instance (e.g. action) to which to write
the commands.
"""
if not self.initialized:
log.msg('Not initialized, sending init commands', debug=True)
for next_init in self.init_commands:
log.msg('Sending: %r' % next_init, debug=True)
protocol.write(next_init + '\r\n')
else:
self.initialized = True
def connection_success(self, conn, transport):
log.msg('Connection success.')
self.conn = conn
self.transport = transport
log.msg('Connection information: %s' % self.transport)
class TriggerSSHShellClientEndpointBase(SSHCommandClientEndpoint):
"""
Base class for SSH endpoints.
Subclass me when you want to create a new ssh client.
"""
@classmethod
def newConnection(cls, reactor, username, device, keys=None, password=<PASSWORD>,
port=22, agentEndpoint=None, knownHosts=None, ui=None):
helper = _NewTriggerConnectionHelperBase(
reactor, device, port, username, keys, password, agentEndpoint,
knownHosts, ui
)
return cls(helper)
@classmethod
def existingConnection(cls, connection):
"""Overload stock existinConnection to not require ``commands``."""
helper = _ExistingConnectionHelper(connection)
return cls(helper)
def __init__(self, creator):
self._creator = creator
def _executeCommand(self, connection, protocolFactory, command, incremental,
with_errors, prompt_pattern, timeout, command_interval):
"""Establish the session on a given endpoint.
        For IOS-like devices this is normally just an empty string.
"""
commandConnected = defer.Deferred()
def disconnectOnFailure(passthrough):
# Close the connection immediately in case of cancellation, since
# that implies user wants it gone immediately (e.g. a timeout):
immediate = passthrough.check(CancelledError)
self._creator.cleanupConnection(connection, immediate)
return passthrough
commandConnected.addErrback(disconnectOnFailure)
channel = _TriggerShellChannel(
self._creator, command, protocolFactory, commandConnected, incremental,
with_errors, prompt_pattern, timeout, command_interval)
connection.openChannel(channel)
self.connected = True
return commandConnected
def connect(self, factory, command='', incremental=None,
with_errors=None, prompt_pattern=None, timeout=0,
command_interval=1):
"""Method to initiate SSH connection to device.
:param factory: Trigger factory responsible for setting up connection
:type factory: `~trigger.twister2.TriggerEndpointClientFactory`
"""
d = self._creator.secureConnection()
d.addCallback(self._executeCommand, factory, command, incremental,
with_errors, prompt_pattern, timeout, command_interval)
return d
class IoslikeSendExpect(protocol.Protocol, TimeoutMixin):
"""
Action for use with TriggerTelnet as a state machine.
Take a list of commands, and send them to the device until we run out or
one errors. Wait for a prompt after each.
"""
def __init__(self):
self.device = None
self.commands = []
self.commanditer = iter(self.commands)
self.connected = False
self.disconnect = False
self.initialized = False
self.startup_commands = []
        # FIXME(tom): This is hard-coded and should be set by Trigger settings
self.timeout = 10
self.on_error = defer.Deferred()
self.todo = deque()
self.done = None
self.doneLock = defer.DeferredLock()
def connectionMade(self):
"""Do this when we connect."""
self.connected = True
self.finished = defer.Deferred()
self.results = self.factory.results = []
self.data = ''
log.msg('[%s] connectionMade, data: %r' % (self.device, self.data))
# self.factory._init_commands(self)
def connectionLost(self, reason):
self.finished.callback(None)
# Don't call _send_next, since we expect to see a prompt, which
# will kick off initialization.
def _schedule_commands(self, results, commands):
"""Schedule commands onto device loop.
This is the actual routine to schedule a set of commands onto a device.
:param results: Typical twisted results deferred
:type results: twisted.internet.defer
:param commands: List containing commands to schedule onto device loop.
:type commands: list
"""
d = defer.Deferred()
self.todo.append(d)
# Schedule next command to run after the previous
# has finished.
if self.done and self.done.called is False:
self.done.addCallback(
self._schedule_commands,
commands
)
self.done = d
return d
# First iteration, setup the previous results deferred.
if not results and self.done is None:
self.done = defer.Deferred()
self.done.callback(None)
# Either initial state or we are ready to execute more commands.
if results or self.done is None or self.done.called:
log.msg("SCHEDULING THE FOLLOWING {0} :: {1} WAS PREVIOUS RESULTS".format( commands, self.done))
self.commands = commands
self.commanditer = iter(commands)
self._send_next()
self.done = d
# Each call must return a deferred.
return d
def add_commands(self, commands, on_error):
"""Add commands to abstract list of outstanding commands to execute
The public method for `~trigger.netdevices.NetDevice` to use for appending more commands
onto the device loop.
        :param commands: A list of commands to schedule onto the device
:type commands: list
:param on_error: Error handler
:type on_error: func
"""
# Exception handler to be used in case device throws invalid command warning.
self.on_error.addCallback(on_error)
d = self.doneLock.run(self._schedule_commands, None, commands)
return d
def dataReceived(self, bytes):
"""Do this when we get data."""
log.msg('[%s] BYTES: %r' % (self.device, bytes))
        self.data += bytes
        # See if the prompt matches, and if it doesn't, see if it is waiting
        # for more input (like a [y/n] prompt), and continue; otherwise return
        # None.
m = self.prompt.search(self.data)
if not m:
# If the prompt confirms set the index to the matched bytes,
if is_awaiting_confirmation(self.data):
log.msg('[%s] Got confirmation prompt: %r' % (self.device,
self.data))
prompt_idx = self.data.find(bytes)
else:
return None
else:
# Or just use the matched regex object...
prompt_idx = m.start()
result = self.data[:prompt_idx]
# Trim off the echoed-back command. This should *not* be necessary
# since the telnet session is in WONT ECHO. This is confirmed with
# a packet trace, and running self.transport.dont(ECHO) from
# connectionMade() returns an AlreadyDisabled error. What's up?
log.msg('[%s] result BEFORE: %r' % (self.device, result))
result = result[result.find('\n')+1:]
log.msg('[%s] result AFTER: %r' % (self.device, result))
if self.initialized:
self.results.append(result)
if has_ioslike_error(result) and not self.with_errors:
log.msg('[%s] Command failed: %r' % (self.device, result))
self.factory.err = exceptions.IoslikeCommandFailure(result)
else:
if self.command_interval:
log.msg('[%s] Waiting %s seconds before sending next command' %
(self.device, self.command_interval))
reactor.callLater(self.command_interval, self._send_next)
def _send_next(self):
"""Send the next command in the stack."""
self.data = ''
self.resetTimeout()
if not self.initialized:
log.msg('[%s] Not initialized, sending startup commands' %
self.device)
if self.startup_commands:
next_init = self.startup_commands.pop(0)
log.msg('[%s] Sending initialize command: %r' % (self.device,
next_init))
self.transport.write(next_init.strip() + self.device.delimiter)
return None
else:
log.msg('[%s] Successfully initialized for command execution' %
self.device)
self.initialized = True
if self.incremental:
self.incremental(self.results)
try:
next_command = self.commanditer.next()
except StopIteration:
log.msg('[%s] No more commands to send, moving on...' %
self.device)
if self.todo:
payload = list(reversed(self.results))[:len(self.commands)]
payload.reverse()
d = self.todo.pop()
d.callback(payload)
return d
else:
return
if next_command is None:
self.results.append(None)
self._send_next()
else:
log.msg('[%s] Sending command %r' % (self.device, next_command))
self.transport.write(next_command + '\n')
def timeoutConnection(self):
"""Do this when we timeout."""
log.msg('[%s] Timed out while sending commands' % self.device)
self.factory.err = exceptions.CommandTimeout('Timed out while '
'sending commands')
self.transport.loseConnection()
|
auctioning_platform/shipping/shipping/application/queries/package.py | nhdinh/smp-modulith | 299 | 11106490 | import abc
from dataclasses import dataclass
from typing import Optional
@dataclass
class PackageDto:
...
class GetNextPackage(abc.ABC):
@abc.abstractmethod
def query(self) -> Optional[PackageDto]:
pass
|
sdk/python/pulumi_azure/avs/_inputs.py | henriktao/pulumi-azure | 109 | 11106526 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'PrivateCloudCircuitArgs',
'PrivateCloudManagementClusterArgs',
]
@pulumi.input_type
class PrivateCloudCircuitArgs:
def __init__(__self__, *,
express_route_id: Optional[pulumi.Input[str]] = None,
express_route_private_peering_id: Optional[pulumi.Input[str]] = None,
primary_subnet_cidr: Optional[pulumi.Input[str]] = None,
secondary_subnet_cidr: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] express_route_id: The ID of the ExpressRoute Circuit.
:param pulumi.Input[str] express_route_private_peering_id: The ID of the ExpressRoute Circuit private peering.
:param pulumi.Input[str] primary_subnet_cidr: The CIDR of the primary subnet.
:param pulumi.Input[str] secondary_subnet_cidr: The CIDR of the secondary subnet.
"""
if express_route_id is not None:
pulumi.set(__self__, "express_route_id", express_route_id)
if express_route_private_peering_id is not None:
pulumi.set(__self__, "express_route_private_peering_id", express_route_private_peering_id)
if primary_subnet_cidr is not None:
pulumi.set(__self__, "primary_subnet_cidr", primary_subnet_cidr)
if secondary_subnet_cidr is not None:
pulumi.set(__self__, "secondary_subnet_cidr", secondary_subnet_cidr)
@property
@pulumi.getter(name="expressRouteId")
def express_route_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the ExpressRoute Circuit.
"""
return pulumi.get(self, "express_route_id")
@express_route_id.setter
def express_route_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "express_route_id", value)
@property
@pulumi.getter(name="expressRoutePrivatePeeringId")
def express_route_private_peering_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the ExpressRoute Circuit private peering.
"""
return pulumi.get(self, "express_route_private_peering_id")
@express_route_private_peering_id.setter
def express_route_private_peering_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "express_route_private_peering_id", value)
@property
@pulumi.getter(name="primarySubnetCidr")
def primary_subnet_cidr(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR of the primary subnet.
"""
return pulumi.get(self, "primary_subnet_cidr")
@primary_subnet_cidr.setter
def primary_subnet_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_subnet_cidr", value)
@property
@pulumi.getter(name="secondarySubnetCidr")
def secondary_subnet_cidr(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR of the secondary subnet.
"""
return pulumi.get(self, "secondary_subnet_cidr")
@secondary_subnet_cidr.setter
def secondary_subnet_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_subnet_cidr", value)
@pulumi.input_type
class PrivateCloudManagementClusterArgs:
def __init__(__self__, *,
size: pulumi.Input[int],
hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[int]] = None):
"""
        :param pulumi.Input[int] size: The size of the management cluster. This field cannot be updated together with `internet_connection_enabled`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: A list of hosts in the management cluster.
:param pulumi.Input[int] id: The ID of the management cluster.
"""
pulumi.set(__self__, "size", size)
if hosts is not None:
pulumi.set(__self__, "hosts", hosts)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def size(self) -> pulumi.Input[int]:
"""
        The size of the management cluster. This field cannot be updated together with `internet_connection_enabled`.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: pulumi.Input[int]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of hosts in the management cluster.
"""
return pulumi.get(self, "hosts")
@hosts.setter
def hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "hosts", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the management cluster.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "id", value)
|
examples/whatsapp.py | Julian-O/pyadb | 105 | 11106555 |
#!/usr/bin/env python
"""
Simple pyadb example to retrieve whatsapp
databases from the selected device.
~/pyadb/example$ python whatsapp.py
[+] Using PyADB version 0.1.1
[+] Verifying ADB path... OK
[+] ADB Version: 1.0.29
[+] Restarting ADB server...
[+] Detecting devices... OK
0: XXXXXXXXXXX
[+] Using "XXXXXXXXXXX" as target device
[+] Looking for 'su' binary: /system/xbin/su
[+] Checking if 'su' binary can give root access:
- Yes
[+] Copying Whatsapp data folder
- Local destination [~/pyadb/example]:
[+] Creating remote tar file: /sdcard/whatsapp_hpBSiSnPYI.tar
- Command: /system/xbin/su -c 'tar -c /data/data/com.whatsapp -f /sdcard/whatsapp_hpBSiSnPYI.tar'
[+] Retrieving remote file: /sdcard/whatsapp_hpBSiSnPYI.tar
[+] Removing remote file: /sdcard/whatsapp_hpBSiSnPYI.tar
[+] Remote Whatsapp files from device memory are now locally accessible at "~/pyadb/example/databases/whatsapp_hpBSiSnPYI.tar"
[+] Looking for 'tar' binary... /system/xbin/tar
[+] Creating remote tar file: /sdcard/whatsapp_djsAFumAGW.tar
+ Command: /system/xbin/tar -c /sdcard/WhatsApp -f /sdcard/whatsapp_djsAFumAGW.tar
[+] Remote tar file created: /sdcard/whatsapp_djsAFumAGW.tar
- Local destination [~/pyadb/example]:
[+] Retrieving remote file: /sdcard/whatsapp_djsAFumAGW.tar...
[+] WhatsApp SDcard folder is now available in tar file: ~/pyadb/example/whatsapp_djsAFumAGW.tar
~/pyadb/example$
"""
import errno
import logging
import random
import string
from os import getcwd
from os import mkdir
from os.path import basename
from sys import stdin, exit
try:
from pyadb import ADB
except ImportError as e:
print("[f] Required module missing. %s" % e.args[0])
exit(-1)
def get_whatsapp_root(adb, supath):
tmp = getcwd()
print("\n[+] Copying Whatsapp data folder")
print("\t- Local destination [%s]: " % tmp, end=' ')
destination = stdin.readline().strip()
if destination == '':
destination = tmp
if not destination[-1:] == '/':
destination += '/'
destination += 'databases/'
try:
mkdir(destination)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
return False, e.args
tarname = '/sdcard/whatsapp_' + ''.join(
random.choice(string.ascii_letters) for _ in range(10)) + '.tar'
print("\n[+] Creating remote tar file: %s" % tarname)
cmd = "%s -c 'tar -c /data/data/com.whatsapp -f %s'" % (supath, tarname)
print("\t- Command: %s" % cmd)
adb.shell_command(cmd)
print("\n[+] Retrieving remote file: %s" % tarname)
adb.get_remote_file(tarname, destination + '/' + basename(tarname))
print("[+] Removing remote file: %s" % tarname)
cmd = 'su -c \'rm %s\'' % tarname
adb.shell_command(cmd)
print(
"\n[+] Remote Whatsapp files from device memory are now "
"locally accessible at \"%s%s\"\n" % (
destination, basename(tarname)))
get_whatsapp_nonroot(adb)
return True, ""
def get_sdcard_iter(adb, rpath=None, lpath=None):
"""
When 'tar' binary is not available, this method get the whole content of
the remote WhatsApp directory from the sdcard
This does NOT return an iter object, despite its name.
"""
if lpath is None:
return False, "Local path not provided"
maindir = "/sdcard/WhatsApp/"
if rpath is None:
rdir = maindir
else:
rdir = rpath
res = adb.shell_command("ls -1 \"%s\"" % rdir)
if res == "ls: %s: No such file or directory" % rdir:
return False, "WhatsApp directory does not exists!"
try:
res = res.split('\n')
except AttributeError:
return False, "Directory empty"
for item in res:
if item is None:
continue
item = item.strip()
if item == "":
continue
ftype, _ = adb.shell_command("ls -ld \"%s\"" % (rdir + item))[:1]
# if it is a directory
if ftype == "d":
try:
mkdir(lpath + item)
except Exception:
# ToDo: Limit scope of this except-clause.
pass
get_sdcard_iter(adb, rdir + item + '/', lpath + item + '/')
else: # item is a file
print("\t- Retrieving remote file: %s" % (rdir + item))
adb.get_remote_file(rdir + item, lpath + item)
return True, ""
def create_sdcard_tar(adb, tarpath):
"""
Returns the remote path of the tar file containing the whole WhatsApp
directory from the SDcard
"""
tarname = '/sdcard/whatsapp_' + ''.join(
random.choice(string.ascii_letters) for _ in range(10)) + '.tar'
print("\n[+] Creating remote tar file: %s" % tarname)
cmd = "%s -c /sdcard/WhatsApp -f %s" % (tarpath, tarname)
print("\t+ Command: %s" % cmd)
output, error = adb.shell_command(cmd)
# Ignore error!
output, error = adb.shell_command("ls %s" % tarname)
if error.startswith("ls: %s: No such file or directory" % tarname):
return None
else:
return tarname
def get_destination_path():
"""
Creates and returns the path provided by the user
"""
tmp = getcwd()
print("\t- Local destination [%s]: " % tmp, end=' ')
destination = stdin.readline().strip()
if destination == '':
destination = tmp
if not destination[-1:] == '/':
destination += '/'
try:
mkdir(destination)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
print("\t- ERROR!: ", e.args)
return None
return destination
def get_whatsapp_nonroot(adb):
"""
Method to get the whole WhatsApp directory from the SD card
"""
# look for 'tar' binary
print("[+] Looking for 'tar' binary...", end=' ')
try:
tarpath = adb.find_binary("tar")
print("%s" % tarpath)
except ADB.AdbException as err:
print("Error: %s" % err)
tarpath = None
if tarpath is not None:
wapath = create_sdcard_tar(adb, tarpath)
if wapath is not None:
print("\n[+] Remote tar file created: %s" % wapath)
destination = get_destination_path()
if destination is not None:
print("\n[+] Retrieving remote file: %s..." % wapath)
adb.get_remote_file(wapath, destination + basename(wapath))
adb.shell_command("rm %s" % wapath)
print(
"\n[+] WhatsApp SDcard folder is now available "
"in tar file: %s\n" % (
destination + basename(wapath)))
return
else:
adb.shell_command("rm %s" % wapath)
# get the remote WhatsApp folder from the SDcard (the iterative way)
path = get_destination_path()
if path is None:
print(
"\n[!] Error while retrieving remote WhatsApp SDcard "
"folder: User has provided an invalid path")
return
print("\n[+] Retrieving remote WhatsApp SDcard folder...")
try:
ret = get_sdcard_iter(adb, None, path)
print("\n[+] Remote WhatsApp SDcard folder is now available "
"at: %s" % path)
except ADB.AdbException as error:
print("\n[!] Error while retrieving remote WhatsApp"
"SDcard folder: %s" % error)
return
def main():
logging.basicConfig(level=logging.WARNING)
adb = ADB()
# set ADB path, using a couple of popular addresses.
try:
adb.set_adb_path('~/android-sdk-linux/platform-tools/adb')
except ADB.BadCall:
adb.set_adb_path(r'C:\Android\android-sdk\platform-tools\adb.exe')
print("[+] Using PyADB version %s" % adb.pyadb_version())
# verity ADB path
print("[+] Verifying ADB path...", end='')
if not adb.check_path():
print("ERROR")
exit(-2)
print("OK")
# print ADB Version
print("[+] ADB Version: %s" % adb.get_version())
print("")
# restart server (may be other instances running)
print("[+] Restarting ADB server...")
try:
adb.restart_server()
except Exception as err:
print("\t- ERROR\n", err)
exit(-3)
# get detected devices
while True:
print("[+] Detecting devices...", end=' ')
try:
devices = adb.get_devices()
except adb.PermissionsError:
devices = None
print("You haven't enough permissions!")
exit(-3)
if devices:
print("OK")
break
# no devices connected
print("No devices connected")
print("[+] Waiting for devices...")
adb.wait_for_device()
# this should never be reached
if len(devices) == 0:
print("[+] No devices detected!")
exit(-4)
# show detected devices
i = 0
for dev in devices:
print("\t%d: %s" % (i, dev))
i += 1
    # if more than one device is detected, ask the user to choose one of them
if i > 1:
dev = i + 1
while dev < 0 or dev > int(i - 1):
print("\n[+] Select target device [0-%d]: " % int(i - 1), end=' ')
dev = int(stdin.readline())
else:
dev = 0
# set target device
try:
adb.set_target_device(devices[dev])
except Exception as e:
print("\n[!] Error: " % e)
exit(-5)
print("\n[+] Using \"%s\" as target device" % devices[dev])
# check if 'su' binary is available
print("[+] Looking for 'su' binary: ", end=' ')
try:
supath = adb.find_binary("su")
except ADB.AdbException as err:
if str(err) != "'su' was not found":
print("Error: %s" % err)
exit(-6)
supath = None
if supath is not None:
# 'su' binary has been found
print("[+] Checking if 'su' binary can give root access:")
try:
rootid = adb.shell_command('%s -c id' % supath)
if 'root' in rootid.replace('(', ')').split(')'):
# it can provide root privileges
print("\t- Yes")
get_whatsapp_root(adb, supath)
else:
print("\t- No: %s" % rootid)
get_whatsapp_nonroot(adb)
except adb.AdbException as err:
print("\t- No: %s" % err)
get_whatsapp_nonroot(adb)
else:
print("Not found.")
get_whatsapp_nonroot(adb)
exit(0)
if __name__ == "__main__":
main()
|
detect_secrets/core/upgrades/__init__.py | paulo-sampaio/detect-secrets | 2,212 | 11106584 | """
All modules in this package needs to have the following boilerplate:
```python
from typing import Any
from typing import Dict
def upgrade(baseline: Dict[str, Any]) -> None:
pass
```
These upgrades SHOULD NOT be used to add new plugins, as that will require more information
than can be obtained from the baseline itself.
"""
|
dataset/chunked.py | patarapolw/dataset | 207 | 11106640 |
class ChunkedInsert(object):
"""Batch up insert operations
with ChunkedStorer(my_table) as storer:
table.insert(row)
Rows will be inserted in groups of 1000
"""
def __init__(self, table, chunksize=1000):
self.queue = []
self.fields = set()
self.table = table
self.chunksize = chunksize
def flush(self):
for item in self.queue:
for field in self.fields:
item[field] = item.get(field)
self.table.insert_many(self.queue)
self.queue = []
def insert(self, item):
self.fields.update(item.keys())
self.queue.append(item)
if len(self.queue) >= self.chunksize:
self.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
|
data/scripts/json-format.py | undeadinu/soletta | 266 | 11106648 | #!/usr/bin/env python3
# This file is part of the Soletta (TM) Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import os
import os.path
import sys
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="Input description file in JSON format",
type=argparse.FileType('r'))
parser.add_argument("output",
help="Where to output JSON",
type=str)
args = parser.parse_args()
data = json.load(args.input)
if args.output == '-':
outfile = sys.stdout
else:
if os.path.exists(args.output):
bkp = "%s~" % (args.output,)
try:
os.unlink(bkp)
except FileNotFoundError:
pass
os.rename(args.output, bkp)
outfile = open(args.output, "w")
# Workaround for Python < 3.4
# In those versions a trailing whitespace is added at the end of each line
data = json.dumps(data, indent=True, sort_keys=True).replace(' \n','\n') + "\n";
outfile.write(data)
|
core/feature_extractor.py | jacke121/MBMD | 220 | 11106663 |
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models import feature_map_generators
from nets import mobilenet_v1
import tensorflow as tf
import tensorflow.contrib.slim as slim
import collections
class MobileNetFeaturePyramidExtractor(SSDMobileNetV1FeatureExtractor):
def extract_features(self, preprocessed_inputs, init_extraction=False):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
if init_extraction:
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_head = image_features['Conv2d_13_pointwise']
feature_head = slim.conv2d(
feature_head,
512, [3,3],
stride=1,
padding='SAME',
scope='Conv2d_Append_1x1_256'
)
feature_head = tf.nn.avg_pool(feature_head, strides=[1,1,1,1], ksize=[1,4,4,1],
padding='VALID', )
return feature_head
else:
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
bottomup_features_names = [ 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
num_appended_layers = 0
#appended_channel_num = [512, 256, 256, 256]
appended_channel_num = [512]
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
topdown_features = self._topdown_feature_maps(
image_features,
bottomup_features_names=bottomup_features_names,
num_appended_layers = num_appended_layers,
appended_channel_num = appended_channel_num)
return topdown_features.values()
def _topdown_feature_maps(self, image_features, bottomup_features_names,num_appended_layers=2,
appended_channel_num=256, stride=2, topdown_channel_num = 512):
""" Building a top down feature pyramid.
Args:
image_features: a dictionary of input bottom_up features with layer names being the keys
bottomup_features_names: a list of names of selected bottom_up features, which are combined
with top down features through a lateral connection. The names are sorted from bottom
layers to top layers.
          num_appended_layers: number of layers which are appended to the last bottom up features.
            Each of the appended layers consists of a 3x3 conv2d followed by a batch norm and a ReLU.
            Together with the selected bottom up features, they form the base features of the top down branch.
appended_channel_num: number of channels of output features in appended layers. Could be a scalar or
a list of length num_appended_layers.
stride: stride of the appended layers with respect to the input features.
          topdown_channel_num: number of channels of the output features in the top down branch. Since every
            level of the top down feature pyramid has the same channel number, this should be a scalar. Topdown
            layers are first resized with the nearest neighbor method to the same spatial size as the lateral
            features and then combined with them through element-wise addition. The lateral features are obtained
            by applying a 1x1 conv2d with no nonlinearity to the corresponding bottom up features.
Returns:
topdown_features: An ordered dictionary of the top down feature pyramid.
"""
# if isinstance(appended_channel_num, list) and len(appended_channel_num) != num_appended_layers:
# raise RuntimeError('appened_channel_num should have the length of num_appended_layers')
# append layers
feature_head = image_features[bottomup_features_names[-1]]
appended_features = dict()
appended_features_names = list()
for index in range(num_appended_layers):
if isinstance(appended_channel_num, list):
num_channel = appended_channel_num[index]
else:
num_channel = appended_channel_num
layer_name = 'Append_{}_Conv2d_3x3_{}'.format(index, num_channel)
feature_head = slim.conv2d(
feature_head,
num_channel, [3,3],
stride=stride,
padding='SAME',
scope=layer_name
)
appended_features[layer_name] = feature_head
appended_features_names.append(layer_name)
# top down branch
bottomup_features_names += appended_features_names
image_features.update(appended_features)
topdown_features = list()
topdown_features_names = list()
# init top_down feature
level_ind = len(bottomup_features_names)-1
layer_name = 'TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num)
feature_head = slim.conv2d(
feature_head,
topdown_channel_num, [3, 3],
stride=1,
padding='SAME',
scope=layer_name
)
topdown_features.append(feature_head)
topdown_features_names.append(layer_name)
level_ind -= 1
for bottomup_feature_name in bottomup_features_names[-2::-1]:
layer_name = 'Lateral_{}_Conv2d_1x1_{}'.format(level_ind, topdown_channel_num)
lateral_feature = slim.conv2d(
image_features[bottomup_feature_name],
topdown_channel_num, [1, 1],
padding='SAME',
scope=layer_name)
output_size = lateral_feature.get_shape().as_list()[1:3]
if output_size[0] != feature_head.get_shape().as_list()[1]:
feature_head = tf.image.resize_images(feature_head, output_size,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
feature_head = slim.conv2d(
feature_head,
topdown_channel_num, [3,3],
padding='SAME',
scope='TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num)
)
layer_name = 'TopDown_{}_Add_{}'.format(level_ind, topdown_channel_num)
feature_head += lateral_feature
topdown_features.append(feature_head)
topdown_features_names.append(layer_name)
level_ind -= 1
return collections.OrderedDict(
[(x, y) for (x, y) in zip(topdown_features_names[-1::-1], topdown_features[-1::-1])])
class MobileNetBoxFeatureExtractor(SSDMobileNetV1FeatureExtractor):
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
bottomup_features_names = ['Conv2d_11_pointwise', 'Conv2d_13_pointwise']
num_appended_layers = 4
appended_channel_num = [512, 256, 256, 256]
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
topdown_features = self._topdown_feature_maps(
image_features,
bottomup_features_names=bottomup_features_names,
num_appended_layers=num_appended_layers,
appended_channel_num=appended_channel_num)
return topdown_features.values()
def _topdown_feature_maps(self, image_features, bottomup_features_names, num_appended_layers=2,
appended_channel_num=256, stride=2, topdown_channel_num=256):
""" Building a top down feature pyramid.
Args:
image_features: a dictionary of input bottom_up features with layer names being the keys
bottomup_features_names: a list of names of selected bottom_up features, which are combined
with top down features through a lateral connection. The names are sorted from bottom
layers to top layers.
          num_appended_layers: number of layers which are appended to the last bottom up features.
            Each of the appended layers consists of a 3x3 conv2d followed by a batch norm and a ReLU.
            Together with the selected bottom up features, they form the base features of the top down branch.
appended_channel_num: number of channels of output features in appended layers. Could be a scalar or
a list of length num_appended_layers.
stride: stride of the appended layers with respect to the input features.
          topdown_channel_num: number of channels of the output features in the top down branch. Since every
            level of the top down feature pyramid has the same channel number, this should be a scalar. Topdown
            layers are first resized with the nearest neighbor method to the same spatial size as the lateral
            features and then combined with them through element-wise addition. The lateral features are obtained
            by applying a 1x1 conv2d with no nonlinearity to the corresponding bottom up features.
Returns:
topdown_features: An ordered dictionary of the top down feature pyramid.
"""
if isinstance(appended_channel_num, list) and len(appended_channel_num) != num_appended_layers:
            raise RuntimeError('appended_channel_num should have the length of num_appended_layers')
# append layers
feature_head = image_features[bottomup_features_names[-1]]
appended_features = dict()
appended_features_names = list()
for index in range(num_appended_layers):
if isinstance(appended_channel_num, list):
num_channel = appended_channel_num[index]
else:
num_channel = appended_channel_num
layer_name = 'Append_{}_Conv2d_3x3_{}'.format(index, num_channel)
feature_head = slim.conv2d(
feature_head,
num_channel, [3, 3],
stride=stride,
padding='SAME',
scope=layer_name
)
appended_features[layer_name] = feature_head
appended_features_names.append(layer_name)
# top down branch
bottomup_features_names += appended_features_names
image_features.update(appended_features)
topdown_features = list()
topdown_features_names = list()
# init top_down feature
level_ind = len(bottomup_features_names) - 1
layer_name = 'TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num)
feature_head = slim.conv2d(
feature_head,
topdown_channel_num, [3, 3],
stride=1,
padding='SAME',
scope=layer_name
)
topdown_features.append(feature_head)
topdown_features_names.append(layer_name)
level_ind -= 1
for bottomup_feature_name in bottomup_features_names[-2::-1]:
layer_name = 'Lateral_{}_Conv2d_1x1_{}'.format(level_ind, topdown_channel_num)
lateral_feature = slim.conv2d(
image_features[bottomup_feature_name],
topdown_channel_num, [1, 1],
padding='SAME',
scope=layer_name)
output_size = lateral_feature.get_shape().as_list()[1:3]
if output_size[0] != feature_head.get_shape().as_list()[1]:
feature_head = tf.image.resize_images(feature_head, output_size,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
feature_head = slim.conv2d(
feature_head,
topdown_channel_num, [3, 3],
padding='SAME',
scope='TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num)
)
layer_name = 'TopDown_{}_Add_{}'.format(level_ind, topdown_channel_num)
feature_head += lateral_feature
topdown_features.append(feature_head)
topdown_features_names.append(layer_name)
level_ind -= 1
return collections.OrderedDict(
[(x, y) for (x, y) in zip(topdown_features_names[-1::-1], topdown_features[-1::-1])])
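# ---------------------------------------------------------------------------
# Added illustrative sketch (not part of the original MBMD module): the core of
# _topdown_feature_maps above is a nearest-neighbor upsample of the coarser
# top-down feature followed by an element-wise add with a 1x1 "lateral"
# projection of the bottom-up feature. The NumPy toy below mimics only that
# merge step, with made-up shapes, to document the data flow.
# ---------------------------------------------------------------------------
def _toy_topdown_merge():
    import numpy as np
    topdown = np.random.rand(1, 4, 4, 8)   # coarse top-down feature map
    lateral = np.random.rand(1, 8, 8, 8)   # finer bottom-up feature after a 1x1 conv
    # nearest-neighbor upsample by a factor of 2 (what resize_images does above)
    upsampled = topdown.repeat(2, axis=1).repeat(2, axis=2)
    merged = upsampled + lateral            # element-wise addition
    assert merged.shape == lateral.shape
    return merged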
|
language/casper/augment/casper_converters.py | greck2908/language | 1,199 | 11106670 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converters that take cached retrievals and yield seq2seq examples."""
import collections
import dataclasses
import functools
import random
from absl import logging
from language.casper.augment import casper_formatters
from language.casper.utils import data_types
from language.casper.utils import sample_utils
from language.casper.utils import top_utils
RawExample = data_types.RawExample
AugmentedExample = data_types.AugmentedExample
def _get_frame(funcall, funcall_format):
"""Returns the frame (intent and slot labels) of the function call."""
if funcall_format == "top":
return top_utils.get_frame_top(funcall)
else:
raise ValueError(f"Unknown funcall_format: {funcall_format}")
_CONVERTER_CONFIG_ALIASES = {
"n": "num_samples",
"k": "max_exemplars",
"p": "sample_prob",
}
@dataclasses.dataclass
class ConverterConfig:
"""Config for the ExampleConverters.
Attrs:
num_samples: The number of exemplar lists to generate.
sampler: The sampler for sampling exemplars. Available choices are:
- "uniform": Uniform sampling.
- "geometric": Geometric sampling without replacement; exemplars with
higher ranks have a higher chance of being sampled.
max_exemplars: Maximum number of exemplars in each exemplar list.
sample_prob: Probability for geometric sampling.
"""
num_samples: int = 1
sampler: str = "geometric"
max_exemplars: int = 1
sample_prob: float = 0.5
@classmethod
def from_dict(cls, converter_kwargs):
"""Constructs a ConverterConfig from the given dict."""
# Make a copy
converter_kwargs = dict(converter_kwargs)
# Resolve aliases
for abbr_key, full_key in _CONVERTER_CONFIG_ALIASES.items():
if abbr_key in converter_kwargs:
converter_kwargs[full_key] = converter_kwargs[abbr_key]
del converter_kwargs[abbr_key]
return cls(**converter_kwargs)
def get_sampler(self):
"""Returns the exemplar sampler based on the config."""
if self.sampler == "uniform":
return functools.partial(
sample_utils.uniform_sample, max_num_items=self.max_exemplars)
elif self.sampler == "geometric":
return functools.partial(
sample_utils.geometric_sample,
max_num_items=self.max_exemplars,
sample_prob=self.sample_prob)
else:
raise ValueError(f"Unknown sampler: {self.sampler}")
class BaseExampleConverter:
"""Abstract base class for example converters."""
def __init__(self, retrieval_index, funcall_format,
converter_config,
formatter_config):
"""Constructs a new example converter.
Args:
retrieval_index: The retrieval index.
funcall_format: Format of the output function call or logical form.
converter_config: A ConverterConfig object.
formatter_config: A FormatterConfig object.
"""
self._funcall_format = funcall_format
self._converter_config = converter_config
self._preprocess_example = functools.partial(
casper_formatters.preprocess_example, funcall_format=funcall_format)
self._augment_exemplars = functools.partial(
casper_formatters.augment_exemplars,
funcall_format=funcall_format,
config=formatter_config)
self._process_index(retrieval_index)
# A Counter that can collect arbitrary statistics.
self.stats = collections.Counter()
def _process_index(self, retrieval_index):
"""Preprocesses the retrieval index."""
self._hashed_id_to_exemplar = {}
self._frame_to_hashed_ids = {}
self._hashed_id_to_frame = {}
for example in retrieval_index:
example = self._preprocess_example(example)
hashed_id = example["hashed_id"]
if hashed_id in self._hashed_id_to_exemplar:
# Check for duplicates
existing_entry = self._hashed_id_to_exemplar[hashed_id]
if existing_entry["hashed_id"] != example["hashed_id"]:
raise ValueError(f"Duplicated hashed ID: {hashed_id}")
else:
self._hashed_id_to_exemplar[hashed_id] = example
frame = _get_frame(example["output_str"], self._funcall_format)
self._hashed_id_to_frame[hashed_id] = frame
self._frame_to_hashed_ids.setdefault(frame, []).append(hashed_id)
logging.info("Read %d index entries with %d unique frames.",
len(self._hashed_id_to_exemplar),
len(self._frame_to_hashed_ids))
# List of hashed IDs (for sampling)
self._all_hashed_ids = sorted(self._hashed_id_to_exemplar)
# List of frames (for sampling)
self._all_frames = sorted(self._frame_to_hashed_ids)
def verify_exemplars(self, example):
"""Filters out an example's exemplars that are not in the index.
Args:
example: an Example. The "exemplars" field will be modified in-place.
"""
if "exemplars" not in example:
# No retrieval (for the query_only converter).
return
filtered_hashed_ids = []
filtered_distances = []
for hashed_id, distance in zip(example["exemplars"]["hashed_ids"],
example["exemplars"]["distances"]):
if hashed_id not in self._hashed_id_to_exemplar:
logging.warn("Example %s: Exemplar hashed ID %s is not in the index.",
example["hashed_id"], hashed_id)
else:
filtered_hashed_ids.append(hashed_id)
filtered_distances.append(distance)
example["exemplars"]["hashed_ids"] = filtered_hashed_ids
example["exemplars"]["distances"] = filtered_distances
def convert(self, example):
"""Takes the retrieval results of an example and yields seq2seq examples.
Args:
example: a RawExample.
Yields:
AugmentedExample, one for each seq2seq example.
"""
example = self._preprocess_example(example)
for hashed_ids in self._select_exemplars(example):
exemplars = [
self._hashed_id_to_exemplar[hashed_id] for hashed_id in hashed_ids
]
input_str, output_str = self._augment_exemplars(example, exemplars)
yield AugmentedExample(input_str, output_str)
def _select_exemplars(self, example):
"""Selects lists of exemplars to be augmented to the given example.
This method should be overridden.
Args:
example: a preprocessed RawExample.
Yields:
Lists of hashed_ids of the selected exemplars. Each list will be used to
create a retrieval-augmented example.
"""
raise NotImplementedError
class QueryOnlyConverter(BaseExampleConverter):
"""Generates the example without using the retrievals."""
def _select_exemplars(self, example):
"""Yields a single empty list (no exemplars)."""
for _ in range(self._converter_config.num_samples):
yield []
class AddTopKConverter(BaseExampleConverter):
"""Adds the top K exemplars to the input query."""
def _select_exemplars(self, example):
"""Yields a single list containing the top `max_exemplars` exemplars."""
exemplar_hashed_ids = example["exemplars"]["hashed_ids"]
for _ in range(self._converter_config.num_samples):
yield exemplar_hashed_ids[:self._converter_config.max_exemplars]
class AddSampledKConverter(BaseExampleConverter):
"""Adds K sampled exemplars to the input query."""
def _select_exemplars(self, example):
"""Yields `num_samples` lists with `max_exemplars` sampled exemplars."""
sampler = self._converter_config.get_sampler()
exemplar_hashed_ids = example["exemplars"]["hashed_ids"]
for _ in range(self._converter_config.num_samples):
yield sampler(exemplar_hashed_ids)
class AddOracleKConverter(AddSampledKConverter):
"""Adds K exemplars whose semantic frame matches the target output.
Used for oracle and controllability experiments.
"""
def _select_exemplars(self, example):
"""Yields `num_samples` lists with `max_exemplars` oracle exemplars."""
self.stats["num_examples"] += 1
gold_frame = _get_frame(example["output_str"], self._funcall_format)
candidate_hashed_ids = []
# Find all retrieved exemplars with a matching frame
for hashed_id in example["exemplars"]["hashed_ids"]:
exemplar_frame = self._hashed_id_to_frame[hashed_id]
if exemplar_frame == gold_frame:
candidate_hashed_ids.append(hashed_id)
if not candidate_hashed_ids:
self.stats["no_match_in_retrieved"] += 1
# Find all index entries with a matching frame
extra_candidate_hashed_ids = []
for hashed_id in self._frame_to_hashed_ids.get(gold_frame, []):
if (hashed_id != example["hashed_id"] and
hashed_id not in candidate_hashed_ids):
extra_candidate_hashed_ids.append(hashed_id)
if not extra_candidate_hashed_ids:
self.stats["no_match_in_index"] += 1
candidate_hashed_ids.extend(extra_candidate_hashed_ids)
if not candidate_hashed_ids:
return
# Sample K exemplars
sampler = self._converter_config.get_sampler()
for _ in range(self._converter_config.num_samples):
yield sampler(candidate_hashed_ids)
class AddAdversarialKConverter(AddSampledKConverter):
"""Adds K exemplars with the same frame but different from the target output.
Used for parse guiding analysis.
"""
# Try finding an adversarial frame this number of times before giving up.
_MAX_TRIALS = 100
def _select_exemplars(self, example):
"""Yields `num_samples` lists with `max_exemplars` adversarial exemplars."""
gold_frame = _get_frame(example["output_str"], self._funcall_format)
sampler = self._converter_config.get_sampler()
for _ in range(self._converter_config.num_samples):
# Pick index entries with the same frame but different from the target.
adversarial_frame = None
found_adversarial_frame = False
for _ in range(self._MAX_TRIALS):
adversarial_frame = random.choice(self._all_frames)
if adversarial_frame == gold_frame:
continue
# Ensure that there are enough exemplars.
num_exemplars = len(self._frame_to_hashed_ids[adversarial_frame])
if num_exemplars >= self._converter_config.max_exemplars:
found_adversarial_frame = True
break
if not found_adversarial_frame:
raise RuntimeError("An adversarial frame is not found.")
yield sampler(self._frame_to_hashed_ids[adversarial_frame])
_CONVERTERS = {
"query_only": QueryOnlyConverter,
"add_top": AddTopKConverter,
"add_samp": AddSampledKConverter,
"add_oracle": AddOracleKConverter,
"add_adversarial": AddAdversarialKConverter,
}
def get_converter(converter_name, retrieval_index,
funcall_format, converter_kwargs,
formatter_kwargs):
"""Returns an example converter with the specified name.
Args:
converter_name: Name of the converter.
retrieval_index: An iterable of dicts, where each dict contains information
about an entry in the retrieval index.
funcall_format: Format of the output function call or logical form.
converter_kwargs: Keyword arguments for the converter's initializer. Some
keywords have shorthands as defined in _CONVERTER_CONFIG_ALIASES.
formatter_kwargs: Keyword arguments for the converter's initializer. Some
keywords have shorthands as defined in _FORMATTER_CONFIG_ALIASES.
Returns:
A subclass of BaseExampleConverter.
"""
converter = _CONVERTERS[converter_name]
converter_config = ConverterConfig.from_dict(converter_kwargs)
formatter_config = casper_formatters.FormatterConfig.from_dict(
formatter_kwargs)
return converter(retrieval_index, funcall_format, converter_config,
formatter_config)
|
insights/tests/test_yaml_parser.py | lhuett/insights-core | 121 | 11106721 | import datetime
import pytest
from insights.core import YAMLParser, ParseException, SkipException
from insights.tests import context_wrap
bi_conf_content = """
{"remote_branch": -1, "remote_leaf": -1}
""".strip()
yaml_test_strings = {"""
type: Acquisition
date: 2019-07-09
""": {'type': 'Acquisition', 'date': datetime.date(2019, 7, 9)}, """
- Hesperiidae
- Papilionidae
- Apatelodidae
- Epiplemidae
""": ['Hesperiidae', 'Papilionidae', 'Apatelodidae', 'Epiplemidae']
}
empty_yaml_content = """
---
# This YAML file is empty
""".strip()
wrong_yaml_content = """
"unbalanced blackets: ]["
""".strip()
class FakeYamlParser(YAMLParser):
""" Class for parsing the content of ``branch_info``."""
pass
class MyYamlParser(YAMLParser):
pass
def test_yaml_parser_success():
for ymlstr in yaml_test_strings:
ctx = context_wrap(ymlstr)
assert FakeYamlParser(ctx).data == yaml_test_strings[ymlstr]
def test_yaml_parser_failure():
ctx = context_wrap("boom /")
with pytest.raises(ParseException) as ex:
FakeYamlParser(ctx)
assert "FakeYamlParser" in ex.value.args[0]
def test_settings_yml():
ctx = context_wrap(bi_conf_content)
ctx.content = bi_conf_content
result = FakeYamlParser(ctx)
assert result.data['remote_branch'] == -1
assert result.data['remote_leaf'] == -1
def test_settings_yml_list():
ctx = context_wrap(bi_conf_content)
result = FakeYamlParser(ctx)
assert result.data['remote_branch'] == -1
assert result.data['remote_leaf'] == -1
def test_empty_content():
ctx = context_wrap(empty_yaml_content)
with pytest.raises(SkipException) as ex:
FakeYamlParser(ctx)
assert "There is no data" in ex.value.args[0]
|
benchmarks/distributed/rpc/parameter_server/metrics/MetricBase.py | Hacky-DH/pytorch | 60,067 | 11106774 | <reponame>Hacky-DH/pytorch
from abc import ABC, abstractmethod
class MetricBase(ABC):
def __init__(self, name):
self.name = name
self.start = None
self.end = None
@abstractmethod
def record_start(self):
return
@abstractmethod
def record_end(self):
return
@abstractmethod
def elapsed_time(self):
return
def get_name(self):
return self.name
def get_end(self):
return self.end
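# Added illustrative example (not part of the original benchmark code): a
# minimal wall-clock implementation of the MetricBase interface above.
import time


class _ExampleWallClockMetric(MetricBase):
    def record_start(self):
        self.start = time.perf_counter()

    def record_end(self):
        self.end = time.perf_counter()

    def elapsed_time(self):
        if self.start is None or self.end is None:
            raise RuntimeError("record_start and record_end must be called first")
        return self.end - self.start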
|
src/scripts/reformat_training_data.py | f0r3ns1cat0r/tram | 109 | 11106778 |
"""
Reformat training data into a report export so that it can be imported into TRAM.
The target format is:
{
"sentence-text": ["<technique-id-1>", "<technique-id-2>", "<technique-id-N>"]
}
The original format is:
* negative_data.json - A file with sentences that have no mappings. This is a simple list of strings.
* all_analyzed_reports.json - A file with mappings. Has the following structure:
{
"<attack-technique-description>": ["<sentence-1>", "<sentence-2>", "<sentence-n>"], # OR
"<description-1 description-2 description-N -multi>": { # Can use key.endswith('-multi') to test
"technique_names": [
"description-1",
"description-2",
"description-N",
],
"sentances": [ # Note the word sentences is misspelled as sentances
"<sentence-1>",
"<sentence-2>",
"<sentence-N>"
]
}
}
The target format is defined by tram.serializers.ReportExportSerializer
"""
import json
import logging
import os
import sys
from datetime import datetime
from functools import partial
import django
sys.path.append("src/tram/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tram.settings")
django.setup()
from tram.serializers import ReportExportSerializer # noqa: E402
outfile = "data/training/bootstrap-training-data.json"
logger = logging.getLogger(__name__)
ATTACK_LOOKUP = { # A mapping of attack descriptions to technique IDs
"drive-by compromise": "T1189",
"system information discovery": "T1082",
"new service": "T1543",
"service execution": "T1569.002",
"command-line interface": "T1059", # Maps to: T1059 - Command and Scripting Interpreter
"obfuscated files or information": "T1027",
"custom cryptographic protocol": "T1573", # Maps to: T1573 - Encrypted Channel
"system network configuration discovery": "T1016",
"web shell": "T1505.003",
"application window discovery": "T1010",
"file deletion": "T1070.004", # Technique that became a subtechnique
"standard application layer protocol": "T1071",
"web service": "T1102",
"exfiltration over command and control channel": "T1041",
"fallback channels": "T1008",
"bypass user account control": "T1548.002", # Technique that became a subtechnique
"system time discovery": "T1124",
"deobfuscate/decode files or information": "T1140",
"disabling security tools": "T1562.001", # Maps to: T1562.001 - Impair Defenses: Disable or Modify Tools
"registry run keys / startup folder": "T1547.001",
"remote file copy": "T1105", # Maps to: T1105 - Ingress Tool Transfer
"dll search order hijacking": "T1574.001",
"screen capture": "T1113",
"file and directory discovery": "T1083",
"tor": "S0183", # Software??
"shortcut modification": "T1547.009",
"remote services": "T1021",
"connection proxy": "T1090",
"data encoding": "T1132",
"spearphishing link": "T1566.002",
"spearphishing attachment": "T1566.001",
"arp": "S0099",
"user execution": "T1204",
"process hollowing": "T1055.012",
"execution through api": "T1106", # Maps to T1106 - Native API
"masquerading": "T1036",
"code signing": "T1553.002",
"standard cryptographic protocol": "T1521",
"scripting": "T1059",
"remote system discovery": "T1018",
"credential dumping": "T1003",
"exploitation for client execution": "T1203",
"exploitation for privilege escalation": "T1068",
"security software discovery": "T1518.001",
"data from local system": "T1533",
"remote desktop protocol": "T1021.001",
"data compressed": "T1560", # Maps to T1560 - Archive Collected Data
"software packing": "T1027.002",
"ping": "S0097",
"brute force": "T1110",
"commonly used port": "T1571",
"modify registry": "T1112",
"uncommonly used port": "T1571",
"process injection": "T1055",
"timestomp": "T1070.006",
"windows management instrumentation": "T1047",
"data staged": "T1074",
"rundll32": "T1218.011",
"regsvr32": "T1218.010",
"account discovery": "T1087",
"process discovery": "T1057",
"clipboard data": "T1115",
"binary padding": "T1027.001",
"pass the hash": "T1550.002",
"network service scanning": "T1046",
"system service discovery": "T1007",
"data encrypted": "T1486",
"system network connections discovery": "T1049",
"windows admin shares": "T1021.002",
"system owner/user discovery": "T1033",
"launch agent": "T1543.001",
"permission groups discovery": "T1069",
"indicator removal on host": "T1070",
"input capture": "T1056",
"virtualization/sandbox evasion": "T1497.001",
"dll side-loading": "T1574.002",
"scheduled task": "T1053",
"access token manipulation": "T1134",
"powershell": "T1546.013",
"exfiltration over alternative protocol": "T1048",
"hidden files and directories": "T1564.001",
"network share discovery": "T1135",
"query registry": "T1012",
"credentials in files": "T1552.001",
"audio capture": "T1123",
"video capture": "T1125",
"peripheral device discovery": "T1120",
"spearphishing via service": "T1566.003",
"data encrypted for impact": "T1486",
"data destruction": "T1485",
"template injection": "T1221",
"inhibit system recovery": "T1490",
"create account": "T1136",
"exploitation of remote services": "T1210",
"valid accounts": "T1078",
"dynamic data exchange": "T1559.002",
"office application startup": "T1137",
"data obfuscation": "T1001",
"domain trust discovery": "T1482",
"email collection": "T1114",
"man in the browser": "T1185",
"data from removable media": "T1025",
"bootkit": "T1542.003",
"logon scripts": "T1037",
"execution through module load": "T1129",
"llmnr/nbt-ns poisoning and relay": "T1557.001",
"external remote services": "T1133",
"domain fronting": "T1090.004",
"sid-history injection": "T1134.005",
"service stop": "T1489",
"disk structure wipe": "T1561.002",
"credentials in registry": "T1552.002",
"appinit dlls": "T1546.010",
"exploit public-facing application": "T1190",
"remote access tools": "T1219",
"signed binary proxy execution": "T1218",
"appcert dlls": "T1546.009",
"winlogon helper dll": "T1547.004",
"file permissions modification": "T1222",
"hooking": "T1056.004",
"system firmware": "T1542.001",
"lsass driver": "T1547.008",
"distributed component object model": "T1021.003",
"cmstp": "T1218.003",
"execution guardrails": "T1480",
"component object model hijacking": "T1546.015",
"accessibility features": "T1546.008", # TODO: Help wanted
"keychain": "T1555.001",
"mshta": "T1218.005",
"pass the ticket": "T1550.003",
"kerberoasting": "T1558.003",
"password policy discovery": "T1201",
"local job scheduling": "T1053.001",
"windows remote management": "T1021.006",
"bits jobs": "T1197",
"data from information repositories": "T1213",
"lc_load_dylib addition": "T1546.006",
"histcontrol": "T1562.003",
"file system logical offsets": "T1006",
"regsvcs/regasm": "T1218.009",
"exploitation for credential access": "T1212",
"sudo": "T1548.003",
"installutil": "T1218.004",
"query registry ": "T1012",
"launchctl": "T1569.001",
".bash_profile and .bashrc": "T1546.004",
"applescript": "T1059.002",
"emond": "T1546.014",
"control panel items": "T1218.002",
"application shimming": "T1546.011",
}
class TrainingData(object):
def __init__(self):
self.mappings = {} # Mapping is sentence text plus a list of Attack IDs
def add_mapping(self, sentence_text, attack_id=None):
mappings = self.mappings.get(sentence_text, []) # Get mappings or empty list
if attack_id: # If attack_id is specified, add it to the list
if attack_id not in mappings:
mappings.append(attack_id)
self.mappings[sentence_text] = mappings # Put the mapping list back in
def to_report_export_serializer_json(self):
"""Creates a dict that can be used to create
a serializers.ReportExportSerializer instance
"""
utc_now = datetime.utcnow().isoformat() + "Z"
res_json = {
"name": "Bootstrap Training Data",
"text": "There is no text for this report. These sentences were mapped by human analysts.",
"ml_model": "humans",
"created_on": utc_now,
"updated_on": utc_now,
"sentences": [],
}
order = 0
for sentence_text, mappings in self.mappings.items():
if len(sentence_text.strip()) == 0: # Skip empty sentences
continue
sentence = {
"text": sentence_text,
"order": order,
"disposition": "accept",
"mappings": [],
}
order += 1
for mapping in mappings:
mapping = {"attack_id": mapping, "confidence": "100.0"}
sentence["mappings"].append(mapping)
res_json["sentences"].append(sentence)
return res_json
def get_attack_id(description):
"""Given a description, get the ATTACK ID. Raises IndexError if the retrieval fails."""
lower_description = description.lower()
attack_id = ATTACK_LOOKUP[lower_description]
return attack_id
def main():
with open("data/training/archive/all_analyzed_reports.json") as f:
all_analyzed_reports = json.load(f)
with open("data/training/archive/negative_data.json") as f:
negative_data = json.load(f)
training_data = TrainingData()
# Add the positives
for key, value in all_analyzed_reports.items():
if key.endswith("-multi"): # It's a multi-mapping, value is a dictionary
for sentence in value[
"sentances"
]: # Sentences is misspelled in the source data
                for name in value["technique_names"]:
                    training_data.add_mapping(sentence, ATTACK_LOOKUP[name.lower()])
else: # It's a single-mapping, value is a list of sentences
technique_id = get_attack_id(key)
for sentence in value:
training_data.add_mapping(sentence, technique_id)
# Add the negatives
for sentence in negative_data:
training_data.add_mapping(sentence, None)
res_json = training_data.to_report_export_serializer_json()
res = ReportExportSerializer(data=res_json)
res.is_valid(raise_exception=True)
with open(outfile, "w") as f:
json.dump(res.initial_data, f, indent=4)
logger.info("Wrote data to %s" % outfile)
if __name__ == "__main__":
main()
|
ctpn_crnn_ocr/ctpnport.py | shijieS/Scene-Text-Understanding | 380 | 11106789 |
import sys
import numpy as np
class cfg:
MEAN=np.float32([102.9801, 115.9465, 122.7717])
TEST_GPU_ID=0
SCALE=600
MAX_SCALE=1000
LINE_MIN_SCORE=0.7
TEXT_PROPOSALS_MIN_SCORE=0.7
TEXT_PROPOSALS_NMS_THRESH=0.3
MAX_HORIZONTAL_GAP=50
TEXT_LINE_NMS_THRESH=0.3
MIN_NUM_PROPOSALS=2
MIN_RATIO=1.2
MIN_V_OVERLAPS=0.7
MIN_SIZE_SIM=0.7
TEXT_PROPOSALS_WIDTH=16
def init():
sys.path.insert(0, "./CTPN/tools")
sys.path.insert(0, "./CTPN/caffe/python")
sys.path.insert(0, "./CTPN/src")
init()
from other import draw_boxes, resize_im, CaffeModel
import cv2, os, caffe
from detectors import TextProposalDetector, TextDetector
import os.path as osp
from utils.timer import Timer
def ctpnSource():
DEMO_IMAGE_DIR = "img/"
NET_DEF_FILE = "CTPN/models/deploy.prototxt"
MODEL_FILE = "CTPN/models/ctpn_trained_model.caffemodel"
caffe.set_mode_gpu()
caffe.set_device(cfg.TEST_GPU_ID)
# initialize the detectors
text_proposals_detector = TextProposalDetector(CaffeModel(NET_DEF_FILE, MODEL_FILE))
text_detector = TextDetector(text_proposals_detector)
return text_detector
def getCharBlock(text_detector,im):
im, f=resize_im(im, cfg.SCALE, cfg.MAX_SCALE)
cv2.imshow("src", im)
tmp = im.copy()
#timer=Timer()
#timer.tic()
text_lines=text_detector.detect(im)
#print "Number of the detected text lines: %s"%len(text_lines)
#print "Time: %f"%timer.toc()
text_recs = draw_boxes(tmp, text_lines, caption='im_name', wait=True)
return tmp,text_recs
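# Added illustrative usage (assumptions: the CTPN model files referenced in
# ctpnSource() exist and 'img/demo.jpg' is a hypothetical input image path):
if __name__ == '__main__':
    text_detector = ctpnSource()
    im = cv2.imread('img/demo.jpg')
    tmp, text_recs = getCharBlock(text_detector, im)
    print('%d text regions detected' % len(text_recs))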
|
Z - Tool Box/LaZagne/Linux/lazagne/softwares/memory/mimipy.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1,290 | 11106796 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: <NAME> (<EMAIL>)
Original idea from @huntergregal (https://github.com/huntergregal/mimipenguin)
This is a port in Python of @huntergregal's bash script mimipenguin.sh, with some improvements:
- possibility to clean passwords found from memory
- possibility to search for any trace of your password in all your processes
- possibility to scan a process by pid
- add some additional processes to scan like lightDM
You can find the bleeding edge version of mimipy here : https://github.com/n1nj4sec/mimipy
"""
import os
import crypt
import re
import traceback
from lazagne.config.lib.memorpy import *
from lazagne.config.module_info import ModuleInfo
from lazagne.softwares.browsers.mozilla import python_version
class Mimipy(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'mimipy', 'memory')
self.shadow_hashes = []
self.rules = [
{
"desc": "[SYSTEM - GNOME]",
"process": r"gnome-keyring-daemon|gdm-password|gdm-session-worker",
"near": r"libgcrypt\.so\..+|libgck\-1\.so\.0|_pammodutil_getpwnam_|gkr_system_authtok",
"func": self.test_shadow,
},
{
"desc": "[SYSTEM - LightDM]", # Ubuntu/xubuntu login screen :) https://doc.ubuntu-fr.org/lightdm
"process": r"lightdm",
"near": r"_pammodutil_getpwnam_|gkr_system_authtok",
"func": self.test_shadow,
},
{
"desc": "[SYSTEM - SSH Server]",
"process": r"/sshd$",
"near": r"sudo.+|_pammodutil_getpwnam_",
"func": self.test_shadow,
},
{
"desc": "[SSH Client]",
"process": r"/ssh$",
"near": r"sudo.+|/tmp/ICE-unix/[0-9]+",
"func": self.test_shadow,
},
{
"desc": "[SYSTEM - VSFTPD]",
"process": r"vsftpd",
"near": r"^::.+\:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$",
"func": self.test_shadow,
},
]
regex_type = type(re.compile("^plop$"))
# precompile regexes to optimize speed
for x in self.rules:
if "near" in x:
if type(x["near"]) != regex_type:
x["near"] = re.compile(x["near"])
if "process" in x:
if type(x["process"]) != regex_type:
x["process"] = re.compile(x["process"])
self.look_after_size = 1000 * 10 ** 3
self.look_before_size = 500 * 10 ** 3
def get_shadow_hashes(self):
hashes = []
with open('/etc/shadow', 'rb') as f:
for line in f:
tab = line.decode().split(":")
if len(tab[1]) > 10:
hashes.append((tab[0], tab[1]))
return hashes
def memstrings(self, mw, start_offset=None, end_offset=None, optimizations=''):
for _, x in mw.mem_search(r"([\x20-\x7e]{6,50})[^\x20-\x7e]", ftype='re', start_offset=start_offset,
end_offset=end_offset, optimizations=optimizations):
yield x
def password_list_match(self, password_list, near):
for password in password_list:
if near.search(password.decode('latin')):
return True
return False
def cleanup_string(self, s):
try:
ns = ""
for c in s:
if ord(c) < 0x20 or ord(c) > 0x7e:
break
ns += c
return ns
except Exception:
return s
def test_shadow(self, name, pid, rule, optimizations='nsrx'):
self.info('Analysing process %s (%s) for shadow passwords ...' % (name, pid))
password_tested = set() # to avoid hashing the same string multiple times
with MemWorker(name=name, pid=pid) as mw:
scanned_segments = []
for _, match_addr in mw.mem_search(rule["near"], ftype='re', optimizations=optimizations):
password_list = []
total = 0
start = int(match_addr - self.look_after_size)
end = int(match_addr + self.look_after_size)
for s, e in scanned_segments:
if end < s or start > e:
continue # no collision
elif start >= s and e >= start and end >= e:
start = e - 200 # we only scan a smaller region because some of it has already been scanned
scanned_segments.append((start, end))
for x in self.memstrings(mw, start_offset=start, end_offset=end, optimizations=optimizations):
password = self.cleanup_string(x.read(type='string', maxlen=51, errors='ignore'))
total += 1
password_list.append(password)
if len(password_list) > 40:
password_list = password_list[1:]
if self.password_list_match(password_list, rule["near"]):
for p in password_list:
if p not in password_tested:
password_tested.add(p)
for user, h in self.shadow_hashes:
if crypt.crypt(p.decode('latin'), h) == h:
p = p if python_version == 2 else p.decode()
yield (rule["desc"], user, p)
def mimipy_loot_passwords(self, optimizations='nsrx'):
self.shadow_hashes = self.get_shadow_hashes()
for procdic in Process.list():
name = procdic["name"]
pid = int(procdic["pid"])
for rule in self.rules:
if re.search(rule["process"], name):
try:
for t, u, p in rule["func"](name, pid, rule, optimizations=optimizations):
yield (t, name, u, p)
except Exception:
self.debug(traceback.format_exc())
def run(self):
if os.getuid() != 0:
self.info('You need sudo privileges')
return
pwd_found = []
for t, process, user, password in self.mimipy_loot_passwords(optimizations="nsrx"):
pwd_found.append({
'Process': str(process),
'Login': str(user),
'Password': str(password),
})
return pwd_found
|
splash/kernel/kernelbase.py | Germey/splash | 3,612 | 11106847 |
# -*- coding: utf-8 -*-
"""
Refactored from IPython.kernel.zmq.kernelbase.Kernel with async execution
support. See https://github.com/ipython/ipython/pull/7713.
"""
import functools
import time
import sys
from ipython_genutils import py3compat
from ipykernel.jsonutil import json_clean
from ipykernel.kernelbase import Kernel as _Kernel
if hasattr(_Kernel, "send_execute_reply"):
# patched IPython version
raise Exception("Incompatible IPython version")
else:
# non-patched IPython version
class Kernel(_Kernel):
async_msg_types = {'execute_request'}
def __init__(self, **kwargs):
super(Kernel, self).__init__(**kwargs)
# XXX: A HUGE HACK
# In existing ipykernel implementation
# `dispatch_control` and `dispatch_shell` methods
# publish 'idle' state at the end. This is not correct
            # in the presence of async handlers. Overriding `dispatch_control` and
# `dispatch_shell` is problematic because it is a big copy-paste.
# So all handlers are overridden to set "idle" at the end,
# and `_publish_status` skips "idle" by default.
this = self
def send_idle(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
res = meth(self, *args, **kwargs)
this._publish_idle()
return res
return wrapper
for msg_type in self.shell_handlers:
if msg_type in self.async_msg_types:
continue
self.shell_handlers[msg_type] = send_idle(self.shell_handlers[msg_type])
for msg_type in self.control_handlers:
if msg_type in self.async_msg_types:
continue
self.control_handlers[msg_type] = send_idle(self.control_handlers[msg_type])
def _publish_status(self, status, parent=None, force=False):
if status != 'idle' or force:
super(Kernel, self)._publish_status(status, parent)
def _publish_idle(self, parent=None):
self._publish_status("idle", parent, force=True)
def execute_request(self, stream, ident, parent):
"""handle an execute_request"""
# This function is mostly a copy-pasted version from ipykernel,
# but it is split into several functions in order to allow
# overriding them in subclasses.
# ============ BEGIN COPY-PASTE =============
try:
content = parent[u'content']
code = py3compat.cast_unicode_py2(content[u'code'])
silent = content[u'silent']
store_history = content.get(u'store_history', not silent)
user_expressions = content.get('user_expressions', {})
allow_stdin = content.get('allow_stdin', False)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", parent)
return
metadata = self.init_metadata(parent)
# Re-broadcast our input for the benefit of listening clients, and
# start computing output
if not silent:
self.execution_count += 1
self._publish_execute_input(code, parent, self.execution_count)
reply_content = self.do_execute(code, silent, store_history,
user_expressions, allow_stdin)
# ============ END COPY-PASTE =============
self.send_execute_reply(stream, ident, parent, metadata, reply_content)
def send_execute_reply(self, stream, ident, parent, metadata, reply_content):
""" Send a reply to execute_request """
# This function is mostly copy-pasted from the last part of
# ipykernel's execute_reply method.
# It is extracted to allow overriding in subclasses.
# Splash kernel overrides it for async replies: instead
# of returning result immediately it only calls the original
# implementation when async reply is received.
content = parent[u'content']
stop_on_error = content.get('stop_on_error', True)
silent = content[u'silent']
# ============ BEGIN COPY-PASTE ============
# Flush output before sending the reply.
sys.stdout.flush()
sys.stderr.flush()
# FIXME: on rare occasions, the flush doesn't seem to make it to the
# clients... This seems to mitigate the problem, but we definitely need
# to better understand what's going on.
if self._execute_sleep:
time.sleep(self._execute_sleep)
# Send the reply
reply_content = json_clean(reply_content)
metadata = self.finish_metadata(parent, metadata, reply_content)
reply_msg = self.session.send(stream, u'execute_reply',
reply_content, parent, metadata=metadata,
ident=ident)
self.log.debug("%s", reply_msg)
if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
self._abort_queues()
# ============== END COPY-PASTE ==============
# fix idle signal handling for async replies
self._publish_idle()
if hasattr(stream, 'flush'):
stream.flush()
|
PhysicsTools/PatAlgos/python/cleaningLayer1/muonCleaner_cfi.py | ckamtsikis/cmssw | 852 | 11106875 |
import FWCore.ParameterSet.Config as cms
cleanPatMuons = cms.EDProducer("PATMuonCleaner",
src = cms.InputTag("selectedPatMuons"),
# preselection (any string-based cut for pat::Muon)
preselection = cms.string(''),
# overlap checking configurables
checkOverlaps = cms.PSet(),
# finalCut (any string-based cut for pat::Muon)
finalCut = cms.string(''),
)
|
test/test_transforms.py | HarshTrivedi/tnt | 1,463 | 11106887 |
import torchnet.transform as transform
import unittest
import torch
class TestTransforms(unittest.TestCase):
def testCompose(self):
self.assertEqual(transform.compose([lambda x: x + 1, lambda x: x + 2, lambda x: x / 2])(1), 2)
def testTableMergeKeys(self):
x = {
'sample1': {'input': 1, 'target': "a"},
'sample2': {'input': 2, 'target': "b", 'flag': "hard"}
}
y = transform.tablemergekeys()(x)
self.assertEqual(y['input'], {'sample1': 1, 'sample2': 2})
self.assertEqual(y['target'], {'sample1': "a", 'sample2': "b"})
self.assertEqual(y['flag'], {'sample2': "hard"})
def testTableApply(self):
x = {1: 1, 2: 2}
y = transform.tableapply(lambda x: x + 1)(x)
self.assertEqual(y, {1: 2, 2: 3})
def testMakeBatch(self):
x = [
{'input': torch.randn(4), 'target': "a"},
{'input': torch.randn(4), 'target': "b"},
]
y = transform.makebatch()(x)
self.assertEqual(y['input'].size(), torch.Size([2, 4]))
self.assertEqual(y['target'], ["a", "b"])
if __name__ == '__main__':
unittest.main()
|
tests/test_pool.py | mvaleev/asyncpgsa | 419 | 11106918 |
import sqlalchemy as sa
async def test_pool_basic(pool):
async with pool.acquire() as con:
result = await con.fetch('SELECT * FROM sqrt(16)')
assert result[0]['sqrt'] == 4.0
async def test_pool_connection_transaction_context_manager(pool):
async with pool.transaction() as conn:
result = await conn.fetch('SELECT * FROM sqrt(16)')
assert result[0]['sqrt'] == 4.0
async def test_use_sqlalchemy_with_escaped_params(pool):
"""
Use sqlalchemy with escaped params
Make sure that the escaped parameters get used in the right order
:return:
"""
query = sa.select('*') \
.select_from(sa.text('sqrt(:num) as a')) \
.select_from(sa.text('sqrt(:a2) as b')) \
.select_from(sa.text('sqrt(:z3) as c')) \
.params(num=16, a2=36, z3=25)
async with pool.transaction() as conn:
result = await conn.fetch(query)
row = result[0]
assert row['a'] == 4.0
assert row['b'] == 6.0
assert row['c'] == 5.0
async def test_use_sa_core_objects(pool):
pg_tables = sa.Table(
'pg_tables', sa.MetaData(),
sa.Column('schemaname'),
sa.Column('tablename'),
sa.Column('tableowner'),
sa.Column('tablespace'),
sa.Column('hasindexes')
)
query = pg_tables.select().where(pg_tables.c.schemaname == 'pg_catalog')
async with pool.transaction() as conn:
result = await conn.fetch(query)
for row in result:
# just making sure none of these throw KeyError exceptions
assert isinstance(row['schemaname'], str)
assert 'tablename' in row
assert 'tableowner' in row
assert 'tablespace' in row
assert 'hasindexes' in row
async def test_with_without_async_should_throw_exception(pool):
try:
with pool.transaction() as conn:
result = await conn.fetch('SELECT * FROM sqrt(16)')
raise Exception('Should have thrown RuntimeError')
except RuntimeError as e:
assert str(e) == 'Must use "async with" for a transaction'
async def test_falsyness_of_rows_on_fetch(pool):
async with pool.acquire() as conn:
rows = await conn.fetch('SELECT * FROM pg_stat_activity '
'WHERE pid=400')
assert bool(rows) == False
|
c7n/resources/cloudsearch.py | al3pht/cloud-custodian | 2,415 | 11106927 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.actions import Action
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n.utils import local_session, type_schema
@resources.register('cloudsearch')
class CloudSearch(QueryResourceManager):
class resource_type(TypeInfo):
service = "cloudsearch"
enum_spec = ("describe_domains", "DomainStatusList", None)
name = id = "DomainName"
dimension = "DomainName"
filter_name = 'DomainNames'
filter_type = 'list'
arn_type = "domain"
@CloudSearch.action_registry.register('delete')
class Delete(Action):
schema = type_schema('delete')
permissions = ('cloudsearch:DeleteDomain',)
def process(self, resources):
client = local_session(
self.manager.session_factory).client('cloudsearch')
for r in resources:
if r['Created'] is not True or r['Deleted'] is True:
continue
client.delete_domain(DomainName=r['DomainName'])
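# Added example policy (illustrative only) showing how the resource and the
# delete action registered above are referenced from Cloud Custodian YAML:
#
#   policies:
#     - name: cloudsearch-delete-all
#       resource: cloudsearch
#       actions:
#         - delete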
|
fedml_core/distributed/communication/gRPC/ip_config_utils.py | xuwanwei/FedML | 1,120 | 11106945 | import csv
def build_ip_table(path):
ip_config = dict()
with open(path, newline="") as csv_file:
csv_reader = csv.reader(csv_file)
# skip header line
next(csv_reader)
for row in csv_reader:
receiver_id, receiver_ip = row
ip_config[receiver_id] = receiver_ip
return ip_config
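# Added illustrative usage (the file name and addresses are hypothetical).
# The CSV is expected to have a header row followed by "receiver_id,receiver_ip"
# rows, e.g.:
#
#   receiver_id,receiver_ip
#   0,192.168.1.10
#   1,192.168.1.11
#
#   ip_config = build_ip_table("gpu_mapping.csv")
#   assert ip_config["0"] == "192.168.1.10"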
|
doc/sphinx_preview.py | rohanraja/cgt_distributed | 698 | 11107063 |
import sublime, sublime_plugin
import subprocess, os
class SphinxPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
if self.view.file_name():
folder_name, file_name = os.path.split(self.view.file_name())
command = './build_and_view.sh'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=folder_name, shell=True)
result, err = p.communicate()
print(result,err)
# self.view.set_status('p4',str(result+err))
# sublime.set_timeout(self.clear,2000) |
milksnake/_compat.py | srinivas32/milksnake | 771 | 11107095 | import sys
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
else:
text_type = str
|
mlens/estimators/__init__.py | mehrdad-shokri/mlens | 760 | 11107183 | """ML-ENSEMBLE
:author: <NAME>
:copyright: 2017-2018
:licence: MIT
"""
from .estimators import BaseEstimator
from .estimators import LearnerEstimator, TransformerEstimator, LayerEnsemble
__all__ = ['LearnerEstimator', 'TransformerEstimator', 'LayerEnsemble',
'BaseEstimator']
|
mozillians/users/migrations/0021_auto_20171003_0716.py | divyamoncy/mozillians | 202 | 11107205 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('users', '0020_auto_20170908_0257'),
]
operations = [
migrations.AddField(
model_name='idpprofile',
name='created',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 3, 14, 16, 34, 743365, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='idpprofile',
name='primary',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='idpprofile',
name='updated',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 3, 14, 16, 38, 392655, tzinfo=utc), auto_now=True),
preserve_default=False,
),
]
|
vtr_flow/scripts/python_libs/vtr/yosys/__init__.py | amin1377/vtr-verilog-to-routing | 682 | 11107206 | """
init for the YOSYS module
"""
from .yosys import run
|
brotab/tests/test_utils.py | naeloob/brotab | 239 | 11107253 | from unittest import TestCase
from brotab.utils import split_tab_ids
class TestUtils(TestCase):
def test_split_tab_ids(self):
text = 'c.1.0 c.1.1\tc.1.2\r\nc.1.3 \r\t\n'
expected = ['c.1.0', 'c.1.1', 'c.1.2', 'c.1.3']
self.assertEqual(expected, split_tab_ids(text))
|
Python/TextConverter/text_converter.py | Corentin-Leffy/Racing-Car-Katas | 224 | 11107257 |
# This is for Python 3
import html as html_converter
# for Python 2 uncomment this line
#import cgi as html_converter
class UnicodeFileToHtmlTextConverter(object):
def __init__(self, full_filename_with_path):
self.full_filename_with_path = full_filename_with_path
def convert_to_html(self):
f = open(self.full_filename_with_path, "r")
html = ""
for line in f:
line = line.rstrip()
html += html_converter.escape(line, quote=True)
html += "<br />"
return html |
safe_control_gym/envs/env_wrappers/vectorized_env/vec_env.py | catgloss/safe-control-gym | 120 | 11107260 | """Adapted from OpenAI Baselines.
See also:
* https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_env.py
* https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/base_vec_env.py
"""
import os
import numpy as np
from abc import ABC, abstractmethod
from safe_control_gym.envs.env_wrappers.vectorized_env.vec_env_utils import tile_images
class VecEnv(ABC):
"""An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that each observation becomes a
batch of observations, and expected action is a batch of actions to be applied per-environment.
"""
closed = False
viewer = None
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self,
num_envs,
observation_space,
action_space
):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""Reset all the environments and return an array of observations, or a dict of observation arrays.
If step_async is still doing work, that work will be cancelled and step_wait() should not
be called until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self,
actions
):
"""Tell all the environments to start taking a step with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""Clean up the extra resources. Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self,
actions
):
"""Step the environments synchronously.
"""
self.step_async(actions)
return self.step_wait()
def render(self,
mode='human'
):
"""Display environment via a viewer.
"""
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""Return RGB images from each environment.
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
@abstractmethod
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment.
"""
pass
@abstractmethod
def set_attr(self, attr_name, values, indices=None):
"""Set attribute inside vectorized environments.
"""
pass
@abstractmethod
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
"""Call instance methods of vectorized environments.
"""
raise NotImplementedError()
def _get_indices(self,
indices
):
"""Convert a flexibly-typed reference to environment indices to an implied list of indices."""
if indices is None:
indices = range(self.num_envs)
elif isinstance(indices, int):
indices = [indices]
return indices
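# Added illustrative sketch (not part of safe-control-gym): a minimal
# synchronous VecEnv subclass that shows the step_async/step_wait contract
# documented above. It assumes `envs` is a list of gym-style environments
# sharing the same observation and action spaces.
class _ExampleSyncVecEnv(VecEnv):

    def __init__(self, envs):
        super().__init__(len(envs), envs[0].observation_space, envs[0].action_space)
        self.envs = envs
        self._actions = None

    def reset(self):
        return np.stack([env.reset() for env in self.envs])

    def step_async(self, actions):
        # remember the batch of actions; work happens in step_wait()
        self._actions = actions

    def step_wait(self):
        results = [env.step(a) for env, a in zip(self.envs, self._actions)]
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.array(rews), np.array(dones), list(infos)

    def get_attr(self, attr_name, indices=None):
        return [getattr(self.envs[i], attr_name) for i in self._get_indices(indices)]

    def set_attr(self, attr_name, values, indices=None):
        for i, value in zip(self._get_indices(indices), values):
            setattr(self.envs[i], attr_name, value)

    def env_method(self, method_name, method_args=None, method_kwargs=None, indices=None):
        method_args = method_args or []
        method_kwargs = method_kwargs or {}
        return [getattr(self.envs[i], method_name)(*method_args, **method_kwargs)
                for i in self._get_indices(indices)]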
class VecEnvWrapper(VecEnv):
"""An environment wrapper that applies to an entire batch of environments at once."""
def __init__(self,
venv,
observation_space=None,
action_space=None
):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self,
name
):
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
def get_attr(self,
attr_name,
indices=None
):
return self.venv.get_attr(attr_name, indices)
def set_attr(self,
attr_name,
values,
indices=None
):
return self.venv.set_attr(attr_name, values, indices)
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
return self.venv.env_method(method_name,
method_args=method_args,
method_kwargs=method_kwargs,
indices=indices)
|
lib/model/train_val_ori.py | yingxingde/FasterRCNN-pytorch | 128 | 11107272 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.model.config import cfg
import lib.roi_data_layer.roidb as rdl_roidb
from lib.roi_data_layer.layer import RoIDataLayer
from lib.nets.layers_util import *
from lib.utils.timer import Timer
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import PIL.Image as Image
import os
import torch
import sys
import pprint
import time
import cv2
import h5py
import torchvision.utils as vutils
import torchvision.transforms as torchtrans
class SolverWrapper(object):
def __init__(self, network, imdb, roidb, valroidb, model_dir, pretrained_model=None):
self.net = network
self.imdb = imdb
self.roidb = roidb
self.valroidb = valroidb
self.model_dir = model_dir
self.tbdir = os.path.join(model_dir, 'train_log')
if not os.path.exists(self.tbdir):
os.makedirs(self.tbdir)
self.pretrained_model = pretrained_model
def set_learn_strategy(self, learn_dict):
self._disp_interval = learn_dict['disp_interval']
self._valid_interval = learn_dict['disp_interval']*5
self._use_tensorboard = learn_dict['use_tensorboard']
self._use_valid = learn_dict['use_valid']
self._save_point_interval = learn_dict['save_point_interval']
self._lr_decay_steps = learn_dict['lr_decay_steps']
def train_model(self, resume=None, max_iters=100000):
# Build data layers for both training and validation set
self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
self.prepare_construct(resume)
net = self.net
# training
train_loss = 0
rpn_cls_loss = 0
rpn_bbox_loss = 0
fast_rcnn_cls_loss = 0
fast_rcnn_bbox_loss = 0
tp, tf, fg, bg = 0., 0., 0, 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()
for step in range(self.start_step, max_iters + 1):
blobs = self.data_layer.forward()
im_data = blobs['data']
im_info = blobs['im_info']
gt_boxes = blobs['gt_boxes']
# forward
result_cls_prob, result_bbox_pred, result_rois = net(im_data, im_info, gt_boxes)
loss = net.loss + net._rpn.loss
train_loss += loss.data.cpu()[0]
rpn_cls_loss += net._rpn.cross_entropy.data.cpu()[0]
rpn_bbox_loss += net._rpn.loss_box.data.cpu()[0]
fast_rcnn_cls_loss += net.cross_entropy.data.cpu()[0]
fast_rcnn_bbox_loss += net.loss_box.data.cpu()[0]
step_cnt += 1
# backward
self._optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm(self._parameters, max_norm=10)
self._optimizer.step()
# clear middle memory
net._delete_cache()
if step % self._disp_interval == 0:
duration = t.toc(average=False)
fps = step_cnt / duration
log_text = 'step %d, image: %s, loss: %.4f, fps: %.2f (%.2fs per batch)' % (
step, blobs['im_name'], train_loss / step_cnt, fps, 1. / fps)
pprint.pprint(log_text)
if self._use_tensorboard:
self._tensor_writer.add_text('Train', log_text, global_step=step)
# Train
avg_rpn_cls_loss = rpn_cls_loss / step_cnt
avg_rpn_bbox_loss = rpn_bbox_loss / step_cnt
avg_fast_rcnn_cls_loss = fast_rcnn_cls_loss / step_cnt
avg_fast_rcnn_bbox_loss = fast_rcnn_bbox_loss / step_cnt
self._tensor_writer.add_scalars('TrainSetLoss', {
'RPN_cls_loss': avg_rpn_cls_loss,
'RPN_bbox_loss': avg_rpn_bbox_loss,
'FastRcnn_cls_loss': avg_fast_rcnn_cls_loss,
'FastRcnn_bbox_loss': avg_fast_rcnn_bbox_loss
}, global_step=step)
self._tensor_writer.add_scalar('Learning_rate', self._lr, global_step=step)
re_cnt = True
if self._use_tensorboard and step % self._valid_interval == 0:
new_gt_boxes = gt_boxes.copy()
new_gt_boxes[:, :4] = new_gt_boxes[:, :4]
image = self.back_to_image(blobs['data']).astype(np.uint8)
im_shape = image.shape
pred_boxes, scores, classes = net.interpret_faster_rcnn_scale(result_cls_prob, result_bbox_pred, result_rois,
im_shape, min_score=0.1)
image = self.draw_photo(image, pred_boxes, scores, classes, new_gt_boxes)
image = torchtrans.ToTensor()(image)
image = vutils.make_grid([image])
self._tensor_writer.add_image('Image', image, step)
if self._use_valid and step % self._valid_interval == 0:
total_valid_loss = 0.0
valid_rpn_cls_loss = 0.0
valid_rpn_bbox_loss = 0.0
valid_fast_rcnn_cls_loss = 0.0
valid_fast_rcnn_bbox_loss = 0.0
valid_step_cnt = 0
start_time = time.time()
valid_length = self._disp_interval
net.eval()
for valid_batch in range(valid_length):
# get one batch
blobs = self.data_layer_val.forward()
im_data = blobs['data']
im_info = blobs['im_info']
gt_boxes = blobs['gt_boxes']
# forward
result_cls_prob, result_bbox_pred, result_rois = net(im_data, im_info, gt_boxes)
valid_loss = net.loss + net._rpn.loss
total_valid_loss += valid_loss.data.cpu()[0]
valid_rpn_cls_loss += net._rpn.cross_entropy.data.cpu()[0]
valid_rpn_bbox_loss += net._rpn.loss_box.data.cpu()[0]
valid_fast_rcnn_cls_loss += net.cross_entropy.data.cpu()[0]
valid_fast_rcnn_bbox_loss += net.loss_box.data.cpu()[0]
valid_step_cnt += 1
net.train()
duration = time.time() - start_time
fps = valid_step_cnt / duration
log_text = 'step %d, valid average loss: %.4f, fps: %.2f (%.2fs per batch)' % (
step, total_valid_loss / valid_step_cnt, fps, 1. / fps)
pprint.pprint(log_text)
if self._use_tensorboard:
self._tensor_writer.add_text('Valid', log_text, global_step=step)
new_gt_boxes = gt_boxes.copy()
new_gt_boxes[:, :4] = new_gt_boxes[:, :4]
image = self.back_to_image(blobs['data']).astype(np.uint8)
im_shape = image.shape
pred_boxes, scores, classes = net.interpret_faster_rcnn_scale(result_cls_prob, result_bbox_pred, result_rois,
im_shape, min_score=0.1)
image = self.draw_photo(image, pred_boxes, scores, classes, new_gt_boxes)
image = torchtrans.ToTensor()(image)
image = vutils.make_grid([image])
self._tensor_writer.add_image('Image_Valid', image, step)
if self._use_tensorboard:
# Valid
avg_rpn_cls_loss_valid = valid_rpn_cls_loss / valid_step_cnt
avg_rpn_bbox_loss_valid = valid_rpn_bbox_loss / valid_step_cnt
avg_fast_rcnn_cls_loss_valid = valid_fast_rcnn_cls_loss / valid_step_cnt
avg_fast_rcnn_bbox_loss_valid = valid_fast_rcnn_bbox_loss / valid_step_cnt
real_total_loss_valid = valid_rpn_cls_loss + valid_rpn_bbox_loss + valid_fast_rcnn_cls_loss + valid_fast_rcnn_bbox_loss
# Train
avg_rpn_cls_loss = rpn_cls_loss / step_cnt
avg_rpn_bbox_loss = rpn_bbox_loss / step_cnt
avg_fast_rcnn_cls_loss = fast_rcnn_cls_loss / step_cnt
avg_fast_rcnn_bbox_loss = fast_rcnn_bbox_loss / step_cnt
real_total_loss = rpn_cls_loss + rpn_bbox_loss + fast_rcnn_cls_loss + fast_rcnn_bbox_loss
self._tensor_writer.add_scalars('Total_Loss', {
'train': train_loss / step_cnt,
'valid': total_valid_loss / valid_step_cnt
}, global_step=step)
self._tensor_writer.add_scalars('Real_loss', {
'train': real_total_loss / step_cnt,
'valid': real_total_loss_valid / valid_step_cnt
}, global_step=step)
self._tensor_writer.add_scalars('RPN_cls_loss', {
'train': avg_rpn_cls_loss,
'valid': avg_rpn_cls_loss_valid
}, global_step=step)
self._tensor_writer.add_scalars('RPN_bbox_loss', {
'train': avg_rpn_bbox_loss,
'valid': avg_rpn_bbox_loss_valid
}, global_step=step)
self._tensor_writer.add_scalars('FastRcnn_cls_loss', {
'train': avg_fast_rcnn_cls_loss,
'valid': avg_fast_rcnn_cls_loss_valid
}, global_step=step)
self._tensor_writer.add_scalars('FastRcnn_bbox_loss', {
'train': avg_fast_rcnn_bbox_loss,
'valid': avg_fast_rcnn_bbox_loss_valid
}, global_step=step)
self._tensor_writer.add_scalars('ValidSetLoss', {
'RPN_cls_loss': avg_rpn_cls_loss_valid,
'RPN_bbox_loss': avg_rpn_bbox_loss_valid,
'FastRcnn_cls_loss': avg_fast_rcnn_cls_loss_valid,
'FastRcnn_bbox_loss': avg_fast_rcnn_bbox_loss_valid
}, global_step=step)
# self._tensor_writer.add_scalars('TrainSetLoss', {
# 'RPN_cls_loss': avg_rpn_cls_loss,
# 'RPN_bbox_loss': avg_rpn_bbox_loss,
# 'FastRcnn_cls_loss': avg_fast_rcnn_cls_loss,
# 'FastRcnn_bbox_loss': avg_fast_rcnn_bbox_loss
# }, global_step=step)
# self._tensor_writer.add_scalar('Learning_rate', self._lr, global_step=step)
if (step % self._save_point_interval == 0) and step > 0:
save_name, _ = self.save_check_point(step)
print('save model: {}'.format(save_name))
if step in self._lr_decay_steps:
self._lr *= self._lr_decay
self._optimizer = self._train_optimizer()
if re_cnt:
tp, tf, fg, bg = 0., 0., 0, 0
train_loss = 0
rpn_cls_loss = 0
rpn_bbox_loss = 0
fast_rcnn_cls_loss = 0
fast_rcnn_bbox_loss = 0
step_cnt = 0
t.tic()
re_cnt = False
if self._use_tensorboard:
self._tensor_writer.export_scalars_to_json(os.path.join(self.tbdir, 'all_scalars.json'))
def draw_photo(self, image, dets, scores, classes, gt_boxes):
# im2show = np.copy(image)
im2show = image
# color_b = (0, 191, 255)
for i, det in enumerate(dets):
det = tuple(int(x) for x in det)
r = min(0+i*10, 255)
r_i = i / 5
g = min(150+r_i*10, 255)
g_i = r_i / 5
b = min(200+g_i, 255)
color_b_c = (r,g,b)
cv2.rectangle(im2show, det[0:2], det[2:4], color_b_c, 2)
cv2.putText(im2show, '%s: %.3f' % (classes[i], scores[i]), (det[0], det[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
for i, det in enumerate(gt_boxes):
det = tuple(int(x) for x in det)
gt_class = self.net._classes[det[-1]]
cv2.rectangle(im2show, det[0:2], det[2:4], (255, 0, 0), 2)
cv2.putText(im2show, '%s' % (gt_class), (det[0], det[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im2show
def back_to_image(self, img):
image = img[0] + cfg.PIXEL_MEANS
image = image[:,:,::-1].copy(order='C')
return image
def save_check_point(self, step):
net = self.net
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
# store the model snapshot
filename = os.path.join(self.model_dir, 'fasterRcnn_iter_{}.h5'.format(step))
h5f = h5py.File(filename, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
# store data information
nfilename = os.path.join(self.model_dir, 'fasterRcnn_iter_{}.pkl'.format(step))
# current state of numpy random
st0 = np.random.get_state()
# current position in the database
cur = self.data_layer._cur
# current shuffled indexes of the database
perm = self.data_layer._perm
# current position in the validation database
cur_val = self.data_layer_val._cur
# current shuffled indexes of the validation database
perm_val = self.data_layer_val._perm
# current learning rate
lr = self._lr
# Dump the meta info
with open(nfilename, 'wb') as fid:
pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(lr, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(step, fid, pickle.HIGHEST_PROTOCOL)
return filename, nfilename
def load_check_point(self, step):
net = self.net
filename = os.path.join(self.model_dir, 'fasterRcnn_iter_{}.h5'.format(step))
nfilename = os.path.join(self.model_dir, 'fasterRcnn_iter_{}.pkl'.format(step))
print('Restoring model snapshots from {:s}'.format(filename))
if not os.path.exists(filename):
print('The checkPoint is not Right')
sys.exit(1)
# load model
h5f = h5py.File(filename, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
# load data information
with open(nfilename, 'rb') as fid:
st0 = pickle.load(fid)
cur = pickle.load(fid)
perm = pickle.load(fid)
cur_val = pickle.load(fid)
perm_val = pickle.load(fid)
lr = pickle.load(fid)
last_snapshot_iter = pickle.load(fid)
np.random.set_state(st0)
self.data_layer._cur = cur
self.data_layer._perm = perm
self.data_layer_val._cur = cur_val
self.data_layer_val._perm = perm_val
self._lr = lr
if last_snapshot_iter == step:
print('Restore over ')
else:
print('The checkPoint is not Right')
raise ValueError
return last_snapshot_iter
def weights_normal_init(self, model, dev=0.01):
import math
def _gaussian_init(m, dev):
m.weight.data.normal_(0.0, dev)
if hasattr(m.bias, 'data'):
m.bias.data.zero_()
def _xaiver_init(m):
nn.init.xavier_normal(m.weight.data)
if hasattr(m.bias, 'data'):
m.bias.data.zero_()
def _hekaiming_init(m):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if hasattr(m.bias, 'data'):
m.bias.data.zero_()
def _resnet_init(model, dev):
if isinstance(model, list):
for m in model:
self.weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
_hekaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
_gaussian_init(m, dev)
def _vgg_init(model, dev):
if isinstance(model, list):
for m in model:
self.weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
_gaussian_init(m, dev)
elif isinstance(m, nn.Linear):
_gaussian_init(m, dev)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
        if cfg.TRAIN.INIT_WAY == 'resnet':
            _resnet_init(model, dev)
        elif cfg.TRAIN.INIT_WAY == 'vgg':
            _vgg_init(model, dev)
else:
raise NotImplementedError
def prepare_construct(self, resume_iter):
# init network
self.net.init_fasterRCNN()
# Set the random seed
torch.manual_seed(cfg.RNG_SEED)
np.random.seed(cfg.RNG_SEED)
# Set learning rate and momentum
self._lr = cfg.TRAIN.LEARNING_RATE
self._lr_decay = 0.1
self._momentum = cfg.TRAIN.MOMENTUM
self._weight_decay = cfg.TRAIN.WEIGHT_DECAY
# load model
if resume_iter:
self.start_step = resume_iter + 1
self.load_check_point(resume_iter)
else:
self.start_step = 0
self.weights_normal_init(self.net, dev=0.01)
# refer to caffe faster RCNN
self.net.init_special_bbox_fc(dev=0.001)
            if self.pretrained_model is not None:
self.net._rpn._network._load_pre_trained_model(self.pretrained_model)
print('Load parameters from Path: {}'.format(self.pretrained_model))
else:
pass
# model
self.net.train()
if cfg.CUDA_IF:
self.net.cuda()
# resnet fixed BN should be eval
if cfg.TRAIN.INIT_WAY == 'resnet':
self.net._rpn._network._bn_eval()
# set optimizer
        self._parameters = [params for params in self.net.parameters() if params.requires_grad]
self._optimizer = self._train_optimizer()
# tensorboard
if self._use_tensorboard:
import tensorboardX as tbx
self._tensor_writer = tbx.SummaryWriter(log_dir=self.tbdir)
def _train_optimizer(self):
parameters = self._train_parameter()
optimizer = torch.optim.SGD(parameters, momentum=self._momentum)
return optimizer
def _train_parameter(self):
params = []
for key, value in self.net.named_parameters():
            if value.requires_grad:
if 'bias' in key:
params += [{'params': [value],
'lr': self._lr * (cfg.TRAIN.DOUBLE_BIAS+1),
'weight_decay': 0}]
else:
params += [{'params': [value],
'lr': self._lr ,
'weight_decay': self._weight_decay}]
return params
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after))
return filtered_roidb
def train_net(network, imdb, roidb, valroidb, model_dir,
learn_dict, resume,
pretrained_model=None,
max_iters=40000):
"""Train a Faster R-CNN network."""
roidb = filter_roidb(roidb)
valroidb = filter_roidb(valroidb)
sw = SolverWrapper(network, imdb, roidb, valroidb, model_dir,
pretrained_model=pretrained_model)
sw.set_learn_strategy(learn_dict)
sw.train_model(resume, max_iters)
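def example_train_entry(network, imdb, roidb, valroidb, model_dir):
    # Hypothetical helper (not part of the original file): shows how train_net is
    # typically wired up. The learn_dict keys mirror what set_learn_strategy() reads
    # above; the network/imdb/roidb objects must be built elsewhere in the repository,
    # and all values below are placeholder assumptions, not the project's defaults.
    learn_dict = {
        'disp_interval': 100,               # log every 100 iterations
        'use_tensorboard': True,            # write scalars/images via tensorboardX
        'use_valid': True,                  # run the validation loop every disp_interval * 5 steps
        'save_point_interval': 10000,       # snapshot .h5/.pkl checkpoints
        'lr_decay_steps': [50000, 70000],   # steps at which the learning rate is multiplied by 0.1
    }
    train_net(network, imdb, roidb, valroidb, model_dir,
              learn_dict=learn_dict, resume=None,
              pretrained_model=None, max_iters=80000)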
|
scale.app/scripts/feature_extraction/tests/test_parse_coverity.py | f4rsh/SCALe | 239 | 11107300 | #!/usr/bin/env python
# Copyright (c) 2007-2018 Carnegie Mellon University. All Rights Reserved.
# See COPYRIGHT file for details.
import unittest
import StringIO
import feature_extraction as fe
from feature_extraction.tests.coverity_input import CoverityJsonV2, CoverityJsonV2Event, CoverityJsonV2Issue
from feature_extraction.features import FeatureName
class TestCoverityJsonParser(unittest.TestCase):
def test_coverity_json_parser_one_diag(self):
covdata = CoverityJsonV2()
issue = CoverityJsonV2Issue()
event = CoverityJsonV2Event()
issue.add_event(event)
covdata.add_issue(issue)
diagnostics = fe.extractors[fe.Tool.Coverity][
"json_v2"](StringIO.StringIO(str(covdata)))
self.assertEqual(len(diagnostics), 1)
actual_diag = diagnostics[0]
features = actual_diag.feature_dict()
self.assertEqual(
features[FeatureName.Checker].value, issue.data["checkerName"])
self.assertEqual(
features[FeatureName.FilePath].value, event.data["filePathname"])
self.assertEqual(
features[FeatureName.LineStart].value, event.data["lineNumber"])
self.assertEqual(
features[FeatureName.Message].value, event.data["eventDescription"])
def test_coverity_json_parser_multi_diag(self):
return
covdata = CoverityJsonV2()
for i in range(10):
issue = CoverityJsonV2Issue()
issue.data["checkerName"] = "SomeChecker" + str(i)
event = CoverityJsonV2Event()
event.data["eventDescription"] = "Message" + str(i)
event.data["filePathname"] = "Path" + str(i)
event.data["lineNumber"] = i
event.data["main"] = True
issue.add_event(event)
covdata.add_issue(issue)
diagnostics = fe.extractors[fe.Tool.Coverity][
"json_v2"](StringIO.StringIO(str(covdata)))
self.assertEqual(len(diagnostics), 10)
counter = 0
for diag in diagnostics:
features = diag.feature_dict()
self.assertEqual(
features[FeatureName.Checker].value, ("SomeChecker" + str(counter)))
self.assertEqual(
features[FeatureName.FilePath].value, ("Path" + str(counter)))
self.assertEqual(features[FeatureName.LineStart].value, counter)
self.assertEqual(
features[FeatureName.Message].value, ("Message" + str(counter)))
counter += 1
if __name__ == '__main__':
unittest.main()
|
dp/62.py | lidongdongbuaa/leetcode | 1,232 | 11107301 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# 62. Unique Paths
# Approach:
# First handle the first row and first column by setting every cell to 1; a small
# trick is to initialize the whole DP matrix with 1s.
# State: the number of distinct paths from the start to the current cell.
# Transition: f[x][y] = f[x-1][y] + f[x][y-1]
# Finally, return the value in the bottom-right corner.
class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
        # edge case: empty grid
if not m or not n:
return 0
dp = [[1 for _ in range(n)] for _ in range(m)]
for i in xrange(1, m):
for j in xrange(1, n):
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return dp[-1][-1]
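# Quick sanity check (illustrative, not part of the original solution): a 3 x 7 grid
# has C(3 + 7 - 2, 2) = 28 unique paths, which the DP above reproduces.
if __name__ == '__main__':
    print(Solution().uniquePaths(3, 7))  # expected output: 28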
|
eend/bin/rttm_stats.py | lekynam2000/EEND | 250 | 11107365 | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: <NAME>)
# Licensed under the MIT license.
import numpy as np
import argparse
def load_rttm(rttm_file):
""" load rttm file as numpy structured array """
segments = []
for line in open(rttm_file):
toks = line.strip().split()
# number of columns is 9 (RT-05S) or 10 (RT-09S)
(stype, fileid, ch, start, duration,
_, _, speaker, _) = toks[:9]
if stype != "SPEAKER":
continue
start = float(start)
end = start + float(duration)
segments.append((fileid, speaker, start, end))
return np.array(segments, dtype=[
('recid', 'object'), ('speaker', 'object'), ('st', 'f'), ('et', 'f')])
def time2frame(t, rate, shift):
""" time in second (float) to frame index (int) """
return np.rint(t * rate / shift).astype(int)
def get_frame_labels(
rttm, start=0, end=None, rate=16000, shift=256):
""" Get frame labels from RTTM file
Args:
start: start time in seconds
end: end time in seconds
rate: sampling rate
shift: number of frame shift samples
n_speakers: number of speakers
if None, determined from rttm file
Returns:
labels.T: frame labels
(n_frames, n_speaker)-shaped numpy.int32 array
speakers: list of speaker ids
"""
# sorted uniq speaker ids
speakers = np.unique(rttm['speaker']).tolist()
# start and end frames
rec_sf = time2frame(start, rate, shift)
rec_ef = time2frame(end if end else rttm['et'].max(), rate, shift)
labels = np.zeros((rec_ef - rec_sf, len(speakers)), dtype=np.int32)
for seg in rttm:
seg_sp = speakers.index(seg['speaker'])
seg_sf = time2frame(seg['st'], rate, shift)
seg_ef = time2frame(seg['et'], rate, shift)
# relative frame index from 'rec_sf'
sf = ef = None
if rec_sf <= seg_sf and seg_sf < rec_ef:
sf = seg_sf - rec_sf
if rec_sf < seg_ef and seg_ef <= rec_ef:
ef = seg_ef - rec_sf
if sf is not None or ef is not None:
labels[sf:ef, seg_sp] = 1
return labels.T, speakers
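def _example_frame_labels():
    # Illustrative sketch (not part of the original script): build the structured
    # array by hand and turn it into frame labels. With the default rate=16000 and
    # shift=256 there are 62.5 frames per second, so these 0-3 s segments yield a
    # (2 speakers, ~188 frames) label matrix; frames where both rows are 1 are overlap.
    toy_rttm = np.array(
        [('rec1', 'spkA', 0.0, 2.0), ('rec1', 'spkB', 1.0, 3.0)],
        dtype=[('recid', 'object'), ('speaker', 'object'), ('st', 'f'), ('et', 'f')])
    labels, speakers = get_frame_labels(toy_rttm)
    return labels, speakers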
parser = argparse.ArgumentParser()
parser.add_argument('rttm')
args = parser.parse_args()
rttm = load_rttm(args.rttm)
def _min_max_ave(a):
return [f(a) for f in [np.min, np.max, np.mean]]
vafs = []
uds = []
ids = []
reclens = []
pres = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
den = 0
recordings = np.unique(rttm['recid'])
for recid in recordings:
rec = rttm[rttm['recid'] == recid]
speakers = np.unique(rec['speaker'])
for speaker in speakers:
spk = rec[rec['speaker'] == speaker]
spk.sort()
durs = spk['et'] - spk['st']
stats_dur = _min_max_ave(durs)
uds.append(np.mean(durs))
if len(durs) > 1:
intervals = spk['st'][1:] - spk['et'][:-1]
stats_int = _min_max_ave(intervals)
ids.append(np.mean(intervals))
vafs.append(np.sum(durs)/(np.sum(durs) + np.sum(intervals)))
labels, _ = get_frame_labels(rec)
n_presense = np.sum(labels, axis=0)
for n in np.unique(n_presense):
pres[n] += np.sum(n_presense == n)
den += len(n_presense)
#for s in speakers: print(s)
reclens.append(rec['et'].max() - rec['st'].min())
print(list(range(2, len(pres))))
total_speaker = np.sum([n * pres[n] for n in range(len(pres))])
total_overlap = np.sum([n * pres[n] for n in range(2, len(pres))])
print(total_speaker, total_overlap, total_overlap/total_speaker)
print("single-speaker overlap", pres[3]/np.sum(pres[2:]))
print(len(recordings), np.mean(reclens), np.mean(vafs), np.mean(uds), np.mean(ids), "overlap ratio:", np.sum(pres[2:])/np.sum(pres[1:]), "overlaps", ' '.join(str(x) for x in pres/den))
|
apex/contrib/transducer/transducer.py | Muflhi01/apex | 6,523 | 11107392 | import torch
import transducer_loss_cuda
import transducer_joint_cuda
class TransducerJoint(torch.nn.Module):
"""Transducer joint
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
pack_output (bool, optional): whether to pack the output in a compact form with don't-care
data being removed. (default: False)
relu (bool, optional): apply ReLU to the output of the joint operation. Requires opt=1
(default: False)
dropout (bool, optional): apply dropout to the output of the joint operation. Requires opt=1
(default: False)
opt (int, optional): pick the optimization level in [0, 1]. opt=1 picks a tiled algorithm.
(default: 1)
fwd_tile_size (int, optional): tile size used in forward operation. This argument will be
ignored if opt != 1. (default: 4)
dropout_prob (float, optional): dropout probability. (default: 0.0)
probe_mask (bool, optional): a flag used to probe the mask generated by ReLU and/or dropout
operation. When this argument is set to True, the mask can be accessed through
self.mask_probe. (default: false)
"""
def __init__(self, pack_output=False, relu=False, dropout=False, opt=1, fwd_tile_size=4,
dropout_prob=0, probe_mask=False):
super(TransducerJoint, self).__init__()
self.pack_output = pack_output
self.relu = relu
self.dropout = dropout
self.dropout_prob = dropout_prob
self.opt = opt
self.fwd_tile_size = fwd_tile_size
self.dummy_batch_offset = torch.empty(0)
masked = self.relu or self.dropout
self.mask_probe = [] if masked and probe_mask else None
if masked and opt != 1:
raise NotImplementedError("ReLU and dropout fusion is only supported with opt=1")
def forward(self, f, g, f_len, g_len, batch_offset=None, packed_batch=0):
"""Forward operation of transducer joint
Arguments:
f (tensor): transcription vector from encode block of shape (B, T, H).
g (tensor): prediction vector form predict block of shape (B, U, H).
f_len (tensor): length of transcription vector for each batch.
g_len (tensor): length of prediction vector minus 1 for each batch.
batch_offset (tensor, optional): tensor containing the offset of each batch
in the results. For example, batch offset can be obtained from:
batch_offset = torch.cumsum(f_len*g_len, dim=0)
This argument is required if pack_output == True, and is ignored if
pack_output == False. (default: None)
packed_batch (int, optional): the batch size after packing. This argument is
ignored if pack_output == False. (default: 0)
"""
my_batch_offset = batch_offset if self.pack_output else self.dummy_batch_offset
if self.pack_output and (batch_offset is None or packed_batch == 0):
raise Exception("Please specify batch_offset and packed_batch when packing is enabled")
dropout = self.dropout and self.training # only dropout for training
return TransducerJointFunc.apply(f, g, f_len, g_len, self.pack_output, self.relu, dropout,
my_batch_offset, packed_batch, self.opt,
self.fwd_tile_size, self.dropout_prob, self.mask_probe)
class TransducerLoss(torch.nn.Module):
"""Transducer loss
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
fuse_softmax_backward (bool, optional) whether to fuse the backward of transducer loss with
softmax. (default: True)
opt (int, optional): pick the optimization level in [0, 1]. opt=1 picks a more optimized
algorithm. In some cases, opt=1 might fall back to opt=0. (default: 1)
packed_input (bool, optional): whether to pack the output in a compact form with don't-care
data being removed. (default: False)
"""
def __init__(self, fuse_softmax_backward=True, opt=1, packed_input=False):
super(TransducerLoss, self).__init__()
self.fuse_softmax_backward = fuse_softmax_backward
self.opt = opt
self.packed_input = packed_input
self.dummy_batch_offset = torch.empty(0)
def forward(self, x, label, f_len, y_len, blank_idx, batch_offset=None, max_f_len=None,
debug_list=None):
"""Forward operation of transducer joint
Arguments:
x (tensor): input tensor to the loss function with a shape of (B, T, U, H).
label (tensor): labels for the input data.
f_len (tensor): lengths of the inputs in the time dimension for each batch.
y_len (tensor): lengths of the labels for each batch.
blank_idx (int): index for the null symbol.
batch_offset (tensor, optional): tensor containing the offset of each batch
in the input. For example, batch offset can be obtained from:
batch_offset = torch.cumsum(f_len*(y_len+1), dim=0)
This argument is required if packed_input == True, and is ignored if
packed_input == False. (default: None)
max_f_len (int, optional): maximum length of the input in the time dimension.
For example, it can be obtained as
max_f_len = max(f_len)
This argument is required if packed_input == True, and is ignored if
packed_input == False. (default: None)
(default: None)
debug_list (list, optional): when an empty list is supplied, Alpha and Beta generated
in the forward operation will be attached to this list for debug purpose.
(default: None)
"""
if self.packed_input:
if batch_offset is None or max_f_len is None:
raise Exception("Please specify batch_offset and max_f_len when packing is \
enabled")
my_batch_offset = batch_offset
my_max_f_len = max_f_len
else:
my_batch_offset = self.dummy_batch_offset
my_max_f_len = x.size(1)
return TransducerLossFunc.apply(x, label, f_len, y_len, my_batch_offset, my_max_f_len,
blank_idx, self.fuse_softmax_backward, debug_list,
self.opt, self.packed_input)
class TransducerLossFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, label, f_len, y_len, batch_offset, max_f_len, blank_idx,
fuse_softmax_backward, debug_list, opt, packed_input):
if fuse_softmax_backward == False:
with torch.enable_grad():
x = torch.nn.functional.log_softmax(x, dim=-1)
else:
x = torch.nn.functional.log_softmax(x, dim=-1)
alpha, beta, loss = transducer_loss_cuda.forward( x, label, f_len, y_len, batch_offset,
max_f_len, blank_idx, opt, packed_input)
if debug_list == []:
debug_list += [alpha, beta]
ctx.save_for_backward(x, alpha, beta, f_len, y_len, label, batch_offset)
ctx.blank_idx = blank_idx
ctx.fuse_softmax_backward = fuse_softmax_backward
ctx.opt = opt
ctx.packed_input = packed_input
ctx.max_f_len = max_f_len
return loss
@staticmethod
def backward(ctx, loss_grad):
x, alpha, beta, f_len, y_len, label, batch_offset = ctx.saved_tensors
x_grad = transducer_loss_cuda.backward( x, loss_grad, alpha, beta, f_len, y_len, label,
batch_offset, ctx.max_f_len, ctx.blank_idx, ctx.opt,
ctx.fuse_softmax_backward, ctx.packed_input)
if ctx.fuse_softmax_backward == False:
x_grad = x.backward(x_grad)
return x_grad, None, None, None, None, None, None, None, None, None, None
class TransducerJointFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, f, g, f_len, g_len, pack_output, relu, dropout, batch_offset, packed_batch,
opt, fwd_tile_size, dropout_prob, mask_probe):
h = transducer_joint_cuda.forward(f, g, f_len, g_len, batch_offset, packed_batch, opt,
pack_output, relu, dropout, dropout_prob, fwd_tile_size)
masked = relu or dropout
if masked:
ctx.save_for_backward(h[1], f_len, g_len, batch_offset)
if mask_probe is not None:
mask_probe.append(h[1])
else:
ctx.save_for_backward(f_len, g_len, batch_offset)
ctx.pack_output = pack_output
ctx.masked = relu or dropout
ctx.max_f_len = f.size(1)
ctx.max_g_len = g.size(1)
ctx.scale = 1 / (1-dropout_prob) if dropout and dropout_prob != 1 else 1
return h[0]
@staticmethod
def backward(ctx, loss_grad):
if ctx.masked:
mask, f_len, g_len, batch_offset = ctx.saved_tensors
inp = [loss_grad, mask]
else:
f_len, g_len, batch_offset = ctx.saved_tensors
inp = [loss_grad]
f_grad, g_grad = transducer_joint_cuda.backward( inp, f_len, g_len, batch_offset,
ctx.max_f_len, ctx.max_g_len,
ctx.pack_output, ctx.scale)
return f_grad, g_grad, None, None, None, None, None, None, None, None, None, None, None, \
None, None, None
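def _example_packed_transducer(f, g, f_len, y_len, label, blank_idx=0):
    # Illustrative sketch (not part of the original module), assuming the apex CUDA
    # extensions are built and that the predictor output g carries y_len + 1 steps per
    # batch (blank/SOS prepended), so g_len = y_len + 1 here. Under that assumption the
    # batch_offset recipes quoted in the two forward() docstrings above coincide, and a
    # single offset tensor can serve both the packed joint and the packed loss.
    g_len = y_len + 1
    joint = TransducerJoint(pack_output=True)
    loss_fn = TransducerLoss(packed_input=True)
    batch_offset = torch.cumsum(f_len * g_len, dim=0)
    packed_batch = int(batch_offset[-1])
    h = joint(f, g, f_len, g_len, batch_offset=batch_offset, packed_batch=packed_batch)
    loss = loss_fn(h, label, f_len, y_len, blank_idx,
                   batch_offset=batch_offset, max_f_len=int(f_len.max()))
    return loss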
|
tools/qftasm/qftasm_pp.py | gkgoat1/elvm | 803 | 11107402 | import sys
import re
filepath = sys.argv[1]
with open(filepath, "rt") as f:
text = f.read()
d = {}
for i_line, line in enumerate(text.split("\n")):
m = re.search(r'pc == ([0-9]+):', line)
if m:
d["pc" + m.group(1)] = i_line
text = text.format(**d)
text = text[:-1] # Remove the newline at the end
print(text, end="")
|
minihack/tiles/glyph_mapper.py | samvelyan/minihack-1 | 217 | 11107415 | # Copyright (c) Facebook, Inc. and its affiliates.
from minihack.tiles import glyph2tile, MAXOTHTILE
from nle.nethack import MAX_GLYPH
import numpy as np
import pkg_resources
import pickle
import os
class GlyphMapper:
"""This class is used to map glyphs to rgb pixels."""
def __init__(self):
self.tiles = self.load_tiles()
def load_tiles(self):
"""This function expects that tile.npy already exists.
If it doesn't, call make_tiles.py in win/
"""
tile_rgb_path = os.path.join(
pkg_resources.resource_filename("minihack", "tiles"),
"tiles.pkl",
)
return pickle.load(open(tile_rgb_path, "rb"))
def glyph_id_to_rgb(self, glyph_id):
tile_id = glyph2tile[glyph_id]
assert 0 <= tile_id <= MAXOTHTILE
return self.tiles[tile_id]
def _glyph_to_rgb(self, glyphs):
# Expects glhyphs as two-dimensional numpy ndarray
cols = None
col = None
for i in range(glyphs.shape[1]):
for j in range(glyphs.shape[0]):
rgb = self.glyph_id_to_rgb(glyphs[j, i])
if col is None:
col = rgb
else:
col = np.concatenate((col, rgb))
if cols is None:
cols = col
else:
cols = np.concatenate((cols, col), axis=1)
col = None
return cols
def to_rgb(self, glyphs):
return self._glyph_to_rgb(glyphs)
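def _example_render(glyphs):
    # Illustrative usage (not part of the original module): convert a NetHack glyph
    # grid (e.g. the (21, 79) "glyphs" observation from a MiniHack/NLE environment)
    # into an RGB pixel array, one tile per glyph.
    mapper = GlyphMapper()
    return mapper.to_rgb(glyphs)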
|
test/integration/dpr_w100.py | tuberj/ir_datasets | 149 | 11107457 | import re
import unittest
from ir_datasets.datasets.dpr_w100 import DprW100Doc, DprW100Query
from ir_datasets.formats import TrecQrel, TrecQuery
from .base import DatasetIntegrationTest
class TestDprW100(DatasetIntegrationTest):
def test_docs(self):
self._test_docs('dpr-w100', count=21015324, items={
0: DprW100Doc('1', re.compile('^"<NAME> \\( or ; ""Ahärôn""\\) is a prophet, high priest, and the brother of Moses in the Abrahamic.{412} brother\'s spokesman \\(""prophet""\\) to the Pharaoh\\. Part of the Law \\(Torah\\) that Moses received from"$', flags=48), 'Aaron'),
9: DprW100Doc('10', re.compile('^"families some time in Israel\'s past\\. Others argue that the story simply shows what can happen if th.{397}ho affirmed Moses\' uniqueness as the one with whom the spoke face to face\\. Miriam was punished with"$', flags=48), 'Aaron'),
21015323: DprW100Doc('21015324', re.compile('^"committee was established before the building was opened\\. It is the District Nursing base for North.{425}ontains 81 extra care apartments two GP surgeries, a public library, a community café, an optician,"$', flags=48), '"Limelight centre"'),
})
def test_queries(self):
self._test_queries('dpr-w100/natural-questions/train', count=58880, items={
0: DprW100Query('0', 'big little lies season 2 how many episodes', ('seven',)),
9: DprW100Query('9', 'who is in charge of enforcing the pendleton act of 1883', ('United States Civil Service Commission',)),
58879: DprW100Query('58879', 'who plays the army guy in pitch perfect 3', ('<NAME>', '<NAME>')),
})
self._test_queries('dpr-w100/natural-questions/dev', count=6515, items={
0: DprW100Query('0', 'who sings does he love me with reba', ('<NAME>',)),
9: DprW100Query('9', 'what is the name of wonder womans mother', ('<NAME>',)),
6514: DprW100Query('6514', 'girl from the shut up and dance video', ('<NAME>',)),
})
self._test_queries('dpr-w100/trivia-qa/train', count=78785, items={
0: DprW100Query('0', 'Who was President when the first Peanuts cartoon was published?', ('Presidency of <NAME>', '<NAME>', '<NAME>', "<NAME>", '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'President <NAME>', 'Truman administration', 'Presidency of <NAME>', 'Mr. Citizen', 'HST (president)', '<NAME>', '<NAME>', '<NAME>', 'S truman', '<NAME>', 'President Truman', '33rd President of the United States', 'Truman Administration', '<NAME>', '<NAME>', 'H<NAME>uman', '<NAME>')),
9: DprW100Query('9', 'Which was the first European country to abolish capital punishment?', ('Norvège', 'Mainland Norway', 'Norway', 'Norvege', 'Noregur', 'NORWAY', 'Norwegian state', 'Etymology of Norway', 'Noruega', 'Norwegen', 'ISO 3166-1:NO', 'Noreg', 'Republic of Norway', 'Norwegian kingdom', 'Kongeriket Noreg', 'Name of Norway', 'Kongeriket Norge', 'Noorwegen', 'Kingdom of Norway', 'Sport in Norway', 'Norwegia', 'Royal Kingdom of Norway')),
78784: DprW100Query('78784', 'According to the Bart Simpsons TV ad, Nobody better lay a finger on my what??', ('Butterfingers Snackerz', 'Butterfinger (ice cream)', 'Butterfinger Crisp', 'Nestlé Butterfinger', 'Butterfinger Snackerz', 'Butterfinger Ice Cream Bars', "Butterfinger BB's", 'Butterfinger', 'The Butterfinger Group')),
})
self._test_queries('dpr-w100/trivia-qa/dev', count=8837, items={
0: DprW100Query('0', 'The VS-300 was a type of what?', ('🚁', 'Helicopters', 'Civilian helicopter', 'Pescara (helicopter)', 'Cargo helicopter', 'Copter', 'Helecopter', 'List of deadliest helicopter crashes', 'Helichopper', 'Helocopter', 'Cargo Helicopter', 'Helicopter', 'Helicoptor', 'Anatomy of a helicopter')),
9: DprW100Query('9', 'Who wrote The Turn Of The Screw in the 19th century and The Ambassadors in the 20th?', ('The Finer Grain', '<NAME>', '<NAME>', '<NAME>')),
8836: DprW100Query('8836', 'Name the artist and the title of this 1978 classic that remains popular today: We were at the beach Everybody had matching towels Somebody went under a dock And there they saw a rock It wasnt a rock', ('Rock Lobster by the B-52s',)),
})
def test_qrels(self):
self._test_qrels('dpr-w100/natural-questions/train', count=8856662, items={
0: TrecQrel('0', '18768923', 2, '0'),
9: TrecQrel('0', '928112', 0, '0'),
8856661: TrecQrel('58879', '14546521', -1, '0'),
})
self._test_qrels('dpr-w100/natural-questions/dev', count=979893, items={
0: TrecQrel('0', '11828866', 2, '0'),
9: TrecQrel('0', '9446572', 0, '0'),
979892: TrecQrel('6514', '11133390', -1, '0'),
})
self._test_qrels('dpr-w100/trivia-qa/train', count=7878500, items={
0: TrecQrel('0', '525858', 0, '0'),
9: TrecQrel('0', '16254256', 0, '0'),
7878499: TrecQrel('78784', '5674041', 0, '0'),
})
self._test_qrels('dpr-w100/trivia-qa/dev', count=883700, items={
0: TrecQrel('0', '7108855', 1, '0'),
9: TrecQrel('0', '10764863', 0, '0'),
883699: TrecQrel('8836', '9491145', 0, '0'),
})
if __name__ == '__main__':
unittest.main()
|
questions/55945085/pyslam_qt_test.py | sesu089/stackoverflow | 302 | 11107515 | import pyslam
from PyQt5 import QtCore, QtGui, QtWidgets
class Thread(QtCore.QThread):
def run(self):
obj = pyslam.Slam()
obj.setParams(10)
print("before")
obj.start()
print("after")
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
w = QtWidgets.QWidget()
thread = Thread()
QtCore.QTimer.singleShot(1000, thread.start)
w.show()
sys.exit(app.exec_())
|
apps/autotest/models/monkey.py | rainydaygit/testtcloudserver | 349 | 11107542 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Monkey tasks
from library.api.db import EntityModel, db
class Monkey(EntityModel):
ACTIVE = 0
DISABLE = 1
TEST_TYPE = {'monkey': 1, 'performance': 2}
    app_name = db.Column(db.String(100))  # app name, e.g. 萌推
    package_name = db.Column(db.String(100))  # package name to test
    app_version = db.Column(db.String(100))  # app version
    app_id = db.Column(db.Integer)  # app package id
    download_app_status = db.Column(db.Integer)  # app download status
    begin_time = db.Column(db.TIMESTAMP)  # start time
    end_time = db.Column(db.TIMESTAMP)  # end time
    jenkins_url = db.Column(db.String(100))  # url of the jenkins build job
    report_url = db.Column(db.String(100))  # url to request for the report
    user_id = db.Column(db.Integer)  # id of the user who triggered the task
    mobile_ids = db.Column(db.String(100))  # device ids
    parameters = db.Column(db.String(1000))  # request parameters
    process = db.Column(db.Integer)  # progress, up to 100%
    status = db.Column(db.Integer, default=ACTIVE)  # status: 0 active, 1 disabled
    type_id = db.Column(db.Integer)  # monkey type id
    run_time = db.Column(db.Integer)  # scheduled run time
    actual_run_time = db.Column(db.Integer)  # actual run time
    app_install_required = db.Column(db.Integer)  # whether the app needs to be installed
    system_device = db.Column(db.Integer)  # whether it is a system device
    login_required = db.Column(db.Integer)  # whether login is required
    login_username = db.Column(db.String(100))  # login username
    login_password = db.Column(db.String(100))  # login password
    cancel_status = db.Column(db.Integer, default=DISABLE)  # whether this monkey run is cancelled: default 1, 0 means confirmed
    test_type = db.Column(db.Integer)  # test type: monkey=1, performance=2
# monkey log
class MonkeyErrorLog(EntityModel):
ACTIVE = 0
DISABLE = 1
monkey_id = db.Column(db.Integer) # monkey id
task_id = db.Column(db.Integer) # monkey device id
    error_type = db.Column(db.String(100))  # error log type
error_message = db.Column(db.TEXT) # error message
error_count = db.Column(db.Integer) # error show count in test
# monkey report
class MonkeyReport(EntityModel):
ACTIVE = 0
DISABLE = 1
monkey_id = db.Column(db.Integer) # monkey id
task_id = db.Column(db.Integer) # monkey device id
    report_type = db.Column(db.Integer, default=1)  # report type, 1 = bug_report
report_url = db.Column(db.String(1000)) # report url on oss
# monkey packages
class MonkeyPackage(EntityModel):
ACTIVE = 0
DISABLE = 1
PACKAGE_TYPE = {'monkey': 1, 'performance': 2}
name = db.Column(db.String(100)) # package name
package_name = db.Column(db.String(100)) # android package name
oss_url = db.Column(db.String(200)) # package oss url
picture = db.Column(db.Text) # package picture
    version = db.Column(db.String(200))  # package version
default_activity = db.Column(db.String(100)) # default activity
user_id = db.Column(db.Integer) # upload user id
status = db.Column(db.Integer, default=ACTIVE) # package status
size = db.Column(db.String(200)) # package size
test_type = db.Column(db.Integer) # test type : monkey=1,performance=2
# monkey device using
class MonkeyDeviceUsing(EntityModel):
ACTIVE = 0
DISABLE = 1
serial = db.Column(db.String(100)) # serial number
status = db.Column(db.Integer, default=ACTIVE) # status
using = db.Column(db.Integer, default=DISABLE) # not using
# monkey device status
class MonkeyDeviceStatus(EntityModel):
ACTIVE = 0
DISABLE = 1
monkey_id = db.Column(db.Integer) # monkey id
mobile_id = db.Column(db.Integer) # mobile id
    mobile_serial = db.Column(db.String(100))  # serial number
    mobile_model = db.Column(db.String(100))  # mobile_model
    mobile_version = db.Column(db.String(100))  # mobile version
    process = db.Column(db.Integer)  # progress
    activity_count = db.Column(db.Integer)  # number of activities in the package under test
    activity_tested_count = db.Column(db.Integer)  # number of activities of the current package already tested
    activity_all = db.Column(db.String(10000))  # all activities of the app under test
    activity_tested = db.Column(db.String(10000))  # activities of the current package that have been tested
    anr_count = db.Column(db.Integer)  # anr count
    crash_count = db.Column(db.Integer)  # crash count
    crash_rate = db.Column(db.Integer)  # crash rate
    exception_count = db.Column(db.Integer)  # exception count
    exception_run_time = db.Column(db.Integer)  # exception run time
    # status values: 0 not started, 1 succeeded, 2 failed
    device_connect_status = db.Column(db.Integer)  # device connection status
    screen_lock_status = db.Column(db.Integer)  # screen lock status
    setup_install_app_status = db.Column(db.Integer)  # install app during setup
    start_app_status = db.Column(db.Integer)  # start app
    setup_uninstall_app_status = db.Column(db.Integer)  # uninstall app during setup
    login_app_status = db.Column(db.Integer)  # log into app
    running_status = db.Column(db.Integer)  # running status
    teardown_uninstall_app_status = db.Column(db.Integer)  # uninstall app during teardown
    current_stage = db.Column(db.Integer, default=0)  # current execution stage
    begin_time = db.Column(db.TIMESTAMP)  # start time
    end_time = db.Column(db.TIMESTAMP)  # end time
    run_time = db.Column(db.Integer)  # run time
    running_error_reason = db.Column(db.String(1000))  # detailed reason for a failed run
    mobile_resolution = db.Column(db.String(100))  # device resolution
    cancel_status = db.Column(db.Integer, default=DISABLE)  # cancel the build on this device: default 1, 0 means cancelled
|
ml_editor/model_v3.py | VestiDev/ml-powered-applications-2020-book | 542 | 11107583 | import os
from pathlib import Path
import spacy
from sklearn.externals import joblib
from tqdm import tqdm
import pandas as pd
import nltk
from ml_editor.explanation_generation import (
parse_explanations,
get_recommendation_string_from_parsed_exps,
EXPLAINER,
FEATURE_ARR,
)
from ml_editor.model_v2 import add_v2_text_features
nltk.download("vader_lexicon")
SPACY_MODEL = spacy.load("en_core_web_sm")
tqdm.pandas()
curr_path = Path(os.path.dirname(__file__))
model_path = Path("../models/model_3.pkl")
MODEL = joblib.load(curr_path / model_path)
def get_features_from_input_text(text_input):
"""
Generates features for a unique text input
:param text_input: question string
:return: one row series containing v3 model features
"""
arr_features = get_features_from_text_array([text_input])
return arr_features.iloc[0]
def get_features_from_text_array(input_array):
"""
Generated features for an input array of text
:param input_array: array of input questions
:return: DataFrame of features
"""
text_ser = pd.DataFrame(input_array, columns=["full_text"])
text_ser = add_v2_text_features(text_ser.copy())
features = text_ser[FEATURE_ARR].astype(float)
return features
def get_model_probabilities_for_input_texts(text_array):
"""
Returns estimated v3 model probabilities from input text array
:param text_array: array of input questions
:return: array of predictions
"""
global MODEL
features = get_features_from_text_array(text_array)
return MODEL.predict_proba(features)
def get_question_score_from_input(text):
"""
Returns v3 model probability for a unique text input
:param text: input string
:return: estimated probability of question receiving a high score
"""
preds = get_model_probabilities_for_input_texts([text])
positive_proba = preds[0][1]
return positive_proba
def get_recommendation_and_prediction_from_text(input_text, num_feats=10):
"""
Gets a score and recommendations that can be displayed in the Flask app
:param input_text: input string
:param num_feats: number of features to suggest recommendations for
:return: current score along with recommendations
"""
global MODEL
feats = get_features_from_input_text(input_text)
pos_score = MODEL.predict_proba([feats])[0][1]
print("explaining")
exp = EXPLAINER.explain_instance(
feats, MODEL.predict_proba, num_features=num_feats, labels=(1,)
)
print("explaining done")
parsed_exps = parse_explanations(exp.as_list())
recs = get_recommendation_string_from_parsed_exps(parsed_exps)
output_str = """
Current score (0 is worst, 1 is best):
<br/>
%s
<br/>
<br/>
Recommendations (ordered by importance):
<br/>
<br/>
%s
""" % (
pos_score,
recs,
)
return output_str
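def _example_score_and_recommend():
    # Illustrative usage (not part of the original module): score a draft question with
    # the v3 model and fetch LIME-based recommendations, as the Flask app would. The
    # draft text below is a made-up placeholder.
    draft = "How can I make the opening paragraph of my short story more engaging?"
    score = get_question_score_from_input(draft)  # probability of receiving a high score
    html = get_recommendation_and_prediction_from_text(draft, num_feats=5)
    return score, html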
|
tests/test_rosbag.py | v-mehta/kapture | 264 | 11107586 | #!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
Tests of various rosbag to kapture converters.
"""
import os
import os.path as path
import tempfile
import unittest
# kapture
import path_to_kapture # enables import kapture # noqa: F401
import kapture
from kapture.algo.compare import equal_kapture
from kapture.core.Sensors import Camera, CameraType
import kapture.io.csv as kcsv
# tools
from kapture.converter.ros_tools.import_utbm_sensor import BB2_CAMERA_IDENTIFIERS, TOPICS_BB2
from kapture.converter.ros_tools.import_utbm_sensor import import_utbm_sensors
from kapture.utils.open_cv import import_opencv_camera_calibration
try:
import rosbag # noqa: F401
from kapture.converter.ros_tools.import_rosbag import RosBagImporter
has_rosbag = True
except ModuleNotFoundError:
has_rosbag = False
TOPIC_ODOMETRY = '/camera/odom/sample'
@unittest.skipIf(not has_rosbag, "rosbag module is missing")
class TestImportT265Rosbag(unittest.TestCase):
def setUp(self) -> None:
"""
Setup before every test
"""
samples_t265_folder = path.abspath(path.join(path.dirname(__file__), '../samples/t265'))
self.bag_file_path = path.join(samples_t265_folder, 'trimmed_locoffice.bag')
t265_rig_kapture = path.join(samples_t265_folder, 'rigs_only_kapture')
kapture_data = kcsv.kapture_from_dir(t265_rig_kapture)
self.ros_sample_kapture_path = path.join(samples_t265_folder, 'ros_kapture')
self.tempdir = tempfile.TemporaryDirectory()
self.kapture_path = path.join(self.tempdir.name, 'from_ros')
self.importer = RosBagImporter(rosbag_path=self.bag_file_path,
rigs=kapture_data.rigs,
sensors=kapture_data.sensors,
kapture_path=self.kapture_path,
force_overwrite_existing=True)
self.camera_ids = sorted(list(kapture_data.cameras.keys()))
self.first_cam_id = self.camera_ids[0]
def testMissingTopic(self):
with self.assertRaisesRegex(ValueError, 'Missing image topic',
msg="Missing image topic detected"):
self.importer.import_multi_camera(odometry_topic="/message/odometry",
image_topics=[],
camera_identifiers=[],
percent=100)
with self.assertRaisesRegex(ValueError, 'Unequal number of .*',
msg="Missing camera identifier detected"):
self.importer.import_multi_camera(odometry_topic="/message/odometry",
image_topics="/camera/image/left",
camera_identifiers=[],
percent=100)
def testUnknownTopic(self):
with self.assertRaisesRegex(ValueError, 'Missing topic .* Rosbag',
msg="Unknown odometry topic detected"):
self.importer.import_multi_camera(odometry_topic="/message/odometry",
image_topics="/camera/image/left",
camera_identifiers=self.first_cam_id,
percent=100)
with self.assertRaisesRegex(ValueError, 'Missing image topic .* Rosbag',
msg="Unknown image topic detected"):
self.importer.import_multi_camera(odometry_topic=TOPIC_ODOMETRY,
image_topics="/camera/image/left",
camera_identifiers=self.first_cam_id,
percent=100)
def testInvalidCameraIdentifiers(self):
with self.assertRaisesRegex(ValueError, 'Camera identifier left .* not defined',
msg="Invalid camera identifier"):
self.importer.import_multi_camera(odometry_topic=TOPIC_ODOMETRY,
image_topics=["/camera/image/left"],
camera_identifiers=["left"],
percent=100)
def test_import_t265_rosbag(self):
# rosbag was trimmed with
# filter LocOffice_2019-03-21-15-57-04.bag trimmed_bag.bag "t.secs <= 1553180224 and t.nsecs <= 460916281"
self.importer.import_multi_camera(odometry_topic=TOPIC_ODOMETRY,
image_topics=['/camera/fisheye1/image_raw', '/camera/fisheye2/image_raw'],
camera_identifiers=self.camera_ids,
save_all_positions=False,
find_image_position=True)
rig_id = list(self.importer._rigs.keys())[0]
self.importer.save_to_kapture(rig_id)
rosbag_kapture_data = kcsv.kapture_from_dir(self.ros_sample_kapture_path)
imported_data = kcsv.kapture_from_dir(self.kapture_path)
self.assertEqual(len(imported_data.trajectories), len(rosbag_kapture_data.records_camera),
"one pose per image pair")
self.assertTrue(equal_kapture(rosbag_kapture_data, imported_data), "Imported kapture ok")
images_path = kapture.io.records.get_image_fullpath(self.kapture_path)
images_files = []
for root, dirs, files in os.walk(images_path):
for name in files:
images_files.append(path.join(root, name))
self.assertEqual(len(images_files), 6)
# Check the files exist
for image_file in images_files:
self.assertTrue(path.isfile(image_file), f"Image file {image_file} exist")
def tearDown(self) -> None:
"""
Clean up after every test
"""
self.tempdir.cleanup()
class TestImportUtbmRosbag(unittest.TestCase):
def setUp(self) -> None:
"""
Setup before every test
"""
self._samples_utbm_folder = path.abspath(path.join(path.dirname(__file__), '../samples/utbm'))
self.tempdir = tempfile.TemporaryDirectory()
self.kapture_path = path.join(self.tempdir.name, 'from_ros')
def test_read_bb2_camera_info(self) -> None:
"""
Test the creation of a kapture camera object from a camera info file
"""
cam_info_file = path.join(self._samples_utbm_folder, 'bb2.yaml')
sensors = import_utbm_sensors(cam_info_file)
self.assertEqual(1, len(sensors), "Created one sensor")
camera_name = list(sensors)[0]
self.assertEqual('bb2_cam', camera_name, "Correct camera name")
camera = sensors[camera_name]
self.assertIsInstance(camera, Camera, "Is of type Camera")
self.assertEqual(CameraType.OPENCV, camera.camera_type, "of type openCV")
self.assertEqual('bb2_cam', camera.name, "Named bb2_cam")
self.assertEqual(1024, camera.camera_params[0], "Image width")
self.assertEqual(768, camera.camera_params[1], "Image height")
def test_read_bb2_with_k3_camera_info(self) -> None:
"""
Test exception thrown when a camera info file k3 parameter is not zero
"""
cam_info_file = path.join(self._samples_utbm_folder, 'bb2_with_k3.yaml')
camera = import_opencv_camera_calibration(cam_info_file)
self.assertIsInstance(camera, Camera, "Is of type Camera")
self.assertEqual(CameraType.FULL_OPENCV, camera.camera_type, "of type full openCV")
self.assertEqual('bb2_cam', camera.name, "Named bb2_cam")
self.assertEqual(1024, camera.camera_params[0], "Image width")
self.assertEqual(768, camera.camera_params[1], "Image height")
self.assertNotEqual(0.0, camera.camera_params[10], "K3 is not null")
@unittest.skipIf(not has_rosbag, "rosbag module is missing")
def test_utbm_images_rosbag_import(self) -> None:
"""
Test the import of an image rosbag
"""
# Use a small bb2 rosbag
rosbag_path = path.join(self._samples_utbm_folder, '2018-07-13-15-17-20_1_first10_bb2.bag')
sensors = import_utbm_sensors([path.join(self._samples_utbm_folder, 'bb2_left.yaml'),
path.join(self._samples_utbm_folder, 'bb2_right.yaml')])
importer = RosBagImporter(rosbag_path, None, sensors, self.kapture_path, force_overwrite_existing=True)
importer.import_multi_camera(None, TOPICS_BB2, BB2_CAMERA_IDENTIFIERS, False, False)
importer.save_to_kapture()
ros_sample_kapture_path = path.join(self._samples_utbm_folder, '2018-07-13-15-17-20_1_first10_bb2_kapture')
rosbag_kapture_data = kcsv.kapture_from_dir(ros_sample_kapture_path)
imported_data = kcsv.kapture_from_dir(self.kapture_path)
self.assertTrue(equal_kapture(rosbag_kapture_data, imported_data), "Imported UTBM kapture ok")
images_path = kapture.io.records.get_image_fullpath(self.kapture_path)
images_files = []
for root, dirs, files in os.walk(images_path):
for name in files:
images_files.append(path.join(root, name))
self.assertEqual(len(images_files), 10)
# Check the files exist
for image_file in images_files:
self.assertTrue(path.isfile(image_file), f"Image file {image_file} exist")
if __name__ == '__main__':
unittest.main()
|