max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
tests/data/user_dir/datasets/always_one.py | dk25021999/mmf | 3,252 | 12637766 | # Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.registry import registry
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from tests.test_utils import NumbersDataset
DATASET_LEN = 20
@registry.register_builder("always_one")
class AlwaysOneBuilder(BaseDatasetBuilder):
def __init__(self):
super().__init__("always_one")
def build(self, *args, **kwargs):
pass
@classmethod
def config_path(cls):
return "configs/always_one.yaml"
def load(self, config, dataset_type="train", *args, **kwargs):
dataset = NumbersDataset(DATASET_LEN, data_item_key="input", always_one=True)
dataset.dataset_name = self.dataset_name
dataset.dataset_type = dataset_type
return dataset
|
iot_hunter/dynamic_analysis/DynamicPlugins/ScannerOn.py | byamao1/HaboMalHunter | 727 | 12637769 | <filename>iot_hunter/dynamic_analysis/DynamicPlugins/ScannerOn.py
# Tencent is pleased to support the open source community by making IoTHunter available.
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
class ScannerOn():
"""Plugin to find scanneron cmd in network data."""
def __init__(self):
pass
def analyze(self, behaviors):
hit = 0
for databyte in behaviors.recvfrom_data.values():
print databyte
if '!SCANNERON' in databyte:
hit = 1
return hit
def get_result(self):
return 'Scanner Command Found'
if __name__ == '__main__':
pass
|
Algo and DSA/LeetCode-Solutions-master/Python/maximize-palindrome-length-from-subsequences.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12637790 | <gh_stars>1000+
# Time: O((m + n)^2)
# Space: O((m + n)^2)
class Solution(object):
def longestPalindrome(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
s = word1+word2
dp = [[0]*len(s) for _ in xrange(len(s))]
result = 0
for j in xrange(len(s)):
dp[j][j] = 1
for i in reversed(xrange(j)):
if s[i] == s[j]:
dp[i][j] = 2 if i+1 == j else dp[i+1][j-1] + 2
if i < len(word1) <= j:
result = max(result, dp[i][j])
else:
dp[i][j] = max(dp[i+1][j], dp[i][j-1])
return result
# Time: O((m + n)^2)
# Space: O((m + n)^2)
class Solution2(object):
def longestPalindrome(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
s = word1+word2
dp = [[0]*len(s) for _ in xrange(len(s))]
for j in xrange(len(s)):
dp[j][j] = 1
for i in reversed(xrange(j)):
if s[i] == s[j]:
dp[i][j] = 2 if i+1 == j else dp[i+1][j-1] + 2
else:
dp[i][j] = max(dp[i+1][j], dp[i][j-1])
return max([dp[i][j] for i in xrange(len(word1)) for j in xrange(len(word1), len(s)) if s[i] == s[j]] or [0])
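# Illustrative sanity check (added, not part of the original solution file):
# with word1 = "ab" and word2 = "ba", s = "abba" and the best palindrome using
# at least one character from each word is "abba", so both classes return 4:
#   Solution().longestPalindrome("ab", "ba")   # -> 4
#   Solution2().longestPalindrome("ab", "ba")  # -> 4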
|
sfaira/versions/topologies/musmusculus/embedding/vaeiaf.py | johnmous/sfaira | 110 | 12637813 | <gh_stars>100-1000
VAEIAF_TOPOLOGIES = {
"0.1": {
"model_type": "vaeiaf",
"input": {
"genome": "Mus_musculus.GRCm38.102",
"genes": ["biotype", "protein_coding"],
},
"output": {},
"hyper_parameters": {
"latent_dim": (256, 128, 64, 128, 256),
"n_iaf": 2,
"l1_coef": 0.,
"l2_coef": 0.,
"dropout_rate": 0.,
"batchnorm": True,
"activation": "tanh",
"init": "glorot_uniform",
"output_layer": "nb_shared_disp"
}
},
"0.2": {
"model_type": "vaeiaf",
"input": {
"genome": "Mus_musculus.GRCm38.102",
"genes": ["biotype", "protein_coding"],
},
"output": {},
"hyper_parameters": {
"latent_dim": (512, 256, 128, 256, 512),
"n_iaf": 2,
"l1_coef": 0.,
"l2_coef": 0.,
"dropout_rate": 0.,
"batchnorm": True,
"activation": "tanh",
"init": "glorot_uniform",
"output_layer": "nb_shared_disp"
}
}
}
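# Illustrative lookup (added for clarity, not in the original file): topology
# hyper-parameters are read by version key, e.g.
#   VAEIAF_TOPOLOGIES["0.1"]["hyper_parameters"]["latent_dim"]  # -> (256, 128, 64, 128, 256)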
# Load versions from extension if available:
try:
from sfaira_extension.versions.topology_versions.mouse.embedding import VAEIAF_TOPOLOGIES as VAEIAF_TOPOLOGIES_EXTENSION
VAEIAF_TOPOLOGIES = {
**VAEIAF_TOPOLOGIES,
**VAEIAF_TOPOLOGIES_EXTENSION
}
except ImportError:
pass
|
scripts/script.py | nickk/awesome-panel | 179 | 12637825 | import panel as pn
pn.config.sizing_mode = "stretch_width"
pn.extension()
pn.template.MaterialTemplate(
sidebar=[
pn.Column(
pn.widgets.AutocompleteInput(options=["test"] * 1000),
pn.widgets.Select(options=["a", "b", "c"]),
pn.widgets.Select(name="under"),
)
]
).save("save.html")
|
vegans/models/unconditional/AAE.py | unit8co/vegans | 459 | 12637834 | <reponame>unit8co/vegans
"""
AAE
---
Implements the Adversarial Autoencoder[1].
Instead of using the Kullback Leibler divergence to improve the latent space distribution
we use a discriminator to determine the "realness" of the latent vector.
Losses:
- Encoder: Binary cross-entropy + Mean-squared error
- Generator: Mean-squared error
- Adversary: Binary cross-entropy
Default optimizer:
- torch.optim.Adam
Custom parameter:
- lambda_z: Weight for the discriminator loss computing the realness of the latent z dimension.
References
----------
.. [1] https://arxiv.org/pdf/1511.05644.pdf
"""
import torch
import numpy as np
import torch.nn as nn
from torch.nn import MSELoss, BCELoss, L1Loss
from vegans.utils.utils import WassersteinLoss
from vegans.utils.networks import Encoder, Generator, Autoencoder, Adversary
from vegans.models.unconditional.AbstractGenerativeModel import AbstractGenerativeModel
class AAE(AbstractGenerativeModel):
"""
Parameters
----------
generator: nn.Module
Generator architecture. Produces output in the real space.
adversary: nn.Module
Adversary architecture. Produces predictions for real and fake samples to differentiate them.
encoder: nn.Module
Encoder architecture. Produces predictions in the latent space.
x_dim : list, tuple
Number of the output dimensions of the generator and input dimension of the discriminator / critic.
In the case of images this will be [nr_channels, nr_height_pixels, nr_width_pixels].
z_dim : int, list, tuple
Number of the latent dimensions for the generator input. Might have dimensions of an image.
optim : dict or torch.optim
Optimizer used for each network. Could be either an optimizer from torch.optim or a dictionary with network
name keys and torch.optim as value, i.e. {"Generator": torch.optim.Adam}.
optim_kwargs : dict
Optimizer keyword arguments used for each network. Must be a dictionary with network
name keys and dictionary with keyword arguments as value, i.e. {"Generator": {"lr": 0.0001}}.
lambda_z: float
Weight for the discriminator loss computing the realness of the latent z dimension.
adv_type: "Discriminator", "Critic" or "Autoencoder"
Indicating which adversarial architecture will be used.
feature_layer : torch.nn.*
Output layer used to compute the feature loss. Should be from either the discriminator or critic.
If `feature_layer` is not None, the original generator loss is replaced by a feature loss, introduced
[here](https://arxiv.org/abs/1606.03498v1).
fixed_noise_size : int
Number of images shown when logging. The fixed noise is used to produce the images in the folder/images
subdirectory, the tensorboard images tab and the samples in get_training_results().
device : string
Device used while training the model. Either "cpu" or "cuda".
ngpu : int
Number of gpus used during training if device == "cuda".
folder : string
Creates a folder in the current working directory with this name. All relevant files like summary, images, models and
tensorboard output are written there. Existing folders are never overwritten or deleted. If a folder with the same name
already exists a time stamp is appended to make it unique.
"""
#########################################################################
# Actions before training
#########################################################################
def __init__(
self,
generator,
adversary,
encoder,
x_dim,
z_dim,
optim=None,
optim_kwargs=None,
lambda_z=10,
adv_type="Discriminator",
feature_layer=None,
fixed_noise_size=32,
device=None,
ngpu=0,
folder="./veganModels/AAE",
secure=True):
self.adv_type = adv_type
self.encoder = Encoder(encoder, input_size=x_dim, device=device, ngpu=ngpu, secure=secure)
self.generator = Generator(generator, input_size=z_dim, device=device, ngpu=ngpu, secure=secure)
self.adversary = Adversary(adversary, input_size=z_dim, device=device, ngpu=ngpu, adv_type=adv_type, secure=secure)
self.neural_nets = {
"Generator": self.generator, "Encoder": self.encoder, "Adversary": self.adversary
}
super().__init__(
x_dim=x_dim, z_dim=z_dim, optim=optim, optim_kwargs=optim_kwargs, feature_layer=feature_layer,
fixed_noise_size=fixed_noise_size, device=device, folder=folder, ngpu=ngpu, secure=secure
)
self.lambda_z = lambda_z
self.hyperparameters["lambda_z"] = lambda_z
self.hyperparameters["adv_type"] = adv_type
if self.secure:
assert self.encoder.output_size == self.z_dim, (
"Encoder output shape must be equal to z_dim. {} vs. {}.".format(self.encoder.output_size, self.z_dim)
)
assert self.generator.output_size == self.x_dim, (
"Generator output shape must be equal to x_dim. {} vs. {}.".format(self.generator.output_size, self.x_dim)
)
def _define_loss(self):
if self.adv_type == "Discriminator":
loss_functions = {"Generator": MSELoss(), "Adversary": BCELoss()}
elif self.adv_type == "Critic":
loss_functions = {"Generator": MSELoss(), "Adversary": WassersteinLoss()}
else:
raise NotImplementedError("'adv_type' must be one of Discriminator or Critic.")
return loss_functions
#########################################################################
# Actions during training
#########################################################################
def encode(self, x):
return self.encoder(x)
def calculate_losses(self, X_batch, Z_batch, who=None):
if who == "Generator":
losses = self._calculate_generator_loss(X_batch=X_batch, Z_batch=Z_batch)
elif who == "Encoder":
losses = self._calculate_encoder_loss(X_batch=X_batch, Z_batch=Z_batch)
elif who == "Adversary":
losses = self._calculate_adversary_loss(X_batch=X_batch, Z_batch=Z_batch)
else:
losses = self._calculate_generator_loss(X_batch=X_batch, Z_batch=Z_batch)
losses.update(self._calculate_encoder_loss(X_batch=X_batch, Z_batch=Z_batch))
losses.update(self._calculate_adversary_loss(X_batch=X_batch, Z_batch=Z_batch))
return losses
def _calculate_generator_loss(self, X_batch, Z_batch, fake_images=None):
if fake_images is None:
encoded_output = self.encode(x=X_batch).detach()
fake_images = self.generate(encoded_output)
gen_loss = self.loss_functions["Generator"](
fake_images, X_batch
)
return {
"Generator": gen_loss,
}
def _calculate_encoder_loss(self, X_batch, Z_batch, fake_images=None, encoded_output=None):
if fake_images is None:
encoded_output = self.encode(x=X_batch)
fake_images = self.generate(z=encoded_output)
if self.feature_layer is None:
fake_predictions = self.predict(x=encoded_output)
enc_loss_fake = self.loss_functions["Generator"](
fake_predictions, torch.ones_like(fake_predictions, requires_grad=False)
)
else:
enc_loss_fake = self._calculate_feature_loss(X_real=Z_batch, X_fake=encoded_output)
enc_loss_reconstruction = self.loss_functions["Generator"](
fake_images, X_batch
)
enc_loss = self.lambda_z*enc_loss_fake + enc_loss_reconstruction
return {
"Encoder": enc_loss,
"Encoder_x": self.lambda_z*enc_loss_fake,
"Encoder_fake": enc_loss_reconstruction,
}
def _calculate_adversary_loss(self, X_batch, Z_batch, encoded_output=None):
if encoded_output is None:
encoded_output = self.encode(x=X_batch).detach()
fake_predictions = self.predict(x=encoded_output)
real_predictions = self.predict(x=Z_batch)
adv_loss_fake = self.loss_functions["Adversary"](
fake_predictions, torch.zeros_like(fake_predictions, requires_grad=False)
)
adv_loss_real = self.loss_functions["Adversary"](
real_predictions, torch.ones_like(real_predictions, requires_grad=False)
)
adv_loss = 1/2*(adv_loss_real + adv_loss_fake)
return {
"Adversary": adv_loss,
"Adversary_fake": adv_loss_fake,
"Adversary_real": adv_loss_real,
"RealFakeRatio": adv_loss_real / adv_loss_fake
}
def _step(self, who=None):
if who is not None:
self.optimizers[who].step()
if who == "Adversary":
if self.adv_type == "Critic":
for p in self.adversary.parameters():
p.data.clamp_(-0.01, 0.01)
else:
[optimizer.step() for _, optimizer in self.optimizers.items()]
|
examples.py/Topics/Image Processing/EdgeDetection.py | timgates42/processing.py | 1,224 | 12637872 | <gh_stars>1000+
"""
* Edge Detection.
*
* Exposing areas of contrast within an image
* by processing it through a high-pass filter.
"""
kernel = (( -1, -1, -1 ),
( -1, 9, -1 ),
( -1, -1, -1 ))
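# Note (added): the kernel weights sum to 9 - 8 = 1, so uniform regions keep
# their gray value while sharp transitions between neighbours are amplified.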
size(200, 200)
img = loadImage("house.jpg") # Load the original image
image(img, 0, 0) # Displays the image from point (0,0)
img.loadPixels();
# Create an opaque image of the same size as the original
edgeImg = createImage(img.width, img.height, RGB)
# Loop through every pixel in the image.
for y in range(1, img.height-1): # Skip top and bottom edges
for x in range(1, img.width-1): # Skip left and right edges
sum = 0 # Kernel sum for this pixel
for ky in (-1, 0, 1):
for kx in (-1, 0, 1):
# Calculate the adjacent pixel for this kernel point
pos = (y + ky)*img.width + (x + kx)
# Image is grayscale, red/green/blue are identical
val = red(img.pixels[pos])
# Multiply adjacent pixels based on the kernel values
sum += kernel[ky+1][kx+1] * val
# For this pixel in the new image, set the gray value
# based on the sum from the kernel
edgeImg.pixels[y*img.width + x] = color(sum)
# State that there are changes to edgeImg.pixels[]
edgeImg.updatePixels()
image(edgeImg, 100, 0) # Draw the new image
|
pcdet/utils/depth_utils.py | Bosszhe/CaDDN_wzh | 205 | 12637892 | <gh_stars>100-1000
import torch
import math
def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
"""
Converts depth map into bin indices
Args:
depth_map [torch.Tensor(H, W)]: Depth Map
mode [string]: Discretization mode (See https://arxiv.org/pdf/2005.13423.pdf for more details)
UD: Uniform discretization
LID: Linear increasing discretization
SID: Spacing increasing discretization
depth_min [float]: Minimum depth value
depth_max [float]: Maximum depth value
num_bins [int]: Number of depth bins
target [bool]: Whether the depth bin indices will be used for a target tensor in loss comparison
Returns:
indices [torch.Tensor(H, W)]: Depth bin indices
"""
if mode == "UD":
bin_size = (depth_max - depth_min) / num_bins
indices = ((depth_map - depth_min) / bin_size)
elif mode == "LID":
bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
elif mode == "SID":
indices = num_bins * (torch.log(1 + depth_map) - math.log(1 + depth_min)) / \
(math.log(1 + depth_max) - math.log(1 + depth_min))
else:
raise NotImplementedError
if target:
# Remove indices outside of bounds
mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
indices[mask] = num_bins
# Convert to integer
indices = indices.type(torch.int64)
return indices |
extraPackages/matplotlib-3.0.3/examples/text_labels_and_annotations/date_index_formatter.py | dolboBobo/python3_ios | 130 | 12637912 | """
=====================================
Custom tick formatter for time series
=====================================
When plotting time series, e.g., financial time series, one often wants
to leave out days on which there is no data, i.e. weekends. The example
below shows how to use an 'index formatter' to achieve the desired plot
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
# Load a numpy record array from yahoo csv data with fields date, open, close,
# volume, adj_close from the mpl-data/example directory. The record array
# stores the date as an np.datetime64 with a day unit ('D') in the date column.
with cbook.get_sample_data('goog.npz') as datafile:
r = np.load(datafile)['price_data'].view(np.recarray)
r = r[-30:] # get the last 30 days
# Matplotlib works better with datetime.datetime than np.datetime64, but the
# latter is more portable.
date = r.date.astype('O')
# first we'll do it the default way, with gaps on weekends
fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
ax = axes[0]
ax.plot(date, r.adj_close, 'o-')
ax.set_title("Default")
fig.autofmt_xdate()
# next we'll write a custom formatter
N = len(r)
ind = np.arange(N) # the evenly spaced plot indices
def format_date(x, pos=None):
thisind = np.clip(int(x + 0.5), 0, N - 1)
return date[thisind].strftime('%Y-%m-%d')
ax = axes[1]
ax.plot(ind, r.adj_close, 'o-')
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
ax.set_title("Custom tick formatter")
fig.autofmt_xdate()
plt.show()
|
ratelimit/backends/__init__.py | abersheeran/asgi-ratelim | 136 | 12637919 | from .base import BaseBackend # noqa: F401
|
upvote/gae/datastore/models/event.py | iwikmai/upvote | 453 | 12637948 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing execution events from the various platforms."""
import logging
from google.appengine.ext import ndb
from google.appengine.ext.ndb import polymodel
from upvote.gae.datastore.models import mixin
from upvote.shared import constants
class Event(mixin.Base, polymodel.PolyModel):
"""Blockable Event.
key = Key(User, user_email) -> Key(Host, host_id) ->
Key(..., Blockable, hash) -> Key(Event, '1')
NOTE: The Blockable key may be of any length (e.g. for Bundles).
NOTE: The Event id is always '1'.
Attributes:
blockable_key: key, key to the blockable associated with this event.
cert_key: key, key to the cert associated with this event.
host_id: str, unique ID for the host on which this event occurred.
file_name: str, filename of the blockable on last block.
file_path: str, path of the blockable on last block.
publisher: str, publisher of this file.
version: str, version number of this file.
executing_user: str, user who executed the binary (may be a system user).
event_type: str, reason this event was initially created.
recorded_dt: datetime, when this event was received by the server.
first_blocked_dt: datetime, time of the first block.
last_blocked_dt: datetime, time of the last block.
count: int, the number of times a given event has occurred.
"""
blockable_key = ndb.KeyProperty()
cert_key = ndb.KeyProperty()
file_name = ndb.StringProperty()
file_path = ndb.StringProperty()
publisher = ndb.StringProperty()
version = ndb.StringProperty()
host_id = ndb.StringProperty()
executing_user = ndb.StringProperty()
event_type = ndb.StringProperty(
choices=constants.EVENT_TYPE.SET_ALL, required=True)
recorded_dt = ndb.DateTimeProperty(auto_now_add=True)
first_blocked_dt = ndb.DateTimeProperty()
last_blocked_dt = ndb.DateTimeProperty()
count = ndb.IntegerProperty(default=1)
@property
def run_by_local_admin(self):
"""Whether the Event was generated by the platform's admin user.
Due to the platform-specific nature of "admin user," this property should be
overridden by each platform's derivative models.
Returns:
bool, See method description.
"""
return False
@property
def user_key(self):
if not self.key:
return None
return ndb.Key(flat=self.key.pairs()[0])
def _DedupeMoreRecentEvent(self, more_recent_event):
"""Updates if the related Event is more recent than the current one."""
self.last_blocked_dt = more_recent_event.last_blocked_dt
self.file_name = more_recent_event.file_name
self.file_path = more_recent_event.file_path
self.executing_user = more_recent_event.executing_user
self.event_type = more_recent_event.event_type
def _DedupeEarlierEvent(self, earlier_event):
"""Updates if the related Event occurred earlier than the current one."""
self.first_blocked_dt = earlier_event.first_blocked_dt
def Dedupe(self, related_event):
"""Updates the current Event state with another, related Event."""
self.count += related_event.count or 1
# related_event registered an Event earlier than the earliest recorded date
if self.first_blocked_dt > related_event.first_blocked_dt:
self._DedupeEarlierEvent(related_event)
# related_event registered an Event more recently than the most recent
# recorded date
if self.last_blocked_dt < related_event.last_blocked_dt:
self._DedupeMoreRecentEvent(related_event)
@classmethod
def DedupeMultiple(cls, events):
"""Dedupes an iterable of new-style Events.
Args:
events: An iterable of new-style Event entities to be deduped.
Returns:
A list of deduped Events.
"""
distinct_events = {}
for event in events:
duped_event = distinct_events.get(event.key)
if duped_event:
duped_event.Dedupe(event)
else:
distinct_events[event.key] = event
return list(distinct_events.values())
def to_dict(self, include=None, exclude=None): # pylint: disable=g-bad-name
result = super(Event, self).to_dict(include=include, exclude=exclude)
result['blockable_id'] = self.blockable_key.id()
return result
class Bit9Event(mixin.Bit9, Event):
"""An event from Bit9.
Attributes:
description: str, Description.
bit9_id: int, The largest Bit9 database ID associated with this event.
"""
description = ndb.StringProperty()
bit9_id = ndb.IntegerProperty(default=0)
@property
def run_by_local_admin(self):
return self.executing_user == constants.LOCAL_ADMIN.WINDOWS
def _DedupeMoreRecentEvent(self, more_recent_event):
"""Updates if the related Event is more recent than the current one."""
if self.bit9_id > more_recent_event.bit9_id:
logging.warning(
'Database ID out-of-order with respect to event timestamp: '
'(id=%s, dt=%s) occurred earlier than (id=%s, dt=%s)', self.bit9_id,
self.last_blocked_dt, more_recent_event.bit9_id,
more_recent_event.last_blocked_dt)
super(Bit9Event, self)._DedupeMoreRecentEvent(more_recent_event)
def Dedupe(self, related_event):
"""See base class."""
super(Bit9Event, self).Dedupe(related_event)
# We only care about the most recent event with respect to its ID in Bit9.
self.bit9_id = max(self.bit9_id, related_event.bit9_id)
class QuarantineMetadata(ndb.Model):
"""Metadata provided by macOS File Quarantine.
Attributes:
data_url: str, the URL the file was downloaded from
referer_url: str, the referer of the above URL
downloaded_dt: datetime, when the file was downloaded
agent_bundle_id: str, the program that downloaded the file
"""
data_url = ndb.StringProperty(indexed=False)
referer_url = ndb.StringProperty(indexed=False)
downloaded_dt = ndb.DateTimeProperty()
agent_bundle_id = ndb.StringProperty()
class SantaEvent(mixin.Santa, Event):
"""An event from Santa.
Attributes:
bundle_key: ndb.Key, If present, the key of the bundle to which the
associated Blockable belongs.
quarantine: QuarantineMetadata, metadata detailing the provenance of the
Blockable.
event_type: str, the reason that the last block was generated
bundle_path: str, path of the associated bundle on the last block.
DEPRECATED
cert_sha256: the SHA-256 of the cert this file was signed with
"""
bundle_key = ndb.KeyProperty()
quarantine = ndb.StructuredProperty(QuarantineMetadata)
event_type = ndb.StringProperty(
choices=constants.EVENT_TYPE.SET_ALL, required=True)
bundle_path = ndb.StringProperty()
# DEPRECATED
cert_sha256 = ndb.StringProperty()
@property
def run_by_local_admin(self):
return self.executing_user == constants.LOCAL_ADMIN.MACOS
def _DedupeMoreRecentEvent(self, more_recent_event):
"""Updates if the related Event is more recent than the current one."""
super(SantaEvent, self)._DedupeMoreRecentEvent(more_recent_event)
self.bundle_path = more_recent_event.bundle_path
# Keep the newest non-null quarantine information
if more_recent_event.quarantine:
self.quarantine = more_recent_event.quarantine
def _DedupeEarlierEvent(self, earlier_event):
"""Updates if the related Event occurred earlier than the current one."""
super(SantaEvent, self)._DedupeEarlierEvent(earlier_event)
# If an older Event has quarantine information and this one does not, pull
# in the older Event's data
if not self.quarantine and earlier_event.quarantine:
self.quarantine = earlier_event.quarantine
|
examples/sfwa_ukf/python/ukf/__init__.py | rafaelrietmann/ukf | 320 | 12637994 | <reponame>rafaelrietmann/ukf
#Copyright (C) 2013 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
from ctypes import *
# Taken from c/cukf.h
UKF_PRECISION_FLOAT = 0
UKF_PRECISION_DOUBLE = 1
UKF_MODEL_NONE = 0
UKF_MODEL_CENTRIPETAL = 1
UKF_MODEL_CUSTOM = 2
UKF_MODEL_X8 = 3
state = None
covariance = None
model = UKF_MODEL_NONE
custom_model = None
# Internal globals, set during init
_cukf = None
_REAL_T = None
_CONTROL_DIM = None
_CUSTOM_MODEL_FUNC = None
class _SigmaPoint(object):
def __init__(self, arr):
self.position = (arr[0], arr[1], arr[2])
self.velocity = (arr[3], arr[4], arr[5])
self.acceleration = (arr[6], arr[7], arr[8])
self.attitude = (arr[9], arr[10], arr[11], arr[12])
self.angular_velocity = (arr[13], arr[14], arr[15])
self.angular_acceleration = (arr[16], arr[17], arr[18])
self.wind_velocity = (arr[19], arr[20], arr[21])
self.gyro_bias = (arr[22], arr[23], arr[24])
def __repr__(self):
return str(self.__dict__)
# Wrapper around the custom model function
_custom_model_func_wrapper_cb = None # avoid garbage-collection
def _custom_model_wrapper(state, control, output):
if not custom_model:
return
result = custom_model(_SigmaPoint(state.contents),
tuple(control.contents[0:3]))
output.contents = (_REAL_T * 6)(*result[0:6])
# Internal classes, wrapping cukf structs directly
class _IOBoardParams(Structure):
pass
class _State(Structure):
def __repr__(self):
fields = {
"position": tuple(self.position),
"velocity": tuple(self.velocity),
"acceleration": tuple(self.acceleration),
"attitude": tuple(self.attitude),
"angular_velocity": tuple(self.angular_velocity),
"angular_acceleration": tuple(self.angular_acceleration),
"wind_velocity": tuple(self.wind_velocity),
"gyro_bias": tuple(self.gyro_bias)
}
return str(fields)
# Public interface
def iterate(dt, control=None):
global _cukf, state, model
if not _cukf:
raise RuntimeError("Please call ukf.init()")
if control is None:
control = (0.0, ) * _CONTROL_DIM
elif len(control) != _CONTROL_DIM:
raise ValueError("Control vector must contain %d elements" %
_CONTROL_DIM)
_cukf.ukf_choose_dynamics(model)
if model == UKF_MODEL_CUSTOM:
if not custom_model:
raise RuntimeError(
"Can't use ukf.model == UKF_MODEL_CUSTOM without a value " +
"for ukf.custom_model"
)
_cukf.ukf_set_state(state)
_cukf.ukf_iterate(dt, (_REAL_T * _CONTROL_DIM)(*control))
_cukf.ukf_sensor_clear()
_cukf.ukf_get_state(state)
#_cukf.ukf_get_state_covariance(covariance)
def set_sensors(accelerometer=None, gyroscope=None, magnetometer=None,
gps_position=None, gps_velocity=None, pitot_tas=None,
barometer_amsl=None):
if accelerometer is not None:
_cukf.ukf_sensor_set_accelerometer(*accelerometer)
if gyroscope is not None:
_cukf.ukf_sensor_set_gyroscope(*gyroscope)
if magnetometer is not None:
_cukf.ukf_sensor_set_magnetometer(*magnetometer)
if gps_position is not None:
_cukf.ukf_sensor_set_gps_position(*gps_position)
if gps_velocity is not None:
_cukf.ukf_sensor_set_gps_velocity(*gps_velocity)
if pitot_tas is not None:
_cukf.ukf_sensor_set_pitot_tas(pitot_tas)
if barometer_amsl is not None:
_cukf.ukf_sensor_set_barometer_amsl(barometer_amsl)
def configure_sensors(accelerometer_offset=None,
accelerometer_orientation=None, gyroscope_orientation=None,
magnetometer_orientation=None, wmm_field=None,
accelerometer_covariance=None, gyroscope_covariance=None,
magnetometer_covariance=None, gps_position_covariance=None,
gps_velocity_covariance=None, pitot_tas_covariance=None,
barometer_amsl_covariance=None):
params = _IOBoardParams()
if accelerometer_offset is not None:
params.accel_offset = accelerometer_offset
else:
params.accel_offset = (0.0, 0.0, 0.0)
if accelerometer_orientation is not None:
params.accel_orientation = accelerometer_orientation
else:
params.accel_orientation = (0.0, 0.0, 0.0, 1.0)
if gyroscope_orientation is not None:
params.gyro_orientation = gyroscope_orientation
else:
params.gyro_orientation = (0.0, 0.0, 0.0, 1.0)
if magnetometer_orientation is not None:
params.mag_orientation = magnetometer_orientation
else:
params.mag_orientation = (0.0, 0.0, 0.0, 1.0)
if wmm_field is not None:
params.mag_field = wmm_field
else:
params.mag_field = (1.0, 0.0, 0.0)
if getattr(accelerometer_covariance, '__iter__', False):
params.accel_covariance = accelerometer_covariance
elif accelerometer_covariance is not None:
params.accel_covariance = (accelerometer_covariance, ) * 3
else:
params.accel_covariance = (1.0, 1.0, 1.0)
if getattr(gyroscope_covariance, '__iter__', False):
params.gyro_covariance = gyroscope_covariance
elif gyroscope_covariance is not None:
params.gyro_covariance = (gyroscope_covariance, ) * 3
else:
params.gyro_covariance = (1.0, 1.0, 1.0)
if getattr(magnetometer_covariance, '__iter__', False):
params.mag_covariance = magnetometer_covariance
elif magnetometer_covariance is not None:
params.mag_covariance = (magnetometer_covariance, ) * 3
else:
params.mag_covariance = (1.0, 1.0, 1.0)
if getattr(gps_position_covariance, '__iter__', False):
params.gps_position_covariance = gps_position_covariance
elif gps_position_covariance is not None:
params.gps_position_covariance = (gps_position_covariance, ) * 3
else:
params.gps_position_covariance = (1.0, 1.0, 1.0)
if getattr(gps_velocity_covariance, '__iter__', False):
params.gps_velocity_covariance = gps_velocity_covariance
elif gps_velocity_covariance is not None:
params.gps_velocity_covariance = (gps_velocity_covariance, ) * 3
else:
params.gps_velocity_covariance = (1.0, 1.0, 1.0)
if pitot_tas_covariance is not None:
params.pitot_covariance = pitot_tas_covariance
else:
params.pitot_covariance = 1.0
if barometer_amsl_covariance is not None:
params.barometer_amsl_covariance = barometer_amsl_covariance
else:
params.barometer_amsl_covariance = 1.0
_cukf.ukf_set_params(params)
def configure_process_noise(process_noise_covariance):
_cukf.ukf_set_process_noise((_REAL_T * 24)(*process_noise_covariance))
def init(implementation="c"):
global _cukf, _REAL_T, _CONTROL_DIM, _CUSTOM_MODEL_FUNC, state, \
_custom_model_func_wrapper_cb
# Load the requested library and determine configuration parameters
if implementation == "c":
lib = os.path.join(os.path.dirname(__file__), "c", "libcukf.dylib")
elif implementation == "c66x":
lib = os.path.join(os.path.dirname(__file__), "ccs-c66x",
"libc66ukf.dylib")
else:
raise NameError(
"Unknown UKF implementation: %s (options are 'c', 'c66x')" %
implementation)
_cukf = cdll.LoadLibrary(lib)
_cukf.ukf_init.argtypes = []
_cukf.ukf_init.restype = None
_cukf.ukf_config_get_precision.argtypes = []
_cukf.ukf_config_get_precision.restype = c_long
_cukf.ukf_config_get_state_dim.argtypes = []
_cukf.ukf_config_get_state_dim.restype = c_long
_cukf.ukf_config_get_control_dim.argtypes = []
_cukf.ukf_config_get_control_dim.restype = c_long
_cukf.ukf_config_get_measurement_dim.argtypes = []
_cukf.ukf_config_get_measurement_dim.restype = c_long
_PRECISION = _cukf.ukf_config_get_precision()
_REAL_T = c_double if _PRECISION == UKF_PRECISION_DOUBLE else c_float
_CONTROL_DIM = _cukf.ukf_config_get_control_dim()
_STATE_DIM = _cukf.ukf_config_get_state_dim()
_MEASUREMENT_DIM = _cukf.ukf_config_get_measurement_dim()
_IOBoardParams._fields_ = [
("accel_orientation", _REAL_T * 4),
("accel_offset", _REAL_T * 3),
("gyro_orientation", _REAL_T * 4),
("mag_orientation", _REAL_T * 4),
("mag_field", _REAL_T * 3),
("accel_covariance", _REAL_T * 3),
("gyro_covariance", _REAL_T * 3),
("mag_covariance", _REAL_T * 3),
("gps_position_covariance", _REAL_T * 3),
("gps_velocity_covariance", _REAL_T * 3),
("pitot_covariance", _REAL_T),
("barometer_amsl_covariance", _REAL_T)
]
_State._fields_ = [
("position", _REAL_T * 3),
("velocity", _REAL_T * 3),
("acceleration", _REAL_T * 3),
("attitude", _REAL_T * 4),
("angular_velocity", _REAL_T * 3),
("angular_acceleration", _REAL_T * 3),
("wind_velocity", _REAL_T * 3),
("gyro_bias", _REAL_T * 3)
]
# Set up the function prototypes
_cukf.ukf_set_position.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_position.restype = None
_cukf.ukf_set_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_velocity.restype = None
_cukf.ukf_set_acceleration.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_acceleration.restype = None
_cukf.ukf_set_attitude.argtypes = [_REAL_T, _REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_attitude.restype = None
_cukf.ukf_set_angular_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_angular_velocity.restype = None
_cukf.ukf_set_angular_acceleration.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_angular_acceleration.restype = None
_cukf.ukf_set_wind_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_wind_velocity.restype = None
_cukf.ukf_set_gyro_bias.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_gyro_bias.restype = None
_cukf.ukf_get_state.argtypes = [POINTER(_State)]
_cukf.ukf_get_state.restype = None
_cukf.ukf_set_state.argtypes = [POINTER(_State)]
_cukf.ukf_set_state.restype = None
_cukf.ukf_get_state_covariance.argtypes = [
POINTER(_REAL_T * (_STATE_DIM**2))]
_cukf.ukf_get_state_covariance.restype = None
_cukf.ukf_sensor_clear.argtypes = []
_cukf.ukf_sensor_clear.restype = None
_cukf.ukf_sensor_set_accelerometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_accelerometer.restype = None
_cukf.ukf_sensor_set_gyroscope.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_gyroscope.restype = None
_cukf.ukf_sensor_set_magnetometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_magnetometer.restype = None
_cukf.ukf_sensor_set_gps_position.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_gps_position.restype = None
_cukf.ukf_sensor_set_gps_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_gps_velocity.restype = None
_cukf.ukf_sensor_set_pitot_tas.argtypes = [_REAL_T]
_cukf.ukf_sensor_set_pitot_tas.restype = None
_cukf.ukf_sensor_set_barometer_amsl.argtypes = [_REAL_T]
_cukf.ukf_sensor_set_barometer_amsl.restype = None
_cukf.ukf_set_params.argtypes = [POINTER(_IOBoardParams)]
_cukf.ukf_set_params.restype = None
_cukf.ukf_choose_dynamics.argtypes = [c_int]
_cukf.ukf_choose_dynamics.restype = None
_CUSTOM_MODEL_FUNC = CFUNCTYPE(None,
POINTER(_REAL_T * (_STATE_DIM + 1)),
POINTER(_REAL_T * _CONTROL_DIM),
POINTER(_REAL_T * 6))
_cukf.ukf_set_custom_dynamics_model.argtypes = [_CUSTOM_MODEL_FUNC]
_cukf.ukf_set_custom_dynamics_model.restype = None
_cukf.ukf_iterate.argtypes = [c_float, POINTER(_REAL_T * _CONTROL_DIM)]
_cukf.ukf_iterate.restype = None
_cukf.ukf_set_process_noise.argtypes = [POINTER(_REAL_T * _STATE_DIM)]
_cukf.ukf_set_process_noise.restype = None
# Initialize the library
_cukf.ukf_init()
# Set the custom model callback
_custom_model_func_wrapper_cb = _CUSTOM_MODEL_FUNC(_custom_model_wrapper)
_cukf.ukf_set_custom_dynamics_model(_custom_model_func_wrapper_cb)
# Set up the state
state = _State()
_cukf.ukf_get_state(state)
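# Illustrative end-to-end usage (added; not part of the original module):
#   import ukf
#   ukf.init(implementation="c")        # loads libcukf and sets up prototypes
#   ukf.configure_sensors(accelerometer_covariance=1e-3,
#                         gyroscope_covariance=1e-5)
#   ukf.set_sensors(accelerometer=(0.0, 0.0, -9.81),
#                   gyroscope=(0.0, 0.0, 0.0))
#   ukf.iterate(0.01)                   # propagate the filter by dt = 0.01 s
#   print(ukf.state)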
|
src/main/resources/resource/Deeplearning4j/Deeplearning4j.py | holgerfriedrich/myrobotlab | 179 | 12638034 | <gh_stars>100-1000
##################################################################################
# Deeplearning4j.py
# description: A wrapper service for the Deeplearning4j framework.
# categories: ai
# more info @: http://myrobotlab.org/service/Deeplearning4j
##################################################################################
# start the deeplearning4j service
deeplearning4j = Runtime.start('deeplearning4j','Deeplearning4j')
# load the VGG16 model from the zoo
deeplearning4j.loadVGG16()
# run an image file through the model and get the classifications / confidence
classifications = deeplearning4j.classifyImageFileVGG16("image0-1.png")
# print them out... it's a dictionary/map of label to confidence level (between 0-1)
for label in classifications:
print(label + " : " + str(classifications.get(label)))
|
Liez-python-code/0000/0000.py | saurabh896/python-1 | 3,976 | 12638090 | <gh_stars>1000+
from PIL import Image, ImageDraw, ImageFont
def add_num():
im = Image.open('in.jpg')
xsize, ysize = im.size
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("arial.ttf", xsize // 3)
draw.text((xsize // 5 * 4, 0), '3', (250, 128, 114), font)  # anchor the digit near the top-right corner
im.save('out.jpg')
add_num()
|
seahub/api2/endpoints/be_shared_repo.py | weimens/seahub | 420 | 12638092 | <reponame>weimens/seahub
# Copyright (c) 2012-2016 Seafile Ltd.
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
import seaserv
from seaserv import seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.utils import is_valid_username, is_org_context, send_perm_audit_msg
from seahub.share.models import ExtraSharePermission
from seahub.share.utils import check_user_share_in_permission
json_content_type = 'application/json; charset=utf-8'
class BeSharedRepo(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def delete(self, request, repo_id, format=None):
if not seafile_api.get_repo(repo_id):
return api_error(status.HTTP_400_BAD_REQUEST, 'Library does not exist')
username = request.user.username
share_type = request.GET.get('share_type', None)
if share_type == 'personal':
from_email = request.GET.get('from', None)
if not is_valid_username(from_email):
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid argument')
is_org = is_org_context(request)
repo = seafile_api.get_repo(repo_id)
permission = check_user_share_in_permission(repo_id, username, is_org)
if is_org:
org_id = request.user.org.org_id
seaserv.seafserv_threaded_rpc.org_remove_share(org_id,
repo_id,
from_email,
username)
else:
seaserv.remove_share(repo_id, from_email, username)
# Delete data of ExtraSharePermission table.
ExtraSharePermission.objects.delete_share_permission(repo_id,
username)
if repo.is_virtual:
send_perm_audit_msg('delete-repo-perm', username, username,
repo.origin_repo_id, repo.origin_path, permission)
else:
send_perm_audit_msg('delete-repo-perm', username, username,
repo_id, '/', permission)
elif share_type == 'group':
from_email = request.GET.get('from', None)
if not is_valid_username(from_email):
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid argument')
group_id = request.GET.get('group_id', None)
group = seaserv.get_group(group_id)
if not group:
return api_error(status.HTTP_400_BAD_REQUEST, 'Group does not exist')
if not seaserv.check_group_staff(group_id, username) and \
not seafile_api.is_repo_owner(username, repo_id):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
if seaserv.is_org_group(group_id):
org_id = seaserv.get_org_id_by_group(group_id)
seaserv.del_org_group_repo(repo_id, org_id, group_id)
else:
seafile_api.unset_group_repo(repo_id, group_id, from_email)
elif share_type == 'public':
if is_org_context(request):
org_repo_owner = seafile_api.get_org_repo_owner(repo_id)
is_org_repo_owner = True if org_repo_owner == username else False
if not request.user.org.is_staff and not is_org_repo_owner:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
org_id = request.user.org.org_id
seaserv.seafserv_threaded_rpc.unset_org_inner_pub_repo(org_id,
repo_id)
else:
if not seafile_api.is_repo_owner(username, repo_id) and \
not request.user.is_staff:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
seaserv.unset_inner_pub_repo(repo_id)
else:
return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid argument')
return Response({'success': True}, status=status.HTTP_200_OK)
|
datar/dplyr/arrange.py | stjordanis/datar | 110 | 12638113 | <reponame>stjordanis/datar<filename>datar/dplyr/arrange.py
"""Arrange rows by column values
See source https://github.com/tidyverse/dplyr/blob/master/R/arrange.R
"""
from typing import Any, Iterable, Mapping, Tuple
from pandas import DataFrame
from pipda import register_verb
from pipda.symbolic import DirectRefAttr, DirectRefItem
from pipda.function import FastEvalFunction
from pipda.utils import CallingEnvs
from ..core.contexts import Context
from ..core.utils import check_column_uniqueness, reconstruct_tibble
from ..base import union
from .group_data import group_vars
from .group_by import ungroup
from .mutate import mutate
from .desc import desc
@register_verb(DataFrame, context=Context.PENDING)
def arrange(
_data: DataFrame, *args: Any, _by_group: bool = False, **kwargs: Any
) -> DataFrame:
"""orders the rows of a data frame by the values of selected columns.
The original API:
https://dplyr.tidyverse.org/reference/arrange.html
Args:
_data: A data frame
*args: Variables, or functions of variables.
Use desc() to sort a variable in descending order.
_by_group: If TRUE, will sort first by grouping variable.
Applies to grouped data frames only.
**kwargs: Name-value pairs that apply with mutate
Returns:
An object of the same type as _data.
The output has the following properties:
All rows appear in the output, but (usually) in a different place.
Columns are not modified.
Groups are not modified.
Data frame attributes are preserved.
"""
if not args and not kwargs:
return _data
check_column_uniqueness(
_data, "Cannot arrange a data frame with duplicate names"
)
# See if we don't need to mutate
# If all series are the ones from the _data itself
if not kwargs:
by = _series_cols(args, _data.columns)
if by is not None:
if _by_group:
gvars = group_vars(_data, __calling_env=CallingEnvs.REGULAR)
gby = dict(zip(gvars, [True] * len(gvars)))
gby.update(by)
by = gby
out = _data.sort_values(
list(by), ascending=list(by.values())
).reset_index(drop=True)
return reconstruct_tibble(_data, out, keep_rowwise=True)
if not _by_group:
sorting_df = mutate(
ungroup(_data, __calling_env=CallingEnvs.REGULAR),
*args,
**kwargs,
_keep="none",
__calling_env=CallingEnvs.REGULAR,
)
sorting_df = sorting_df.sort_values(by=sorting_df.columns.tolist())
else:
gvars = group_vars(_data, __calling_env=CallingEnvs.REGULAR)
sorting_df = ungroup(
mutate(
_data,
*args,
**kwargs,
_keep="none",
__calling_env=CallingEnvs.REGULAR,
),
__calling_env=CallingEnvs.REGULAR,
)
by = union(gvars, sorting_df.columns, __calling_env=CallingEnvs.REGULAR)
sorting_df = sorting_df.sort_values(by=by)
out = _data.loc[sorting_df.index, :].reset_index(drop=True)
return reconstruct_tibble(_data, out, keep_rowwise=True)
def _series_col(arg: Any, columns: Iterable[str]) -> str:
"""Turn a single arg into name and desc if possible"""
if (
isinstance(arg, (DirectRefAttr, DirectRefItem))
and arg._pipda_ref in columns
):
return arg._pipda_ref
return None
def _series_cols(
args: Tuple,
columns: Iterable[str],
) -> Mapping[str, bool]:
"""Check if one of the args is a series column or columns in original df"""
out = {}
for arg in args:
sercol = _series_col(arg, columns)
if sercol:
out[sercol] = True
elif (
isinstance(arg, FastEvalFunction)
and arg._pipda_func is desc.__origfunc__
):
for col in arg._pipda_args:
sercol = _series_col(col, columns)
if sercol is None:
return None
out[sercol] = False
else:
return None
return out
|
src/mygrad/typing/_array_like.py | kw-0/MyGrad | 147 | 12638139 | import sys
from typing import TYPE_CHECKING, List, Sequence, Tuple, TypeVar, Union
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from mygrad import Tensor
if sys.version_info >= (3, 8): # pragma: no cover
from typing import Protocol
HAS_PROTOCOL = True
else: # pragma: no cover
try:
from typing_extensions import Protocol
except ImportError:
HAS_PROTOCOL = False
Protocol = object
else:
HAS_PROTOCOL = True
if not TYPE_CHECKING and not HAS_PROTOCOL: # pragma: no cover
class ImplementsArray:
def __array__(self, dtype: None = ...) -> np.ndarray:
...
else: # pragma: no cover
class ImplementsArray(Protocol):
def __array__(self, dtype: None = ...) -> np.ndarray:
...
Real = Union[int, float]
sr1 = Sequence[Real]
sr2 = Sequence[sr1]
sr3 = Sequence[sr2]
sr4 = Sequence[sr3]
# Sequence[Union[s1, s2]] is *not* valid!
SequenceNDReals = Union[sr1, sr2, sr3, sr4]
# include Tensor and ndarray explicitly in case `ImplementsArray`
# is not protocol
if TYPE_CHECKING: # pragma: no cover
ArrayLike = Union[Real, "Tensor", np.ndarray, ImplementsArray, SequenceNDReals]
else: # pragma: no cover
ArrayLike = TypeVar(
"ArrayLike", Real, "Tensor", np.ndarray, ImplementsArray, SequenceNDReals
)
sb1 = Sequence[bool]
sb2 = Sequence[sb1]
sb3 = Sequence[sb2]
sb4 = Sequence[sb3]
# Sequence[Union[s1, s2]] is *not* valid!
SequenceNDBools = Union[sb1, sb2, sb3, sb4]
if TYPE_CHECKING: # pragma: no cover
Mask = Union[ImplementsArray, np.ndarray, "Tensor", bool, SequenceNDBools]
else: # pragma: no cover
Mask = TypeVar(
"Mask",
bound=Union[ImplementsArray, np.ndarray, "Tensor", bool, SequenceNDBools],
)
Index = Union[
int,
None,
slice,
ImplementsArray,
np.ndarray,
Sequence[int],
Tuple["Index"],
List["Index"],
]
|
my_algo.py | codesociety/friartuck | 157 | 12638148 | <reponame>codesociety/friartuck<filename>my_algo.py<gh_stars>100-1000
"""
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime, timedelta
import logging
import pandas as pd
from friartuck.api import OrderType
log = logging.getLogger("friar_tuck")
def initialize(context, data):
log.info("hello, I am in initialize...")
context.assets = []
context.symbol_metadata = {}
dataset = pd.read_csv("https://dl.dropboxusercontent.com/s/vz8cudbw6t9i04e/my_universe.csv?dl=0")
for (index, series) in dataset.iterrows():
asset = lookup_security(series["symbol"])
context.symbol_metadata[asset] = series
context.assets.append(asset)
log.debug("symbol_metadata (%s)" % context.symbol_metadata)
# timestamp of current handle_data event in environment timezone
date = get_datetime()
log.info("datetime: %s" % date)
context.aapl = lookup_security("AAPL")
context.wtw = lookup_security("WTW")
context.fit = lookup_security("FIT")
context.gevo = lookup_security("GEVO")
context.czr = lookup_security("CZR")
log.info(context.account)
log.info(context.portfolio)
log.info("pnl(%s)" % context.portfolio.pnl)
for sec in context.portfolio.positions:
log.info("symbol(%s) pos(%s) " % (sec.symbol, context.portfolio.positions[sec]))
# hist_quotes = data.history([context.aapl, context.wtw], frequency='15m', bar_count=1400, field=['open', 'close'])
hist_quotes = data.current([context.aapl, context.wtw], field=['open', 'close'])
log.debug(hist_quotes)
# fifteenMinInSecs = (15*60)
now = datetime.now()
multiples = int(now.minute/15)
diff = now.minute-(multiples*15)
next_trig = now + timedelta(minutes=(15-diff))
log.info("next trigger:%s, mult:%s, diff:%s" % (next_trig, multiples, diff))
# order_id = order_shares(security=context.czr, shares=1, order_type=OrderType(stop_price=15.51), time_in_force='gtc')
# order_id = order_for_robinhood(context=context, security=context.fit, weight=1.0, order_type=OrderType(stop_price=6.56))
# order_id = order_for_robinhood(context=context, security=context.gevo, weight=1.0, order_type=OrderType(stop_price=0.50))
# order_id = order_shares(context.gevo, -1, order_type=OrderType(stop_price=0.50), time_in_force='gtc')
# order_id = order_shares(context.fit, 1, order_type=OrderType(stop_price=6.04), time_in_force='gtc')
# order = get_order(order_id)
# log.info("order=%s" % order)
# open_orders = get_open_orders()
"""
last_orders_by_side = get_last_filled_orders_by_side(context.gevo)
log.info("last_buy: %s" % last_orders_by_side["buy"])
log.info("last_sell: %s" % last_orders_by_side["sell"])
current_quote = data.current(context.aapl, field=['bid_price', 'ask_price'])
log.info(current_quote)
bid_price = data.current(context.aapl, field='bid_price')
log.info("bid_price=(%s)" % bid_price)
ask_price = data.current(context.aapl, field='ask_price')
log.info("ask_price=(%s)" % ask_price)
price = 13.18
log.info("CZR price_convert_up_by_tick_size price(%s) converted (%s)" % (price, context.czr.price_convert_up_by_tick_size(price)))
log.info("CZR price_convert_down_by_tick_size price(%s) converted (%s)" % (price, context.czr.price_convert_down_by_tick_size(price)))
price = 162.76
log.info("AAPL price_convert_up_by_tick_size price(%s) converted (%s)" % (price, context.aapl.price_convert_up_by_tick_size(price)))
log.info("AAPL price_convert_down_by_tick_size price(%s) converted (%s)" % (price, context.aapl.price_convert_down_by_tick_size(price)))
"""
def on_market_open(context, data):
log.info("on market open")
pass
def handle_data(context, data):
log.info("hello, I am in handle_data")
current_quote = data.current(context.aapl, field=['close', 'open'])
log.debug(current_quote)
current_quote = data.current(context.wtw, field='close')
log.debug(current_quote)
current_quote = data.current([context.aapl, context.wtw], field='close')
log.debug(current_quote)
hist_quotes = data.history([context.aapl, context.wtw], frequency='1m', bar_count=10, field='close')
log.debug(hist_quotes)
log.debug(context.fit)
current_data = data.current(context.fit, field=['close', 'price'])
log.debug(current_data)
#order_id = order_for_robinhood(context=context, security=context.fit, weight=1.0, order_type=OrderType(stop_price=6.56))
#order = get_order(order_id)
#log.info("order=%s" % order)
"""
open_orders = get_open_orders(sec2)
log.info("FIT open_orders=%s" % open_orders)
open_orders = get_open_orders()
log.info("ALL open_orders=%s" % open_orders)
cancel_order(order_id)
"""
def order_for_robinhood(context, security, weight, order_type=None):
"""
This is a custom order method for this particular algorithm and
places orders based on:
(1) How much of each position in context.assets we currently hold
(2) How much cash we currently hold
This means that if you have existing positions (e.g. AAPL),
your positions in that security will not be taken into
account when calculating order amounts.
The portfolio value that we'll be ordering on is labeled
`valid_portfolio_value`.
If you'd like to use a Stop/Limit/Stop-Limit Order please follow the
following format:
STOP - order_type = OrderType(stop_price=y)
LIMIT - order_type = OrderType(limit_price=x)
STOPLIMIT - order_type = OrderType(limit_price=x, stop_price=y)
"""
# We use .95 as the cash because all market orders are converted into
# limit orders with a 5% buffer. So any market order placed through
# Robinhood is submitted as a limit order with (last_traded_price * 1.05)
valid_portfolio_value = context.portfolio.cash * .95
# Calculate the percent of each security that we want to hold
percent_to_order = weight - get_percent_held(context, security, valid_portfolio_value)
# If within 1% of target weight, ignore.
if abs(percent_to_order) < .01:
log.info("Can't Make Order - Percent (%s) to order is less than 0.01 " % percent_to_order)
return
# Calculate the dollar value to order for this security
value_to_order = percent_to_order * valid_portfolio_value
if order_type:
return order_value(security, value_to_order, order_type=order_type, time_in_force='gtc')
else:
return order_value(security, value_to_order, time_in_force='gtc')
def get_percent_held(context, security, portfolio_value):
"""
This calculates the percentage of each security that we currently
hold in the portfolio.
"""
if security in context.portfolio.positions:
position = context.portfolio.positions[security]
value_held = position.last_sale_price * position.amount
percent_held = value_held / float(portfolio_value)
return percent_held
else:
# If we don't hold any positions, return 0%
return 0.0 |
inselect/tests/gui/test_action_state.py | NaturalHistoryMuseum/inselect | 128 | 12638162 | import unittest
from pathlib import Path
from .gui_test import GUITest
TESTDATA = Path(__file__).parent.parent / 'test_data'
class TestActionState(GUITest):
"""Test the state of UI actions
"""
def _test_no_document(self):
"Enabled state for actions when no document is open"
w = self.window
self.assertEqual(0, w.model.rowCount())
# File menu
self.assertFalse(w.save_action.isEnabled())
self.assertFalse(w.save_crops_action.isEnabled())
self.assertFalse(w.export_csv_action.isEnabled())
self.assertFalse(w.close_action.isEnabled())
# Edit menu
self.assertFalse(w.select_all_action.isEnabled())
self.assertFalse(w.select_none_action.isEnabled())
self.assertFalse(w.delete_action.isEnabled())
self.assertFalse(w.next_box_action.isEnabled())
self.assertFalse(w.previous_box_action.isEnabled())
self.assertFalse(w.rotate_clockwise_action.isEnabled())
self.assertFalse(w.rotate_counter_clockwise_action.isEnabled())
self.assertTrue(w.sort_by_rows_action.isEnabled())
self.assertTrue(w.sort_by_columns_action.isEnabled())
self.assertFalse(w.plugin_actions[0].isEnabled())
# View
self.assertTrue(w.boxes_view_action.isEnabled())
self.assertTrue(w.objects_view_action.isEnabled())
self.assertFalse(w.zoom_in_action.isEnabled())
self.assertFalse(w.zoom_out_action.isEnabled())
self.assertFalse(w.zoom_to_selection_action.isEnabled())
self.assertFalse(w.zoom_home_action.isEnabled())
def _test_document_open(self):
"Enabled state for actions when a document is open"
w = self.window
# File menu
self.assertTrue(w.save_action.isEnabled())
self.assertTrue(w.save_crops_action.isEnabled())
self.assertTrue(w.export_csv_action.isEnabled())
self.assertTrue(w.close_action.isEnabled())
# Edit menu
self.assertTrue(w.select_all_action.isEnabled())
self.assertTrue(w.select_none_action.isEnabled())
self.assertFalse(w.delete_action.isEnabled())
self.assertTrue(w.next_box_action.isEnabled())
self.assertTrue(w.previous_box_action.isEnabled())
self.assertFalse(w.rotate_clockwise_action.isEnabled())
self.assertFalse(w.rotate_counter_clockwise_action.isEnabled())
self.assertTrue(w.sort_by_rows_action.isEnabled())
self.assertTrue(w.sort_by_columns_action.isEnabled())
self.assertTrue(w.plugin_actions[0].isEnabled())
# View
self.assertTrue(w.boxes_view_action.isEnabled())
self.assertTrue(w.objects_view_action.isEnabled())
self.assertTrue(w.zoom_in_action.isEnabled())
self.assertTrue(w.zoom_out_action.isEnabled())
self.assertTrue(w.zoom_to_selection_action.isEnabled())
self.assertTrue(w.zoom_home_action.isEnabled())
def test_open_and_closed(self):
"Enabled state for actions as documents are opened and closed"
w = self.window
self.window.close_document()
self._test_no_document()
self.window.open_document(path=TESTDATA / 'shapes.inselect')
self.assertEqual(5, w.model.rowCount())
self._test_document_open()
self.window.close_document()
self._test_no_document()
def test_selection_dependent(self):
"Enabled state for actions that depend upon what is selected"
w = self.window
w.open_document(path=TESTDATA / 'shapes.inselect')
# Select all boxes
w.select_all()
self.assertTrue(w.delete_action.isEnabled())
self.assertTrue(w.rotate_clockwise_action.isEnabled())
self.assertTrue(w.rotate_counter_clockwise_action.isEnabled())
# Clear selection
w.select_none()
self.assertFalse(w.delete_action.isEnabled())
self.assertFalse(w.rotate_clockwise_action.isEnabled())
self.assertFalse(w.rotate_counter_clockwise_action.isEnabled())
def test_boxes_view_active(self):
"Checked state of view actions reflects the active view"
w = self.window
w.boxes_view_action.trigger()
self.assertTrue(w.boxes_view_action.isChecked())
self.assertFalse(w.objects_view_action.isChecked())
w.objects_view_action.trigger()
self.assertFalse(w.boxes_view_action.isChecked())
self.assertTrue(w.objects_view_action.isChecked())
w.boxes_view_action.trigger()
self.assertTrue(w.boxes_view_action.isChecked())
self.assertFalse(w.objects_view_action.isChecked())
if __name__ == '__main__':
unittest.main()
|
lib/GANDCTAnalysis/crop_celeba.py | vwesselkamp/deepfake-fingerprint-atacks | 108 | 12638164 | <filename>lib/GANDCTAnalysis/crop_celeba.py
"""Script for cropping celebA adopted from: https://github.com/ningyu1991/GANFingerprints/"""
import argparse
import os
from PIL import Image
import numpy as np
from concurrent.futures import ProcessPoolExecutor
def crop_image(packed_args):
    i, directory, file_path, output = packed_args
if file_path.endswith("png") or file_path.endswith("jpeg") or file_path.endswith("jpg"):
image = np.asarray(Image.open(f"{directory}/{file_path}"))
if image.shape[0] != 128 or image.shape[1] != 128:
x,y,_= image.shape
image = np.copy(image)
x_upper = min(121+64, x)
y_upper = min(89+64, y)
image = image[x_upper-128:x_upper, y_upper-128:y_upper]
image = np.clip(image, 0, 255.).astype(np.uint8)
if not (image.shape[0] == 128 and image.shape[1] == 128):
print("Aborting")
return i
Image.fromarray(image).save(f"{output}/celeba_{file_path}")
return i
def main(args):
os.makedirs(args.OUTPUT, exist_ok=True)
paths = os.listdir(args.DIRECTORY)[:args.SIZE]
packed = map(lambda x: (x[0], args.DIRECTORY, x[1], args.OUTPUT) , enumerate(paths))
with ProcessPoolExecutor() as pool:
jobs = pool.map(crop_image, packed)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("DIRECTORY", help="Source directory.", type=str)
parser.add_argument("OUTPUT", help="Output directory.", type=str)
parser.add_argument("SIZE", help="Amount of data to convert.", type=int)
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
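# --- Illustrative usage sketch (editor's addition, not part of the original script) ---
# Command-line use; the directory names and the sample size are placeholders:
#   python crop_celeba.py img_align_celeba/ celeba_cropped/ 10000
# A single file can also be cropped directly by building the same
# (index, source_dir, file_name, output_dir) tuple that main() feeds to the pool:
#   os.makedirs("celeba_cropped", exist_ok=True)
#   crop_image((0, "img_align_celeba", "000001.jpg", "celeba_cropped"))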
|
test/absolute_import/local_module.py | DamnWidget/jedi | 239 | 12638182 | <filename>test/absolute_import/local_module.py
"""
This is a module that imports the *standard library* unittest,
despite there being a local "unittest" module. It specifies that it
wants the stdlib one with the ``absolute_import`` __future__ import.
The twisted equivalent of this module is ``twisted.trial._synctest``.
"""
from __future__ import absolute_import
import unittest
class Assertions(unittest.TestCase):
pass
|
nazurin/sites/Bilibili/api.py | Misaka13514/nazurin | 170 | 12638191 | import json
import os
from typing import List
from nazurin.models import Caption, Illust, Image
from nazurin.utils import Request
class Bilibili(object):
async def getDynamic(self, dynamic_id: int):
"""Get dynamic data from API."""
api = 'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/get_dynamic_detail?dynamic_id=' + str(
dynamic_id)
async with Request() as request:
async with request.get(api) as response:
source = await response.json()
card = json.loads(source['data']['card']['card'])
return card
async def fetch(self, dynamic_id: int) -> Illust:
"""Fetch images and detail."""
card = await self.getDynamic(dynamic_id)
imgs = self.getImages(card, dynamic_id)
caption = self.buildCaption(card)
caption['url'] = f"https://t.bilibili.com/{dynamic_id}"
return Illust(imgs, caption, card)
def getImages(self, card, dynamic_id: int) -> List[Image]:
"""Get all images in a dynamic card."""
pics = card['item']['pictures']
imgs = list()
for index, pic in enumerate(pics):
url = pic['img_src']
basename = os.path.basename(url)
extension = os.path.splitext(basename)[1]
imgs.append(
Image(
str(dynamic_id) + '_' + str(index) + extension, url,
url + '@518w.jpg', pic['img_size'], pic['img_width'],
pic['img_height']))
return imgs
def buildCaption(self, card) -> Caption:
return Caption({
'author': card['user']['name'],
'content': card['item']['description']
})
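# --- Illustrative usage sketch (editor's addition, not part of the original site module) ---
# Fetching one dynamic with asyncio; the dynamic id below is a made-up
# placeholder and network access through nazurin's Request session is assumed.
import asyncio
async def _demo(dynamic_id: int):
    illust = await Bilibili().fetch(dynamic_id)
    print(illust)
# asyncio.run(_demo(123456789012345678))  # placeholder id, requires network access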
|
pkg/win32/mod_tools/tools/scripts/compiledefs.py | hexgear-studio/ds_mod_tools | 112 | 12638205 | <reponame>hexgear-studio/ds_mod_tools<gh_stars>100-1000
import sys
import optparse
import glob
import os
import string
import tempfile
import shutil
import zipfile
from pipelinetools import *
from objloader import *
import struct
def ProcessFile(file, options):
#figure out some name stuff
basename = os.path.splitext(os.path.basename(file))[0]
print "Compiling " + basename
basedir = GetBaseDirectory(file, "intermediates")
tooldir = VerifyDirectory(basedir,"tools")
outdir = VerifyDirectory(basedir, "data/defs")
outfilename = os.path.join(outdir, basename + ".xml")
    shutil.copyfile(file, outfilename)
def main(argv):
try:
import psyco
psyco.full()
except ImportError:
pass
    parser = optparse.OptionParser(description='compiles defs (currently just copies the xml)')
parser.add_option('-b', '--bigendian', help="bigendian", action="store_true", dest="bigendian")
options, arguments = parser.parse_args()
#generate a list of unique files from the arguments
files = list(set(sum([glob.glob(x) for x in arguments], [])))
if len(files) == 0:
print ("No input files specified")
sys.exit(2)
for file in files:
ProcessFile(file, options)
if __name__ == "__main__":
main(sys.argv[1:]) |
Python3/181.py | rakhi2001/ecom7 | 854 | 12638206 | <filename>Python3/181.py<gh_stars>100-1000
__________________________________________________________________________________________________
SELECT e1.Name FROM Employee e1
JOIN Employee e2 ON e1.ManagerId = e2.Id
WHERE e1.Salary > e2.Salary;
__________________________________________________________________________________________________
SELECT e1.Name FROM Employee e1, Employee e2
WHERE e1.ManagerId = e2.Id AND e1.Salary > e2.Salary;
__________________________________________________________________________________________________ |
analysis_engine/load_algo_dataset_from_s3.py | virdesai/stock-analysis-engine | 819 | 12638233 | """
Helper for loading datasets from s3
"""
import boto3
import analysis_engine.consts as ae_consts
import analysis_engine.prepare_dict_for_algo as prepare_utils
import analysis_engine.s3_read_contents_from_key as s3_utils
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
def load_algo_dataset_from_s3(
s3_key,
s3_address,
s3_bucket,
s3_access_key,
s3_secret_key,
s3_region_name,
s3_secure,
serialize_datasets=ae_consts.DEFAULT_SERIALIZED_DATASETS,
compress=False,
encoding='utf-8'):
"""load_algo_dataset_from_s3
    Load an algorithm-ready dataset for algorithm backtesting
    from S3 (Minio)
    :param serialize_datasets: optional - list of dataset names to
        deserialize in the dataset
    :param compress: optional - boolean flag for decompressing
        the contents of the S3 object if necessary
        (default is ``False`` and algorithms
        use ``zlib`` for compression)
    :param encoding: optional - string for data encoding
    **Minio (S3) connectivity arguments**
    :param s3_key: string - S3 key holding the serialized dataset
        (default is ``None``)
:param s3_address: Minio S3 connection string format: ``host:port``
(default is ``localhost:9000``)
:param s3_bucket: S3 Bucket for storing the artifacts
(default is ``dev``) which should be viewable on a browser:
http://localhost:9000/minio/dev/
:param s3_access_key: S3 Access key
(default is ``trexaccesskey``)
:param s3_secret_key: S3 Secret key
(default is ``trex123321``)
:param s3_region_name: S3 region name
(default is ``us-east-1``)
:param s3_secure: Transmit using tls encryption
(default is ``False``)
"""
log.info(
f'start s3={s3_address}:{s3_bucket}/{s3_key}')
data_from_file = None
endpoint_url = f'http://{s3_address}'
if s3_secure:
endpoint_url = f'https://{s3_address}'
s3 = boto3.resource(
's3',
endpoint_url=endpoint_url,
aws_access_key_id=s3_access_key,
aws_secret_access_key=s3_secret_key,
region_name=s3_region_name,
config=boto3.session.Config(signature_version='s3v4'))
# compressed files will not work with json.dumps
try:
data_from_file = s3_utils.s3_read_contents_from_key(
s3=s3,
s3_bucket_name=s3_bucket,
s3_key=s3_key,
encoding=encoding,
convert_as_json=not compress,
compress=compress)
except Exception as e:
if (
'An error occurred (NoSuchBucket) '
'when calling the GetObject operation') in str(e):
msg = (
                f'missing s3_bucket={s3_bucket} in s3_address={s3_address}')
log.error(msg)
raise Exception(msg)
else:
raise Exception(e)
if not data_from_file:
        log.error(
            f'missing data from s3={s3_address}:{s3_bucket}/{s3_key}')
return None
return prepare_utils.prepare_dict_for_algo(
data=data_from_file,
compress=False,
convert_to_dict=True,
encoding=encoding)
# end of load_algo_dataset_from_s3
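# --- Illustrative usage sketch (editor's addition, not part of the original helper) ---
# Loading a dataset from a local Minio container; the key and bucket are
# placeholders, the credentials are the documented defaults above.
if __name__ == '__main__':
    algo_dataset = load_algo_dataset_from_s3(
        s3_key='SPY-latest.json',        # placeholder key
        s3_address='localhost:9000',
        s3_bucket='algoready',           # placeholder bucket
        s3_access_key='trexaccesskey',   # documented default credentials
        s3_secret_key='trex123321',
        s3_region_name='us-east-1',
        s3_secure=False)
    print(type(algo_dataset))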
|
src/tests/api/test_membershiptypes.py | fabm3n/pretix | 1,248 | 12638236 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import pytest
from django_scopes import scopes_disabled
from i18nfield.strings import LazyI18nString
@pytest.fixture
def membershiptype(organizer, event):
return organizer.membership_types.create(
name=LazyI18nString({"en": "Week pass"}),
transferable=True,
allow_parallel_usage=False,
max_usages=15,
)
TEST_TYPE_RES = {
"name": {
"en": "Week pass"
},
"transferable": True,
"allow_parallel_usage": False,
"max_usages": 15,
}
@pytest.mark.django_db
def test_membershiptype_list(token_client, organizer, membershiptype):
res = dict(TEST_TYPE_RES)
res["id"] = membershiptype.pk
resp = token_client.get('/api/v1/organizers/{}/membershiptypes/'.format(organizer.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
@pytest.mark.django_db
def test_membershiptype_detail(token_client, organizer, membershiptype):
res = dict(TEST_TYPE_RES)
res["id"] = membershiptype.pk
resp = token_client.get('/api/v1/organizers/{}/membershiptypes/{}/'.format(organizer.slug, membershiptype.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_membershiptype_create(token_client, organizer):
resp = token_client.post(
'/api/v1/organizers/{}/membershiptypes/'.format(organizer.slug),
format='json',
data={
"name": {
"en": "Week pass"
},
"transferable": True,
"allow_parallel_usage": False,
"max_usages": 15,
}
)
assert resp.status_code == 201
with scopes_disabled():
membershiptype = organizer.membership_types.get(id=resp.data['id'])
assert str(membershiptype.name) == "Week pass"
assert membershiptype.transferable
assert not membershiptype.allow_parallel_usage
@pytest.mark.django_db
def test_membershiptype_patch(token_client, organizer, membershiptype):
resp = token_client.patch(
'/api/v1/organizers/{}/membershiptypes/{}/'.format(organizer.slug, membershiptype.pk),
format='json',
data={
'transferable': False,
}
)
assert resp.status_code == 200
membershiptype.refresh_from_db()
assert not membershiptype.transferable
@pytest.mark.django_db
def test_membershiptype_delete(token_client, organizer, membershiptype):
resp = token_client.delete(
'/api/v1/organizers/{}/membershiptypes/{}/'.format(organizer.slug, membershiptype.pk),
)
assert resp.status_code == 204
assert not organizer.membership_types.exists()
|
examples/novels/baidu_novels.py | atiasn/novel | 2,344 | 12638252 | <reponame>atiasn/novel
#!/usr/bin/env python
import aiohttp
import arrow
import asyncio
import async_timeout
import re
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from owllook.fetcher.function import get_random_user_agent
from owllook.config import CONFIG, LOGGER, BLACK_DOMAIN, RULES, LATEST_RULES
async def fetch(client, url, name, is_web):
with async_timeout.timeout(15):
try:
headers = {'user-agent': await get_random_user_agent()}
if is_web:
params = {'wd': name, 'ie': 'utf-8', 'rn': CONFIG.BAIDU_RN, 'vf_bl': 1}
else:
params = {'word': name}
async with client.get(url, params=params, headers=headers) as response:
assert response.status == 200
LOGGER.info('Task url: {}'.format(response.url))
try:
text = await response.text()
except:
text = await response.read()
return text
except Exception as e:
LOGGER.exception(e)
return None
async def get_real_url(client, url):
with async_timeout.timeout(5):
try:
headers = {'user-agent': await get_random_user_agent()}
async with client.head(url, headers=headers, allow_redirects=True) as response:
assert response.status == 200
LOGGER.info('Parse url: {}'.format(response.url))
# text = ""
# try:
# text = await response.text()
# except:
# text = await response.read()
# if text:
# print(text)
# text = re.findall(r'replace\(\"(.*?)\"\)', str(text))
# text = text[0] if text[0] else ""
url = response.url if response.url else None
return url
except Exception as e:
LOGGER.exception(e)
return None
async def data_extraction_for_phone(html):
with async_timeout.timeout(10):
try:
# Get title
data_log = eval(html['data-log'])
url = data_log.get('mu', None)
if not url:
return None
# Get title
title = html.find('h3').get_text()
# Get author and update_time (option)
novel_mess = html.findAll(class_='c-gap-right-large')
basic_mess = [i.get_text() for i in novel_mess] if novel_mess else None
return {'title': title, 'url': url, 'basic_mess': basic_mess}
except Exception as e:
LOGGER.exception(e)
return None
async def data_extraction_for_web(html):
with async_timeout.timeout(10):
try:
url = html.find('a').get('href', None)
if not url or 'baidu' in url or urlparse(url).netloc in BLACK_DOMAIN:
return None
netloc = urlparse(url).netloc
is_parse = 1 if netloc in RULES.keys() else 0
title = html.select('font[size="3"]')[0].get_text()
source = html.select('font[color="#008000"]')[0].get_text()
time = re.findall(r'\d+-\d+-\d+', source)
time = time[0] if time else None
timestamp = 0
if time:
try:
time_list = [int(i) for i in time.split('-')]
timestamp = arrow.get(time_list[0], time_list[1], time_list[2]).timestamp
except Exception as e:
LOGGER.exception(e)
timestamp = 0
return {'title': title, 'url': url.replace('index.html', '').replace('Index.html', ''), 'time': time,
'is_parse': is_parse,
'timestamp': timestamp,
'netloc': netloc}
except Exception as e:
LOGGER.exception(e)
return None
async def data_extraction_for_web_baidu(client, html):
with async_timeout.timeout(20):
try:
url = html.select('h3.t a')[0].get('href', None)
real_url = await get_real_url(client=client, url=url) if url else None
if real_url:
real_str_url = str(real_url)
netloc = urlparse(real_str_url).netloc
if "http://" + netloc + "/" == real_str_url:
return None
if 'baidu' in real_str_url or netloc in BLACK_DOMAIN:
return None
is_parse = 1 if netloc in RULES.keys() else 0
title = html.select('h3.t a')[0].get_text()
is_recommend = 1 if netloc in LATEST_RULES.keys() else 0
# time = re.findall(r'\d+-\d+-\d+', source)
# time = time[0] if time else None
timestamp = 0
time = ""
# if time:
# try:
# time_list = [int(i) for i in time.split('-')]
# timestamp = arrow.get(time_list[0], time_list[1], time_list[2]).timestamp
# except Exception as e:
# LOGGER.exception(e)
# timestamp = 0
return {'title': title, 'url': real_str_url.replace('index.html', ''), 'time': time,
'is_parse': is_parse,
'is_recommend': is_recommend,
'timestamp': timestamp,
'netloc': netloc}
else:
return None
except Exception as e:
LOGGER.exception(e)
return None
async def baidu_search(name, is_web=1):
url = CONFIG.URL_PC if is_web else CONFIG.URL_PHONE
async with aiohttp.ClientSession() as client:
html = await fetch(client=client, url=url, name=name, is_web=is_web)
if html:
soup = BeautifulSoup(html, 'html5lib')
if is_web:
# result = soup.find_all(class_='f')
result = soup.find_all(class_='result')
extra_tasks = [data_extraction_for_web_baidu(client=client, html=i) for i in result]
tasks = [asyncio.ensure_future(i) for i in extra_tasks]
else:
result = soup.find_all(class_='result c-result c-clk-recommend')
extra_tasks = [data_extraction_for_phone(i) for i in result]
tasks = [asyncio.ensure_future(i) for i in extra_tasks]
# return await asyncio.gather(*tasks)
done_list, pending_list = await asyncio.wait(tasks)
res = []
for task in done_list:
res.append(task.result())
return res
if __name__ == '__main__':
import time
start = time.time()
print(asyncio.get_event_loop().run_until_complete(baidu_search('雪中悍刀行')))
print(time.time() - start)
|
genia.py | kimiyoung/transfer | 147 | 12638264 |
import numpy as np
from collections import defaultdict as dd
from scipy import sparse as sp
import cnn_rnn
import sample
LABEL_INDEX = ['PRP$', 'VBG', 'VBD', '``', 'VBN', 'POS', "''", 'VBP', 'WDT', 'JJ',\
'WP', 'VBZ', 'DT', '#', 'RP', '$', 'NN', 'FW', ',', '.', 'TO', 'PRP', 'RB', '-LRB-',\
':', 'NNS', 'NNP', 'VB', 'WRB', 'CC', 'LS', 'PDT', 'RBS', 'RBR', 'CD', 'EX', 'IN', 'WP$',\
'MD', 'NNPS', '-RRB-', 'JJS', 'JJR', 'SYM', 'UH']
MAX_LEN = 176
MAX_CHAR_LEN = 35
DIR = 'biomed/genia/'
TRAIN_DATA = DIR + 'train.txt'
DEV_DATA = DIR + 'test.txt'
TEST_DATA = DIR + 'dev.txt'
HASH_FILE = 'words.lst'
EMB_FILE = 'embeddings.txt'
USE_DEV = True
LABELING_RATE = 0.001
def process(word):
word = word.lower()
word = "".join(c if not c.isdigit() else '0' for c in word)
return word
def trans_label(label):
if label == '(':
return '-LRB-'
elif label == ')':
return '-RRB-'
elif label == '':
return '-RRB-'
elif label == 'PP':
return 'PRP'
elif label == 'N':
return 'NN'
elif label == '-':
return 'SYM'
elif label == 'XT' or label == 'CT':
return 'DT'
else:
return label
def create_word_index(filenames):
word_index, word_cnt = {}, 1
for filename in filenames:
for line in open(filename):
if not '/' in line: continue
word = "/".join(line.strip().split('/')[: -1])
word = process(word)
if word in word_index: continue
word_index[word] = word_cnt
word_cnt += 1
return word_index, word_cnt
def create_char_index(filenames):
char_index, char_cnt = {}, 3
for filename in filenames:
for line in open(filename):
if not '/' in line: continue
word = "/".join(line.strip().split('/')[: -1])
for c in word:
if c not in char_index:
char_index[c] = char_cnt
char_cnt += 1
return char_index, char_cnt
def cnt_line(filename):
ret = 0
for line in open(filename):
if not '/' in line: ret += 1
return ret
def read_data(filename, word_index):
line_cnt = cnt_line(filename)
x, y = np.zeros((line_cnt, MAX_LEN), dtype = np.int32), np.zeros((line_cnt, MAX_LEN), dtype = np.int32)
mask = np.zeros((line_cnt, MAX_LEN), dtype = np.float32)
i, j = 0, 0
for line in open(filename):
if not '/' in line:
i += 1
j = 0
continue
inputs = line.strip().split('/')
label = inputs[-1].split('|')[0]
label = trans_label(label)
word = "/".join(inputs[: -1])
word = process(word)
word_ind, label_ind = word_index[word], LABEL_INDEX.index(label)
x[i, j] = word_ind
y[i, j] = label_ind
mask[i, j] = 1.0
j += 1
return x, y, mask
def read_char_data(filename, char_index):
line_cnt = cnt_line(filename)
x = np.zeros((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype = np.int32)
mask = np.zeros((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype = np.float32)
i, j = 0, 0
for line in open(filename):
if not '/' in line:
i += 1
j = 0
continue
inputs = line.strip().split('/')
label = inputs[-1]
word = "/".join(inputs[: -1])
for k, c in enumerate(word):
if k + 1 >= MAX_CHAR_LEN: break
x[i, j, k + 1] = char_index[c]
mask[i, j, k + 1] = 1.0
x[i, j, 0] = 1
mask[i, j, 0] = 1.0
if len(word) + 1 < MAX_CHAR_LEN:
x[i, j, len(word) + 1] = 2
mask[i, j, len(word) + 1] = 1.0
j += 1
return x, mask
def read_word2embedding():
words = []
for line in open(HASH_FILE):
words.append(line.strip())
word2embedding = {}
for i, line in enumerate(open(EMB_FILE)):
if words[i] in word2embedding: continue
inputs = line.strip().split()
word2embedding[words[i]] = np.array([float(e) for e in inputs], dtype = np.float32)
return word2embedding
def evaluate(py, y_, m_, full = False):
if len(py.shape) > 1:
py = np.argmax(py, axis = 1)
y, m = y_.flatten(), m_.flatten()
acc = 1.0 * (np.array(y == py, dtype = np.int32) * m).sum() / m.sum()
return acc, acc, acc, acc
if __name__ == '__main__':
word_index, word_cnt = create_word_index([TRAIN_DATA, DEV_DATA, TEST_DATA])
wx, y, m = read_data(TRAIN_DATA, word_index)
if USE_DEV:
dev_wx, dev_y, dev_m = read_data(TEST_DATA, word_index)
wx, y, m = np.vstack((wx, dev_wx)), np.vstack((y, dev_y)), np.vstack((m, dev_m))
twx, ty, tm = read_data(DEV_DATA, word_index)
char_index, char_cnt= create_char_index([TRAIN_DATA, DEV_DATA, TEST_DATA])
x, cm = read_char_data(TRAIN_DATA, char_index)
if USE_DEV:
dev_x, dev_cm = read_char_data(TEST_DATA, char_index)
x, cm = np.vstack((x, dev_x)), np.vstack((cm, dev_cm))
tx, tcm = read_char_data(DEV_DATA, char_index)
model = cnn_rnn.cnn_rnn(char_cnt, len(LABEL_INDEX), word_cnt)
if LABELING_RATE < 1.0:
ind = sample.create_sample_index(LABELING_RATE, x.shape[0])
x, y, m, wx, cm = sample.sample_arrays((x, y, m, wx, cm), ind)
model.add_data(x, y, m, wx, cm, None, tx, ty, tm, twx, tcm, None)
model.build()
word2embedding = read_word2embedding()
model.set_embedding(word2embedding, word_index)
model.train(evaluate)
|
tests/utils/test_bipartite.py | UCLCheminformatics/ScaffoldGraph | 121 | 12638281 | """
scaffoldgraph tests.utils.test_bipartite
"""
import scaffoldgraph as sg
import networkx as nx
from scaffoldgraph.utils.bipartite import make_bipartite_graph
from . import mock_sdf
def test_bipartite(sdf_file):
network = sg.ScaffoldNetwork.from_sdf(sdf_file)
    bipartite = make_bipartite_graph(network)
    assert nx.is_bipartite(bipartite)
|
tools/analysis/report_map.py | rlleshi/mmaction2 | 1,870 | 12638335 | <reponame>rlleshi/mmaction2
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
import numpy as np
from mmaction.core import ActivityNetLocalization
args = None
def cuhk17_top1():
"""Assign label for each proposal with the cuhk17 result, which is the #2
entry in http://activity-net.org/challenges/2017/evaluation.html."""
if not osp.exists('cuhk_anet17_pred.json'):
os.system('wget https://download.openmmlab.com/'
'mmaction/localization/cuhk_anet17_pred.json')
proposal = mmcv.load(args.proposal)
results = proposal['results']
cuhk_pred = mmcv.load('cuhk_anet17_pred.json')['results']
def get_topk(preds, k):
preds.sort(key=lambda x: x['score'])
return preds[-k:]
for k, v in results.items():
action_pred = cuhk_pred[k]
top1 = get_topk(action_pred, 1)
top1_label = top1[0]['label']
new_value = []
for item in v:
x = dict(label=top1_label)
x.update(item)
new_value.append(x)
results[k] = new_value
proposal['results'] = results
mmcv.dump(proposal, args.det_output)
cls_funcs = {'cuhk17_top1': cuhk17_top1}
def parse_args():
    parser = argparse.ArgumentParser(description='Report detection mAP for '
'ActivityNet proposal file')
parser.add_argument('--proposal', type=str, help='proposal file')
parser.add_argument(
'--gt',
type=str,
default='data/ActivityNet/'
'anet_anno_val.json',
help='groundtruth file')
parser.add_argument(
'--cls',
type=str,
default='cuhk17_top1',
choices=['cuhk17_top1'],
help='the way to assign label for each '
'proposal')
parser.add_argument(
'--det-output',
type=str,
default='det_result.json',
help='the path to store detection results')
args = parser.parse_args()
return args
def main():
global args, cls_funcs
args = parse_args()
func = cls_funcs[args.cls]
func()
anet_detection = ActivityNetLocalization(
args.gt,
args.det_output,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
verbose=True)
mAP, average_mAP = anet_detection.evaluate()
print('[RESULTS] Performance on ActivityNet detection task.\n'
f'mAP: {mAP}\nAverage-mAP: {average_mAP}')
if __name__ == '__main__':
main()
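# --- Illustrative usage sketch (editor's addition, not part of the original tool) ---
# Typical invocation; `proposal_results.json` is a placeholder for the proposal
# file produced by a localization test run, the other values are the defaults
# defined in parse_args():
#   python tools/analysis/report_map.py \
#       --proposal proposal_results.json \
#       --gt data/ActivityNet/anet_anno_val.json \
#       --cls cuhk17_top1 \
#       --det-output det_result.json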
|
example/test.py | osblinnikov/pytorch-binary | 293 | 12638349 | <reponame>osblinnikov/pytorch-binary<gh_stars>100-1000
import torch
import torch.nn as nn
from torch.autograd import Variable
from modules.add import MyAddModule
class MyNetwork(nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.add = MyAddModule()
def forward(self, input1, input2):
return self.add(input1, input2)
model = MyNetwork()
x = torch.range(1, 25).view(5, 5)
input1, input2 = Variable(x), Variable(x * 4)
print(model(input1, input2))
print(input1 + input2)
if torch.cuda.is_available():
input1, input2, = input1.cuda(), input2.cuda()
print(model(input1, input2))
print(input1 + input2)
|
utils/common.py | PaddleEdu/Transformer-CV-models | 113 | 12638351 | <reponame>PaddleEdu/Transformer-CV-models<filename>utils/common.py
import paddle
import paddle.nn as nn
from paddle.nn.initializer import TruncatedNormal, KaimingNormal, Constant, Assign
# Common initializations
ones_ = Constant(value=1.)
zeros_ = Constant(value=0.)
kaiming_normal_ = KaimingNormal()
trunc_normal_ = TruncatedNormal(std=.02)
# Common Functions
def to_2tuple(x):
return tuple([x] * 2)
def add_parameter(layer, datas, name=None):
parameter = layer.create_parameter(
shape=(datas.shape),
default_initializer=Assign(datas)
)
if name:
layer.add_parameter(name, parameter)
return parameter
# Common Layers
def drop_path(x, drop_prob=0., training=False):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ...
"""
if drop_prob == 0. or not training:
return x
keep_prob = paddle.to_tensor(1 - drop_prob)
shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype)
random_tensor = paddle.floor(random_tensor) # binarize
output = x.divide(keep_prob) * random_tensor
return output
class DropPath(nn.Layer):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Identity(nn.Layer):
def __init__(self):
super(Identity, self).__init__()
def forward(self, input):
return input
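# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A tiny residual block wiring DropPath, Identity and the initializers above
# together; TinyResidualMLP is invented for this sketch and is not part of the
# library.
class TinyResidualMLP(nn.Layer):
    def __init__(self, dim, drop_path_rate=0.1):
        super().__init__()
        self.fc = nn.Linear(dim, dim)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else Identity()
        trunc_normal_(self.fc.weight)
        zeros_(self.fc.bias)
    def forward(self, x):
        # identity shortcut plus a stochastically dropped linear branch
        return x + self.drop_path(self.fc(x))
# x = paddle.randn([4, 16]); TinyResidualMLP(16)(x).shape == [4, 16]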
|
Common/Core/Testing/Python/TestFilePath.py | cclauss/VTK | 1,755 | 12638353 | <reponame>cclauss/VTK<gh_stars>1000+
"""Test support for fspath protocol VTK-Python
In Python 3.6 and later, the built-in open() function accepts pathlike objects,
such as pathlib.Path(), which represent file system paths and which
return a string via their __fspath__ slot. This test checks that the
VTK SetFileName() methods accept pathlike objects just like open() does.
Created on March 27, 2021 by <NAME>
"""
import sys
import os
from vtkmodules.vtkCommonCore import vtkFileOutputWindow
from vtkmodules.vtkCommonSystem import vtkDirectory
from vtkmodules.test import Testing
from vtkmodules.util.misc import vtkGetTempDir
VTK_TEMP_DIR = vtkGetTempDir()
pathstring = os.path.join(VTK_TEMP_DIR, "log.txt")
if sys.hexversion >= 0x03060000:
from pathlib import Path
pathobj = Path(pathstring)
else:
Path = str
pathobj = Path(pathstring)
class TestFilePath(Testing.vtkTest):
def testSetFilePath(self):
"""Pass a path to a SetFileName method
"""
w = vtkFileOutputWindow()
w.SetFileName(pathobj)
self.assertEqual(w.GetFileName(), str(pathobj))
    def testDirectory(self):
"""Check the hinted vtkDirectory methods
"""
d = vtkDirectory()
# This will raise an exception if Path isn't accepted
d.Open(Path(VTK_TEMP_DIR))
if __name__ == "__main__":
Testing.main([(TestFilePath, 'test')])
|
agents/tools/attr_dict_test.py | DoxasticFox/batch-ppo | 210 | 12638355 | <filename>agents/tools/attr_dict_test.py
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attribute dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents.tools import attr_dict
class AttrDictTest(tf.test.TestCase):
def test_construct_from_dict(self):
initial = dict(foo=13, bar=42)
obj = attr_dict.AttrDict(initial)
self.assertEqual(13, obj.foo)
self.assertEqual(42, obj.bar)
def test_construct_from_kwargs(self):
obj = attr_dict.AttrDict(foo=13, bar=42)
self.assertEqual(13, obj.foo)
self.assertEqual(42, obj.bar)
def test_has_attribute(self):
obj = attr_dict.AttrDict(foo=13)
self.assertTrue('foo' in obj)
self.assertFalse('bar' in obj)
def test_access_default(self):
obj = attr_dict.AttrDict()
self.assertEqual(None, obj.foo)
def test_access_magic(self):
obj = attr_dict.AttrDict()
with self.assertRaises(AttributeError):
obj.__getstate__ # pylint: disable=pointless-statement
def test_immutable_create(self):
obj = attr_dict.AttrDict()
with self.assertRaises(RuntimeError):
obj.foo = 42
def test_immutable_modify(self):
obj = attr_dict.AttrDict(foo=13)
with self.assertRaises(RuntimeError):
obj.foo = 42
def test_immutable_unlocked(self):
obj = attr_dict.AttrDict()
with obj.unlocked:
obj.foo = 42
self.assertEqual(42, obj.foo)
if __name__ == '__main__':
tf.test.main()
|
astrality/tests/actions/test_symlink_action.py | JakobGM/Astrality | 111 | 12638361 | """Tests for astrality.actions.SymlinkAction."""
from pathlib import Path
from astrality.actions import SymlinkAction
from astrality.persistence import CreatedFiles
def test_null_object_pattern():
"""Copy actions without options should do nothing."""
symlink_action = SymlinkAction(
options={},
directory=Path('/'),
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
symlink_action.execute()
def test_symlink_dry_run(create_temp_files, caplog):
"""If dry_run is True, only log and not symlink."""
content, target = create_temp_files(2)
symlink_action = SymlinkAction(
options={'content': str(content), 'target': str(target)},
directory=Path('/'),
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
caplog.clear()
result = symlink_action.execute(dry_run=True)
# We should log the symlink that had been performed
assert 'SKIPPED:' in caplog.record_tuples[0][2]
assert str(content) in caplog.record_tuples[0][2]
assert str(target) in caplog.record_tuples[0][2]
# We should also still return the intended result
assert result == {content: target}
# But the symlink should not be created in a dry run
assert not target.is_symlink()
def test_symlink_action_using_all_parameters(tmpdir):
"""All three parameters should be respected."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
file2 = temp_dir / 'file2'
file2.touch()
recursive_dir = temp_dir / 'recursive'
recursive_dir.mkdir()
file3 = temp_dir / 'recursive' / 'file3'
file3.touch()
symlink_options = {
'content': str(temp_dir),
'target': str(target),
'include': r'file(\d)',
}
symlink_action = SymlinkAction(
options=symlink_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
symlink_action.execute()
assert (target / '1').is_symlink()
assert (target / '2').is_symlink()
assert (target / 'recursive' / '3').is_symlink()
assert (target / '1').resolve() == file1
assert (target / '2').resolve() == file2
assert (target / 'recursive' / '3').resolve() == file3
def test_symlinking_without_renaming(tmpdir):
"""When include is not given, keep symlink name."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
file2 = temp_dir / 'file2'
file2.touch()
recursive_dir = temp_dir / 'recursive'
recursive_dir.mkdir()
file3 = temp_dir / 'recursive' / 'file3'
file3.touch()
symlink_options = {
'content': str(temp_dir),
'target': str(target),
}
symlink_action = SymlinkAction(
options=symlink_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
symlink_action.execute()
assert (target / 'file1').is_symlink()
assert (target / 'file2').is_symlink()
assert (target / 'recursive' / 'file3').is_symlink()
assert (target / 'file1').resolve() == file1
assert (target / 'file2').resolve() == file2
assert (target / 'recursive' / 'file3').resolve() == file3
def test_symlinking_file_to_directory(tmpdir):
"""If symlinking from directory to file, place file in directory."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
symlink_options = {
'content': str(file1),
'target': str(target),
'include': r'file1',
}
symlink_action = SymlinkAction(
options=symlink_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
symlink_action.execute()
assert (target / 'file1').is_symlink()
assert (target / 'file1').resolve() == file1
assert symlink_action.symlinked_files == {
file1: {target / 'file1'},
}
def test_running_symlink_action_twice(create_temp_files):
"""Symlink action should be idempotent."""
content, target = create_temp_files(2)
content.write_text('content')
target.write_text('target')
symlink_options = {
'content': str(content),
'target': str(target),
}
symlink_action = SymlinkAction(
options=symlink_options,
directory=content.parent,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
# Symlink first time
symlink_action.execute()
assert target.is_symlink()
assert target.read_text() == 'content'
    # A backup should be created
backup = CreatedFiles().creations['test'][str(target)]['backup']
assert Path(backup).read_text() == 'target'
# Symlink one more time, and assert idempotency
symlink_action.execute()
assert target.is_symlink()
assert target.read_text() == 'content'
backup = CreatedFiles().creations['test'][str(target)]['backup']
assert Path(backup).read_text() == 'target'
def test_backup_of_symlink_target(create_temp_files):
"""Overwritten copy targets should be backed up."""
target, content = create_temp_files(2)
# This file is the original and should be backed up
target.write_text('original')
# This is the new content which will be symlinked to
content.write_text('new')
symlink_options = {
'content': str(content.name),
'target': str(target),
}
symlink_action = SymlinkAction(
options=symlink_options,
directory=content.parent,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
# We replace the content by executing the action
symlink_action.execute()
assert target.resolve().read_text() == 'new'
# And when cleaning up the module, the backup should be restored
CreatedFiles().cleanup(module='test')
assert target.read_text() == 'original'
def test_cleanup_of_created_directory(create_temp_files, tmpdir):
"""Created directories should be cleaned up."""
tmpdir = Path(tmpdir)
[content] = create_temp_files(1)
# The target requires a new directory to be created
directory = tmpdir / 'dir'
target = directory / 'target.tmp'
# Execute the symlink action
symlink_options = {
'content': str(content.name),
'target': str(target),
}
created_files = CreatedFiles().wrapper_for(module='test')
symlink_action = SymlinkAction(
options=symlink_options,
directory=content.parent,
replacer=lambda x: x,
context_store={},
creation_store=created_files,
)
symlink_action.execute()
# The directory should now exist and be persisted
assert directory.is_dir()
assert directory in created_files.creation_store
# But it should be deleted on cleanup
CreatedFiles().cleanup(module='test')
assert not directory.is_dir()
|
ch12/tspTests.py | MohammedMajidKhadim/GeneticAlgorithmsWithPython | 1,008 | 12638363 | <filename>ch12/tspTests.py
# File: tspTests.py
# from chapter 12 of _Genetic Algorithms with Python_
#
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import math
import random
import unittest
from itertools import chain
import genetic
def get_fitness(genes, idToLocationLookup):
fitness = get_distance(idToLocationLookup[genes[0]],
idToLocationLookup[genes[-1]])
for i in range(len(genes) - 1):
start = idToLocationLookup[genes[i]]
end = idToLocationLookup[genes[i + 1]]
fitness += get_distance(start, end)
return Fitness(round(fitness, 2))
def display(candidate, startTime):
timeDiff = datetime.datetime.now() - startTime
print("{}\t{}\t{}\t{}".format(
' '.join(map(str, candidate.Genes)),
candidate.Fitness,
candidate.Strategy.name,
timeDiff))
def get_distance(locationA, locationB):
sideA = locationA[0] - locationB[0]
sideB = locationA[1] - locationB[1]
sideC = math.sqrt(sideA * sideA + sideB * sideB)
return sideC
def mutate(genes, fnGetFitness):
count = random.randint(2, len(genes))
initialFitness = fnGetFitness(genes)
while count > 0:
count -= 1
indexA, indexB = random.sample(range(len(genes)), 2)
genes[indexA], genes[indexB] = genes[indexB], genes[indexA]
fitness = fnGetFitness(genes)
if fitness > initialFitness:
return
def crossover(parentGenes, donorGenes, fnGetFitness):
pairs = {Pair(donorGenes[0], donorGenes[-1]): 0}
for i in range(len(donorGenes) - 1):
pairs[Pair(donorGenes[i], donorGenes[i + 1])] = 0
tempGenes = parentGenes[:]
if Pair(parentGenes[0], parentGenes[-1]) in pairs:
# find a discontinuity
found = False
for i in range(len(parentGenes) - 1):
if Pair(parentGenes[i], parentGenes[i + 1]) in pairs:
continue
tempGenes = parentGenes[i + 1:] + parentGenes[:i + 1]
found = True
break
if not found:
return None
runs = [[tempGenes[0]]]
for i in range(len(tempGenes) - 1):
if Pair(tempGenes[i], tempGenes[i + 1]) in pairs:
runs[-1].append(tempGenes[i + 1])
continue
runs.append([tempGenes[i + 1]])
initialFitness = fnGetFitness(parentGenes)
count = random.randint(2, 20)
runIndexes = range(len(runs))
while count > 0:
count -= 1
for i in runIndexes:
if len(runs[i]) == 1:
continue
if random.randint(0, len(runs)) == 0:
runs[i] = [n for n in reversed(runs[i])]
indexA, indexB = random.sample(runIndexes, 2)
runs[indexA], runs[indexB] = runs[indexB], runs[indexA]
childGenes = list(chain.from_iterable(runs))
if fnGetFitness(childGenes) > initialFitness:
return childGenes
return childGenes
class TravelingSalesmanTests(unittest.TestCase):
def test_8_queens(self):
idToLocationLookup = {
'A': [4, 7],
'B': [2, 6],
'C': [0, 5],
'D': [1, 3],
'E': [3, 0],
'F': [5, 1],
'G': [7, 2],
'H': [6, 4]
}
optimalSequence = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
self.solve(idToLocationLookup, optimalSequence)
def test_ulysses16(self):
idToLocationLookup = load_data("ulysses16.tsp")
optimalSequence = [14, 13, 12, 16, 1, 3, 2, 4,
8, 15, 5, 11, 9, 10, 7, 6]
self.solve(idToLocationLookup, optimalSequence)
def test_benchmark(self):
genetic.Benchmark.run(lambda: self.test_ulysses16())
def solve(self, idToLocationLookup, optimalSequence):
geneset = [i for i in idToLocationLookup.keys()]
def fnCreate():
return random.sample(geneset, len(geneset))
def fnDisplay(candidate):
display(candidate, startTime)
def fnGetFitness(genes):
return get_fitness(genes, idToLocationLookup)
def fnMutate(genes):
mutate(genes, fnGetFitness)
def fnCrossover(parent, donor):
return crossover(parent, donor, fnGetFitness)
optimalFitness = fnGetFitness(optimalSequence)
startTime = datetime.datetime.now()
best = genetic.get_best(fnGetFitness, None, optimalFitness, None,
fnDisplay, fnMutate, fnCreate, maxAge=500,
poolSize=25, crossover=fnCrossover)
self.assertTrue(not optimalFitness > best.Fitness)
def load_data(localFileName):
""" expects:
HEADER section before DATA section, all lines start in column 0
DATA section element all have space in column 0
<space>1 23.45 67.89
last line of file is: " EOF"
"""
with open(localFileName, mode='r') as infile:
content = infile.read().splitlines()
idToLocationLookup = {}
for row in content:
if row[0] != ' ': # HEADERS
continue
if row == " EOF":
break
id, x, y = row.split(' ')[1:4]
idToLocationLookup[int(id)] = [float(x), float(y)]
return idToLocationLookup
class Fitness:
def __init__(self, totalDistance):
self.TotalDistance = totalDistance
def __gt__(self, other):
return self.TotalDistance < other.TotalDistance
def __str__(self):
return "{:0.2f}".format(self.TotalDistance)
class Pair:
def __init__(self, node, adjacent):
if node < adjacent:
node, adjacent = adjacent, node
self.Node = node
self.Adjacent = adjacent
def __eq__(self, other):
return self.Node == other.Node and self.Adjacent == other.Adjacent
def __hash__(self):
return hash(self.Node) * 397 ^ hash(self.Adjacent)
if __name__ == '__main__':
unittest.main()
|
examples/widgets/meter/lv_example_meter_2.py | nickzhuang0613/lvgl | 5,238 | 12638372 | #!//opt/bin/lv_micropython -i
import utime as time
import lvgl as lv
import display_driver
def set_value(indic,v):
meter.set_indicator_end_value(indic, v)
#
# A meter with multiple arcs
#
meter = lv.meter(lv.scr_act())
meter.center()
meter.set_size(200, 200)
# Remove the circle from the middle
meter.remove_style(None, lv.PART.INDICATOR)
# Add a scale first
scale = meter.add_scale()
meter.set_scale_ticks(scale, 11, 2, 10, lv.palette_main(lv.PALETTE.GREY))
meter.set_scale_major_ticks(scale, 1, 2, 30, lv.color_hex3(0xeee), 10)
meter.set_scale_range(scale, 0, 100, 270, 90)
# Add a three arc indicator
indic1 = meter.add_arc(scale, 10, lv.palette_main(lv.PALETTE.RED), 0)
indic2 = meter.add_arc(scale, 10, lv.palette_main(lv.PALETTE.GREEN), -10)
indic3 = meter.add_arc(scale, 10, lv.palette_main(lv.PALETTE.BLUE), -20)
# Create an animation to set the value
a1 = lv.anim_t()
a1.init()
a1.set_values(0, 100)
a1.set_time(2000)
a1.set_repeat_delay(100)
a1.set_playback_delay(100)
a1.set_playback_time(500)
a1.set_var(indic1)
a1.set_repeat_count(lv.ANIM_REPEAT.INFINITE)
a1.set_custom_exec_cb(lambda a,val: set_value(indic1,val))
lv.anim_t.start(a1)
a2 = lv.anim_t()
a2.init()
a2.set_values(0, 100)
a2.set_time(1000)
a2.set_repeat_delay(100)
a2.set_playback_delay(100)
a2.set_playback_time(1000)
a2.set_var(indic2)
a2.set_repeat_count(lv.ANIM_REPEAT.INFINITE)
a2.set_custom_exec_cb(lambda a,val: set_value(indic2,val))
lv.anim_t.start(a2)
a3 = lv.anim_t()
a3.init()
a3.set_values(0, 100)
a3.set_time(1000)
a3.set_repeat_delay(100)
a3.set_playback_delay(100)
a3.set_playback_time(2000)
a3.set_var(indic3)
a3.set_repeat_count(lv.ANIM_REPEAT.INFINITE)
a3.set_custom_exec_cb(lambda a,val: set_value(indic3,val))
lv.anim_t.start(a3)
|
social/utils.py | raccoongang/python-social-auth | 1,987 | 12638378 | from social_core.utils import social_logger, SSLHttpAdapter, import_module, \
module_member, user_agent, url_add_parameters, to_setting_name, \
setting_name, sanitize_redirect, user_is_authenticated, user_is_active, \
slugify, first, parse_qs, drop_lists, partial_pipeline_data, \
build_absolute_uri, constant_time_compare, is_url, setting_url, \
handle_http_errors, append_slash
|
tests/PyroTests/testsupport.py | brubbel/Pyro4 | 638 | 12638414 | <reponame>brubbel/Pyro4<gh_stars>100-1000
"""
Support code for the test suite.
There's some Python 2.x <-> 3.x compatibility code here.
Pyro - Python Remote Objects. Copyright by <NAME> (<EMAIL>).
"""
import sys
import pickle
import threading
from Pyro4 import errors, core, expose, behavior, current_context
from Pyro4.configuration import config
__all__ = ["tobytes", "tostring", "unicode", "unichr", "basestring", "StringIO",
"NonserializableError", "MyThingPartlyExposed", "MyThingFullExposed",
"MyThingExposedSub", "MyThingPartlyExposedSub", "ConnectionMock",
"AtomicCounter", "ResourceService", "Resource"]
config.reset(False) # reset the config to default
if sys.version_info < (3, 0):
# noinspection PyUnresolvedReferences
from StringIO import StringIO
def tobytes(string, encoding=None):
return string
def tostring(bytes):
return bytes
unicode = unicode
unichr = unichr
basestring = basestring
else:
from io import StringIO
def tobytes(string, encoding="iso-8859-1"):
return bytes(string, encoding)
def tostring(bytes, encoding="utf-8"):
return str(bytes, encoding)
unicode = str
unichr = chr
basestring = str
class NonserializableError(Exception):
def __reduce__(self):
raise pickle.PicklingError("to make this error non-serializable")
class MyThingPartlyExposed(object):
c_attr = "hi"
propvalue = 42
_private_attr1 = "hi"
__private_attr2 = "hi"
name = ""
def __init__(self, name="dummy"):
self.name = name
def __eq__(self, other):
if type(other) is MyThingPartlyExposed:
return self.name == other.name
return False
def method(self, arg, default=99, **kwargs):
pass
@staticmethod
def staticmethod(arg):
pass
@classmethod
def classmethod(cls, arg):
pass
def __dunder__(self):
pass
def __private(self):
pass
def _private(self):
pass
@core.expose
@property
def prop1(self):
return self.propvalue
@core.expose
@prop1.setter
def prop1(self, value):
self.propvalue = value
@core.expose
@property
def readonly_prop1(self):
return self.propvalue
@property
def prop2(self):
return self.propvalue
@prop2.setter
def prop2(self, value):
self.propvalue = value
@core.oneway
@core.expose
def oneway(self, arg):
pass
@core.expose
def exposed(self):
pass
__hash__ = object.__hash__
@core.expose
class MyThingFullExposed(object):
"""this is the same as MyThingPartlyExposed but the whole class should be exposed"""
c_attr = "hi"
propvalue = 42
_private_attr1 = "hi"
__private_attr2 = "hi"
name = ""
def __init__(self, name="dummy"):
self.name = name # note: not affected by @expose, only real properties are
def __eq__(self, other):
if type(other) is MyThingFullExposed:
return self.name == other.name
return False
def method(self, arg, default=99, **kwargs):
pass
@staticmethod
def staticmethod(arg):
pass
@classmethod
def classmethod(cls, arg):
pass
def __dunder__(self):
pass
def __private(self):
pass
def _private(self):
pass
@property
def prop1(self):
return self.propvalue
@prop1.setter
def prop1(self, value):
self.propvalue = value
@property
def readonly_prop1(self):
return self.propvalue
@property
def prop2(self):
return self.propvalue
@prop2.setter
def prop2(self, value):
self.propvalue = value
@core.oneway
def oneway(self, arg):
pass
def exposed(self):
pass
__hash__ = object.__hash__
@core.expose
class MyThingExposedSub(MyThingFullExposed):
def sub_exposed(self):
pass
def sub_unexposed(self):
pass
@core.oneway
def oneway2(self):
pass
class MyThingPartlyExposedSub(MyThingPartlyExposed):
@core.expose
def sub_exposed(self):
pass
def sub_unexposed(self):
pass
@core.oneway
def oneway2(self):
pass
class ConnectionMock(object):
def __init__(self, initial_msg=None):
self.keep_open = False
if not initial_msg:
self.received = b""
elif isinstance(initial_msg, (str, bytes)):
self.received = initial_msg
else:
self.received = initial_msg.to_bytes() # it's probably a Message object
def send(self, data):
self.received += data
def recv(self, datasize):
chunk = self.received[:datasize]
self.received = self.received[datasize:]
if len(chunk) < datasize:
raise errors.ConnectionClosedError("receiving: not enough data")
return chunk
class AtomicCounter(object):
def __init__(self, value=0):
self.__initial = value
self.__value = value
self.__lock = threading.Lock()
def reset(self):
self.__value = self.__initial
def incr(self, amount=1):
with self.__lock:
self.__value += amount
return self.__value
def decr(self, amount=1):
with self.__lock:
self.__value -= amount
return self.__value
@property
def value(self):
return self.__value
class Resource(object):
# a fictional resource that gets allocated and must be freed again later.
def __init__(self, name, collection):
self.name = name
self.collection = collection
self.close_called = False
def close(self):
# Pyro will call this on a tracked resource once the client's connection gets closed!
self.collection.discard(self)
self.close_called = True
@expose
@behavior(instance_mode="single")
class ResourceService(object):
def __init__(self):
self.resources = set() # the allocated resources
def allocate(self, name):
resource = Resource(name, self.resources)
self.resources.add(resource)
current_context.track_resource(resource)
def free(self, name):
resources = {r for r in self.resources if r.name == name}
self.resources -= resources
for r in resources:
r.close()
current_context.untrack_resource(r)
def list(self):
return [r.name for r in self.resources]
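# --- Illustrative usage sketch (editor's addition, not part of the test support module) ---
# Exercising ResourceService directly, outside of a Pyro daemon; resource
# tracking still works because Pyro's current_context is always available.
if __name__ == "__main__":
    service = ResourceService()
    service.allocate("res-a")
    service.allocate("res-b")
    print(sorted(service.list()))   # ['res-a', 'res-b']
    service.free("res-a")
    print(service.list())           # ['res-b']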
|
ngym_shaping/wrappers/noise.py | manuelmolano/ngym_shaping | 112 | 12638422 | """
Noise wrapper.
Created on Thu Feb 28 15:07:21 2019
@author: molano
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import gym
class Noise(gym.Wrapper):
"""Add Gaussian noise to the observations.
Args:
std_noise: Standard deviation of noise. (def: 0.1)
perf_th: If != None, the wrapper will adjust the noise so the mean
performance is not larger than perf_th. (def: None, float)
w: Window used to compute the mean performance. (def: 100, int)
step_noise: Step used to increment/decrease std. (def: 0.001, float)
"""
metadata = {
'description': 'Add Gaussian noise to the observations.',
'paper_link': None,
'paper_name': None,
}
def __init__(self, env, std_noise=.1):
super().__init__(env)
self.env = env
self.std_noise = std_noise
def reset(self, step_fn=None):
if step_fn is None:
step_fn = self.step
return self.env.reset(step_fn=step_fn)
def step(self, action):
obs, reward, done, info = self.env.step(action)
# add noise
obs += self.env.rng.normal(loc=0, scale=self.std_noise,
size=obs.shape)
return obs, reward, done, info
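# --- Illustrative usage sketch (editor's addition, not part of the original wrapper) ---
# Wrapping a neurogym-style task; the task id is a placeholder and the wrapped
# env must provide the `rng` attribute and the `reset(step_fn=...)` signature
# used above.
# import ngym_shaping as ngym
# env = ngym.make('PerceptualDecisionMaking-v0')  # placeholder task id
# env = Noise(env, std_noise=0.2)
# obs = env.reset()
# obs, reward, done, info = env.step(env.action_space.sample())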
|
sblibs/display/asciiart.py | StudyBlue/sblibs | 281 | 12638426 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © <NAME> 2016
#
# @project: Decorating
# @author: <NAME>
# @email: <EMAIL>
#
#
# .. .;-:!!>>!!:--;.. .
# . .-:!>7?CO$QQQQ$OC?:--::-.....
# . .-!7?CO$QQQQQ$$$OCCCC?!:--:>>:;;;...
# . .->CCCC>?OO$$OO$$$$$$O$$$O?7>:>C$C7!:-;;. .
# . .!?O$Q$$OOCC?CCO$QQQQQQ$$QQQ$$$OO$QNHH$C?>!:; .
# . .!OQQQHHHQQQQQ$$QQQQHQHQQQQQHQQHHHHHHHNNNHHQ$OC>. .
# . ;CQHHHHHHHHHHHHHHHHHHHHHHHQHHHHHHNNNNNNNNHNHNNHHH$!;
# :OHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHNNNNNNNNNHHHHNNHNNO:-.
# !OHNNNNNHHHHHHHHHHHHHHHHHHHHHHHHNNHHNNNNNNHHHHHHNHNNNH>;;.
# . !OHNHHHHNHHHHHHHHHHHHHHHNNNNNNNHHHNNNNNNNNHNMNNNNHNNNHNN7 .
# -OQNHNNNNNNHNHHNNHHNNNNNNNNNNNNNNHHHNNNNNHHNQ>!>OHNNHNNHHN$;
# .?$HHNNNNNNNNNNHHNHHNNNNNNNNNNNNNNNNNNNNNNHNQ.;>:::CNNHNNHHNQ!.
# . :$HNNNNNNHHNNHHNNNNHNNNNNNNNNNHHNHNNNNNNHHHN? !; . -ONHNNHNO!-....
# .?QHNNNNNNNNNNNNNNNNNNNNNNNNNHHNNHNHHNNNNNNNNC .;->!:..QNHHHN$!:-;.
# . :$HNNNHNNNNNNNNNNNNNNNNNNNNHHNM$QNHNNHNNNHQQNO :$O>!- CNHHHHN$O7...
# . !QHNNNNNNNNNNNNNNNNNNNHHNNHNNO>;;?:7QNNNQQHQQH! .O7!; :HHHHHNNNH:...
# . !HHNNNNNNNNNNNNNNNNNNNHHHHH$>;.-!7C?O$HOHNNNNO?; -... CMHNNNHQH> ...
# . !HHNNNNNNNNNNNNNNNNNNNHHNN$>77OHNNHHQ7OC$QHHQ>;.. ..;$QOCOQOO!.....
# . !NNNNNNNNNNNNNNNNHHHHNHNNHC$CHO?Q$QQ? ?HQHQ$O$- .. .:??!-. :>- .....
# .QNHNNNNNNNNNNNNNNNNNHHHN$!:-?; :7!. ;>7NNNHQ7 . .;;;..;:.......
# ONHNNNNNNNNNNNNHO$HHMNNM? . .!HNHQO. ;-.-:;....
# . 7NHHNHNNNNNNNNHN?:!;>7:!!. .. .. 7NHQQ: . ;;::>:;;.....
# -!HNHHNNNNNNNNNNQ?>:;. . . .QNQQ? . ;>7C?:::.........
# 7NHNNHNNNNNHNQCCO$Q$O:. . ?MHQ$. >QO>!>7;.........
# . .QHNNNNNHHHHHH$QHQNQ$>... . >NHHN7 ;QO!>CC-..........
# :-$HO$ONHHNHNNHO->CC; .;. . 7NHHH$;?$>7O$:....... ..
# -$:--QHNQN>CMNQ:;--;;;.... .... ?NHNQ!OHCOOQ>;;;..... .
# . 7: ;OH>?;.77- ;;..:- ..... ONHNQCHHHQHQ!--...... ..
# .. . --.;;. . ;O>:; .;........>$NNHNNNHHHH?-.... . ..
# ;.. ......;.;->>:::--;; .;..!>CNNQQNHNHNQ!....
# . ;:. .......... .;:->? -: .-.!HHHN7>NHHNH>;... .
# .!; ... ........;!ONC>>C$7?O>7QNHNQ?HNNNO:... .
# :C!.;. .... ...;-!?$NNNNNNNNNNNHHNHNNHHNQ7;... ....
# 7$O-.. . ...!$QNNHHHHHHHHHHHHNNHHNNQC-... ......
# .?Q$:...... ...-$NNNNHHHHNHNNNHHNNNNNQC>;.. .......
"""
This is another LOL-zone
LOOOOOOOOOOOOOL ART
"""
from __future__ import unicode_literals
# START
# /\O | _O | O
# /\/ | //|_ | /_
# /\ | | | |\
# / \ | /| | / |
# LOL LOL | LLOL | LOLLOL
# -----------+----------+-----------
ARROWS = "←↖↑↗→↘↓↙"
BALLS = ".oO@*"
BRAILY = "⣾⣽⣻⢿⡿⣟⣯"
CIRCLE1 = "◐◓◑◒"
CIRCLE2 = "◴◷◶◵"
CYCLE = "|/-\\"
EYES = ["◡◡", "⊙⊙", "◠◠"]
HPULSE = "▉▊▋▌▍▎▏▎▍▌▋▊▉"
PIPES = "┤┘┴└├┌┬┐"
ROTATING = "▖▘▝▗"
SQUARES = "◰◳◲◱"
TRIANGULES = "◢◣◤◥"
VPULSE = '▁▂▃▄▅▆▇▆▅▄▃▁'
WAVE = "⠁⠂⠄⡀⢀⠠⠐⠈"
# END
# /\O | _O | O
# /\/ | //|_ | /_
# /\ | | | |\
# / \ | /| | / |
# LOL LOL | LLOL | LOLLOL
# -----------+----------+-----------
|
chapter3/chapter3_path_parameters_03.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | 107 | 12638449 | from enum import Enum
from fastapi import FastAPI
class UserType(str, Enum):
STANDARD = "standard"
ADMIN = "admin"
app = FastAPI()
@app.get("/users/{type}/{id}/")
async def get_user(type: UserType, id: int):
return {"type": type, "id": id}
|
rules/community/packetbeat/packetbeat_dns_lookup.py | cninja1/streamalert | 2,770 | 12638472 | <gh_stars>1000+
"""Alert on PacketBeat events"""
from streamalert.shared.rule import rule
@rule(logs=['packetbeat:dns'])
def packetbeat_dns_lookup(rec):
"""
author: gavin (gavinelder)
description: Alert on DNS lookup for Blacklisted domain
testing: (a) Review traffic logs for machine in question.
reference: https://www.elastic.co/guide/en/beats/packetbeat/master/packetbeat-overview.html
"""
return rec['dns']['question']['name'].endswith('.evil.com.')
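# --- Illustrative test sketch (editor's addition, not part of the original rule) ---
# StreamAlert rules are normally exercised through the framework's rule test
# events; the hand-rolled records below only carry the single field this rule
# inspects, and the domain names are placeholders.
_hit = {'dns': {'question': {'name': 'c2.evil.com.'}}}
_miss = {'dns': {'question': {'name': 'example.com.'}}}
assert _hit['dns']['question']['name'].endswith('.evil.com.')
assert not _miss['dns']['question']['name'].endswith('.evil.com.')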
|
reagent/prediction/cfeval/predictor_wrapper.py | dmitryvinn/ReAgent | 1,156 | 12638475 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import List, Tuple
import torch
from reagent.core import types as rlt
from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor
logger = logging.getLogger(__name__)
class BanditRewardNetPredictorWrapper(torch.jit.ScriptModule):
def __init__(
self,
reward_model_with_preprocessor: DiscreteDqnWithPreprocessor,
action_names: List[str],
state_feature_config: rlt.ModelFeatureConfig,
) -> None:
super().__init__()
self.reward_model_with_preprocessor = torch.jit.trace(
reward_model_with_preprocessor,
reward_model_with_preprocessor.input_prototype(),
)
self.action_names = torch.jit.Attribute(action_names, List[str])
@torch.jit.script_method
def forward(
self, state: rlt.ServingFeatureData
) -> Tuple[torch.Tensor, torch.Tensor]:
reward_predictions = self.reward_model_with_preprocessor(state)
num_examples = reward_predictions.size()[0]
num_actions = len(self.action_names)
assert reward_predictions.shape == (
num_examples,
num_actions,
), f"Invalid shape {reward_predictions.shape} != ({num_examples}, {num_actions})"
mask = torch.ones_like(reward_predictions, dtype=torch.uint8)
return (reward_predictions, mask)
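# Added note (not in the original file): the mask returned above is all-ones because every
# discrete action is treated as valid at serving time; callers receive the
# (num_examples, num_actions) reward matrix together with that mask, presumably for the
# counterfactual-evaluation flow suggested by the cfeval module path.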
|
SRT/lib/xvision/transformsImage.py | yerang823/landmark-detection | 612 | 12638478 | <reponame>yerang823/landmark-detection
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import division
import torch, cv2
import sys, math, random, PIL
from PIL import Image, ImageOps
import numpy as np
import numbers
import types
from .affine_utils import identity2affine
from .affine_utils import crop2affine
from .affine_utils import offset2affine
from .affine_utils import scale2affine
from .affine_utils import rotate2affine
from .affine_utils import horizontalmirror2affine
def sample_from_bounded_gaussian(x):
return max(-2*x, min(2*x, random.gauss(0,1)*x))
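# Added note: the helper above draws from a zero-mean Gaussian with standard deviation x
# and clips the sample to [-2x, 2x], so augmentation magnitudes (e.g. the rotation degrees
# used in AugRotate below) never exceed two standard deviations.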
class Compose2V(object):
def __init__(self, transforms):
self.transforms = transforms
def __repr__(self):
xstrs = [str(x) for x in self.transforms]
xstr = ', '.join(xstrs)
return ('{name}('.format(name=self.__class__.__name__, **self.__dict__) + xstr + ')')
def __call__(self, img, point, init_theta=None):
if init_theta is None:
theta = identity2affine(True)
else:
theta = init_theta.clone()
for t in self.transforms:
img, point, theta = t(img, point, theta)
return img, point, theta
class ToPILImage(object):
"""Convert a tensor to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL.Image while preserving the value range.
"""
def __init__(self, normalize=None, return_mode='PIL'):
if normalize is None:
self.mean = None
self.std = None
else:
self.mean = normalize.mean
self.std = normalize.std
self.return_mode = return_mode
def __repr__(self):
return ('{name}(mean={mean}, std={std}, mode={return_mode})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL.Image.
Returns:
PIL.Image: Image converted to PIL.Image.
"""
xinput = []
with torch.no_grad():
for idx, t in enumerate(pic):
if self.std is not None:
t = torch.mul(t, self.std[idx])
if self.mean is not None:
t = torch.add(t, self.mean[idx])
xinput.append( t )
pic = torch.stack(xinput).cpu()
npimg = pic
mode = None
if isinstance(pic, torch.FloatTensor):
pic = pic.mul(255).byte()
if torch.is_tensor(pic):
npimg = np.transpose(pic.numpy(), (1, 2, 0))
assert isinstance(npimg, np.ndarray), 'pic should be Tensor or ndarray'
if npimg.shape[2] == 1:
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
mode = 'L'
if npimg.dtype == np.int16:
mode = 'I;16'
if npimg.dtype == np.int32:
mode = 'I'
elif npimg.dtype == np.float32:
mode = 'F'
else:
if npimg.dtype == np.uint8:
mode = 'RGB'
assert mode is not None, '{:} is not supported'.format(npimg.dtype)
if self.return_mode == 'PIL':
return Image.fromarray(npimg, mode=mode)
elif self.return_mode == 'cv2':
if npimg.ndim == 3: npimg = npimg[:,:,::-1]
return npimg
elif self.return_mode == 'cv2gray':
if npimg.ndim == 2: return npimg
else : return cv2.cvtColor(npimg[:,:,::-1], cv2.COLOR_BGR2GRAY)
else: raise ValueError('invalid return_mode : {:}'.format(self.return_mode))
class ToTensor(object):
"""Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __repr__(self):
return ('{name}()'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, pics, points, theta):
"""
Args:
pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
points 3 * N numpy.ndarray [x, y, visiable] or Point_Meta
Returns:
Tensor: Converted image.
"""
## add to support list
if isinstance(pics, list): is_list = True
else: is_list, pics = False, [pics]
returned = []
for pic in pics:
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
returned.append( img.float().div(255) )
continue
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
img = img.float().div(255)
returned.append(img)
if is_list == False:
assert len(returned) == 1, 'For non-list data, length of answer must be one not {}'.format(len(returned))
returned = returned[0]
return returned, points, theta.clone()
class ColorDisturb(object):
def __init__(self, scale_max):
assert isinstance(scale_max, numbers.Number) and scale_max>0 and scale_max<1, 'The scale_max is wrong : {:}'.format(scale_max)
self.scale_max = scale_max
def __repr__(self):
return ('{name}(scale={scale_max})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, tensors, points, theta):
if isinstance(tensors, list): is_list = True
else : is_list, tensors = False, [tensors]
for tensor in tensors:
for t in tensor:
t.mul_( random.uniform(1-self.scale_max, 1+self.scale_max) ).clamp_(0, 1)
if is_list == False: tensors = tensors[0]
return tensors, points, theta.clone()
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
Args:
mean (sequence): Sequence of means for R, G, B channels respecitvely.
std (sequence): Sequence of standard deviations for R, G, B channels
respecitvely.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __repr__(self):
return ('{name}(mean={mean}, std={std}])'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, tensors, points, theta):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
if isinstance(tensors, list): is_list = True
else : is_list, tensors = False, [tensors]
for tensor in tensors:
assert tensor.size(0) == len(self.mean) == len(self.std), '{:} vs {:} vs {:}'.format(tensor.size(), len(self.mean), len(self.std))
for t, m, s in zip(tensor, self.mean, self.std):
t.sub_(m).div_(s)
if is_list == False: tensors = tensors[0]
return tensors, points, theta.clone()
class PreCrop(object):
def __init__(self, expand_ratio):
assert expand_ratio is None or isinstance(expand_ratio, numbers.Number), 'The expand_ratio should not be {}'.format(expand_ratio)
if expand_ratio is None:
self.expand_ratio = 0
else:
self.expand_ratio = expand_ratio
assert self.expand_ratio >= 0, 'The expand_ratio should not be {}'.format(expand_ratio)
def __repr__(self):
return ('{name}(expand={expand_ratio}])'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
if isinstance(imgs, list):
_, h, w = imgs[0].size()
else:
_, h, w = imgs.size()
box = point_meta.get_box().tolist()
face_ex_w, face_ex_h = (box[2] - box[0]) * self.expand_ratio, (box[3] - box[1]) * self.expand_ratio
x1, y1 = max(box[0]-face_ex_w, 0.0), max(box[1]-face_ex_h, 0.0)
x2, y2 = min(box[2]+face_ex_w, w-1), min(box[3]+face_ex_h, h-1)
xtheta = crop2affine((x1,y1,x2,y2), w, h)
xtheta = torch.mm(theta, xtheta)
return imgs, point_meta, xtheta
class RandomOffset(object):
def __init__(self, ratios):
if ratios is None:
ratios = (0, 0)
elif isinstance(ratios, numbers.Number):
ratios = (ratios, ratios)
assert isinstance(ratios, tuple) and len(ratios) == 2, 'ratios is wrong : {:}'.format(ratios)
self.vertical_ratio = ratios[0]
self.horizontal_ratio = ratios[1]
def __repr__(self):
return ('{name}(vertical={vertical_ratio}, horizontal={horizontal_ratio}])'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
offx = random.uniform(-1, 1) * self.horizontal_ratio
offy = random.uniform(-1, 1) * self.vertical_ratio
parameters = offset2affine(offx, offy)
xtheta = torch.mm(theta, parameters)
return imgs, point_meta, xtheta
class AugScale(object):
def __init__(self, scale_prob, scale_min, scale_max):
assert isinstance(scale_prob, numbers.Number) and scale_prob >= 0, 'scale_prob : {:}'.format(scale_prob)
assert isinstance(scale_min, numbers.Number) and isinstance(scale_max, numbers.Number), 'scales : {:}, {:}'.format(scale_min, scale_max)
self.scale_prob = scale_prob
self.scale_min = scale_min
self.scale_max = scale_max
def __repr__(self):
return ('{name}(prob={scale_prob}, range=[{scale_min}, {scale_max}])'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
dice = random.random()
if dice > self.scale_prob:
return imgs, point_meta, theta.clone()
scale = random.uniform(self.scale_min, self.scale_max)
parameters = scale2affine(scale, scale)
xtheta = torch.mm(theta, parameters)
return imgs, point_meta, xtheta
class CenterCrop(object):
def __init__(self, ratios):
if isinstance(ratios, numbers.Number):
ratios = (ratios, ratios)
if ratios is None:
self.ratios = ratios
else:
assert isinstance(ratios, tuple) and len(ratios) == 2, 'Invalid ratios : {:}'.format(ratios)
self.ratios = ratios
assert ratios[0] <= 1.0 and ratios[1] <= 1.0
def __repr__(self):
return ('{name}(range={ratios})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
if self.ratios is None: return imgs, point_meta, theta
point_meta = point_meta.copy()
xtheta = scale2affine(*self.ratios)
xparam = torch.mm(theta, xtheta)
return imgs, point_meta, xparam
class AugCrop(object):
def __init__(self, ratios):
if isinstance(ratios, numbers.Number):
ratios = (ratios, ratios)
if ratios is None:
self.ratios = ratios
else:
assert isinstance(ratios, tuple) and len(ratios) == 2, 'Invalid ratios : {:}'.format(ratios)
self.ratios = ratios
assert ratios[0] <= 1.0 and ratios[1] <= 1.0
def __repr__(self):
return ('{name}(range={ratios})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
if self.ratios is None: return imgs, point_meta, theta
point_meta = point_meta.copy()
offx = (1-self.ratios[0])/2/self.ratios[0]
offy = (1-self.ratios[1])/2/self.ratios[1]
OFFX, OFFY = random.uniform(-offx, offx), random.uniform(-offy, offy)
offsetP = offset2affine(OFFX, OFFY)
scalerP = scale2affine(*self.ratios)
xtheta = torch.mm(scalerP, offsetP)
xparam = torch.mm(theta, xtheta)
return imgs, point_meta, xparam
class AugRotate(object):
def __init__(self, max_rotate_degree, rotate_prob=1):
assert isinstance(max_rotate_degree, numbers.Number), 'max_rotate_degree : {:}'.format(max_rotate_degree)
assert isinstance(rotate_prob, numbers.Number) and rotate_prob>=0 and rotate_prob<=1, 'The probablity is wrong : {:}'.format(rotate_prob)
self.max_rotate_degree = max_rotate_degree
self.rotate_prob = rotate_prob
def __repr__(self):
return ('{name}(max-degree={max_rotate_degree})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
if random.random() < self.rotate_prob:
#degree = random.uniform(-self.max_rotate_degree, self.max_rotate_degree)
degree = sample_from_bounded_gaussian(self.max_rotate_degree)
if degree < 0: degree = 360 + degree
params = rotate2affine(degree)
theta = torch.mm(theta, params)
return imgs, point_meta, theta
class AugHorizontalFlip(object):
def __init__(self, p=0.5):
assert isinstance(p, numbers.Number) and p>=0 and p<=1, 'The probablity is wrong : {:}'.format(p)
self.probablity = p
def __repr__(self):
return ('{name}(flip_probability={max_rotate_degree})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
if random.random() < self.probablity:
point_meta.apply_horizontal_flip()
params = horizontalmirror2affine()
theta = torch.mm(theta, params)
return imgs, point_meta, theta
class RandomTransf(object):
def __init__(self, scales, offset, rotate, iters):
assert isinstance(scales, tuple) or isinstance(scales, list), 'scales were wrong : {:}'.format(scales)
assert scales[0] < scales[1], 'scales : {:}'.format(scales)
assert offset >= 0.0 and offset <= 1.0 , 'invalid crop value : {:}'.format(offset)
assert rotate >= 0 and rotate <= 360, 'invalid rotate value : {:}'.format(rotate)
assert isinstance(iters, int) and iters > 0, 'invalid iters : {:}'.format(iters)
self.scale_range = scales
self.offset_max = offset
self.rotate_max = rotate
self.iters = iters
def __repr__(self):
return ('{name}(scale={scale_range}, offset={offset_max}, rotate={rotate_max}, iters={iters})'.format(name=self.__class__.__name__, **self.__dict__))
def __call__(self, imgs, point_meta, theta):
point_meta = point_meta.copy()
thetas = []
for _iter in range(self.iters):
# scale:
_theta = theta.clone()
scale = random.uniform(self.scale_range[0], self.scale_range[1])
_theta = torch.mm(_theta, scale2affine(scale, scale))
# random crop
offx = random.uniform(-1, 1) * self.offset_max
offy = random.uniform(-1, 1) * self.offset_max
parameters = offset2affine(offx, offy)
_theta = torch.mm(_theta, parameters)
# random
degree = random.uniform(-self.rotate_max, self.rotate_max)
if degree < 0: degree = 360 + degree
_theta = torch.mm(_theta, rotate2affine(degree))
thetas.append(_theta)
return imgs, point_meta, thetas
|
simfin/signals.py | tom3131/simfin | 231 | 12638481 | ##########################################################################
#
# Functions for calculating signals from share-prices and financial data.
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import pandas as pd
import numpy as np
from simfin.cache import cache
from simfin.derived import free_cash_flow, ncav, netnet, shares
from simfin.rel_change import rel_change
from simfin.resample import reindex
from simfin.utils import apply, add_date_offset
from simfin.names import *
##########################################################################
@cache
def price_signals(df_prices, group_index=TICKER):
"""
Calculate price-signals such as Moving Average and MACD for all stocks
in the given DataFrame.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for multiple stocks.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with price-signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df_prices):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df_prices.index)
# Use the closing share-price for all the signals.
df_price = df_prices[CLOSE]
# Moving Average for past 20 days.
df_signals[MAVG_20] = df_price.rolling(window=20).mean()
# Moving Average for past 200 days.
df_signals[MAVG_200] = df_price.rolling(window=200).mean()
# Exponential Moving Average for past 20 days.
df_signals[EMA] = df_price.ewm(span=20).mean()
# Moving Average Convergence Divergence for 12 and 26 days.
# https://en.wikipedia.org/wiki/MACD
df_signals[MACD] = df_price.ewm(span=12).mean() \
- df_price.ewm(span=26).mean()
# MACD with extra smoothing by Exp. Moving Average for 9 days.
df_signals[MACD_EMA] = df_signals[MACD].ewm(span=9).mean()
return df_signals
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df_prices, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
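# Usage sketch (illustration only; assumes daily share-prices already loaded elsewhere
# with a (TICKER, DATE) MultiIndex):
#
#   df_price_signals = price_signals(df_prices=df_prices)
#   df_price_signals[[MAVG_20, MAVG_200, MACD]].tail()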
##########################################################################
@cache
def trade_signals(df, signal1, signal2, group_index=TICKER):
"""
Create Buy / Sell / Hold signals from two signals in the given DataFrame.
- If `df[signal1] >= df[signal2]` create a Hold signal.
- If `df[signal1]` crosses above `df[signal2]` create a Buy signal.
- if `df[signal1]` crosses below `df[signal2]` create a Sell signal.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df:
Pandas DataFrame with columns `signal1` and `signal2`.
May contain data for one or more stocks.
:param signal1:
String with the name of a column in `df`.
:param signal2:
String with the name of a column in `df`.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas Dataframe with BUY, SELL, HOLD signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Boolean whether signal1 >= signal2.
df_above = (df[signal1] >= df[signal2])
# Boolean whether to buy the stock.
df_signals[BUY] = df_above & ~df_above.shift(1, fill_value=True)
# Boolean whether to sell the stock.
df_signals[SELL] = ~df_above & df_above.shift(1, fill_value=False)
# Boolean whether to keep holding the stock.
df_signals[HOLD] = df_above
return df_signals
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
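# Usage sketch (illustration only): golden-cross style Buy/Sell/Hold flags built from the
# moving averages produced by price_signals() above:
#
#   df_trades = trade_signals(df=df_price_signals, signal1=MAVG_20, signal2=MAVG_200)
#   df_trades[df_trades[BUY]].head()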
##########################################################################
@cache
def volume_signals(df_prices, df_shares, window=20, fill_method='ffill',
offset=None, date_index=REPORT_DATE,
shares_index=SHARES_BASIC, group_index=TICKER):
"""
Calculate signals for the daily trading-volume of stocks, such as:
- REL_VOL: The daily trading-volume relative to its moving average.
- VOLUME_MCAP: The Market-Capitalization of the daily trading volume.
- VOLUME_TURNOVER: Trading-volume relative to the shares outstanding.
The moving-average is calculated in different ways for the signals.
For REL_VOL it is a part of the formula definition. For VOLUME_MCAP
and VOLUME_TURNOVER the moving-average is calculated afterwards.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for multiple stocks.
:param df_shares:
Pandas DataFrame with both columns SHARES_BASIC and SHARES_DILUTED
e.g. `df_shares=df_income_ttm`
:param window:
Integer for the number of days to use in moving-average calculations.
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of `df_shares`. Example:
`pd.DateOffset(days=60)`
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for `df_shares` e.g. REPORT_DATE.
:param shares_index:
Name of the column for share-counts in `df_shares`. SHARES_DILUTED
takes the potential diluting impact of stock-options into account,
while SHARES_BASIC does not take potential dilution into account.
:param group_index:
If the DataFrame has a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with volume-signals.
"""
# Copy the given share-counts (e.g. SHARES_BASIC) and fill in missing
# values with the other share-counts (e.g. SHARES_DILUTED).
df_shares = shares(df=df_shares, index=shares_index)
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Get the relevant data.
df_price = df[CLOSE]
df_volume = df[VOLUME]
# Share-counts from financial reports, reindexed to daily data-points.
df_shares_daily = df[shares_index]
# Moving average for the daily trading volume.
df_volume_mavg = df_volume.rolling(window=window).mean()
# Last trading volume relative to its moving average.
df_rel_vol = df_volume / df_volume_mavg
df_signals[REL_VOL] = np.log(df_rel_vol)
# Calculate Market-Capitalization of the daily trading-volume.
df_vol_mcap = df_volume * df_price
df_signals[VOLUME_MCAP] = df_vol_mcap.rolling(window=window).mean()
# Calculate Volume Turnover as the daily trading-volume
# divided by the total number of shares outstanding.
df_vol_turn = df_volume / df_shares_daily
df_signals[VOLUME_TURNOVER] = df_vol_turn.rolling(window=window).mean()
return df_signals
# Add offset / lag to the dates of the share-counts.
if offset is not None:
df_shares = add_date_offset(df=df_shares, offset=offset,
date_index=date_index)
# Reindex the share-counts to daily data-points.
df_shares_daily = reindex(df_src=df_shares, df_target=df_prices,
method=fill_method, group_index=group_index)
# Combine the relevant data into a single DataFrame.
dfs = [df_prices[[CLOSE, VOLUME]], df_shares_daily]
df = pd.concat(dfs, axis=1)
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
##########################################################################
@cache
def fin_signals(df_income_ttm, df_balance_ttm, df_cashflow_ttm, df_prices=None,
offset=None, func=None, fill_method='ffill',
date_index=REPORT_DATE, group_index=TICKER, banks=False, insurance=False):
"""
Calculate financial signals such as Net Profit Margin, Debt Ratio, ROA,
etc. for all stocks in the given DataFrames.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Optional Pandas DataFrame with share-prices for one or more stocks.
If not `None`, then the signals will be reindexed to the same daily
data-points as `df_prices`, otherwise the signals will be quarterly.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.
:param func:
Function to apply on a per-stock basis after the signals have been
calculated, but before they have been reindexed to daily data-points.
This is useful e.g. to calculate multi-year averages.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
the financial data. Example: `pd.DateOffset(days=60)` This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:param banks:
Boolean whether to use the special datasets for banks.
:param insurance:
Boolean whether to use the special datasets for insurance
companies.
:return:
Pandas DataFrame with financial signals.
"""
# Helper-function for calculating signals for a single stock.
def _signals(df):
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df.index)
# Net Profit Margin.
df_signals[NET_PROFIT_MARGIN] = df[NET_INCOME] / df[REVENUE]
# Gross Profit Margin.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[GROSS_PROFIT_MARGIN] = df[GROSS_PROFIT] / df[REVENUE]
# R&D / Revenue.
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RD_REVENUE] = -df[RESEARCH_DEV] / df[REVENUE]
# R&D / Gross Profit.
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RD_GROSS_PROFIT] = -df[RESEARCH_DEV] / df[GROSS_PROFIT]
# Return on Research Capital (RORC).
# Note: RESEARCH_DEV must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[RORC] = df[GROSS_PROFIT] / -df[RESEARCH_DEV]
# Interest Coverage.
# Note: INTEREST_EXP_NET must be negated.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[INTEREST_COV] = df[OPERATING_INCOME] / -df[INTEREST_EXP_NET]
# Current Ratio = Current Assets / Current Liabilities.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[CURRENT_RATIO] = df[TOTAL_CUR_ASSETS] / df[TOTAL_CUR_LIAB]
# Quick Ratio = (Cash + Equiv. + ST Inv. + Recv.) / Current Liab.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[QUICK_RATIO] = \
(df[CASH_EQUIV_ST_INVEST] + df[ACC_NOTES_RECV].fillna(0.0)) \
/ df[TOTAL_CUR_LIAB]
# Debt Ratio = (Short-term Debt + Long-term Debt) / Total Assets.
df_signals[DEBT_RATIO] = (df[ST_DEBT] + df[LT_DEBT]) / df[TOTAL_ASSETS]
# NOTE: There are different ways of calculating ROA, ROE,
# ASSET_TURNOVER, etc. See Tutorial 04. For example, we could use the
# Assets or Equity from last year instead of from the current year,
# but the resulting ROA, ROE, etc. are usually very similar, and using
# last year's Assets or Equity would cause us to loose one year of
# data-points for the signals we are calculating here.
# Return on Assets = Net Income / Total Assets. See note above.
df_signals[ROA] = df[NET_INCOME] / df[TOTAL_ASSETS]
# Return on Equity = Net Income / Total Equity. See note above.
df_signals[ROE] = df[NET_INCOME] / df[TOTAL_EQUITY]
# Asset Turnover = Revenue / Total Assets. See note above.
df_signals[ASSET_TURNOVER] = df[REVENUE] / df[TOTAL_ASSETS]
# Inventory Turnover = Revenue / Inventory. See note above.
# Note: Not available for banks or insurances.
if not banks and not insurance:
df_signals[INVENTORY_TURNOVER] = df[REVENUE] / df[INVENTORIES]
# Payout Ratio = Dividends / Free Cash Flow
# Note the negation because DIVIDENDS_PAID is negative.
df_signals[PAYOUT_RATIO] = -df[DIVIDENDS_PAID].fillna(0) / df[FCF]
# Buyback Ratio = Share Buyback / Free Cash Flow
# Note the negation because CASH_REPURCHASE_EQUITY is negative.
df_signals[BUYBACK_RATIO] = \
-df[CASH_REPURCHASE_EQUITY].fillna(0) / df[FCF]
# Payout + Buyback Ratio = (Dividends + Share Buyback) / Free Cash Flow
# Note the negation because DIVIDENDS_PAID and CASH_REP.. are negative.
df_signals[PAYOUT_BUYBACK_RATIO] = \
-(df[DIVIDENDS_PAID].fillna(0) +
df[CASH_REPURCHASE_EQUITY].fillna(0)) / df[FCF]
# Net Acquisitions & Divestitures / Total Assets.
# Note the negation because NET_CASH_ACQ_DIVEST is usually negative.
# Note: Not available for insurances.
if not insurance:
df_signals[ACQ_ASSETS_RATIO] = \
-df[NET_CASH_ACQ_DIVEST] / df[TOTAL_ASSETS]
# Capital Expenditures / (Depreciation + Amortization).
# Note the negation because CAPEX is negative.
df_signals[CAPEX_DEPR_RATIO] = -df[CAPEX] / df[DEPR_AMOR]
# Log10(Revenue).
df_signals[LOG_REVENUE] = np.log10(df[REVENUE])
return df_signals
# Get relevant data from Income Statements.
if banks or insurance:
columns = [REVENUE, OPERATING_INCOME,
NET_INCOME]
else:
columns = [REVENUE, GROSS_PROFIT, OPERATING_INCOME, INTEREST_EXP_NET,
NET_INCOME, RESEARCH_DEV]
df1 = df_income_ttm[columns]
# Get relevant data from Balance Sheets.
if banks or insurance:
columns = [TOTAL_ASSETS, TOTAL_EQUITY,
ST_DEBT, LT_DEBT]
else:
columns = [TOTAL_ASSETS, TOTAL_CUR_ASSETS, TOTAL_CUR_LIAB, TOTAL_EQUITY,
ST_DEBT, LT_DEBT, INVENTORIES, CASH_EQUIV_ST_INVEST,
ACC_NOTES_RECV]
df2 = df_balance_ttm[columns]
# Get relevant data from Cash-Flow Statements.
if banks:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY, NET_CASH_ACQ_DIVEST,
CAPEX, DEPR_AMOR]
elif insurance:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY,
CAPEX, DEPR_AMOR]
else:
columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY, NET_CASH_ACQ_DIVEST,
CAPEX, DEPR_AMOR]
df3 = df_cashflow_ttm[columns]
# Calculate Free Cash Flow.
df_fcf = free_cash_flow(df_cashflow=df_cashflow_ttm)
# Combine the data into a single DataFrame.
df = pd.concat([df1, df2, df3, df_fcf], axis=1)
# Add offset / lag to the index-dates of the financial data.
if offset is not None:
df = add_date_offset(df=df, offset=offset, date_index=date_index)
# Calculate signals and use Pandas groupby if `df` has multiple stocks.
df_signals = apply(df=df, func=_signals, group_index=group_index)
# Process the signals using the supplied function e.g. to calculate averages.
if func is not None:
df_signals = apply(df=df_signals, func=func, group_index=group_index)
# Reindex to the same daily data-points as the share-prices.
if df_prices is not None:
df_signals = reindex(df_src=df_signals, df_target=df_prices,
method=fill_method, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
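# Usage sketch (illustration only; the 60-day report lag and 2-year averaging shown here
# are example choices, not defaults):
#
#   df_fin_signals = fin_signals(df_income_ttm=df_income_ttm,
#                                df_balance_ttm=df_balance_ttm,
#                                df_cashflow_ttm=df_cashflow_ttm,
#                                df_prices=df_prices,
#                                offset=pd.DateOffset(days=60),
#                                func=lambda df: 0.5 * (df + df.shift(4)))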
##########################################################################
@cache
def growth_signals(df_income_ttm, df_income_qrt,
df_balance_ttm, df_balance_qrt,
df_cashflow_ttm, df_cashflow_qrt,
df_prices=None, fill_method='ffill',
offset=None, func=None,
date_index=REPORT_DATE, group_index=TICKER):
"""
Calculate growth-signals such as Sales Growth, Earnings Growth, etc.
for all stocks in the given DataFrames.
Three growth-signals are given for each type of financial data, e.g.:
- SALES_GROWTH is calculated from the TTM Revenue divided by the
TTM Revenue from one year ago.
- SALES_GROWTH_YOY is calculated from the Quarterly Revenue divided by
the Quarterly Revenue from one year ago.
- SALES_GROWTH_QOQ is calculated from the Quarterly Revenue divided by
the Quarterly Revenue from the previous quarter.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Optional Pandas DataFrame with share-prices for one or more stocks.
If not `None`, then the signals will be reindexed to the same daily
data-points as `df_prices`, otherwise the signals will be quarterly.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_income_qrt:
Pandas DataFrame with Income Statement Quarterly data for one or more
stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_balance_qrt:
Pandas DataFrame with Balance Sheet Quarterly data for one or more
stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more
stocks.
:param df_cashflow_qrt:
Pandas DataFrame with Cash-Flow Statement Quarterly data for one or
more stocks.
:param func:
Function to apply on a per-stock basis after the signals have been
calculated, but before they have been reindexed to daily data-points.
This is useful e.g. to calculate multi-year averages.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
the financial data. Example: `pd.DateOffset(days=60)` This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:return:
Pandas DataFrame with growth signals.
"""
# This implementation uses sf.rel_change() to calculate the growth-rates,
# which means that several groupby operations are performed. But this is
# easier to implement and for large DataFrames it is only about 10% slower
# than using sf.apply() with a function like _signals() in fin_signals().
###############################
# Annual growth using TTM data.
# Select and combine the data we need.
df_ttm1 = df_income_ttm[[REVENUE, NET_INCOME]]
df_ttm2 = free_cash_flow(df_cashflow_ttm)
df_ttm3 = df_balance_ttm[[TOTAL_ASSETS]]
df_ttm = pd.concat([df_ttm1, df_ttm2, df_ttm3], axis=1)
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH,
NET_INCOME: EARNINGS_GROWTH,
FCF: FCF_GROWTH,
TOTAL_ASSETS: ASSETS_GROWTH}
# Calculate the growth-rates.
df_growth = rel_change(df=df_ttm, freq='q', quarters=4,
future=False, annualized=False,
new_names=new_names)
#############################################
# Year-Over-Year growth using Quarterly data.
# Select and combine the data we need.
df_qrt1 = df_income_qrt[[REVENUE, NET_INCOME]]
df_qrt2 = free_cash_flow(df_cashflow_qrt)
df_qrt3 = df_balance_qrt[[TOTAL_ASSETS]]
df_qrt = pd.concat([df_qrt1, df_qrt2, df_qrt3], axis=1)
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH_YOY,
NET_INCOME: EARNINGS_GROWTH_YOY,
FCF: FCF_GROWTH_YOY,
TOTAL_ASSETS: ASSETS_GROWTH_YOY}
# Calculate the growth-rates.
df_growth_yoy = rel_change(df=df_qrt, freq='q', quarters=4,
future=False, annualized=False,
new_names=new_names)
########################################################
# Quarter-Over-Quarter growth using Quarterly data.
# Note: This uses the same Quarterly DataFrame as above.
# Dict mapping to the new column-names.
new_names = {REVENUE: SALES_GROWTH_QOQ,
NET_INCOME: EARNINGS_GROWTH_QOQ,
FCF: FCF_GROWTH_QOQ,
TOTAL_ASSETS: ASSETS_GROWTH_QOQ}
# Calculate the growth-rates.
df_growth_qoq = rel_change(df=df_qrt, freq='q', quarters=1,
future=False, annualized=False,
new_names=new_names)
##################
# Post-processing.
# Combine into a single DataFrame.
df_signals = pd.concat([df_growth, df_growth_yoy, df_growth_qoq], axis=1)
# Add offset / lag to the index-dates of the signals.
if offset is not None:
df_signals = add_date_offset(df=df_signals, offset=offset,
date_index=date_index)
# Process the signals using the supplied function e.g. to calculate averages.
if func is not None:
df_signals = apply(df=df_signals, func=func, group_index=group_index)
# Reindex to the same daily data-points as the share-prices.
if df_prices is not None:
df_signals = reindex(df_src=df_signals, df_target=df_prices,
method=fill_method, group_index=group_index)
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
##########################################################################
@cache
def val_signals(df_prices, df_income_ttm, df_balance_ttm, df_cashflow_ttm,
fill_method='ffill', offset=None, func=None,
date_index=REPORT_DATE, shares_index=SHARES_DILUTED,
group_index=TICKER, banks=False, insurance=False):
"""
Calculate valuation signals such as P/E and P/Sales ratios for all stocks
in the given DataFrames.
This function can take a while to compute, so it will create a cache-file
if you pass the arg `cache_refresh`. The next time you call this function,
the cache-file will get loaded if it is more recent than specified by
`cache_refresh`, otherwise the function will get computed again and the
result saved in the cache-file for future use. See the documentation for
the :obj:`~simfin.cache.cache` wrapper for details on its arguments.
.. warning:: You **MUST** use keyword arguments to this function,
otherwise the first unnamed arguments would get passed to the
:obj:`~simfin.cache.cache` wrapper instead.
:param df_prices:
Pandas DataFrame with share-prices for one or more stocks.
:param df_income_ttm:
Pandas DataFrame with Income Statement TTM data for one or more stocks.
:param df_balance_ttm:
Pandas DataFrame with Balance Sheet TTM data for one or more stocks.
:param df_cashflow_ttm:
Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.
:param fill_method:
String or callable for the method of filling in empty values when
reindexing financial data to daily data-points.
See :obj:`~simfin.resample.reindex` for valid options.
:param offset:
Pandas DateOffset added to the date-index of the Pandas DataFrames with
the financial data. Example: `pd.DateOffset(days=60)` This is useful if
you want to add a lag of e.g. 60 days to the dates of financial reports
with Income Statements, Balance Sheets, and Cash-Flow Statements, because
the REPORT_DATE is not when it was actually made available to the public,
which can be 1, 2 or even 3 months after the REPORT_DATE.
See :obj:`~simfin.utils.add_date_offset` for more details.
:param func:
Function to apply on a per-stock basis on the financial data, before
calculating the valuation signals. This is useful e.g. to calculate
multi-year averages of the Net Income and Revenue and use those when
calculating P/E and P/Sales ratios.
For example, to calculate the 2-year averages of TTM data:
`func = lambda df: 0.5 * (df + df.shift(4))`
:param date_index:
Name of the date-column for the financial data e.g. REPORT_DATE.
:param shares_index:
String with the column-name for the share-counts. SHARES_DILUTED
takes the potential diluting impact of stock-options into account, so
it results in more conservative valuation ratios than SHARES_BASIC.
:param group_index:
If the DataFrames have a MultiIndex then group data using this
index-column. By default this is TICKER but it could also be e.g.
SIMFIN_ID if you are using that as an index in your DataFrame.
:param banks:
Boolean whether to use the special datasets for banks.
:param insurance:
Boolean whether to use the special datasets for insurance
companies.
:return:
Pandas DataFrame with valuation signals.
"""
# Get the required data from the Income Statements.
columns = [REVENUE, NET_INCOME_COMMON, SHARES_BASIC, SHARES_DILUTED]
df_inc = df_income_ttm[columns]
# Get the required data from the Balance Sheets.
if banks or insurance:
columns = [TOTAL_ASSETS, TOTAL_LIABILITIES, TOTAL_EQUITY]
else:
columns = [TOTAL_CUR_ASSETS, CASH_EQUIV_ST_INVEST, ACC_NOTES_RECV,
INVENTORIES, TOTAL_LIABILITIES, TOTAL_EQUITY]
df_bal = df_balance_ttm[columns]
# Get the required data from the Cash-Flow Statements.
columns = [DIVIDENDS_PAID]
df_cf = df_cashflow_ttm[columns]
# Combine all the data. This creates a new copy that we can add columns to.
df = pd.concat([df_inc, df_bal, df_cf], axis=1)
# Calculate derived financial data such as Free Cash Flow (FCF),
# and add it as new columns to the DataFrame.
# This is only TTM data with 4 data-points per year, so it is
# faster than calculating it for the daily data-points below.
df[FCF] = free_cash_flow(df_cashflow_ttm)
# Note: Not for banks and insurances.
if not banks and not insurance:
df[NCAV] = ncav(df_balance_ttm)
# Note: Not for banks and insurances.
if not banks and not insurance:
df[NETNET] = netnet(df_balance_ttm)
# Add offset / lag to the index-dates of the financial data.
if offset is not None:
df = add_date_offset(df=df, offset=offset, date_index=date_index)
# Copy the number of shares before applying the user-supplied function,
# which might change the number of shares in the original DataFrame df.
# This tries to use the given share-counts (e.g. SHARES_DILUTED) and
# fill in missing values with the other share-counts (e.g. SHARES_BASIC).
df_shares = shares(df=df, index=shares_index)
# Reindex the share-counts to daily data-points.
df_shares_daily = reindex(df_src=df_shares, df_target=df_prices,
method=fill_method, group_index=group_index)
# Process the financial data using the user-supplied function
# e.g. to calculate multi-year averages of Earnings, Sales, etc.
if func is not None:
df = apply(df=df, func=func, group_index=group_index)
# Calculate Per-Share numbers. It is important to use the share-count
# from before the user-supplied function was applied.
df_per_share = df.div(df_shares, axis=0)
# Reindex the per-share financial data to daily data-points.
df_daily = reindex(df_src=df_per_share, df_target=df_prices,
method=fill_method, group_index=group_index)
# Create new DataFrame for the signals.
# Setting the index improves performance.
df_signals = pd.DataFrame(index=df_prices.index)
# Use the closing share-price for all signals.
df_price = df_prices[CLOSE]
# Calculate basic signals.
df_signals[PSALES] = df_price / df_daily[REVENUE]
df_signals[PE] = df_price / df_daily[NET_INCOME_COMMON]
df_signals[PFCF] = df_price / df_daily[FCF]
df_signals[PBOOK] = df_price / df_daily[TOTAL_EQUITY]
# Calculate Price / Net Current Asset Value (NCAV).
# This measures the share-price relative to estimated liquidation value.
# Note: Not for banks and insurances.
if not banks and not insurance:
df_signals[P_NCAV] = df_price / df_daily[NCAV]
# Calculate Price / Net-Net Working Capital (NNWC aka. NetNet).
# This measures the share-price relative to a more conservative estimate
# of liquidation value, which values the Receivables and Inventories at
# a discount to their book-value.
# Note: Not for banks and insurances.
if not banks and not insurance:
df_signals[P_NETNET] = df_price / df_daily[NETNET]
# Calculate Price / (Cash + Equivalents + Short-Term Investments)
# This can be used to screen for companies that might be takeover targets.
# Note: Not for banks and insurances.
if not banks and not insurance:
df_signals[P_CASH] = df_price / df_daily[CASH_EQUIV_ST_INVEST]
# Calculate Earnings Yield (inverse of the P/E ratio).
df_signals[EARNINGS_YIELD] = df_daily[NET_INCOME_COMMON] / df_price
# Calculate FCF Yield (inverse of the P/FCF ratio).
df_signals[FCF_YIELD] = df_daily[FCF] / df_price
# Calculate Dividend Yield using TTM Cash-Flow data, which is easier than
# using df_prices[DIVIDEND] because the actual payment dates may differ
# slightly from one year to the next, making it difficult to calculate TTM.
# Note the negation because DIVIDENDS_PAID is negative.
df_signals[DIV_YIELD] = -df_daily[DIVIDENDS_PAID] / df_price
# Calculate Market Capitalization.
df_signals[MARKET_CAP] = df_shares_daily * df_price
# Sort the columns by their names.
df_signals.sort_index(axis='columns', inplace=True)
return df_signals
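# Usage sketch (illustration only): valuation ratios reindexed to daily share-prices,
# using 2-year averaged TTM fundamentals as suggested in the docstring:
#
#   df_val_signals = val_signals(df_prices=df_prices,
#                                df_income_ttm=df_income_ttm,
#                                df_balance_ttm=df_balance_ttm,
#                                df_cashflow_ttm=df_cashflow_ttm,
#                                func=lambda df: 0.5 * (df + df.shift(4)))
#   df_val_signals[[PE, PSALES, DIV_YIELD]].tail()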
##########################################################################
|
src/agent/kubernetes-agent/src/utils/download.py | hyperledger-gerrit-archive/cello | 865 | 12638497 | #
# SPDX-License-Identifier: Apache-2.0
#
import requests
import mimetypes
import os
from uuid import uuid4
def download_file(url, target_dir):
r = requests.get(url, allow_redirects=True)
content_type = r.headers["content-type"]
# guess_extension() may return None for unknown content types; avoid a literal "None" suffix
extension = mimetypes.guess_extension(content_type) or ""
file_name = "%s%s" % (uuid4().hex, extension)
target_file = os.path.join(target_dir, file_name)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
open(target_file, "wb").write(r.content)
return target_file
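# Usage sketch (hypothetical URL and target directory):
#   local_path = download_file("https://example.com/package.tar.gz", "/tmp/agent-downloads")
#   # -> "/tmp/agent-downloads/<random-hex>" plus an extension guessed from the
#   #    response Content-Type header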
|
chembl_webresource_client/elastic_client.py | RowAnalytics/chembl_webresource_client | 248 | 12638513 | <reponame>RowAnalytics/chembl_webresource_client
import json
from chembl_webresource_client.query import Query
from chembl_webresource_client.settings import Settings
class ElasticClient(Query):
def __init__(self):
super(ElasticClient, self).__init__()
def _search(self, query, method_name):
url = '{0}/{1}/_search'.format(Settings.Instance().ELASTIC_URL, method_name)
res = self.session.post(url, data=
json.dumps({"size": 0,
"suggest": {
"autocomplete": {
"prefix": query,
"completion": {
"field": "_metadata.es_completion"
}
}
}
}
))
if not res.ok:
return
try:
return [x['_id'] for x in res.json()['suggest']['autocomplete'][0]['options']]
except:
pass
def search_molecule(self, query):
return self._search(query, 'chembl_molecule')
def search_target(self, query):
return self._search(query, 'chembl_target')
def search_assay(self, query):
return self._search(query, 'chembl_assay')
def search_document(self, query):
return self._search(query, 'chembl_document')
def search_cell_line(self, query):
return self._search(query, 'chembl_cell_line')
def search_tissue(self, query):
return self._search(query, 'chembl_tissue')
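# Usage sketch (illustration only; assumes the configured ELASTIC_URL is reachable):
#   client = ElasticClient()
#   chembl_ids = client.search_molecule("aspirin")  # list of matching ChEMBL IDs, or None on error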
|
python/oneflow/framework/distribute.py | grybd/oneflow | 3,285 | 12638526 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
from contextlib import contextmanager
import oneflow._oneflow_internal
def split_sbp(axis: int) -> oneflow._oneflow_internal.sbp.sbp:
"""Generate a split scheme in which op will be splitted at `axis`.
Args:
axis (int): At `axis` the op will be splitted.
Returns:
SbpParallel: Split scheme object, often required by `to_consistent` method of `Tensor`
Example::
array = numpy.array([[1.0, 2.0], [3.0, 4.0]])
t1 = flow.tensor(array)
ct2 = t1.to_consistent(sbp=flow.sbp.split(0), placement=("cuda", {0: [0, 1, 2, 3]}))
"""
assert type(axis) is int
return oneflow._oneflow_internal.sbp.split(axis)
|
academicstoday_project/teacher/tests/test_syllabus.py | LeeDoona/EasyGrading | 146 | 12638532 | <reponame>LeeDoona/EasyGrading
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Syllabus
from teacher.views import syllabus
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "<PASSWORD>"
TEST_USER_EMAIL2 = "<EMAIL>"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "<PASSWORD>"
class SyllabusTestCase(TestCase):
def tearDown(self):
syllabuses = Syllabus.objects.all()
for syllabus in syllabuses:
syllabus.delete()
courses = Course.objects.all()
for course in courses:
course.delete()
User.objects.all().delete()
def setUp(self):
# Create our Trudy user.
User.objects.create_user(
email=TEST_USER_EMAIL2,
username=TEST_USER_USERNAME2,
password=<PASSWORD>
)
user = User.objects.get(email=TEST_USER_EMAIL2)
teacher = Teacher.objects.create(user=user)
# Create our Student.
User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=<PASSWORD>,
)
user = User.objects.get(email=TEST_USER_EMAIL)
teacher = Teacher.objects.create(user=user)
# Create a test course.
Course.objects.create(
id=1,
title="Comics Book Course",
sub_title="The definitive course on comics!",
category="",
teacher=teacher,
)
def get_logged_in_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
return client
def get_logged_in_trudy_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME2,
password=<PASSWORD>
)
return client
def test_url_resolves_to_syllabus_page_view(self):
found = resolve('/teacher/course/1/syllabus')
self.assertEqual(found.func, syllabus.syllabus_page)
def test_syllabus_page_without_pdf_file(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/syllabus')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'<h1>Upload Syllabus</h1>',response.content)
def test_syllabus_modal(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/syllabus_modal')
self.assertEqual(response.status_code, 200)
self.assertIn(b'syllabus_modal',response.content)
def test_syllabus_page_with_pdf_file(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_syllabus',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/teacher/course/1/syllabus')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'ajax_delete_syllabus',response.content)
self.assertIn(b'PDF RESULT',response.content)
def test_save_syllabus(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_syllabus',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
def test_delete_syllabus_with_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_syllabus',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
response = client.post('/teacher/course/1/delete_syllabus',{
'syllabus_id': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'deleted')
self.assertEqual(array['status'], 'success')
def test_delete_syllabus_with_incorrect_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_syllabus',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
client.logout()
client = self.get_logged_in_trudy_client()
response = client.post('/teacher/course/1/delete_syllabus',{
'syllabus_id': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'unauthorized deletion')
self.assertEqual(array['status'], 'failed')
|
GPy/kern/src/__init__.py | ekalosak/GPy | 1,685 | 12638547 |
from . import psi_comp
|
src/tests/v010/delete_model_test.py | isaacna/FireO | 231 | 12638555 | <gh_stars>100-1000
from fireo.fields import TextField, NumberField
from fireo.models import Model
class DeleteModelUser(Model):
name = TextField()
class DeleteModelChild(Model):
age = NumberField()
def test_simple_delete():
d = DeleteModelUser(name="Name")
d.save()
DeleteModelUser.collection.delete(d.key)
d2 = DeleteModelUser.collection.get(d.key)
assert d2 is None
def test_multi_delete():
d = DeleteModelUser(name="Name1")
d.save()
d = DeleteModelUser(name="Name2")
d.save()
DeleteModelUser.collection.delete()
d2 = DeleteModelUser.collection.fetch()
assert next(d2, None) is None
def test_parent_delete():
d = DeleteModelUser(name="Name")
d.save()
c = DeleteModelChild(parent=d.key)
c.age = 26
c.save()
DeleteModelChild.collection.delete(c.key)
c2 = DeleteModelChild.collection.get(c.key)
assert c2 is None
def test_multi_parent_delete():
d = DeleteModelUser(name="Name")
d.save()
c = DeleteModelChild(parent=d.key)
c.age = 26
c.save()
c = DeleteModelChild(parent=d.key)
c.age = 27
c.save()
DeleteModelChild.collection.parent(d.key).delete()
c2 = DeleteModelChild.collection.parent(d.key).fetch()
assert next(c2, None) is None |
recipes/Python/576487_flatten_sequences/recipe-576487.py | tdiprima/code | 2,023 | 12638557 | def flatten(list):
"""Flatten a list of elements into a unique list
Author: <NAME>
Examples:
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
if __name__ == '__main__':
import doctest
doctest.testmod()
|
blockchain-workbench/rest-api-samples/python/swagger_client/models/contract_action.py | chaosmail/blockchain | 738 | 12638559 | # coding: utf-8
"""
Azure Blockchain Workbench REST API
The Azure Blockchain Workbench REST API is a Workbench extensibility point, which allows developers to create and manage blockchain applications, manage users and organizations within a consortium, integrate blockchain applications into services and platforms, perform transactions on a blockchain, and retrieve transactional and contract data from a blockchain. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.contract_action_parameter import ContractActionParameter # noqa: F401,E501
class ContractAction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'user_id': 'int',
'provisioning_status': 'int',
'timestamp': 'datetime',
'parameters': 'list[ContractActionParameter]',
'workflow_function_id': 'int',
'transaction_id': 'int'
}
attribute_map = {
'id': 'id',
'user_id': 'userId',
'provisioning_status': 'provisioningStatus',
'timestamp': 'timestamp',
'parameters': 'parameters',
'workflow_function_id': 'workflowFunctionId',
'transaction_id': 'transactionId'
}
def __init__(self, id=None, user_id=None, provisioning_status=None, timestamp=None, parameters=None, workflow_function_id=None, transaction_id=None): # noqa: E501
"""ContractAction - a model defined in Swagger""" # noqa: E501
self._id = None
self._user_id = None
self._provisioning_status = None
self._timestamp = None
self._parameters = None
self._workflow_function_id = None
self._transaction_id = None
self.discriminator = None
if id is not None:
self.id = id
if user_id is not None:
self.user_id = user_id
if provisioning_status is not None:
self.provisioning_status = provisioning_status
if timestamp is not None:
self.timestamp = timestamp
if parameters is not None:
self.parameters = parameters
if workflow_function_id is not None:
self.workflow_function_id = workflow_function_id
if transaction_id is not None:
self.transaction_id = transaction_id
@property
def id(self):
"""Gets the id of this ContractAction. # noqa: E501
:return: The id of this ContractAction. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ContractAction.
:param id: The id of this ContractAction. # noqa: E501
:type: int
"""
self._id = id
@property
def user_id(self):
"""Gets the user_id of this ContractAction. # noqa: E501
:return: The user_id of this ContractAction. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this ContractAction.
:param user_id: The user_id of this ContractAction. # noqa: E501
:type: int
"""
self._user_id = user_id
@property
def provisioning_status(self):
"""Gets the provisioning_status of this ContractAction. # noqa: E501
:return: The provisioning_status of this ContractAction. # noqa: E501
:rtype: int
"""
return self._provisioning_status
@provisioning_status.setter
def provisioning_status(self, provisioning_status):
"""Sets the provisioning_status of this ContractAction.
:param provisioning_status: The provisioning_status of this ContractAction. # noqa: E501
:type: int
"""
self._provisioning_status = provisioning_status
@property
def timestamp(self):
"""Gets the timestamp of this ContractAction. # noqa: E501
:return: The timestamp of this ContractAction. # noqa: E501
:rtype: datetime
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""Sets the timestamp of this ContractAction.
:param timestamp: The timestamp of this ContractAction. # noqa: E501
:type: datetime
"""
self._timestamp = timestamp
@property
def parameters(self):
"""Gets the parameters of this ContractAction. # noqa: E501
:return: The parameters of this ContractAction. # noqa: E501
:rtype: list[ContractActionParameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this ContractAction.
:param parameters: The parameters of this ContractAction. # noqa: E501
:type: list[ContractActionParameter]
"""
self._parameters = parameters
@property
def workflow_function_id(self):
"""Gets the workflow_function_id of this ContractAction. # noqa: E501
:return: The workflow_function_id of this ContractAction. # noqa: E501
:rtype: int
"""
return self._workflow_function_id
@workflow_function_id.setter
def workflow_function_id(self, workflow_function_id):
"""Sets the workflow_function_id of this ContractAction.
:param workflow_function_id: The workflow_function_id of this ContractAction. # noqa: E501
:type: int
"""
self._workflow_function_id = workflow_function_id
@property
def transaction_id(self):
"""Gets the transaction_id of this ContractAction. # noqa: E501
:return: The transaction_id of this ContractAction. # noqa: E501
:rtype: int
"""
return self._transaction_id
@transaction_id.setter
def transaction_id(self, transaction_id):
"""Sets the transaction_id of this ContractAction.
:param transaction_id: The transaction_id of this ContractAction. # noqa: E501
:type: int
"""
self._transaction_id = transaction_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContractAction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
ex48/lexicon.py | youknowone/learn-python3-thw-code-ko | 329 | 12638586 | <filename>ex48/lexicon.py
WORD_TYPES = {
"north" : "direction",
"south" : "direction",
"east" : "direction",
"west" : "direction",
"go" : "verb",
"kill" : "verb",
"eat" : "verb",
"the" : "stop",
"in" : "stop",
"of" : "stop",
"bear" : "noun",
"princess" : "noun",
}
def convert_number(word):
try:
return int(word)
    except ValueError:
return None
def scan(sentence):
words = sentence.split()
results = []
for word in words:
word_type = WORD_TYPES.get(word)
if word_type == None:
# it might be a number, so try converting
number = convert_number(word)
if number != None:
results.append(('number', number))
else:
results.append(('error', word))
else:
results.append((word_type, word))
return results
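

# A brief hedged usage sketch (not part of the original exercise): it assumes
# the WORD_TYPES lexicon above and simply prints the (word_type, word) tuples
# that scan() produces for a sample sentence.
if __name__ == '__main__':
    # expected: [('verb', 'go'), ('direction', 'north'), ('number', 3), ('error', 'cows')]
    print(scan("go north 3 cows"))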
|
tests/test_base.py | ParikhKadam/pycorrector | 3,153 | 12638632 | # -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description:
"""
import sys
import unittest
sys.path.append('..')
import pycorrector
class BaseTestCase(unittest.TestCase):
def test_base_correct(self):
query = '机七学习是人工智能领遇最能体现智能的一个分知'
corrected_sent, detail = pycorrector.correct(query)
print(corrected_sent, detail)
self.assertEqual(corrected_sent, '机器学习是人工智能领域最能体现智能的一个分知')
self.assertEqual(detail, [('机七', '机器', 0, 2), ('领遇', '领域', 9, 11)])
def test_base_demos(self):
sents = [
'少先队员因该为老人让坐',
'今天心情很好',
'真麻烦你了。希望你们好好的跳无',
'机七学习是人工智能领遇最能体现智能的一个分知',
'一只小鱼船浮在平净的河面上',
'我的家乡是有明的渔米之乡',
]
res = []
for name in sents:
s, r = pycorrector.correct(name)
print(r)
res.append(r)
self.assertEqual(res[0], [('因该', '应该', 4, 6), ('坐', '座', 10, 11)])
self.assertEqual(res[1], [])
self.assertEqual(res[2], [('无', '舞', 14, 15)])
self.assertEqual(res[3], [('机七', '机器', 0, 2), ('领遇', '领域', 9, 11)])
self.assertEqual(res[4], [('平净', '平静', 7, 9)])
self.assertEqual(res[5], [('有明', '有名', 5, 7)])
def test_confusion_dict(self):
sents = [
'买iphonex,要多少钱',
'共同实际控制人萧华、霍荣铨、张旗康',
]
res = []
for name in sents:
s, r = pycorrector.correct(name)
print(r)
res.append(r)
self.assertEqual(res[0], [])
self.assertEqual(res[1], [('张旗康', '张启康', 14, 17)])
pycorrector.set_custom_confusion_dict('../examples/my_custom_confusion.txt')
res = []
for name in sents:
s, r = pycorrector.correct(name)
print(r)
res.append(r)
self.assertEqual(res[0], [('iphonex', 'iphoneX', 1, 8)])
self.assertEqual(res[1], [])
if __name__ == '__main__':
unittest.main()
|
package/kedro_viz/data_access/__init__.py | deepyaman/kedro-viz | 125 | 12638639 | <gh_stars>100-1000
"""`kedro_viz.data_access` provides an interface to save and load data for viz backend."""
from .managers import DataAccessManager
data_access_manager = DataAccessManager()
|
maskrcnn_benchmark/layers/sigmoid_focal_loss.py | microsoft/GLIP | 295 | 12638649 | <filename>maskrcnn_benchmark/layers/sigmoid_focal_loss.py
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
# TODO: Use JIT to replace CUDA implementation in the future.
class _SigmoidFocalLoss(Function):
@staticmethod
def forward(ctx, logits, targets, gamma, alpha):
ctx.save_for_backward(logits, targets)
num_classes = logits.shape[1]
ctx.num_classes = num_classes
ctx.gamma = gamma
ctx.alpha = alpha
losses = _C.sigmoid_focalloss_forward(
logits, targets, num_classes, gamma, alpha
)
return losses
@staticmethod
@once_differentiable
def backward(ctx, d_loss):
logits, targets = ctx.saved_tensors
num_classes = ctx.num_classes
gamma = ctx.gamma
alpha = ctx.alpha
d_loss = d_loss.contiguous()
d_logits = _C.sigmoid_focalloss_backward(
logits, targets, d_loss, num_classes, gamma, alpha
)
return d_logits, None, None, None, None
sigmoid_focal_loss_cuda = _SigmoidFocalLoss.apply
def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
num_classes = logits.shape[1]
dtype = targets.dtype
device = targets.device
class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0)
t = targets.unsqueeze(1)
p = torch.sigmoid(logits)
term1 = (1 - p) ** gamma * torch.log(p)
term2 = p ** gamma * torch.log(1 - p)
return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha)
class SigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super(SigmoidFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, logits, targets):
if logits.is_cuda:
loss_func = sigmoid_focal_loss_cuda
else:
loss_func = sigmoid_focal_loss_cpu
loss = loss_func(logits, targets, self.gamma, self.alpha)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "gamma=" + str(self.gamma)
tmpstr += ", alpha=" + str(self.alpha)
tmpstr += ")"
return tmpstr
def token_sigmoid_softmax_focal_loss(pred_logits, targets, alpha, gamma, text_mask=None):
    # Another modification: because this is the cross-entropy (softmax) version, there is no notion of
    # frequent vs. infrequent classes, so the alpha weighting is temporarily retired here.
assert (targets.dim() == 3)
assert (pred_logits.dim() == 3) # batch x from x to
# reprocess target to become probability map ready for softmax
targets = targets.float()
target_num = targets.sum(-1) + 1e-8 # numerical stability
targets = targets / target_num.unsqueeze(-1) # T(x)
if text_mask is not None:
# reserve the last token for non object
assert (text_mask.dim() == 2)
text_mask[:, -1] = 1
text_mask = (text_mask > 0).unsqueeze(1).repeat(1, pred_logits.size(1), 1) # copy along the image channel
pred_logits = pred_logits.masked_fill(~text_mask, -1000000) # softmax
out_prob = pred_logits.softmax(-1)
filled_targets = targets.clone()
filled_targets[filled_targets == 0] = 1.0
weight = torch.clamp(targets - out_prob, min=0.001) / filled_targets
weight = torch.pow(weight, gamma) # weight = torch.pow(torch.clamp(target - out_prob, min=0.01), gamma)
loss_ce = - targets * weight * pred_logits.log_softmax(
-1) # only those positives with positive target_sim will have losses.
return loss_ce
def token_sigmoid_binary_focal_loss_v2(pred_logits, targets, alpha, gamma, text_mask=None):
assert (targets.dim() == 3)
assert (pred_logits.dim() == 3) # batch x from x to
if text_mask is not None:
assert (text_mask.dim() == 2)
# We convert everything into binary
out_prob = pred_logits.sigmoid()
out_prob_neg_pos = torch.stack([1 - out_prob, out_prob], dim=-1) + 1e-8 # batch x boxes x 256 x 2
weight = torch.pow(-out_prob_neg_pos + 1.0, gamma)
focal_zero = - weight[:, :, :, 0] * torch.log(out_prob_neg_pos[:, :, :, 0]) * (
1 - alpha) # negative class
focal_one = - weight[:, :, :, 1] * torch.log(out_prob_neg_pos[:, :, :, 1]) * alpha # positive class
focal = torch.stack([focal_zero, focal_one], dim=-1)
loss_ce = torch.gather(focal, index=targets.long().unsqueeze(-1), dim=-1)
return loss_ce
def token_sigmoid_binary_focal_loss(pred_logits, targets, alpha, gamma, text_mask=None):
# binary version of focal loss
# copied from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = -1 (no weighting).
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor with the reduction option applied.
"""
assert (targets.dim() == 3)
assert (pred_logits.dim() == 3) # batch x from x to
bs, n, _ = pred_logits.shape
if text_mask is not None:
assert (text_mask.dim() == 2)
text_mask = (text_mask > 0).unsqueeze(1)
text_mask = text_mask.repeat(1, pred_logits.size(1), 1) # copy along the image channel dimension
pred_logits = torch.masked_select(pred_logits, text_mask)
targets = torch.masked_select(targets, text_mask)
# print(pred_logits.shape)
# print(targets.shape)
p = torch.sigmoid(pred_logits)
ce_loss = F.binary_cross_entropy_with_logits(pred_logits, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss
class TokenSigmoidFocalLoss(nn.Module):
def __init__(self, alpha, gamma):
super(TokenSigmoidFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
def forward(self, logits, targets, text_masks=None, version="binary", **kwargs):
if version == "binary":
loss_func = token_sigmoid_binary_focal_loss
elif version == "softmax":
loss_func = token_sigmoid_softmax_focal_loss
elif version == "binaryv2":
loss_func = token_sigmoid_binary_focal_loss_v2
else:
raise NotImplementedError
loss = loss_func(logits, targets, self.alpha, self.gamma, text_masks, **kwargs)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "gamma=" + str(self.gamma)
tmpstr += ", alpha=" + str(self.alpha)
tmpstr += ")"
return tmpstr
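

# A minimal, hedged usage sketch (illustrative only; the shapes and values below
# are assumptions, not part of the upstream API). It exercises the pure-Python
# token_sigmoid_binary_focal_loss path, so no CUDA kernel is needed, although
# importing this module still assumes the compiled maskrcnn_benchmark._C
# extension is available.
if __name__ == "__main__":
    logits = torch.randn(2, 4, 256)                    # batch x boxes x text tokens
    targets = (torch.rand(2, 4, 256) > 0.95).float()   # sparse positive tokens
    text_masks = torch.ones(2, 256, dtype=torch.bool)  # every token is valid text
    loss_fn = TokenSigmoidFocalLoss(alpha=0.25, gamma=2.0)
    print(loss_fn(logits, targets, text_masks, version="binary"))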
|
lib/exabgp/bgp/message/update/nlri/evpn/__init__.py | cloudscale-ch/exabgp | 1,560 | 12638656 | """
evpn/__init__.py
Created by <NAME> on 2014-06-27.
Copyright (c) 2014-2017 Orange. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
# Every EVPN should be imported from this file
# as it makes sure that all the registering decorator are run
from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN
from exabgp.bgp.message.update.nlri.evpn.ethernetad import EthernetAD
from exabgp.bgp.message.update.nlri.evpn.mac import MAC
from exabgp.bgp.message.update.nlri.evpn.multicast import Multicast
from exabgp.bgp.message.update.nlri.evpn.segment import EthernetSegment
from exabgp.bgp.message.update.nlri.evpn.prefix import Prefix
|
apps/dash-3d-image-partitioning/app.py | JeroenvdSande/dash-sample-apps | 2,332 | 12638663 | import dash
from dash.dependencies import Input, Output, State, ClientsideFunction
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objects as go
from skimage import data, img_as_ubyte, segmentation, measure
from dash_canvas.utils import array_to_data_url
import plotly.graph_objects as go
import plot_common
import image_utils
import numpy as np
from nilearn import image
import nibabel as nib
import plotly.express as px
import shape_utils
from sys import exit
import io
import base64
import skimage
import time
import os
DEBUG_MASK = False
DEFAULT_STROKE_COLOR = px.colors.qualitative.Light24[0]
DEFAULT_STROKE_WIDTH = 5
# the scales for the top and side images (they might be different)
# TODO: If the width and height scales are different, strange things happen? For
# example I have observed the masks getting scaled unevenly, maybe because the
# axes are actually scaled evenly (fixed to the x axis?) but then the mask gets
# scaled differently?
hwscales = [(2, 2), (2, 2)]
# the number of dimensions displayed
NUM_DIMS_DISPLAYED = 2 # top and side
# the color of the triangles displaying the slice number
INDICATOR_COLOR = "DarkOrange"
DISPLAY_BG_COLOR = "darkgrey"
# A string, if length non-zero, saves superpixels to this file and then exits
SAVE_SUPERPIXEL = os.environ.get("SAVE_SUPERPIXEL", default="")
# A string, if length non-zero, loads superpixels from this file
LOAD_SUPERPIXEL = os.environ.get("LOAD_SUPERPIXEL", default="")
# If not "0", debugging mode is on.
DEBUG = os.environ.get("DEBUG", default="0") != "0"
def PRINT(*vargs):
if DEBUG:
print(*vargs)
def make_seg_image(img):
""" Segment the image, then find the boundaries, then return an array that
is clear (alpha=0) where there are no boundaries. """
segb = np.zeros_like(img).astype("uint8")
seg = segmentation.slic(
img, start_label=1, multichannel=False, compactness=0.1, n_segments=300
)
# Only keep superpixels with an average intensity greater than threshold
# in order to remove superpixels of the background
superpx_avg = (
np.histogram(
seg.astype(np.float), bins=np.arange(0, 310), weights=img.astype(np.float)
)[0]
/ np.histogram(seg.astype(np.float), bins=np.arange(0, 310))[0]
> 10
)
mask_brain = superpx_avg[seg]
seg[np.logical_not(mask_brain)] = 0
seg, _, _ = segmentation.relabel_sequential(seg)
segb = segmentation.find_boundaries(seg).astype("uint8")
segl = image_utils.label_to_colors(
segb, colormap=["#000000", "#E48F72"], alpha=[0, 128], color_class_offset=0
)
return (segl, seg)
def make_default_figure(
images=[],
stroke_color=DEFAULT_STROKE_COLOR,
stroke_width=DEFAULT_STROKE_WIDTH,
img_args=dict(layer="above"),
width_scale=1,
height_scale=1,
):
fig = plot_common.dummy_fig()
plot_common.add_layout_images_to_fig(
fig,
images,
img_args=img_args,
width_scale=width_scale,
height_scale=height_scale,
update_figure_dims="height",
)
# add an empty image with the same size as the greatest of the already added
# images so that we can add computed masks clientside later
mwidth, mheight = [
max([im[sz] for im in fig["layout"]["images"]]) for sz in ["sizex", "sizey"]
]
fig.add_layout_image(
dict(
source="",
xref="x",
yref="y",
x=0,
y=0,
sizex=mwidth,
sizey=mheight,
sizing="contain",
layer="above",
)
)
fig.update_layout(
{
"dragmode": "drawopenpath",
"newshape.line.color": stroke_color,
"newshape.line.width": stroke_width,
"margin": dict(l=0, r=0, b=0, t=0, pad=4),
"plot_bgcolor": DISPLAY_BG_COLOR,
}
)
return fig
img = image.load_img("assets/BraTS19_2013_10_1_flair.nii")
img = img.get_data().transpose(2, 0, 1)[::-1].astype("float")
img = img_as_ubyte((img - img.min()) / (img.max() - img.min()))
def make_empty_found_segments():
""" fstc_slices is initialized to a bunch of images containing nothing (clear pixels) """
found_segs_tensor = np.zeros_like(img)
# convert to a colored image (but it will just be colored "clear")
fst_colored = image_utils.label_to_colors(
found_segs_tensor,
colormap=["#000000", "#8A2BE2"],
alpha=[0, 128],
color_class_offset=0,
)
fstc_slices = [
[
array_to_data_url(np.moveaxis(fst_colored, 0, j)[i])
for i in range(np.moveaxis(fst_colored, 0, j).shape[0])
]
for j in range(NUM_DIMS_DISPLAYED)
]
return fstc_slices
if len(LOAD_SUPERPIXEL) > 0:
# load partitioned image (to save time)
if LOAD_SUPERPIXEL.endswith(".gz"):
import gzip
with gzip.open(LOAD_SUPERPIXEL) as fd:
dat = np.load(fd)
segl = dat["segl"]
seg = dat["seg"]
else:
dat = np.load(LOAD_SUPERPIXEL)
segl = dat["segl"]
seg = dat["seg"]
else:
# partition image
segl, seg = make_seg_image(img)
if len(SAVE_SUPERPIXEL) > 0:
np.savez(SAVE_SUPERPIXEL, segl=segl, seg=seg)
exit(0)
seg_img = img_as_ubyte(segl)
img_slices, seg_slices = [
[
# top
[array_to_data_url(im[i, :, :]) for i in range(im.shape[0])],
# side
[array_to_data_url(im[:, i, :]) for i in range(im.shape[1])],
]
for im in [img, seg_img]
]
# initially no slices have been found so we don't draw anything
found_seg_slices = make_empty_found_segments()
# store encoded blank slices for each view to save recomputing them for slices
# containing no colored pixels
blank_seg_slices = [found_seg_slices[0][0], found_seg_slices[1][0]]
app = dash.Dash(__name__)
server = app.server
top_fig, side_fig = [
make_default_figure(
images=[img_slices[i][0], seg_slices[i][0]],
width_scale=hwscales[i][1],
height_scale=hwscales[i][0],
)
for i in range(NUM_DIMS_DISPLAYED)
]
default_3d_layout = dict(
scene=dict(
yaxis=dict(visible=False, showticklabels=False, showgrid=False, ticks=""),
xaxis=dict(visible=True, title="Side View Slice Number"),
zaxis=dict(visible=True, title="Top View Slice Number"),
camera=dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=1.25, y=1.25, z=1.25),
),
),
height=800,
)
def make_default_3d_fig():
fig = go.Figure(data=[go.Mesh3d()])
fig.update_layout(**default_3d_layout)
return fig
def make_modal():
with open("assets/howto.md", "r") as f:
readme_md = f.read()
return html.Div(
id="markdown",
className="modal",
style={"display": "none"},
children=[
html.Div(
id="markdown-container",
className="markdown-container",
# style={
# "color": text_color["light"],
# "backgroundColor": card_color["light"],
# },
children=[
html.Div(
className="close-container",
children=html.Button(
"Close",
id="markdown_close",
n_clicks=0,
className="closeButton",
style={"color": "DarkBlue"},
),
),
html.Div(
className="markdown-text", children=dcc.Markdown(readme_md)
),
],
)
],
)
app.layout = html.Div(
id="main",
children=[
# Banner display
html.Div(
id="banner",
children=[
html.Div(
html.H1(
"3D Image Annotation",
id="title",
style={
"color": "#f9f9f9",
"display": "inline-block",
"margin": "0",
},
),
),
html.Div(
html.Button(
"Learn more",
id="learn-more-button",
n_clicks=0,
style={"width": "auto"},
),
),
# Adding the modal content here. It is only shown if the show-modal
# button is pressed
make_modal(),
html.Img(id="logo", src=app.get_asset_url("dash-logo-new.png"),),
],
style={
"display": "flex",
"position": "relative",
"margin": "10px 10px 10px 10px",
},
),
dcc.Store(id="image-slices", data=img_slices),
dcc.Store(id="seg-slices", data=seg_slices),
dcc.Store(
id="drawn-shapes",
data=[
[[] for _ in range(seg_img.shape[i])] for i in range(NUM_DIMS_DISPLAYED)
],
),
dcc.Store(id="slice-number-top", data=0),
dcc.Store(id="slice-number-side", data=0),
dcc.Store(
id="undo-data",
data=dict(
undo_n_clicks=0,
redo_n_clicks=0,
undo_shapes=[],
redo_shapes=[],
# 2 arrays, one for each image-display-graph-{top,side}
# each array contains the number of slices in that image view, and each
# item of this array contains a list of shapes
empty_shapes=[
[[] for _ in range(seg_img.shape[i])]
for i in range(NUM_DIMS_DISPLAYED)
],
),
),
# In this implementation we want to prevent needless passing of the
# large image array from client to server, so when "downloaded-button"
# is clicked, the contents of the "found-segs" store is converted to nii
# imaging format, converted to base64, and stored in the
# "found-image-tensor-data" store. When this store's contents are
# updated, they are stored, decoded, in a Blob and a url is created from
# the contents of this blob and set as the href of "download-link". Then
# somehow we need to simulate a "click" on the "download-link". The
# "found-image-tensor-data" store is necessary because we can only pass
# base64-encoded data between client and server: we let the browser
# handle how data from the browser can be written to the client's
# filesystem.
html.Div(
id="loader-wrapper",
children=[
# required so callback triggered by writing to "found-image-tensor-data"
# has an output
html.Div(id="dummy", style={"display": "none"}),
html.Div(id="dummy2", style={"display": "none"}, children=",0"),
# hidden elements so we can show/hide segmentations on 2d and 3d figures
html.Div(
id="show-hide-seg-2d", children="show", style={"display": "none"}
),
html.Div(
id="show-hide-seg-3d", children="show", style={"display": "none"}
),
dcc.Loading(
id="graph-loading",
type="circle",
children=[
html.A(id="download-link", download="found_image.nii",),
# the image data of the found segmentation is stored
# here before it is downloaded
dcc.Store(id="found-image-tensor-data", data=""),
html.Div(
children=[
html.Button(
"3D View",
id="view-select-button",
n_clicks=0,
style={"width": "25%"},
),
html.Button(
"Hide Segmentation",
id="show-seg-check",
n_clicks=0,
style={"width": "25%"},
),
html.Button(
"Download Brain Volume",
id="download-brain-button",
style={"width": "auto"},
),
html.Button(
"Download Selected Partitions",
id="download-button",
style={"width": "auto"},
),
html.Button(
"Undo",
id="undo-button",
n_clicks=0,
style={"width": "12.5%"},
),
html.Button(
"Redo",
id="redo-button",
n_clicks=0,
style={"width": "12.5%"},
),
],
style={"display": "flex", "margin": "2px 0 2px 0"},
),
html.Div(
id="2D-graphs",
style={
"display": "grid",
"grid-template-columns": "repeat(2,1fr)",
"grid-auto-rows": "auto",
"grid-gap": "0 2px",
},
children=[
html.Div(
[
html.H6(
"Top View", style={"text-align": "center",}
)
],
style={
"grid-column": "1",
"grid-row": "1",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
dcc.Graph(
id="image-display-graph-top",
figure=top_fig,
)
],
style={
"grid-column": "1",
"grid-row": "2",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
html.Div(
id="image-select-top-display",
style={"width": "125px"},
),
html.Div(
dcc.Slider(
id="image-select-top",
min=0,
max=len(img_slices[0]) - 1,
step=1,
updatemode="drag",
value=len(img_slices[0]) // 2,
),
style={"flex-grow": "1"},
),
],
style={
"grid-column": "1",
"grid-row": "3",
"display": "flex",
"background": "grey",
},
),
html.Div(
[
html.H6(
"Side View", style={"text-align": "center"}
)
],
style={
"grid-column": "2",
"grid-row": "1",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
dcc.Graph(
id="image-display-graph-side",
figure=side_fig,
)
],
style={
"grid-column": "2",
"grid-row": "2",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
html.Div(
id="image-select-side-display",
style={"width": "125px"},
),
html.Div(
[
dcc.Slider(
id="image-select-side",
min=0,
max=len(img_slices[1]) - 1,
step=1,
updatemode="drag",
value=len(img_slices[1]) // 2,
)
],
style={"flex-grow": "1"},
),
],
style={
"grid-column": "2",
"grid-row": "3",
"display": "flex",
"background": "grey",
},
),
# This store has to be put here so dcc.Loading sees that it is updating.
dcc.Store(id="found-segs", data=found_seg_slices),
],
),
html.Div(
id="3D-graphs",
children=[
dcc.Graph(
"image-display-graph-3d",
figure=make_default_3d_fig(),
config=dict(displayModeBar=False,),
)
],
style={"display": "none"},
),
],
),
],
),
dcc.Store(id="fig-3d-scene", data=default_3d_layout,),
dcc.Store(id="current-render-id", data=0),
dcc.Store(id="last-render-id", data=0),
],
)
app.clientside_callback(
"""
function (show_seg_n_clicks) {
// update show segmentation button
var show_seg_button = document.getElementById("show-seg-check");
if (show_seg_button) {
show_seg_button.textContent = show_seg_n_clicks % 2 ?
"Show Segmentation" :
"Hide Segmentation";
}
var ret = (show_seg_n_clicks % 2) ? "" : "show";
return [ret,ret];
}
""",
[Output("show-hide-seg-2d", "children"), Output("show-hide-seg-3d", "children")],
[Input("show-seg-check", "n_clicks")],
)
app.clientside_callback(
"""
function(
image_select_top_value,
image_select_side_value,
show_hide_seg_2d,
found_segs_data,
image_slices_data,
image_display_top_figure,
image_display_side_figure,
seg_slices_data,
drawn_shapes_data) {{
let show_seg_check = show_hide_seg_2d;
let image_display_figures_ = figure_display_update(
[image_select_top_value,image_select_side_value],
show_seg_check,
found_segs_data,
image_slices_data,
[image_display_top_figure,image_display_side_figure],
seg_slices_data,
drawn_shapes_data),
// slider order reversed because the image slice number is shown on the
// other figure
side_figure = image_display_figures_[1],
top_figure = image_display_figures_[0],
d=3,
sizex, sizey;
// append shapes that show what slice the other figure is in
sizex = top_figure.layout.images[0].sizex,
sizey = top_figure.layout.images[0].sizey;
// tri_shape draws the triangular shape, see assets/app_clientside.js
if (top_figure.layout.shapes) {{
top_figure.layout.shapes=top_figure.layout.shapes.concat([
tri_shape(d/2,sizey*image_select_side_value/found_segs_data[1].length,
d/2,d/2,'right'),
tri_shape(sizex-d/2,sizey*image_select_side_value/found_segs_data[1].length,
d/2,d/2,'left'),
]);
}}
sizex = side_figure.layout.images[0].sizex,
sizey = side_figure.layout.images[0].sizey;
if (side_figure.layout.shapes) {{
side_figure.layout.shapes=side_figure.layout.shapes.concat([
tri_shape(d/2,sizey*image_select_top_value/found_segs_data[0].length,
d/2,d/2,'right'),
tri_shape(sizex-d/2,sizey*image_select_top_value/found_segs_data[0].length,
d/2,d/2,'left'),
]);
}}
// return the outputs
return image_display_figures_.concat([
"Slice: " + (image_select_top_value+1) + " / {num_top_slices}",
"Slice: " + (image_select_side_value+1) + " / {num_side_slices}",
image_select_top_value,
image_select_side_value
]);
}}
""".format(
num_top_slices=len(img_slices[0]), num_side_slices=len(img_slices[1])
),
[
Output("image-display-graph-top", "figure"),
Output("image-display-graph-side", "figure"),
Output("image-select-top-display", "children"),
Output("image-select-side-display", "children"),
Output("slice-number-top", "data"),
Output("slice-number-side", "data"),
],
[
Input("image-select-top", "value"),
Input("image-select-side", "value"),
Input("show-hide-seg-2d", "children"),
Input("found-segs", "data"),
],
[
State("image-slices", "data"),
State("image-display-graph-top", "figure"),
State("image-display-graph-side", "figure"),
State("seg-slices", "data"),
State("drawn-shapes", "data"),
],
)
app.clientside_callback(
"""
function(top_relayout_data,
side_relayout_data,
undo_n_clicks,
redo_n_clicks,
top_slice_number,
side_slice_number,
drawn_shapes_data,
undo_data)
{
// Ignore if "shapes" not in any of the relayout data
let triggered = window.dash_clientside.callback_context.triggered.map(
t => t['prop_id'])[0];
if ((triggered === "image-display-graph-top.relayoutData" && !("shapes" in
top_relayout_data)) || (triggered === "image-display-graph-side.relayoutData"
&& !("shapes" in side_relayout_data))) {
return [window.dash_clientside.no_update,window.dash_clientside.no_update];
}
drawn_shapes_data = json_copy(drawn_shapes_data);
let ret = undo_track_slice_figure_shapes (
[top_relayout_data,side_relayout_data],
["image-display-graph-top.relayoutData",
"image-display-graph-side.relayoutData"],
undo_n_clicks,
redo_n_clicks,
undo_data,
drawn_shapes_data,
[top_slice_number,side_slice_number],
// a function that takes a list of shapes and returns those that we want to
// track (for example if some shapes are to show some attribute but should not
// be tracked by undo/redo)
function (shapes) { return shapes.filter(function (s) {
let ret = true;
try { ret &= (s.fillcolor == "%s"); } catch(err) { ret &= false; }
try { ret &= (s.line.color == "%s"); } catch(err) { ret &= false; }
// return !ret because we don't want to keep the indicators
return !ret;
});
});
undo_data=ret[0];
drawn_shapes_data=ret[1];
return [drawn_shapes_data,undo_data];
}
"""
% ((INDICATOR_COLOR,) * 2),
[Output("drawn-shapes", "data"), Output("undo-data", "data")],
[
Input("image-display-graph-top", "relayoutData"),
Input("image-display-graph-side", "relayoutData"),
Input("undo-button", "n_clicks"),
Input("redo-button", "n_clicks"),
],
[
State("slice-number-top", "data"),
State("slice-number-side", "data"),
State("drawn-shapes", "data"),
State("undo-data", "data"),
],
)
def shapes_to_segs(
drawn_shapes_data, image_display_top_figure, image_display_side_figure,
):
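    """Rasterize the user-drawn shapes of every 2-D slice into binary masks
    (downsampled to the image resolution) and return a tensor marking each
    superpixel of ``seg`` touched by those masks (or the raw masks when
    DEBUG_MASK is set)."""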
masks = np.zeros_like(img)
for j, (graph_figure, (hscale, wscale)) in enumerate(
zip([image_display_top_figure, image_display_side_figure], hwscales)
):
fig = go.Figure(**graph_figure)
# we use the width and the height of the first layout image (this will be
# one of the images of the brain) to get the bounding box of the SVG that we
# want to rasterize
width, height = [fig.layout.images[0][sz] for sz in ["sizex", "sizey"]]
for i in range(seg_img.shape[j]):
shape_args = [
dict(width=width, height=height, shape=s)
for s in drawn_shapes_data[j][i]
]
if len(shape_args) > 0:
mask = shape_utils.shapes_to_mask(
shape_args,
# we only have one label class, so the mask is given value 1
1,
)
# TODO: Maybe there's a more elegant way to downsample the mask?
np.moveaxis(masks, 0, j)[i, :, :] = mask[::hscale, ::wscale]
found_segs_tensor = np.zeros_like(img)
if DEBUG_MASK:
found_segs_tensor[masks == 1] = 1
else:
# find labels beneath the mask
labels = set(seg[1 == masks])
# for each label found, select all of the segment with that label
for l in labels:
found_segs_tensor[seg == l] = 1
return found_segs_tensor
@app.callback(
[Output("found-segs", "data"), Output("current-render-id", "data")],
[Input("drawn-shapes", "data")],
[
State("image-display-graph-top", "figure"),
State("image-display-graph-side", "figure"),
State("current-render-id", "data"),
],
)
def draw_shapes_react(
drawn_shapes_data,
image_display_top_figure,
image_display_side_figure,
current_render_id,
):
if any(
[
e is None
for e in [
drawn_shapes_data,
image_display_top_figure,
image_display_side_figure,
]
]
):
return dash.no_update
t1 = time.time()
found_segs_tensor = shapes_to_segs(
drawn_shapes_data, image_display_top_figure, image_display_side_figure,
)
t2 = time.time()
PRINT("Time to convert shapes to segments:", t2 - t1)
# convert to a colored image
fst_colored = image_utils.label_to_colors(
found_segs_tensor,
colormap=["#8A2BE2"],
alpha=[128],
# we map label 0 to the color #000000 using no_map_zero, so we start at
# color_class 1
color_class_offset=1,
labels_contiguous=True,
no_map_zero=True,
)
t3 = time.time()
PRINT("Time to convert from labels to colored image:", t3 - t2)
fstc_slices = [
[
array_to_data_url(s) if np.any(s != 0) else blank_seg_slices[j]
for s in np.moveaxis(fst_colored, 0, j)
]
for j in range(NUM_DIMS_DISPLAYED)
]
t4 = time.time()
PRINT("Time to convert to data URLs:", t4 - t3)
PRINT("Total time to compute 2D annotations:", t4 - t1)
return fstc_slices, current_render_id + 1
def _decode_b64_slice(s):
return base64.b64decode(s.encode())
def slice_image_list_to_ndarray(fstc_slices):
# convert encoded slices to array
# TODO eventually make it format agnostic, right now we just assume png and
# strip off length equal to uri_header from the uri string
uri_header = "data:image/png;base64,"
# preallocating the final tensor by reading the first image makes converting
# much faster (because all the images have the same dimensions)
n_slices = len(fstc_slices)
first_img = plot_common.str_to_img_ndarrary(
_decode_b64_slice(fstc_slices[0][len(uri_header) :])
)
fstc_ndarray = np.zeros((n_slices,) + first_img.shape, dtype=first_img.dtype)
PRINT("first_img.dtype", first_img.dtype)
fstc_ndarray[0] = first_img
for n, img_slice in enumerate(fstc_slices[1:]):
img = plot_common.str_to_img_ndarrary(
_decode_b64_slice(img_slice[len(uri_header) :])
)
fstc_ndarray[n] = img
PRINT("fstc_ndarray.shape", fstc_ndarray.shape)
# transpose back to original
if len(fstc_ndarray.shape) == 3:
# Brain data is lacking the 4th channel dimension
# Here we allow for this function to also return an array for the 3D brain data
return fstc_ndarray.transpose((1, 2, 0))
return fstc_ndarray.transpose((1, 2, 0, 3))
# Converts found slices to nii file and encodes in b64 so it can be downloaded
def save_found_slices(fstc_slices):
# we just save the first view (it makes no difference in the end)
fstc_slices = fstc_slices[0]
fstc_ndarray = slice_image_list_to_ndarray(fstc_slices)
# if the tensor is all zero (no partitions found) return None
if np.all(fstc_ndarray == 0):
return None
# TODO add affine
# technique for writing nii to bytes from here:
# https://gist.github.com/arokem/423d915e157b659d37f4aded2747d2b3
fstc_nii = nib.Nifti1Image(skimage.img_as_ubyte(fstc_ndarray), affine=None)
fstcbytes = io.BytesIO()
file_map = fstc_nii.make_file_map({"image": fstcbytes, "header": fstcbytes})
fstc_nii.to_file_map(file_map)
fstcb64 = base64.b64encode(fstcbytes.getvalue()).decode()
return fstcb64
@app.callback(
Output("found-image-tensor-data", "data"),
[Input("download-button", "n_clicks"), Input("download-brain-button", "n_clicks")],
[State("found-segs", "data"), State("image-slices", "data")],
)
def download_button_react(
download_button_n_clicks,
download_brain_button_n_clicks,
found_segs_data,
brain_data,
):
ctx = dash.callback_context
# Find out which download button was triggered
if not ctx.triggered:
# Nothing has happened yet
return ""
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if trigger_id == "download-button":
ret = save_found_slices(found_segs_data)
elif trigger_id == "download-brain-button":
ret = save_found_slices(brain_data)
else:
return ""
if ret is None:
return ""
return ret
app.clientside_callback(
"""
function (found_image_tensor_data) {
if (found_image_tensor_data.length <= 0) {
return "";
}
// for some reason you can't use the conversion to ascii from base64 directly
// with blob, you have to use the ascii encoded as numbers
const byte_chars = window.atob(found_image_tensor_data);
const byte_numbers = Array.from(byte_chars,(b,i)=>byte_chars.charCodeAt(i));
const byte_array = new Uint8Array(byte_numbers);
let b = new Blob([byte_array],{type: 'application/octet-stream'});
let url = URL.createObjectURL(b);
return url;
}
""",
Output("download-link", "href"),
[Input("found-image-tensor-data", "data")],
)
app.clientside_callback(
"""
function (href) {
if (href != "") {
let download_a=document.getElementById("download-link");
download_a.click();
}
return '';
}
""",
Output("dummy", "children"),
[Input("download-link", "href")],
)
app.clientside_callback(
"""
function (view_select_button_nclicks,current_render_id) {
console.log("view_select_button_nclicks");
console.log(view_select_button_nclicks);
var graphs_2d = document.getElementById("2D-graphs"),
graphs_3d = document.getElementById("3D-graphs"),
ret = "";
// update view select button
var view_select_button = document.getElementById("view-select-button");
if (view_select_button) {
view_select_button.textContent = view_select_button_nclicks % 2 ?
"2D View" :
"3D View";
}
if (graphs_2d && graphs_3d) {
if (view_select_button_nclicks % 2) {
graphs_2d.style.display = "none";
graphs_3d.style.display = "";
ret = "3d shown";
} else {
graphs_2d.style.display = "grid";
graphs_3d.style.display = "none";
ret = "2d shown";
}
}
ret += ","+current_render_id;
return ret;
}
""",
Output("dummy2", "children"),
[Input("view-select-button", "n_clicks")],
[State("current-render-id", "data")],
)
@app.callback(
Output("fig-3d-scene", "data"),
[Input("image-display-graph-3d", "relayoutData")],
[State("fig-3d-scene", "data")],
)
def store_scene_data(graph_3d_relayoutData, last_3d_scene):
PRINT("graph_3d_relayoutData", graph_3d_relayoutData)
if graph_3d_relayoutData is not None:
for k in graph_3d_relayoutData.keys():
last_3d_scene[k] = graph_3d_relayoutData[k]
return last_3d_scene
return dash.no_update
@app.callback(
[Output("image-display-graph-3d", "figure"), Output("last-render-id", "data")],
[Input("dummy2", "children"), Input("show-hide-seg-3d", "children")],
[
State("drawn-shapes", "data"),
State("fig-3d-scene", "data"),
State("last-render-id", "data"),
State("image-display-graph-top", "figure"),
State("image-display-graph-side", "figure"),
],
)
def populate_3d_graph(
dummy2_children,
show_hide_seg_3d,
drawn_shapes_data,
last_3d_scene,
last_render_id,
image_display_top_figure,
image_display_side_figure,
):
# extract which graph shown and the current render id
graph_shown, current_render_id = dummy2_children.split(",")
current_render_id = int(current_render_id)
start_time = time.time()
cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
# check that we're not toggling the display of the 3D annotation
if cbcontext != "show-hide-seg-3d.children":
PRINT(
"might render 3D, current_id: %d, last_id: %d"
% (current_render_id, last_render_id)
)
if graph_shown != "3d shown" or current_render_id == last_render_id:
if current_render_id == last_render_id:
PRINT("not rendering 3D because it is up to date")
return dash.no_update
PRINT("rendering 3D")
segs_ndarray = shapes_to_segs(
drawn_shapes_data, image_display_top_figure, image_display_side_figure,
).transpose((1, 2, 0))
# image, color
images = [
(img.transpose((1, 2, 0))[:, :, ::-1], "grey"),
]
if show_hide_seg_3d == "show":
images.append((segs_ndarray[:, :, ::-1], "purple"))
data = []
for im, color in images:
im = image_utils.combine_last_dim(im)
try:
verts, faces, normals, values = measure.marching_cubes(im, 0, step_size=3)
x, y, z = verts.T
i, j, k = faces.T
data.append(
go.Mesh3d(x=x, y=y, z=z, color=color, opacity=0.5, i=i, j=j, k=k)
)
except RuntimeError:
continue
fig = go.Figure(data=data)
fig.update_layout(**last_3d_scene)
end_time = time.time()
PRINT("serverside 3D generation took: %f seconds" % (end_time - start_time,))
return (fig, current_render_id)
# ======= Callback for modal popup =======
@app.callback(
Output("markdown", "style"),
[Input("learn-more-button", "n_clicks"), Input("markdown_close", "n_clicks")],
)
def update_click_output(button_click, close_click):
if button_click > close_click:
return {"display": "block"}
else:
return {"display": "none"}
if __name__ == "__main__":
app.run_server(debug=DEBUG)
|
coding_interviews/leetcode/easy/remove_duplicates.py | LeandroTk/Algorithms | 205 | 12638683 | <reponame>LeandroTk/Algorithms
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/
'''
- Examples:
[1, 1, 2] # => 2
[] # => 0
[1, 1, 1] # => 1
[1, 2, 3, 3, 4, 5] # => 5
'''
def remove_duplicates(nums):
if not nums:
return 0
total_result = 1
num = nums[0]
for index in range(1, len(nums)):
if nums[index] != num:
nums[total_result] = nums[index]
total_result += 1
num = nums[index]
return total_result
list1 = [1, 1, 2]
list2 = []
list3 = [1, 1, 1]
list4 = [1, 2, 3, 3, 4, 5]
print remove_duplicates(list1)
print remove_duplicates(list2)
print remove_duplicates(list3)
print remove_duplicates(list4)
print list1
print list2
print list3
print list4
|
pyqubo/integer/one_hot_enc_integer.py | dmiracle/pyqubo | 124 | 12638685 | # Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyqubo import Array, SubH, Constraint
from pyqubo.integer.integer import IntegerWithPenalty
from pyqubo import WithPenalty, Placeholder
class OneHotEncInteger(IntegerWithPenalty):
"""One-hot encoded integer. The value that takes :math:`[1, n]` is represented by :math:`\sum_{i=1}^{n}ix_{i}`.
Also we have the penalty function :math:`strength \\times (\sum_{i=1}^{n}x_{i}-1)^2` in the Hamiltonian.
Args:
label (str): Label of the integer.
lower (int): Lower value of the integer.
upper (int): Upper value of the integer.
strength (float/Placeholder): Strength of the constraint.
Examples:
This example is equivalent to the following Hamiltonian.
.. math::
H = \\left(\\left(\sum_{i=1}^{3}ia_{i}+1\\right) - 2\\right)^2 + strength \\times \\left(\sum_{i=1}^{3}a_{i}-1\\right)^2
>>> from pyqubo import OneHotEncInteger
>>> a = OneHotEncInteger("a", (1, 3), strength=5)
>>> H = (a-2)**2
>>> model = H.compile()
>>> bqm = model.to_bqm()
>>> import dimod
>>> sampleset = dimod.ExactSolver().sample(bqm)
>>> decoded_samples = model.decode_sampleset(sampleset)
>>> best_sample = min(decoded_samples, key=lambda s: s.energy)
>>> print(best_sample.subh['a'])
2.0
"""
def __init__(self, label, value_range, strength):
lower, upper = value_range
assert upper > lower, "upper value should be larger than lower value"
assert isinstance(lower, int)
assert isinstance(upper, int)
assert isinstance(strength, int) or isinstance(strength, float) or\
isinstance(strength, Placeholder)
self._num_variables = (upper - lower + 1)
self.array = Array.create(label, shape=self._num_variables, vartype='BINARY')
self.constraint = Constraint((sum(self.array)-1)**2, label=label+"_const", condition=lambda x: x==0)
express = SubH(lower + sum(i*x for i, x in enumerate(self.array)), label=label)
penalty = self.constraint * strength
super().__init__(
label=label,
value_range=value_range,
express=express,
penalty=penalty)
def equal_to(self, k):
"""Variable representing whether the value is equal to `k`.
Note:
You cannot use this method alone. You should use this variable with the entire integer.
Args:
k (int): Integer value.
Returns:
:class:`Express`
"""
lower, upper = self.value_range
assert isinstance(k, int), "k should be integer"
assert lower <= k <= upper, "This value never takes {}".format(k)
return self.array[k-lower]
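

# A hedged usage sketch for equal_to() (illustrative only; the extra 2.0 weight
# below is an assumption, not taken from the upstream docs). It mirrors the
# class docstring example and adds a term that also penalizes the value 3:
#
# >>> a = OneHotEncInteger("a", (1, 3), strength=5)
# >>> H = (a - 2) ** 2 + 2.0 * a.equal_to(3)
# >>> model = H.compile()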
|
prml/nn/math/__init__.py | jinmang2/PRML | 11,017 | 12638694 | from prml.nn.math.negative import negative
from prml.nn.math.add import add
from prml.nn.math.subtract import subtract, rsubtract
from prml.nn.math.divide import divide, rdivide
from prml.nn.math.mean import mean
from prml.nn.math.multiply import multiply
from prml.nn.math.matmul import matmul, rmatmul
from prml.nn.math.power import power, rpower
from prml.nn.math.sum import sum
from prml.nn.array import Array
Array.__neg__ = negative
Array.__add__ = add
Array.__radd__ = add
Array.__sub__ = subtract
Array.__rsub__ = rsubtract
Array.__truediv__ = divide
Array.__rtruediv__ = rdivide
Array.__mul__ = multiply
Array.__rmul__ = multiply
Array.__matmul__ = matmul
Array.__rmatmul__ = rmatmul
Array.__pow__ = power
Array.__rpow__ = rpower
Array.sum = sum
Array.mean = mean
|
sandbox/gkahn/gcg/eval_exp.py | ICRA-2018/gcg | 120 | 12638725 | import os
import yaml
import argparse
import joblib
from rllab.misc.ext import set_seed
import rllab.misc.logger as logger
from sandbox.gkahn.gcg.policies.mac_policy import MACPolicy
from sandbox.gkahn.gcg.sampler.sampler import RNNCriticSampler
from sandbox.gkahn.gcg.envs.env_utils import create_env
class EvalExp(object):
def __init__(self, folder, num_rollouts):
"""
:param kwargs: holds random extra properties
"""
self._folder = folder
self._num_rollouts = num_rollouts
### load data
self.name = os.path.basename(self._folder)
with open(self._params_file, 'r') as f:
self.params = yaml.load(f)
self.env = create_env(self.params['alg']['env'])
#############
### Files ###
#############
def _itr_file(self, itr):
return os.path.join(self._folder, 'itr_{0:d}.pkl'.format(itr))
@property
def _params_file(self):
yamls = [fname for fname in os.listdir(self._folder) if os.path.splitext(fname)[-1] == '.yaml' and os.path.basename(self._folder) in fname]
assert(len(yamls) == 1)
return os.path.join(self._folder, yamls[0])
def save_eval_rollouts(self, itr, rollouts):
fname = os.path.join(self._folder, 'itr_{0:d}_exp_eval.pkl'.format(itr))
joblib.dump({'rollouts': rollouts}, fname, compress=3)
####################
### Data loading ###
####################
def _load_itr_policy(self, itr):
d = joblib.load(self._itr_file(itr))
policy = d['policy']
return policy
def eval_policy(self, itr, gpu_device=None, gpu_frac=None):
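        """Load the policy saved at iteration ``itr`` (-1 means the most recent
        saved iteration), roll it out in the environment until at least the
        requested number of rollouts has been collected, and save the rollouts
        into the experiment folder."""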
if itr == -1:
itr = 0
while os.path.exists(self._itr_file(itr)):
itr += 1
itr -= 1
if self.params['seed'] is not None:
set_seed(self.params['seed'])
if gpu_device is None:
gpu_device = self.params['policy']['gpu_device']
if gpu_frac is None:
gpu_frac = self.params['policy']['gpu_frac']
sess, graph = MACPolicy.create_session_and_graph(gpu_device=gpu_device, gpu_frac=gpu_frac)
with graph.as_default(), sess.as_default():
policy = self._load_itr_policy(itr)
logger.log('Evaluating policy for itr {0}'.format(itr))
n_envs = 1
if 'max_path_length' in self.params['alg']:
max_path_length = self.params['alg']['max_path_length']
else:
max_path_length = self.env.horizon
sampler = RNNCriticSampler(
policy=policy,
env=self.env,
n_envs=n_envs,
replay_pool_size=int(1e4),
max_path_length=max_path_length,
save_rollouts=True,
sampling_method=self.params['alg']['replay_pool_sampling']
)
rollouts = []
step = 0
logger.log('Starting rollout {0}'.format(len(rollouts)))
while len(rollouts) < self._num_rollouts:
sampler.step(step)
step += n_envs
new_rollouts = sampler.get_recent_paths()
if len(new_rollouts) > 0:
rollouts += new_rollouts
logger.log('Starting rollout {0}'.format(len(rollouts)))
self.save_eval_rollouts(itr, rollouts)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('folder', type=str)
parser.add_argument('numrollouts', type=int)
args = parser.parse_args()
eval_exp = EvalExp(args.folder, args.numrollouts)
eval_exp.eval_policy(-1)
|
src/ostorlab/cli/auth/revoke/__init__.py | bbhunter/ostorlab | 113 | 12638726 | """Module for the auth revoke command"""
from ostorlab.cli.auth.revoke import revoke
|
Common/DataModel/Testing/Python/TestNumericArrayImageData.py | txwhhny/vtk | 1,755 | 12638746 | #!/usr/bin/env python
"""
This file tests vtk.util.vtkImageExportToArray and
vtk.util.vtkImageImportFromArray. It tests the code by first
exporting a PNG image to a Numeric Array and then converts the array
to an image and compares that image to the original image. It does
this for all PNG images in a particular directory.
The test naturally requires Numeric Python to be installed:
http://numpy.sf.net
"""
# This test requires Numeric.
import sys
from vtk.test import Testing
try:
import numpy.core.numeric as numeric
except ImportError:
print("WARNING: This test requires Numeric Python: http://numpy.sf.net")
Testing.skip()
import os
import glob
import vtk
from vtk.test import Testing
from vtk.util.vtkImageExportToArray import vtkImageExportToArray
from vtk.util.vtkImageImportFromArray import vtkImageImportFromArray
class TestNumericArrayImageData(Testing.vtkTest):
def testImportExport(self):
"Testing if images can be imported to and from numeric arrays."
imp = vtkImageImportFromArray()
exp = vtkImageExportToArray()
idiff = vtk.vtkImageDifference()
img_dir = Testing.getAbsImagePath("")
for i in glob.glob(os.path.join(img_dir, "*.png")):
# Putting the reader outside the loop causes bad problems.
reader = vtk.vtkPNGReader()
reader.SetFileName(i)
reader.Update()
# convert the image to a Numeric Array and convert it back
# to an image data.
exp.SetInputConnection(reader.GetOutputPort())
imp.SetArray(exp.GetArray())
# ensure there is no difference between orig image and the
# one we converted and un-converted.
idiff.SetInputConnection(imp.GetOutputPort())
idiff.SetImage(reader.GetOutput())
idiff.Update()
err = idiff.GetThresholdedError()
msg = "Test failed on image %s, with threshold "\
"error: %d"%(i, err)
self.assertEqual(err, 0.0, msg)
if __name__ == "__main__":
Testing.main([(TestNumericArrayImageData, 'test')])
|
hackerrank/cracking-the-coding-interview/ctci-array-left-rotation.py | Ashindustry007/competitive-programming | 506 | 12638755 | <reponame>Ashindustry007/competitive-programming
#!/usr/bin/env python2
# https://www.hackerrank.com/challenges/ctci-array-left-rotation
def array_left_rotation(a, n, k):
return a[k:] + a[:k]
n, k = map(int, raw_input().strip().split(' '))
a = map(int, raw_input().strip().split(' '))
answer = array_left_rotation(a, n, k);
print ' '.join(map(str,answer))
|
testing/play_rtree.py | alitrack/dtreeviz | 1,905 | 12638777 | <gh_stars>1000+
from dtreeviz.trees import *
df_cars = pd.read_csv("data/cars.csv")
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
X_train, y_train = X, y
max_depth = 3
fig = plt.figure()
ax = fig.gca()
t = rtreeviz_univar(ax,
X_train.WGT, y_train,
max_depth=max_depth,
feature_name='Vehicle Weight',
target_name='MPG',
fontsize=14,
show={'splits'})
plt.savefig(f"/tmp/cars-univar-{max_depth}.svg", bbox_inches=0, pad_inches=0)
plt.show()
|
examples/fireworks.py | aforren1/glumpy | 1,074 | 12638787 | <reponame>aforren1/glumpy
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
Example demonstrating simulation of fireworks using point sprites.
(adapted from the "OpenGL ES 2.0 Programming Guide")
"""
import numpy as np
from glumpy import app, gl, gloo
vertex = """
#version 120
uniform float time;
uniform vec2 center;
attribute vec2 start, end;
attribute float lifetime;
varying float v_lifetime;
void main () {
gl_Position = vec4(start + (time * end) + center, 0.0, 1.0);
gl_Position.y -= 1.0 * time * time;
v_lifetime = clamp(1.0 - (time / lifetime), 0.0, 1.0);
gl_PointSize = (v_lifetime * v_lifetime) * 30.0;
}
"""
fragment = """
#version 120
const float SQRT_2 = 1.4142135623730951;
uniform vec4 color;
varying float v_lifetime;
void main()
{
gl_FragColor = color * (SQRT_2/2.0 - length(gl_PointCoord.xy - 0.5));
gl_FragColor.a *= v_lifetime;
}
"""
n = 2500
window = app.Window(512,512)
program = gloo.Program(vertex, fragment, count=n)
def explosion():
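    # Re-seed a single firework burst: pick a new random center and color,
    # draw fresh per-particle lifetimes and start/end offsets, and reset the
    # shared "time" uniform so the burst starts again from t = 0.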
program['center'] = np.random.uniform(-0.5,+0.5)
program['color'] = np.random.uniform(0.1,0.9,4)
program['color'][3] = 1.0 / n ** 0.05
program['lifetime'] = np.random.normal(4.0, 0.5, n)
program['start'] = np.random.normal(0.0, 0.2, (n,2))
program['end'] = np.random.normal(0.0, 1.2, (n,2))
program['time'] = 0
@window.event
def on_init():
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE)
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_POINTS)
program['time'] += dt
if program['time'] > 1.75:
explosion()
explosion()
app.run(framerate=60)
|
applications/SwimmingDEMApplication/tests/drag_tests/chien_law/chien_drag_test_analysis.py | clazaro/Kratos | 778 | 12638806 | <reponame>clazaro/Kratos
from KratosMultiphysics import Parameters
import os
# Importing the Kratos Library
import KratosMultiphysics
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.SwimmingDEMApplication.swimming_DEM_analysis import SwimmingDEMAnalysis
class ChienDragAnalysis(SwimmingDEMAnalysis, KratosUnittest.TestCase):
def __init__(self, model, varying_parameters = Parameters("{}")):
super().__init__(model, varying_parameters)
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
def CheckValues(self, x_vel):
tol = 1.0e-18
x_vel_ref = 0.9886575480896711 #ChienDragLaw
# Other results.
# x_vel_ref = 0.9880047941854828 #SchillerAndNaumannDragLaw
#StokesDragLaw BeetstraDragLaw HaiderAndLevenspielDragLaw GanserDragLaw
self.assertAlmostEqual(x_vel, x_vel_ref, delta=tol)
def Finalize(self):
for node in self.spheres_model_part.Nodes:
if node.Id == 111:
x_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_X)
print(x_vel)
self.CheckValues(x_vel)
self.procedures.RemoveFoldersWithResults(str(dir_path), str('chien_drag_test'), '')
super().Finalize() |
tacker/tests/unit/sol_refactored/api/test_api_version.py | h1r0mu/tacker | 116 | 12638825 | <filename>tacker/tests/unit/sol_refactored/api/test_api_version.py
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker.sol_refactored.api import api_version
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.tests import base
class TestAPIVersion(base.BaseTestCase):
def test_init_null(self):
vers = api_version.APIVersion()
self.assertTrue(vers.is_null())
@mock.patch.object(api_version, 'supported_versions',
new=["3.1.4159", "2.0.0"])
def test_init(self):
for vers, vers_str in [("2.0.0", "2.0.0"),
("3.1.4159", "3.1.4159"),
("2.0.0-impl:foobar", "2.0.0")]:
v = api_version.APIVersion(vers)
self.assertEqual(str(v), vers_str)
def test_init_exceptions(self):
self.assertRaises(sol_ex.InvalidAPIVersionString,
api_version.APIVersion, "0.1.2")
self.assertRaises(sol_ex.APIVersionNotSupported,
api_version.APIVersion, "9.9.9")
@mock.patch.object(api_version, 'supported_versions',
new=["1.3.0", "1.3.1", "2.0.0"])
def test_compare(self):
self.assertTrue(api_version.APIVersion("1.3.0") <
api_version.APIVersion("1.3.1"))
self.assertTrue(api_version.APIVersion("2.0.0") >
api_version.APIVersion("1.3.1"))
@mock.patch.object(api_version, 'supported_versions',
new=["1.3.0", "1.3.1", "2.0.0"])
def test_matches(self):
vers = api_version.APIVersion("2.0.0")
self.assertTrue(vers.matches(api_version.APIVersion("1.3.0"),
api_version.APIVersion()))
self.assertFalse(vers.matches(api_version.APIVersion(),
api_version.APIVersion("1.3.1")))
|
tools/codegen/core/gen_server_registered_method_bad_client_test_body.py | samotarnik/grpc | 2,151 | 12638835 | <filename>tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ASCII codes for hex digits: used to decide when an empty "" literal must be inserted
# so that a digit following a \xNN escape is not read as part of that escape.
hex_bytes = [ord(c) for c in 'abcdefABCDEF0123456789']
def esc_c(line):
out = "\""
last_was_hex = False
for c in line:
if 32 <= c < 127:
if c in hex_bytes and last_was_hex:
out += "\"\""
if c != ord('"'):
out += chr(c)
else:
out += "\\\""
last_was_hex = False
else:
out += "\\x%02x" % c
last_was_hex = True
return out + "\""
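# e.g. esc_c([0, 65]) yields the C literal "\x00""A"; the adjacent "" keeps the 'A'
# from being parsed as part of the preceding \x00 hex escape.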
done = set()
for message_length in range(0, 3):
for send_message_length in range(0, message_length + 1):
payload = [
0, (message_length >> 24) & 0xff, (message_length >> 16) & 0xff,
(message_length >> 8) & 0xff, (message_length) & 0xff
] + send_message_length * [0]
for frame_length in range(0, len(payload) + 1):
is_end = frame_length == len(
payload) and send_message_length == message_length
frame = [(frame_length >> 16) & 0xff, (frame_length >> 8) & 0xff,
(frame_length) & 0xff, 0, 1
if is_end else 0, 0, 0, 0, 1] + payload[0:frame_length]
text = esc_c(frame)
if text not in done:
print 'GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);' % (
'succeeds' if is_end else 'fails', text, '0'
if is_end else 'GRPC_BAD_CLIENT_DISCONNECT')
done.add(text)
|
haxor_news/lib/haxor/haxor.py | donnemartin/hn | 4,091 | 12638862 | <reponame>donnemartin/hn
# The MIT License (MIT)
# Copyright (c) 2014-15 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
haxor
Unofficial Python wrapper for official Hacker News API
@author <NAME>
@email <EMAIL>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import json
import sys
import requests
from .settings import supported_api_versions
__all__ = [
'User',
'Item',
'HackerNewsApi',
'InvalidAPIVersion',
'InvalidItemID',
'InvalidUserID']
class InvalidItemID(Exception):
pass
class InvalidUserID(Exception):
pass
class InvalidAPIVersion(Exception):
pass
class HTTPError(Exception):
pass
class HackerNewsApi(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version. Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
self.session = requests.Session()
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
def _get(self, url):
"""Internal method used for GET requests
Args:
url (string): URL to send GET.
Returns:
requests' response object
Raises:
HTTPError: If HTTP request failed.
"""
response = self.session.get(url)
if response.status_code == requests.codes.ok:
return response
else:
raise HTTPError
def _get_page(self, page):
return self._get('{0}{1}.json'.format(self.base_url, page))
def _get_page_param(self, page, param):
return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))
def get_item(self, item_id):
"""Returns Hacker News `Item` object.
Args:
item_id (int or string): Unique item id of Hacker News story, comment etc.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
response = self._get_page_param('item', item_id).json()
if not response:
raise InvalidItemID
return Item(response)
def get_user(self, user_id):
"""Returns Hacker News `User` object.
Args:
user_id (string): unique user id of a Hacker News user.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
response = self._get_page_param('user', user_id).json()
if not response:
raise InvalidUserID
return User(response)
def top_stories(self, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of top stories.
"""
return self._get_page('topstories').json()[:limit]
def new_stories(self, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of new stories.
"""
return self._get_page('newstories').json()[:limit]
def ask_stories(self, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Ask HN stories.
"""
return self._get_page('askstories').json()[:limit]
def best_stories(self, limit=None):
"""Returns list of item ids of best HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of best stories.
"""
return self._get_page('beststories').json()[:limit]
def show_stories(self, limit=None):
"""Returns list of item ids of latest Show HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Show HN stories.
"""
return self._get_page('showstories').json()[:limit]
def job_stories(self, limit=None):
"""Returns list of item ids of latest Job stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Job stories.
"""
return self._get_page('jobstories').json()[:limit]
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Returns:
`dict` with two keys whose values are `list` objects
"""
return self._get_page('updates').json()
def get_max_item(self):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`int` if successful.
"""
return self._get_page('maxitem').json()
class Item(object):
"""
Represents stories, comments, jobs, Ask HNs and polls
"""
def __init__(self, data):
self.item_id = data.get('id')
self.deleted = data.get('deleted')
self.item_type = data.get('type')
self.by = data.get('by')
self.submission_time = datetime.datetime.fromtimestamp(
data.get(
'time',
0))
self.text = data.get('text')
self.dead = data.get('dead')
self.parent = data.get('parent')
self.kids = data.get('kids')
self.url = data.get('url')
self.score = data.get('score')
self.title = data.get('title')
self.parts = data.get('parts')
self.descendants = data.get('descendants')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.Item: {0} - {1}>'.format(
self.item_id, self.title)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
class User(object):
"""
Represents a hacker i.e. a user on Hacker News
"""
def __init__(self, data):
self.user_id = data.get('id')
self.delay = data.get('delay')
self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
self.karma = data.get('karma')
self.about = data.get('about')
self.submitted = data.get('submitted')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.User: {0}>'.format(self.user_id)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
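# Minimal usage sketch (assumes network access to the official Hacker News API):
if __name__ == '__main__':
    hn = HackerNewsApi(version='v0')
    for story_id in hn.top_stories(limit=3):
        story = hn.get_item(story_id)
        print(story.title)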
|
examples/find_tags.py | pketthong/ruuvitag-sensor | 166 | 12638864 | <reponame>pketthong/ruuvitag-sensor
"""
Find RuuviTags
"""
from ruuvitag_sensor.ruuvi import RuuviTagSensor
import ruuvitag_sensor.log
ruuvitag_sensor.log.enable_console()
# This will print sensor's mac and state when new sensor is found
if __name__ == "__main__":
RuuviTagSensor.find_ruuvitags()
|
bugtests/test385.py | jeff5/jython-whinchat | 577 | 12638867 | """
Try importing from a jar after sys.path.append(jar)
This nails down a bug reported here:
http://sourceforge.net/mailarchive/message.php?msg_id=14088259
which only occurred on systems where java.io.File.separatorChar is not a forward slash ('/')
since - at the moment - jython modules hide java packages with the same name from import,
use a unique java package name for the sake of this test
"""
import jarmaker
import support
import sys
jarfn, package, clazz = jarmaker.mkjar()
# append this jar file to sys.path
sys.path.append(jarfn)
# try to import the class
importStmt = "from %s import %s" % (package, clazz)
try:
exec(importStmt)
finally:
sys.path.remove(jarfn)
|
tbx/people/migrations/0026_culturepage_instagram_posts.py | elviva404/wagtail-torchbox | 103 | 12638916 | # Generated by Django 2.2.17 on 2021-05-21 21:22
from django.db import migrations
import tbx.people.blocks
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
("people", "0025_auto_20210505_1057"),
]
operations = [
migrations.AddField(
model_name="culturepage",
name="instagram_posts",
field=wagtail.core.fields.StreamField(
[
(
"posts",
wagtail.core.blocks.StreamBlock(
[("post", tbx.people.blocks.InstagramEmbedBlock())],
icon="fa-instagram",
max_num=8,
min_num=8,
required=False,
template="patterns/molecules/instagram-gallery/instagram-gallery.html",
),
)
],
blank=True,
null=True,
),
),
]
|
AutoSketcher/predict.py | D1anaGreen/essaykiller | 4,551 | 12638933 | import torch
import random
from utils.logging import logger, init_logger
from models.pytorch_pretrained_bert.modeling import BertConfig
from models import data_loader, model_builder
from models.trainer import build_trainer
import argparse
import os
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',
'rnn_size']
self.device = "cpu" if self.args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % self.device_id)
logger.info('Device %s' % self.device)
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
try:
self.step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
self.step = 0
logger.info('Loading checkpoint from %s' % self.args.test_from)
checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
self.model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
self.model.load_cp(checkpoint)
self.model.eval()
def predict(self):
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, self.model, None)
trainer.predict(test_iter, self.step)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='transformer', type=str,
choices=['classifier', 'transformer', 'rnn', 'baseline'])
parser.add_argument("-data_name", default='chinese_summary', help='vy_text')
parser.add_argument("-bert_data_path", default='./data/predict_data/', help='./data/predict_data/')
parser.add_argument("-model_path", default='./models/models_check_points/')
parser.add_argument("-result_path", default='./results/')
parser.add_argument("-temp_dir", default='./temp/')
parser.add_argument("-bert_pretrained_model_path", default='./models/pytorch_pretrained_bert/bert_pretrain/')
parser.add_argument("-bert_config_path", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=2048, type=int)
parser.add_argument("-heads", default=8, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optimizer", default='adam', type=str)
parser.add_argument("-lr", default=2e-3, type=float, help='learning rate')
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='noam', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5000, type=int)
parser.add_argument("-accum_count", default=2, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=50, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='0', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='./logs/project.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-test_from", default='./models/models_check_points/model_step_50000.pt')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-shuffle_data", type=str2bool, nargs='?', const=False, default=False)
parser.add_argument("-vy_predict", type=str2bool, nargs='?', const=False, default=True)
_args = parser.parse_args()
gpu_ranks: str = str(_args.gpu_ranks)
_args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = _args.visible_gpus
init_logger(_args.log_file)
_device = "cpu" if _args.visible_gpus == '-1' else "cuda"
_device_id = 0 if _device == "cuda" else -1
runner = Running(args=_args, device_id=_device_id)
runner.predict()
|
meilisearch/tests/settings/test_settings_ranking_rules_meilisearch.py | jrinder42/meilisearch-python | 159 | 12638934 |
NEW_RANKING_RULES = ['typo', 'exactness']
DEFAULT_RANKING_RULES = [
'words',
'typo',
'proximity',
'attribute',
'sort',
'exactness'
]
def test_get_ranking_rules_default(empty_index):
"""Tests getting the default ranking rules."""
response = empty_index().get_ranking_rules()
assert isinstance(response, list)
for rule in DEFAULT_RANKING_RULES:
assert rule in response
def test_update_ranking_rules(empty_index):
"""Tests changing the ranking rules."""
index = empty_index()
response = index.update_ranking_rules(NEW_RANKING_RULES)
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in NEW_RANKING_RULES:
assert rule in response
def test_update_ranking_rules_none(empty_index):
"""Tests updating the ranking rules at null."""
index = empty_index()
# Update the settings first
response = index.update_ranking_rules(NEW_RANKING_RULES)
update = index.wait_for_pending_update(response['updateId'])
assert update['status'] == 'processed'
# Check the settings have been correctly updated
response = index.get_ranking_rules()
for rule in NEW_RANKING_RULES:
assert rule in response
    # Update the setting to None and check that the defaults are restored
response = index.update_ranking_rules(None)
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in DEFAULT_RANKING_RULES:
assert rule in response
def test_reset_ranking_rules(empty_index):
"""Tests resetting the ranking rules setting to its default value."""
index = empty_index()
# Update the settings first
response = index.update_ranking_rules(NEW_RANKING_RULES)
update = index.wait_for_pending_update(response['updateId'])
assert update['status'] == 'processed'
# Check the settings have been correctly updated
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in NEW_RANKING_RULES:
assert rule in response
# Check the reset of the settings
response = index.reset_ranking_rules()
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
for rule in DEFAULT_RANKING_RULES:
assert rule in response
|
wagtailmenus/migrations/0007_auto_20160131_2000.py | pierremanceaux/wagtailmenus | 329 | 12638938 | <filename>wagtailmenus/migrations/0007_auto_20160131_2000.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailmenus', '0006_auto_20160131_1347'),
]
operations = [
migrations.RemoveField(
model_name='flatmenuitem',
name='url_append',
),
migrations.RemoveField(
model_name='mainmenuitem',
name='url_append',
),
migrations.AlterField(
model_name='flatmenuitem',
name='link_page',
field=models.ForeignKey(verbose_name='Link to an internal page', blank=True, on_delete=models.deletion.CASCADE, to='wagtailcore.Page', null=True),
),
migrations.AlterField(
model_name='mainmenuitem',
name='link_page',
field=models.ForeignKey(verbose_name='Link to an internal page', blank=True, on_delete=models.deletion.CASCADE, to='wagtailcore.Page', null=True),
),
]
|
functions/Cythonize.py | mmfink/raster-functions | 173 | 12638987 | <gh_stars>100-1000
"""
Cythonize.py build_ext --inplace
Cythonize.py clean
"""
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize("*.py"))
|
corehq/util/metrics/const.py | dimagilg/commcare-hq | 471 | 12639027 | import settings
ALERT_ERROR = 'error'
ALERT_WARNING = 'warning'
ALERT_INFO = 'info'
ALERT_SUCCESS = 'success'
COMMON_TAGS = {'environment': settings.SERVER_ENVIRONMENT}
TAG_UNKNOWN = '<unknown>'
# Prometheus multiprocess_mode options
MPM_ALL = 'all'
MPM_LIVEALL = 'liveall'
MPM_LIVESUM = 'livesum'
MPM_MAX = 'max'
MPM_MIN = 'min'
|
tests/list_repos_test.py | charlievieth/all-repos | 350 | 12639036 | from all_repos.list_repos import main
def test_list_repos_main(file_config_files, capsys):
assert not main(('--config-filename', str(file_config_files.cfg)))
out, _ = capsys.readouterr()
assert out == 'repo1\nrepo2\n'
def test_list_repos_with_output_paths(file_config_files, capsys):
assert not main((
'--config-filename', str(file_config_files.cfg),
'--output-paths',
))
out, _ = capsys.readouterr()
assert out == '{}\n{}\n'.format(
file_config_files.output_dir.join('repo1'),
file_config_files.output_dir.join('repo2'),
)
|
lfs/core/__init__.py | michael-hahn/django-lfs | 345 | 12639039 | <reponame>michael-hahn/django-lfs
default_app_config = 'lfs.core.apps.LfsCoreAppConfig'
|
Burp/lib/methodology_tsl.py | wisdark/HUNT | 1,628 | 12639046 | <reponame>wisdark/HUNT
from javax.swing.event import TreeSelectionListener
class TSL(TreeSelectionListener):
def __init__(self, view):
self.tree = view.get_tree()
self.pane = view.get_pane()
self.checklist = view.get_checklist()
self.issues = view.get_issues()
self.tabbed_panes = view.get_tabbed_panes()
self.settings = view.get_settings()
def valueChanged(self, tse):
pane = self.pane
pane.setDividerLocation(300)
node = self.tree.getLastSelectedPathComponent()
# Check if node is root. If it is, don't display anything
if node is None or node.getParent() is None:
return
test_name = node.toString()
functionality_name = node.getParent().toString()
is_leaf = node.isLeaf()
is_settings = is_leaf and (test_name == "Settings")
is_folder = is_leaf and (test_name == "Functionality")
is_functionality = is_leaf and not is_settings
if node:
if is_functionality:
key = functionality_name + "." + test_name
tabbed_pane = self.tabbed_panes[key]
pane.setRightComponent(tabbed_pane)
elif is_settings:
pane.setRightComponent(self.settings)
else:
print("No description for " + test_name)
else:
print("Cannot set a pane for " + test_name)
|
querybook/server/lib/query_executor/executors/presto.py | shivammmmm/querybook | 1,144 | 12639053 | <gh_stars>1000+
from pyhive.exc import Error
from const.query_execution import QueryExecutionErrorType
from lib.query_executor.base_executor import QueryExecutorBaseClass
from lib.query_executor.utils import get_parsed_syntax_error
from lib.query_executor.clients.presto import PrestoClient
from lib.query_executor.executor_template.templates import presto_executor_template
def get_presto_error_dict(e):
if hasattr(e, "args") and e.args[0] is not None:
error_arg = e.args[0]
if type(error_arg) is dict:
return error_arg
return None
class PrestoQueryExecutor(QueryExecutorBaseClass):
@classmethod
def _get_client(cls, client_setting):
return PrestoClient(**client_setting)
@classmethod
def EXECUTOR_NAME(cls):
return "presto"
@classmethod
def EXECUTOR_LANGUAGE(cls):
return "presto"
@classmethod
def EXECUTOR_TEMPLATE(cls):
return presto_executor_template
def _parse_exception(self, e):
error_type = QueryExecutionErrorType.INTERNAL.value
error_str = str(e)
error_extracted = None
try:
if isinstance(e, Error):
error_type = QueryExecutionErrorType.ENGINE.value
error_dict = get_presto_error_dict(e)
if error_dict:
error_extracted = error_dict.get("message", None)
# In Presto, only context free syntax error are labelled as
# SYNTAX_ERROR, and context sensitive errors are user errors
# However in both cases errorLocation is provided
if "errorLocation" in error_dict:
return get_parsed_syntax_error(
error_extracted,
error_dict["errorLocation"].get("lineNumber", 1) - 1,
error_dict["errorLocation"].get("columnNumber", 1) - 1,
)
except Exception:
pass
return error_type, error_str, error_extracted
|
tests/meltano/core/test_project_files.py | meltano/meltano | 122 | 12639070 | <reponame>meltano/meltano<gh_stars>100-1000
import pytest # noqa: F401
class TestProjectFiles:
def test_resolve_subfiles(self, project_files):
assert project_files._meltano_file_path == (project_files.root / "meltano.yml")
assert project_files.meltano == {
"version": 1,
"include_paths": [
"./subconfig_[0-9].yml",
"./*/subconfig_[0-9].yml",
"./*/**/subconfig_[0-9].yml",
],
"plugins": {
"extractors": [{"name": "tap-meltano-yml"}],
"loaders": [{"name": "target-meltano-yml"}],
},
"schedules": [
{
"name": "test-meltano-yml",
"extractor": "tap-meltano-yml",
"loader": "target-meltano-yml",
"transform": "skip",
"interval": "@once",
}
],
"environments": [
{"name": "test-meltano-environment", "env": {"TEST": "TEST-MELTANO"}}
],
}
assert project_files.include_paths == [
(project_files.root / "subconfig_2.yml"),
(project_files.root / "subfolder" / "subconfig_1.yml"),
]
def test_load(self, project_files):
expected_result = {
"version": 1,
"include_paths": [
"./subconfig_[0-9].yml",
"./*/subconfig_[0-9].yml",
"./*/**/subconfig_[0-9].yml",
],
"plugins": {
"extractors": [
{"name": "tap-meltano-yml"},
{"name": "tap-subconfig-2-yml"},
{"name": "tap-subconfig-1-yml"},
],
"loaders": [
{"name": "target-meltano-yml"},
{"name": "target-subconfig-2-yml"},
{"name": "target-subconfig-1-yml"},
],
},
"schedules": [
{
"name": "test-meltano-yml",
"extractor": "tap-meltano-yml",
"loader": "target-meltano-yml",
"transform": "skip",
"interval": "@once",
},
{
"name": "test-subconfig-2-yml",
"extractor": "tap-subconfig-2-yml",
"loader": "target-subconfig-2-yml",
"transform": "skip",
"interval": "@once",
},
{
"name": "test-subconfig-1-yml",
"extractor": "tap-subconfig-1-yml",
"loader": "target-subconfig-1-yml",
"transform": "skip",
"interval": "@once",
},
],
"environments": [
{"name": "test-meltano-environment", "env": {"TEST": "TEST-MELTANO"}},
{
"name": "test-subconfig-2-yml",
"env": {"TEST": "TEST-SUBCONFIG-2-YML"},
},
{
"name": "test-subconfig-1-yml",
"env": {"TEST": "TEST-SUBCONFIG-1-YML"},
},
],
}
read_result = project_files.load()
assert read_result == expected_result
def test_update(self, project_files):
meltano_config = project_files.load()
meltano_config["version"] = 2
meltano_config["plugins"]["extractors"][1][
"name"
] = "modified-tap-subconfig-2-yml"
meltano_config["plugins"]["loaders"][2][
"name"
] = "modified-target-subconfig-1-yml"
meltano_config["schedules"][0]["name"] = "modified-test-meltano-yml"
project_files.update(meltano_config)
expected_result = {
"include_paths": [
"./subconfig_[0-9].yml",
"./*/subconfig_[0-9].yml",
"./*/**/subconfig_[0-9].yml",
],
"plugins": {
"extractors": [
{"name": "tap-meltano-yml"},
{"name": "modified-tap-subconfig-2-yml"},
{"name": "tap-subconfig-1-yml"},
],
"loaders": [
{"name": "target-meltano-yml"},
{"name": "modified-target-subconfig-1-yml"},
{"name": "target-subconfig-2-yml"},
],
},
"schedules": [
{
"extractor": "tap-meltano-yml",
"interval": "@once",
"loader": "target-meltano-yml",
"name": "modified-test-meltano-yml",
"transform": "skip",
},
{
"extractor": "tap-subconfig-2-yml",
"interval": "@once",
"loader": "target-subconfig-2-yml",
"name": "test-subconfig-2-yml",
"transform": "skip",
},
{
"extractor": "tap-subconfig-1-yml",
"interval": "@once",
"loader": "target-subconfig-1-yml",
"name": "test-subconfig-1-yml",
"transform": "skip",
},
],
"environments": [
{"name": "test-meltano-environment", "env": {"TEST": "TEST-MELTANO"}},
{
"name": "test-subconfig-2-yml",
"env": {"TEST": "TEST-SUBCONFIG-2-YML"},
},
{
"name": "test-subconfig-1-yml",
"env": {"TEST": "TEST-SUBCONFIG-1-YML"},
},
],
"version": 2,
}
read_result = project_files.load()
assert read_result == expected_result
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlUpsilonMuMuPA_Output_cff.py | ckamtsikis/cmssw | 852 | 12639074 | import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using Upsilon->mumu events in heavy ion (PA) data
OutALCARECOTkAlUpsilonMuMuPA_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOTkAlUpsilonMuMuPA')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOTkAlUpsilonMuMuPA_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep *_offlinePrimaryVertices_*_*')
)
import copy
OutALCARECOTkAlUpsilonMuMuPA = copy.deepcopy(OutALCARECOTkAlUpsilonMuMuPA_noDrop)
OutALCARECOTkAlUpsilonMuMuPA.outputCommands.insert(0, "drop *")
|
tutorials/ner_pytorch_medical/scripts/azure/text_analytics.py | apjanco/projects | 823 | 12639106 | <filename>tutorials/ner_pytorch_medical/scripts/azure/text_analytics.py<gh_stars>100-1000
"""
Custom Entity Recognition pipeline component using Azure Text Analytics.
This implementation is based on the Presidio example here: https://microsoft.github.io/presidio/samples/python/text_analytics/example_text_analytics_recognizer/
Needs setting up a Text Analytics resource as a prerequisite:
https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/cognitive-services/text-analytics/includes/create-text-analytics-resource.md
"""
from enum import Enum
from typing import Dict, Iterable, List, Optional
from pydantic import BaseModel
import requests
class RequestDocument(BaseModel):
id: str
text: str
language: str
class RequestBody(BaseModel):
documents: List[RequestDocument]
class Entity(BaseModel):
offset: int
length: int
category: str
subcategory: Optional[str]
confidenceScore: Optional[float]
class ResponseDocument(BaseModel):
id: str
entities: List[Entity]
class ResponseBody(BaseModel):
documents: List[ResponseDocument]
class Endpoint(str, Enum):
GENERAL = "general"
PII = "pii"
class TextAnalyticsClient:
"""Client for Azure Text Analytics Entity Recognition API."""
def __init__(
self,
key: str,
base_url: str,
endpoint: Endpoint = Endpoint.PII,
domain: str = "phi",
default_language: str = "en",
):
"""Initialize TextAnalyticsClient
key (str): The key used to authenticate to Text Analytics Azure Instance.
base_url (str): Supported Cognitive Services or Text Analytics
resource endpoints (protocol and hostname).
endpoint (Endpoint): Endpoint for prediction. Defaults to PII.
domain (str): Domain to use for recognition. Defaults to PHI.
"""
self.__key = key
self.base_url = base_url
self.endpoint = endpoint
self.domain = domain
self.default_language = default_language
def predict(
self, texts: Iterable[str], language: Optional[str] = None
) -> ResponseBody:
"""Extract Azure entities from batch of texts
texts (Iterable[str]): Input texts
language (Optional[str]): Input text language.
        RETURNS (ResponseBody): Parsed response containing recognized entities with character offsets
"""
if not language:
language = self.default_language
if not texts:
return ResponseBody(documents=[])
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Ocp-Apim-Subscription-Key": self.__key,
}
data = {
"documents": [
{"id": i, "language": language, "text": text}
for i, text in enumerate(texts)
]
}
recognition_path = f"/text/analytics/v3.1-preview.5/entities/recognition/{self.endpoint}?domain={self.domain}"
res = requests.post(
self.base_url + recognition_path, json=data, headers=headers
)
data = res.json()
response = ResponseBody(**data)
return response
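# Minimal usage sketch; the key and endpoint URL below are placeholders, not real values:
if __name__ == "__main__":
    client = TextAnalyticsClient(
        key="<your-text-analytics-key>",
        base_url="https://<your-resource>.cognitiveservices.azure.com",
    )
    result = client.predict(["Patient was prescribed 100 mg of ibuprofen."])
    for doc in result.documents:
        for entity in doc.entities:
            print(entity.category, entity.offset, entity.length)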
|
starthinker/tool/newsletter.py | arbrown/starthinker | 138 | 12639207 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
import argparse
import textwrap
from starthinker.util.email import send_email
from starthinker.util.email_template import EmailTemplate
from starthinker.util.configuration import commandline_parser, Configuration
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Command line to send email template via gMail.
Email templates are JSON that assembles into both HTMl and TXT parts of an email.
For email sample see: https://github.com/google/starthinker/blob/master/starthinker/task/newsletter/sample.json
Example:
- Generate an HTML page from a template, then view via browser.
python newsletter.py --template scripts/newsletter_sample.json > ~/Downloads/email.html
- Send an email template via gMail.
python newsletter.py --template scripts/newsletter_sample.json --email_to <EMAIL> --email_from <EMAIL> -u $STARTHINKER_USER
"""))
# get parameters
parser.add_argument(
'--template',
help='template to use for email',
default=None,
required=True)
parser.add_argument('--email_to', help='email to', default=None)
parser.add_argument('--email_from', help='email from', default=None)
# initialize project
parser = commandline_parser(parser, arguments=('-u', '-c', '-v'))
args = parser.parse_args()
config = Configuration(
user=args.user,
client=args.client,
verbose=args.verbose
)
# load template
with open(args.template, 'r') as json_file:
email = EmailTemplate(json.load(json_file))
# send or print
if args.email_to and args.email_from:
print('EMAILING: ', args.email_to)
send_email(
config,
'user',
args.email_to,
args.email_from,
None,
email.get_subject(),
email.get_text(),
email.get_html()
)
else:
# write to STDOUT
print(email.get_html())
print('<pre style="width:600px;margin:0px auto;">%s</pre>' % email.get_text())
if __name__ == '__main__':
main()
|
backend/kale/common/serveutils.py | brness/kale | 502 | 12639208 | # Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import time
import json
import shutil
import logging
import requests
import kubernetes
import pkg_resources
from typing import Any, Dict, Callable
from kubernetes.client.rest import ApiException
from kale import NotebookProcessor, marshal
from kale.common import (astutils, flakeutils, podutils, k8sutils, rokutils,
jputils, utils)
log = logging.getLogger(__name__)
PREDICTORS = [
"onnx"
"custom",
"triton",
"pytorch",
"sklearn",
"xgboost",
"tensorflow",
]
CO_GROUP = "serving.kubeflow.org"
CO_VERSION = "v1alpha2"
CO_PLURAL = "inferenceservices"
API_VERSION = "%s/%s" % (CO_GROUP, CO_VERSION)
RAW_TEMPLATE = """\
apiVersion: %s
kind: InferenceService
metadata:
annotations:
sidecar.istio.io/inject: "false"
name: {name}
spec:
minReplicas: 0
default:
predictor:
""" % API_VERSION
PVC_PREDICTOR_TEMPLATE = """\
{predictor}:
storageUri: "pvc://{pvc_name}{model_path}"
"""
CUSTOM_PREDICTOR_TEMPLATE = """\
container:
image: {image}
name: kfserving-container
ports:
- containerPort: {port}
env:
- name: STORAGE_URI
value: "pvc://{pvc_name}{model_path}"
"""
TRANSFORMER_CUSTOM_TEMPLATE = """\
custom:
container:
command:
- python3
- -m
- "kale.kfserving"
image: {image}
name: kfserving-container
securityContext:
runAsUser: 0
env:
- name: STORAGE_URI
value: "pvc://{pvc_name}/"
- name: PVC_MOUNT_POINT
value: {pvc_mount_point}
"""
PVC_ROOT = os.getenv("HOME")  # assume there is always a PVC mounted at $HOME
TRANSFORMER_ASSETS_DIR = os.path.join(PVC_ROOT,
".kale.kfserving-transformer.dir")
TRANSFORMER_FN_ASSET_NAME = "transformer_function"
PREDICTOR_MODEL_DIR = os.path.join(PVC_ROOT, ".kale.kfserving.model.dir")
TRANSFORMER_SRC_NOTEBOOK_NAME = "source_notebook.ipynb"
class KFServer(object):
"""Wrapper around a deployed InferenceService.
Use this class to retrieve information about an InferenceService or to
hit the prediction endpoint.
"""
def __init__(self, name: str, spec: str):
self.name = name
self.spec = spec
def __repr__(self):
"""Show an interactive text in notebooks."""
if utils.is_ipython():
import IPython
html = ('InferenceService <pre>%s</pre> serving requests at host'
' <pre>%s</pre><br>'
'View model <a href="/models/details/%s/%s"'
' target="_blank" >here</a>'
% (self.name,
get_inference_service_host(self.name),
podutils.get_namespace(),
self.name))
IPython.display.display(IPython.display.HTML(html))
return ""
else:
return super(KFServer, self).__repr__()
def delete(self):
"""Delete the InferenceService CR."""
namespace = podutils.get_namespace()
log.info("Deleting InferenceServer '%s/%s'...", namespace, self.name)
k8s_co_client = k8sutils.get_co_client()
k8s_co_client.delete_namespaced_custom_object(CO_GROUP, CO_VERSION,
namespace, CO_PLURAL,
self.name)
log.info("Successfully deleted InferenceService.")
def predict(self, data: str, tensor=False):
"""Hit the InferenceService endpoint.
tensor: when set to True, return the result loaded in tensor objects,
based on the framework being used.
"""
# FIXME: Change this API to accept a dictionary and perform a
# json.dumps instead of relying on the user to do this.
log.info("Sending a request to the InferenceService...")
log.info("Getting InferenceService's host...")
host = get_inference_service_host(self.name)
headers = {"content-type": "application/json", "Host": host}
log.info("Sending request to InferenceService...")
response = requests.post(
"http://cluster-local-gateway.istio-system/v1/models/"
"%s:predict" % self.name, data=data,
headers=headers)
if response.status_code == 200:
log.info("Response: %s", utils.shorten_long_string(response.text))
if tensor:
return self._to_tensor(json.loads(response.text))
else:
return json.loads(response.text)
else:
log.error("The request failed with status code %s",
response.status_code)
return response
def _to_tensor(self, data):
log.warning("Kale does not yet support converting the predicted"
" response to a tensor.")
return data
def serve(model: Any,
name: str = None,
wait: bool = True,
predictor: str = None,
preprocessing_fn: Callable = None,
preprocessing_assets: Dict = None) -> KFServer:
"""Main API used to serve models from a notebook or a pipeline step.
This function procedurally deploys a KFServing InferenceService, starting
from a model object. A summary list of actions follows:
* Autogenerate an InferenceService name, if not provided
* Process transformer function (and related assets)
* Dump the model, to a path under a mounted PVC
* Snapshot the PVC
* Hydrate a new PVC from the new snapshot
* Submit an InferenceService CR
* Monitor the CR until it becomes ready
FIXME: Improve documentation. Provide some examples in the docstring and
explain how the preprocessing function parsing works.
Args:
model: Model object to be used as a predictor
name (optional): Name of the predictor. Will be autogenerated if not
provided
wait (optional): Wait for the InferenceService to become ready.
Default: True
predictor (optional): Predictor type to be used for the
InferenceService. If not provided it will be inferred using
the the matching marshalling backends.
preprocessing_fn (optional): A processing function that will be
deployed as a KFServing Transformer
preprocessing_assets (optional): A dictionary with object required by
the preprocessing function. This is needed in case the
preprocessing function references global objects.
Returns: A KFServer instance
"""
log.info("Starting serve procedure for model '%s'", model)
if not name:
name = "%s-%s" % (podutils.get_pod_name(), utils.random_string(5))
# Validate and process transformer
if preprocessing_fn:
_prepare_transformer_assets(preprocessing_fn, preprocessing_assets)
# Detect predictor type
predictor_type = marshal.get_backend(model).predictor_type
if predictor and predictor != predictor_type:
raise RuntimeError("Trying to create an InferenceService with"
" predictor of type '%s' but the model is of type"
" '%s'" % (predictor, predictor_type))
if not predictor_type:
log.error("Kale does not yet support serving objects with '%s'"
" backend.\n\nPlease help us improve Kale by opening a new"
" issue at:\n"
"https://github.com/kubeflow-kale/kale/issues",
marshal.get_backend(model).display_name)
utils.graceful_exit(-1)
predictor = predictor_type # in case `predictor` is None
volume = podutils.get_volume_containing_path(PVC_ROOT)
volume_name = volume[1].persistent_volume_claim.claim_name
log.info("Model is contained in volume '%s'", volume_name)
# Dump the model
marshal.set_data_dir(PREDICTOR_MODEL_DIR)
model_filepath = marshal.save(model, "model")
log.info("Model saved successfully at '%s'", model_filepath)
# Take snapshot
task_info = rokutils.snapshot_pvc(volume_name,
bucket=rokutils.SERVING_BUCKET,
wait=True)
task = rokutils.get_task(task_info["task"]["id"],
bucket=rokutils.SERVING_BUCKET)
new_pvc_name = "%s-pvc-%s" % (name, utils.random_string(5))
rokutils.hydrate_pvc_from_snapshot(task["result"]["event"]["object"],
task["result"]["event"]["version"],
new_pvc_name,
bucket=rokutils.SERVING_BUCKET)
# Cleanup: remove dumped model and transformer assets from the current PVC
utils.rm_r(os.path.join(PREDICTOR_MODEL_DIR,
os.path.basename(model_filepath)))
utils.rm_r(TRANSFORMER_ASSETS_DIR, silent=True)
# Need an absolute path from the *root* of the PVC. Add '/' if not exists.
pvc_model_path = "/" + PREDICTOR_MODEL_DIR.lstrip(PVC_ROOT)
# Tensorflow saves the model's files into a directory by itself
if predictor == "tensorflow":
pvc_model_path += "/" + os.path.basename(model_filepath).lstrip("/")
kfserver = create_inference_service(
name=name,
predictor=predictor,
pvc_name=new_pvc_name,
model_path=pvc_model_path,
transformer=preprocessing_fn is not None)
if wait:
monitor_inference_service(kfserver.name)
return kfserver
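# Illustrative usage sketch for serve(); the model and variables below are hypothetical:
#
#   from sklearn.linear_model import LogisticRegression
#   model = LogisticRegression().fit(x_train, y_train)
#   kfserver = serve(model, name="my-sklearn-model", wait=True)
#   kfserver.predict(json.dumps({"instances": x_test[:2].tolist()}))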
def _prepare_transformer_assets(fn: Callable, assets: Dict = None):
notebook_path = jputils.get_notebook_path()
processor = NotebookProcessor(nb_path=notebook_path,
skip_validation=True)
fn_source = astutils.get_function_source(fn, strip_signature=False)
missing_names = flakeutils.pyflakes_report(
processor.get_imports_and_functions() + "\n" + fn_source)
if not assets:
assets = dict()
if not isinstance(assets, dict):
ValueError("Please provide preprocessing assets as a dictionary"
" mapping variables *names* to their objects")
missing_assets = [x not in assets.keys() for x in missing_names]
if any(missing_assets):
raise RuntimeError("The following abjects are a dependency for the"
" provided preprocessing function. Please add the"
" to the `preprocessing_assets` dictionary: %s"
% [a for a, m in zip(missing_names, missing_assets)
if m])
# save function and assets
utils.clean_dir(TRANSFORMER_ASSETS_DIR)
marshal.set_data_dir(TRANSFORMER_ASSETS_DIR)
marshal.save(fn, TRANSFORMER_FN_ASSET_NAME)
for asset_name, asset_value in assets.items():
marshal.save(asset_value, asset_name)
# save notebook as well
shutil.copy(notebook_path, os.path.join(TRANSFORMER_ASSETS_DIR,
TRANSFORMER_SRC_NOTEBOOK_NAME))
def create_inference_service(name: str,
predictor: str,
pvc_name: str,
model_path: str,
image: str = None,
port: int = None,
transformer: bool = False,
submit: bool = True) -> KFServer:
"""Create and submit an InferenceService.
Args:
name (str): Name of the InferenceService CR
predictor (str): One of serveutils.PREDICTORS
pvc_name (str): Name of the PVC which contains the model
model_path (str): Absolute path to the dump of the model
image (optional): Image to run the InferenceService
port (optional): To be used in conjunction with `image`. The port where
the custom endpoint is exposed.
transformer (bool): True if the InferenceService is to be deployed with
a transformer.
submit (bool): Set to False to just create the YAML and not submit the
CR to the K8s.
    Returns (KFServer): A KFServer instance wrapping the InferenceService name and generated YAML spec
"""
if predictor not in PREDICTORS:
raise ValueError("Invalid predictor: %s. Choose one of %s"
% (predictor, PREDICTORS))
if predictor == "custom":
if not image:
raise ValueError("You must specify an image when using a custom"
" predictor.")
if not port:
raise ValueError("You must specify a port when using a custom"
" predictor.")
predictor_spec = CUSTOM_PREDICTOR_TEMPLATE.format(
image=image,
port=port,
pvc_name=pvc_name,
model_path=model_path)
else:
if image is not None:
log.info("Creating an InferenceService with predictor '%s'."
" Ignoring image...", predictor)
if port is not None:
log.info("Creating an InferenceService with predictor '%s'."
" Ignoring port...", predictor)
predictor_spec = PVC_PREDICTOR_TEMPLATE.format(predictor=predictor,
pvc_name=pvc_name,
model_path=model_path)
infs_spec = yaml.safe_load(RAW_TEMPLATE.format(name=name))
predictor_spec = yaml.safe_load(predictor_spec)
if predictor == "tensorflow":
# XXX: TF Server is the only predictor being pulled from an external
        # repository. TFServer containers are tagged using the library's version
        # number. All the other predictors are built by the KFServing community
# and are tagged following KFServing's version number. Default values
# for these can be set in the `inferenceservice-config` ConfigMap.
_version = _get_runtime_version(predictor)
predictor_spec["tensorflow"]["runtimeVersion"] = _version
infs_spec["spec"]["default"]["predictor"] = predictor_spec
if transformer:
transformer_spec = yaml.safe_load(
TRANSFORMER_CUSTOM_TEMPLATE.format(
image=podutils.get_docker_base_image(),
pvc_name=pvc_name,
pvc_mount_point=PVC_ROOT
))
infs_spec["spec"]["default"]["transformer"] = transformer_spec
yaml_filename = "%s.kfserving.yaml" % name
yaml_contents = yaml.dump(infs_spec)
log.info("Saving InferenceService definition at '%s'", yaml_filename)
with open(yaml_filename, "w") as yaml_file:
yaml_file.write(yaml_contents)
if submit:
_submit_inference_service(infs_spec, podutils.get_namespace())
_add_owner_references(name, pvc_name)
return KFServer(name=name, spec=yaml_contents)
def _get_runtime_version(predictor: str):
library = [backend.display_name
for backend in marshal.get_backends().values()
if backend.predictor_type == predictor]
if not library:
raise ValueError("The provided predictor is not backed by any"
" Kale marshalling backend.")
if len(library) > 1:
raise ValueError("Too many backends are matching the '%s' predictor:"
" %s" % (predictor, library))
return pkg_resources.get_distribution(library[0]).version
def _submit_inference_service(inference_service: Dict, namespace: str):
k8s_co_client = k8sutils.get_co_client()
name = inference_service["metadata"]["name"]
log.info("Creating InferenceService '%s'...", name)
try:
k8s_co_client.create_namespaced_custom_object(CO_GROUP, CO_VERSION,
namespace, CO_PLURAL,
inference_service)
except ApiException:
log.exception("Failed to create InferenceService")
raise
log.info("Successfully created InferenceService: %s", name)
def _add_owner_references(infs_name: str, pvc_name: str):
# add owner reference to the PVC
log.info("Adding owner references to PVC '%s' for InferenceService '%s'",
pvc_name, infs_name)
client = k8sutils.get_v1_client()
infs = get_inference_service(infs_name)
pvc = client.read_namespaced_persistent_volume_claim(
pvc_name, podutils.get_namespace())
ref = kubernetes.client.V1OwnerReference(api_version=API_VERSION,
kind="InferenceService",
name=infs_name,
uid=infs["metadata"]["uid"])
if not pvc.metadata.owner_references:
pvc.metadata.owner_references = [ref]
else:
pvc.metadata.owner_references.append(ref)
client.patch_namespaced_persistent_volume_claim(
name=pvc_name,
namespace=podutils.get_namespace(),
body=pvc)
def monitor_inference_service(name: str):
"""Waits for an InferenceService to become ready.
An InferenceService is considered ready when two conditions are met:
1. the ``status.conditions`` field of the CR contains a condition of
type ``Ready`` with a ``True`` status.
2. The CR defines a valid host/url for the (default) predictor.
Args:
name (str): Name of the KFServing InferenceService
"""
host = None
def _is_ready(inference_service):
if not inference_service.get("status"):
return False
for condition in inference_service["status"].get("conditions", []):
if (condition.get("type") == "Ready"
and condition.get("status") == "True"):
return True
return False
while host is None:
log.info("Waiting for InferenceService '%s' to become ready...", name)
try:
inf = get_inference_service(name)
except ApiException as e:
log.error("Failed to get InferenceService. ApiException: %s", e)
return
if _is_ready(inf):
try:
if inf["status"].get("default"): # v1alpha3
host = inf["status"]["default"]["predictor"]["host"]
elif inf["status"].get("components"): # v1beta1
host = inf["status"]["components"]["predictor"]["url"]
except KeyError:
pass
time.sleep(3)
log.info("InferenceService '%s' is ready.", name)
def get_inference_service(name: str):
"""Get an InferenceService object."""
k8s_co_client = k8sutils.get_co_client()
ns = podutils.get_namespace()
return k8s_co_client.get_namespaced_custom_object(CO_GROUP, CO_VERSION,
ns, CO_PLURAL, name)
def get_inference_service_host(name: str) -> str:
"""Get the hostname of the InferenceService.
Args:
name (str): Name of the KFServing InferenceService
Returns:
str: The status.url field of the InferenceService CR. Empty string
if ``url`` is not defined.
"""
inference_service = get_inference_service(name)
try:
url = inference_service["status"]["url"]
except KeyError:
log.error("Could not find url for InferenceService '%s'", name)
return ""
if url.startswith("http://"):
url = url[len("http://"):]
return url.replace("example.com", "svc.cluster.local")
def get_inference_service_default_predictor_host(name: str) -> str:
"""Get the hostname of the default predictor.
This function supports both v1alpha3 and v1beta1 InferenceService CRDs.
The predictor's url/host is defined in two different fields:
- ``v1alpha3``: ``status.default.predictor.host``
- ``v1beta1``: ``status.components.predictor.url``
Args:
name (str): Name of the KFServing InferenceService
Returns:
str: The host/url field of the (default) predictor. Empty string if
it cannot be determined.
"""
inf = get_inference_service(name)
try:
if inf["status"].get("default"): # v1alpha3
return inf["status"]["default"]["predictor"]["host"]
elif inf["status"].get("components"): # v1beta1
return inf["status"]["components"]["predictor"]["url"]
except KeyError:
log.error("Could not find the predictor's url for InferenceService"
" '%s'", name)
return ""
|
Chapter06/flat/flatten.py | apolukhin/boost-cookbook | 313 | 12639244 | <reponame>apolukhin/boost-cookbook<gh_stars>100-1000
import os
import sys
import signal
import subprocess
import re
import shutil
class flattener:
''' ****************************************** Private functions ********************************************** '''
@staticmethod
def _process_source(out, path):
with open(path, 'r') as f:
for line in f:
m = re.findall(r'#include.*"(.*?)"', line)
if m:
d = os.path.dirname(path)
flattener._process_source(out, os.path.join(d, m[0]))
elif 'BOOK_' not in line:
out.write(line)
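    # _process_source inlines each local '#include "header"' by recursively splicing the
    # header's contents (resolved relative to the including file) into the output,
    # while dropping lines that contain 'BOOK_' markers.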
@staticmethod
def _flat_source(path):
flat_name = os.path.join(
os.path.dirname(path).replace('../', ''),
"main.cpp"
)
try:
with open(flat_name, 'w+') as out:
flattener._process_source(out, path)
if 'tasks_processor_network_accept' in flat_name:
with open('../03_tasks_processor_network_client/client.cpp', 'r') as f:
for line in f:
m = re.findall(r'#include.*"(.*?)"', line)
if not m and 'using namespace' not in line:
out.write(line)
except:
pass
@staticmethod
def _is_source(path):
return os.path.isfile(path) and '.cpp' in path and 'flat' not in path
''' ****************************************** Public functions *********************************************** '''
@staticmethod
def make_flat():
print "\nStarting flattening..."
for folder, _, files in os.walk('..'):
if 'flat' in folder:
continue
for f in files:
path = os.path.join(folder, f)
if flattener._is_source(path):
flattener._flat_source(path)
print "\n*** SUCESS ***"
if __name__ == "__main__":
flattener.make_flat()
|
colossalai/nn/layer/colossalai_layer/linear.py | RichardoLuo/ColossalAI | 1,630 | 12639259 | <filename>colossalai/nn/layer/colossalai_layer/linear.py
import math
import inspect
from typing import Callable
from colossalai.utils import get_current_device
from torch import dtype, nn
from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mode
from ..vanilla import *
from ._utils import ColossalaiModule
_parallel_linear = {'1d': Linear1D, '2d': Linear2D, '2.5d': Linear2p5D, '3d': Linear3D}
_parallel_classifier = {
None: VanillaClassifier,
'1d': Classifier1D,
'2d': Classifier2D,
'2.5d': Classifier2p5D,
'3d': Classifier3D
}
_vocab_parallel_classifier = {
'1d': VocabParallelClassifier1D,
'2d': VocabParallelClassifier2D,
'2.5d': VocabParallelClassifier2p5D,
'3d': VocabParallelClassifier3D
}
class Linear(ColossalaiModule):
"""Linear layer of colossalai.
Args:
in_features (int): size of each input sample.
out_features (int): size of each output sample.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
Note: ``kwargs`` would contain different parameters when you use different parallelisms.
The ``kwargs`` should contain parameters below:
::
Linear1D:
gather_output: bool (optional, default to be false)
skip_bias_add: bool (optional, default to be false)
Linear2D:
skip_bias_add: bool (optional, default to be false)
Linear2p5D:
skip_bias_add: bool (optional, default to be false)
Linear3D:
None
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
**kwargs) -> None:
tensor_parallel = get_tensor_parallel_mode()
if tensor_parallel is None:
layer = nn.Linear(in_features, out_features, bias=bias).to(dtype).to(get_current_device())
weight_initializer(layer.weight, fan_in=in_features, fan_out=out_features)
if layer.bias is not None:
bias_initializer(layer.bias, fan_in=in_features)
else:
linear_cls = _parallel_linear[tensor_parallel]
gather_output = kwargs.pop('gather_output', None)
if 'gather_output' in inspect.signature(linear_cls.__init__).parameters.keys(): # gather_out arg is available
kwargs['gather_output'] = gather_output
layer = linear_cls(
in_features,
out_features,
bias=bias,
dtype=dtype,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
**kwargs,
)
super().__init__(layer)
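# Usage sketch (illustrative, not part of the original module; the top-level
# re-export ``colossalai.nn.Linear`` is assumed). With no tensor parallel mode
# configured this wraps a plain ``torch.nn.Linear``; otherwise the matching
# 1D/2D/2.5D/3D implementation is picked automatically.
#
#   from colossalai.nn import Linear
#   fc = Linear(1024, 4096, bias=True)
#   # under 1D tensor parallelism the mode-specific kwargs listed above apply:
#   # fc = Linear(1024, 4096, gather_output=True)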
class Classifier(ColossalaiModule):
"""Classifier layer of colossalai.
Args:
in_features (int): size of each input sample.
num_classes (int): number of classes.
weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
    For more details about ``initializer``, please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
num_classes: int,
weight: nn.Parameter = None,
bias: bool = True,
dtype: dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
vocab_parallel_limit: int = 2048) -> None:
tensor_parallel = get_tensor_parallel_mode()
if num_classes <= vocab_parallel_limit or tensor_parallel is None:
layer = _parallel_classifier[tensor_parallel](
in_features,
num_classes,
weight=weight,
bias=bias,
dtype=dtype,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
)
else:
layer = _vocab_parallel_classifier[tensor_parallel](
in_features,
num_classes,
weight=weight,
bias=bias,
dtype=dtype,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
)
super().__init__(layer)
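# Usage sketch (illustrative): when ``num_classes`` exceeds ``vocab_parallel_limit``
# and a tensor parallel mode is active, the vocab-parallel variant shards the large
# classification weight across ranks; otherwise the regular classifier is used.
#
#   head = Classifier(in_features=1024, num_classes=50304)    # vocab-parallel path
#   small_head = Classifier(in_features=1024, num_classes=10) # regular path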
|
macro_benchmark/BERT_PyTorch/run_pretraining_inference.py | songhappy/ai-matrix | 180 | 12639260 | <gh_stars>100-1000
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#==================
import csv
import os
import logging
import argparse
import random
import h5py
from tqdm import tqdm, trange
import os
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
import math
import time
from tokenization import BertTokenizer
from modeling import BertForPreTraining, BertConfig
# from fused_adam_local import FusedAdamBert
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from apex.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class pretraining_dataset(Dataset):
def __init__(self, input_file, max_pred_length):
self.input_file = input_file
self.max_pred_length = max_pred_length
f = h5py.File(input_file, "r")
self.input_ids = np.asarray(f["input_ids"][:]).astype(np.int64)#[num_instances x max_seq_length])
self.input_masks = np.asarray(f["input_mask"][:]).astype(np.int64) #[num_instances x max_seq_length]
self.segment_ids = np.asarray(f["segment_ids"][:]).astype(np.int64) #[num_instances x max_seq_length]
self.masked_lm_positions = np.asarray(f["masked_lm_positions"][:]).astype(np.int64) #[num_instances x max_pred_length]
self.masked_lm_ids= np.asarray(f["masked_lm_ids"][:]).astype(np.int64) #[num_instances x max_pred_length]
self.next_sentence_labels = np.asarray(f["next_sentence_labels"][:]).astype(np.int64) # [num_instances]
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.input_ids)
def __getitem__(self, index):
input_ids= torch.from_numpy(self.input_ids[index]) # [max_seq_length]
input_mask = torch.from_numpy(self.input_masks[index]) #[max_seq_length]
segment_ids = torch.from_numpy(self.segment_ids[index])# [max_seq_length]
masked_lm_positions = torch.from_numpy(self.masked_lm_positions[index]) #[max_pred_length]
masked_lm_ids = torch.from_numpy(self.masked_lm_ids[index]) #[max_pred_length]
next_sentence_labels = torch.from_numpy(np.asarray(self.next_sentence_labels[index])) #[1]
masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1
index = self.max_pred_length
# store number of masked tokens in index
if len((masked_lm_positions == 0).nonzero()) != 0:
index = (masked_lm_positions == 0).nonzero()[0].item()
masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels]
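# Standalone usage sketch (the shard path below is a placeholder): each item is
# [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels].
#
#   ds = pretraining_dataset("/data/test_shard_0.hdf5", max_pred_length=80)
#   input_ids, segment_ids, input_mask, mlm_labels, nsp_label = ds[0]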
def main():
print("IN NEW MAIN XD\n")
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain .hdf5 files for the task.")
parser.add_argument("--config_file",
default="bert_config.json",
type=str,
required=False,
help="The BERT model config")
ckpt_group = parser.add_mutually_exclusive_group(required=True)
ckpt_group.add_argument("--ckpt_dir",
default=None,
type=str,
help="The ckpt directory, e.g. /results")
ckpt_group.add_argument("--ckpt_path",
default=None,
type=str,
help="Path to the specific checkpoint")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--eval', dest='do_eval', action='store_true')
group.add_argument('--prediction', dest='do_eval', action='store_false')
## Other parameters
parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_predictions_per_seq",
default=80,
type=int,
help="The maximum total of masked tokens in input sequence")
parser.add_argument("--ckpt_step",
default=-1,
type=int,
required=False,
help="The model checkpoint iteration, e.g. 1000")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for training.")
parser.add_argument("--max_steps",
default=-1,
type=int,
help="Total number of eval steps to perform, otherwise use full dataset")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl', init_method='env://')
n_gpu = torch.cuda.device_count()
if n_gpu > 1:
assert(args.local_rank != -1) # only use torch.distributed for multi-gpu
logger.info("device %s n_gpu %d distributed inference %r", device, n_gpu, bool(args.local_rank != -1))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# Prepare model
config = BertConfig.from_json_file(args.config_file)
# Padding for divisibility by 8
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
model = BertForPreTraining(config)
if args.ckpt_dir:
if args.ckpt_step == -1:
#retrieve latest model
model_names = [f for f in os.listdir(args.ckpt_dir) if f.endswith(".pt")]
args.ckpt_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names])
print("load model saved at iteraton", args.ckpt_step)
model_file = os.path.join(args.ckpt_dir, "ckpt_" + str(args.ckpt_step) + ".pt")
else:
model_file = args.ckpt_path
state_dict = torch.load(model_file, map_location="cpu")["model"]
model.load_state_dict(state_dict, strict=False)
if args.fp16:
model.half() # all parameters and buffers are converted to half precision
model.to(device)
multi_gpu_training = args.local_rank != -1 and torch.distributed.is_initialized()
if multi_gpu_training:
model = DDP(model)
files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if os.path.isfile(os.path.join(args.input_dir, f)) and 'test' in f]
files.sort()
logger.info("***** Running evaluation *****")
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
print("Evaluation. . .")
nb_instances = 0
max_steps = args.max_steps if args.max_steps > 0 else np.inf
global_step = 0
with torch.no_grad():
if args.do_eval:
final_loss = 0.0 #
for data_file in files:
logger.info("file %s" %( data_file))
dataset = pretraining_dataset(input_file=data_file, max_pred_length=args.max_predictions_per_seq)
if not multi_gpu_training:
train_sampler = RandomSampler(dataset)
datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
else:
train_sampler = DistributedSampler(dataset)
datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
for step, batch in enumerate(tqdm(datasetloader, desc="Iteration")):
if global_step > max_steps:
break
batch = [t.to(device) for t in batch]
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch#\
loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, next_sentence_label=next_sentence_labels)
final_loss += loss.item()
global_step += 1
torch.cuda.empty_cache()
if global_step > max_steps:
break
final_loss /= global_step
if multi_gpu_training:
final_loss = torch.tensor(final_loss, device=device)
dist.all_reduce(final_loss)
final_loss /= torch.distributed.get_world_size()
if (not multi_gpu_training or (multi_gpu_training and torch.distributed.get_rank() == 0)):
logger.info("Finished: Final Loss = {}".format(final_loss))
else: # inference
# if multi_gpu_training:
# torch.distributed.barrier()
# start_t0 = time.time()
for data_file in files:
logger.info("file %s" %( data_file))
dataset = pretraining_dataset(input_file=data_file, max_pred_length=args.max_predictions_per_seq)
if not multi_gpu_training:
train_sampler = RandomSampler(dataset)
datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
else:
train_sampler = DistributedSampler(dataset)
datasetloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
for step, batch in enumerate(tqdm(datasetloader, desc="Iteration")):
if global_step > max_steps:
break
batch = [t.to(device) for t in batch]
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch#\
lm_logits, nsp_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=None, next_sentence_label=None)
nb_instances += input_ids.size(0)
global_step += 1
torch.cuda.empty_cache()
if global_step > max_steps:
break
# if multi_gpu_training:
# torch.distributed.barrier()
if (not multi_gpu_training or (multi_gpu_training and torch.distributed.get_rank() == 0)):
logger.info("Finished")
if __name__ == "__main__":
main()
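# Example invocation (sketch; directories are placeholders, all flags are defined in main()):
#   python run_pretraining_inference.py \
#       --input_dir /data/hdf5_shards --ckpt_dir /results \
#       --eval --eval_batch_size 8 --fp16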
|
docs/OOPS/Reference_variable_2.py | munyumunyu/Python-for-beginners | 158 | 12639290 | '''
Just like a balloon without a ribbon, an object without a reference variable cannot be used later.
'''
class Mobile:
def __init__(self, price, brand):
self.price = price
self.brand = brand
Mobile(1000, "Apple")
#After the above line the Mobile
# object created is lost and unusable
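#For contrast, a minimal sketch of keeping a reference:
phone = Mobile(2000, "Samsung")
#The 'phone' variable keeps the object reachable,
#so it can still be used later
print(phone.brand)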
|
src/enamlnative/widgets/checkbox.py | codelv/enaml-native | 237 | 12639292 | """
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Bool, observe
)
from enaml.core.declarative import d_
from .compound_button import CompoundButton, ProxyCompoundButton
class ProxyCheckBox(ProxyCompoundButton):
""" The abstract definition of a proxy CheckBox object.
"""
    #: A reference to the CheckBox declaration.
declaration = ForwardTyped(lambda: CheckBox)
class CheckBox(CompoundButton):
""" A simple control for displaying a CheckBox.
"""
#: A reference to the ProxyCheckBox object.
proxy = Typed(ProxyCheckBox)
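# Usage sketch in an .enaml view (illustrative; ``text`` and ``checked`` are
# assumed to come from the TextView/CompoundButton base classes and may differ
# by version):
#
#   enamldef ContentView(Flexbox):
#       CheckBox:
#           text = "Enable notifications"
#           checked = True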
|
third_party/mesa/MesaLib/src/gallium/drivers/llvmpipe/lp_tile_shuffle_mask.py | Scopetta197/chromium | 212 | 12639309 | <reponame>Scopetta197/chromium
tile = [[0,1,4,5],
[2,3,6,7],
[8,9,12,13],
[10,11,14,15]]
shift = 0
align = 1
value = 0L
holder = []
import sys
basemask = [0x
fd = sys.stdout
indent = " "*9
for c in range(4):
fd.write(indent + "*pdst++ = \n");
for l,line in enumerate(tile):
fd.write(indent + " %s_mm_shuffle_epi8(line%d, (__m128i){"%(l and '+' or ' ',l))
for i,pos in enumerate(line):
mask = 0x00ffffffff & (~(0xffL << shift))
value = mask | ((pos) << shift)
holder.append(value)
if holder and (i + 1) %2 == 0:
fd.write("0x%8.0x"%(holder[0] + (holder[1] << 32)))
holder = []
if (i) %4 == 1:
fd.write( ',')
fd.write("})%s\n"%((l == 3) and ';' or ''))
print
shift += 8
|
app/lib/cli/settings.py | grepleria/SnitchDNS | 152 | 12639352 | <reponame>grepleria/SnitchDNS<filename>app/lib/cli/settings.py<gh_stars>100-1000
import click
import tabulate
from flask.cli import with_appcontext
from app.lib.base.provider import Provider
@click.group('settings', help='SnitchDNS Setting Management')
@with_appcontext
def main():
pass
@main.command('list')
@with_appcontext
def cli_settings_list():
settings = Provider().settings()
headers = ['name', 'value']
table = []
for name, value in settings.all().items():
table.append([
name,
value
])
print(tabulate.tabulate(table, headers))
return True
@main.command('get')
@click.option('--name', required=True, help='Config variable name', type=click.STRING)
@click.option('--default', required=False, default='', help='Default value to return if config does not exist', type=click.STRING)
@with_appcontext
def cli_settings_get(name, default):
settings = Provider().settings()
print(settings.get(name, default))
return True
@main.command('set')
@click.option('--name', required=True, help='Config variable name', type=click.STRING)
@click.option('--value', required=True, help='Config variable value', type=click.STRING)
@with_appcontext
def cli_settings_set(name, value):
settings = Provider().settings()
settings.save(name, value)
print("OK")
return True
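# Usage sketch (assumes this group is registered on the Flask CLI under the
# name 'settings'; the setting name below is only an example):
#
#   flask settings list
#   flask settings get --name smtp_host --default ''
#   flask settings set --name smtp_host --value 'mail.example.com'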
|
pages/widgets.py | timbortnik/django-page-cms | 113 | 12639366 | # -*- coding: utf-8 -*-
"""Django CMS come with a set of ready to use widgets that you can enable
in the admin via a placeholder tag in your template."""
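# Template usage sketch (illustrative; check the placeholder documentation of
# your django-page-cms version for the exact tag syntax):
#
#   {% load pages_tags %}
#   {% placeholder "body" with RichTextarea %}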
from pages.settings import PAGES_MEDIA_URL, PAGES_STATIC_URL
from pages.settings import PAGE_LANGUAGES
from pages.models import Page
from pages.widgets_registry import register_widget
from django import forms
from django.forms import TextInput, Textarea
from django.forms import MultiWidget
from django.forms import FileInput as DFileInput
from django.contrib.admin.widgets import AdminTextInputWidget
from django.contrib.admin.widgets import AdminTextareaWidget
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from os.path import join
register_widget(TextInput)
register_widget(Textarea)
register_widget(AdminTextInputWidget)
register_widget(AdminTextareaWidget)
class RichTextarea(Textarea):
"""A RichTextarea widget."""
class Media:
js = [join(PAGES_STATIC_URL, path) for path in (
'javascript/jquery.js',
'javascript/jquery.rte.js'
)]
css = {
'all': [join(PAGES_STATIC_URL, path) for path in (
'css/rte.css',
'css/font-awesome.min.css'
)]
}
def __init__(self, language=None, attrs=None, **kwargs):
attrs = {'class': 'rte'}
self.language = language
super(RichTextarea, self).__init__(attrs)
def render(self, name, value, attrs=None, **kwargs):
rendered = super(RichTextarea, self).render(name, value, attrs)
context = {
'name': name,
'PAGES_STATIC_URL': PAGES_STATIC_URL,
'PAGES_MEDIA_URL': PAGES_MEDIA_URL,
}
return rendered + mark_safe(render_to_string(
'pages/widgets/richtextarea.html', context))
register_widget(RichTextarea)
insert_image_link = u'''
<br>
<button title='insert image from the media library' class='image-lookup-{name}'>
From media library
</button>
<input name="{name}-selected" id="{name}-selected" type="hidden">
<span id="{name}-selected-value">
</span>
<br><label for="{name}-delete">
<input name="{name}-delete" style="display:inline-block" id="{name}-delete" type="checkbox" value="true"> {del_msg}
</label>
<br style="clear:both">
<script>
$(function(){{
function dismissRelatedLookupPopup(win, chosenId) {{
$.get('/admin/pages/page/' + chosenId + '/media-url/', function(response) {{
console.log(response);
$('#{name}-selected').val(response);
$('#{name}-selected-value').text(response);
}});
win.close();
window.dismissRelatedLookupPopup = oldDismissRelatedLookupPopup;
window.dismissAddRelatedObjectPopup = oldDismissAddRelatedObjectPopup;
}}
function showMediaAdminPopup() {{
var name = 'mediaWindowSelect';
var href = '/admin/pages/media/?_to_field=id&_popup=1';
window.dismissRelatedLookupPopup = dismissRelatedLookupPopup;
window.dismissAddRelatedObjectPopup = dismissRelatedLookupPopup;
var win = window.open(href, name, 'height=500,width=800,resizable=yes,scrollbars=yes');
win.focus();
return false;
}}
$('.image-lookup-{name}').click(function(e) {{
e.preventDefault();
showMediaAdminPopup();
return false;
}});
}});
</script>
'''
class FileInput(DFileInput):
def __init__(self, page=None, language=None, attrs=None, **kwargs):
self.language = language
self.page = page
super(FileInput, self).__init__(attrs)
please_save_msg = _('Please save the page to show the file field')
delete_msg = _('Delete file')
def render(self, name, value, attrs=None, **kwargs):
if not self.page:
field_content = self.please_save_msg
else:
field_content = '<span class="placeholder-fileinput">'
if value:
field_content += _('Current file: %s<br/>') % value
field_content += '<hr>'
field_content += super(FileInput, self).render(name, attrs)
field_content += insert_image_link.format(
name=name,
del_msg=self.delete_msg,
value=value)
field_content += '</span>'
return mark_safe(field_content)
register_widget(FileInput)
class ImageInput(FileInput):
please_save_msg = _('Please save the page to show the image field')
delete_msg = _('Delete image')
register_widget(ImageInput)
class LanguageChoiceWidget(TextInput):
def __init__(self, language=None, attrs=None, **kwargs):
self.language = language
self.page = kwargs.get('page')
# page is None
super(LanguageChoiceWidget, self).__init__(attrs)
def render(self, name, value, attrs=None, **kwargs):
context = {
'name': name,
'value': value,
'page': self.page,
'language': value,
'page_languages': PAGE_LANGUAGES
}
return mark_safe(render_to_string(
'pages/widgets/languages.html', context))
class PageLinkWidget(MultiWidget):
'''A page link `Widget` for the admin.'''
def __init__(
self, attrs=None, page=None, language=None,
video_url=None, linkedpage=None, text=None):
l = [('', '----')]
for p in Page.objects.all():
l.append((p.id, str(p)))
widgets = [
forms.Select(choices=l),
TextInput(attrs=attrs)
]
super(PageLinkWidget, self).__init__(widgets, attrs)
def decompress(self, value):
import json
try:
return json.loads(value)
except:
pass
return []
def value_from_datadict(self, data, files, name):
import json
value = ['', '']
for da in [x for x in data if x.startswith(name)]:
index = int(da[len(name) + 1:])
value[index] = data[da]
if value[0] == value[1] == '':
return None
return json.dumps(value)
def _has_changed(self, initial, data):
"""Need to be reimplemented to be correct."""
if data == initial:
return False
return bool(initial) != bool(data)
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), it inserts an HTML
linebreak between them.
Returns a Unicode string representing the HTML for the whole lot.
"""
return """<table>
<tr><td>page</td><td>%s</td></tr>
<tr><td>text</td><td>%s</td></tr>
</table>""" % tuple(rendered_widgets)
register_widget(PageLinkWidget)
|
test/test_objective.py | HowardHu97/ZOOpt | 403 | 12639390 | <reponame>HowardHu97/ZOOpt
from zoopt import Objective
from zoopt import Parameter
from zoopt import Dimension
from zoopt import Solution
import numpy as np
def ackley(solution):
"""
Ackley function for continuous optimization
"""
x = solution.get_x()
bias = 0.2
ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)
ave_cos = sum([np.cos(2.0 * np.pi * (i - bias)) for i in x]) / len(x)
value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e
return value
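# Optimization sketch (illustrative, not part of the test suite; assumes the
# standard ``Opt.min`` entry point):
#
#   from zoopt import Opt
#   dim = 100
#   obj = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim))
#   best = Opt.min(obj, Parameter(budget=100 * dim))
#   print(best.get_x(), best.get_value())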
class TestObjective(object):
def test_parameter_set(self):
par = Parameter(budget=1000, noise_handling=True, suppression=True)
assert 1
def test_eval(self):
dim = 100
obj = Objective(func=ackley, dim=Dimension(dim, [[-1, 1]] * dim, [True] * dim))
sol = Solution(x=[0.2] * dim)
res = obj.eval(sol)
assert abs(res) <= 1e-7
def test_resample(self):
dim = 100
obj = Objective(func=ackley, dim=Dimension(dim, [[-1, 1]] * dim, [True] * dim))
sol = Solution(x=[0.2] * dim)
res = obj.eval(sol)
obj.resample(sol, 3)
assert abs(sol.get_value()) <= 1e-7
sol.set_value(0)
obj.resample_func(sol, 3)
assert abs(sol.get_value()) <= 1e-7
def test_history_best_so_far(self):
input_data = [0.5, 0.6, 0.4, 0.7, 0.3, 0.2]
output_data = [0.5, 0.5, 0.4, 0.4, 0.3, 0.2]
obj = Objective()
obj.set_history(input_data)
best_history = obj.get_history_bestsofar()
assert best_history == output_data
|
reviewboard/reviews/evolutions/group_email_list_only.py | amalik2/reviewboard | 921 | 12639406 | from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('Group', 'email_list_only', models.BooleanField, initial=True)
]
|