metadata | text
---|---
{
"source": "jfburdet/pyicloud",
"score": 2
}
|
#### File: pyicloud/pyicloud/base.py
```python
from six import PY2, string_types
from uuid import uuid1
import inspect
import json
import logging
from requests import Session
from tempfile import gettempdir
from os import path, mkdir
from re import match
import http.cookiejar as cookielib
from pyicloud.exceptions import (
PyiCloudFailedLoginException,
PyiCloudAPIResponseException,
PyiCloud2SARequiredException,
PyiCloudServiceNotActivatedException,
)
from pyicloud.services import (
FindMyiPhoneServiceManager,
CalendarService,
UbiquityService,
ContactsService,
RemindersService,
PhotosService,
AccountService,
DriveService,
)
from pyicloud.utils import get_password_from_keyring
LOGGER = logging.getLogger(__name__)
class PyiCloudPasswordFilter(logging.Filter):
"""Password log hider."""
def __init__(self, password):
super(PyiCloudPasswordFilter, self).__init__(password)
def filter(self, record):
message = record.getMessage()
if self.name in message:
record.msg = message.replace(self.name, "*" * 8)
record.args = []
return True
class PyiCloudSession(Session):
"""iCloud session."""
def __init__(self, service):
self.service = service
Session.__init__(self)
def request(self, method, url, **kwargs): # pylint: disable=arguments-differ
# Attribute this log entry to the service module that issued the request
callee = inspect.stack()[2]
module = inspect.getmodule(callee[0])
request_logger = logging.getLogger(module.__name__).getChild("http")
if self.service.password_filter not in request_logger.filters:
request_logger.addFilter(self.service.password_filter)
request_logger.debug("%s %s %s", method, url, kwargs.get("data", ""))
has_retried = kwargs.get("retried")
kwargs.pop("retried", None)
response = super(PyiCloudSession, self).request(method, url, **kwargs)
content_type = response.headers.get("Content-Type", "").split(";")[0]
json_mimetypes = ["application/json", "text/json"]
if not response.ok and content_type not in json_mimetypes:
if has_retried is None and response.status_code == 450:
api_error = PyiCloudAPIResponseException(
response.reason, response.status_code, retry=True
)
request_logger.debug(api_error)
kwargs["retried"] = True
return self.request(method, url, **kwargs)
self._raise_error(response.status_code, response.reason)
if content_type not in json_mimetypes:
return response
try:
data = response.json()
except: # pylint: disable=bare-except
request_logger.warning("Failed to parse response with JSON mimetype")
return response
request_logger.debug(data)
if isinstance(data, dict):
reason = data.get("errorMessage")
reason = reason or data.get("reason")
reason = reason or data.get("errorReason")
if not reason and isinstance(data.get("error"), string_types):
reason = data.get("error")
if not reason and data.get("error"):
reason = "Unknown reason"
code = data.get("errorCode")
if not code and data.get("serverErrorCode"):
code = data.get("serverErrorCode")
if reason:
self._raise_error(code, reason)
return response
def _raise_error(self, code, reason):
if (
self.service.requires_2sa
and reason == "Missing X-APPLE-WEBAUTH-TOKEN cookie"
):
raise PyiCloud2SARequiredException(self.service.user["apple_id"])
if code in ("ZONE_NOT_FOUND", "AUTHENTICATION_FAILED"):
reason = (
"Please log into https://icloud.com/ to manually "
"finish setting up your iCloud service"
)
api_error = PyiCloudServiceNotActivatedException(reason, code)
LOGGER.error(api_error)
raise api_error
if code == "ACCESS_DENIED":
reason = (
reason + ". Please wait a few minutes then try again."
"The remote servers might be trying to throttle requests."
)
api_error = PyiCloudAPIResponseException(reason, code)
LOGGER.error(api_error)
raise api_error
class PyiCloudService(object):
"""
A base authentication class for the iCloud service. Handles the
authentication required to access iCloud services.
Usage:
from pyicloud import PyiCloudService
pyicloud = PyiCloudService('<EMAIL>', 'password')
pyicloud.iphone.location()
"""
HOME_ENDPOINT = "https://www.icloud.com"
SETUP_ENDPOINT = "https://setup.icloud.com/setup/ws/1"
def __init__(
self,
apple_id,
password=None,
cookie_directory=None,
verify=True,
client_id=None,
with_family=True,
):
if password is None:
password = get_password_from_keyring(apple_id)
self.data = {}
self.client_id = client_id or str(uuid1()).upper()
self.with_family = with_family
self.user = {"apple_id": apple_id, "password": password}
self.password_filter = PyiCloudPasswordFilter(password)
LOGGER.addFilter(self.password_filter)
self._base_login_url = "%s/login" % self.SETUP_ENDPOINT
if cookie_directory:
self._cookie_directory = path.expanduser(path.normpath(cookie_directory))
else:
self._cookie_directory = path.join(gettempdir(), "pyicloud")
self.session = PyiCloudSession(self)
self.session.verify = verify
self.session.headers.update(
{
"Origin": self.HOME_ENDPOINT,
"Referer": "%s/" % self.HOME_ENDPOINT,
"User-Agent": "Opera/9.52 (X11; Linux i686; U; en)",
}
)
cookiejar_path = self._get_cookiejar_path()
self.session.cookies = cookielib.LWPCookieJar(filename=cookiejar_path)
if path.exists(cookiejar_path):
try:
self.session.cookies.load()
LOGGER.debug("Read cookies from %s", cookiejar_path)
except: # pylint: disable=bare-except
# Most likely a pickled cookiejar from earlier versions.
# The cookiejar will get replaced with a valid one after
# successful authentication.
LOGGER.warning("Failed to read cookiejar %s", cookiejar_path)
self.params = {
"clientBuildNumber": "17DHotfix5",
"clientMasteringNumber": "17DHotfix5",
"ckjsBuildVersion": "17DProjectDev77",
"ckjsVersion": "2.0.5",
"clientId": self.client_id,
}
self.authenticate()
self._drive = None
self._files = None
self._photos = None
def authenticate(self):
"""
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple.
"""
LOGGER.info("Authenticating as %s", self.user["apple_id"])
data = dict(self.user)
# We authenticate every time, so "remember me" is not needed
data.update({"extended_login": False})
try:
req = self.session.post(
self._base_login_url, params=self.params, data=json.dumps(data)
)
except PyiCloudAPIResponseException as error:
msg = "Invalid email/password combination."
raise PyiCloudFailedLoginException(msg, error)
self.data = req.json()
self.params.update({"dsid": self.data["dsInfo"]["dsid"]})
self._webservices = self.data["webservices"]
if not path.exists(self._cookie_directory):
mkdir(self._cookie_directory)
self.session.cookies.save()
LOGGER.debug("Cookies saved to %s", self._get_cookiejar_path())
LOGGER.info("Authentication completed successfully")
LOGGER.debug(self.params)
def _get_cookiejar_path(self):
"""Get path for cookiejar file."""
return path.join(
self._cookie_directory,
"".join([c for c in self.user.get("apple_id") if match(r"\w", c)]),
)
@property
def requires_2sa(self):
"""Returns True if two-step authentication is required."""
return (
self.data.get("hsaChallengeRequired", False)
and self.data["dsInfo"].get("hsaVersion", 0) >= 1
)
# FIXME: Implement 2FA for hsaVersion == 2 # pylint: disable=fixme
@property
def trusted_devices(self):
"""Returns devices trusted for two-step authentication."""
request = self.session.get(
"%s/listDevices" % self.SETUP_ENDPOINT, params=self.params
)
return request.json().get("devices")
def send_verification_code(self, device):
"""Requests that a verification code is sent to the given device."""
data = json.dumps(device)
request = self.session.post(
"%s/sendVerificationCode" % self.SETUP_ENDPOINT,
params=self.params,
data=data,
)
return request.json().get("success", False)
def validate_verification_code(self, device, code):
"""Verifies a verification code received on a trusted device."""
device.update({"verificationCode": code, "trustBrowser": True})
data = json.dumps(device)
try:
self.session.post(
"%s/validateVerificationCode" % self.SETUP_ENDPOINT,
params=self.params,
data=data,
)
except PyiCloudAPIResponseException as error:
if error.code == -21669:
# Wrong verification code
return False
raise
# Re-authenticate, which will both update the HSA data, and
# ensure that we save the X-APPLE-WEBAUTH-HSA-TRUST cookie.
self.authenticate()
return not self.requires_2sa
def _get_webservice_url(self, ws_key):
"""Get webservice URL, raise an exception if not exists."""
if self._webservices.get(ws_key) is None:
raise PyiCloudServiceNotActivatedException(
"Webservice not available", ws_key
)
return self._webservices[ws_key]["url"]
@property
def devices(self):
"""Returns all devices."""
service_root = self._get_webservice_url("findme")
return FindMyiPhoneServiceManager(
service_root, self.session, self.params, self.with_family
)
@property
def iphone(self):
"""Returns the iPhone."""
return self.devices[0]
@property
def account(self):
"""Gets the 'Account' service."""
service_root = self._get_webservice_url("account")
return AccountService(service_root, self.session, self.params)
@property
def files(self):
"""Gets the 'File' service."""
if not self._files:
service_root = self._get_webservice_url("ubiquity")
self._files = UbiquityService(service_root, self.session, self.params)
return self._files
@property
def photos(self):
"""Gets the 'Photo' service."""
if not self._photos:
service_root = self._get_webservice_url("ckdatabasews")
self._photos = PhotosService(service_root, self.session, self.params)
return self._photos
@property
def calendar(self):
"""Gets the 'Calendar' service."""
service_root = self._get_webservice_url("calendar")
return CalendarService(service_root, self.session, self.params)
@property
def contacts(self):
"""Gets the 'Contacts' service."""
service_root = self._get_webservice_url("contacts")
return ContactsService(service_root, self.session, self.params)
@property
def reminders(self):
"""Gets the 'Reminders' service."""
service_root = self._get_webservice_url("reminders")
return RemindersService(service_root, self.session, self.params)
@property
def drive(self):
"""Gets the 'Drive' service."""
if not self._drive:
self._drive = DriveService(
service_root=self._get_webservice_url("drivews"),
document_root=self._get_webservice_url("docws"),
session=self.session,
params=self.params,
)
return self._drive
def __unicode__(self):
return "iCloud API: %s" % self.user.get("apple_id")
def __str__(self):
as_unicode = self.__unicode__()
if PY2:
return as_unicode.encode("utf-8", "ignore")
return as_unicode
def __repr__(self):
return "<%s>" % str(self)
```
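A minimal usage sketch based on the class docstring and the 2SA helpers defined above; the Apple ID and password are placeholders, and a real run needs valid credentials.
```python
# Hypothetical walkthrough of the API defined in base.py above.
# Credentials are placeholders; requires_2sa, trusted_devices,
# send_verification_code and validate_verification_code come from this file.
from pyicloud import PyiCloudService

api = PyiCloudService("user@example.com", "password")

if api.requires_2sa:
    device = api.trusted_devices[0]            # first trusted device
    if not api.send_verification_code(device):
        raise SystemExit("Failed to send verification code")
    code = input("Enter the verification code: ")
    if not api.validate_verification_code(device, code):
        raise SystemExit("Wrong verification code")

# Services are exposed as properties once authenticated.
print(api.devices)             # FindMyiPhoneServiceManager
print(api.iphone.location())   # location of the first device
```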
|
{
"source": "jfc43/eval-transductive-robustness",
"score": 3
}
|
#### File: eval-transductive-robustness/DENT/norm.py
```python
from copy import deepcopy
import torch
import torch.nn as nn
class Norm(nn.Module):
"""Norm adapts a model by estimating feature statistics during testing.
Once equipped with Norm, the model normalizes its features during testing
with batch-wise statistics, just like batch norm does during training.
"""
def __init__(self, model, eps=1e-5, momentum=0.1,
reset_stats=False, no_stats=False):
super().__init__()
self.model = model
self.model = configure_model(model, eps, momentum, reset_stats,
no_stats)
self.model_state = deepcopy(self.model.state_dict())
def forward(self, x):
return self.model(x)
def reset(self):
self.model.load_state_dict(self.model_state, strict=True)
def collect_stats(model):
"""Collect the normalization stats from batch norms.
Walk the model's modules and collect all batch normalization stats.
Return the stats and their names.
"""
stats = []
names = []
for nm, m in model.named_modules():
if isinstance(m, nn.BatchNorm2d):
state = m.state_dict()
if m.affine:
del state['weight'], state['bias']
for ns, s in state.items():
stats.append(s)
names.append(f"{nm}.{ns}")
return stats, names
def configure_model(model, eps, momentum, reset_stats, no_stats):
"""Configure model for adaptation by test-time normalization."""
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
# use batch-wise statistics in forward
m.train()
# configure epsilon for stability, and momentum for updates
m.eps = eps
m.momentum = momentum
if reset_stats:
# reset state to estimate test stats without train stats
m.reset_running_stats()
if no_stats:
# disable state entirely and use only batch stats
m.track_running_stats = False
m.running_mean = None
m.running_var = None
return model
```
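A short sketch of wrapping a classifier with the Norm module above; torchvision's ResNet-18 is only a stand-in model, and the import path assumes the file is importable as `norm`.
```python
# Sketch: test-time normalization with the Norm wrapper defined above.
# torchvision's resnet18 is a stand-in; any model with BatchNorm2d works.
import torch
import torchvision.models as models
from norm import Norm  # assumed import path for the module above

base_model = models.resnet18(num_classes=10)
adapted = Norm(base_model, eps=1e-5, momentum=0.1,
               reset_stats=False, no_stats=False)

x = torch.randn(8, 3, 32, 32)   # a test batch
logits = adapted(x)             # BN layers normalize with batch statistics
adapted.reset()                 # restore the original model state if needed
```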
#### File: RMC/models/resnet.py
```python
import torch
import torch.nn as nn
from torch.autograd import Function
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
import math
class NormalizeLayer(torch.nn.Module):
"""Standardize the channels of a batch of images by subtracting the dataset mean
and dividing by the dataset standard deviation.
"""
def __init__(self, means, sds):
"""
:param means: the channel means
:param sds: the channel standard deviations
"""
super(NormalizeLayer, self).__init__()
self.means = torch.tensor(means).cuda()
self.sds = torch.tensor(sds).cuda()
def forward(self, input):
(batch_size, num_channels, height, width) = input.shape
means = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
sds = self.sds.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
return (input - means)/sds
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion *
planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
# ResNet34
def __init__(self, means, sds, block=BasicBlock, num_blocks=[3, 4, 6, 3], num_classes=10):
super(ResNet, self).__init__()
self.normalize_layer = NormalizeLayer(means, sds)
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def get_feature(self, x):
out = self.normalize_layer(x)
out = F.relu(self.bn1(self.conv1(out)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def forward(self, x):
out = self.normalize_layer(x)
out = F.relu(self.bn1(self.conv1(out)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
```
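A sketch instantiating the ResNet-34 above with the CIFAR-10 channel statistics used later in prepare_augmented_data.py; a CUDA device is required because NormalizeLayer moves its statistics to the GPU, and the import path is assumed.
```python
# Sketch: building the ResNet-34 defined above for CIFAR-10.
# Requires a GPU, since NormalizeLayer calls .cuda() on its statistics.
import torch
from models.resnet import ResNet  # assumed import path

mean = [x / 255.0 for x in [125.3, 123.0, 113.9]]
std = [x / 255.0 for x in [63.0, 62.1, 66.7]]

model = ResNet(mean, std, num_classes=10).cuda()
x = torch.rand(4, 3, 32, 32).cuda()   # CIFAR-10-sized batch
logits = model(x)                     # shape (4, 10)
features = model.get_feature(x)       # shape (4, 512) penultimate features
```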
#### File: eval-transductive-robustness/RMC/prepare_augmented_data.py
```python
import argparse
import os
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torchvision import transforms
from torchvision import datasets
from utils.lib import *
from utils.pgd_attack import *
from models.resnet import ResNet
def test(model, dataloader):
model.eval()
n_correct, n_total = 0, 0
for img, label in iter(dataloader):
batch_size = len(label)
img, label = img.cuda(), label.cuda()
with torch.no_grad():
class_output = model(img)
pred = class_output.data.max(1, keepdim=True)[1]
n_correct += pred.eq(label.data.view_as(pred)).cpu().sum()
n_total += batch_size
acc = n_correct.double() / n_total
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate augmented training dataset and extract features')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model-type', default='nat_model',
choices=['nat_model', 'adv_model'], type=str, help='model type')
parser.add_argument('--save-dir', default='./generate_data/', type=str, help='dir to save data')
parser.add_argument('--model-dir', default='./checkpoints/', type=str, help='dir to saved model')
# args parse
args = parser.parse_args()
# Set random seed
set_seed(args.seed)
model_type = args.model_type
save_dir = os.path.join(args.save_dir, model_type)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(args.model_dir, model_type, "checkpoint.pth")
batch_size = 128
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
mean = [x/255.0 for x in [125.3, 123.0, 113.9]]
std = [x/255.0 for x in [63.0, 62.1, 66.7]]
train_dataset = datasets.CIFAR10('./datasets/cifar10', train=True, download=True, transform=transform_train)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
test_dataset = datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
# Model Setup
model = torch.load(model_path).cuda()
model.eval()
attacker = LinfPGDAttack(model, eps=8/255.0, nb_iter=40,
eps_iter=1/255.0, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=True)
augment_data = []
augment_label = []
for batch_x, batch_y in train_dataloader:
augment_data.extend(batch_x.numpy())
augment_label.extend(batch_y.numpy())
correct = 0.0
count = 0.0
for j in range(4):
for batch_x, batch_y in train_dataloader:
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
adv_batch_x = attacker.perturb(batch_x, batch_y)
augment_data.extend(adv_batch_x.cpu().numpy())
augment_label.extend(batch_y.cpu().numpy())
with torch.no_grad():
outputs = model(adv_batch_x)
preds = torch.argmax(outputs, axis=1)
correct += torch.sum(preds==batch_y)
count += batch_x.shape[0]
print("Adv acc: {:.2f}%".format((correct/count)*100))
augment_data = np.array(augment_data)
augment_label = np.array(augment_label)
np.save(os.path.join(save_dir, "augment_data.npy"), augment_data)
np.save(os.path.join(save_dir, "augment_label.npy"), augment_label)
augment_data = torch.Tensor(augment_data)
augment_label = torch.Tensor(augment_label).long()
augment_dataset = TensorDataset(augment_data, augment_label)
augment_dataloader = DataLoader(augment_dataset, batch_size=batch_size, shuffle=False)
augment_features = []
for batch_x, batch_y in augment_dataloader:
batch_x = batch_x.cuda()
with torch.no_grad():
feature = model.get_feature(batch_x)
augment_features.extend(feature.cpu().numpy())
augment_features = np.array(augment_features)
np.save(os.path.join(save_dir, "augment_feature.npy"), augment_features)
```
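For reference, a hedged sketch of reading back the artifacts this script writes, following the default --save-dir/--model-type paths above; the feature dimensionality comes from the ResNet's get_feature output.
```python
# Sketch: loading the files written by prepare_augmented_data.py
# (paths follow the script's default arguments; adjust as needed).
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

save_dir = "./generate_data/nat_model"
augment_data = torch.Tensor(np.load(f"{save_dir}/augment_data.npy"))
augment_label = torch.Tensor(np.load(f"{save_dir}/augment_label.npy")).long()
augment_feature = np.load(f"{save_dir}/augment_feature.npy")  # (N, 512)

loader = DataLoader(TensorDataset(augment_data, augment_label),
                    batch_size=128, shuffle=False)
print(len(loader.dataset), augment_feature.shape)
```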
#### File: TADV/attacks/gmsa_attack.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
class GMSAMINLinfPGDAttack:
"""
GMSA-MIN Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
:param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, models, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.models = models
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y):
min_loss = None
for model in self.models:
outputs = model(x)
curr_loss = self.loss_func(outputs, y)
if min_loss is None:
min_loss = curr_loss
else:
cond = curr_loss.data < min_loss.data
min_loss[cond] = curr_loss[cond]
return min_loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
for model in self.models:
model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
with torch.no_grad():
loss = self.get_loss(x, y)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter*len(self.models)):
adv_x = x + delta
loss = self.get_loss(adv_x, y)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
loss.mean().backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
with torch.no_grad():
loss = self.get_loss(adv_x, y)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
class GMSAAVGLinfPGDAttack:
"""
GMSA-AVG Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
:param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, models, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.models = models
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y, update=False):
loss = 0.0
for model in self.models:
outputs = model(x)
if self.targeted:
target = ((y + torch.randint(1, self.num_classes, y.shape).cuda()) % self.num_classes).long()
curr_loss = -self.loss_func(outputs, target)
else:
curr_loss = self.loss_func(outputs, y)
if update:
curr_loss.mean().backward()
loss += curr_loss.data
return loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
for model in self.models:
model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
with torch.no_grad():
loss = self.get_loss(x, y, update=False)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter):
adv_x = x + delta
loss = self.get_loss(adv_x, y, update=True)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
with torch.no_grad():
loss = self.get_loss(adv_x, y, update=False)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
class LinfPGDAttack:
"""
PGD Attack with order=Linf
:param eps: maximum distortion.
:param nb_iter: number of iterations.
:param eps_iter: attack step size.
:param rand_init: (optional bool) random initialization.
:param clip_min: minimum value per input dimension.
:param clip_max: maximum value per input dimension.
:param targeted: if the attack is targeted.
"""
def __init__(
self, model, eps=0.1, nb_iter=100,
eps_iter=0.01, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=False):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.targeted = targeted
self.elementwise_best = elementwise_best
self.model = model
self.num_classes = num_classes
self.loss_func = nn.CrossEntropyLoss(reduction='none')
self.clip_min = clip_min
self.clip_max = clip_max
def get_loss(self, x, y):
outputs = self.model(x)
if self.targeted:
target = ((y + torch.randint(1, self.num_classes, y.shape).cuda()) % self.num_classes).long()
loss = -self.loss_func(outputs, target)
else:
loss = self.loss_func(outputs, y)
return loss
def perturb(self, x, y):
"""
Given examples (x, y), returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:param y: label tensor.
:return: tensor containing perturbed inputs.
"""
self.model.eval()
x = x.detach().clone()
y = y.detach().clone()
y = y.cuda()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
if self.elementwise_best:
loss = self.get_loss(x, y)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-self.eps, self.eps)
delta.data = (torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data)
for ii in range(self.nb_iter):
adv_x = x + delta
loss = self.get_loss(adv_x, y)
if self.elementwise_best:
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
loss.mean().backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = torch.clamp(x.data + delta.data, min=self.clip_min, max=self.clip_max) - x.data
delta.grad.data.zero_()
if self.elementwise_best:
adv_x = x + delta
loss = self.get_loss(adv_x, y)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = x + worst_perb
else:
adv_x = x + delta.data
return adv_x
```
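A sketch of running the GMSA-MIN attack above against a small ensemble; the tiny CNNs are untrained stand-ins (a real evaluation would load trained checkpoints), the import path is assumed, and eps/eps_iter follow the 8/255 and 1/255 convention used elsewhere in this dump.
```python
# Sketch: GMSA-MIN attack on an ensemble of stand-in classifiers.
import torch
import torch.nn as nn
from attacks.gmsa_attack import GMSAMINLinfPGDAttack  # assumed import path

def make_model():
    # Untrained toy classifier for illustration only.
    return nn.Sequential(
        nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Linear(16, 10)).cuda()

models = [make_model() for _ in range(3)]
attacker = GMSAMINLinfPGDAttack(
    models, eps=8 / 255.0, nb_iter=10, eps_iter=1 / 255.0,
    rand_init=True, clip_min=0.0, clip_max=1.0,
    num_classes=10, elementwise_best=True)

x = torch.rand(8, 3, 32, 32).cuda()
y = torch.randint(0, 10, (8,)).cuda()
x_adv = attacker.perturb(x, y)   # maximizes the minimum loss over the ensemble
```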
#### File: TADV/models/fixed_lenet.py
```python
import torch
import utils.torch
from .classifier import Classifier
import torch.nn as nn
class FixedLeNet(Classifier):
"""
Fixed LeNet architecture, intended for MNIST-sized inputs only.
"""
def __init__(self, N_class, resolution=(1, 28, 28), **kwargs):
"""
Initialize classifier.
:param N_class: number of classes to classify
:type N_class: int
:param resolution: input resolution as (channels, height, width)
:type resolution: tuple
"""
assert resolution[0] == 1
assert resolution[1] == 28
assert resolution[2] == 28
super(FixedLeNet, self).__init__(N_class, resolution, **kwargs)
self.append_layer('0', nn.Conv2d(resolution[0], 32, 5, padding=2))
self.append_layer('1', nn.ReLU())
self.append_layer('2', nn.MaxPool2d(2, 2))
self.append_layer('3', nn.Conv2d(32, 64, 5, padding=2))
self.append_layer('4', nn.ReLU())
self.append_layer('5', nn.MaxPool2d(2, 2))
self.append_layer('6', utils.torch.Flatten())
self.append_layer('7', nn.Linear(7 * 7 * 64, 1024))
self.append_layer('8', nn.ReLU())
self.append_layer('9', nn.Linear(1024, self.N_class))
```
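A small sketch of instantiating FixedLeNet; this assumes the Classifier base class (not shown in this excerpt) applies the layers registered via append_layer in order, and the import path is illustrative.
```python
# Sketch: forward pass through FixedLeNet on an MNIST-sized batch.
# Assumes Classifier.forward runs the layers added via append_layer.
import torch
from models.fixed_lenet import FixedLeNet  # assumed import path

model = FixedLeNet(10)                # 10 MNIST classes
x = torch.randn(2, 1, 28, 28)         # resolution asserted in __init__
logits = model(x)                     # expected shape (2, 10)
```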
#### File: TADV/utils/torch.py
```python
import torch
import numpy
import scipy.ndimage
import math
import random
def set_seed(seed):
"""Sets seed"""
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
numpy.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def is_cuda(mixed):
"""
Check if model/tensor is on CUDA.
:param mixed: model or tensor
:type mixed: torch.nn.Module or torch.autograd.Variable or torch.Tensor
:return: on cuda
:rtype: bool
"""
assert isinstance(mixed, torch.nn.Module) or isinstance(mixed, torch.autograd.Variable) \
or isinstance(mixed, torch.Tensor), 'mixed has to be torch.nn.Module, torch.autograd.Variable or torch.Tensor'
is_cuda = False
if isinstance(mixed, torch.nn.Module):
is_cuda = True
for parameters in list(mixed.parameters()):
is_cuda = is_cuda and parameters.is_cuda
if isinstance(mixed, torch.autograd.Variable):
is_cuda = mixed.is_cuda
if isinstance(mixed, torch.Tensor):
is_cuda = mixed.is_cuda
return is_cuda
def binary_labels(classes):
"""
Convert 0,1 labels to -1,1 labels.
:param classes: classes as B x 1
:type classes: torch.autograd.Variable or torch.Tensor
"""
classes[classes == 0] = -1
return classes
def one_hot(classes, C):
"""
Convert class labels to one-hot vectors.
:param classes: classes as B x 1
:type classes: torch.autograd.Variable or torch.Tensor
:param C: number of classes
:type C: int
:return: one hot vector as B x C
:rtype: torch.autograd.Variable or torch.Tensor
"""
assert isinstance(classes, torch.autograd.Variable) or isinstance(classes, torch.Tensor), 'classes needs to be torch.autograd.Variable or torch.Tensor'
assert len(classes.size()) == 2 or len(classes.size()) == 1, 'classes needs to have rank 2 or 1'
assert C > 0
if len(classes.size()) < 2:
classes = classes.view(-1, 1)
one_hot = torch.Tensor(classes.size(0), C)
if is_cuda(classes):
one_hot = one_hot.cuda()
if isinstance(classes, torch.autograd.Variable):
one_hot = torch.autograd.Variable(one_hot)
one_hot.zero_()
one_hot.scatter_(1, classes, 1)
return one_hot
def tensor_or_value(mixed):
"""
Get tensor or single value.
:param mixed: variable, tensor or value
:type mixed: mixed
:return: tensor or value
:rtype: torch.Tensor or value
"""
if isinstance(mixed, torch.Tensor):
if mixed.numel() > 1:
return mixed
else:
return mixed.item()
elif isinstance(mixed, torch.autograd.Variable):
return tensor_or_value(mixed.cpu().data)
else:
return mixed
def as_variable(mixed, cuda=False, requires_grad=False):
"""
Get a tensor or numpy array as variable.
:param mixed: input tensor
:type mixed: torch.Tensor or numpy.ndarray
:param cuda: whether to move the variable to the GPU
:type cuda: bool
:param requires_grad: gradients
:type requires_grad: bool
:return: variable
:rtype: torch.autograd.Variable
"""
assert isinstance(mixed, numpy.ndarray) or isinstance(mixed, torch.Tensor), 'input needs to be numpy.ndarray or torch.Tensor'
if isinstance(mixed, numpy.ndarray):
mixed = torch.from_numpy(mixed)
if cuda:
mixed = mixed.cuda()
return torch.autograd.Variable(mixed, requires_grad)
def tile(a, dim, n_tile):
"""
Numpy-like tiling in torch.
:param a: tensor
:type a: torch.Tensor or torch.autograd.Variable
:param dim: dimension to tile
:type dim: int
:param n_tile: number of tiles
:type n_tile: int
:return: tiled tensor
:rtype: torch.Tensor or torch.autograd.Variable
"""
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = n_tile
a = a.repeat(*(repeat_idx))
order_index = torch.LongTensor(numpy.concatenate([init_dim * numpy.arange(n_tile) + i for i in range(init_dim)]))
if is_cuda(a):
order_index = order_index.cuda()
return torch.index_select(a, dim, order_index)
def expand_as(tensor, tensor_as):
"""
Expands the tensor using view to allow broadcasting.
:param tensor: input tensor
:type tensor: torch.Tensor or torch.autograd.Variable
:param tensor_as: reference tensor
:type tensor_as: torch.Tensor or torch.autograd.Variable
:return: tensor expanded with singleton dimensions as tensor_as
:rtype: torch.Tensor or torch.autograd.Variable
"""
view = list(tensor.size())
for i in range(len(tensor.size()), len(tensor_as.size())):
view.append(1)
return tensor.view(view)
def get_exponential_scheduler(optimizer, batches_per_epoch, gamma=0.97):
"""
Get exponential scheduler.
Note that the resulting optimizer's step function is called after each batch!
:param optimizer: optimizer
:type optimizer: torch.optim.Optimizer
:param batches_per_epoch: number of batches per epoch
:type batches_per_epoch: int
:param gamma: gamma
:type gamma: float
:return: scheduler
:rtype: torch.optim.lr_scheduler.LRScheduler
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda epoch: gamma ** math.floor(epoch/batches_per_epoch)])
def classification_error(logits, targets, reduction='mean'):
"""
Classification error (fraction of misclassified samples).
:param logits: predicted classes
:type logits: torch.autograd.Variable
:param targets: target classes
:type targets: torch.autograd.Variable
:param reduction: reduction type ('mean', 'sum' or 'none')
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert logits.size()[0] == targets.size()[0]
assert len(list(targets.size())) == 1# or (len(list(targets.size())) == 2 and targets.size(1) == 1)
assert len(list(logits.size())) == 2
if logits.size()[1] > 1:
values, indices = torch.max(torch.nn.functional.softmax(logits, dim=1), dim=1)
else:
indices = torch.round(torch.sigmoid(logits)).view(-1)
errors = torch.clamp(torch.abs(indices.long() - targets.long()), max=1)
if reduction == 'mean':
return torch.mean(errors.float())
elif reduction == 'sum':
return torch.sum(errors.float())
else:
return errors
def f7p_loss(logits, true_classes, reduction='mean'):
if logits.size(1) > 1:
current_probabilities = torch.nn.functional.softmax(logits, dim=1)
current_probabilities = current_probabilities * (1 - one_hot(true_classes, current_probabilities.size(1)))
loss = torch.max(current_probabilities, dim=1)[0]
else:
loss = true_classes.float()*(1 - torch.nn.functional.sigmoid(logits.view(-1))) + (1 - true_classes.float())*(torch.nn.functional.sigmoid(logits.view(-1)))
if reduction == "none":
return loss
elif reduction == "mean":
return torch.mean(loss)
elif reduction == "sum":
return torch.sum(loss)
else:
raise ValueError
def classification_loss(logits, targets, reduction='mean'):
"""
Calculates either the multi-class or binary cross-entropy loss.
:param logits: predicted classes
:type logits: torch.autograd.Variable
:param targets: target classes
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
if logits.size()[1] > 1:
return torch.nn.functional.cross_entropy(logits, targets, reduction=reduction)
else:
# probability 1 is class 1
# probability 0 is class 0
return torch.nn.functional.binary_cross_entropy(torch.sigmoid(logits).view(-1), targets.float(), reduction=reduction)
def step_function_perturbation(perturb, epsilon_0, alpha=1e-4, norm_type='inf', smooth_approx=False, temperature=0.01):
"""
Step function applied to the perturbation norm. By default, it computes the exact step function which is not
differentiable. If a smooth approximation based on the sigmoid function is needed, set `smooth_approx=True` and set
the `temperature` to a suitably small value.
:param perturb: Torch Tensor with the perturbation. Can be a tensor of shape `(b, d1, ...)`, where `b` is the batch
size and the rest are dimensions. Can also be a single vector of shape `[d]`.
:param epsilon_0: Radius of the smaller perturbation ball - a small positive value.
:param alpha: Small negative offset for the step function. The step function's value is `-alpha` when the
perturbation norm is less than `epsilon_0`.
:param norm_type: Type of norm: 'inf' for infinity norm, '1', '2' etc for the other types of norm.
:param smooth_approx: Set to True to get a sigmoid-based approximation of the step function.
:param temperature: small positive value that controls the steepness of the sigmoid approximation.
:returns: tensor of function values computed for each element in the batch. Has shape `[b]`.
"""
assert isinstance(perturb, (torch.Tensor, torch.autograd.Variable)), ("Input 'perturb' should be of type "
"torch.Tensor or torch.autograd.Variable")
s = perturb.shape
dim = 1
if len(s) > 2:
perturb = perturb.view(s[0], -1) # flatten into a vector
elif len(s) == 1:
# single vector
dim = None
if norm_type == 'inf':
norm_type = float('inf')
elif not isinstance(norm_type, (int, float)):
# example: norm_type = '2'
norm_type = int(norm_type)
norm_val = torch.linalg.vector_norm(perturb, ord=norm_type, dim=dim)
if not smooth_approx:
return torch.where(norm_val <= epsilon_0, -1. * alpha, 1.)
else:
return torch.sigmoid((1. / temperature) * (norm_val - epsilon_0)) - alpha
def ramp_function_perturbation(perturb, epsilon_0, epsilon, alpha=1e-4, norm_type='inf'):
"""
Ramp function applied to the perturbation norm as defined in the paper.
:param perturb: Torch Tensor with the perturbation. Can be a tensor of shape `(b, d1, ...)`, where `b` is the batch
size and the rest are dimensions. Can also be a single vector of shape `(d)`.
:param epsilon_0: Radius of the smaller perturbation ball - a small positive value.
:param epsilon: Radius of the larger perturbation ball. Should be >= `epsilon_0`.
:param alpha: Small negative offset for the step function. The step function's value is `-alpha` when the
perturbation norm is less than `epsilon_0`.
:param norm_type: Type of norm: 'inf' for infinity norm, '1', '2' etc for the other types of norm.
:returns: tensor of function values computed for each element in the batch. Has shape `[b]`.
"""
assert isinstance(perturb, (torch.Tensor, torch.autograd.Variable)), ("Input 'perturb' should be of type "
"torch.Tensor or torch.autograd.Variable")
assert epsilon >= epsilon_0, "Value of 'epsilon' cannot be smaller than 'epsilon_0'"
s = perturb.shape
dim = 1
if len(s) > 2:
perturb = perturb.view(s[0], -1) # flatten into a vector
elif len(s) == 1:
# single vector
dim = None
if norm_type == 'inf':
norm_type = float('inf')
elif not isinstance(norm_type, (int, float)):
# example: norm_type = '2'
norm_type = int(norm_type)
norm_val = torch.linalg.vector_norm(perturb, ord=norm_type, dim=dim)
temp = torch.maximum(norm_val - epsilon_0, torch.zeros_like(norm_val))
return ((1. + alpha) / (epsilon - epsilon_0)) * temp - alpha
def max_p_loss(logits, targets=None, reduction='mean'):
"""
Maximum softmax probability loss.
:param logits: predicted classes
:type logits: torch.autograd.Variable
:param targets: target classes
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
max_log = torch.max(torch.nn.functional.softmax(logits, dim=1), dim=1)[0]
if reduction == 'mean':
return torch.mean(max_log)
elif reduction == 'sum':
return torch.sum(max_log)
else:
return max_log
def max_log_loss(logits, targets=None, reduction='mean'):
"""
Maximum log-softmax loss.
:param logits: predicted classes
:type logits: torch.autograd.Variable
:param targets: target classes
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
max_log = torch.max(torch.nn.functional.log_softmax(logits, dim=1), dim=1)[0]
if reduction == 'mean':
return torch.mean(max_log)
elif reduction == 'sum':
return torch.sum(max_log)
else:
return max_log
def cross_entropy_divergence(logits, targets, reduction='mean'):
"""
Cross-entropy divergence between logits and target distributions.
:param logits: predicted logits
:type logits: torch.autograd.Variable
:param targets: target distributions
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert len(list(logits.size())) == len(list(targets.size()))
assert logits.size()[0] == targets.size()[0]
assert logits.size()[1] == targets.size()[1]
assert logits.size()[1] > 1
divergences = torch.sum(- targets * torch.nn.functional.log_softmax(logits, dim=1), dim=1)
if reduction == 'mean':
return torch.mean(divergences)
elif reduction == 'sum':
return torch.sum(divergences)
else:
return divergences
class View(torch.nn.Module):
"""
Simple view layer.
"""
def __init__(self, *args):
"""
Constructor.
:param args: shape
:type args: [int]
"""
super(View, self).__init__()
self.shape = args
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return input.view(self.shape)
class Flatten(torch.nn.Module):
"""
Flatten module.
"""
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return input.view(input.shape[0], -1)
class Clamp(torch.nn.Module):
"""
Wrapper for clamp.
"""
def __init__(self, min=0, max=1):
"""
Constructor.
"""
super(Clamp, self).__init__()
self.min = min
""" (float) Min value. """
self.max = max
""" (float) Max value. """
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return torch.clamp(torch.clamp(input, min=self.min), max=self.max)
class Scale(torch.nn.Module):
"""
Simple scaling layer, mainly to allow simple saving and loading.
"""
def __init__(self, shape):
"""
Constructor.
:param shape: shape
:type shape: [int]
"""
super(Scale, self).__init__()
self.weight = torch.nn.Parameter(torch.zeros(shape)) # min
self.bias = torch.nn.Parameter(torch.ones(shape)) # max
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return expand_as(self.weight, input) + torch.mul(expand_as(self.bias, input) - expand_as(self.weight, input), input)
class Entropy(torch.nn.Module):
"""
Entropy computation based on logits.
"""
def __init__(self):
"""
Constructor.
"""
super(Entropy, self).__init__()
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return -1.*torch.sum(torch.nn.functional.softmax(input, dim=1) * torch.nn.functional.log_softmax(input, dim=1))
class Normalize(torch.nn.Module):
"""
Normalization layer to be learned.
"""
def __init__(self, n_channels):
"""
Constructor.
:param n_channels: number of channels
:type n_channels: int
"""
super(Normalize, self).__init__()
self.weight = torch.nn.Parameter(torch.ones(n_channels))
self.bias = torch.nn.Parameter(torch.zeros(n_channels))
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return (input - self.bias.view(1, -1, 1, 1))/self.weight.view(1, -1, 1, 1)
class GaussianLayer(torch.nn.Module):
"""
Gaussian convolution.
"""
def __init__(self, sigma=3, channels=3):
"""
"""
super(GaussianLayer, self).__init__()
self.sigma = sigma
""" (float) Sigma. """
padding = math.ceil(self.sigma)
kernel = 2*padding + 1
self.seq = torch.nn.Sequential(
torch.nn.ReflectionPad2d((padding, padding, padding, padding)),
torch.nn.Conv2d(channels, channels, kernel, stride=1, padding=0, bias=None, groups=channels)
)
n = numpy.zeros((kernel, kernel))
n[padding, padding] = 1
k = scipy.ndimage.gaussian_filter(n, sigma=self.sigma)
for name, f in self.named_parameters():
f.data.copy_(torch.from_numpy(k))
def forward(self, input):
"""
Forward pass.
:param input: input
:type input: torch.autograd.Variable
:return: output
:rtype: torch.autograd.Variable
"""
return self.seq(input)
```
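A few of the helpers above exercised on toy tensors; the import path is assumed, and step_function_perturbation / ramp_function_perturbation need a reasonably recent PyTorch for torch.linalg.vector_norm and scalar torch.where.
```python
# Sketch: exercising one_hot, tile, and the perturbation-norm functions above.
import torch
from utils.torch import (one_hot, tile, step_function_perturbation,
                         ramp_function_perturbation)  # assumed import path

classes = torch.tensor([0, 2, 1])
print(one_hot(classes, 3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])

a = torch.tensor([[1, 2], [3, 4]])
print(tile(a, 0, 2))   # rows repeated in place: [1,2],[1,2],[3,4],[3,4]

perturb = 0.03 * torch.randn(5, 3, 32, 32)
print(step_function_perturbation(perturb, epsilon_0=8 / 255))   # shape [5]
print(ramp_function_perturbation(perturb, epsilon_0=8 / 255, epsilon=16 / 255))
```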
#### File: URejectron/utils/data_util.py
```python
import os
import pickle
import sys
import csv
import matplotlib.pyplot as plt
from PIL import Image
version = sys.version_info
import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
import skimage.data
import scipy.io as sio
import cv2
def image_brightness_normalisation(image):
image[:,:,0] = cv2.equalizeHist(image[:,:,0])
image[:,:,1] = cv2.equalizeHist(image[:,:,1])
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
return image
def preprocess_data(X):
for i in range(len(X)):
X[i,:,:,:] = image_brightness_normalisation(X[i,:,:,:])
return X
def get_gtsrb_data(path):
loaded = np.load(os.path.join(path, 'train.npz'))
train_images = loaded['images']
train_images = preprocess_data(train_images)
train_labels = loaded['labels']
return (train_images, train_labels)
class MyDataset(Dataset):
def __init__(self, data, targets, transform=None):
self.data = data
self.targets = torch.LongTensor(targets)
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
y = self.targets[index]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.data)
class MyDatasetNoLabel(Dataset):
def __init__(self, data, transform=None):
self.data = data
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
if self.transform:
x = self.transform(x)
return x
def __len__(self):
return len(self.data)
```
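A sketch of feeding an in-memory array through MyDataset with a torchvision transform; the random uint8 array stands in for the images returned by get_gtsrb_data, and the import path is assumed.
```python
# Sketch: wrapping raw arrays with MyDataset and a ToTensor transform.
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from utils.data_util import MyDataset  # assumed import path

images = np.random.randint(0, 256, size=(100, 32, 32, 3), dtype=np.uint8)
labels = np.random.randint(0, 43, size=100)   # GTSRB has 43 classes

dataset = MyDataset(images, labels, transform=transforms.ToTensor())
loader = DataLoader(dataset, batch_size=16, shuffle=True)

x, y = next(iter(loader))
print(x.shape, y.shape)   # torch.Size([16, 3, 32, 32]) torch.Size([16])
```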
|
{
"source": "jfc43/informative-outlier-mining",
"score": 2
}
|
#### File: jfc43/informative-outlier-mining/eval_ood_detection.py
```python
from __future__ import print_function
import argparse
import os
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from sklearn.linear_model import LogisticRegressionCV
import models.densenet as dn
import models.wideresnet as wn
import models.gmm as gmmlib
import utils.svhn_loader as svhn
import numpy as np
import time
from scipy import misc
from utils import OODScoreLinfPGDAttack, ConfidenceLinfPGDAttack, MahalanobisLinfPGDAttack, SOFLLinfPGDAttack, metric, sample_estimator, get_Mahalanobis_score, gen_corruction_image
parser = argparse.ArgumentParser(description='Pytorch Detecting Out-of-distribution examples in neural networks')
parser.add_argument('--in-dataset', default="CIFAR-10", type=str, help='in-distribution dataset')
parser.add_argument('--name', required=True, type=str, help='the name of the model trained')
parser.add_argument('--model-arch', default='densenet', type=str, help='model architecture')
parser.add_argument('--gpu', default = '0', type = str, help='gpu index')
parser.add_argument('--adv', help='L_inf OOD', action='store_true')
parser.add_argument('--corrupt', help='corrupted OOD', action='store_true')
parser.add_argument('--adv-corrupt', help='comp. OOD', action='store_true')
parser.add_argument('--in-dist-only', help='only evaluate in-distribution', action='store_true')
parser.add_argument('--out-dist-only', help='only evaluate out-distribution', action='store_true')
parser.add_argument('--method', default='msp', type=str, help='scoring function')
parser.add_argument('--cal-metric', help='calculate metric directly', action='store_true')
parser.add_argument('--epsilon', default=8.0, type=float, help='epsilon')
parser.add_argument('--iters', default=40, type=int,
help='attack iterations')
parser.add_argument('--iter-size', default=1.0, type=float, help='attack step size')
parser.add_argument('--severity-level', default=5, type=int, help='severity level')
parser.add_argument('--epochs', default=100, type=int,
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=50, type=int,
help='mini-batch size')
parser.add_argument('--base-dir', default='output/ood_scores', type=str, help='result directory')
parser.add_argument('--layers', default=100, type=int,
help='total number of layers (default: 100)')
parser.add_argument('--depth', default=40, type=int,
help='depth of resnet')
parser.add_argument('--width', default=4, type=int,
help='width of resnet')
parser.set_defaults(argument=True)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(1)
torch.cuda.manual_seed(1)
np.random.seed(1)
def get_msp_score(inputs, model, method_args):
with torch.no_grad():
outputs = model(inputs)
scores = np.max(F.softmax(outputs, dim=1).detach().cpu().numpy(), axis=1)
return scores
def get_sofl_score(inputs, model, method_args):
num_classes = method_args['num_classes']
with torch.no_grad():
outputs = model(inputs)
scores = -F.softmax(outputs, dim=1)[:, num_classes:].sum(dim=1).detach().cpu().numpy()
return scores
def get_rowl_score(inputs, model, method_args, raw_score=False):
num_classes = method_args['num_classes']
with torch.no_grad():
outputs = model(inputs)
if raw_score:
scores = -1.0 * F.softmax(outputs, dim=1)[:, num_classes].float().detach().cpu().numpy()
else:
scores = -1.0 * (outputs.argmax(dim=1)==num_classes).float().detach().cpu().numpy()
return scores
def get_ood_score(inputs, model, method_args):
with torch.no_grad():
outputs = model(inputs)
scores = -1.0 * (F.softmax(outputs, dim=1)[:,-1]).float().detach().cpu().numpy()
return scores
def get_odin_score(inputs, model, method_args):
# Calculating the perturbation we need to add, that is,
# the sign of gradient of cross entropy loss w.r.t. input
temper = method_args['temperature']
noiseMagnitude1 = method_args['magnitude']
criterion = nn.CrossEntropyLoss()
inputs = Variable(inputs, requires_grad = True)
outputs = model(inputs)
maxIndexTemp = np.argmax(outputs.data.cpu().numpy(), axis=1)
# Using temperature scaling
outputs = outputs / temper
labels = Variable(torch.LongTensor(maxIndexTemp).cuda())
loss = criterion(outputs, labels)
loss.backward()
# Normalizing the gradient to binary in {0, 1}
gradient = torch.ge(inputs.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
# Adding small perturbations to images
tempInputs = torch.add(inputs.data, -noiseMagnitude1, gradient)
outputs = model(Variable(tempInputs))
outputs = outputs / temper
# Calculating the confidence after adding perturbations
nnOutputs = outputs.data.cpu()
nnOutputs = nnOutputs.numpy()
nnOutputs = nnOutputs - np.max(nnOutputs, axis=1, keepdims=True)
nnOutputs = np.exp(nnOutputs) / np.sum(np.exp(nnOutputs), axis=1, keepdims=True)
scores = np.max(nnOutputs, axis=1)
return scores
def get_mahalanobis_score(inputs, model, method_args):
num_classes = method_args['num_classes']
sample_mean = method_args['sample_mean']
precision = method_args['precision']
magnitude = method_args['magnitude']
regressor = method_args['regressor']
num_output = method_args['num_output']
Mahalanobis_scores = get_Mahalanobis_score(inputs, model, num_classes, sample_mean, precision, num_output, magnitude)
scores = -regressor.predict_proba(Mahalanobis_scores)[:, 1]
return scores
def get_score(inputs, model, method, method_args, raw_score=False):
if method == "msp":
scores = get_msp_score(inputs, model, method_args)
elif method == "odin":
scores = get_odin_score(inputs, model, method_args)
elif method == "mahalanobis":
scores = get_mahalanobis_score(inputs, model, method_args)
elif method == "sofl":
scores = get_sofl_score(inputs, model, method_args)
elif method == "rowl":
scores = get_rowl_score(inputs, model, method_args, raw_score)
elif method == "ntom":
scores = get_ood_score(inputs, model, method_args)
elif method == "atom":
scores = get_ood_score(inputs, model, method_args)
return scores
def corrupt_attack(x, model, method, method_args, in_distribution, severity_level = 5):
x = x.detach().clone()
scores = get_score(x, model, method, method_args, raw_score=True)
worst_score = scores.copy()
worst_x = x.clone()
xs = gen_corruction_image(x.cpu(), severity_level)
for curr_x in xs:
curr_x = curr_x.cuda()
scores = get_score(curr_x, model, method, method_args, raw_score=True)
if in_distribution:
cond = scores < worst_score
else:
cond = scores > worst_score
worst_score[cond] = scores[cond]
worst_x[cond] = curr_x[cond]
return worst_x
def eval_ood_detector(base_dir, in_dataset, out_datasets, batch_size, method, method_args, name, epochs, adv, corrupt, adv_corrupt, adv_args, mode_args):
if adv:
in_save_dir = os.path.join(base_dir, in_dataset, method, name, 'adv', str(int(adv_args['epsilon'])))
elif adv_corrupt:
in_save_dir = os.path.join(base_dir, in_dataset, method, name, 'adv_corrupt', str(int(adv_args['epsilon'])))
elif corrupt:
in_save_dir = os.path.join(base_dir, in_dataset, method, name, 'corrupt')
else:
in_save_dir = os.path.join(base_dir, in_dataset, method, name, 'nat')
if not os.path.exists(in_save_dir):
os.makedirs(in_save_dir)
transform = transforms.Compose([
transforms.ToTensor(),
])
if in_dataset == "CIFAR-10":
normalizer = transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255.0, 66.7/255.0))
testset = torchvision.datasets.CIFAR10(root='./datasets/cifar10', train=False, download=True, transform=transform)
testloaderIn = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=True, num_workers=2)
num_classes = 10
num_reject_classes = 5
elif in_dataset == "CIFAR-100":
normalizer = transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255.0, 66.7/255.0))
testset = torchvision.datasets.CIFAR100(root='./datasets/cifar100', train=False, download=True, transform=transform)
testloaderIn = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=True, num_workers=2)
num_classes = 100
num_reject_classes = 10
elif in_dataset == "SVHN":
normalizer = None
testset = svhn.SVHN('datasets/svhn/', split='test',
transform=transforms.ToTensor(), download=False)
testloaderIn = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=True, num_workers=2)
num_classes = 10
num_reject_classes = 5
if method != "sofl":
num_reject_classes = 0
if method == "rowl" or method == "atom" or method=="ntom":
num_reject_classes = 1
method_args['num_classes'] = num_classes
if args.model_arch == 'densenet':
model = dn.DenseNet3(args.layers, num_classes + num_reject_classes, normalizer=normalizer)
elif args.model_arch == 'wideresnet':
model = wn.WideResNet(args.depth, num_classes + num_reject_classes, widen_factor=args.width, normalizer=normalizer)
elif args.model_arch == 'densenet_ccu':
model = dn.DenseNet3(args.layers, num_classes + num_reject_classes, normalizer=normalizer)
gmm = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'in_gmm.pth.tar')
gmm.alpha = nn.Parameter(gmm.alpha)
gmm_out = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'out_gmm.pth.tar')
gmm_out.alpha = nn.Parameter(gmm.alpha)
whole_model = gmmlib.DoublyRobustModel(model, gmm, gmm_out, loglam = 0., dim=3072, classes=num_classes)
elif args.model_arch == 'wideresnet_ccu':
model = wn.WideResNet(args.depth, num_classes + num_reject_classes, widen_factor=args.width, normalizer=normalizer)
gmm = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'in_gmm.pth.tar')
gmm.alpha = nn.Parameter(gmm.alpha)
gmm_out = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'out_gmm.pth.tar')
gmm_out.alpha = nn.Parameter(gmm.alpha)
whole_model = gmmlib.DoublyRobustModel(model, gmm, gmm_out, loglam = 0., dim=3072, classes=num_classes)
else:
assert False, 'Not supported model arch: {}'.format(args.model_arch)
checkpoint = torch.load("./checkpoints/{in_dataset}/{name}/checkpoint_{epochs}.pth.tar".format(in_dataset=in_dataset, name=name, epochs=epochs))
if args.model_arch == 'densenet_ccu' or args.model_arch == 'wideresnet_ccu':
whole_model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
model.eval()
model.cuda()
if method == "mahalanobis":
temp_x = torch.rand(2,3,32,32)
temp_x = Variable(temp_x).cuda()
temp_list = model.feature_list(temp_x)[1]
num_output = len(temp_list)
method_args['num_output'] = num_output
if adv or adv_corrupt:
epsilon = adv_args['epsilon']
iters = adv_args['iters']
iter_size = adv_args['iter_size']
if method == "msp" or method == "odin":
            attack_out = ConfidenceLinfPGDAttack(model, eps=epsilon, nb_iter=iters,
                eps_iter=iter_size, rand_init=True, clip_min=0., clip_max=1., num_classes = num_classes)
        elif method == "mahalanobis":
            attack_out = MahalanobisLinfPGDAttack(model, eps=epsilon, nb_iter=iters,
                eps_iter=iter_size, rand_init=True, clip_min=0., clip_max=1., num_classes = num_classes,
                sample_mean = method_args['sample_mean'], precision = method_args['precision'],
                num_output = num_output, regressor = method_args['regressor'])
elif method == "sofl":
attack_out = SOFLLinfPGDAttack(model, eps=epsilon, nb_iter=iters,
eps_iter=iter_size, rand_init=True, clip_min=0., clip_max=1.,
num_classes = num_classes, num_reject_classes=num_reject_classes)
elif method == "rowl":
attack_out = OODScoreLinfPGDAttack(model, eps=epsilon, nb_iter=iters,
eps_iter=iter_size, rand_init=True, clip_min=0., clip_max=1.,
num_classes = num_classes)
elif method == "atom" or method=="ntom":
attack_out = OODScoreLinfPGDAttack(model, eps=epsilon, nb_iter=iters,
eps_iter=iter_size, rand_init=True, clip_min=0., clip_max=1.,
num_classes = num_classes)
if not mode_args['out_dist_only']:
t0 = time.time()
f1 = open(os.path.join(in_save_dir, "in_scores.txt"), 'w')
g1 = open(os.path.join(in_save_dir, "in_labels.txt"), 'w')
########################################In-distribution###########################################
print("Processing in-distribution images")
N = len(testloaderIn.dataset)
count = 0
for j, data in enumerate(testloaderIn):
images, labels = data
images = images.cuda()
labels = labels.cuda()
curr_batch_size = images.shape[0]
inputs = images
scores = get_score(inputs, model, method, method_args)
for score in scores:
f1.write("{}\n".format(score))
if method == "rowl":
outputs = F.softmax(model(inputs), dim=1)
outputs = outputs.detach().cpu().numpy()
preds = np.argmax(outputs, axis=1)
confs = np.max(outputs, axis=1)
else:
outputs = F.softmax(model(inputs)[:, :num_classes], dim=1)
outputs = outputs.detach().cpu().numpy()
preds = np.argmax(outputs, axis=1)
confs = np.max(outputs, axis=1)
for k in range(preds.shape[0]):
g1.write("{} {} {}\n".format(labels[k], preds[k], confs[k]))
count += curr_batch_size
print("{:4}/{:4} images processed, {:.1f} seconds used.".format(count, N, time.time()-t0))
t0 = time.time()
f1.close()
g1.close()
if mode_args['in_dist_only']:
return
for out_dataset in out_datasets:
out_save_dir = os.path.join(in_save_dir, out_dataset)
if not os.path.exists(out_save_dir):
os.makedirs(out_save_dir)
        f2 = open(os.path.join(out_save_dir, "out_scores.txt"), 'w')
if out_dataset == 'SVHN':
testsetout = svhn.SVHN('datasets/ood_datasets/svhn/', split='test',
transform=transforms.ToTensor(), download=False)
testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=batch_size,
shuffle=True, num_workers=2)
elif out_dataset == 'dtd':
testsetout = torchvision.datasets.ImageFolder(root="datasets/ood_datasets/dtd/images",
transform=transforms.Compose([transforms.Resize(32), transforms.CenterCrop(32), transforms.ToTensor()]))
testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=True,
num_workers=2)
elif out_dataset == 'places365':
testsetout = torchvision.datasets.ImageFolder(root="datasets/ood_datasets/places365/test_subset",
transform=transforms.Compose([transforms.Resize(32), transforms.CenterCrop(32), transforms.ToTensor()]))
testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=True,
num_workers=2)
else:
testsetout = torchvision.datasets.ImageFolder("./datasets/ood_datasets/{}".format(out_dataset),
transform=transforms.Compose([transforms.Resize(32), transforms.CenterCrop(32), transforms.ToTensor()]))
testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=batch_size,
shuffle=True, num_workers=2)
###################################Out-of-Distributions#####################################
t0 = time.time()
print("Processing out-of-distribution images")
N = len(testloaderOut.dataset)
count = 0
for j, data in enumerate(testloaderOut):
images, labels = data
images = images.cuda()
labels = labels.cuda()
curr_batch_size = images.shape[0]
if adv:
inputs = attack_out.perturb(images)
elif corrupt:
inputs = corrupt_attack(images, model, method, method_args, False, adv_args['severity_level'])
elif adv_corrupt:
corrupted_images = corrupt_attack(images, model, method, method_args, False, adv_args['severity_level'])
inputs = attack_out.perturb(corrupted_images)
else:
inputs = images
scores = get_score(inputs, model, method, method_args)
for score in scores:
f2.write("{}\n".format(score))
count += curr_batch_size
print("{:4}/{:4} images processed, {:.1f} seconds used.".format(count, N, time.time()-t0))
t0 = time.time()
f2.close()
return
if __name__ == '__main__':
method_args = dict()
adv_args = dict()
mode_args = dict()
adv_args['epsilon'] = args.epsilon
adv_args['iters'] = args.iters
adv_args['iter_size'] = args.iter_size
adv_args['severity_level'] = args.severity_level
mode_args['in_dist_only'] = args.in_dist_only
mode_args['out_dist_only'] = args.out_dist_only
out_datasets = ['LSUN', 'LSUN_resize', 'iSUN', 'dtd', 'places365', 'SVHN']
if args.method == 'msp':
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == "odin":
method_args['temperature'] = 1000.0
if args.model_arch == 'densenet':
if args.in_dataset == "CIFAR-10":
method_args['magnitude'] = 0.0016
elif args.in_dataset == "CIFAR-100":
method_args['magnitude'] = 0.0012
elif args.in_dataset == "SVHN":
method_args['magnitude'] = 0.0006
elif args.model_arch == 'wideresnet':
if args.in_dataset == "CIFAR-10":
method_args['magnitude'] = 0.0006
elif args.in_dataset == "CIFAR-100":
method_args['magnitude'] = 0.0012
elif args.in_dataset == "SVHN":
method_args['magnitude'] = 0.0002
else:
assert False, 'Not supported model arch'
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == 'mahalanobis':
sample_mean, precision, lr_weights, lr_bias, magnitude = np.load(os.path.join('output/hyperparams/', args.in_dataset, args.name, 'results.npy'), allow_pickle=True)
regressor = LogisticRegressionCV(cv=2).fit([[0,0,0,0],[0,0,0,0],[1,1,1,1],[1,1,1,1]], [0,0,1,1])
regressor.coef_ = lr_weights
regressor.intercept_ = lr_bias
method_args['sample_mean'] = sample_mean
method_args['precision'] = precision
method_args['magnitude'] = magnitude
method_args['regressor'] = regressor
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == 'sofl':
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == 'rowl':
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == 'ntom':
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
elif args.method == 'atom':
eval_ood_detector(args.base_dir, args.in_dataset, out_datasets, args.batch_size, args.method, method_args, args.name, args.epochs, args.adv, args.corrupt, args.adv_corrupt, adv_args, mode_args)
```
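The evaluation script above writes one confidence score per line to `in_scores.txt` and, per OOD dataset, `out_scores.txt`. A minimal sketch (not taken from this repository's metric code) of how such files can be consumed to compute FPR at 95% TPR; the directory layout is an assumption, and synthetic scores keep the snippet runnable without the real files.
```python
# Hedged sketch: FPR at 95% TPR from score files like those written above.
# The paths below are assumed, not guaranteed by the repository.
import os
import numpy as np

def fpr_at_tpr(in_scores, out_scores, tpr=0.95):
    # higher score = more in-distribution; keep `tpr` of ID samples above threshold
    threshold = np.quantile(in_scores, 1.0 - tpr)
    return float(np.mean(out_scores >= threshold))

in_path = 'output/ood_scores/CIFAR-10/msp/example/nat/in_scores.txt'        # assumed layout
out_path = 'output/ood_scores/CIFAR-10/msp/example/nat/SVHN/out_scores.txt' # assumed layout
if os.path.exists(in_path) and os.path.exists(out_path):
    in_scores, out_scores = np.loadtxt(in_path), np.loadtxt(out_path)
else:
    rng = np.random.RandomState(0)  # synthetic fallback so the sketch runs standalone
    in_scores, out_scores = rng.normal(1.0, 0.3, 5000), rng.normal(0.0, 0.3, 5000)
print('FPR@95TPR: {:.4f}'.format(fpr_at_tpr(in_scores, out_scores)))
```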
#### File: jfc43/informative-outlier-mining/train_ccu.py
```python
import argparse
import os
import sys
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import numpy as np
import models.densenet as dn
import models.wideresnet as wn
import models.gmm as gmmlib
from utils import TinyImages
import utils.svhn_loader as svhn
from sklearn import mixture
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
parser = argparse.ArgumentParser(description='PyTorch DenseNet Training')
parser.add_argument('--gpu', default='0', type=str, help='which gpu to use')
parser.add_argument('--in-dataset', default="CIFAR-10", type=str, help='in-distribution dataset')
parser.add_argument('--model-arch', default='densenet', type=str, help='model architecture')
parser.add_argument('--epochs', default=100, type=int,
help='number of total epochs to run')
parser.add_argument('--save-epoch', default=10, type=int,
help='save the model every save_epoch')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
help='mini-batch size (default: 64)')
parser.add_argument('--ood-batch-size', default=50, type=int,
help='mini-batch size (default: 50)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,
help='weight decay (default: 0.0001)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--layers', default=100, type=int,
help='total number of layers (default: 100)')
parser.add_argument('--depth', default=40, type=int,
help='depth of resnet')
parser.add_argument('--width', default=4, type=int,
help='width of resnet')
parser.add_argument('--growth', default=12, type=int,
help='number of new channels per layer (default: 12)')
parser.add_argument('--droprate', default=0.0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--reduce', default=0.5, type=float,
help='compression rate in transition stage (default: 0.5)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
help='To not use bottleneck block')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', required=True, type=str,
help='name of experiment')
parser.add_argument('--tensorboard',
help='Log progress to TensorBoard', action='store_true')
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
print(state)
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
save_state_file = os.path.join(directory, 'args.txt')
fw = open(save_state_file, 'w')
print(state, file=fw)
fw.close()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(1)
np.random.seed(1)
def main():
if args.tensorboard: configure("runs/%s"%(args.name))
if args.augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
kwargs = {'num_workers': 1, 'pin_memory': True}
if args.in_dataset == "CIFAR-10":
# Data loading code
normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./datasets/cifar10', train=True, download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_schedule=[50, 75, 90]
num_classes = 10
elif args.in_dataset == "CIFAR-100":
# Data loading code
normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./datasets/cifar100', train=True, download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./datasets/cifar100', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_schedule=[50, 75, 90]
num_classes = 100
elif args.in_dataset == "SVHN":
# Data loading code
normalizer = None
train_loader = torch.utils.data.DataLoader(
svhn.SVHN('datasets/svhn/', split='train',
transform=transforms.ToTensor(), download=False),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
svhn.SVHN('datasets/svhn/', split='test',
transform=transforms.ToTensor(), download=False),
batch_size=args.batch_size, shuffle=False, **kwargs)
args.epochs = 20
args.save_epoch = 2
lr_schedule=[10, 15, 18]
num_classes = 10
out_loader = torch.utils.data.DataLoader(
TinyImages(transform=transforms.Compose(
[transforms.ToTensor(), transforms.ToPILImage(), transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), transforms.ToTensor()])),
batch_size=args.ood_batch_size, shuffle=False, **kwargs)
# create model
if args.model_arch == 'densenet':
base_model = dn.DenseNet3(args.layers, num_classes, args.growth, reduction=args.reduce,
bottleneck=args.bottleneck, dropRate=args.droprate, normalizer=normalizer)
elif args.model_arch == 'wideresnet':
base_model = wn.WideResNet(args.depth, num_classes, widen_factor=args.width, dropRate=args.droprate, normalizer=normalizer)
else:
assert False, 'Not supported model arch: {}'.format(args.model_arch)
gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100])
gmm = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'in_gmm.pth.tar')
gmm.alpha = nn.Parameter(gmm.alpha)
gmm.mu.requires_grad = True
gmm.logvar.requires_grad = True
gmm.alpha.requires_grad = False
gmm_out = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'out_gmm.pth.tar')
gmm_out.alpha = nn.Parameter(gmm.alpha)
gmm_out.mu.requires_grad = True
gmm_out.logvar.requires_grad = True
gmm_out.alpha.requires_grad = False
loglam = 0.
model = gmmlib.DoublyRobustModel(base_model, gmm, gmm_out,
loglam, dim=3072,
classes=num_classes).cuda()
model.loglam.requires_grad = False
# get the number of model parameters
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
model = model.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
    # define loss function (criterion) and optimizer
lr = args.lr
lr_gmm = 1e-5
param_groups = [{'params':model.mm.parameters(),'lr':lr_gmm, 'weight_decay':0.},
{'params':model.mm_out.parameters(),'lr':lr_gmm, 'weight_decay':0.},
{'params':model.base_model.parameters(),'lr':lr, 'weight_decay':args.weight_decay}]
optimizer = torch.optim.SGD(param_groups, momentum=args.momentum, nesterov=True)
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, lr_schedule)
# train for one epoch
lam = model.loglam.data.exp().item()
train_CEDA_gmm_out(model, train_loader, out_loader, optimizer, epoch, lam=lam)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, epoch)
# remember best prec@1 and save checkpoint
if (epoch + 1) % args.save_epoch == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, epoch + 1)
def gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100]):
print('Generate GMM...')
start = time.time()
dim = 3072
X = []
for x, f in train_loader:
X.append(x.view(-1,dim))
X = torch.cat(X, 0)
X = X[:data_used] #needed to keep memory of distance matrix below 800 GB
if PCA:
metric = gmmlib.PCAMetric(X, p=2, min_sv_factor=1e6)
        X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
else:
metric = gmmlib.LpMetric()
for n in N:
print(n)
gmm = gmmlib.GMM(n, dim, metric=metric)
clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
clf.fit(X)
mu = torch.tensor(clf.means_ ,dtype=torch.float)
logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
logvar = 0.*logvar + logvar.exp().mean().log()
alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)
gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
if PCA:
gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
@ metric.comp_vecs.t().inverse() )
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'in_gmm.pth.tar'
torch.save(gmm, filename)
X = []
for idx, (x, f) in enumerate(out_loader):
if idx>400:
break;
X.append(x.view(-1,dim))
X = torch.cat(X, 0)
if PCA:
        X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
for n in N:
print(n)
# Out GMM
gmm = gmmlib.GMM(n, dim, metric=metric)
clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
clf.fit(X)
mu = torch.tensor(clf.means_ ,dtype=torch.float)
logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
logvar = 0.*logvar + logvar.exp().mean().log()
alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)
gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
if PCA:
gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
@ metric.comp_vecs.t().inverse() )
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'out_gmm.pth.tar'
torch.save(gmm, filename)
print('Time: ', time.time() - start)
print('Done!')
def train_CEDA_gmm_out(model, train_loader, ood_loader, optimizer, epoch, lam=1., verbose=10):
criterion = nn.NLLLoss()
model.train()
train_loss = 0
likelihood_loss = 0
correct = 0
margin = np.log(4.)
if ood_loader is not None:
ood_loader.dataset.offset = np.random.randint(len(ood_loader.dataset))
ood_loader_iter = iter(ood_loader)
p_in = torch.tensor(1. / (1. + lam), dtype=torch.float).cuda()
p_out = torch.tensor(lam, dtype=torch.float).cuda() * p_in
log_p_in = p_in.log()
log_p_out = p_out.log()
start = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
noise = next(ood_loader_iter)[0].cuda()
optimizer.zero_grad()
full_data = torch.cat([data, noise], 0)
full_out = model(full_data)
full_out = F.log_softmax(full_out, dim=1)
output = full_out[:data.shape[0]]
output_adv = full_out[data.shape[0]:]
like_in_in = torch.logsumexp(model.mm(data.view(data.shape[0], -1)), 0 )
like_out_in = torch.logsumexp(model.mm(noise.view(noise.shape[0], -1)), 0 )
like_in_out = torch.logsumexp(model.mm_out(data.view(data.shape[0], -1)), 0 )
like_out_out = torch.logsumexp(model.mm_out(noise.view(noise.shape[0], -1)), 0 )
loss1 = criterion(output, target)
loss2 = -output_adv.mean()
loss3 = - torch.logsumexp(torch.stack([log_p_in + like_in_in,
log_p_out + like_in_out], 0), 0).mean()
loss4 = - torch.logsumexp(torch.stack([log_p_in + like_out_in,
log_p_out + like_out_out], 0), 0).mean()
loss = p_in*(loss1 + loss3) + p_out*(loss2 + loss4)
loss.backward()
optimizer.step()
likelihood_loss += loss3.item()
train_loss += loss.item()
_, predicted = output.max(1)
correct += predicted.eq(target).sum().item()
threshold = model.mm.logvar.max() + margin
idx = model.mm_out.logvar<threshold
model.mm_out.logvar.data[idx] = threshold
if (batch_idx % verbose == 0) and verbose>0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
print('Time: ', time.time() - start)
def validate(val_loader, model, criterion, epoch):
"""Perform validation on the validation set"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
input = input.cuda()
target = target.cuda()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data, input.size(0))
top1.update(prec1, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
# log to TensorBoard
if args.tensorboard:
log_value('val_loss', losses.avg, epoch)
log_value('val_acc', top1.avg, epoch)
return top1.avg
def save_checkpoint(state, epoch):
"""Saves checkpoint to disk"""
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + 'checkpoint_{}.pth.tar'.format(epoch)
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr_schedule=[50, 75, 90]):
"""Sets the learning rate to the initial LR decayed by 10 after 40 and 80 epochs"""
if epoch in lr_schedule:
for group in optimizer.param_groups:
group['lr'] *= .1
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
```
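A small numeric sketch (toy values, not from the repository) of the mixture weighting used in `train_CEDA_gmm_out` above: for a given `lam`, `p_in = 1/(1+lam)` and `p_out = lam/(1+lam)` sum to one, and the likelihood terms are logsumexps over the two weighted GMM log-densities.
```python
# Hedged sketch of the p_in / p_out weighting and the logsumexp likelihood term
# (loss3) from train_CEDA_gmm_out. The log-likelihood values are made-up toys.
import torch

lam = 1.0
p_in = torch.tensor(1. / (1. + lam))
p_out = torch.tensor(lam) * p_in              # p_in + p_out == 1
log_p_in, log_p_out = p_in.log(), p_out.log()

like_in_in = torch.tensor([-10.0, -12.0])     # toy: log p_in-GMM(x) for two ID samples
like_in_out = torch.tensor([-15.0, -11.0])    # toy: log p_out-GMM(x) for the same samples

# negative log of the two-component mixture density, averaged over the batch
loss3 = -torch.logsumexp(torch.stack([log_p_in + like_in_in,
                                      log_p_out + like_in_out], 0), 0).mean()
print(float(p_in + p_out), float(loss3))
```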
#### File: informative-outlier-mining/utils/confidence_pgd_attack.py
```python
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import time
import os
from scipy import misc
class OELoss(nn.Module):
def __init__(self):
super(OELoss, self).__init__()
def forward(self, x, y = None):
return -(x.mean(1) - torch.logsumexp(x, dim=1))
class ConfidenceLinfPGDAttack:
def __init__(
self, model, eps=4.0, nb_iter=40,
eps_iter=1.0, rand_init=True, clip_min=0., clip_max=1.,
num_classes = 10, n_restarts=1):
self.eps = eps
self.nb_iter = nb_iter
self.eps_iter = eps_iter
self.rand_init = rand_init
self.model = model
self.detector_loss_func = OELoss()
self.clip_min = clip_min
self.clip_max = clip_max
self.num_classes = num_classes
self.n_restarts = n_restarts
def get_loss(self, x):
outputs = self.model(x)
loss = self.detector_loss_func(outputs)
return loss
def attack_single_run(self, x):
x = x.detach().clone()
delta = torch.zeros_like(x)
delta = nn.Parameter(delta)
delta.requires_grad_()
with torch.no_grad():
loss = self.get_loss(x)
worst_loss = loss.data.clone()
worst_perb = delta.data.clone()
if self.rand_init:
delta.data.uniform_(-1, 1)
delta.data *= self.eps
delta.data = torch.round(delta.data)
delta.data = (torch.clamp(x.data + delta.data / 255.0, min=self.clip_min, max=self.clip_max) - x.data) * 255.0
for ii in range(self.nb_iter):
adv_x = x + delta / 255.0
loss = self.get_loss(adv_x)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
loss.mean().backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + grad_sign * self.eps_iter
delta.data = torch.clamp(delta.data, min=-self.eps, max=self.eps)
delta.data = (torch.clamp(x.data + delta.data / 255.0, min=self.clip_min, max=self.clip_max) - x.data) * 255.0
delta.grad.data.zero_()
with torch.no_grad():
adv_x = x + delta / 255.0
loss = self.get_loss(adv_x)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
return worst_perb, worst_loss
def perturb(self, x):
"""
Given examples x, returns their adversarial counterparts with
an attack length of eps.
:param x: input tensor.
:return: tensor containing perturbed inputs.
"""
self.model.eval()
with torch.no_grad():
loss = self.get_loss(x)
worst_loss = loss.data.clone()
worst_perb = torch.zeros_like(x)
for k in range(self.n_restarts):
delta, loss = self.attack_single_run(x)
cond = loss.data > worst_loss
worst_loss[cond] = loss.data[cond]
worst_perb[cond] = delta.data[cond]
adv_x = torch.clamp(x + torch.clamp(torch.round(worst_perb), min=-self.eps, max=self.eps) / 255.0, min=self.clip_min, max=self.clip_max)
return adv_x
```
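The `OELoss` above equals the cross-entropy between a uniform target and the softmax of the logits, so it is minimized (at log K) when the logits are flat. A brief check with toy logits (values are illustrative only):
```python
# Hedged sketch: OELoss(x) == cross-entropy(uniform target, softmax(x)).
import torch
import torch.nn.functional as F

def oe_loss(x):
    return -(x.mean(1) - torch.logsumexp(x, dim=1))

K = 10
uniform_logits = torch.zeros(1, K)
peaked_logits = torch.tensor([[10.] + [0.] * (K - 1)])

print(float(oe_loss(uniform_logits)))   # log(10) ~= 2.3026: flat logits minimize the loss
print(float(oe_loss(peaked_logits)))    # larger: a confident prediction is penalized
uniform_target = torch.full((1, K), 1. / K)
print(float(-(uniform_target * F.log_softmax(peaked_logits, dim=1)).sum(1)))  # same value
```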
|
{
"source": "jfc43/robust-attribution-regularization",
"score": 2
}
|
#### File: robust-attribution-regularization/Flower/utils.py
```python
import numpy as np
import tensorflow as tf
import random
from skimage import feature, transform
import _pickle as pkl
import matplotlib.pyplot as plt
from pylab import rcParams
import scipy
import scipy.stats as stats
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
import flower_input
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
EPSILON = 1e-12
MIN_INPUT = np.zeros([1, 128, 128, 3]).astype(np.float32)
MAX_INPUT = 255 * np.ones([1,128, 128,3]).astype(np.float32)
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def plot(data, xi=None, cmap=None, axis=plt, percentile=100, dilation=3.0, alpha=0.8):
dx, dy = 0.05, 0.05
xx = np.arange(0.0, data.shape[1], dx)
yy = np.arange(0.0, data.shape[0], dy)
xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
extent = xmin, xmax, ymin, ymax
cmap_xi = plt.get_cmap('Greys_r')
cmap_xi.set_bad(alpha=0)
overlay = None
if xi is not None:
# Compute edges (to overlay to heatmaps later)
xi_greyscale = xi if len(xi.shape) == 2 else np.mean(xi, axis=-1)
in_image_upscaled = transform.rescale(xi_greyscale, dilation, mode='constant')
edges = feature.canny(in_image_upscaled).astype(float)
edges[edges < 0.5] = np.nan
edges[:5, :] = np.nan
edges[-5:, :] = np.nan
edges[:, :5] = np.nan
edges[:, -5:] = np.nan
overlay = edges
abs_max = np.percentile(np.abs(data), percentile)
abs_min = abs_max
if xi is None:
axis.imshow(data, extent=extent, interpolation='none', cmap=cmap)
else:
axis.imshow(data, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
if overlay is not None:
axis.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_xi, alpha=alpha)
axis.axis('off')
axis.xticks([])
axis.yticks([])
return axis
def dataReader():
flower = flower_input.FlowerData('data')
X = flower.eval_data.xs
y = flower.eval_data.ys
return X, y.astype(int)
def get_session(number=None):
config_gpu = tf.ConfigProto()
config_gpu.gpu_options.allow_growth = True
return tf.Session(config=config_gpu)
def integrated_gradients(
sess,
baseline,
inp,
target_label_index,
model,
gradient_func='output_input_gradient',
steps=50):
"""Computes integrated gradients for a given network and prediction label.
Integrated gradients is a technique for attributing a deep network's
prediction to its input features. It was introduced by:
https://arxiv.org/abs/1703.01365
In addition to the integrated gradients tensor, the method also
returns some additional debugging information for sanity checking
the computation. See sanity_check_integrated_gradients for how this
information is used.
This method only applies to classification networks, i.e., networks
that predict a probability distribution across two or more class labels.
Access to the specific network is provided to the method via a
'predictions_and_gradients' function provided as argument to this method.
The function takes a batch of inputs and a label, and returns the
predicted probabilities of the label for the provided inputs, along with
gradients of the prediction with respect to the input. Such a function
should be easy to create in most deep learning frameworks.
Args:
inp: The specific input for which integrated gradients must be computed.
target_label_index: Index of the target class for which integrated gradients
must be computed.
predictions_and_gradients: This is a function that provides access to the
network's predictions and gradients. It takes the following
arguments:
      - inputs: A batch of tensors of the same shape as 'inp'. The first
dimension is the batch dimension, and rest of the dimensions coincide
with that of 'inp'.
- target_label_index: The index of the target class for which gradients
must be obtained.
and returns:
- predictions: Predicted probability distribution across all classes
for each input. It has shape <batch, num_classes> where 'batch' is the
number of inputs and num_classes is the number of classes for the model.
- gradients: Gradients of the prediction for the target class (denoted by
target_label_index) with respect to the inputs. It has the same shape
as 'inputs'.
baseline: [optional] The baseline input used in the integrated
gradients computation. If None (default), the all zero tensor with
the same shape as the input (i.e., 0*input) is used as the baseline.
The provided baseline and input must have the same shape.
    steps: [optional] Number of interpolation steps between the baseline
      and the input used in the integrated gradients computation. These
      steps alone determine the integral approximation error. By default,
steps is set to 50.
Returns:
integrated_gradients: The integrated_gradients of the prediction for the
provided prediction label to the input. It has the same shape as that of
the input.
The following output is meant to provide debug information for sanity
checking the integrated gradients computation.
See also: sanity_check_integrated_gradients
prediction_trend: The predicted probability distribution across all classes
for the various (scaled) inputs considered in computing integrated gradients.
It has shape <steps, num_classes> where 'steps' is the number of integrated
gradient steps and 'num_classes' is the number of target classes for the
model.
"""
if baseline is None:
baseline = 0*inp
assert(baseline.shape == inp.shape)
# Scale input and compute gradients.
scaled_inputs = [baseline + (float(i + 1)/steps)*(inp-baseline) for i in range(0, steps)]
scaled_labels = [target_label_index for i in range(0, steps)]
if gradient_func == 'loss_input_gradient':
grads = sess.run(model.loss_input_gradient, feed_dict = {model.input: scaled_inputs, model.label: scaled_labels}) # shapes: <steps+1, inp.shape>
else:
grads = sess.run(model.output_input_gradient, feed_dict = {model.input: scaled_inputs, model.label_ph: target_label_index})
avg_grads = np.average(grads[:-1], axis=0)
integrated_gradients = (inp-baseline)*avg_grads # shape: <inp.shape>
return integrated_gradients
```
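A framework-free sketch of the Riemann-sum approximation that `integrated_gradients` implements above, using a toy quadratic whose gradient is known in closed form; for such a function the attributions should sum approximately to F(input) - F(baseline). This is an illustration of the approximation only, not a drop-in replacement for the TensorFlow version.
```python
# Hedged sketch of integrated gradients via a Riemann sum on F(x) = sum(x**2),
# whose gradient is 2*x (toy example, independent of the TensorFlow code above).
import numpy as np

def integrated_gradients_np(grad_fn, inp, baseline=None, steps=200):
    if baseline is None:
        baseline = np.zeros_like(inp)
    scaled = [baseline + (float(i + 1) / steps) * (inp - baseline) for i in range(steps)]
    avg_grads = np.average([grad_fn(x) for x in scaled], axis=0)
    return (inp - baseline) * avg_grads

inp = np.array([1.0, 2.0, 3.0])
attributions = integrated_gradients_np(lambda x: 2 * x, inp)
# completeness check: attributions sum to F(inp) - F(baseline) = 14, up to step error
print(attributions, attributions.sum())
```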
#### File: robust-attribution-regularization/GTSRB/gtsrb_input.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
import tensorflow as tf
import csv
import matplotlib.pyplot as plt
from PIL import Image
version = sys.version_info
import numpy as np
import skimage.data
import scipy.io as sio
import cv2
def image_brightness_normalisation(image):
image[:,:,0] = cv2.equalizeHist(image[:,:,0])
image[:,:,1] = cv2.equalizeHist(image[:,:,1])
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
return image
def preprocess_data(X):
for i in range(len(X)):
X[i,:,:,:] = image_brightness_normalisation(X[i,:,:,:])
return X
class GTSRBData(object):
def __init__(self, path):
loaded = np.load(os.path.join(path, 'train.npz'))
train_images = loaded['images']
train_images = preprocess_data(train_images)
train_labels = loaded['labels']
loaded = np.load(os.path.join(path,'test.npz'))
eval_images = loaded['images']
eval_images = preprocess_data(eval_images)
eval_labels = loaded['labels']
self.train_data = DataSubset(train_images, train_labels)
self.eval_data = DataSubset(eval_images, eval_labels)
class DataSubset(object):
def __init__(self, xs, ys):
self.xs = xs
self.n = xs.shape[0]
self.ys = ys
self.batch_start = 0
self.cur_order = np.random.permutation(self.n)
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
if self.n < batch_size:
raise ValueError('Batch size can be at most the dataset size')
if not multiple_passes:
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size <= 0:
raise ValueError('Pass through the dataset is complete.')
batch_end = self.batch_start + actual_batch_size
batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size < batch_size:
if reshuffle_after_pass:
self.cur_order = np.random.permutation(self.n)
self.batch_start = 0
batch_end = self.batch_start + batch_size
batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
```
|
{
"source": "jfc43/robust-ood-detection",
"score": 2
}
|
#### File: robust-ood-detection/CIFAR/eval.py
```python
import argparse
import os
import sys
sys.path.append("..")
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.densenet as dn
from utils import TinyImages, softmax, LinfPGDAttack
parser = argparse.ArgumentParser(description='PyTorch DenseNet Training')
parser.add_argument('--gpu', default='0', type=str, help='which gpu to use')
parser.add_argument('--adv', action='store_true', help='adversarial robustness')
parser.add_argument('--epsilon', default=1.0, type=float, help='epsilon')
parser.add_argument('--iters', default=10, type=int,
help='attack iterations')
parser.add_argument('--iter-size', default=1.0, type=float, help='attack step size')
parser.add_argument('--epochs', default=100, type=int,
help='number of total epochs to run')
parser.add_argument('--dataset', default="CIFAR-10", type=str, help='in-distribution dataset')
parser.add_argument('--classes', default=10, type=int,
help='number of classes')
parser.add_argument('-b', '--batch-size', default=25, type=int,
help='mini-batch size')
parser.add_argument('--layers', default=100, type=int,
help='total number of layers (default: 100)')
parser.add_argument('--name', required=True, type=str,
help='name of model')
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
# Data loading code
normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
transform_test = transforms.Compose([
transforms.ToTensor()
])
if args.dataset == "CIFAR-10":
testset = datasets.CIFAR10(root='./datasets/cifar10', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=2)
num_classes = 10
elif args.dataset == "CIFAR-100":
testset = datasets.CIFAR100(root='./datasets/cifar100', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=2)
num_classes = 100
# create model
model = dn.DenseNet3(args.layers, num_classes, normalizer=normalizer)
model.eval()
model = model.cuda()
checkpoint = torch.load("./checkpoints/{name}/checkpoint_{epochs}.pth.tar".format(name=args.name, epochs=args.epochs))
model.load_state_dict(checkpoint['state_dict'])
attack = LinfPGDAttack(model = model, eps=args.epsilon, nb_iter=args.iters, eps_iter=args.iter_size, rand_init=True)
nat_top1 = AverageMeter()
adv_top1 = AverageMeter()
for batch_index, (input, target) in enumerate(test_loader):
print(batch_index * args.batch_size, '/', 10000)
target = target.cuda()
nat_input = input.detach().clone()
nat_output = model(nat_input)
nat_prec1 = accuracy(nat_output.data, target, topk=(1,))[0]
nat_top1.update(nat_prec1, input.size(0))
if args.adv:
adv_input = attack.perturb(input, target)
adv_output = model(adv_input)
adv_prec1 = accuracy(adv_output.data, target, topk=(1,))[0]
adv_top1.update(adv_prec1, input.size(0))
print('Accuracy: %.4f'%nat_top1.avg)
print('Robustness: %.4f'%adv_top1.avg)
```
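A toy check (values made up) of the precision@k computation used by `accuracy` above: with two samples where only the first top-1 prediction is correct, top-1 precision is 50% and top-5 is 100%.
```python
# Hedged sketch of precision@k on toy logits, mirroring the accuracy() helper above.
import torch

output = torch.tensor([[0.1, 0.9, 0.0, 0.0, 0.0],
                       [0.8, 0.1, 0.0, 0.0, 0.0]])
target = torch.tensor([1, 1])                 # second sample's top-1 prediction is wrong
_, pred = output.topk(5, 1, True, True)
correct = pred.t().eq(target.view(1, -1).expand_as(pred.t()))
for k in (1, 5):
    prec_k = 100.0 * correct[:k].reshape(-1).float().sum().item() / target.size(0)
    print(k, prec_k)                          # 1 -> 50.0, 5 -> 100.0
```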
#### File: robust-ood-detection/utils/tinyimages_80mn_loader.py
```python
import numpy as np
import torch
from bisect import bisect_left
class TinyImages(torch.utils.data.Dataset):
def __init__(self, transform=None, exclude_cifar=True):
data_file = open('datasets/unlabeled_datasets/80M_Tiny_Images/tiny_images.bin', "rb")
def load_image(idx):
data_file.seek(idx * 3072)
data = data_file.read(3072)
return np.fromstring(data, dtype='uint8').reshape(32, 32, 3, order="F")
self.load_image = load_image
self.offset = 0 # offset index
self.transform = transform
self.exclude_cifar = exclude_cifar
if exclude_cifar:
self.cifar_idxs = []
with open('datasets/unlabeled_datasets/80M_Tiny_Images/80mn_cifar_idxs.txt', 'r') as idxs:
for idx in idxs:
# indices in file take the 80mn database to start at 1, hence "- 1"
self.cifar_idxs.append(int(idx) - 1)
# hash table option
self.cifar_idxs = set(self.cifar_idxs)
self.in_cifar = lambda x: x in self.cifar_idxs
# bisection search option
# self.cifar_idxs = tuple(sorted(self.cifar_idxs))
#
# def binary_search(x, hi=len(self.cifar_idxs)):
# pos = bisect_left(self.cifar_idxs, x, 0, hi) # find insertion position
# return True if pos != hi and self.cifar_idxs[pos] == x else False
#
# self.in_cifar = binary_search
def __getitem__(self, index):
index = (index + self.offset) % 79302016
if self.exclude_cifar:
while self.in_cifar(index):
index = np.random.randint(79302017)
img = self.load_image(index)
if self.transform is not None:
img = self.transform(img)
return img, 0 # 0 is the class
def __len__(self):
return 79302017
```
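The commented-out bisection option in `TinyImages.__init__` trades the set's memory footprint for O(log n) lookups on a sorted tuple; a self-contained sketch of that lookup with toy indices:
```python
# Hedged sketch of the bisection-search alternative to the hash-set lookup in
# TinyImages, using toy CIFAR indices instead of 80mn_cifar_idxs.txt.
from bisect import bisect_left

cifar_idxs = tuple(sorted([3, 17, 42, 99]))

def in_cifar(x, hi=len(cifar_idxs)):
    pos = bisect_left(cifar_idxs, x, 0, hi)   # insertion position in the sorted tuple
    return pos != hi and cifar_idxs[pos] == x

print(in_cifar(42), in_cifar(41))             # True False
```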
|
{
"source": "jfcaballero/Tutorial-sobre-scikit-learn-abreviado",
"score": 3
}
|
#### File: talleres_inov_docente/figures/plot_2d_separator.py
```python
import numpy as np
import matplotlib.pyplot as plt
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
if eps is None:
eps = X.std() / 2.
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
try:
decision_values = classifier.decision_function(X_grid)
levels = [0]
fill_levels = [decision_values.min(), 0, decision_values.max()]
except AttributeError:
# no decision_function
decision_values = classifier.predict_proba(X_grid)[:, 1]
levels = [.5]
fill_levels = [0, .5, 1]
if ax is None:
ax = plt.gca()
if fill:
ax.contourf(X1, X2, decision_values.reshape(X1.shape),
levels=fill_levels, colors=['blue', 'red'])
else:
ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
colors="black")
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if __name__ == '__main__':
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(centers=2, random_state=42)
clf = LogisticRegression().fit(X, y)
plot_2d_separator(clf, X, fill=True)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
```
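The `except AttributeError` branch in `plot_2d_separator` handles estimators without `decision_function`; a short sketch exercising that fallback with a probabilistic classifier (the choice of `RandomForestClassifier` and the import path are assumptions):
```python
# Hedged sketch: exercise the predict_proba fallback of plot_2d_separator with a
# classifier that has no decision_function. Assumes this module is importable.
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from plot_2d_separator import plot_2d_separator

X, y = make_blobs(centers=2, random_state=42)
clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
plot_2d_separator(clf, X, fill=True)          # falls back to predict_proba internally
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
```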
#### File: talleres_inov_docente/figures/plot_scaling.py
```python
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer, RobustScaler
from sklearn.model_selection import train_test_split
from .plot_helpers import cm2
def plot_scaling():
X, y = make_blobs(n_samples=50, centers=2, random_state=4, cluster_std=1)
X += 3
plt.figure(figsize=(15, 8))
main_ax = plt.subplot2grid((2, 4), (0, 0), rowspan=2, colspan=2)
main_ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2, s=60)
maxx = np.abs(X[:, 0]).max()
maxy = np.abs(X[:, 1]).max()
main_ax.set_xlim(-maxx + 1, maxx + 1)
main_ax.set_ylim(-maxy + 1, maxy + 1)
main_ax.set_title("Datos originales")
other_axes = [plt.subplot2grid((2, 4), (i, j)) for j in range(2, 4) for i in range(2)]
for ax, scaler in zip(other_axes, [StandardScaler(), RobustScaler(),
MinMaxScaler(), Normalizer(norm='l2')]):
X_ = scaler.fit_transform(X)
ax.scatter(X_[:, 0], X_[:, 1], c=y, cmap=cm2, s=60)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_title(type(scaler).__name__)
other_axes.append(main_ax)
for ax in other_axes:
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def plot_relative_scaling():
# make synthetic data
X, _ = make_blobs(n_samples=50, centers=5, random_state=4, cluster_std=2)
# split it into training and test set
X_train, X_test = train_test_split(X, random_state=5, test_size=.1)
# plot the training and test set
fig, axes = plt.subplots(1, 3, figsize=(13, 4))
axes[0].scatter(X_train[:, 0], X_train[:, 1],
c='b', label="conjunto de entrenamiento", s=60)
axes[0].scatter(X_test[:, 0], X_test[:, 1], marker='^',
c='r', label="conjunto de test", s=60)
axes[0].legend(loc='upper left')
axes[0].set_title("datos originales")
# scale the data using MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# visualize the properly scaled data
axes[1].scatter(X_train_scaled[:, 0], X_train_scaled[:, 1],
c='b', label="conjunto de entrenamiento", s=60)
axes[1].scatter(X_test_scaled[:, 0], X_test_scaled[:, 1], marker='^',
c='r', label="conjunto de test", s=60)
axes[1].set_title("datos escalados")
# rescale the test set separately, so that test set min is 0 and test set max is 1
# DO NOT DO THIS! For illustration purposes only
test_scaler = MinMaxScaler()
test_scaler.fit(X_test)
X_test_scaled_badly = test_scaler.transform(X_test)
# visualize wrongly scaled data
axes[2].scatter(X_train_scaled[:, 0], X_train_scaled[:, 1],
c='b', label="conjunto de entrenamiento", s=60)
axes[2].scatter(X_test_scaled_badly[:, 0], X_test_scaled_badly[:, 1], marker='^',
c='r', label="conjunto de test", s=60)
axes[2].set_title("datos incorrectamente escalados")
```
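A self-contained sketch of the point `plot_relative_scaling` illustrates: fit the scaler on the training split only and reuse it on the test split, instead of refitting on the test data.
```python
# Hedged sketch: fit MinMaxScaler on the training split only, then reuse it for
# the test split (refitting on the test split distorts relative positions).
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

X, _ = make_blobs(n_samples=50, centers=5, random_state=4, cluster_std=2)
X_train, X_test = train_test_split(X, random_state=5, test_size=.1)

scaler = MinMaxScaler().fit(X_train)          # statistics come from the training data only
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)      # same transform applied to the test split
print(X_train_scaled.min(axis=0), X_train_scaled.max(axis=0))   # [0. 0.] [1. 1.]
print(X_test_scaled.min(axis=0), X_test_scaled.max(axis=0))     # generally not exactly 0/1
```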
|
{
"source": "Jfcabral/SE2015",
"score": 2
}
|
#### File: Jfcabral/SE2015/cluster_handler.py
```python
from time import sleep
from sys import stderr, exc_info
from upload_to_s3 import select_s3_bucket
from boto.emr.connection import EmrResponseError
from boto.emr.instance_group import InstanceGroup
from amazon_utilities import ACTIVE_CLUSTER_STATES, ALL_CLUSTER_STATES, RESIZABLE_CLUSTER_STATES, connect_emr
class Cluster:
# Maximum allowed Task instances for a Cluster
MAX_TASK_INSTANCES = 5
# Maximum allowed Core instances for a Cluster
MAX_CORE_INSTANCES = 3
# Hadoop version 1.x (required to allow the use of m1.small instances)
EMR_HADOOP_VERSION = '1.0.3'
# Amazon AMI last version to support Hadoop 1
EMR_AMI_VERSION = '2.4'
DEFAULT_INSTANCE_GROUPS = [
InstanceGroup(1, 'MASTER', 'm1.small', 'ON_DEMAND', 'Master'),
InstanceGroup(1, 'CORE', 'm1.small', 'ON_DEMAND', 'Core'),
InstanceGroup(1, 'TASK', 'm1.small', 'ON_DEMAND', 'Task')
]
    def __init__(self, conn=None):
        # avoid opening an EMR connection at import time: a default argument of
        # connect_emr() would be evaluated once, when the class is defined
        self.conn_emr = conn if conn is not None else connect_emr()
def menu(self):
op = -1
while op != 0:
print "\n---------- Cluster Menu ----------"
print "1.\tList Active Clusters\n" \
"2.\tList All Clusters\n" \
"3.\tCreate Cluster\n" \
"4.\tResize Cluster\n" \
"5.\tTerminate Cluster\n\n" \
"0.\tMain Menu\n"
ops = {1: 'self.list_clusters()',
2: 'self.list_clusters(True)',
3: 'self.create_cluster()',
4: 'self.handle_cluster_resize()',
5: 'self.terminate_cluster()'}
op = input('Your option: ')
if op in ops:
print
eval(ops[op])
sleep(2)
elif op != 0:
print 'Invalid option! Please, try again...'
def create_cluster(self):
print 'Please select a bucket to save the logs '
s3_bucket_log = select_s3_bucket()
_name = raw_input('\tCluster Name: ')
cluster_id = self.conn_emr.run_jobflow(log_uri='s3://%s/logs' % s3_bucket_log,
name=_name,
steps=[],
# num_instances=1,
instance_groups=Cluster.DEFAULT_INSTANCE_GROUPS,
job_flow_role='EMR_EC2_DefaultRole',
service_role='EMR_DefaultRole',
action_on_failure='CANCEL_AND_WAIT', # TODO careful! # should it be CANCEL_AND_WAIT?
ami_version=Cluster.EMR_AMI_VERSION,
hadoop_version=Cluster.EMR_HADOOP_VERSION,
keep_alive=True)
# Enabling the termination protection
self.conn_emr.set_termination_protection(cluster_id, True)
if cluster_id:
print 'Created cluster: ', cluster_id
return cluster_id
def terminate_cluster(self):
cluster = self.input_select_cluster()
if cluster:
try:
self.conn_emr.set_termination_protection(cluster.id, False)
self.conn_emr.terminate_jobflow(cluster.id)
print 'Cluster %s was successfully terminated after %s normalized hours.' \
% (cluster.id, cluster.normalizedinstancehours)
except EmrResponseError:
print >> stderr, 'Error terminating Cluster', exc_info()[1].message
def input_select_cluster(self):
ret = None
cluster_list = self.list_clusters()
if cluster_list and len(cluster_list) > 0:
while True:
cluster_op = input('Please select an option (-1 to go back): ')
if cluster_op < -1 or cluster_op > len(cluster_list):
print >> stderr, 'Invalid option'
else:
break
if cluster_op != -1:
ret = cluster_list[cluster_op]
return ret
def input_select_create_cluster(self):
cluster_id = None
cluster_list = self.list_clusters(False, True)
index_create_new_cluster = len(cluster_list)
print '%d.\tCreate new Cluster' % index_create_new_cluster
while True:
try:
cluster_op = int(raw_input('Please select an option (-1 to go back): '))
if cluster_op == index_create_new_cluster:
cluster_id = self.create_cluster()
break
elif cluster_op == -1:
cluster_id = -1
break
elif cluster_list and 0 <= cluster_op < len(cluster_list):
cluster_id = cluster_list[cluster_op].id
break
else:
print >> stderr, 'Invalid option'
except ValueError:
print >> stderr, 'Invalid option'
return cluster_id
def handle_cluster_resize(self):
cluster = self.input_select_cluster()
if cluster:
instances = self.conn_emr.list_instance_groups(cluster.id)
if not instances or len(instances.instancegroups) == 0:
print >> stderr, 'No instances were found.'
else:
instance = self.print_input_instance_group(instances)
if instance:
if instance.instancegrouptype == 'MASTER':
                        print >> stderr, 'Master instances cannot be resized!'
                    elif instance.instancegrouptype == 'CORE':
self.__handle_core_increase(instance)
elif instance.instancegrouptype == 'TASK':
self.__handle_task_resize(instance)
else:
print >> stderr, 'Unknown Instance Group Type ', instance.instancegrouptype
def __handle_task_resize(self, instance):
print '\tCurrent instances: ', instance.runninginstancecount
number_instances = input('\tNew number of instances: ')
if 0 <= number_instances <= Cluster.MAX_TASK_INSTANCES:
try:
self.conn_emr.modify_instance_groups(instance.id, number_instances)
print 'Task instance number was successfully changed from %s to %d!' \
% (instance.runninginstancecount, number_instances)
sleep(1)
except EmrResponseError:
print >> stderr, 'Error occurred when increasing core size ', exc_info()[1].message
else:
print >> stderr, 'Invalid option'
def __handle_core_increase(self, instance):
print 'Attention: Core instances can only be increased!\n\tCurrent instances: ', instance.runninginstancecount
number_instances = input('\tNew number of instances: ')
if int(instance.runninginstancecount) < number_instances <= Cluster.MAX_CORE_INSTANCES:
try:
self.conn_emr.modify_instance_groups(instance.id, number_instances)
print 'Core instance number was successfully increased from %s to %d!' \
% (instance.runninginstancecount, number_instances)
sleep(2)
except EmrResponseError:
print >> stderr, 'Error occurred when increasing core size', exc_info()[1].message
else:
print >> stderr, 'Invalid option'
def print_input_instance_group(self, instances):
i = 0
resizable_instance = False
print "\n\n\t---------- Groups ----------"
for instance in instances.instancegroups:
if instance.status.state in RESIZABLE_CLUSTER_STATES:
resizable_instance = True
print '\t%d)\n\t\tType:\t%s\n\t\tID:\t%s\n\t\tType:\t%s - %s\n\t\tRequested / Running Count:\t%s/%s\n' \
'\t\tStatus:\t%s\n\t------------------------------------' \
% (i, instance.instancegrouptype, instance.id, instance.instancetype, instance.market,
instance.requestedinstancecount,
instance.runninginstancecount, instance.status.state)
i += 1
op = -1
if resizable_instance:
while True:
try:
op = int(raw_input('Select a Instance Group (-1 to go back): '))
if op < -1 or op >= len(instances.instancegroups) \
or instances.instancegroups[op].status.state not in RESIZABLE_CLUSTER_STATES:
raise ValueError()
else:
break
except ValueError:
print >> stderr, 'Invalid option'
else:
print 'No Running or Waiting (resizable) instances were found at the moment. Try a bit later.'
if op != -1:
ret = instances.instancegroups[op]
else:
ret = None
return ret
def list_instances(self, cluster):
temp = self.conn_emr.list_instances(cluster.id)
print temp
def list_clusters(self, all_states=False, silent=False):
if all_states:
out = self.conn_emr.list_clusters(cluster_states=ALL_CLUSTER_STATES)
else:
out = self.conn_emr.list_clusters(cluster_states=ACTIVE_CLUSTER_STATES)
if not out or not out.clusters or len(out.clusters) == 0:
if not silent:
print 'No clusters found / available for the selected option.'
else:
i = 0
print "\n---------- Clusters ----------\n"
for cluster in out.clusters:
instances = self.conn_emr.list_instance_groups(cluster.id)
master_count = 0
core_count = 0
task_count = 0
for instance in instances.instancegroups:
                if instance.instancegrouptype == "MASTER":
                    master_count += 1
                elif instance.instancegrouptype == "CORE":
                    core_count += 1
                elif instance.instancegrouptype == "TASK":
task_count += 1
print '%d:\n\tID:\t%s\n\tName:\t%s\n\tStatus:\t%s\n\tComposition:\n\t\t%d x Master\n\t\t%d x Core\n' \
'\t\t%d x Task\n--------------------------------\n' \
% (i, cluster.id, cluster.name, cluster.status.state, master_count, core_count, task_count)
i += 1
return out.clusters
if __name__ == '__main__':
from boto.emr.connection import EmrConnection
# input_select_cluster(EmrConnection())
#cluster_menu(EmrConnection())
Cluster().menu()
```
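A minimal sketch (not from the repository) of the same menu dispatch as `Cluster.menu`, written with a dict of callables instead of `eval` on strings; the lambdas are placeholders for the real `Cluster` methods.
```python
# Hedged sketch: dict-of-callables dispatch equivalent to Cluster.menu's eval().
# The lambdas stand in for the real Cluster methods (e.g. cluster.list_clusters()).
def run_menu(actions):
    op = -1
    while op != 0:
        op = input('Your option: ')
        if op in actions:
            actions[op]()
        elif op != 0:
            print 'Invalid option! Please, try again...'

if __name__ == '__main__':
    run_menu({1: lambda: None,    # placeholder for cluster.list_clusters()
              2: lambda: None})   # placeholder for cluster.list_clusters(True)
```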
#### File: SE2015/deprecated/consumerProgram.py
```python
import cPickle
import time
import boto.sqs
from boto.sqs.message import Message
import boto.emr
from boto.emr.step import StreamingStep
from deprecated.emailNotification import sendemail
'''
ACCESS_KEY = "<enter ACCESS_KEY>"
SECRET_KEY = "<enter SECRET_KEY>"
'''
REGION = "eu-west-1"
def sendnotification(msg, status, downloadlink):
    recipients_list = msg['emailaddress']
subject = 'MapReduce Job Notification'
if status == 'COMPLETED':
message = "Your MapReduce job is complete. Download results from: " + downloadlink
        sendemail(recipients_list, subject, message)
def createemrjob(msg):
print "Connecting to EMR"
conn = boto.emr.connect_to_region(REGION)
print "Creating streaming step"
t = time.localtime(time.time())
job_datetime = str(t.tm_year) + str(t.tm_mon) + str(t.tm_mday) + str(t.tm_hour) + str(t.tm_min) + str(t.tm_sec)
outputlocation = 's3n://mybucket/uploadfiles/' + job_datetime
step = StreamingStep(name=job_datetime,
                         mapper=msg['mapper'],
                         reducer=msg['reducer'],
                         input=msg['datafile'],
output=outputlocation)
print "Creating job flow"
jobid = conn.run_jobflow(name=job_datetime,
log_uri='s3n://mybucket/uploadfiles/mapred_logs',
steps=[step])
print "Submitted job flow"
print "Waiting for job flow to complete"
status = conn.describe_jobflow(jobid)
print status.state
while status.state == 'STARTING' or status.state == 'RUNNING' or status.state == 'WAITING' or status.state == 'SHUTTING_DOWN':
time.sleep(10)
status = conn.describe_jobflow(jobid)
print "Job status: " + str(status.state)
print "Completed Job: " + job_datetime
    downloadlink = 'http://mybucket.s3.amazonaws.com/uploadfiles/' + job_datetime + '/part-00000'
sendnotification(msg, status.state, downloadlink)
print "Connecting to SQS"
conn = boto.sqs.connect_to_region(REGION)
queue_name = 'arsh_queue'
print "Connecting to queue: " + queue_name
q = conn.get_all_queues(prefix=queue_name)
count = q[0].count()
print "Total messages in queue: " + str(count)
print "Reading message from queue"
for i in range(count):
    m = q[0].read()
msg = cPickle.loads(m.get_body())
print "Message %d: %s" % (i + 1, msg)
    q[0].delete_message(m)
createemrjob(msg)
print "Read %d messages from queue" % count
```
#### File: SE2015/deprecated/createEnqueueJob.py
```python
import cPickle
import boto.sqs
from boto.sqs.message import Message
import s3upload
'''
ACCESS_KEY = "<enter ACCESS_KEY>"
SECRET_KEY = "<enter SECRET_KEY>"
'''
REGION = "eu-weast-1"
def enqueuejob(datafile, mapper, reducer, emailaddress):
conn = boto.sqs.connect_to_region(REGION)
queue_name = 'arsh_queue'
q = conn.get_all_queues(prefix=queue_name)
msgdict = {'datafile': datafile,
'mapper': mapper,
'reducer': reducer,
'emailaddress': emailaddress}
msg = cPickle.dumps(msgdict)
m = Message()
m.set_body(msg)
    status = q[0].write(m)
def createjob(datafilename, mapfilename, reducefilename, mapreduceprogram, emailaddress):
    s3upload.upload('mybucket', 'uploadedfiles', datafilename)
datafile = 's3n://mybucket/uploadedfiles/' + datafilename
if mapreduceprogram == 'wordcount':
mapper = 's3n://mybucket/uploadedfiles/wordCountMapper.py'
reducer = 's3n://mybucket/uploadedfiles/wordCountReducer.py'
elif mapreduceprogram == 'invertedindex':
mapper = 's3n://mybucket/uploadedfiles/invertedindexMapper.py'
reducer = 's3n://mybucket/uploadedfiles/invertedindexReducer.py'
else:
s3upload.upload('mybucket', 'uploadedfiles', mapfilename)
s3upload.upload('mybucket', 'uploadedfiles', reducefilename)
mapper = 's3n://mybucket/uploadedfiles/' + mapfilename
        reducer = 's3n://mybucket/uploadedfiles/' + reducefilename
enqueuejob(datafile, mapper, reducer, emailaddress)
return datafile, mapper, reducer, emailaddress
```
#### File: Jfcabral/SE2015/dynamo_handler.py
```python
from time import sleep
from sys import exc_info, stderr
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.exceptions import JSONResponseError, ItemNotFound
# This class aims to encapsulate the operations with Amazon NoSQL Engine: Dynamo DB v2
# Docs:
# * http://boto.readthedocs.org/en/latest/dynamodb2_tut.html
# * http://boto.readthedocs.org/en/latest/ref/dynamodb2.html
class DynamoHandler:
CREATE_TABLE_IF_MISSING = True
TABLE_CREATION_TIME = 30
MAP_REDUCE_PROFILE_TABLE = 'map_reduce_profiles'
MAP_REDUCE_PROFILE_SCHEMA = [HashKey('profile_name')]
MAP_REDUCE_PROFILE_THROUGHPUT = {'read': 1, 'write': 1}
def __init__(self, region='us-east-1'):
#self.connection = connect_to_region(region) # bugs the scan list
self.connection = DynamoDBConnection()
self.profiles_table = Table(DynamoHandler.MAP_REDUCE_PROFILE_TABLE,
schema=DynamoHandler.MAP_REDUCE_PROFILE_SCHEMA,
throughput=DynamoHandler.MAP_REDUCE_PROFILE_THROUGHPUT,
connection=self.connection)
"""if DynamoHandler.CREATE_TABLE_IF_MISSING:
try:
self.profiles_table.describe()
except JSONResponseError:
if exc_info()[1].error_code == 'ResourceNotFoundException':
self.configure_first_run()"""
def check_create_table(self):
try:
self.profiles_table.describe()
print '\tMap-Reduce dynamo db already exists'
except JSONResponseError:
if exc_info()[1].error_code == 'ResourceNotFoundException':
self.create_table()
def get_all_profile_names(self):
ret = None
profiles = self.connection.scan(table_name=DynamoHandler.MAP_REDUCE_PROFILE_TABLE,
attributes_to_get=['profile_name'])
if profiles and 'Items' in profiles and len(profiles['Items']) > 0:
ret = profiles['Items']
if ret:
return [elem['profile_name']['S'] for elem in ret]
else:
return None
def list_input_profile(self):
ret = None
profiles = self.get_all_profile_names()
while not ret:
i = 1
for elem in profiles:
print '\t%d.\t%s' % (i, elem)
i += 1
try:
option = int(raw_input("\t0.\tMain Menu\nPlease choose a profile: "))
if option == 0:
break
elif 0 < option <= len(profiles):
ret = self.get_profile_data(profiles[option-1])
else:
print >> stderr, 'Invalid Option!'
except ValueError:
print >> stderr, 'Invalid option'
return ret
def check_if_profile_exists(self, profile_name):
try:
self.profiles_table.get_item(profile_name=profile_name)
ret = True
except ItemNotFound:
ret = False
return ret
def get_profile_data(self, profile_name):
return self.profiles_table.get_item(profile_name=profile_name)._data
# Todo check repeated
def create_profile(self, profile_name, sample_path, mapper_file_path, reducer_file_path, combiner_file_path=None):
if combiner_file_path:
self.profiles_table.put_item(data={'profile_name': profile_name,
'sample_path': sample_path,
'mapper_file_path': mapper_file_path,
'reducer_file_path': reducer_file_path,
'combiner_file_path': combiner_file_path})
else:
self.profiles_table.put_item(data={'profile_name': profile_name,
'sample_path': sample_path,
'mapper_file_path': mapper_file_path,
'reducer_file_path': reducer_file_path})
def create_table(self):
print '\tCreating a new table on Dynamo to save the Map Reduce Profiles. Please wait...'
self.profiles_table = Table.create(DynamoHandler.MAP_REDUCE_PROFILE_TABLE,
schema=DynamoHandler.MAP_REDUCE_PROFILE_SCHEMA,
throughput=DynamoHandler.MAP_REDUCE_PROFILE_THROUGHPUT,
connection=self.connection)
def configure_first_run(self):
self.create_table()
# wait until the table is created before populating it
sleep(self.TABLE_CREATION_TIME)
print 'Populating with default profiles'
# Word Count profile
base_path = 's3://eng-serv-teste3/'
self.create_profile('Word Count',
base_path + 'input/Word_Count',
base_path + 'scripts/WordCountMapper.py',
base_path + 'scripts/WordCountReducer2.py')
if __name__ == '__main__':
handler = DynamoHandler()
#print handler.get_all_profile_names()
print handler.get_profile_data('Word Count')
```
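A hedged sketch of first-time setup and profile registration with the handler above. It assumes AWS credentials with DynamoDB access are configured; the profile name and S3 paths are illustrative, and a freshly created table may take roughly `TABLE_CREATION_TIME` seconds to become active before writes succeed.
```python
# Illustration only: register and list Map-Reduce profiles.
handler = DynamoHandler()
handler.check_create_table()  # creates 'map_reduce_profiles' if it is missing
if not handler.check_if_profile_exists('Bigram Count'):
    handler.create_profile('Bigram Count',
                           's3://eng-serv-teste3/input/Bigram_Count',
                           's3://eng-serv-teste3/scripts/BigramMapper.py',
                           's3://eng-serv-teste3/scripts/BigramReducer.py')
print(handler.get_all_profile_names())
```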
#### File: Jfcabral/SE2015/email_dispatcher.py
```python
import json
from time import sleep
from email_management import send_email
from amazon_utilities import connect_emr, connect_ses, connect_s3, connect_sqs, create_get_queue, set_folder_public_simple, QUEUE_EMAIL_DISPATCH
TIME_WAIT_FOR_MESSAGES = 20 # 20s is the maximum allowed by amazon
SLEEP_TIME_AFTER_EMAIL = 15 # in seconds
# Sends email notifications about the outcome of an EMR step.
# Receives email notification requests from an SQS queue.
class EmailDispatcher:
def __init__(self):
self.jobs = []
self.conn_emr = connect_emr()
self.conn_ses = connect_ses()
self.conn_s3 = connect_s3()
def init_mail_dispatch(self, queue):
while True:
# read message from queue
print 'waiting for messages'
message = queue.read(wait_time_seconds=TIME_WAIT_FOR_MESSAGES)
if message:
print 'New Message!'
# get message content
body = message.get_body()
#print body
# parse message {'recipient', 'link', 'step_id', 'cluster_id'}
self.jobs.append(json.loads(body))
# delete message
queue.delete_message(message)
sleep(SLEEP_TIME_AFTER_EMAIL)
# check if a job has finished
self.check_jobs()
def check_jobs(self):
for elem in self.jobs:
if 'link' in elem and 'recipient' in elem and 'step_id' in elem and 'cluster_id' in elem:
step_info = self.conn_emr.describe_step(elem['cluster_id'], elem['step_id'])
step_data = self.parse_step_info(elem['link'], step_info)
if step_data:
if step_info.status.state == 'COMPLETED':
# Make sure the link is accessible
set_folder_public_simple(elem['link'], self.conn_s3)
send_email(elem['recipient'], step_data, self.conn_ses)
self.jobs.remove(elem)
# returns a dictionary containing all data relevant to the step
def parse_step_info(self, url, step_info):
ret = None
if step_info.status.state in ['COMPLETED', 'TERMINATING', 'TERMINATED']:
ret = {'step_id': step_info.id,
'step_name': step_info.name,
'status': step_info.status.state,
'creation_date': step_info.status.timeline.creationdatetime,
'end_date': step_info.status.timeline.enddatetime,
'link': url}
elif step_info.status.state in ['TERMINATED_WITH_ERRORS', 'FAILED', 'CANCELLED', 'SHUTTING_DOWN']:
ret = {'step_id': step_info.id,
'step_name': step_info.name,
'status': step_info.status.state,
'creation_date': step_info.status.timeline.creationdatetime}
return ret
if __name__ == '__main__':
# connect SQS
conn_sqs = connect_sqs()
# set up the queue (create it if it doesn't exist)
queue = create_get_queue(QUEUE_EMAIL_DISPATCH, conn_sqs)
# start listening from the queue and sending emails
EmailDispatcher().init_mail_dispatch(queue)
"""conn_emr = connect_emr()
step_info = conn_emr.describe_step('j-3O983JX5SNZBI', 's-9V22DHFEBQB4')
EmailDispatcher().parse_step_info('fjdsfjhsdjfhsljflskxhgds', step_info)
print step_info"""
```
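The producer side is not shown here, but `check_jobs` implies the expected message shape: a JSON object with `recipient`, `link`, `step_id` and `cluster_id` keys. A hedged sketch of enqueueing one notification request, reusing the project's `amazon_utilities` helpers (the IDs and link are placeholders):
```python
# Illustrative producer for the dispatcher above.
import json
from boto.sqs.message import Message
from amazon_utilities import connect_sqs, create_get_queue, QUEUE_EMAIL_DISPATCH

queue = create_get_queue(QUEUE_EMAIL_DISPATCH, connect_sqs())
m = Message()
m.set_body(json.dumps({'recipient': 'user@example.com',
                       'link': 's3://some-bucket/output/job-folder',  # placeholder
                       'step_id': 's-XXXXXXXXXXXX',                   # placeholder
                       'cluster_id': 'j-XXXXXXXXXXXX'}))              # placeholder
queue.write(m)
```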
|
{
"source": "jfcamel/meta-mender",
"score": 2
}
|
#### File: tests/acceptance/common.py
```python
from fabric.api import *
from fabric.contrib.files import append
import fabric.network
from distutils.version import LooseVersion
import pytest
import os
import re
import subprocess
import time
import tempfile
import errno
import shutil
import signal
import sys
from contextlib import contextmanager
import conftest
class ProcessGroupPopen(subprocess.Popen):
"""Wrapper for subprocess.Popen that starts the underlying process in a
separate process group. The wrapper overrides kill() and terminate() so
that the corresponding SIGKILL/SIGTERM are sent to the whole process group
and not just the forked process.
Note that the ProcessGroupPopen() constructor hijacks the preexec_fn parameter.
"""
def __init__(self, *args, **kwargs):
def start_new_session():
os.setsid()
# for Python > 3.2 it's enough to set start_new_session=True
super(ProcessGroupPopen, self).__init__(*args,
preexec_fn=start_new_session,
**kwargs)
def __signal(self, sig):
os.killpg(self.pid, sig)
def terminate(self):
self.__signal(signal.SIGTERM)
def kill(self):
self.__signal(signal.SIGKILL)
def start_qemu(qenv=None):
"""Start qemu and return a subprocess.Popen object corresponding to a running
qemu process. `qenv` is a dict of environment variables that will be added
to `subprocess.Popen(..,env=)`.
Once qemu is started, a connection over ssh will be attempted, so the returned
process is actually a qemu instance with a fully booted guest os.
The helper uses `meta-mender-qemu/scripts/mender-qemu` to start qemu, thus
you can use `VEXPRESS_IMG`, `QEMU_DRIVE` and other environment variables to
override the default behavior.
"""
env = dict(os.environ)
if qenv:
env.update(qenv)
proc = ProcessGroupPopen(["../../meta-mender-qemu/scripts/mender-qemu", "-snapshot"],
env=env)
try:
# make sure we are connected.
execute(run_after_connect, "true", hosts = conftest.current_hosts())
execute(qemu_prep_after_boot, hosts = conftest.current_hosts())
except:
# or do the necessary cleanup if we're not
try:
# qemu might have exited and this would raise an exception
print('cleaning up qemu instance with pid {}'.format(proc.pid))
proc.terminate()
except:
pass
proc.wait()
raise
return proc
def start_qemu_block_storage(latest_sdimg, suffix):
"""Start qemu instance running block storage"""
fh, img_path = tempfile.mkstemp(suffix=suffix, prefix="test-image")
# don't need an open fd to temp file
os.close(fh)
# Make a disposable image.
shutil.copy(latest_sdimg, img_path)
# pass QEMU drive directly
qenv = {}
qenv["DISK_IMG"] = img_path
try:
qemu = start_qemu(qenv)
except:
os.remove(img_path)
raise
return qemu, img_path
def start_qemu_flash(latest_vexpress_nor):
"""Start qemu instance running *.vexpress-nor image"""
print("qemu raw flash with image {}".format(latest_vexpress_nor))
# make a temp file, make sure that it has .vexpress-nor suffix, so that
# mender-qemu will know how to handle it
fh, img_path = tempfile.mkstemp(suffix=".vexpress-nor", prefix="test-image")
# don't need an open fd to temp file
os.close(fh)
# vexpress-nor is more complex than sdimg; inside, it is composed of 2 raw
# files that represent 2 separate flash banks (and each file is a 'drive'
# passed to qemu). Because of this, we cannot directly apply qemu-img and
# create a qcow2 image with backing file. Instead make a disposable copy of
# flash image file.
shutil.copyfile(latest_vexpress_nor, img_path)
qenv = {}
# pass QEMU drive directly
qenv["DISK_IMG"] = img_path
qenv["MACHINE"] = "vexpress-qemu-flash"
try:
qemu = start_qemu(qenv)
except:
os.remove(img_path)
raise
return qemu, img_path
def reboot(wait = 120):
with settings(warn_only = True):
try:
run("reboot")
except:
# qemux86-64 is so fast that sometimes the above call fails with
# an exception because the connection was broken before we returned.
# So catch everything, even though it might hide real errors (but
# those will probably be caught below after the timeout).
pass
# Make sure reboot has had time to take effect.
time.sleep(5)
for attempt in range(5):
try:
fabric.network.disconnect_all()
break
except IOError:
# Occasionally we get an IO error here because resource is temporarily
# unavailable.
time.sleep(5)
continue
run_after_connect("true", wait)
qemu_prep_after_boot()
def run_after_connect(cmd, wait=360):
output = ""
start_time = time.time()
with settings(timeout=30, abort_exception=Exception):
while True:
attempt_time = time.time()
try:
output = run(cmd)
break
except BaseException as e:
print("Could not connect to host %s: %s" % (env.host_string, e))
if attempt_time >= start_time + wait:
raise Exception("Could not reconnect to host")
now = time.time()
if now - attempt_time < 5:
time.sleep(60)
continue
return output
def ssh_prep_args():
return ssh_prep_args_impl("ssh")
def scp_prep_args():
return ssh_prep_args_impl("scp")
def ssh_prep_args_impl(tool):
if not env.host_string:
raise Exception("get()/put() called outside of execute()")
cmd = ("%s -C -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" %
tool)
host_parts = env.host_string.split(":")
host = ""
port = ""
port_flag = "-p"
if tool == "scp":
port_flag = "-P"
if len(host_parts) == 2:
host = host_parts[0]
port = "%s%s" % (port_flag, host_parts[1])
elif len(host_parts) == 1:
host = host_parts[0]
port = ""
else:
raise Exception("Malformed host string")
return (cmd, host, port)
def determine_active_passive_part(bitbake_variables):
"""Given the output from mount, determine the currently active and passive
partitions, returning them as a pair in that order."""
mount_output = run("mount")
a = bitbake_variables["MENDER_ROOTFS_PART_A"]
b = bitbake_variables["MENDER_ROOTFS_PART_B"]
if mount_output.find(a) >= 0:
return (a, b)
elif mount_output.find(b) >= 0:
return (b, a)
else:
raise Exception("Could not determine active partition. Mount output:\n {}" \
"\nwas looking for {}".format(mount_output, (a, b)))
# Yocto build SSH is lacking SFTP, let's override and use regular SCP instead.
def put(file, local_path = ".", remote_path = "."):
(scp, host, port) = scp_prep_args()
local("%s %s %s/%s %s@%s:%s" %
(scp, port, local_path, file, env.user, host, remote_path))
# See comment for put().
def get(file, local_path = ".", remote_path = "."):
(scp, host, port) = scp_prep_args()
local("%s %s %s@%s:%s/%s %s" %
(scp, port, env.user, host, remote_path, file, local_path))
def qemu_prep_after_boot():
# Nothing needed ATM.
pass
def qemu_prep_fresh_host():
# Nothing needed ATM.
pass
def manual_uboot_commit():
run("fw_setenv upgrade_available 0")
run("fw_setenv bootcount 0")
def common_board_setup(files=None, remote_path='/tmp', image_file=None):
"""
Deploy and activate an image to a board that uses mender-qa tools.
:param image_file: IMAGE_FILE as passed to deploy-test-image, can be None
:param remote_path: where files will be stored in the remote system
:param files: list of files to deploy
"""
for f in files:
put(os.path.basename(f), local_path=os.path.dirname(f),
remote_path=remote_path)
env_overrides = {}
if image_file:
env_overrides['IMAGE_FILE'] = image_file
run("{} mender-qa deploy-test-image".format(' '.join(
['{}={}'.format(k, v) for k, v in env_overrides.items()])))
with settings(warn_only=True):
sudo("mender-qa activate-test-image")
def common_board_cleanup():
sudo("mender-qa activate-test-image off")
with settings(warn_only=True):
sudo("reboot")
execute(run_after_connect, "true", hosts = conftest.current_hosts())
def common_boot_from_internal():
sudo("mender-qa activate-test-image on")
with settings(warn_only=True):
sudo("reboot")
execute(run_after_connect, "true", hosts = conftest.current_hosts())
def latest_build_artifact(builddir, extension):
output = subprocess.check_output(["sh", "-c", "ls -t %s/tmp*/deploy/images/*/*%s | grep -v data*%s| head -n 1" % (builddir, extension, extension)])
output = output.rstrip('\r\n')
print("Found latest image of type '%s' to be: %s" % (extension, output))
return output
def get_bitbake_variables(target, env_setup="true", export_only=False):
current_dir = os.open(".", os.O_RDONLY)
os.chdir(os.environ['BUILDDIR'])
output = subprocess.Popen("%s && bitbake -e %s" % (env_setup, target),
stdout=subprocess.PIPE,
shell=True,
executable="/bin/bash")
if export_only:
export_only_expr = ""
else:
export_only_expr = "?"
matcher = re.compile('^(?:export )%s([A-Za-z][^=]*)="(.*)"$' % export_only_expr)
ret = {}
for line in output.stdout:
line = line.strip()
match = matcher.match(line)
if match is not None:
ret[match.group(1)] = match.group(2)
output.wait()
os.fchdir(current_dir)
# For some unknown reason, 'MACHINE' is not included in the 'bitbake -e' output.
# We set MENDER_MACHINE in mender-setup.bbclass as a proxy so look for that instead.
if ret.get('MACHINE') is None:
if ret.get('MENDER_MACHINE') is not None:
ret['MACHINE'] = ret.get('MENDER_MACHINE')
else:
raise Exception("Could not determine MACHINE or MENDER_MACHINE value.")
return ret
def signing_key(key_type):
# RSA pregenerated using these.
# openssl genrsa -out files/test-private-RSA.pem 2048
# openssl rsa -in files/test-private-RSA.pem -outform PEM -pubout -out files/test-public-RSA.pem
# EC pregenerated using these.
# openssl ecparam -genkey -name prime256v1 -out /tmp/private-and-params.pem
# openssl ec -in /tmp/private-and-params.pem -out files/test-private-EC.pem
# openssl ec -in files/test-private-EC.pem -pubout -out files/test-public-EC.pem
class KeyPair:
if key_type == "EC":
private = "files/test-private-EC.pem"
public = "files/test-public-EC.pem"
else:
private = "files/test-private-RSA.pem"
public = "files/test-public-RSA.pem"
return KeyPair()
# `capture` can be a bool, meaning the captured output is returned, or a stream,
# in which case the output is redirected there, and the process handle is
# returned instead.
def run_verbose(cmd, capture=False):
if type(capture) is not bool:
print("subprocess.Popen(\"%s\")" % cmd)
return subprocess.Popen(cmd, shell=True, executable="/bin/bash",
stderr=subprocess.STDOUT, stdout=capture)
elif capture:
print("subprocess.check_output(\"%s\")" % cmd)
return subprocess.check_output(cmd, shell=True, executable="/bin/bash",
stderr=subprocess.STDOUT)
else:
print(cmd)
return subprocess.check_call(cmd, shell=True, executable="/bin/bash")
# Capture is true or false and conditionally returns the output.
def run_bitbake(prepared_test_build, target=None, capture=False):
if target is None:
target = prepared_test_build['image_name']
cmd = "%s && bitbake %s" % (prepared_test_build['env_setup'], target)
ps = run_verbose(cmd, capture=subprocess.PIPE)
output = ""
try:
# Cannot use for loop here due to buffering and iterators.
while True:
line = ps.stdout.readline()
if not line:
break
if line.find("is not a recognized MENDER_ variable") >= 0:
pytest.fail("Found variable which is not in mender-vars.json: %s" % line.strip())
if capture:
output += line
else:
sys.stdout.write(line)
finally:
# Empty any remaining lines.
try:
if capture:
output += "".join(ps.stdout.readlines())
else:
ps.stdout.readlines()
except:
pass
ps.wait()
if ps.returncode != 0:
e = subprocess.CalledProcessError(ps.returncode, cmd)
if capture:
e.output = output
raise e
return output
def add_to_local_conf(prepared_test_build, string):
"""Add given string to local.conf before the build. Newline is added
automatically."""
with open(prepared_test_build['local_conf'], "a") as fd:
fd.write('\n## ADDED BY TEST\n')
fd.write("%s\n" % string)
def reset_local_conf(prepared_test_build):
new_file = prepared_test_build['local_conf']
old_file = prepared_test_build['local_conf_orig']
# Restore original local.conf
run_verbose("cp %s %s" % (old_file, new_file))
def versions_of_recipe(recipe):
"""Returns a list of all the versions we have of the given recipe, excluding
git recipes."""
versions = []
for entry in os.listdir("../../meta-mender-core/recipes-mender/%s/" % recipe):
match = re.match(r"^%s_([1-9][0-9]*\.[0-9]+\.[0-9]+[^.]*)\.bb" % recipe, entry)
if match is not None:
versions.append(match.group(1))
return versions
@contextmanager
def make_tempdir(delete=True):
"""context manager for temporary directories"""
tdir = tempfile.mkdtemp(prefix='meta-mender-acceptance.')
print('created dir', tdir)
try:
yield tdir
finally:
if delete:
shutil.rmtree(tdir)
```
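A hedged sketch of how these helpers combine in a test: boot a disposable copy of the latest sdimg, verify the guest is reachable over ssh, then tear the instance down. It assumes a finished Yocto build under `$BUILDDIR` and the networking set up by the `mender-qemu` script; the test itself is illustrative and not part of the suite above.
```python
import os
from fabric.api import execute
import conftest
from common import start_qemu_block_storage, latest_build_artifact, run_after_connect

def test_guest_boots():  # illustrative
    sdimg = latest_build_artifact(os.environ['BUILDDIR'], ".sdimg")
    qemu, img = start_qemu_block_storage(sdimg, suffix=".sdimg")
    try:
        # same pattern start_qemu() itself uses to confirm the guest is up
        execute(run_after_connect, "true", hosts=conftest.current_hosts())
    finally:
        qemu.terminate()
        qemu.wait()
        os.remove(img)
```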
|
{
"source": "jfcann/va-transformer",
"score": 2
}
|
#### File: preprocessing/mimic/process_for_strong_coupling.py
```python
import os
import numpy as np
import pickle as pickle
import tqdm
from pprint import pprint
from preprocessing_arguments import PreprocessingArguments
def fetch_data_as_numpy(path, var_key):
with open(path, 'rb') as f:
data = pickle.load(f)
di = data[var_key]
return di
def postprocess(args):
print('*' * 17, 'processing data for strong-coupling experiment with the following settings:', sep='\n')
pprint(vars(args), indent=2)
print('*' * 17)
# define and save mapping dicts
mappings_path = os.path.join(args.data_root, "mappings.pkl")
with open(mappings_path, "rb") as f:
mappings_dict = pickle.load(f)
del mappings_dict['itemid2token']['[PAD]']
del mappings_dict['token2itemid'][0]
del mappings_dict['qname2qtoken']['[PAD]']
del mappings_dict['qtoken2qname'][0]
l1 = len(mappings_dict['itemid2token'])
l2 = len(mappings_dict['qname2qtoken'])
itemval2ptokens = dict(zip(
[(i, j)
for i in mappings_dict['itemid2token']
for j in mappings_dict['qname2qtoken']],
range(1, l1 * l2 + 1)
))
itemval2ptokens[('[PAD]', '[PAD]')] = 0
ptokens2itemval = {v:k for k, v in itemval2ptokens.items()}
tokqtok2ptokens = dict(zip(
[(i, j)
for i in mappings_dict['token2itemid']
for j in mappings_dict['qtoken2qname']],
range(1, l1 * l2 + 1)
))
tokqtok2ptokens[(0, 0)] = 0
ptokens2tokqtok = {v: k for k, v in tokqtok2ptokens.items()}
def convert_pair_to_tokens(t, q):
return tokqtok2ptokens[(t, q)]
ptokens2count = {k: 0 for k in ptokens2tokqtok}
# loop through index sets and generate output files
for subset in ['train', 'val', 'test']:
print(f'Processing {subset} set data...')
# data
data_path = os.path.join(args.data_root, f'{subset}_data.pkl')
tokens_np = fetch_data_as_numpy(data_path, f'{subset}_tokens')
quants_np = fetch_data_as_numpy(data_path, f'{subset}_quants')
times_rel = fetch_data_as_numpy(data_path, f'{subset}_times_rel')
# initialise
tokens_sc = dict()
# populate with entries
for i in tqdm.tqdm(tokens_np):
tokens_sc[i] = np.fromiter(
map(convert_pair_to_tokens, tokens_np[i], quants_np[i]),
dtype=np.int32
)
if subset == 'train':
for tok in tokens_sc[i]:
ptokens2count[tok] += 1
# reverse-sort ptokens2count
ptokens2count = dict(sorted(ptokens2count.items(), key=lambda item: item[1], reverse=True))
ptokens2count = {k:v for k, v in ptokens2count.items() if v > 0}
print(ptokens2count)
save_path = os.path.join(args.save_root, f'{subset}_data_sc.pkl')
with open(save_path, 'wb') as f:
pickle.dump({f'{subset}_tokens': tokens_sc,
f'{subset}_times_rel': times_rel
},
f)
del tokens_np, tokens_sc, quants_np, times_rel
print(f'{subset} set data processed!')
mappings_save_path = os.path.join(args.save_root, "mappings_sc.pkl")
with open(mappings_save_path, "wb") as f:
pickle.dump({'itemid2token': tokqtok2ptokens,
'token2itemid': ptokens2tokqtok,
'token2trcount': ptokens2count,
'qname2qtoken': None,
'qtoken2qname': None},
f)
if __name__ == "__main__":
arguments = PreprocessingArguments().parse()
postprocess(arguments)
```
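To make the product-vocabulary construction above concrete, here is a toy version of the pairing: with `l1` event tokens and `l2` quantile tokens, each `(token, quantile)` pair is mapped to one of `l1 * l2` coupled tokens, with the `(PAD, PAD)` pair pinned to 0. The item and quantile names are invented for illustration.
```python
# Toy illustration of the strong-coupling mapping built in postprocess().
itemid2token = {"HR": 1, "BP": 2}               # l1 = 2 (after dropping [PAD])
qname2qtoken = {"low": 1, "mid": 2, "high": 3}  # l2 = 3

l1, l2 = len(itemid2token), len(qname2qtoken)
pairs = [(i, j) for i in itemid2token.values() for j in qname2qtoken.values()]
tokqtok2ptokens = dict(zip(pairs, range(1, l1 * l2 + 1)))
tokqtok2ptokens[(0, 0)] = 0  # keep the [PAD]/[PAD] pair on token 0

assert tokqtok2ptokens[(1, 1)] == 1        # ("HR", "low")
assert tokqtok2ptokens[(2, 3)] == l1 * l2  # ("BP", "high") -> last coupled token
```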
#### File: va-transformer/va_transformers/va_transformers.py
```python
import sys
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from functools import partial
from inspect import isfunction
from collections import namedtuple
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from entmax import entmax15
# constants
DEFAULT_DIM_HEAD = 64
Intermediates = namedtuple('Intermediates', [
'pre_softmax_attn',
'post_softmax_attn'
])
LayerIntermediates = namedtuple('Intermediates', [
'hiddens',
'attn_intermediates'
])
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# positional embeddings
class DepthWiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding=0, stride=1, bias=True, groups=False):
super().__init__()
groups = default(groups, dim_in)
self.net = nn.Sequential(
nn.Conv1d(dim_in, dim_in, kernel_size=kernel_size, padding=padding, groups=dim_in, stride=stride,
bias=bias),
nn.Conv1d(dim_in, dim_out, 1)
)
def forward(self, x):
return self.net(x)
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.scale = dim ** -0.5
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
n = torch.arange(x.shape[1], device=x.device)
pos_emb = self.emb(n)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
return pos_emb * self.scale
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x, seq_dim=1, offset=0):
t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb[None, :, :]
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, qk_dots):
i, j, device = *qk_dots.shape[-2:], qk_dots.device
q_pos = torch.arange(i, dtype=torch.long, device=device)
k_pos = torch.arange(j, dtype=torch.long, device=device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets,
max_distance=self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> () h i j')
return qk_dots + (bias * self.scale) # TODO:
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, max_seq_len, device):
t = torch.arange(max_seq_len, device=device).type_as(self.inv_freq)
freqs = torch.einsum('i, j -> i j', t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
return rearrange(emb, 'n d -> () () n d')
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, freqs):
seq_len = t.shape[-2]
freqs = freqs[:, :, -seq_len:]
return (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
# classes
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
x, *rest = self.fn(x, **kwargs)
return (x * self.value, *rest)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x, **kwargs):
x, *rest = self.fn(x, **kwargs)
return (x * self.g, *rest)
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim, eps=1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
class Residual(nn.Module):
def forward(self, x, residual):
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
def forward(self, x, residual):
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# feedforward
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
# attention.
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head=DEFAULT_DIM_HEAD,
heads=8,
causal=False,
mask=None,
talking_heads=False,
collab_heads=False,
collab_compression=.3,
sparse_topk=None,
use_entmax15=False,
num_mem_kv=0,
dropout=0.,
on_attn=False,
gate_values=False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.mask = mask
qk_dim = v_dim = heads * dim_head # stacking heads together
# collaborative heads
self.collab_heads = collab_heads
if self.collab_heads:
qk_dim = int(collab_compression * qk_dim)
self.collab_mixing = nn.Parameter(torch.randn(heads, qk_dim))
self.to_q = nn.Linear(dim, qk_dim, bias=False) # the attention heads
self.to_k = nn.Linear(dim, qk_dim, bias=False)
self.to_v = nn.Linear(dim, v_dim, bias=False)
self.dropout = nn.Dropout(dropout)
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, v_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# talking heads
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# entmax
self.attn_fn = entmax15 if use_entmax15 else F.softmax
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(v_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(v_dim, dim)
def forward(
self,
x,
context=None,
mask=None,
context_mask=None,
rel_pos=None,
sinusoidal_emb=None,
rotary_pos_emb=None,
prev_attn=None,
mem=None
):
b, n, _, h = *x.shape, self.heads
talking_heads, collab_heads = self.talking_heads, self.collab_heads
device, has_context = x.device, exists(context)
kv_input = default(context, x)
q_input = x # queries always computed from x
k_input = kv_input # keys and values computed from context in cross-attention, otherwise from x
v_input = kv_input
if exists(mem):
k_input = torch.cat((mem, k_input), dim=-2)
v_input = torch.cat((mem, v_input), dim=-2)
if exists(sinusoidal_emb):
# in shortformer, the query would start at a position offset depending on the past cached memory
offset = k_input.shape[-2] - q_input.shape[-2]
q_input = q_input + sinusoidal_emb(q_input, offset=offset)
k_input = k_input + sinusoidal_emb(k_input)
q = self.to_q(q_input) # output is b x n x (dim_head * heads)
k = self.to_k(k_input)
v = self.to_v(v_input)
if not collab_heads:
# split b x n x (h * d) into b x h x n x d, i.e. each head h is an n x d matrix
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
else:
q = einsum('b i d, h d -> b h i d', q, self.collab_mixing)
k = rearrange(k, 'b n d -> b () n d')
v = rearrange(v, 'b n (h d) -> b h n d', h=h)
if exists(rotary_pos_emb) and not has_context:
l = rotary_pos_emb.shape[-1]
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl = map(lambda t: apply_rotary_pos_emb(t, rotary_pos_emb), (ql, kl))
q, k, v = map(lambda t: torch.cat(t, dim=-1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = None
if any(map(exists, (mask, context_mask))):
q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
k_mask = q_mask if not exists(context) else context_mask
k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
q_mask = rearrange(q_mask, 'b i -> b () i ()')
k_mask = rearrange(k_mask, 'b j -> b () () j')
input_mask = q_mask * k_mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
k = torch.cat((mem_k, k), dim=-2)
v = torch.cat((mem_v, v), dim=-2)
if exists(input_mask):
input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
if collab_heads:
k = k.expand(-1, h, -1, -1)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale # inner product between q and k
mask_value = max_neg_value(dots) # gets max neg value for given torch dtype, to be pushed through softmax.
if exists(prev_attn):
dots = dots + prev_attn
pre_softmax_attn = dots.clone()
if talking_heads:
dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
if exists(rel_pos):
dots = rel_pos(dots)
if exists(input_mask):
dots.masked_fill_(~input_mask, mask_value)
del input_mask
if self.causal:
i, j = dots.shape[-2:]
r = torch.arange(i, device=device)
mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
mask = F.pad(mask, (j - i, 0), value=False) # fit mask to correct shape. only necc if q.shape != k.shape
dots.masked_fill_(mask, mask_value)
del mask
if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
top, _ = dots.topk(self.sparse_topk, dim=-1)
vk = top[..., -1].unsqueeze(-1).expand_as(dots)
mask = dots < vk
dots.masked_fill_(mask, mask_value)
del mask
attn = self.attn_fn(dots, dim=-1) # specify attention non-linearity
post_softmax_attn = attn.clone()
attn = self.dropout(attn)
if talking_heads:
attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
out = einsum('b h i j, b h j d -> b h i d', attn, v) # use attn weights with values
out = rearrange(out, 'b h n d -> b n (h d)') # collapse output of all heads and dim again.
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
intermediates = Intermediates(
pre_softmax_attn=pre_softmax_attn,
post_softmax_attn=post_softmax_attn
)
return self.to_out(out), intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads=8,
causal=False,
cross_attend=False,
only_cross=False,
use_scalenorm=False,
use_rmsnorm=False,
use_rezero=False,
rel_pos_bias=False,
rel_pos_num_buckets=32,
rel_pos_max_distance=128,
position_infused_attn=False,
rotary_pos_emb=False,
rotary_emb_dim=None,
custom_layers=None,
sandwich_coef=None,
par_ratio=None,
residual_attn=False,
cross_residual_attn=False,
macaron=False,
pre_norm=True,
gate_residual=False,
**kwargs
):
super().__init__()
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) # note: trims and groups kwargs
attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = position_infused_attn or rel_pos_bias or rotary_pos_emb
self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) if rotary_pos_emb else None
assert rel_pos_num_buckets <= rel_pos_max_distance, \
'number of relative position buckets must be less than the relative position max distance'
self.rel_pos = RelativePositionBias(scale=dim_head ** 0.5, causal=causal, heads=heads,
num_buckets=rel_pos_num_buckets,
max_distance=rel_pos_max_distance) if rel_pos_bias else None
self.pre_norm = pre_norm
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
self.cross_attend = cross_attend
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
norm_fn = nn.Identity if use_rezero else norm_fn
branch_fn = Rezero if use_rezero else None
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert 0 < sandwich_coef <= depth, 'sandwich coefficient should be at most the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
for layer_type in self.layer_types:
if layer_type == 'a':
layer = Attention(dim,
heads=heads,
causal=causal,
**attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads=heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if isinstance(layer, Attention) and exists(branch_fn):
layer = branch_fn(layer)
if gate_residual:
residual_fn = GRUGating(dim)
else:
residual_fn = Residual()
self.layers.append(nn.ModuleList([
norm_fn(),
layer,
residual_fn
]))
def forward(
self,
x,
context=None,
mask=None,
context_mask=None,
mems=None,
return_hiddens=False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
for i, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
is_last = i == (len(self.layers) - 1)
if layer_type == 'a':
hiddens.append(x)
layer_mem = mems.pop(0)
residual = x
if self.pre_norm:
x = norm(x)
if layer_type == 'a':
out, inter = block(x,
mask=mask,
sinusoidal_emb=self.pia_pos_emb,
rel_pos=self.rel_pos,
rotary_pos_emb=rotary_pos_emb,
prev_attn=prev_attn,
mem=layer_mem)
elif layer_type == 'c':
out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
elif layer_type == 'f':
out = block(x)
x = residual_fn(out, residual)
if layer_type in ('a', 'c'):
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if not self.pre_norm and not is_last:
x = norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens=hiddens,
attn_intermediates=intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal=False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal=True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend=True, only_cross=True, **kwargs)
class TransformerWrapper(nn.Module):
def __init__(self, *, num_tokens, max_seq_len, attn_layers, emb_dim=None, token_emb_dim=None, quant_emb_dim=None,
max_mem_len=0., emb_dropout=0., num_quant_tokens=None, use_pos_emb=True, va_transformer=False,
with_values=False, logit_head=None):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
token_emb_dim = default(token_emb_dim, dim)
quant_emb_dim = default(quant_emb_dim, dim)
emb_dim = default(emb_dim, dim)
self.quant_emb_dim = quant_emb_dim
self.with_values = with_values
self.va_transformer = va_transformer
self.num_quant_tokens = num_quant_tokens
self.logit_head = logit_head
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.token_emb = nn.Embedding(num_tokens, token_emb_dim)
if self.va_transformer:
self.quant_emb = nn.Embedding(num_quant_tokens, quant_emb_dim)
emb_dim = quant_emb_dim + token_emb_dim
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) \
if (use_pos_emb and not attn_layers.has_pos_emb) else always(0)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) \
if (use_pos_emb and not attn_layers.has_pos_emb) else always(0)
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_(self.token_emb.weight)
if self.va_transformer:
self.init_(self.quant_emb.weight)
if self.va_transformer:
if self.logit_head == "weak":
assert dim > quant_emb_dim
self.to_logits = nn.Linear(dim - quant_emb_dim, num_tokens)
self.to_quant_logits = nn.Linear(dim, num_quant_tokens)
elif self.logit_head == "separate":
assert dim > quant_emb_dim
self.to_logits = nn.Linear(dim - quant_emb_dim, num_tokens)
self.to_quant_logits = nn.Linear(quant_emb_dim, num_quant_tokens)
elif self.logit_head == "shared":
self.to_logits = nn.Linear(dim, num_tokens)
self.to_quant_logits = nn.Linear(dim, num_quant_tokens)
elif self.logit_head == "hierarchical":
self.project_out = nn.Linear(dim, emb_dim) if emb_dim != dim else nn.Identity()
self.to_logits = nn.Linear(token_emb_dim, num_tokens)
self.to_quant_logits = nn.Linear(quant_emb_dim + 1, num_quant_tokens)
else:
raise Exception("Unknown or missing logit_head specified for chart-transformers!")
else:
self.to_logits = nn.Linear(dim, num_tokens)
@staticmethod
def init_(weights):
nn.init.kaiming_normal_(weights)
def forward(
self,
x,
quants=None,
return_embeddings=False,
mask=None,
return_attn=False,
mems=None,
**kwargs
):
b, n, device = *x.shape, x.device
x = self.token_emb(x)
if self.va_transformer:
quants = self.quant_emb(quants)
x = torch.cat((x, quants), dim=2)
x = x + self.pos_emb(x)
x = self.emb_dropout(x)
x = self.project_emb(x)
x, intermediates = self.attn_layers(x,
mask=mask,
mems=mems,
return_hiddens=True,
**kwargs)
x = self.norm(x)
if self.va_transformer:
if self.logit_head == "weak":
x_token = x[:, :, :-self.quant_emb_dim]
x_quant = x
elif self.logit_head == "separate":
x_token = x[:, :, :-self.quant_emb_dim]
x_quant = x[:, :, -self.quant_emb_dim:]
elif self.logit_head == "shared":
x_token = x_quant = x
elif self.logit_head == "hierarchical":
x = self.project_out(x)
x_token = x[:, :, :-self.quant_emb_dim]
x_quant = x[:, :, -self.quant_emb_dim:]
else:
raise Exception("Unknown or missing logit_head specified for chart-transformers!")
out = self.to_logits(x_token) if not return_embeddings else x_token
if self.logit_head == "hierarchical":
pred = torch.argmax(out, dim=2)
pred = torch.unsqueeze(pred, dim=-1).to(x_quant.dtype)
x_quant = torch.cat((x_quant, pred), dim=2)
quants_out = self.to_quant_logits(x_quant) if not return_embeddings else x_quant
return out, quants_out
else:
out = self.to_logits(x) if not return_embeddings else x
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
```
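A minimal usage sketch of the wrapper in its plain (non value-aware) configuration. The import path and hyperparameters are assumptions for illustration; the shapes follow directly from the code above.
```python
import torch
from va_transformers.va_transformers import TransformerWrapper, Decoder  # path assumed

model = TransformerWrapper(
    num_tokens=1000,                                 # vocabulary size (illustrative)
    max_seq_len=256,
    attn_layers=Decoder(dim=128, depth=2, heads=4),
)

tokens = torch.randint(0, 1000, (2, 64))  # batch of 2 sequences, length 64
logits = model(tokens)                    # causal next-token logits
assert logits.shape == (2, 64, 1000)
```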
|
{
"source": "jfcarr-astronomy/astro-now",
"score": 3
}
|
#### File: jfcarr-astronomy/astro-now/unittest_astro_util.py
```python
import astro_util as AU
import sys
import unittest
class AstroTest(unittest.TestCase):
def test_Azimuth_North(self):
self.assertEqual(AU.AzimuthToCompassDirection(AU.DegreesToRadians(0)), "North")
def test_Azimuth_East(self):
self.assertEqual(AU.AzimuthToCompassDirection(AU.DegreesToRadians(90)), "East")
def test_Azimuth_South(self):
self.assertEqual(AU.AzimuthToCompassDirection(AU.DegreesToRadians(180)), "South")
def test_Azimuth_West(self):
self.assertEqual(AU.AzimuthToCompassDirection(AU.DegreesToRadians(270)), "West")
def main(args):
unittest.main()
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
|
{
"source": "jfcarr/GFPGAN",
"score": 2
}
|
#### File: GFPGAN/tests/test_stylegan2_clean_arch.py
```python
import torch
from gfpgan.archs.stylegan2_clean_arch import StyleGAN2GeneratorClean
def test_stylegan2generatorclean():
"""Test arch: StyleGAN2GeneratorClean."""
# model init and forward (gpu)
if torch.cuda.is_available():
net = StyleGAN2GeneratorClean(
out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=0.5).cuda().eval()
style = torch.rand((1, 512), dtype=torch.float32).cuda()
output = net([style], input_is_latent=False)
assert output[0].shape == (1, 3, 32, 32)
assert output[1] is None
# -------------------- with return_latents ----------------------- #
output = net([style], input_is_latent=True, return_latents=True)
assert output[0].shape == (1, 3, 32, 32)
assert len(output[1]) == 1
# check latent
assert output[1][0].shape == (8, 512)
# -------------------- with randomize_noise = False ----------------------- #
output = net([style], randomize_noise=False)
assert output[0].shape == (1, 3, 32, 32)
assert output[1] is None
# -------------------- with truncation = 0.5 and mixing----------------------- #
output = net([style, style], truncation=0.5, truncation_latent=style)
assert output[0].shape == (1, 3, 32, 32)
assert output[1] is None
# ------------------ test make_noise ----------------------- #
out = net.make_noise()
assert len(out) == 7
assert out[0].shape == (1, 1, 4, 4)
assert out[1].shape == (1, 1, 8, 8)
assert out[2].shape == (1, 1, 8, 8)
assert out[3].shape == (1, 1, 16, 16)
assert out[4].shape == (1, 1, 16, 16)
assert out[5].shape == (1, 1, 32, 32)
assert out[6].shape == (1, 1, 32, 32)
# ------------------ test get_latent ----------------------- #
out = net.get_latent(style)
assert out.shape == (1, 512)
# ------------------ test mean_latent ----------------------- #
out = net.mean_latent(2)
assert out.shape == (1, 512)
```
|
{
"source": "jfcarter2358/aphelper",
"score": 3
}
|
#### File: aphelper/aphelper/enums.py
```python
from enum import Enum, unique
class BaseEnum(Enum):
@classmethod
def values(cls) -> list:
"""Returns a list of raw values for the class"""
values = [member.value for role, member in cls.__members__.items()]
return values
@unique
class Keys(BaseEnum):
"""Represents categories of objects in cli definition files"""
META = "meta"
SUBPARSERS = "subparsers"
SUBCOMMANDS = "subcommands"
@unique
class LogicalOperators(BaseEnum):
"""Represents logical operators that can be used in determining required arguments"""
NOT = "NOT"
OR = "OR"
AND = "AND"
XOR = "XOR"
NAND = "NAND"
NOR = "NOR"
@unique
class SubparserMeta(BaseEnum):
"""Represents fields that are used to define subparser metadata"""
PARSER_DESCRIPTION = "parser_description"
PARSER_HELP = "parser_help"
SUBPARSER_TITLE = "subparser_title"
SUBPARSER_DESCRIPTION = "subparser_description"
SUBPARSER_DEST = "subparser_dest"
@unique
class SubcommandMeta(BaseEnum):
"""Represents fields that are used to define subcommand metadata"""
SUBCOMMAND_DESCRIPTION = "description"
SUBCOMMAND_HELP = "help"
SUBCOMMAND_FUNCTION = "function"
SUBCOMMAND_REQUIRES = "requires"
class Argument(BaseEnum):
ARGUMENT_SHORT = "short"
ARGUMENT_LONG = "long"
```
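A brief hedged usage sketch of the enum helpers above; the import path follows the package layout in the file header.
```python
from aphelper.enums import Keys, LogicalOperators, SubcommandMeta

assert Keys.values() == ["meta", "subparsers", "subcommands"]
assert "XOR" in LogicalOperators.values()
assert SubcommandMeta.SUBCOMMAND_FUNCTION.value == "function"
```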
|
{
"source": "jfcarter2358/calligraphy",
"score": 3
}
|
#### File: calligraphy/calligraphy_scripting/cli.py
```python
from __future__ import annotations
import sys
import os
from calligraphy_scripting import parser
from calligraphy_scripting import runner
from calligraphy_scripting import transpiler
from calligraphy_scripting import __version__
# Setup global helper variables
here = os.path.dirname(os.path.abspath(__file__))
if "-n" in sys.argv or "--no-ansi" in sys.argv: # pragma: no cover
ANSI_BOLD = ""
ANSI_RED = ""
ANSI_GREEN = ""
ANSI_BLUE = ""
ANSI_RESET = ""
transpiler.ANSI_GREEN = ""
transpiler.ANSI_CYAN = ""
transpiler.ANSI_BLUE = ""
transpiler.ANSI_RESET = ""
else:
ANSI_BOLD = "\033[1m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_BLUE = "\033[34m"
ANSI_RESET = "\033[0m"
def version() -> None:
"""Print out the currently installed version of Calligraphy"""
# Output the detected version
print(
f"{ANSI_GREEN}{ANSI_BOLD}Calligraphy{ANSI_RESET}: {ANSI_BLUE}{__version__}{ANSI_RESET}"
)
def explain(path: str) -> None:
"""Print out the source code with language annotations
Args:
path (str): Path to the Calligraphy script file
"""
if path == "-":
# Grab the code from stdin
contents = "\n".join(sys.stdin.readlines())
else:
# Open the file to parse
with open(path, encoding="utf-8") as code_file:
contents = code_file.read()
# Process the contents
contents, inline_indices = parser.handle_line_breaks(contents)
lines, langs = parser.determine_language(contents)
explanation = transpiler.explain(lines, langs, inline_indices)
print(explanation)
def intermediate(path: str, args: list) -> None:
"""Print out the intermediate Python code that will be run
Args:
path (str): Path to the Calligraphy script file
args (list): Command line arguments to pass to the program
"""
if path == "-":
# Grab the code from stdin
contents = "\n".join(sys.stdin.readlines())
else:
# Open the file to parse
with open(path, encoding="utf-8") as code_file:
contents = code_file.read()
# Process the contents
contents, inline_indices = parser.handle_line_breaks(contents)
contents = parser.handle_sourcing(contents)
lines, langs = parser.determine_language(contents)
transpiled = transpiler.transpile(lines, langs, inline_indices)
# Add the header to enable functionality
with open(os.path.join(here, "data", "header.py"), encoding="utf-8") as header_file:
header = header_file.read()
header = header.replace('"PROGRAM_ARGS"', str(["calligraphy"] + args))
code = f"{header}\n\n{transpiled}"
print(code)
def execute(path: str, args: list) -> None:
"""Run a Calligraphy script
Args:
path (str): Path to the Calligraphy script file
args (list): Command line arguments to pass to the program
"""
if path == "-":
# Grab the code from stdin
contents = "\n".join(sys.stdin.readlines())
else:
# Open the file to parse
with open(path, encoding="utf-8") as code_file:
contents = code_file.read()
# Run the code
try:
runner.execute(contents, args=[sys.argv[1]] + args)
except Exception:
help_prefix = f'Use `calligraphy -i {path} {" ".join(args)}'.strip()
print(f"{help_prefix}` to see the intermediate Python for debugging")
def cli() -> None:
"""Handle command line parsing"""
# Help text to be displayed if need be
help_text = f"""{ANSI_GREEN}usage{ANSI_RESET}: calligraphy [option] [file | -] [arg]
{ANSI_BOLD}{ANSI_BLUE}options:{ANSI_RESET}
-h, --help Show this help message and exit
-e, --explain Parse input and show the language breakdown of the source
-v, --version Print out the version of Calligraphy and exit
-i, --intermediate Print out the compiled Python code and exit
-n, --no-ansi Print without ANSI terminal colors
{ANSI_BOLD}{ANSI_BLUE}arguments:{ANSI_RESET}
file Program read from script file
- Program read from stdin
arg ... Arguments passed to the program"""
args = sys.argv[1:]
# Check if any arguments have been passed
if len(args) == 0:
print(help_text)
sys.exit(1)
# Setup variable defaults
flag_intermediate = False
flag_explain = False
program_path = ""
program_args = []
# Parse arguments
for arg in args:
if program_path:
program_args.append(arg)
continue
if arg in ("-n", "--no-ansi"):
continue # pragma: no cover
if arg in ("-h", "--help"):
print(help_text)
sys.exit(0)
if arg in ("-v", "--version"):
version()
sys.exit(0)
if arg in ("-i", "--intermediate"):
flag_intermediate = True
if flag_explain:
print(
f"{ANSI_RED}{ANSI_BOLD}[ERROR]{ANSI_RESET} :: Both the `intermediate` and `explain` options cannot be set at the same time."
)
sys.exit(1)
continue # pragma: no cover
if arg in ("-e", "--explain"):
flag_explain = True
if flag_intermediate:
print(
f"{ANSI_RED}{ANSI_BOLD}[ERROR]{ANSI_RESET} :: Both the `intermediate` and `explain` options cannot be set at the same time."
)
sys.exit(1)
continue # pragma: no cover
program_path = arg
# Make sure a program path was supplied
if not program_path:
print(
f"{ANSI_RED}{ANSI_BOLD}[ERROR]{ANSI_RESET} :: Program input is required, either supply a file path or use `-` for stdin"
)
sys.exit(1)
# Handle any set flags
if flag_explain:
explain(program_path)
sys.exit(0)
if flag_intermediate:
intermediate(program_path, program_args)
sys.exit(0)
# If we did nothing else then run the program
execute(program_path, program_args)
if __name__ == "__main__":
cli() # pragma: no cover
```
#### File: calligraphy/calligraphy_scripting/runner.py
```python
import os
import sys
import traceback
from calligraphy_scripting import parser
from calligraphy_scripting import transpiler
from calligraphy_scripting import utils
here = os.path.dirname(os.path.abspath(__file__))
def execute(contents: str, args: list) -> None:
"""Run Calligraphy code from another program
Args:
contents (str): The Calligraphy code to run
args: (list): The arguments to pass to the script
"""
# Process the contents
contents, inline_indices = parser.handle_line_breaks(contents)
contents = parser.handle_sourcing(contents)
lines, langs = parser.determine_language(contents)
transpiled = transpiler.transpile(lines, langs, inline_indices)
# Add the header to enable functionality
header = utils.load_header()
header = header.replace('"PROGRAM_ARGS"', str(args))
code = f"{header}\n\n{transpiled}"
# Run the code
try:
exec(code, globals())
except KeyboardInterrupt:
sys.exit()
except Exception as exception:
trace = traceback.format_exc()
parts = trace.split("\n")
idx = 1
exception_out = [parts[0]]
while not parts[idx].startswith(' File "<string>"') and idx < len(parts):
idx += 1
while idx < len(parts):
exception_out.append(parts[idx])
idx += 1
print("\n".join(exception_out))
raise exception
```
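A hedged sketch of embedding the runner from another Python program. It assumes that a script containing only plain Python passes through the Calligraphy parser unchanged; real scripts would mix shell lines as well. The script text and argument list are illustrative.
```python
from calligraphy_scripting import runner

script = 'name = "world"\nprint(f"hello, {name}")\n'
# args[0] plays the role of the script name; the rest become the program's argv
runner.execute(script, args=["example.script"])
```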
#### File: calligraphy/tests/test_cli.py
```python
from calligraphy_scripting.cli import __version__
from calligraphy_scripting import cli
import os
import pytest
import io
import sys
import re
class MockIO():
def __init__(self, stdin=''):
self._stdout = io.StringIO()
self._stderr = io.StringIO()
self._stdin = io.StringIO(stdin)
def __enter__(self):
sys.stdout = self._stdout
sys.stderr = self._stderr
sys.stdin = self._stdin
return self
def __exit__(self, _, __, ___):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.stdin = sys.__stdin__
@property
def stdout(self):
self._stdout.seek(0)
return self._stdout.read()
@property
def stderr(self):
self._stderr.seek(0)
return self._stderr.read()
here = os.path.dirname(os.path.abspath(__file__))
def escape_ansi(line):
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
def test_version(capfd):
cli.version()
out, _ = capfd.readouterr()
assert escape_ansi(out) == f"Calligraphy: {__version__}\n"
def test_explain(capfd):
file_path = os.path.join(here, 'data', 'data.txt')
with open(os.path.join(here, 'data', 'cli.explain.out')) as out_file:
explain_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.explain.stdin.out')) as out_file:
explain_stdin_out = out_file.read()
with open(os.path.join(here, 'data', 'test1.script')) as script_file:
script = script_file.read()
# Test with file
cli.explain(os.path.join(here, 'data', 'test1.script'))
out, _ = capfd.readouterr()
assert escape_ansi(out) == explain_out
# Test with stdin
with MockIO(stdin=script) as mock:
cli.explain('-')
out = mock.stdout
assert escape_ansi(out) == explain_stdin_out
def test_intermediate(capfd):
file_path = os.path.join(here, 'data', 'data.txt')
with open(os.path.join(here, 'data', 'cli.intermediate.out')) as out_file:
intermediate_out = out_file.read()
intermediate_out = intermediate_out.replace('<FILE_PATH>', file_path)
with open(os.path.join(here, 'data', 'cli.intermediate.stdin.out')) as out_file:
intermediate_stdin_out = out_file.read()
intermediate_stdin_out = intermediate_stdin_out.replace('<FILE_PATH>', file_path)
with open(os.path.join(here, 'data', 'test1.script')) as script_file:
script = script_file.read()
# Test with file
cli.intermediate(os.path.join(here, 'data', 'test1.script'), [file_path, 'Plagueis'])
out, _ = capfd.readouterr()
assert escape_ansi(out) == intermediate_out
# Test with stdin
with MockIO(stdin=script) as mock:
cli.intermediate('-', [file_path, 'Plagueis'])
out = mock.stdout
assert escape_ansi(out) == intermediate_stdin_out
def test_execute(capfd):
file_path = os.path.join(here, 'data', 'data.txt')
with open(os.path.join(here, 'data', 'cli.execute.out')) as out_file:
execute_out = out_file.read()
execute_out = execute_out.replace('<FILE_PATH>', file_path)
with open(os.path.join(here, 'data', 'test1.script')) as script_file:
script = script_file.read()
# Test with file
cli.execute(os.path.join(here, 'data', 'test1.script'), [file_path, 'Plagueis'])
out, _ = capfd.readouterr()
assert escape_ansi(out) == execute_out
# Test with stdin
with MockIO(stdin=script) as mock:
cli.execute('-', [file_path, 'Plagueis'])
out = mock.stdout
assert escape_ansi(out) == execute_out
def test_calligraphy_cli(capfd):
file_path = os.path.join(here, 'data', 'data.txt')
with open(os.path.join(here, 'data', 'cli.version.out')) as out_file:
version_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.explain.out')) as out_file:
explain_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.explain.stdin.out')) as out_file:
explain_stdin_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.intermediate.out')) as out_file:
intermediate_out = out_file.read()
intermediate_out = intermediate_out.replace('<FILE_PATH>', file_path)
with open(os.path.join(here, 'data', 'cli.execute.out')) as out_file:
execute_out = out_file.read()
execute_out = execute_out.replace('<FILE_PATH>', file_path)
with open(os.path.join(here, 'data', 'cli.cli.help.out')) as out_file:
help_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.cli.flag_conflict.out')) as out_file:
conflict_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.cli.no_program.out')) as out_file:
no_program_out = out_file.read()
with open(os.path.join(here, 'data', 'test1.script')) as script_file:
script = script_file.read()
# test no args
sys.argv = ['foobar']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
out, _ = capfd.readouterr()
assert escape_ansi(out) == help_out
# Test help flag
sys.argv = ['foobar', '-h']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == help_out
# Test long help flag
sys.argv = ['foobar', '--help']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == help_out
# Test version flag
sys.argv = ['foobar', '-v']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == version_out
# Test long version flag
sys.argv = ['foobar', '--version']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == version_out
# Test intermediate flag
sys.argv = ['foobar', '-i', os.path.join(here, 'data', 'test1.script'), file_path, 'Plagueis']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == intermediate_out
# Test long intermediate flag
sys.argv = ['foobar', '--intermediate', os.path.join(here, 'data', 'test1.script'), file_path, 'Plagueis']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == intermediate_out
# Test intermediate flag conflict
sys.argv = ['foobar', '--intermediate', '-e', os.path.join(here, 'data', 'test1.script'), file_path, 'Plagueis']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
out, _ = capfd.readouterr()
assert escape_ansi(out) == conflict_out
# Test explain flag
sys.argv = ['foobar', '-e', os.path.join(here, 'data', 'test1.script')]
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == explain_out
# Test long explain flag
sys.argv = ['foobar', '--explain', os.path.join(here, 'data', 'test1.script')]
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == explain_out
# Test explain flag conflict
sys.argv = ['foobar', '-e', '-i', os.path.join(here, 'data', 'test1.script')]
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
out, _ = capfd.readouterr()
assert escape_ansi(out) == conflict_out
# Test not passing a path
sys.argv = ['foobar', '-e']
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
out, _ = capfd.readouterr()
assert escape_ansi(out) == no_program_out
# Test running a script
sys.argv = ['foobar', os.path.join(here, 'data', 'test1.script'), file_path, 'Plagueis']
cli.cli()
out, _ = capfd.readouterr()
assert escape_ansi(out) == execute_out
def test_help_flag(capfd):
with open(os.path.join(here, 'data', 'cli.help_flag.out')) as out_file:
help_flag_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.explain2.out')) as out_file:
explain_out = out_file.read()
# Test help flag passing
sys.argv = ['foobar', os.path.join(here, 'data', 'test2.script'), '-h']
cli.cli()
out, _ = capfd.readouterr()
assert escape_ansi(out) == help_flag_out
# Test explain output with source
sys.argv = ['foobar', '-e', os.path.join(here, 'data', 'test2.script')]
with pytest.raises(SystemExit) as pytest_wrapped_e:
cli.cli()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
out, _ = capfd.readouterr()
assert escape_ansi(out) == explain_out
def test_shellopts(capfd):
with open(os.path.join(here, 'data', 'cli.shellopts.out')) as out_file:
shellopts_out = out_file.read()
with open(os.path.join(here, 'data', 'cli.shellopts.err')) as err_file:
shellopts_err = err_file.read()
    # Test shell options handling
sys.argv = ['foobar', os.path.join(here, 'data', 'test4.script')]
cli.cli()
out, err = capfd.readouterr()
assert escape_ansi(out) == shellopts_out
assert escape_ansi(err) == shellopts_err
def test_formatting(capfd):
with open(os.path.join(here, 'data', 'cli.formatting.out')) as out_file:
formatting_out = out_file.read()
    # Test output formatting
sys.argv = ['foobar', os.path.join(here, 'data', 'test5.script')]
cli.cli()
out, _ = capfd.readouterr()
assert escape_ansi(out) == formatting_out
def test_exception(capfd):
file_path = os.path.join(here, 'data', 'test6.script')
with open(os.path.join(here, 'data', 'cli.exception.out')) as out_file:
formatting_out = out_file.read()
formatting_out = formatting_out.replace('<FILE_PATH>', file_path)
    # Test exception output
sys.argv = ['foobar', os.path.join(here, 'data', 'test6.script')]
cli.cli()
out, _ = capfd.readouterr()
assert escape_ansi(out) == formatting_out
```
|
{
"source": "jfcarter2358/Ceres",
"score": 2
}
|
#### File: Ceres/src/exporter.py
```python
import common
import utils
def get_data(id):
group, block, offset, length = utils.extract_from_id(id)
with open('{}/{}/{}'.format(common.CERES_HOME + "/data", group, block), "rb") as f:
f.seek(common.BLOCK_SIZE * block + offset)
data = f.read(length)
return data.decode(common.DATA_ENCODING)
```
#### File: Ceres/src/manager.py
```python
import common
import utils
import index
import exporter
def delete_data(ident, free_data):
data = exporter.get_data(ident)
formatted = utils.map_dict(data, ident)
for k in common.SCHEMA['meta']:
index.remove_ident(ident, '{}/{}'.format(k, formatted[k]))
group, block, offset, length = utils.extract_from_id(ident)
with open('{}/{}/{}'.format(common.CERES_HOME + "/data", group, block), "r+b") as f:
f.seek(common.BLOCK_SIZE * block + offset)
f.write('\x00'.encode(common.DATA_ENCODING) * length)
free_data[group][block].append({'start': offset, 'end': offset + length})
return free_data
def merge_free(free_data):
for group in range(0, len(free_data)):
for block in range(0, len(free_data[group])):
data = sorted(free_data[group][block], key = lambda x: x['start'])
i = 1
while i < len(data):
if data[i - 1]['end'] == data[i]['start']:
data[i - 1]['end'] = data[i]['end']
del data[i]
else:
i += 1
free_data[group][block] = data
return free_data
```
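As a standalone illustration (assuming the Ceres `src/` directory is on the import path), `merge_free` coalesces adjacent free regions within each block:
```python
# Hedged sketch: adjacent free regions inside a block are merged into one.
from manager import merge_free  # assumption: Ceres src/ is importable

free_data = [  # group 0 -> block 0 -> list of free regions
    [[{'start': 0, 'end': 10}, {'start': 10, 'end': 25}, {'start': 40, 'end': 50}]]
]
print(merge_free(free_data))
# [[[{'start': 0, 'end': 25}, {'start': 40, 'end': 50}]]]
```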
#### File: Ceres/src/utils.py
```python
import common
def merge_lists(A, B):
out = []
for i in range(0, len(A)):
if len(B) == 0:
out += A[i:]
break
while B[0] < A[i]:
out.append(B[0])
B.pop(0)
            if len(B) == 0:
                # B is exhausted; append the rest of A and stop so A[i] is not added twice.
                out += A[i:]
                return out
out.append(A[i])
out += B
return out
def extract_from_id(id):
parts = id.split(':')
return int(parts[0]), int(parts[1]), int(parts[2]), int(parts[3])
def map_dict(datum, ident):
data = datum.split(',')
out = {'id': ident}
try:
for i in range(0, len(common.SCHEMA['order'])):
out[common.SCHEMA['order'][i]] = data[i].replace('<COMMA>', ',')
except:
print('MISSING DATA')
print(data)
return out
```
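A short sanity check of the helpers above (again assuming the Ceres `src/` directory is importable); the ID format is `<group>:<block>:<offset>:<length>`:
```python
# Hedged sketch of the utils helpers.
from utils import extract_from_id, merge_lists  # assumption: Ceres src/ is importable

print(extract_from_id("0:3:128:64"))      # (0, 3, 128, 64)
print(merge_lists([1, 4, 7], [2, 3, 9]))  # [1, 2, 3, 4, 7, 9] (both inputs must already be sorted)
```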
|
{
"source": "jfcarter2358/ceres",
"score": 2
}
|
#### File: test/regression/test_regression.py
```python
from ceresdb_python import __version__
from ceresdb_python import Connection
import json
import pytest
CERESDB_USERNAME="ceresdb"
CERESDB_PASSWORD="<PASSWORD>"
CERESDB_HOST="localhost"
CERESDB_PORT=7437
def test_database():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
# Delete resources
conn.query("delete database foo")
# Get databases when none exist
expected_data = [
{"name":"_auth"},
]
data = conn.query("get database")
assert data == expected_data
# Post database
expected_data = [
{"name":"_auth"},
{"name":"foo"}
]
conn.query("post database foo")
data = conn.query("get database")
assert data == expected_data
# Delete database
expected_data = [
{"name":"_auth"},
]
conn.query("delete database foo")
data = conn.query("get database")
assert data == expected_data
def test_collection():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
# Delete resources
conn.query("delete database foo")
# Create resources
conn.query("post database foo")
# Get collections when none exist
expected_data = [
{"name":"_users",'schema':{'role':'STRING','username':'STRING'}}
]
data = conn.query("get collection foo")
assert data == expected_data
# Post collection
expected_data = [
{"name":"_users",'schema':{'role':'STRING','username':'STRING'}},
{"name":"bar",'schema':{'hello':'STRING','world':'INT'}}
]
input_data = {"hello":"STRING","world":"INT"}
conn.query(f"post collection foo.bar {json.dumps(input_data)}")
data = conn.query("get collection foo | ORDERASC name")
assert data == expected_data
# Put collection
expected_data = [
{"name":"_users",'schema':{'role':'STRING','username':'STRING'}},
{"name":"bar",'schema':{'hello':'STRING','world':'STRING'}}
]
input_data = {"hello":"STRING","world":"STRING"}
conn.query(f"put collection foo.bar {json.dumps(input_data)}")
data = conn.query("get collection foo | ORDERASC name")
data = sorted(data, key=lambda d: d['name'])
assert data == expected_data
# Delete collection
expected_data = [
{"name":"_users",'schema':{'role':'STRING','username':'STRING'}}
]
conn.query("delete collection foo.bar")
data = conn.query("get collection foo")
assert data == expected_data
# Delete resources
conn.query("delete database foo")
def test_record():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
# Delete resources
conn.query("delete database foo")
# Create resources
conn.query("post database foo")
conn.query("post collection foo.bar {\"item\":\"STRING\",\"price\":\"FLOAT\",\"in_stock\":\"BOOL\",\"count\":\"INT\"}")
# Get records when none exist
data = conn.query("get record foo.bar *")
assert data == []
# Post record
expected_data = [
{"item": "bolt", "price": 0.8, "in_stock": True, "count": 20},
{"item": "nail", "price": 0.3, "in_stock": True, "count": 10},
{"item": "nut", "price": 0.5, "in_stock": True, "count": 50},
{"item": "screw", "price": 0.2, "in_stock": False, "count": 0}
]
input_data = [
{"item": "bolt", "price": 0.8, "in_stock": True, "count": 20},
{"item": "nail", "price": 0.3, "in_stock": True, "count": 10},
{"item": "nut", "price": 0.5, "in_stock": True, "count": 50},
{"item": "screw", "price": 0.2, "in_stock": False, "count": 0}
]
conn.query(f"post record foo.bar {json.dumps(input_data)}")
data = conn.query("get record foo.bar [\"item\",\"price\",\"in_stock\",\"count\"] | ORDERASC item")
assert data == expected_data
# Put record
expected_data = [
{"item": "bolt", "price": 0.8, "in_stock": True, "count": 20},
{"item": "nail", "price": 0.3, "in_stock": True, "count": 10},
{"item": "nut", "price": 0.5, "in_stock": True, "count": 60},
{"item": "screw", "price": 0.2, "in_stock": False, "count": 0}
]
input_data = conn.query(f"get record foo.bar | filter item = \"nut\"")[0]
input_data["count"] = 60
conn.query(f"put record foo.bar {json.dumps(input_data)}")
data = conn.query("get record foo.bar [\"item\",\"price\",\"in_stock\",\"count\"] | ORDERASC item")
assert data == expected_data
# Patch record
expected_data = [
{"item": "bolt", "price": 1.0, "in_stock": True, "count": 20},
{"item": "nail", "price": 1.0, "in_stock": True, "count": 10},
{"item": "nut", "price": 1.0, "in_stock": True, "count": 60},
{"item": "screw", "price": 1.0, "in_stock": False, "count": 0}
]
input_data = {"price": 1.0}
conn.query(f"get record foo.bar | patch record foo.bar - {json.dumps(input_data)}")
data = conn.query("get record foo.bar [\"item\",\"price\",\"in_stock\",\"count\"] | ORDERASC item")
assert data == expected_data
# Delete record
expected_data = []
input_data = [datum[".id"] for datum in conn.query("get record foo.bar .id")]
conn.query(f"delete record foo.bar {json.dumps(input_data)}")
data = conn.query("get record foo.bar *")
assert data == expected_data
# Delete resources
conn.query("delete collection foo.bar")
conn.query("delete database foo")
def test_permit():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
# Delete resources
conn.query("delete database foo")
# Create resources
conn.query("post database foo")
conn.query("post user {\"username\":\"readonly\",\"role\":\"READ\",\"password\":\"<PASSWORD>\"}")
# Get permit when none exist
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
]
data = conn.query("get permit foo [\"username\",\"role\"]")
assert data == expected_data
# Post permit
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
{"username": "readonly", "role": "READ"}
]
input_data = [
{"username": "readonly", "role": "READ"}
]
conn.query(f"post permit foo {json.dumps(input_data)}")
data = conn.query("get permit foo [\"username\",\"role\"] | ORDERASC username")
assert data == expected_data
# Verify permission check
with pytest.raises(Exception):
input_data = {'role':'STRING','username':'STRING'}
conn_readonly = Connection("readonly", "readonly", CERESDB_HOST, CERESDB_PORT)
conn_readonly.query(f"post collection foo.bar {json.dumps(input_data)}")
# Put permit
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
{"username": "readonly", "role": "WRITE"}
]
input_data = conn.query(f"get permit foo | filter username = \"readonly\"")[0]
input_data["role"] = "WRITE"
conn.query(f"put permit foo {json.dumps(input_data)}")
data = conn.query("get permit foo [\"username\",\"role\"] | ORDERASC username")
assert data == expected_data
# Delete permit
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
]
input_data = [datum[".id"] for datum in conn.query("get permit foo .id | filter username != \"ceresdb\"")]
conn.query(f"delete permit foo {json.dumps(input_data)}")
data = conn.query("get permit foo [\"username\",\"role\"] | ORDERASC username")
assert data == expected_data
# Delete resources
conn.query("delete collection foo.bar")
conn.query("delete database foo")
def test_user():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
# Delete resources
input_data = [datum[".id"] for datum in conn.query("get user .id | filter username != \"ceresdb\"")]
conn.query(f"delete user {json.dumps(input_data)}")
# Get user when none exist
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
]
data = conn.query("get user [\"username\",\"role\"]")
assert data == expected_data
# Post user
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
{"username": "readonly", "role": "READ"}
]
input_data = [
{"username": "readonly", "password": "<PASSWORD>", "role": "READ"}
]
conn.query(f"post user {json.dumps(input_data)}")
data = conn.query("get user [\"username\",\"role\"] | ORDERASC username")
assert data == expected_data
# Verify permission check
with pytest.raises(Exception):
input_data = {'role':'STRING','username':'STRING'}
conn_readonly = Connection("readonly", "readonly", CERESDB_HOST, CERESDB_PORT)
conn_readonly.query(f"post collection foo.bar {json.dumps(input_data)}")
# Put user
expected_data = [
{"username": "readonly", "role": "WRITE"},
{"username": "ceresdb", "role": "ADMIN"}
]
input_data = conn.query(f"get user | filter username = \"readonly\"")[0]
input_data["role"] = "WRITE"
input_data["password"] = "<PASSWORD>"
conn.query(f"put user {json.dumps(input_data)}")
data = conn.query("get user [\"username\",\"role\"] | ORDERDSC username")
assert data == expected_data
# Delete user
expected_data = [
{"username": "ceresdb", "role": "ADMIN"},
]
input_data = [datum[".id"] for datum in conn.query("get user .id | filter username != \"ceresdb\"")]
conn.query(f"delete user {json.dumps(input_data)}")
data = conn.query("get user [\"username\",\"role\"] | ORDERASC username")
assert data == expected_data
```
#### File: test/stress/test_stress.py
```python
from ceresdb_python import __version__
from ceresdb_python import Connection
import json
import pytest
import matplotlib.pyplot as plt
CERESDB_USERNAME="ceresdb"
CERESDB_PASSWORD="<PASSWORD>"
CERESDB_HOST="localhost"
CERESDB_PORT=7437
def test_stress_1000():
conn = Connection(CERESDB_USERNAME, CERESDB_PASSWORD, CERESDB_HOST, CERESDB_PORT)
conn.query("post database foo")
conn.query("post collection foo.bar {\"foo\":\"STRING\",\"idx\":\"INT\"}")
max_count = 1000
iterations = 100
# populate with records
timings_post = []
timings_get = []
for i in range(0, iterations):
input_data = [{"foo": "bar", "idx": j} for j in range(0, max_count)]
_, timing = conn.timed_query(f"post record foo.bar {json.dumps(input_data)}")
timings_post.append(timing)
_, timing = conn.timed_query(f"get record foo.bar *")
timings_get.append(timing)
with open(f'timing_{max_count}_post.json', 'w') as f:
json.dump(timings_post, f, indent=4)
with open(f'timing_{max_count}_get.json', 'w') as f:
json.dump(timings_get, f, indent=4)
conn.query("delete collection foo.bar")
conn.query("delete database foo")
get_send_x = [(i+1) * max_count for i in range(0, iterations)]
get_send_y = [float(datum["send"]) for datum in timings_get]
get_process_x = [(i+1) * max_count for i in range(0, iterations)]
get_process_y = [float(datum["process"]) for datum in timings_get]
get_receive_x = [(i+1) * max_count for i in range(0, iterations)]
get_receive_y = [float(datum["receive"]) for datum in timings_get]
post_send_x = [(i+1) * max_count for i in range(0, iterations)]
post_send_y = [float(datum["send"]) for datum in timings_post]
post_process_x = [(i+1) * max_count for i in range(0, iterations)]
post_process_y = [float(datum["process"]) for datum in timings_post]
post_receive_x = [(i+1) * max_count for i in range(0, iterations)]
post_receive_y = [float(datum["receive"]) for datum in timings_post]
plt.plot(get_send_x, get_send_y, label = "Send")
plt.plot(get_process_x, get_process_y, label = "Process")
plt.plot(get_receive_x, get_receive_y, label = "Receive")
plt.legend()
plt.savefig('get.png')
plt.clf()
plt.plot(post_send_x, post_send_y, label = "Send")
plt.plot(post_process_x, post_process_y, label = "Process")
plt.plot(post_receive_x, post_receive_y, label = "Receive")
plt.legend()
plt.savefig('post.png')
```
|
{
"source": "jfcarter2358/manifest-to-helm",
"score": 3
}
|
#### File: manifest-to-helm/tests/testing_utils.py
```python
import filecmp
import os.path
class dircmp(filecmp.dircmp):
"""
Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, this
subclass compares the content of files with the same path.
"""
def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
shallow=False)
self.same_files, self.diff_files, self.funny_files = fcomp
def is_same(dir1, dir2):
"""
Compare two directory trees content.
    Return False if they differ, True if they are the same.
"""
compared = dircmp(dir1, dir2)
if (compared.left_only or compared.right_only or compared.diff_files
or compared.funny_files):
return False
for subdir in compared.common_dirs:
if not is_same(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True
```
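A sketch of how `is_same` might be used from a test sitting next to `testing_utils.py`; the directory names are placeholders:
```python
# Hedged sketch: compare a rendered output tree against an expected tree.
# "expected" and "output" are placeholder directory names.
from testing_utils import is_same

def test_rendered_chart_matches_expected():
    assert is_same("expected", "output")
```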
|
{
"source": "jfcarter2358/Meringue",
"score": 3
}
|
#### File: Meringue/meringue/create_config.py
```python
import os
class create_config:
def __init__(self, meringue_path):
with open(meringue_path + '/data/meringue_config.ini', 'w') as f_out:
f_out.write('foreground=#ffffff\n')
f_out.write('background=#000000\n')
f_out.write('file_color=#00FF00\n')
f_out.write('dir_color=#ff00ff\n')
f_out.write('line_num_color=#ff0000\n')
f_out.write('line_num_background_color=#333333\n')
f_out.write('file_bar_color=#666666\n')
f_out.write('file_bar_text_color=#ffffff\n')
f_out.write('notebook_background=#333333\n')
f_out.write('highlight_foreground=#ffff00\n')
f_out.write('highlight_background=#0000ff\n')
f_out.write('token_keyword=#ff0000\n')
f_out.write('token_name=#ff<PASSWORD>\n')
f_out.write('token_literal=#ffff00\n')
f_out.write('token_string=#ff00ff\n')
f_out.write('token_number=#0000ff\n')
f_out.write('token_operators=#00ff00\n')
f_out.write('token_punctuation=#00ffff\n')
f_out.write('token_comments=#777777\n')
f_out.write('token_generic=#77ffff\n')
f_out.write('folder=\n')
```
#### File: Meringue/meringue/find_and_replace_dialog.py
```python
try:
from Tkinter import *
import Tkinter as tk
import ttk
import tkFileDialog
import tkMessageBox
from tkFileDialog import askdirectory
except:
from tkinter import *
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
from tkinter.filedialog import askdirectory
import os
from os import listdir
from os.path import isfile, join
from os import walk
class find_and_replace_dialog:
#tell the parent to find all instances of the item the user typed in
def find(self):
self.parent_obj.find(self.entryWidget.get())
#tell the parent to find one instance of the item the user typed in
def find_one(self):
self.parent_obj.find_one(self.entryWidget.get())
#tell the parent to replace items
def replace(self):
self.parent_obj.replace(self.entryWidget.get(), self.entryWidget2.get())
#tell the parent to replace all instances of the items
def replace_all(self):
self.parent_obj.replace_all(self.entryWidget.get(), self.entryWidget2.get())
def end(self):
self.parent_obj.reset_counters()
self.top.destroy()
def __init__(self, parent, parent_obj):
        # create all the GUI -- see access_ssh.py and change_color.py for an explanation
top = self.top = Toplevel(parent)
self.textFrame = Frame(top)
self.entryLabel = Label(self.textFrame)
self.entryLabel["text"] = "Find:"
self.entryLabel["width"] = 20
self.entryLabel.grid(row=0, column=0)
self.entryWidget = Entry(self.textFrame)
self.entryWidget["width"] = 50
self.entryWidget.grid(row=0, column=1)
self.entryWidget.focus_set()
self.entryLabel2 = Label(self.textFrame)
self.entryLabel2["text"] = "Replace:"
self.entryLabel2.grid(row=1, column=0)
self.entryWidget2 = Entry(self.textFrame)
self.entryWidget2["width"] = 50
self.entryWidget2.grid(row=1, column=1)
self.textFrame.grid()
self.button = Button(self.textFrame, text="Find All", command=self.find)
self.button.grid(row=2, column=0, columnspan=2, sticky=E+W)
self.button1 = Button(self.textFrame, text="Find Next", command=self.find_one)
self.button1.grid(row=3, column=0, columnspan=2, sticky=E+W)
self.button2 = Button(self.textFrame, text="Replace", command=self.replace)
self.button2.grid(row=4, column=0, columnspan=2, sticky=E+W)
self.button3 = Button(self.textFrame, text="Replace All", command=self.replace_all)
self.button3.grid(row=5, column=0, columnspan=2, sticky=E+W)
self.button4 = Button(self.textFrame, text="Done", command=self.end)
self.button4.grid(row=6, column=0, columnspan=2, sticky=E+W)
self.parent_obj = parent_obj
```
#### File: Meringue/meringue/interactive_paramiko.py
```python
import threading
import paramiko
import time
class SSH:
shell = None
client = None
transport = None
    parent = None
current_command = None
def __init__(self, address, username, password, port, parent):
print("Connecting to server on ip", str(address) + ".")
self.parent = parent
self.client = paramiko.client.SSHClient()
self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
self.client.connect(address, username=username, password=password, look_for_keys=False)
self.transport = paramiko.Transport((address, port))
self.transport.connect(username=username, password=password)
thread = threading.Thread(target=self.process)
thread.daemon = True
thread.start()
def closeConnection(self):
if(self.client != None):
self.client.close()
self.transport.close()
def openShell(self):
self.shell = self.client.invoke_shell()
def sendShell(self, command):
if(self.shell):
self.current_command = command[command.find('ls'):]
print(self.current_command)
self.shell.send(command + "\n")
else:
print("Shell not opened.")
def process(self):
global connection
output = []
should_return = False
temp_str = ''
while True:
while should_return == False:
if self.shell != None and self.shell.recv_ready():
alldata = self.shell.recv(1024)
while self.shell.recv_ready():
alldata += self.shell.recv(1024)
time.sleep(0.5)
strdata = str(alldata)
strdata = strdata.replace("b'", '')
strdata = strdata.replace("'", '')
strdata = strdata.replace('\\r', '')
strdata = strdata.replace('\\n', '\n')
strdata = strdata.replace('\r', '')
temp_str = temp_str + strdata
data = strdata.split(' ')
for dat in data:
if dat != '':
if dat.endswith("$"):
should_return = True
else:
pass
if should_return:
temp = temp_str.split('\n')
print(temp)
del(temp[0])
del(temp[len(temp) - 1])
if temp[0].startswith('ls: cannot access */'):
output = []
else:
tot_string = ''
for item in temp:
tot_string = tot_string + ' ' + item
print(tot_string)
data = tot_string.split(' ')
output = []
for dat in data:
if dat != '':
dat = dat.replace('/', '')
output.append(dat)
self.parent.Process_Output(self.current_command, output)
should_return = False
temp_str = ''
```
|
{
"source": "jfcarter2358/pipeline-reader",
"score": 3
}
|
#### File: src/pipeline_reader/objects.py
```python
class Pipeline:
def __init__(self):
self.stages = []
self.options = []
def __str__(self):
return f'Pipeline(stages="{self.stages}", options="{self.options}")'
def __repr__(self):
return self.__str__()
class Stage:
def __init__(self, name=''):
self.name = name
self.code = ''
self.indent = -1
def __str__(self):
return f'Stage(name="{self.name}", code="{self.code}")'
def __repr__(self):
return self.__str__()
class Options:
def __init__(self):
self.code = ''
```
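A minimal sketch of assembling these objects by hand (normally the pipeline parser produces them):
```python
# Hedged sketch: build a Pipeline manually from Stage and Options objects.
from pipeline_reader.objects import Pipeline, Stage, Options

options = Options()
options.code = "timeout: 60"   # placeholder options body

build = Stage(name="build")
build.code = "make all"        # placeholder stage body

pipeline = Pipeline()
pipeline.options.append(options)
pipeline.stages.append(build)
print(pipeline)  # Pipeline(stages="[Stage(name="build", code="make all")]", options="[...]")
```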
|
{
"source": "jfcb853/Udacity-DevOps-Azure-Project-2",
"score": 3
}
|
#### File: jfcb853/Udacity-DevOps-Azure-Project-2/locustfile.py
```python
import time
from locust import HttpUser, task, between
class WebsiteTestUser(HttpUser):
wait_time = between(0.5, 3.0)
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
pass
def on_stop(self):
""" on_stop is called when the TaskSet is stopping """
pass
@task(1)
def index(self):
self.client.get("https://jc-my-ml-app.azurewebsites.net/")
@task(2)
def predict(self):
self.client.post("/predict",{
"CHAS":{
"0":0
},
"RM":{
"0":6.575
},
"TAX":{
"0":296.0
},
"PTRATIO":{
"0":15.3
},
"B":{
"0":396.9
},
"LSTAT":{
"0":4.98
}
},
        headers={"Content-Type": "application/json"})
```
|
{
"source": "JFCeron/turingadvice",
"score": 3
}
|
#### File: turingadvice/best_of_n/generator.py
```python
import os
import tensorflow as tf
from data.to_tfrecord_t5 import _fix_reddit_text, _trim_to_desired_length, encoder
from reward.comparative.data import SELFTEXT_DESIRED_LEN
class BestOfNGenerator():
def __init__(
self, N, t5_model, t5_model_ckpt_steps, sampling_keep_top_p,
reward_model, reward_model_ckpt_steps, tmp_dir
):
self.N = N
self.t5_model = t5_model
self.t5_model_ckpt_steps = t5_model_ckpt_steps
self.sampling_keep_top_p = sampling_keep_top_p
self.reward_model = reward_model
self.reward_model_ckpt_steps = reward_model_ckpt_steps
self.tmp_dir = tmp_dir
def generate_N(self, inputs_path, outputs_path):
"""
Args:
inputs_path: str
A text file with one T5-formatted question per line, i.e.
line = "Subreddit: ... Date: ... Title: ... Selftext: ..."
outputs_path: str
A text file with N answers per question, preceded by the question,
i.e. each line looks like <question>\t<answer>
"""
# Repeat each input N times, store in temporary file
REPEATED_QUESTIONS_PATH = os.path.join(self.tmp_dir, "repeated-questions.txt")
T5_PREDICTIONS_PATH = os.path.join(self.tmp_dir, "t5-predictions.txt")
with tf.io.gfile.GFile(inputs_path, "r") as inputs_file, \
tf.io.gfile.GFile(REPEATED_QUESTIONS_PATH, "w") as repeats_file:
for line in inputs_file:
for _ in range(self.N):
repeats_file.write(line)
# Predict over repeated inputs file
self.t5_model.predict(
input_file=REPEATED_QUESTIONS_PATH,
output_file=T5_PREDICTIONS_PATH,
checkpoint_steps=self.t5_model_ckpt_steps,
sampling_keep_top_p=self.sampling_keep_top_p
)
T5_PREDICTIONS_PATH += f"-{self.t5_model_ckpt_steps}"
# Append answers to repeated questions and write output
with tf.io.gfile.GFile(REPEATED_QUESTIONS_PATH, "r") as repeats_file, \
tf.io.gfile.GFile(T5_PREDICTIONS_PATH, "r") as predictions_file, \
tf.io.gfile.GFile(outputs_path, "w") as outputs_file:
for question, answer in zip(repeats_file, predictions_file):
question = question[:-1] # remove newline character
outputs_file.write(question + "\t" + answer)
def generate(self, inputs_path, outputs_path):
"""
Args:
inputs_path: str
A text file with one T5-formatted question per line, i.e
line = "Subreddit: ... Date: ... Title: ... Selftext: ..."
outputs_path: str
A text file with one answer (no preceding question!) per line
"""
N_GENERATIONS_PATH = os.path.join(self.tmp_dir, "N-generations")
N_SCORES_PATH = os.path.join(self.tmp_dir, "N-scores")
self.generate_N(inputs_path, N_GENERATIONS_PATH)
self.reward_model.predict_from_file(
input_path=N_GENERATIONS_PATH,
output_path=N_SCORES_PATH,
checkpoint_steps=self.reward_model_ckpt_steps
)
# Write top-scoring answer to every question
with tf.io.gfile.GFile(N_GENERATIONS_PATH, "r") as gens_file, \
tf.io.gfile.GFile(N_SCORES_PATH, "r") as scores_file, \
tf.io.gfile.GFile(outputs_path, "w") as outputs_file:
best_score = -float("inf")
best_answer = None
answer_count = 0
for answer, str_score in zip(gens_file, scores_file):
score = float(str_score)
answer_count += 1
if score > best_score:
best_score = score
best_answer = answer.split("\t")[1] # Drop question text
if answer_count >= self.N:
# Write best answer and start processing next question
outputs_file.write(best_answer)
answer_count = 0
best_score = -float("inf")
best_answer = None
def generate_from_instances(self, instances):
"""
Args:
instances: [dict]
Each element is a dict with keys "title", "date", "selftext",
"subreddit"
Returns:
advices: [str]
"""
# Write a temporary file with the instances
TMP_INSTANCES_FILE = os.path.join(self.tmp_dir, "instances.txt")
with tf.io.gfile.GFile(TMP_INSTANCES_FILE, "w") as instances_file:
for instance in instances:
instance["selftext"] = _trim_to_desired_length(
encoder,
instance["selftext"],
desired_len=SELFTEXT_DESIRED_LEN
)
instance = {k: _fix_reddit_text(v) for k,v in instance.items()}
str_instance = \
"Subreddit: " + instance["subreddit"] \
+ " Date: " + instance["date"] \
+ " Title: " + instance["title"] \
+ " Selftext: " + instance["selftext"]
instances_file.write(str_instance + "\n")
# Call self.generate
TMP_OUTPUTS_PATH = os.path.join(self.tmp_dir, "instance_outputs.txt")
self.generate(TMP_INSTANCES_FILE, TMP_OUTPUTS_PATH)
advices = []
with tf.io.gfile.GFile(TMP_OUTPUTS_PATH, "r") as outputs_file:
for instance_output in outputs_file:
advices.append(instance_output[:-1]) # Remove newline
return advices
```
#### File: turingadvice/best_of_n/sample_generations.py
```python
import sys
from absl import flags
import tensorflow as tf
def _define_flags():
flags.DEFINE_string(
name="input_path",
default=None,
help="Path to a text file"
)
flags.DEFINE_string(
name="output_path",
default=None,
help="File to store sampled generations"
)
flags.DEFINE_integer(
name="N",
default=1,
help="The number of generations per question in the input file"
)
flags.DEFINE_integer(
name="n",
default=None,
help="The number of generations per question in the output file"
)
return flags.FLAGS
if __name__ == "__main__":
"""
Sample n <= N text generations per question from an input file with N
generations per question
"""
FLAGS = _define_flags()
FLAGS(sys.argv)
assert FLAGS.n <= FLAGS.N, "n > N"
with tf.io.gfile.GFile(FLAGS.input_path, "r") as input_file, \
tf.io.gfile.GFile(FLAGS.output_path, "w") as output_file:
gens_written = 0
gens_seen = 0
for input_line in input_file:
if gens_written < FLAGS.n:
output_file.write(input_line)
gens_written += 1
gens_seen += 1
if gens_seen >= FLAGS.N:
gens_seen = 0
gens_written = 0
```
#### File: turingadvice/frontend/api.py
```python
import os
import re
import json
import logging
from datetime import datetime
import flask
from flask_cors import CORS
import click
from gevent.pywsgi import WSGIServer
import tensorflow as tf
from t5.models.mtf_model import MtfModel
from reward.comparative.model import ComparativeRewardModel
from best_of_n.generator import BestOfNGenerator
SAMPLING_KEEP_TOP_P = 0.95
BEST_OF_N_N = 80
T5_MODEL_DIR = "gs://seri2021-advice-eu/turingadvice/baselines/t5/11B"
T5_MODEL_CKPT = 1010000
REWARD_MODEL_DIR = "gs://seri2021-advice-eu/turingadvice/reward/comparative/checkpoints/3B/f2-1-small-batch"
REWARD_MODEL_CKPT = 1019348
BoN_TMP_DIR = "gs://seri2021-advice-eu/turingadvice/frontend"
MODEL_PARALLELISM = 8
ITERATIONS_PER_LOOP = 10
TEMPLATE_DIR = "./frontend"
# Initialize models and Best-of-N generator
t5_model = MtfModel(
model_dir=T5_MODEL_DIR,
tpu=os.uname()[1],
tpu_topology="2x2", # Must be this for validation (Rowan)
model_parallelism=MODEL_PARALLELISM,
batch_size=1,
sequence_length={"inputs": 1280, "targets": 512},
iterations_per_loop=ITERATIONS_PER_LOOP
)
reward_model = ComparativeRewardModel(
model_dir=REWARD_MODEL_DIR,
tpu=os.uname()[1],
tpu_topology="2x2", # Must be this for validation (Rowan)
model_parallelism=MODEL_PARALLELISM,
batch_size=1,
sequence_length={"inputs": 1280, "targets": 512},
iterations_per_loop=ITERATIONS_PER_LOOP
)
BoN_generator = BestOfNGenerator(
t5_model=t5_model,
t5_model_ckpt_steps=T5_MODEL_CKPT,
reward_model=reward_model,
reward_model_ckpt_steps=REWARD_MODEL_CKPT,
N=BEST_OF_N_N,
sampling_keep_top_p=SAMPLING_KEEP_TOP_P,
tmp_dir=BoN_TMP_DIR
)
# Initialize API
app = flask.Flask(__name__, template_folder=TEMPLATE_DIR)
CORS(app, resources={r'/api/*': {'origins': '*'}})
logger = logging.getLogger(__name__)
def _datetime_to_str(date):
return [
'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December'
][date.month - 1] + ' {}, {}'.format(date.day, date.year)
@app.route('/api/askbatch', methods=['POST'])
def api_askbatch():
request_dict = dict(flask.request.json)
instances = request_dict["instances"]
date = datetime.utcnow()
date_str = _datetime_to_str(date)
for instance in instances:
instance["date"] = date_str
advices = BoN_generator.generate_from_instances(instances)
advices = [re.sub(r'\s+»\s+', '\n\n', advice).strip() for advice in advices]
request_dict.update({"advices": advices})
with tf.io.gfile.GFile(os.path.join(BoN_TMP_DIR, "log.jsonl"), "a+") as logfile:
logfile.write(json.dumps(request_dict) + "\n")
return flask.jsonify({"gens": advices}), 200
@click.command()
def serve():
"""Serve predictions on port 5000."""
logging.basicConfig(
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
level=logging.INFO)
logger.info('Running prod server on http://127.0.0.1:5000/')
WSGIServer(('0.0.0.0', 5000), app).serve_forever()
```
|
{
"source": "Jf-Chen/FRN-main",
"score": 2
}
|
#### File: FRN-main/data/init_meta_iNat.py
```python
import os
import json
import torch
import argparse
from PIL import Image
import sys
from tqdm import tqdm
import shutil
import yaml
sys.path.append('..')
from utils import util
# import util
with open('../config.yml','r') as f:
# with open('config.yml','r') as f:
config = yaml.safe_load(f)
data_path = os.path.abspath(config['data_path'])
origin_path = os.path.join(data_path,'inat2017')
imgfolder = 'inat2017_84x84'
img_path = os.path.join(data_path,imgfolder)
rel_path = os.path.join('..','..','..',imgfolder)
inat_path = os.path.join(data_path,'meta_iNat')
tier_path = os.path.join(data_path,'tiered_meta_iNat')
util.mkdir(img_path)
util.mkdir(inat_path)
util.mkdir(tier_path)
with open(os.path.join(origin_path,'train_2017_bboxes.json')) as f:
allinfo = json.load(f)
annolist = allinfo['annotations']
annodict = dict() # im_id to list of box_ids
boxdict = dict() # box_id to box coords
catdict = dict() # dict of numerical category codes / labels to corresponding list of image ids
for d in annolist:
im = d['image_id']
boxid = d['id']
cat = d['category_id']
# Add box_id to image entry
if im in annodict:
annodict[im].append(boxid)
else:
annodict[im] = [boxid]
# Add mapping from box_id to box
boxdict[boxid] = d['bbox']
# Add image to category set
if cat in catdict:
catdict[cat].add(im)
else:
catdict[cat] = set([im])
# assemble im_id -> filepath dictionary
namelist = allinfo['images']
keys = []
vals = []
for d in namelist:
keys.append(d['id'])
vals.append(os.path.join(origin_path,d['file_name']))
pather = dict(zip(keys,vals))
# Pare down the category dictionary to the desired size
clist = list(catdict.keys())
for c in clist:
if len(catdict[c]) < 50 or len(catdict[c]) > 1000:
catdict.pop(c)
supercat = dict()
for d in allinfo['categories']:
catid = d['id']
if catid in catdict:
sc = d['supercategory']
if sc in supercat:
supercat[sc].append(catid)
else:
supercat[sc] = [catid,]
# shrink images
catlist = list(catdict.keys())
boxdict_shrunk = dict() # abbreviated path -> [box corners]
pather_shrunk = dict() # im_id -> new path (relative, for symlinks)
print('Shrinking images to 84x84 ...')
for c in tqdm(catlist):
# For each category:
catpath = os.path.join(img_path,str(c))
if not os.path.exists(catpath):
os.makedirs(catpath)
ims = catdict[c]
for imkey in ims:
# For each image:
path = pather[imkey]
file = path[path.rfind(os.path.sep)+1:path.rfind('.')]+'.png'
fname = os.path.join(str(c),file)
newpath = os.path.join(catpath,fname)
pather_shrunk[imkey] = os.path.join(rel_path,fname)
# Downsize the image to 84x84
with open(path, 'rb') as f:
p = Image.open(f)
w,h = p.size
p = p.convert('RGB')
p = p.resize((84,84), Image.BILINEAR)
p.save(newpath)
# Downsize the bounding box annotations to 10x10
boxes = annodict[imkey]
boxdict_shrunk[str(c)+'/'+file] = []
for boxcode in boxes:
box = boxdict[boxcode]
xmin = box[0]
xmax = box[2]+xmin
ymin = box[1]
ymax = box[3]+ymin
boxdict_shrunk[str(c)+'/'+file].append([xmin*10/w, ymin*10/h, xmax*10/w, ymax*10/h])
torch.save(boxdict_shrunk, os.path.join(img_path,'box_coords.pth'))
def makedataset(traincatlist, testcatlist, datapath, catdict, pather):
def makesplit(catlist, datapath, split, catdict, pather, imsplit):
splitpath = os.path.join(datapath,split)
util.mkdir(splitpath)
for c in catlist:
# For each category:
catpath = os.path.join(splitpath,str(c))
if not os.path.exists(catpath):
os.makedirs(catpath)
ims = list(catdict[c])
ims = imsplit(ims)
for imkey in ims:
path = pather[imkey]
newpath = os.path.join(catpath,path[path.rfind(os.path.sep)+1:path.rfind('.')]+'.png')
os.symlink(path, newpath)
makesplit(traincatlist, datapath, 'train', catdict, pather, lambda x: x)
makesplit(testcatlist, datapath, 'test', catdict, pather, lambda x: x)
makesplit(testcatlist, datapath, 'refr', catdict, pather, lambda x: x[:len(x)//5])
makesplit(testcatlist, datapath, 'query', catdict, pather, lambda x: x[len(x)//5:])
# meta-iNat
print('Organizing meta-iNat ...')
split_folder = os.path.abspath('./meta_iNat_split/')
traincatlist = torch.load(os.path.join(split_folder,'meta_iNat_traincats.pth'))
testcatlist = torch.load(os.path.join(split_folder,'meta_iNat_testcats.pth'))
makedataset(traincatlist, testcatlist, inat_path, catdict, pather_shrunk)
torch.save(boxdict_shrunk, os.path.join(inat_path,'box_coords.pth'))
# tiered meta-iNat
print('Organizing tiered meta-iNat ...')
traincatlist = (supercat['Animalia']+supercat['Aves']+supercat['Reptilia']+supercat['Amphibia']
+supercat['Mammalia']+supercat['Actinopterygii']+supercat['Mollusca'])
testcatlist = supercat['Insecta']+supercat['Arachnida']
makedataset(traincatlist, testcatlist, tier_path, catdict, pather_shrunk)
torch.save(boxdict_shrunk, os.path.join(tier_path,'box_coords.pth'))
print('Organizing complete!')
```
#### File: FRN-main/utils/util.py
```python
from PIL import Image
import torch
import os
import numpy as np
import sys
import argparse
import shutil
from tqdm import tqdm
import torchvision.transforms as transforms
def mkdir(path):
if os.path.exists(path):
print("--- the folder already exists ---")
else:
os.makedirs(path)
# get pre-resized 84x84 images for validation and test
def get_pre_folder(image_folder,transform_type):
split = ['val','test']
if transform_type == 0:
transform = transforms.Compose([transforms.Resize(92),
transforms.CenterCrop(84)])
elif transform_type == 1:
transform = transforms.Compose([transforms.Resize([92,92]),
transforms.CenterCrop(84)])
cat_list = []
for i in split:
cls_list = os.listdir(os.path.join(image_folder,i))
folder_name = i+'_pre'
mkdir(os.path.join(image_folder,folder_name))
for j in tqdm(cls_list):
mkdir(os.path.join(image_folder,folder_name,j))
img_list = os.listdir(os.path.join(image_folder,i,j))
for img_name in img_list:
img = Image.open(os.path.join(image_folder,i,j,img_name))
img = img.convert('RGB')
img = transform(img)
img.save(os.path.join(image_folder,folder_name,j,img_name[:-3]+'png'))
def get_device_map(gpu):
cuda = lambda x: 'cuda:%d'%x
temp = {}
for i in range(4):
temp[cuda(i)]=cuda(gpu)
return temp
```
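A quick illustration of `get_device_map` (assuming the repository root is on the import path); it redirects the first four CUDA device names onto one chosen GPU:
```python
# Hedged sketch of util.get_device_map.
from utils import util  # assumption: FRN repository root is importable

print(util.get_device_map(1))
# {'cuda:0': 'cuda:1', 'cuda:1': 'cuda:1', 'cuda:2': 'cuda:1', 'cuda:3': 'cuda:1'}
```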
|
{
"source": "Jf-Chen/my_FRN",
"score": 3
}
|
#### File: my_FRN/models/set_function.py
```python
import torch
from torch import nn
from torch.nn import functional as F
# Can be viewed as an MLP.
# To avoid bugs, train_shot and query_shot should be passed as forward() inputs rather than fixed at initialization.
class SetFunction(nn.Module):
def __init__(self, train_way,train_shot, resolution,input_dimension, output_dimension):
        super(SetFunction, self).__init__()  # nn.Module.__init__()
        self.train_way = train_way
        self.train_shot = train_shot
        self.resolution = resolution
self.input_dimension = input_dimension
self.output_dimension = output_dimension
self.psi = nn.Sequential(
            # nn.Linear(): y = x @ A.T + b, where A is the weight matrix
nn.Linear(input_dimension, input_dimension * 2),
nn.ReLU(),
nn.Linear(input_dimension * 2, input_dimension * 2),
nn.ReLU()
)
self.rho = nn.Sequential(
nn.Linear(input_dimension * 3, input_dimension * 2),
nn.ReLU(),
nn.Linear(input_dimension * 2, output_dimension),
)
def forward(self, support_embeddings, level,train_way,train_shot, resolution):
if level == 'task':
            psi_output = self.psi(support_embeddings)  # last dim becomes input_dimension * 2
rho_input = torch.cat([psi_output, support_embeddings], dim=2)
rho_input = torch.sum(rho_input, dim=0, keepdim=True)
rho_output = torch.nn.functional.relu6(self.rho(rho_input)) / 6
return rho_output
elif level == 'class':
psi_output = self.psi(support_embeddings)
rho_input = torch.cat([psi_output, support_embeddings], dim=2)
rho_input = rho_input.view(train_way, train_shot,resolution, -1)
rho_input = torch.sum(rho_input, dim=1)
rho_output = torch.nn.functional.relu6(self.rho(rho_input)) / 6
return rho_output
        # level = 'balance' has been removed
```
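A shape-level sketch of calling `SetFunction`; the sizes below (5-way 1-shot, 5x5 feature maps, 64-channel embeddings) are illustrative assumptions, not values taken from the repository:
```python
# Hedged sketch: check the output shapes of SetFunction at both levels.
import torch
from models.set_function import SetFunction  # assumption: repository root is importable

way, shot, resolution, dim = 5, 1, 25, 64
fn = SetFunction(way, shot, resolution, input_dimension=dim, output_dimension=dim)

# One row per support image, one slot per spatial position.
support = torch.randn(way * shot, resolution, dim)

task_out = fn(support, 'task', way, shot, resolution)    # shape [1, resolution, dim]
class_out = fn(support, 'class', way, shot, resolution)  # shape [way, resolution, dim]
print(task_out.shape, class_out.shape)
```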
|
{
"source": "jfcherng/LSP-bash",
"score": 2
}
|
#### File: jfcherng/LSP-bash/plugin.py
```python
from LSP.plugin.core.typing import Tuple
from lsp_utils import NpmClientHandler
import os
def plugin_loaded():
LspBashPlugin.setup()
def plugin_unloaded():
LspBashPlugin.cleanup()
class LspBashPlugin(NpmClientHandler):
package_name = __package__
server_directory = "language-server"
server_binary_path = os.path.join(
server_directory,
"node_modules",
"bash-language-server",
"bin",
"main.js",
)
@classmethod
def minimum_node_version(cls) -> Tuple[int, int, int]:
return (12, 0, 0)
```
|
{
"source": "jfcherng/Sublime-ASS",
"score": 3
}
|
#### File: plugin/commands/ass_toggle_comment.py
```python
from typing import List, Set, Tuple
import sublime
import sublime_plugin
ASS_COMMENT_PAIRS: Tuple[Tuple[str, str], ...] = (
# (before_command_executed, after_command_executed),
("Comment: ", "Dialogue: "),
("Dialogue: ", "Comment: "),
("; ", ""),
("", "; "), # always matches
)
def find_first_diff_pos(str1: str, str2: str) -> int:
"""Finds the first difference position. Returns `-1` if both strings are identical."""
if str1 == str2:
return -1
shorter, longer = sorted((str1, str2), key=len)
shorter_len = len(shorter)
return next(
(i for i in range(shorter_len) if shorter[i] != longer[i]),
shorter_len,
)
def get_caret_line_beginning_points(view: sublime.View) -> List[int]:
"""Gets the points of the first non-space char of the caret lines."""
points: Set[int] = set()
for region in view.sel():
points |= {view.find(r"^[ \t]*", line_region.begin()).end() for line_region in view.lines(region)}
return sorted(points)
class AssToggleCommentCommand(sublime_plugin.TextCommand):
def run(self, edit: sublime.Edit) -> None:
for beginning_point in reversed(get_caret_line_beginning_points(self.view)):
for before, after in ASS_COMMENT_PAIRS:
comment_region = sublime.Region(beginning_point, beginning_point + len(before))
comment_content = self.view.substr(comment_region)
if not comment_content.startswith(before.rstrip()):
continue
if comment_content != before:
comment_region.b -= len(comment_content) - find_first_diff_pos(comment_content, before)
self.view.insert(edit, comment_region.a, after)
self.view.erase(
edit,
sublime.Region(
comment_region.a + len(after),
comment_region.b + len(after),
),
)
break
```
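For reference, the behaviour of the pure `find_first_diff_pos` helper on plain strings; the function is copied below so the check runs without the Sublime Text API that the plugin module imports:
```python
# Standalone check of find_first_diff_pos (copied from the plugin above).
def find_first_diff_pos(str1: str, str2: str) -> int:
    if str1 == str2:
        return -1
    shorter, longer = sorted((str1, str2), key=len)
    shorter_len = len(shorter)
    return next((i for i in range(shorter_len) if shorter[i] != longer[i]), shorter_len)

print(find_first_diff_pos("Comment: ", "Comment: "))   # -1 (identical)
print(find_first_diff_pos("Comment: ", "Co"))          # 2  (one string is a prefix of the other)
print(find_first_diff_pos("Dialogue: ", "Comment: "))  # 0  (first characters already differ)
```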
#### File: Sublime-ASS/plugin/functions.py
```python
import sublime
def is_my_syntax(view: sublime.View) -> bool:
return syntax.scope == "text.ass" if (syntax := view.syntax()) else False
def is_my_scope(view: sublime.View, point: int) -> bool:
return view.match_selector(point, "text.ass")
```
#### File: Sublime-ASS/plugin/listener.py
```python
from .functions import is_my_scope
from .functions import is_my_syntax
from typing import Any, Dict, Optional, Tuple
import sublime
import sublime_plugin
class AssToggleCommentEventListener(sublime_plugin.EventListener):
def on_text_command(
self,
view: sublime.View,
command_name: str,
args: Dict[str, Any],
) -> Optional[Tuple[str, Optional[Dict[str, Any]]]]:
if command_name != "toggle_comment":
return None
if is_my_syntax(view):
return ("ass_toggle_comment", None)
# command only works when all target lines are in ASS scope
if not all(
# ...
is_my_scope(view, line.begin())
for sel_region in view.sel()
for line in view.lines(sel_region)
):
return None
return ("ass_toggle_comment", None)
```
|
{
"source": "jfcherng/Sublime-AutoSetSyntax",
"score": 2
}
|
#### File: plugin/commands/auto_set_syntax_restart_guesslang.py
```python
from ..constant import PLUGIN_NAME
from ..guesslang.client import GuesslangClient
from ..guesslang.server import GuesslangServer
from ..settings import get_merged_plugin_setting
from ..shared import G
from .auto_set_syntax import GuesslangClientCallbacks
import sublime_plugin
import threading
import time
class AutoSetSyntaxRestartGuesslangCommand(sublime_plugin.ApplicationCommand):
def description(self) -> str:
return f"{PLUGIN_NAME}: Restart Guesslang Client And Server"
def is_enabled(self) -> bool:
return bool(get_merged_plugin_setting("guesslang.enabled"))
def run(self) -> None:
t = threading.Thread(target=self._worker)
t.start()
def _worker(self) -> None:
host = "localhost"
port: int = get_merged_plugin_setting("guesslang.port")
GuesslangServer.stop()
if GuesslangServer.start(host, port):
time.sleep(1) # wait for server initialization
G.guesslang = GuesslangClient(host, port, callback_object=GuesslangClientCallbacks())
```
#### File: plugin/guesslang/server.py
```python
from ..constant import PLUGIN_NAME
from ..constant import PLUGIN_STORAGE_DIR
from ..helper import expand_variables
from ..logger import Logger
from ..settings import get_merged_plugin_setting
from pathlib import Path
from typing import Dict, Optional, Set, Sequence, Union
import os
import shutil
import socket
import sublime
import subprocess
class GuesslangServer:
server_dir = PLUGIN_STORAGE_DIR / "guesslang-server"
server_file = PLUGIN_STORAGE_DIR / "guesslang-server/websocket.js"
# background server process(es)
_subprocesses: Set[subprocess.Popen] = set()
@classmethod
def start(cls, host: str, port: int) -> bool:
"""Starts the guesslang server and return whether it starts."""
if not is_executable(node_path := parse_node_path()):
sublime.error_message(f'[{PLUGIN_NAME}] Node.js binary not found or not executable: "{node_path}"')
return False
if is_port_in_use(port):
Logger.log(sublime.active_window(), f"⚠ Port {port} is in use.")
try:
process = cls._start_process(
(node_path, cls.server_file),
cwd=cls.server_dir,
extra_env={
"NODE_SKIP_PLATFORM_CHECK": "1",
"HOST": host,
"PORT": str(port),
},
)
except Exception as e:
sublime.error_message(f"[{PLUGIN_NAME}] Fail starting guesslang server because {e}")
return False
if process.stdout and process.stdout.read(2) == "OK":
cls._subprocesses.add(process)
return True
sublime.error_message(f"[{PLUGIN_NAME}] Fail starting guesslang server.")
return False
@classmethod
def stop(cls) -> None:
for p in cls._subprocesses:
try:
p.kill()
except Exception:
pass
for p in cls._subprocesses:
try:
p.wait()
except Exception:
pass
cls._subprocesses.clear()
@classmethod
def is_running(cls) -> bool:
return len(cls._subprocesses) > 0
@staticmethod
def _start_process(
cmd: Union[str, Path, Sequence[Union[str, Path]]],
extra_env: Optional[Dict[str, str]] = None,
**kwargs,
) -> subprocess.Popen:
if os.name == "nt":
# do not create a window for the process
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore
else:
startupinfo = None # type: ignore
env = os.environ.copy()
env.update(extra_env or {})
return subprocess.Popen(
cmd,
startupinfo=startupinfo,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
encoding="utf-8",
env=env,
**kwargs,
)
def parse_node_path() -> str:
return expand_variables(get_merged_plugin_setting("guesslang.node_bin"))
def is_executable(path: Union[str, Path]) -> bool:
return bool((os.path.isfile(path) and os.access(path, os.X_OK)) or shutil.which(path))
def is_port_in_use(port: int) -> bool:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
```
#### File: Sublime-AutoSetSyntax/plugin/logger.py
```python
from .compatibility import view_clear_undo_stack
from .constant import PLUGIN_NAME
from .helper import get_st_window
from .settings import get_merged_plugin_setting
from .settings import get_st_setting
from contextlib import contextmanager
from typing import Dict, Generator, Optional, Union
import math
import re
import sublime
import sublime_plugin
@contextmanager
def _editable_view(view: sublime.View) -> Generator[sublime.View, None, None]:
is_read_only = view.is_read_only()
view.set_read_only(False)
try:
yield view
finally:
view.set_read_only(is_read_only)
def _find_log_panel(obj: Union[sublime.View, sublime.Window]) -> Optional[sublime.View]:
return window.find_output_panel(PLUGIN_NAME) if (window := get_st_window(obj)) else None
def _create_log_panel(window: sublime.Window) -> sublime.View:
panel = window.create_output_panel(PLUGIN_NAME)
# Somehow there is an error about "scope:output.autosetsyntax.log" not found during updating this plugin.
# Thus, I change it to use the syntax path to load the syntax.
panel.assign_syntax("Packages/AutoSetSyntax/syntaxes/AutoSetSyntaxLog.sublime-syntax")
panel.set_read_only(True)
panel.set_scratch(True)
panel.settings().update(
{
"draw_white_space": "none",
"gutter": False,
"is_widget": True, # ST 3 convention for a non-normal view
"line_numbers": False,
"scroll_past_end": False,
"spell_check": False,
"word_wrap": False,
}
)
return panel
class Logger:
# per-window, WindowId => history count
history_counts: Dict[int, int] = {}
@classmethod
def log(cls, window: Optional[sublime.Window], msg: str, enabled: bool = True) -> None:
if not (enabled and window and get_merged_plugin_setting("enable_log", window=window)):
return
max_lines = get_st_setting("console_max_history_lines", math.inf) / 8
if cls._get_history_count(window) >= max_lines:
cls.clear(window)
window.run_command("auto_set_syntax_append_log", {"msg": msg})
cls._increase_history_count(window)
cls._clear_undo_stack(window)
@classmethod
def clear(cls, window: sublime.Window) -> None:
window.run_command("auto_set_syntax_clear_log_panel", {"from_logger": True})
cls._set_history_count(window, 0)
cls._clear_undo_stack(window)
@classmethod
def destroy(cls, window: sublime.Window) -> None:
window.destroy_output_panel(PLUGIN_NAME)
cls.history_counts.pop(window.id())
@classmethod
def _get_history_count(cls, window: sublime.Window) -> int:
return cls.history_counts.get(window.id(), 0)
@classmethod
def _set_history_count(cls, window: sublime.Window, value: int) -> None:
cls.history_counts[window.id()] = value
@classmethod
def _increase_history_count(cls, window: sublime.Window, amount: int = 1) -> None:
cls._set_history_count(window, cls._get_history_count(window) + amount)
@classmethod
def _clear_undo_stack(cls, window: sublime.Window) -> None:
if panel := _find_log_panel(window):
view_clear_undo_stack(panel)
class AutoSetSyntaxAppendLogCommand(sublime_plugin.TextCommand):
"""Internal use only."""
def is_visible(self) -> bool:
return False
def run(self, edit: sublime.Edit, msg: str, squash_history: bool = True) -> None:
if not (window := self.view.window()):
return
if not (panel := _find_log_panel(window)):
panel = _create_log_panel(window)
if (
squash_history
and (last_line_region := panel.full_line(panel.size() - 1))
and (last_line := panel.substr(last_line_region).rstrip()).startswith(msg)
and (m := re.match(r"(?: +\(x(\d+)\))?", last_line[len(msg) :]))
):
msg = f"{msg} (x{int(m.group(1) or 1) + 1})"
replace_region = last_line_region
else:
replace_region = sublime.Region(panel.size()) # EOF
with _editable_view(panel) as panel:
panel.replace(edit, replace_region, f"{msg}\n")
class AutoSetSyntaxClearLogPanelCommand(sublime_plugin.TextCommand):
"""Clear the plugin log panel for the current window."""
def description(self) -> str:
return f"{PLUGIN_NAME}: Clear Log Panel"
def is_enabled(self) -> bool:
return bool(_find_log_panel(self.view))
def run(self, edit: sublime.Edit, from_logger: bool = False) -> None:
if not (window := self.view.window()):
return
# ensure command is triggered by the logger so that we can maintain internal states
if not from_logger:
Logger.clear(window)
return
if panel := _find_log_panel(window):
with _editable_view(panel) as panel:
panel.erase(edit, sublime.Region(0, panel.size()))
class AutoSetSyntaxToggleLogPanelCommand(sublime_plugin.WindowCommand):
"""Toggle the visibility of the plugin log panel for the current window."""
def description(self) -> str:
return f"{PLUGIN_NAME}: Toggle Log Panel"
def is_enabled(self) -> bool:
return bool(_find_log_panel(self.window))
def run(self) -> None:
self.window.run_command(
"show_panel",
{
"panel": f"output.{PLUGIN_NAME}",
"toggle": True,
},
)
```
#### File: Sublime-AutoSetSyntax/plugin/lru_cache.py
```python
from functools import _lru_cache_wrapper
from functools import lru_cache
from typing import Any, Callable, Set, TypeVar, cast
AnyCallable = TypeVar("AnyCallable", bound=Callable[..., Any])
cached_functions: Set[_lru_cache_wrapper] = set()
def clearable_lru_cache(*args: Any, **kwargs: Any) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
wrapped = lru_cache(*args, **kwargs)(func)
cached_functions.add(wrapped)
return cast(AnyCallable, wrapped)
return decorator
def clear_all_cached_functions():
for func in cached_functions:
func.cache_clear()
```
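A minimal usage sketch for the decorator above (the `fibonacci` function is made up for illustration): every function wrapped by `clearable_lru_cache` is registered in `cached_functions`, so a single call to `clear_all_cached_functions()` resets them all.
```python
# Hypothetical example; assumes `clearable_lru_cache` and
# `clear_all_cached_functions` are imported from the module above.
@clearable_lru_cache(maxsize=128)
def fibonacci(n: int) -> int:
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)

fibonacci(30)                 # results are memoized from here on
clear_all_cached_functions()  # clears the cache of every decorated function at once
```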
#### File: rules/matches/any.py
```python
from ..match import AbstractMatch
from ..match import MatchableRule
from typing import Tuple, final
import sublime
@final
class AnyMatch(AbstractMatch):
"""Matches when any rule is matched."""
def is_droppable(self, rules: Tuple[MatchableRule, ...]) -> bool:
return len(rules) == 0
def test(self, view: sublime.View, rules: Tuple[MatchableRule, ...]) -> bool:
return any(rule.test(view) for rule in rules)
```
#### File: plugin/rules/syntax.py
```python
from __future__ import annotations
# __future__ must be the first import
from ..constant import VERSION
from ..helper import find_syntax_by_syntax_likes
from ..helper import first
from ..types import ListenerEvent, Optimizable, ST_SyntaxRule
from .match import MatchRule
from dataclasses import dataclass
from typing import Generator, Iterable, List, Optional, Set, Tuple
import sublime
@dataclass
class SyntaxRule(Optimizable):
comment: str = ""
syntax: Optional[sublime.Syntax] = None
syntaxes_name: Optional[Tuple[str, ...]] = tuple()
selector: str = "text.plain"
on_events: Optional[Set[ListenerEvent]] = None
root_rule: Optional[MatchRule] = None
def is_droppable(self) -> bool:
        # `on_events` is either None (no restriction) or a set; an explicitly empty set can never fire
        return not (self.syntax and self.on_events != set() and self.root_rule)
def optimize(self) -> Generator[Optimizable, None, None]:
if self.root_rule:
if self.root_rule.is_droppable():
yield self.root_rule
self.root_rule = None
else:
yield from self.root_rule.optimize()
if self.root_rule.is_droppable():
yield self.root_rule
self.root_rule = None
def test(self, view: sublime.View, event: Optional[ListenerEvent] = None) -> bool:
if event and self.on_events is not None and event not in self.on_events:
return False
if self.selector and not view.match_selector(0, self.selector):
return False
assert self.root_rule
return self.root_rule.test(view)
@classmethod
def make(cls, syntax_rule: ST_SyntaxRule) -> SyntaxRule:
"""Build this object with the `syntax_rule`."""
obj = cls()
if comment := syntax_rule.get("comment"):
obj.comment = str(comment)
syntaxes = syntax_rule.get("syntaxes", [])
if isinstance(syntaxes, str):
syntaxes = [syntaxes]
obj.syntaxes_name = tuple(syntaxes)
if target_syntax := find_syntax_by_syntax_likes(syntaxes):
obj.syntax = target_syntax
# note that an empty string selector should match any scope
if (selector := syntax_rule.get("selector")) is not None:
obj.selector = selector
if (on_events := syntax_rule.get("on_events")) is not None:
if isinstance(on_events, str):
on_events = [on_events]
obj.on_events = set(filter(None, map(ListenerEvent.from_value, on_events)))
if match_rule_compiled := MatchRule.make(syntax_rule):
obj.root_rule = match_rule_compiled
return obj
@dataclass
class SyntaxRuleCollection(Optimizable):
version: str = VERSION
rules: Tuple[SyntaxRule, ...] = tuple()
def optimize(self) -> Generator[Optimizable, None, None]:
rules: List[SyntaxRule] = []
for rule in self.rules:
if rule.is_droppable():
yield rule
continue
yield from rule.optimize()
if rule.is_droppable():
yield rule
continue
rules.append(rule)
self.rules = tuple(rules)
def test(self, view: sublime.View, event: Optional[ListenerEvent] = None) -> Optional[SyntaxRule]:
return first(self.rules, lambda rule: rule.test(view, event))
@classmethod
def make(cls, syntax_rules: Iterable[ST_SyntaxRule]) -> SyntaxRuleCollection:
"""Build this object with the `syntax_rules`."""
obj = cls()
obj.rules = tuple(map(SyntaxRule.make, syntax_rules))
return obj
```
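A conceptual sketch of how a settings-style dict is compiled by `SyntaxRule.make` / `SyntaxRuleCollection.make`. The dict keys mirror the fields read in `make` above; the constraint name inside `rules` is only an assumption for illustration, and the snippet has to run inside Sublime Text because syntax lookup and `view.match_selector` use the editor API.
```python
# Illustrative only; the "first_line_contains" constraint name is assumed,
# and this must run inside Sublime Text (syntax lookup needs the editor API).
example_rule: ST_SyntaxRule = {
    "comment": "shell scripts detected by their shebang",
    "syntaxes": "scope:source.shell.bash",
    "selector": "text.plain",
    "on_events": ["new", "load"],
    "match": "any",
    "rules": [{"constraint": "first_line_contains", "args": ["#!"]}],
}
collection = SyntaxRuleCollection.make([example_rule])
dropped = tuple(collection.optimize())  # rules that can never match are yielded and dropped
# collection.test(view) would then return the first rule whose tests pass, if any
```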
#### File: Sublime-AutoSetSyntax/plugin/shared.py
```python
from .guesslang.client import GuesslangClient
from .rules import SyntaxRuleCollection
from .settings import get_merged_plugin_settings
from typing import Any, Dict, Optional, Tuple
import sublime
WindowId = int
DroppedRules = Tuple[Any, ...]
class G:
"""This class holds "G"lobal variables as its class variables."""
# the guesslang object, which interacts with the Node.js guesslang server
guesslang: Optional[GuesslangClient] = None
# views exist when ST just starts (even before plugin loaded)
views_on_init: Tuple[sublime.View, ...] = tuple()
# per window, the compiled top-level plugin rules
windows_syntax_rule_collection: Dict[WindowId, SyntaxRuleCollection] = {}
# per window, those rules which are dropped after doing optimizations
windows_dropped_rules: Dict[WindowId, DroppedRules] = {}
@classmethod
def is_plugin_ready(cls, window: sublime.Window) -> bool:
return bool(get_merged_plugin_settings(window=window) and cls.get_syntax_rule_collection(window))
@classmethod
def set_syntax_rule_collection(cls, window: sublime.Window, value: SyntaxRuleCollection) -> None:
cls.windows_syntax_rule_collection[window.id()] = value
@classmethod
def get_syntax_rule_collection(cls, window: sublime.Window) -> Optional[SyntaxRuleCollection]:
return cls.windows_syntax_rule_collection.get(window.id())
@classmethod
def remove_syntax_rule_collection(cls, window: sublime.Window) -> Optional[SyntaxRuleCollection]:
return cls.windows_syntax_rule_collection.pop(window.id(), None)
@classmethod
def set_dropped_rules(cls, window: sublime.Window, value: DroppedRules) -> None:
cls.windows_dropped_rules[window.id()] = value
@classmethod
def get_dropped_rules(cls, window: sublime.Window) -> DroppedRules:
return cls.windows_dropped_rules.get(window.id()) or tuple()
@classmethod
def remove_dropped_rules(cls, window: sublime.Window) -> Optional[DroppedRules]:
return cls.windows_dropped_rules.pop(window.id(), None)
```
|
{
"source": "jfcherng-sublime/LSP-intelephense-patcher",
"score": 2
}
|
#### File: LSP-intelephense-patcher/plugin/plugin_message.py
```python
import sublime
def pluginfy_msg(msg: str, *args, **kwargs) -> str:
package_name = __package__.split(".")[0]
return msg.format(*args, _=package_name, **kwargs)
def console_msg(msg: str, *args, **kwargs) -> None:
print(pluginfy_msg(msg, *args, **kwargs))
def status_msg(msg: str, *args, **kwargs) -> None:
sublime.status_message(pluginfy_msg(msg, *args, **kwargs))
def info_box(msg: str, *args, **kwargs) -> None:
sublime.message_dialog(pluginfy_msg(msg, *args, **kwargs))
def error_box(msg: str, *args, **kwargs) -> None:
sublime.error_message(pluginfy_msg(msg, *args, **kwargs))
```
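The `{_}` placeholder is filled with the top-level package name, so callers never hard-code it. A small sketch (the message text is made up):
```python
# Assuming this module is loaded as part of the "LSP-intelephense-patcher" package:
console_msg("{_}: patched {count} file(s)", count=3)
# prints "LSP-intelephense-patcher: patched 3 file(s)" to the Sublime Text console
```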
|
{
"source": "jfcherng-sublime/ST-AceJump",
"score": 3
}
|
#### File: ST-AceJump/libs/char_width_converter.py
```python
FULL_TO_HALF_TABLE = {i + 0xFEE0: i for i in range(0x21, 0x7F)}
HALF_TO_FULL_TABLE = {i: i + 0xFEE0 for i in range(0x21, 0x7F)}
def f2h(string: str) -> str:
""" Convert into half-width. """
return string.translate(FULL_TO_HALF_TABLE)
def h2f(string: str) -> str:
""" Convert into full-width. """
return string.translate(HALF_TO_FULL_TABLE)
```
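The two translation tables cover the printable ASCII range U+0021..U+007E and its full-width counterpart U+FF01..U+FF5E (a fixed offset of 0xFEE0), so the conversion is a plain `str.translate` round trip:
```python
full = h2f("ABC123!")   # -> "ＡＢＣ１２３！"
half = f2h(full)        # -> "ABC123!"
assert half == "ABC123!" and full == "ＡＢＣ１２３！"
# note: the half-width space U+0020 is outside the tables and is left untouched
```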
|
{
"source": "jfcherng-sublime/ST-AutoSetIndentation",
"score": 2
}
|
#### File: ST-AutoSetIndentation/plugin/functions.py
```python
import sublime
from typing import Any, Dict
from .settings import get_setting
from .log import msg, print_msg, show_status_message
def is_view_at_front(view: sublime.View) -> bool:
if not view:
return False
window = view.window()
if not window:
return False
return view == window.active_view()
def is_view_only_invisible_chars(view: sublime.View) -> bool:
return bool(view and view.find(r"[^\s]", 0).begin() < 0)
def is_view_set_by_editorconfig_plugin(view: sublime.View) -> bool:
EDITORCONFIG_PLUGIN_MARKER = "editorconfig"
return bool(view.settings().get(EDITORCONFIG_PLUGIN_MARKER, False))
def is_event_listener_enabled(event: str) -> bool:
"""
@brief Check if a event listener is enabled.
@param event The event
@return True if event listener enabled, False otherwise.
"""
try:
return bool(get_setting("event_listeners", {})[event])
except KeyError:
print_msg('"event_listeners[%s]" is not set in user settings (assumed false)' % event)
return False
def set_indentation_for_view(view: sublime.View, args: Dict[str, Any] = {}) -> None:
"""
@brief Set the indentation for the current view.
@param view The view
@param args The arguments
"""
_args = {"show_message": is_view_at_front(view)}
_args.update(args)
if is_view_set_by_editorconfig_plugin(view):
show_status_message(msg("EditorConfig detected indentation"), _args["show_message"])
else:
view.run_command("auto_set_indentation", _args) # type: ignore
view.settings().set("ASI_is_indentation_detected", True)
```
#### File: libs/IndentFinder/indent_finder.py
```python
import sys
import re
help = \
"""Usage : %s [ --vim-output ] [ --verbose ] file1 file2 ... fileN
Display indentation used in the list of files. Possible answers are (with X
being the number of spaces used for indentation):
space X
tab 8
mixed tab X space Y
mixed means that indentation style is tab at the beginning of the line (tab
being 8 positions) and then spaces to do the indentation, unless you reach 8
spaces which are replaced by a tab. This is the vim source file indentation
for example. In my opinion, this is the worst possible style.
--vim-output: output suitable to use inside vim:
set sts=0 | set tabstop=4 | set noexpandtab | set shiftwidth=4
"""
VERSION='1.4'
### Used when indentation is tab, to set tabstop in vim
DEFAULT_TAB_WIDTH = 4
### default values for files where indentation is not meaningful (empty files)
# possible values:
# DEFAULT_RESULT = ('space', 4 )
# DEFAULT_RESULT = ('space', 2 )
# DEFAULT_RESULT = ('space', 8 )
# DEFAULT_RESULT = ('tab', DEFAULT_TAB_WIDTH )
DEFAULT_RESULT = ('space', 4 )
VERBOSE_QUIET = 0
VERBOSE_INFO = 1
VERBOSE_DEBUG = 2
VERBOSE_DEEP_DEBUG = 3
DEFAULT_VERBOSITY = VERBOSE_QUIET
###
class LineType:
NoIndent = 'NoIndent'
SpaceOnly = 'SpaceOnly'
TabOnly = 'TabOnly'
Mixed = 'Mixed'
BeginSpace = 'BeginSpace'
def info( s ): log( VERBOSE_INFO, s )
def dbg( s ): log( VERBOSE_DEBUG, s )
def deepdbg( s ): log( VERBOSE_DEEP_DEBUG, s )
def log( level, s ):
if level <= IndentFinder.VERBOSITY:
print( s )
class IndentFinder:
"""
IndentFinder reports the indentation used in a source file. Its approach
is not tied to any particular language. It was tested successfully with
python, C, C++ and Java code.
How does it work ?
    It scans each line of the entry file for a space character (white space or
    tab) repeated until a non-space character is found. Such a line
    is considered to be a properly indented line of code. Blank lines and
    comment lines (starting with # or /* or *) are ignored. Lines coming
    after a line ending in '\\' are more likely to be improperly
    indented, and are thus ignored too.
    Only increments in indentation are fed in. Dedentation or keeping
    the same indentation is not taken into account when analysing a file. An increment
    from zero indentation to some indentation is also ignored because
    it is wrong in many cases (header files with many structures, for example, do not always
    obey the indentation of the rest of the code).
    Each line is analysed as:
    - SpaceOnly: indentation of more than 8 spaces
    - TabOnly: indentation of tabs only
    - Mixed: indentation of tabs, then less than 8 spaces
    - BeginSpace: indentation of less than 8 spaces, which could be either a mixed
      indentation or a pure space indentation.
    - non-significant
    Two consecutive significant lines are then considered. The only valid combinations are:
    - (NoIndent, BeginSpace) => space or mixed
    - (NoIndent, Tab) => tab
    - (BeginSpace, BeginSpace) => space or mixed
    - (BeginSpace, SpaceOnly) => space
    - (SpaceOnly, SpaceOnly) => space
    - (TabOnly, TabOnly) => tab
    - (TabOnly, Mixed) => mixed
    - (Mixed, TabOnly) => mixed
    The increment in the number of spaces is then recorded.
    At the end, the numbers of lines with space, mixed and tab indentation
    are compared and a decision is made.
    If no decision can be made, DEFAULT_RESULT is returned.
    If IndentFinder ever reports the wrong indentation, send me a mail immediately,
    if possible with the offending file.
"""
def __init__(self, default_result=DEFAULT_RESULT):
self.clear()
self.default_result = default_result
VERBOSITY = DEFAULT_VERBOSITY
def parse_string( self, string ):
self.clear()
for line in string.splitlines():
self.analyse_line( line )
def parse_file( self, fname ):
self.clear()
with open( fname ) as file:
for line in file:
self.analyse_line( line )
def clear( self ):
self.lines = {}
for i in range(2,9): self.lines['space%d' % i] = 0
for i in range(2,9): self.lines['mixed%d' % i] = 0
self.lines['tab'] = 0
self.nb_processed_lines = 0
self.nb_indent_hint = 0
self.indent_re = re.compile( "^([ \t]+)([^ \t]+)" )
self.mixed_re = re.compile( "^(\t+)( +)$" )
self.skip_next_line = False
self.previous_line_info = None
def analyse_line( self, line ):
if line[-1:] == '\n':
line = line[:-1]
deepdbg( 'analyse_line: "%s"' % line.replace(' ', '.' ).replace('\t','\\t') )
self.nb_processed_lines += 1
skip_current_line = self.skip_next_line
self.skip_next_line = False
if line[-1:] == '\\':
deepdbg( 'analyse_line: Ignoring next line!' )
# skip lines after lines ending in \
self.skip_next_line = True
if skip_current_line:
deepdbg( 'analyse_line: Ignoring current line!' )
return
ret = self.analyse_line_indentation( line )
if ret:
self.nb_indent_hint += 1
deepdbg( 'analyse_line: Result of line analysis: %s' % str(ret) )
return ret
def analyse_line_type( self, line ):
"""Analyse the type of line and return (LineType, <indentation part of
the line>).
The function will reject improperly formatted lines (mixture of tab
and space for example) and comment lines.
"""
mixed_mode = False
tab_part = ''
space_part = ''
if len(line) > 0 and line[0] != ' ' and line[0] != '\t':
return (LineType.NoIndent, '')
mo = self.indent_re.match( line )
if not mo:
deepdbg( 'analyse_line_type: line is not indented' )
return None
indent_part = mo.group(1)
text_part = mo.group(2)
deepdbg( 'analyse_line_type: indent_part="%s" text_part="%s"' %
(indent_part.replace(' ', '.').replace('\t','\\t').replace('\n', '\\n' ),
text_part ) )
if text_part[0] == '*':
# continuation of a C/C++ comment, unlikely to be indented correctly
return None
if text_part[0:2] == '/*' or text_part[0] == '#':
# python, C/C++ comment, might not be indented correctly
return None
if '\t' in indent_part and ' ' in indent_part:
# mixed mode
mo = self.mixed_re.match( indent_part )
if not mo:
# line is not composed of '\t\t\t ', ignore it
return None
mixed_mode = True
tab_part = mo.group(1)
space_part = mo.group(2)
if mixed_mode:
if len(space_part) >= 8:
# this is not mixed mode, this is garbage !
return None
return (LineType.Mixed, tab_part, space_part )
if '\t' in indent_part:
return (LineType.TabOnly, indent_part)
if ' ' in indent_part:
if len(indent_part) < 8:
# this could be mixed mode too
return (LineType.BeginSpace, indent_part)
else:
# this is really a line indented with spaces
return (LineType.SpaceOnly, indent_part )
        assert False, 'We should never get here!'
def analyse_line_indentation( self, line ):
previous_line_info = self.previous_line_info
current_line_info = self.analyse_line_type( line )
self.previous_line_info = current_line_info
if current_line_info == None or previous_line_info == None:
deepdbg('analyse_line_indentation: Not enough line info to analyse line: %s, %s' % (str(previous_line_info), str(current_line_info)))
return
t = (previous_line_info[0], current_line_info[0])
deepdbg( 'analyse_line_indentation: Indent analysis: %s %s' % t )
if (t == (LineType.TabOnly, LineType.TabOnly)
or t == (LineType.NoIndent, LineType.TabOnly) ):
if len(current_line_info[1]) - len(previous_line_info[1]) == 1 :
self.lines['tab'] += 1
return 'tab'
elif (t == (LineType.SpaceOnly, LineType.SpaceOnly)
or t == (LineType.BeginSpace, LineType.SpaceOnly)
or t == (LineType.NoIndent, LineType.SpaceOnly) ):
nb_space = len(current_line_info[1]) - len(previous_line_info[1])
if 1 < nb_space <= 8:
key = 'space%d' % nb_space
self.lines[key] += 1
return key
elif (t == (LineType.BeginSpace, LineType.BeginSpace)
or t == (LineType.NoIndent, LineType.BeginSpace) ):
nb_space = len(current_line_info[1]) - len(previous_line_info[1])
if 1 < nb_space <= 8:
key1 = 'space%d' % nb_space
key2 = 'mixed%d' % nb_space
self.lines[ key1 ] += 1
self.lines[ key2 ] += 1
return key1
elif t == (LineType.BeginSpace, LineType.TabOnly):
# we assume that mixed indentation used 8 characters tabs
            if len(current_line_info[1]) == 1:
                # exactly one tab on this line; more than one tab would mean it is not mixed mode
nb_space = len(current_line_info[1])*8 - len(previous_line_info[1])
if 1 < nb_space <= 8:
key = 'mixed%d' % nb_space
self.lines[ key ] += 1
return key
elif t == (LineType.TabOnly, LineType.Mixed):
tab_part, space_part = tuple(current_line_info[1:3])
if len(previous_line_info[1]) == len(tab_part):
nb_space = len(space_part)
if 1 < nb_space <= 8:
key = 'mixed%d' % nb_space
self.lines[ key ] += 1
return key
elif t == (LineType.Mixed, LineType.TabOnly):
tab_part, space_part = previous_line_info[1:3]
if len(tab_part)+1 == len(current_line_info[1]):
nb_space = 8-len(space_part)
if 1 < nb_space <= 8:
key = 'mixed%d' % nb_space
self.lines[ key ] += 1
return key
else:
pass
return None
def results( self ):
dbg( "Nb of scanned lines : %d" % self.nb_processed_lines )
dbg( "Nb of indent hint : %d" % self.nb_indent_hint )
dbg( "Collected data:" )
for key in self.lines:
if self.lines[key] > 0:
dbg( '%s: %d' % (key, self.lines[key] ) )
max_line_space = max( [ self.lines['space%d'%i] for i in range(2,9) ] )
max_line_mixed = max( [ self.lines['mixed%d'%i] for i in range(2,9) ] )
max_line_tab = self.lines['tab']
dbg( 'max_line_space: %d' % max_line_space )
dbg( 'max_line_mixed: %d' % max_line_mixed )
dbg( 'max_line_tab: %d' % max_line_tab )
### Result analysis
#
# 1. Space indented file
# - lines indented with less than 8 space will fill mixed and space array
# - lines indented with 8 space or more will fill only the space array
# - almost no lines indented with tab
#
# => more lines with space than lines with mixed
        # => a lot more lines with space than tab
#
# 2. Tab indented file
# - most lines will be tab only
# - very few lines as mixed
# - very few lines as space only
#
# => a lot more lines with tab than lines with mixed
# => a lot more lines with tab than lines with space
#
# 3. Mixed tab/space indented file
# - some lines are tab-only (lines with exactly 8 step indentation)
# - some lines are space only (less than 8 space)
# - all other lines are mixed
#
# If mixed is tab + 2 space indentation:
# - a lot more lines with mixed than with tab
# If mixed is tab + 4 space indentation
        # - as many lines with mixed as with tab
#
# If no lines exceed 8 space, there will be only lines with space
# and tab but no lines with mixed. Impossible to detect mixed indentation
# in this case, the file looks like it's actually indented as space only
# and will be detected so.
#
# => same or more lines with mixed than lines with tab only
# => same or more lines with mixed than lines with space only
#
result = None
# Detect space indented file
if max_line_space >= max_line_mixed and max_line_space > max_line_tab:
nb = 0
indent_value = None
for i in range(8,1,-1):
if self.lines['space%d'%i] > int( nb * 1.1 ) : # give a 10% threshold
indent_value = i
nb = self.lines[ 'space%d' % indent_value ]
if indent_value is None: # no lines
result = self.default_result
else:
result = ('space', indent_value )
# Detect tab files
elif max_line_tab > max_line_mixed and max_line_tab > max_line_space:
result = ('tab', DEFAULT_TAB_WIDTH )
# Detect mixed files
elif max_line_mixed >= max_line_tab and max_line_mixed > max_line_space:
nb = 0
indent_value = None
for i in range(8,1,-1):
if self.lines['mixed%d'%i] > int( nb * 1.1 ) : # give a 10% threshold
indent_value = i
nb = self.lines[ 'mixed%d' % indent_value ]
if indent_value is None: # no lines
result = self.default_result
else:
result = ('mixed', (8,indent_value) )
else:
# not enough information to make a decision
result = self.default_result
info( "Result: %s" % str( result ) )
return result
def __str__ (self):
itype, ival = self.results()
if itype != 'mixed':
return '%s %d' % (itype, ival)
else:
itab, ispace = ival
return '%s tab %d space %d' % (itype, itab, ispace)
def vim_output( self ):
result = self.results()
indent_type, n = result
if indent_type == "space":
# spaces:
# => set sts to the number of spaces
# => set tabstop to the number of spaces
# => expand tabs to spaces
# => set shiftwidth to the number of spaces
return "set sts=%d | set tabstop=%d | set expandtab | set shiftwidth=%d \" (%s %d)" % (n,n,n,indent_type,n)
elif indent_type == "tab":
# tab:
# => set sts to 0
# => set tabstop to preferred value
# => set expandtab to false
# => set shiftwidth to tabstop
return "set sts=0 | set tabstop=%d | set noexpandtab | set shiftwidth=%d \" (%s)" % (DEFAULT_TAB_WIDTH, DEFAULT_TAB_WIDTH, indent_type )
if indent_type == 'mixed':
tab_indent, space_indent = n
            # mixed:
            # => set sts to 4
# => set tabstop to tab_indent
# => set expandtab to false
# => set shiftwidth to space_indent
return "set sts=4 | set tabstop=%d | set noexpandtab | set shiftwidth=%d \" (%s %d)" % (tab_indent, space_indent, indent_type, space_indent )
def main():
VIM_OUTPUT = 0
file_list = []
for opt in sys.argv[1:]:
if opt == "--vim-output":
VIM_OUTPUT = 1
elif opt == "--verbose" or opt == '-v':
IndentFinder.VERBOSITY += 1
elif opt == "--version":
print( 'IndentFinder v%s' % VERSION )
return
elif opt[0] == "-":
print( help % sys.argv[0] )
return
else:
file_list.append( opt )
fi = IndentFinder()
one_file = (len(file_list) == 1)
for fname in file_list:
fi.parse_file( fname )
if not one_file:
if VIM_OUTPUT:
print( "%s : %s" % (fname, fi.vim_output()) )
else:
print( "%s : %s" % (fname, str(fi)) )
if one_file:
if VIM_OUTPUT:
sys.stdout.write( fi.vim_output() )
else:
print( str(fi) )
if __name__ == "__main__":
main()
```
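The docstring of `IndentFinder` above describes the line-pair heuristic; driving it from code is just `parse_string()`/`parse_file()` followed by `results()`. A minimal sketch:
```python
finder = IndentFinder()
finder.parse_string(
    "def main():\n"
    "    x = 1\n"
    "    if x:\n"
    "        return x\n"
)
print(finder.results())     # ('space', 4) for this buffer
print(finder.vim_output())  # the same decision, rendered as vim settings
```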
#### File: ST-AutoSetIndentation/plugin/log.py
```python
import sublime
def msg(message: str) -> str:
"""
@brief Generate plugin message.
@param message The message
@return The plugin message.
"""
from .settings import get_package_name
return "[{plugin}] {message}".format(plugin=get_package_name(), message=message)
def print_msg(message: str, show_message: bool = True) -> None:
"""
@brief Print plugin message to ST's console.
@param message The message
@param show_message Whether to print the message
"""
if show_message:
print(msg(message))
def show_status_message(message: str, show_message: bool = True) -> None:
"""
@brief Shows message in the status bar.
@param message The message
@param show_message Whether to show the message
"""
from .settings import get_setting
if show_message and get_setting("show_status_message"):
sublime.status_message(message)
```
|
{
"source": "jfcherng-sublime/ST-AutoSetSyntax",
"score": 3
}
|
#### File: rules/constraints/is_in_git_repo.py
```python
from ..constraint import AbstractConstraint
import sublime
class IsInGitRepoConstraint(AbstractConstraint):
"""Check whether this file is in a git repo."""
def test(self, view: sublime.View) -> bool:
view_info = self.get_view_info(view)
# early return so that we may save some IO operations
if not view_info["file_name"]:
return False
# `.git/` directory for normal Git repo and `.git` file for Git worktree
return self.has_sibling(view_info["file_path"], ".git", use_exists=True)
```
#### File: rules/constraints/name_contains.py
```python
from ..constraint import AbstractConstraint
from typing import Any, Tuple
import sublime
class NameContainsConstraint(AbstractConstraint):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.needles: Tuple[str, ...] = self._handled_args()
def is_droppable(self) -> bool:
return not self.needles
def test(self, view: sublime.View) -> bool:
filename = self.get_view_info(view)["file_name"]
return any((needle in filename) for needle in self.needles)
```
#### File: plugin/rules/match.py
```python
from __future__ import annotations
# __future__ must be the first import
from ..helper import camel_to_snake
from ..helper import first
from ..helper import get_all_subclasses
from ..helper import remove_suffix
from ..lru_cache import clearable_lru_cache
from ..types import Optimizable, ST_MatchRule
from .constraint import ConstraintRule
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from dataclasses import field
from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union
import sublime
def get_match(obj: Any) -> Optional[Type[AbstractMatch]]:
return first(get_matches(), lambda t: t.is_supported(obj))
@clearable_lru_cache()
def get_matches() -> Tuple[Type[AbstractMatch], ...]:
return tuple(
sorted(
get_all_subclasses(AbstractMatch, skip_abstract=True), # type: ignore
key=lambda cls: cls.name(),
)
)
@dataclass
class MatchRule(Optimizable):
match: Optional[AbstractMatch] = None
match_name: str = ""
args: Tuple[Any, ...] = tuple()
kwargs: Dict[str, Any] = field(default_factory=dict)
rules: Tuple[MatchableRule, ...] = tuple()
def is_droppable(self) -> bool:
return not (self.rules and self.match and not self.match.is_droppable(self.rules))
def optimize(self) -> Generator[Any, None, None]:
rules: List[MatchableRule] = []
for rule in self.rules:
if rule.is_droppable():
yield rule
continue
yield from rule.optimize()
if rule.is_droppable():
yield rule
continue
rules.append(rule)
self.rules = tuple(rules)
def test(self, view: sublime.View) -> bool:
assert self.match
return self.match.test(view, self.rules)
@classmethod
def make(cls, match_rule: ST_MatchRule) -> MatchRule:
"""Build this object with the `match_rule`."""
obj = cls()
if args := match_rule.get("args"):
# make sure args is always a tuple
obj.args = tuple(args) if isinstance(args, list) else (args,)
if kwargs := match_rule.get("kwargs"):
obj.kwargs = kwargs
match = match_rule.get("match", "any")
if match_class := get_match(match):
obj.match_name = match
obj.match = match_class(*obj.args, **obj.kwargs)
rules_compiled: List[MatchableRule] = []
for rule in match_rule.get("rules", []):
rule_class: Optional[Type[MatchableRule]] = None
if "constraint" in rule:
rule_class = ConstraintRule
elif "match" in rule:
rule_class = MatchRule
if rule_class and (rule_compiled := rule_class.make(rule)): # type: ignore
rules_compiled.append(rule_compiled)
obj.rules = tuple(rules_compiled)
return obj
# rules that can be used in a match rule
MatchableRule = Union[ConstraintRule, MatchRule]
class AbstractMatch(metaclass=ABCMeta):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.args = args
self.kwargs = kwargs
@classmethod
def name(cls) -> str:
"""The nickname of this class. Converts "FooBarMatch" into "foo_bar" by default."""
return camel_to_snake(remove_suffix(cls.__name__, "Match"))
@classmethod
def is_supported(cls, obj: Any) -> bool:
"""Determines whether this class supports `obj`."""
return str(obj) == cls.name()
def is_droppable(self, rules: Tuple[MatchableRule, ...]) -> bool:
"""
Determines whether this object is droppable.
If it's droppable, then it may be dropped by who holds it during optimizing.
"""
return False
@abstractmethod
def test(self, view: sublime.View, rules: Tuple[MatchableRule, ...]) -> bool:
"""Tests whether the `view` passes this `match` with those `rules`."""
...
@staticmethod
def test_count(view: sublime.View, rules: Tuple[MatchableRule, ...], goal: float) -> bool:
"""Tests whether the amount of passing `rules` is greater than or equal to `goal`."""
if goal <= 0:
return True
tolerance = len(rules) - goal # how many rules can be failed at most
for rule in rules:
if tolerance < 0:
return False
if rule.test(view):
goal -= 1
if goal == 0:
return True
else:
tolerance -= 1
return False
```
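The `test_count` helper bails out as soon as either the goal is reached or too many rules have failed. The same counting logic, re-implemented standalone purely for illustration (no Sublime API involved):
```python
def count_at_least(results, goal):
    """Return True when at least `goal` of the boolean `results` are True."""
    if goal <= 0:
        return True
    tolerance = len(results) - goal   # how many failures can still be afforded
    for passed in results:
        if tolerance < 0:
            return False              # too many failures already; stop early
        if passed:
            goal -= 1
            if goal == 0:
                return True
        else:
            tolerance -= 1
    return False

assert count_at_least([True, False, True], 2) is True
assert count_at_least([True, False, False], 2) is False
```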
|
{
"source": "jfcherng-sublime/ST-BracketHighlighter",
"score": 3
}
|
#### File: ST-BracketHighlighter/bh_modules/erlangcase.py
```python
from BracketHighlighter.bh_plugin import import_module
lowercase = import_module("bh_modules.lowercase")
def validate(*args):
"""Check if bracket is lowercase."""
return lowercase.validate(*args)
```
#### File: ST-BracketHighlighter/bh_modules/phpkeywords.py
```python
def compare(name, first, second, bfr):
"""Pair the appropriate open bracket with its close."""
return "end" + bfr[first.begin:first.end].lower() == bfr[second.begin:second.end].lower()
```
#### File: ST-BracketHighlighter/bh_modules/tags.py
```python
from backrefs import bre
from collections import namedtuple
import sublime
from os.path import basename, splitext
TAG_OPEN = 0
TAG_CLOSE = 1
last_mode = None
def process_tag_pattern(pattern, variables=None):
"""Process the tag pattern."""
if variables is None:
variables = {}
if isinstance(pattern, str):
pattern = bre.compile_search(pattern % variables, bre.I | bre.M)
return pattern
class TagEntry(namedtuple('TagEntry', ['begin', 'end', 'name', 'optional', 'single'])):  # `verbose` kwarg dropped: it was removed in Python 3.7
"""Tag entry tuple."""
def move(self, begin, end):
"""Create a new tuple from this tuple."""
return self._replace(begin=begin, end=end)
def compare_language(language, lang_list):
"""Check if language is found."""
found = False
for l in lang_list:
if language == l.lower():
found = True
break
return found
def get_tag_mode(view, tag_mode_config):
"""Get the tag mode."""
default_mode = None
syntax = view.settings().get('syntax')
language = splitext(basename(syntax))[0].lower() if syntax is not None else "plain text"
if isinstance(tag_mode_config, list):
for item in tag_mode_config:
            if isinstance(item, dict) and compare_language(language, item.get('syntax', [])):
first_line = item.get('first_line', '')
if first_line:
size = view.size() - 1
if size > 256:
size = 256
if (
isinstance(first_line, str) and
bre.compile_search(first_line, bre.I).match(view.substr(sublime.Region(0, size)))
):
return item.get('mode', default_mode)
else:
return item.get('mode', default_mode)
return default_mode
def highlighting(view, name, style, left, right):
"""Highlight only the tag name."""
tag_settings = sublime.load_settings("bh_tag.sublime-settings")
match_style = tag_settings.get("tag_style", {}).get(last_mode, None)
if match_style is not None and style == match_style:
tag_name = tag_settings.get('tag_name', {}).get(last_mode, r'[\w\:\.\-]+')
if left is not None:
region = view.find(tag_name, left.begin)
left = left.move(region.begin(), region.end())
if right is not None:
region = view.find(tag_name, right.begin)
right = right.move(region.begin(), region.end())
return left, right
def post_match(view, name, style, first, second, center, bfr, threshold):
"""
Given two brackets, determine if they contain a tag.
Decide whether it is an opening or closing, and then
find its respective closing or opening.
"""
# We need to know the mode during the highlight event, so track the last mode.
global last_mode
left, right = first, second
threshold = [0, len(bfr)] if threshold is None else threshold
bh_settings = sublime.load_settings("bh_core.sublime-settings")
tag_settings = sublime.load_settings("bh_tag.sublime-settings")
tag_mode = get_tag_mode(view, tag_settings.get("tag_mode", []))
tag_style = tag_settings.get("tag_style", {}).get(tag_mode, '?')
last_mode = tag_mode
outside_adj = bh_settings.get("bracket_outside_adjacent", False)
bracket_style = style
if first is not None and tag_mode is not None:
matcher = TagMatch(view, bfr, threshold, first, second, center, outside_adj, tag_mode)
left, right = matcher.match()
if not matcher.no_tag:
bracket_style = tag_style
return left, right, bracket_style
class TagSearch(object):
"""Searches for tags."""
def __init__(
self, view, bfr, window, center, pattern,
match_type, mode, optional_tags, self_closing_tags, void_tags
):
"""Prepare tag search object."""
self.start = int(window[0])
self.end = int(window[1])
self.optional_tags = optional_tags
self.void_tags = void_tags
self.self_closing_tags = self_closing_tags
self.center = center
self.pattern = pattern
self.match_type = match_type
self.mode = mode
self.bfr = bfr
self.prev_match = None
self.return_prev = False
self.done = False
self.view = view
settings = sublime.load_settings("bh_tag.sublime-settings")
try:
self.scope_exclude = settings.get("tag_scope_exclude", {}).get(mode, ['string', 'comment'])
except Exception:
self.scope_exclude = ['string', 'comment']
def scope_check(self, pt):
"""Check if scope is good."""
illegal_scope = False
for exclude in self.scope_exclude:
illegal_scope |= bool(self.view.score_selector(pt, exclude))
return illegal_scope
def reset_end_state(self):
"""Reset and end the current state."""
self.done = False
self.prev_match = None
self.return_prev = False
def remember(self):
"""Instruct object to return the last tag."""
self.return_prev = True
self.done = False
def get_tags(self):
"""Find all the tags."""
if self.done:
return
if self.return_prev:
self.return_prev = False
yield self.prev_match
for m in self.pattern.finditer(self.bfr, self.start, self.end):
name = m.group(1).lower()
if not self.match_type:
self_closing_slash = bool(m.group(2) != "")
if not self_closing_slash and self.optional_tags is not None:
optional = self.optional_tags.match(name) is not None
else:
optional = False
if self_closing_slash and self.self_closing_tags is not None:
self_closing = self.self_closing_tags.match(name) is not None
else:
self_closing = False
if not optional and not self_closing and self.void_tags is not None:
void = self.void_tags.match(name) is not None
else:
void = False
else:
if self.void_tags is not None and self.void_tags.match(name) is not None:
continue
void = False
optional = False
self_closing = False
start = m.start(0)
end = m.end(0)
if not self.scope_check(start):
self.prev_match = TagEntry(start, end, name, optional, void or self_closing)
self.start = end
yield self.prev_match
self.done = True
class TagMatch(object):
"""Find a tag match."""
def __init__(self, view, bfr, threshold, first, second, center, outside_adj, mode):
"""Prepare tag match object."""
tag_settings = sublime.load_settings('bh_tag.sublime-settings')
self.view = view
self.bfr = bfr
self.mode = mode
self.tag_open = process_tag_pattern(
tag_settings.get("start_tag")[mode],
{
"attributes": tag_settings.get('attributes', {}).get(mode, ''),
"tag_name": tag_settings.get('tag_name', {}).get(mode, '')
}
)
self.tag_close = process_tag_pattern(
tag_settings.get("end_tag")[mode]
)
try:
self.optional_tags = bre.compile_search(tag_settings.get('optional_tag_patterns')[self.mode], bre.I)
except Exception:
self.optional_tags = None
try:
self.void_tags = bre.compile_search(tag_settings.get('void_tag_patterns')[self.mode], bre.I)
except Exception:
self.void_tags = None
try:
self.self_closing_tags = bre.compile_search(tag_settings.get('self_closing_tag_patterns')[self.mode], bre.I)
except Exception:
self.self_closing_tags = None
tag, tag_type, tag_end = self.get_first_tag(first[0])
self.left, self.right = None, None
self.window = None
self.no_tag = False
if outside_adj:
if first[0] == center:
center += 1
elif center == tag_end:
center -= 1
if tag and first[0] < center < tag_end:
if tag.single:
self.left = tag
self.right = tag
else:
if tag_type == "open":
self.left = tag
self.window = (tag_end, len(bfr) if threshold is None else threshold[1])
else:
self.right = tag
self.window = (0 if threshold is None else threshold[0], first[0])
else:
self.left = first
self.right = second
self.no_tag = True
def get_first_tag(self, offset):
"""
Check if tag region is an opening tag or closing tag.
Return the results
"""
tag = None
tag_type = None
optional = False
void = False
m = self.tag_open.match(self.bfr[offset:])
end = None
if m:
name = m.group(1).lower()
self_closing_slash = bool(m.group(2) != "")
optional = (
self.optional_tags is not None and
not self_closing_slash and
self.optional_tags.match(name) is not None
)
self_closing = (
self.self_closing_tags is not None and
self_closing_slash and
self.self_closing_tags.match(name) is not None
)
void = (
not optional and
not self_closing and
self.void_tags is not None and
self.void_tags.match(name) is not None
)
start = m.start(0) + offset
end = m.end(0) + offset
tag = TagEntry(start, end, name, optional, void or self_closing)
tag_type = "open"
self.center = end
else:
m = self.tag_close.match(self.bfr[offset:])
if m:
name = m.group(1).lower()
void = (
self.void_tags is not None and
self.void_tags.match(name) is not None
)
if not void:
start = m.start(0) + offset
end = m.end(0) + offset
tag = TagEntry(start, end, name, optional, void)
tag_type = "close"
self.center = offset
return tag, tag_type, end
def compare_tags(self, left, right):
"""Check if tags share the same name."""
return left.name == right.name
def resolve_optional(self, stack, c):
"""Handle self closing tags."""
found_tag = None
b = stack[-1]
if self.compare_tags(b, c):
found_tag = b
stack.pop()
else:
while b is not None and b.optional:
stack.pop()
if len(stack):
b = stack[-1]
if self.compare_tags(b, c):
found_tag = b
stack.pop()
break
else:
b = None
return found_tag
def match(self):
"""
Find the corresponding open or close.
Match only if either the close or open is already found.
"""
stack = []
# No tags to search for
if self.no_tag or (self.left and self.right):
return self.left, self.right
# Initialize tag matching objects
osearch = TagSearch(
self.view, self.bfr, self.window,
self.center, self.tag_open,
0, self.mode,
self.optional_tags,
self.self_closing_tags,
self.void_tags
)
csearch = TagSearch(
self.view, self.bfr, self.window,
self.center, self.tag_close,
1, self.mode,
self.optional_tags,
self.self_closing_tags,
self.void_tags
)
# Searching for opening or closing tag to match
match_type = TAG_OPEN if self.right else TAG_CLOSE
# Match the tags
for c in csearch.get_tags():
if len(stack) and osearch.done:
if self.resolve_optional(stack, c):
continue
for o in osearch.get_tags():
if o.end <= c.begin:
if not o.single:
stack.append(o)
continue
else:
osearch.remember()
break
if len(stack):
if self.resolve_optional(stack, c):
continue
elif match_type == TAG_OPEN and not osearch.done:
continue
if match_type == TAG_CLOSE:
if self.left is None or self.compare_tags(self.left, c):
self.right = c
elif self.left.optional:
self.right = self.left
break
if match_type == TAG_OPEN:
            # Find the rest of the unmatched left side open brackets
# approaching the cursor if all closing brackets were matched
# Select the most recent open bracket on the stack.
for o in osearch.get_tags():
if not o.single:
stack.append(o)
if len(stack):
self.left = self.resolve_optional(stack, self.right)
elif self.right is None and self.left is not None and self.left.optional:
# Account for the opening tag that was found being a self closing
self.right = self.left
return self.left, self.right
```
#### File: jfcherng-sublime/ST-BracketHighlighter/bh_plugin.py
```python
import sublime
import sublime_plugin
from os.path import normpath, join
import imp
from collections import namedtuple
import sys
import traceback
import re
from .bh_logging import log
class Payload(object):
"""Plugin payload."""
status = False
plugin = None
args = None
@classmethod
def clear(cls):
"""Clear payload."""
cls.status = False
cls.plugin = None
cls.args = None
class BracketRegion(namedtuple('BracketRegion', ['begin', 'end'])):  # `verbose` kwarg dropped: it was removed in Python 3.7
"""Bracket regions for plugins."""
def move(self, begin, end):
"""Move bracket region to different points."""
return self._replace(begin=begin, end=end)
def size(self):
"""Get the size of the region."""
return abs(self.begin - self.end)
def toregion(self):
"""Convert to sublime region."""
return sublime.Region(self.begin, self.end)
def is_bracket_region(obj):
"""Check if object is a `BracketRegion`."""
return isinstance(obj, BracketRegion)
def sublime_format_path(pth):
"""Format path for Sublime internally."""
m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
if sublime.platform() == "windows" and m is not None:
pth = m.group(1) + "/" + m.group(2)
return pth.replace("\\", "/")
def load_modules(obj, loaded):
"""Load bracket plugin modules."""
plib = obj.get("plugin_library")
if plib is None:
return
try:
module = _import_module(plib, loaded)
obj["compare"] = getattr(module, "compare", None)
obj["post_match"] = getattr(module, "post_match", None)
obj["validate"] = getattr(module, "validate", None)
obj["highlighting"] = getattr(module, "highlighting", None)
loaded.add(plib)
except Exception:
log("Could not load module %s\n%s" % (plib, str(traceback.format_exc())))
raise
def _import_module(module_name, loaded=None):
"""
Import the module.
Import the module and track which modules have been loaded
so we don't load already loaded modules.
"""
# Pull in built-in and custom plugin directory
if module_name.startswith("bh_modules."):
path_name = join("Packages", "BracketHighlighter", normpath(module_name.replace('.', '/')))
else:
path_name = join("Packages", normpath(module_name.replace('.', '/')))
path_name += ".py"
if loaded is not None and module_name in loaded:
module = sys.modules[module_name]
else:
module = imp.new_module(module_name)
sys.modules[module_name] = module
exec(
compile(
sublime.load_resource(sublime_format_path(path_name)),
module_name,
'exec'
),
sys.modules[module_name].__dict__
)
return module
def import_module(module, attribute=None):
"""Import module or module attribute."""
mod = _import_module(module)
return getattr(mod, attribute) if attribute is not None else mod
class BracketPluginRunCommand(sublime_plugin.TextCommand):
"""Sublime run command to run BH plugins."""
def run(self, edit):
"""Run the plugin."""
try:
Payload.args["edit"] = edit
Payload.plugin.run(**Payload.args)
Payload.status = True
except Exception:
print("BracketHighlighter: Plugin Run Error:\n%s" % str(traceback.format_exc()))
class BracketPlugin(object):
"""Class for preparing and running plugins."""
def __init__(self, plugin, loaded):
"""Load plugin module."""
self.enabled = False
self.args = plugin['args'] if ("args" in plugin) else {}
self.plugin = None
if 'command' in plugin:
plib = plugin['command']
try:
module = _import_module(plib, loaded)
self.plugin = getattr(module, 'plugin')()
loaded.add(plib)
self.enabled = True
except Exception:
print('BracketHighlighter: Load Plugin Error: %s\n%s' % (plugin['command'], traceback.format_exc()))
def is_enabled(self):
"""Check if plugin is enabled."""
return self.enabled
def run_command(self, view, name, left, right, selection):
"""Load arguments into plugin and run."""
nobracket = False
refresh_match = False
Payload.status = False
Payload.plugin = self.plugin()
setattr(Payload.plugin, "left", left)
setattr(Payload.plugin, "right", right)
setattr(Payload.plugin, "view", view)
setattr(Payload.plugin, "selection", selection)
setattr(Payload.plugin, "nobracket", False)
setattr(Payload.plugin, "refresh_match", False)
self.args["edit"] = None
self.args["name"] = name
Payload.args = self.args
# Call a `TextCommand` to run the plugin so it can feed in the `Edit` object
view.run_command("bracket_plugin_run")
if Payload.status:
left = Payload.plugin.left
right = Payload.plugin.right
selection = Payload.plugin.selection
nobracket = Payload.plugin.nobracket
refresh_match = Payload.plugin.refresh_match
Payload.clear()
return left, right, selection, nobracket, refresh_match
class BracketPluginCommand(object):
"""Bracket Plugin base class."""
def run(self, bracket, content, selection):
"""Run the plugin class."""
pass
```
|
{
"source": "jfcherng-sublime/ST-CommandAndMenu",
"score": 2
}
|
#### File: plugin/commands/open_git_repo_on_web.py
```python
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import re
import shlex
import shutil
import sublime
import sublime_plugin
import subprocess
import threading
PathLike = Union[str, Path]
class GitException(Exception):
"""Exception raised when something went wrong for git"""
def __init__(self, message: str) -> None:
super().__init__(message)
class Git:
"""Git command wrapper"""
def __init__(
self,
repo_path: PathLike,
git_bin: str = "git",
encoding: str = "utf-8",
shell: bool = False,
timeout_s: float = 3,
) -> None:
"""Init a Git wrapper with an instance"""
# always use folder as repo path
if (path := Path(repo_path).resolve()).is_file():
path = path.parent
self.repo_path = path
self.git_bin = shutil.which(git_bin) or git_bin
self.encoding = encoding
self.shell = shell
self.timeout_s = timeout_s
def run(self, *args: str) -> str:
"""Run a git command."""
cmd_tuple = (self.git_bin,) + args
if sublime.platform() == "windows":
# do not create a window for the process
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
else:
startupinfo = None # type: ignore
process = subprocess.Popen(
cmd_tuple,
cwd=self.repo_path,
encoding=self.encoding,
shell=self.shell,
startupinfo=startupinfo,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True,
)
out, err = process.communicate(timeout=self.timeout_s)
ret_code = process.poll() or 0
if ret_code:
cmd_str = " ".join(map(shlex.quote, cmd_tuple))
raise GitException(f"`{cmd_str}` returned code {ret_code}: {err}")
return out.rstrip()
def get_version(self) -> Optional[Tuple[int, int, int]]:
try:
m = re.search(r"(\d+)\.(\d+)\.(\d+)", self.run("version"))
return tuple(map(int, m.groups())) if m else None # type: ignore
except GitException:
return None
def get_remote_web_url(self, remote: Optional[str] = None) -> Optional[str]:
try:
# use the tracking upstream
if not remote:
# `upstream` will be something like "refs/remotes/origin/master"
upstream = self.run("rev-parse", "--symbolic-full-name", "@{upstream}")
remote = re.sub(r"^refs/remotes/", "", upstream).partition("/")[0]
remote_uri = self.run("remote", "get-url", remote)
remote_url = self.get_url_from_remote_uri(remote_uri)
return remote_url
except GitException:
return None
@staticmethod
def is_in_git_repo(path: PathLike) -> bool:
path_prev, path = None, Path(path).resolve()
while path != path_prev:
# git dir or worktree, which has a .git file in it
if (path / ".git").exists():
return True
path_prev, path = path, path.parent
return False
@staticmethod
def get_url_from_remote_uri(uri: str) -> Optional[str]:
url: Optional[str] = None
re_flags = re.IGNORECASE | re.MULTILINE
# SSH (unsupported)
if re.search(r"^ssh://", uri, re_flags):
url = None
# HTTP
if re.search(r"^https?://", uri, re_flags):
url = uri
# common providers
if re.search(r"^git@", uri, re_flags):
parts = uri[4:].split(":") # "4:" removes "git@"
host = ":".join(parts[:-1])
path = parts[-1]
url = f"https://{host}/{path}"
        return re.sub(r"\.git$", "", url, flags=re_flags) if url else None
def get_dir_for_git(view: sublime.View) -> Optional[str]:
if filename := view.file_name():
return str(Path(filename).parent)
if not (window := view.window()):
return None
return next(iter(window.folders()), None)
def guarantee_git_dir(failed_return: Optional[Any] = None) -> Callable:
def decorator(func: Callable) -> Callable:
def wrapped(self: sublime_plugin.WindowCommand, *args: Any, **kwargs: Any) -> Any:
if not ((view := self.window.active_view()) and (git_dir := get_dir_for_git(view))):
return failed_return
return func(self, git_dir, *args, **kwargs)
return wrapped
return decorator
class OpenGitRepoOnWebCommand(sublime_plugin.WindowCommand):
@guarantee_git_dir(failed_return=False)
def is_enabled(self, git_dir: str) -> bool:
return Git.is_in_git_repo(git_dir)
@guarantee_git_dir()
def run(self, git_dir: str, remote: Optional[str] = None) -> None:
t = threading.Thread(target=self._worker, args=(git_dir, remote))
t.start()
@staticmethod
def _worker(git_dir: str, remote: Optional[str] = None) -> None:
if not (git := Git(git_dir)):
return
if not (repo_url := git.get_remote_web_url(remote=remote)):
return sublime.error_message("Can't determine repo web URL...")
sublime.run_command("open_url", {"url": repo_url})
```
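The two static helpers on `Git` work outside Sublime Text as well; in particular, `get_url_from_remote_uri` maps common remote URIs to a browsable URL (SSH-scheme URIs are intentionally unsupported):
```python
assert Git.get_url_from_remote_uri("git@github.com:user/repo.git") == "https://github.com/user/repo"
assert Git.get_url_from_remote_uri("https://github.com/user/repo.git") == "https://github.com/user/repo"
assert Git.get_url_from_remote_uri("ssh://git@github.com/user/repo.git") is None
```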
|
{
"source": "jfcherng-sublime/ST-FollowLnk",
"score": 2
}
|
#### File: libs/pylnk/pylnk3.py
```python
import argparse
import ntpath
import os
import re
import time
from datetime import datetime
from io import BytesIO, IOBase
from pprint import pformat
from struct import pack, unpack
from typing import Dict, Optional, Tuple, Union
DEFAULT_CHARSET = 'cp1251'
# ---- constants
_SIGNATURE = b'L\x00\x00\x00'
_GUID = b'\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F'
_LINK_INFO_HEADER_DEFAULT = 0x1C
_LINK_INFO_HEADER_OPTIONAL = 0x24
_LINK_FLAGS = (
'HasLinkTargetIDList',
'HasLinkInfo',
'HasName',
'HasRelativePath',
'HasWorkingDir',
'HasArguments',
'HasIconLocation',
'IsUnicode',
'ForceNoLinkInfo',
# new
'HasExpString',
'RunInSeparateProcess',
'Unused1',
'HasDarwinID',
'RunAsUser',
'HasExpIcon',
'NoPidlAlias',
'Unused2',
'RunWithShimLayer',
'ForceNoLinkTrack',
'EnableTargetMetadata',
'DisableLinkPathTracking',
'DisableKnownFolderTracking',
'DisableKnownFolderAlias',
'AllowLinkToLink',
'UnaliasOnSave',
'PreferEnvironmentPath',
'KeepLocalIDListForUNCTarget',
)
_FILE_ATTRIBUTES_FLAGS = (
'read_only', 'hidden', 'system_file', 'reserved1',
'directory', 'archive', 'reserved2', 'normal',
'temporary', 'sparse_file', 'reparse_point',
'compressed', 'offline', 'not_content_indexed',
'encrypted',
)
_MODIFIER_KEYS = ('SHIFT', 'CONTROL', 'ALT')
WINDOW_NORMAL = "Normal"
WINDOW_MAXIMIZED = "Maximized"
WINDOW_MINIMIZED = "Minimized"
_SHOW_COMMANDS = {1: WINDOW_NORMAL, 3: WINDOW_MAXIMIZED, 7: WINDOW_MINIMIZED}
_SHOW_COMMAND_IDS = dict((v, k) for k, v in _SHOW_COMMANDS.items())
DRIVE_UNKNOWN = "Unknown"
DRIVE_NO_ROOT_DIR = "No root directory"
DRIVE_REMOVABLE = "Removable"
DRIVE_FIXED = "Fixed (Hard disk)"
DRIVE_REMOTE = "Remote (Network drive)"
DRIVE_CDROM = "CD-ROM"
DRIVE_RAMDISK = "Ram disk"
_DRIVE_TYPES = {0: DRIVE_UNKNOWN,
1: DRIVE_NO_ROOT_DIR,
2: DRIVE_REMOVABLE,
3: DRIVE_FIXED,
4: DRIVE_REMOTE,
5: DRIVE_CDROM,
6: DRIVE_RAMDISK}
_DRIVE_TYPE_IDS = dict((v, k) for k, v in _DRIVE_TYPES.items())
_KEYS = {
0x30: '0', 0x31: '1', 0x32: '2', 0x33: '3', 0x34: '4', 0x35: '5', 0x36: '6',
0x37: '7', 0x38: '8', 0x39: '9', 0x41: 'A', 0x42: 'B', 0x43: 'C', 0x44: 'D',
0x45: 'E', 0x46: 'F', 0x47: 'G', 0x48: 'H', 0x49: 'I', 0x4A: 'J', 0x4B: 'K',
0x4C: 'L', 0x4D: 'M', 0x4E: 'N', 0x4F: 'O', 0x50: 'P', 0x51: 'Q', 0x52: 'R',
0x53: 'S', 0x54: 'T', 0x55: 'U', 0x56: 'V', 0x57: 'W', 0x58: 'X', 0x59: 'Y',
0x5A: 'Z', 0x70: 'F1', 0x71: 'F2', 0x72: 'F3', 0x73: 'F4', 0x74: 'F5',
0x75: 'F6', 0x76: 'F7', 0x77: 'F8', 0x78: 'F9', 0x79: 'F10', 0x7A: 'F11',
0x7B: 'F12', 0x7C: 'F13', 0x7D: 'F14', 0x7E: 'F15', 0x7F: 'F16', 0x80: 'F17',
0x81: 'F18', 0x82: 'F19', 0x83: 'F20', 0x84: 'F21', 0x85: 'F22', 0x86: 'F23',
0x87: 'F24', 0x90: 'NUM LOCK', 0x91: 'SCROLL LOCK'
}
_KEY_CODES = dict((v, k) for k, v in _KEYS.items())
ROOT_MY_COMPUTER = 'MY_COMPUTER'
ROOT_MY_DOCUMENTS = 'MY_DOCUMENTS'
ROOT_NETWORK_SHARE = 'NETWORK_SHARE'
ROOT_NETWORK_SERVER = 'NETWORK_SERVER'
ROOT_NETWORK_PLACES = 'NETWORK_PLACES'
ROOT_NETWORK_DOMAIN = 'NETWORK_DOMAIN'
ROOT_INTERNET = 'INTERNET'
RECYCLE_BIN = 'RECYCLE_BIN'
ROOT_CONTROL_PANEL = 'CONTROL_PANEL'
ROOT_USER = 'USERPROFILE'
ROOT_UWP_APPS = 'APPS'
_ROOT_LOCATIONS = {
'{20D04FE0-3AEA-1069-A2D8-08002B30309D}': ROOT_MY_COMPUTER,
'{450D8FBA-AD25-11D0-98A8-0800361B1103}': ROOT_MY_DOCUMENTS,
'{54a754c0-4bf1-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_SHARE,
'{c0542a90-4bf0-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_SERVER,
'{208D2C60-3AEA-1069-A2D7-08002B30309D}': ROOT_NETWORK_PLACES,
'{46e06680-4bf0-11d1-83ee-00a0c90dc849}': ROOT_NETWORK_DOMAIN,
'{871C5380-42A0-1069-A2EA-08002B30309D}': ROOT_INTERNET,
'{645FF040-5081-101B-9F08-00AA002F954E}': RECYCLE_BIN,
'{21EC2020-3AEA-1069-A2DD-08002B30309D}': ROOT_CONTROL_PANEL,
'{59031A47-3F72-44A7-89C5-5595FE6B30EE}': ROOT_USER,
'{4234D49B-0245-4DF3-B780-3893943456E1}': ROOT_UWP_APPS,
}
_ROOT_LOCATION_GUIDS = dict((v, k) for k, v in _ROOT_LOCATIONS.items())
TYPE_FOLDER = 'FOLDER'
TYPE_FILE = 'FILE'
_ENTRY_TYPES = {
0x00: 'KNOWN_FOLDER',
0x31: 'FOLDER',
0x32: 'FILE',
0x35: 'FOLDER (UNICODE)',
0x36: 'FILE (UNICODE)',
0x802E: 'ROOT_KNOWN_FOLDER',
    # found in the docs, not tested
0x1f: 'ROOT_FOLDER',
0x61: 'URI',
0x71: 'CONTROL_PANEL',
}
_ENTRY_TYPE_IDS = dict((v, k) for k, v in _ENTRY_TYPES.items())
_DRIVE_PATTERN = re.compile(r'(\w)[:/\\]*$')
# ---- read and write binary data
def read_byte(buf):
return unpack('<B', buf.read(1))[0]
def read_short(buf):
return unpack('<H', buf.read(2))[0]
def read_int(buf):
return unpack('<I', buf.read(4))[0]
def read_double(buf):
return unpack('<Q', buf.read(8))[0]
def read_cunicode(buf):
s = b""
b = buf.read(2)
while b != b'\x00\x00':
s += b
b = buf.read(2)
return s.decode('utf-16-le')
def read_cstring(buf, padding=False):
s = b""
b = buf.read(1)
while b != b'\x00':
s += b
b = buf.read(1)
if padding and not len(s) % 2:
buf.read(1) # make length + terminator even
# TODO: encoding is not clear, unicode-escape has been necessary sometimes
return s.decode(DEFAULT_CHARSET)
def read_sized_string(buf, string=True):
size = read_short(buf)
if string:
return buf.read(size*2).decode('utf-16-le')
else:
return buf.read(size)
def get_bits(value, start, count, length=16):
mask = 0
for i in range(count):
mask = mask | 1 << i
shift = length - start - count
return value >> shift & mask
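# Worked example (illustrative only): a DOS date packs year-1980 into the top 7 bits,
# the month into the next 4 and the day into the low 5, so 2021-07-15 packs to 0x52EF and
#   get_bits(0x52EF, 0, 7)  == 41  -> 1980 + 41 = 2021
#   get_bits(0x52EF, 7, 4)  == 7   -> July
#   get_bits(0x52EF, 11, 5) == 15  -> day 15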
def read_dos_datetime(buf):
date = read_short(buf)
time = read_short(buf)
year = get_bits(date, 0, 7) + 1980
month = get_bits(date, 7, 4)
day = get_bits(date, 11, 5)
hour = get_bits(time, 0, 5)
minute = get_bits(time, 5, 6)
second = get_bits(time, 11, 5)
# fix zeroes
month = max(month, 1)
day = max(day, 1)
return datetime(year, month, day, hour, minute, second)
def write_byte(val, buf):
buf.write(pack('<B', val))
def write_short(val, buf):
buf.write(pack('<H', val))
def write_int(val, buf):
buf.write(pack('<I', val))
def write_double(val, buf):
buf.write(pack('<Q', val))
def write_cstring(val, buf, padding=False):
# val = val.encode('unicode-escape').replace('\\\\', '\\')
val = val.encode(DEFAULT_CHARSET)
buf.write(val + b'\x00')
if padding and not len(val) % 2:
buf.write(b'\x00')
def write_cunicode(val, buf):
uni = val.encode('utf-16-le')
buf.write(uni + b'\x00\x00')
def write_sized_string(val, buf, string=True):
size = len(val)
write_short(size, buf)
if string:
buf.write(val.encode('utf-16-le'))
else:
buf.write(val.encode())
def put_bits(bits, target, start, count, length=16):
return target | bits << (length - start - count)
def write_dos_datetime(val, buf):
date = time = 0
date = put_bits(val.year-1980, date, 0, 7)
date = put_bits(val.month, date, 7, 4)
date = put_bits(val.day, date, 11, 5)
time = put_bits(val.hour, time, 0, 5)
time = put_bits(val.minute, time, 5, 6)
time = put_bits(val.second, time, 11, 5)
write_short(date, buf)
write_short(time, buf)
# ---- helpers
def convert_time_to_unix(windows_time):
# Windows time is specified as the number of 0.1 nanoseconds since January 1, 1601.
# UNIX time is specified as the number of seconds since January 1, 1970.
# There are 134774 days (or 11644473600 seconds) between these dates.
unix_time = windows_time / 10000000.0 - 11644473600
try:
return datetime.fromtimestamp(unix_time)
except OSError:
return datetime.now()
def convert_time_to_windows(unix_time):
if isinstance(unix_time, datetime):
unix_time = time.mktime(unix_time.timetuple())
return int((unix_time + 11644473600) * 10000000)
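# Worked example (illustrative only): the UNIX epoch 1970-01-01T00:00:00 lies
# 11644473600 seconds after the Windows epoch, so
#   convert_time_to_windows(0) == 11644473600 * 10**7 == 116444736000000000
# and convert_time_to_unix(116444736000000000) gives datetime.fromtimestamp(0) back
# (rendered in local time).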
class FormatException(Exception):
pass
class MissingInformationException(Exception):
pass
class InvalidKeyException(Exception):
pass
def guid_from_bytes(bytes):
if len(bytes) != 16:
raise FormatException("This is no valid _GUID: %s" % bytes)
ordered = [
bytes[3], bytes[2], bytes[1], bytes[0],
bytes[5], bytes[4], bytes[7], bytes[6],
bytes[8], bytes[9], bytes[10], bytes[11],
bytes[12], bytes[13], bytes[14], bytes[15]
]
return "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}" % tuple([x for x in ordered])
def bytes_from_guid(guid):
nums = [
guid[1:3], guid[3:5], guid[5:7], guid[7:9],
guid[10:12], guid[12:14], guid[15:17], guid[17:19],
guid[20:22], guid[22:24], guid[25:27], guid[27:29],
guid[29:31], guid[31:33], guid[33:35], guid[35:37]
]
ordered_nums = [
nums[3], nums[2], nums[1], nums[0],
nums[5], nums[4], nums[7], nums[6],
nums[8], nums[9], nums[10], nums[11],
nums[12], nums[13], nums[14], nums[15],
]
return bytes([int(x, 16) for x in ordered_nums])
def assert_lnk_signature(f):
f.seek(0)
sig = f.read(4)
guid = f.read(16)
if sig != _SIGNATURE:
raise FormatException("This is not a .lnk file.")
if guid != _GUID:
raise FormatException("Cannot read this kind of .lnk file.")
def is_lnk(f):
if hasattr(f, 'name'):
if f.name.split(os.path.extsep)[-1] == "lnk":
assert_lnk_signature(f)
return True
else:
return False
else:
try:
assert_lnk_signature(f)
return True
except FormatException:
return False
def path_levels(p):
dirname, base = ntpath.split(p)
if base != '':
for level in path_levels(dirname):
yield level
yield p
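# path_levels() yields every ancestor from the drive down to the full path
# (hedged sketch):
#   list(path_levels('C:\\dir\\file.txt'))
#   -> ['C:\\', 'C:\\dir', 'C:\\dir\\file.txt']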
def is_drive(data):
    if not isinstance(data, str):
return False
p = re.compile("[a-zA-Z]:\\\\?$")
return p.match(data) is not None
# ---- data structures
class Flags(object):
def __init__(self, flag_names: Tuple[str, ...], flags_bytes=0):
self._flag_names = flag_names
self._flags: Dict[str, bool] = dict([(name, False) for name in flag_names])
self.set_flags(flags_bytes)
def set_flags(self, flags_bytes):
for pos, flag_name in enumerate(self._flag_names):
self._flags[flag_name] = bool(flags_bytes >> pos & 0x1)
@property
def bytes(self):
bytes = 0
for pos in range(len(self._flag_names)):
bytes = (self._flags[self._flag_names[pos]] and 1 or 0) << pos | bytes
return bytes
def __getitem__(self, key):
if key in self._flags:
return object.__getattribute__(self, '_flags')[key]
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self._flags:
raise KeyError("The key '%s' is not defined for those flags." % key)
self._flags[key] = value
def __getattr__(self, key):
if key in self._flags:
return object.__getattribute__(self, '_flags')[key]
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
if '_flags' not in self.__dict__:
object.__setattr__(self, key, value)
elif key in self.__dict__:
object.__setattr__(self, key, value)
else:
self.__setitem__(key, value)
def __str__(self):
return pformat(self._flags, indent=2)
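# Hedged usage sketch for Flags: bit 0 maps to the first name, bit 1 to the
# second, and so on, mirroring set_flags() and the bytes property above:
#   f = Flags(('A', 'B', 'C'), 0b101)
#   f.A, f.B, f.C  -> True, False, True
#   f.bytes        -> 5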
class ModifierKeys(Flags):
def __init__(self, flags_bytes=0):
Flags.__init__(self, _MODIFIER_KEYS, flags_bytes)
def __str__(self):
s = ""
s += self.CONTROL and "CONTROL+" or ""
s += self.SHIFT and "SHIFT+" or ""
s += self.ALT and "ALT+" or ""
return s
# _ROOT_INDEX = {
# 0x00: 'INTERNET_EXPLORER1',
# 0x42: 'LIBRARIES',
# 0x44: 'USERS',
# 0x48: 'MY_DOCUMENTS',
# 0x50: 'MY_COMPUTER',
# 0x58: 'MY_NETWORK_PLACES',
# 0x60: 'RECYCLE_BIN',
# 0x68: 'INTERNET_EXPLORER2',
# 0x70: 'UNKNOWN',
# 0x80: 'MY_GAMES',
# }
class RootEntry(object):
def __init__(self, root):
if root is not None:
# create from text representation
if root in list(_ROOT_LOCATION_GUIDS.keys()):
self.root = root
self.guid = _ROOT_LOCATION_GUIDS[root]
return
# from binary
root_type = root[0]
index = root[1]
guid_bytes = root[2:18]
self.guid = guid_from_bytes(guid_bytes)
self.root = _ROOT_LOCATIONS.get(self.guid, f"UNKNOWN {self.guid}")
# if self.root == "UNKNOWN":
# self.root = _ROOT_INDEX.get(index, "UNKNOWN")
@property
def bytes(self):
guid = self.guid[1:-1].replace('-', '')
chars = [bytes([int(x, 16)]) for x in [guid[i:i+2] for i in range(0, 32, 2)]]
return (
b'\x1F\x50'
+ chars[3] + chars[2] + chars[1] + chars[0]
+ chars[5] + chars[4] + chars[7] + chars[6]
+ b''.join(chars[8:])
)
def __str__(self):
return "<RootEntry: %s>" % self.root
class DriveEntry(object):
def __init__(self, drive: str):
if len(drive) == 23:
# binary data from parsed lnk
self.drive = drive[1:3]
else:
# text representation
m = _DRIVE_PATTERN.match(drive.strip())
if m:
self.drive = m.groups()[0].upper() + ':'
self.drive = self.drive.encode()
else:
raise FormatException("This is not a valid drive: " + str(drive))
@property
def bytes(self):
drive = self.drive
padded_str = drive + b'\\' + b'\x00' * 19
return b'\x2F' + padded_str
# drive = self.drive
# if isinstance(drive, str):
# drive = drive.encode()
# return b'/' + drive + b'\\' + b'\x00' * 19
def __str__(self):
return "<DriveEntry: %s>" % self.drive
class PathSegmentEntry(object):
def __init__(self, bytes=None):
self.type = None
self.file_size = None
self.modified = None
self.short_name = None
self.created = None
self.accessed = None
self.full_name = None
if bytes is None:
return
buf = BytesIO(bytes)
self.type = _ENTRY_TYPES.get(read_short(buf), 'UNKNOWN')
short_name_is_unicode = self.type.endswith('(UNICODE)')
if self.type == 'ROOT_KNOWN_FOLDER':
self.full_name = '::' + guid_from_bytes(buf.read(16))
# then followed Beef0026 structure:
# short size
# short version
# int signature == 0xBEEF0026
# (16 bytes) created timestamp
# (16 bytes) modified timestamp
# (16 bytes) accessed timestamp
return
if self.type == 'KNOWN_FOLDER':
_ = read_short(buf) # extra block size
extra_signature = read_int(buf)
if extra_signature == 0x23FEBBEE:
_ = read_short(buf) # unknown
_ = read_short(buf) # guid len
# that format recognized by explorer
self.full_name = '::' + guid_from_bytes(buf.read(16))
return
self.file_size = read_int(buf)
self.modified = read_dos_datetime(buf)
        unknown = read_short(buf) # FileAttributes
if short_name_is_unicode:
self.short_name = read_cunicode(buf)
else:
self.short_name = read_cstring(buf, padding=True)
extra_size = read_short(buf)
extra_version = read_short(buf)
extra_signature = read_int(buf)
if extra_signature == 0xBEEF0004:
# indicator_1 = read_short(buf) # see below
# only_83 = read_short(buf) < 0x03
# unknown = read_short(buf) # 0x04
# self.is_unicode = read_short(buf) == 0xBeef
self.created = read_dos_datetime(buf) # 4 bytes
self.accessed = read_dos_datetime(buf) # 4 bytes
offset_unicode = read_short(buf) # offset from start of extra_size
# only_83_2 = offset_unicode >= indicator_1 or offset_unicode < 0x14
if extra_version >= 7:
offset_ansi = read_short(buf)
file_reference = read_double(buf)
unknown2 = read_double(buf)
long_string_size = 0
if extra_version >= 3:
long_string_size = read_short(buf)
if extra_version >= 9:
unknown4 = read_int(buf)
if extra_version >= 8:
unknown5 = read_int(buf)
if extra_version >= 3:
self.full_name = read_cunicode(buf)
if long_string_size > 0:
if extra_version >= 7:
self.localized_name = read_cunicode(buf)
else:
self.localized_name = read_cstring(buf)
version_offset = read_short(buf)
@classmethod
def create_for_path(cls, path):
entry = cls()
entry.type = os.path.isdir(path) and TYPE_FOLDER or TYPE_FILE
try:
st = os.stat(path)
entry.file_size = st.st_size
entry.modified = datetime.fromtimestamp(st.st_mtime)
entry.created = datetime.fromtimestamp(st.st_ctime)
entry.accessed = datetime.fromtimestamp(st.st_atime)
except FileNotFoundError:
now = datetime.now()
entry.file_size = 0
entry.modified = now
entry.created = now
entry.accessed = now
entry.short_name = ntpath.split(path)[1]
entry.full_name = entry.short_name
return entry
def _validate(self):
if self.type is None:
raise MissingInformationException("Type is missing, choose either TYPE_FOLDER or TYPE_FILE.")
if self.file_size is None:
if self.type.startswith('FOLDER') or self.type in ['KNOWN_FOLDER', 'ROOT_KNOWN_FOLDER']:
self.file_size = 0
else:
raise MissingInformationException("File size missing")
if self.created is None:
self.created = datetime.now()
if self.modified is None:
self.modified = datetime.now()
if self.accessed is None:
self.accessed = datetime.now()
# if self.modified is None or self.accessed is None or self.created is None:
# raise MissingInformationException("Date information missing")
if self.full_name is None:
raise MissingInformationException("A full name is missing")
if self.short_name is None:
self.short_name = self.full_name
@property
def bytes(self):
if self.full_name is None:
return
self._validate()
out = BytesIO()
entry_type = self.type
if entry_type == 'KNOWN_FOLDER':
write_short(_ENTRY_TYPE_IDS[entry_type], out)
write_short(0x1A, out) # size
write_int(0x23FEBBEE, out) # extra signature
            write_short(0x00, out) # unknown
write_short(0x10, out) # guid size
out.write(bytes_from_guid(self.full_name.strip(':')))
return out.getvalue()
if entry_type == 'ROOT_KNOWN_FOLDER':
write_short(_ENTRY_TYPE_IDS[entry_type], out)
out.write(bytes_from_guid(self.full_name.strip(':')))
write_short(0x26, out) # 0xBEEF0026 structure size
write_short(0x01, out) # version
write_int(0xBEEF0026, out) # extra signature
write_int(0x11, out) # some flag for containing datetime
write_double(0x00, out) # created datetime
write_double(0x00, out) # modified datetime
write_double(0x00, out) # accessed datetime
write_short(0x14, out) # unknown
return out.getvalue()
short_name_len = len(self.short_name) + 1
try:
self.short_name.encode("ascii")
short_name_is_unicode = False
short_name_len += short_name_len % 2 # padding
except (UnicodeEncodeError, UnicodeDecodeError):
short_name_is_unicode = True
short_name_len = short_name_len * 2
self.type += " (UNICODE)"
write_short(_ENTRY_TYPE_IDS[entry_type], out)
write_int(self.file_size, out)
write_dos_datetime(self.modified, out)
write_short(0x10, out)
if short_name_is_unicode:
write_cunicode(self.short_name, out)
else:
write_cstring(self.short_name, out, padding=True)
indicator = 24 + 2 * len(self.short_name)
write_short(indicator, out) # size
write_short(0x03, out) # version
write_short(0x04, out) # signature part1
write_short(0xBeef, out) # signature part2
write_dos_datetime(self.created, out)
write_dos_datetime(self.accessed, out)
offset_unicode = 0x14 # fixed data structure, always the same
write_short(offset_unicode, out)
offset_ansi = 0 # we always write unicode
write_short(offset_ansi, out) # long_string_size
write_cunicode(self.full_name, out)
offset_part2 = 0x0E + short_name_len
write_short(offset_part2, out)
return out.getvalue()
def __str__(self):
return "<PathSegmentEntry: %s>" % self.full_name
class UwpSubBlock:
block_names = {
0x11: 'PackageFamilyName',
# 0x0e: '',
# 0x19: '',
0x15: 'PackageFullName',
0x05: 'Target',
0x0f: 'Location',
0x20: 'RandomGuid',
0x0c: 'Square150x150Logo',
0x02: 'Square44x44Logo',
0x0d: 'Wide310x150Logo',
# 0x04: '',
# 0x05: '',
0x13: 'Square310x310Logo',
# 0x0e: '',
0x0b: 'DisplayName',
0x14: 'Square71x71Logo',
0x64: 'RandomByte',
0x0a: 'DisplayName',
# 0x07: '',
}
block_types = {
'string': [0x11, 0x15, 0x05, 0x0f, 0x0c, 0x02, 0x0d, 0x13, 0x0b, 0x14, 0x0a],
}
def __init__(self, bytes=None, type=None, value=None):
self._data = bytes or b''
self.type = type
self.value = value
self.name = None
if self.type is not None:
self.name = self.block_names.get(self.type, 'UNKNOWN')
if not bytes:
return
buf = BytesIO(bytes)
self.type = read_byte(buf)
self.name = self.block_names.get(self.type, 'UNKNOWN')
self.value = self._data[1:] # skip type
if self.type in self.block_types['string']:
unknown = read_int(buf)
probably_type = read_int(buf)
if probably_type == 0x1f:
string_len = read_int(buf)
self.value = read_cunicode(buf)
def __str__(self):
string = f'UwpSubBlock {self.name} ({hex(self.type)}): {self.value}'
return string.strip()
@property
def bytes(self):
out = BytesIO()
if self.value:
if isinstance(self.value, str):
string_len = len(self.value) + 1
write_byte(self.type, out)
write_int(0, out)
write_int(0x1f, out)
write_int(string_len, out)
write_cunicode(self.value, out)
if string_len % 2 == 1: # padding
write_short(0, out)
elif isinstance(self.value, bytes):
write_byte(self.type, out)
out.write(self.value)
result = out.getvalue()
return result
class UwpMainBlock:
magic = b'\x31\x53\x50\x53'
def __init__(self, bytes=None, guid: Optional[str] = None, blocks=None):
self._data = bytes or b''
self._blocks = blocks or []
self.guid: str = guid
if not bytes:
return
buf = BytesIO(bytes)
magic = buf.read(4)
self.guid = guid_from_bytes(buf.read(16))
# read sub blocks
while True:
sub_block_size = read_int(buf)
if not sub_block_size: # last size is zero
break
sub_block_data = buf.read(sub_block_size - 4) # includes block_size
self._blocks.append(UwpSubBlock(sub_block_data))
def __str__(self):
string = f'<UwpMainBlock> {self.guid}:\n'
for block in self._blocks:
string += f' {block}\n'
return string.strip()
@property
def bytes(self):
blocks_bytes = [block.bytes for block in self._blocks]
out = BytesIO()
out.write(self.magic)
out.write(bytes_from_guid(self.guid))
for block in blocks_bytes:
write_int(len(block) + 4, out)
out.write(block)
write_int(0, out)
result = out.getvalue()
return result
class UwpSegmentEntry:
magic = b'APPS'
header = b'\x08\x00\x03\x00\x00\x00\x00\x00\x00\x00'
def __init__(self, bytes=None):
self._blocks = []
self._data = bytes
if bytes is None:
return
buf = BytesIO(bytes)
unknown = read_short(buf)
size = read_short(buf)
magic = buf.read(4) # b'APPS'
blocks_size = read_short(buf)
unknown2 = buf.read(10)
# read main blocks
while True:
block_size = read_int(buf)
if not block_size: # last size is zero
break
block_data = buf.read(block_size - 4) # includes block_size
self._blocks.append(UwpMainBlock(block_data))
def __str__(self):
string = '<UwpSegmentEntry>:\n'
for block in self._blocks:
string += f' {block}\n'
return string.strip()
@property
def bytes(self):
blocks_bytes = [block.bytes for block in self._blocks]
blocks_size = sum([len(block) + 4 for block in blocks_bytes]) + 4 # with terminator
size = (
2 # size
+ len(self.magic)
+ 2 # second size
+ len(self.header)
+ blocks_size # blocks with terminator
)
out = BytesIO()
write_short(0, out)
write_short(size, out)
out.write(self.magic)
write_short(blocks_size, out)
out.write(self.header)
for block in blocks_bytes:
write_int(len(block) + 4, out)
out.write(block)
write_int(0, out) # empty block
write_short(0, out) # ??
result = out.getvalue()
return result
@classmethod
def create(cls, package_family_name, target, location=None, logo44x44=None):
segment = cls()
blocks = [
UwpSubBlock(type=0x11, value=package_family_name),
UwpSubBlock(type=0x0e, value=b'\x00\x00\x00\x00\x13\x00\x00\x00\x02\x00\x00\x00'),
UwpSubBlock(type=0x05, value=target),
]
if location:
blocks.append(UwpSubBlock(type=0x0f, value=location)) # need for relative icon path
main1 = UwpMainBlock(guid='{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}', blocks=blocks)
segment._blocks.append(main1)
if logo44x44:
main2 = UwpMainBlock(
guid='{86D40B4D-9069-443C-819A-2A54090DCCEC}',
blocks=[UwpSubBlock(type=0x02, value=logo44x44)]
)
segment._blocks.append(main2)
return segment
class LinkTargetIDList(object):
def __init__(self, bytes=None):
self.items = []
if bytes is not None:
buf = BytesIO(bytes)
raw = []
entry_len = read_short(buf)
while entry_len > 0:
raw.append(buf.read(entry_len - 2)) # the length includes the size
entry_len = read_short(buf)
self._interpret(raw)
def _interpret(self, raw):
if not raw:
return
elif raw[0][0] == 0x1F:
self.items.append(RootEntry(raw[0]))
if self.items[0].root == ROOT_MY_COMPUTER:
if len(raw[1]) == 0x17:
self.items.append(DriveEntry(raw[1]))
elif raw[1][0:2] == b'\x2E\x80': # ROOT_KNOWN_FOLDER
self.items.append(PathSegmentEntry(raw[1]))
else:
raise ValueError("This seems to be an absolute link which requires a drive as second element.")
items = raw[2:]
elif self.items[0].root == ROOT_NETWORK_PLACES:
raise NotImplementedError(
"Parsing network lnks has not yet been implemented. "
"If you need it just contact me and we'll see..."
)
else:
items = raw[1:]
else:
items = raw
for item in items:
if item[4:8] == b'APPS':
self.items.append(UwpSegmentEntry(item))
else:
self.items.append(PathSegmentEntry(item))
def get_path(self):
segments = []
for item in self.items:
if type(item) == RootEntry:
segments.append('%' + item.root + '%')
elif type(item) == DriveEntry:
segments.append(item.drive.decode())
elif type(item) == PathSegmentEntry:
if item.full_name is not None:
segments.append(item.full_name)
else:
segments.append(item)
return '\\'.join(segments)
def _validate(self):
if not len(self.items):
return
if type(self.items[0]) == RootEntry and self.items[0].root == ROOT_MY_COMPUTER:
if type(self.items[1]) == DriveEntry:
return
if type(self.items[1]) == PathSegmentEntry and self.items[1].full_name.startswith('::'):
return
raise ValueError("A drive is required for absolute lnks")
@property
def bytes(self):
self._validate()
out = BytesIO()
for item in self.items:
bytes = item.bytes
# skip invalid
if bytes is None:
continue
write_short(len(bytes) + 2, out) # len + terminator
out.write(bytes)
out.write(b'\x00\x00')
return out.getvalue()
def __str__(self):
string = '<LinkTargetIDList>:\n'
for item in self.items:
string += f' {item}\n'
return string.strip()
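# Hedged sketch of how an absolute target is assembled by get_path(), assuming
# the ROOT_MY_COMPUTER constant defined earlier in this module:
#   idlist = LinkTargetIDList()
#   idlist.items = [RootEntry(ROOT_MY_COMPUTER), DriveEntry('C:'),
#                   PathSegmentEntry.create_for_path('C:\\dir')]
#   idlist.get_path()  -> '%MY_COMPUTER%\\C:\\dir'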
class LinkInfo(object):
def __init__(self, lnk=None):
if lnk is not None:
self.start = lnk.tell()
self.size = read_int(lnk)
self.header_size = read_int(lnk)
link_info_flags = read_int(lnk)
self.local = link_info_flags & 1
self.remote = link_info_flags & 2
self.offs_local_volume_table = read_int(lnk)
self.offs_local_base_path = read_int(lnk)
self.offs_network_volume_table = read_int(lnk)
self.offs_base_name = read_int(lnk)
if self.header_size >= _LINK_INFO_HEADER_OPTIONAL:
print("TODO: read the unicode stuff") # TODO: read the unicode stuff
self._parse_path_elements(lnk)
else:
self.size = None
self.header_size = _LINK_INFO_HEADER_DEFAULT
self.local = 0
self.remote = 0
self.offs_local_volume_table = 0
self.offs_local_base_path = 0
self.offs_network_volume_table = 0
self.offs_base_name = 0
self.drive_type = None
self.drive_serial = None
self.volume_label = None
self.local_base_path = None
self.network_share_name = None
self.base_name = None
self._path = None
def _parse_path_elements(self, lnk):
if self.remote:
# 20 is the offset of the network share name
lnk.seek(self.start + self.offs_network_volume_table + 20)
self.network_share_name = read_cstring(lnk)
lnk.seek(self.start + self.offs_base_name)
self.base_name = read_cstring(lnk)
if self.local:
lnk.seek(self.start + self.offs_local_volume_table + 4)
self.drive_type = _DRIVE_TYPES.get(read_int(lnk))
self.drive_serial = read_int(lnk)
lnk.read(4) # volume name offset (10h)
self.volume_label = read_cstring(lnk)
lnk.seek(self.start + self.offs_local_base_path)
self.local_base_path = read_cstring(lnk)
# TODO: unicode
self.make_path()
def make_path(self):
if self.remote:
self._path = self.network_share_name + '\\' + self.base_name
if self.local:
self._path = self.local_base_path
def write(self, lnk):
if self.remote is None:
raise MissingInformationException("No location information given.")
self.start = lnk.tell()
self._calculate_sizes_and_offsets()
write_int(self.size, lnk)
write_int(self.header_size, lnk)
write_int((self.local and 1) + (self.remote and 2), lnk)
write_int(self.offs_local_volume_table, lnk)
write_int(self.offs_local_base_path, lnk)
write_int(self.offs_network_volume_table, lnk)
write_int(self.offs_base_name, lnk)
if self.remote:
self._write_network_volume_table(lnk)
write_cstring(self.base_name, lnk, padding=False)
else:
self._write_local_volume_table(lnk)
write_cstring(self.local_base_path, lnk, padding=False)
write_byte(0, lnk)
def _calculate_sizes_and_offsets(self):
self.size_base_name = 1 # len(self.base_name) + 1 # zero terminated strings
self.size = 28 + self.size_base_name
if self.remote:
self.size_network_volume_table = 20 + len(self.network_share_name) + len(self.base_name) + 1
self.size += self.size_network_volume_table
self.offs_local_volume_table = 0
self.offs_local_base_path = 0
self.offs_network_volume_table = 28
self.offs_base_name = self.offs_network_volume_table + self.size_network_volume_table
else:
self.size_local_volume_table = 16 + len(self.volume_label) + 1
self.size_local_base_path = len(self.local_base_path) + 1
self.size += self.size_local_volume_table + self.size_local_base_path
self.offs_local_volume_table = 28
self.offs_local_base_path = self.offs_local_volume_table + self.size_local_volume_table
self.offs_network_volume_table = 0
self.offs_base_name = self.offs_local_base_path + self.size_local_base_path
def _write_network_volume_table(self, buf):
write_int(self.size_network_volume_table, buf)
write_int(2, buf) # ?
write_int(20, buf) # size of Network Volume Table
write_int(0, buf) # ?
write_int(131072, buf) # ?
write_cstring(self.network_share_name, buf)
def _write_local_volume_table(self, buf):
write_int(self.size_local_volume_table, buf)
try:
drive_type = _DRIVE_TYPE_IDS[self.drive_type]
except KeyError:
raise ValueError("This is not a valid drive type: %s" % self.drive_type)
write_int(drive_type, buf)
write_int(self.drive_serial, buf)
write_int(16, buf) # volume name offset
write_cstring(self.volume_label, buf)
@property
def path(self):
return self._path
def __str__(self):
s = "File Location Info:"
if self._path is None:
return s + " <not specified>"
if self.remote:
s += "\n (remote)"
s += "\n Network Share: %s" % self.network_share_name
s += "\n Base Name: %s" % self.base_name
else:
s += "\n (local)"
s += "\n Volume Type: %s" % self.drive_type
s += "\n Volume Serial Number: %s" % self.drive_serial
s += "\n Volume Label: %s" % self.volume_label
s += "\n Path: %s" % self.local_base_path
return s
EXTRA_DATA_TYPES = {
0xA0000002: 'ConsoleDataBlock', # size 0x000000CC
0xA0000004: 'ConsoleFEDataBlock', # size 0x0000000C
0xA0000006: 'DarwinDataBlock', # size 0x00000314
0xA0000001: 'EnvironmentVariableDataBlock', # size 0x00000314
0xA0000007: 'IconEnvironmentDataBlock', # size 0x00000314
0xA000000B: 'KnownFolderDataBlock', # size 0x0000001C
0xA0000009: 'PropertyStoreDataBlock', # size >= 0x0000000C
0xA0000008: 'ShimDataBlock', # size >= 0x00000088
0xA0000005: 'SpecialFolderDataBlock', # size 0x00000010
0xA0000003: 'VistaAndAboveIDListDataBlock', # size 0x00000060
0xA000000C: 'VistaIDListDataBlock', # size 0x00000173
}
class ExtraData_Unparsed(object):
def __init__(self, bytes=None, signature=None, data=None):
self._signature = signature
self._size = None
self.data = data
# if data:
# self._size = len(data)
if bytes:
# self._size = len(bytes)
self.data = bytes
# self.read(bytes)
# def read(self, bytes):
# buf = BytesIO(bytes)
# size = len(bytes)
# # self._size = read_int(buf)
# # self._signature = read_int(buf)
# self.data = buf.read(self._size - 8)
def bytes(self):
buf = BytesIO()
write_int(len(self.data)+8, buf)
write_int(self._signature, buf)
buf.write(self.data)
return buf.getvalue()
def __str__(self):
s = 'ExtraDataBlock\n signature %s\n data: %s' % (hex(self._signature), self.data)
return s
def padding(val, size, byte=b'\x00'):
return val + (size-len(val)) * byte
class ExtraData_IconEnvironmentDataBlock(object):
def __init__(self, bytes=None):
# self._size = None
# self._signature = None
self._signature = 0xA0000007
self.target_ansi = None
self.target_unicode = None
if bytes:
self.read(bytes)
def read(self, bytes):
buf = BytesIO(bytes)
# self._size = read_int(buf)
# self._signature = read_int(buf)
self.target_ansi = buf.read(260).decode('ansi')
self.target_unicode = buf.read(520).decode('utf-16-le')
def bytes(self):
target_ansi = padding(self.target_ansi.encode(), 260)
target_unicode = padding(self.target_unicode.encode('utf-16-le'), 520)
size = 8 + len(target_ansi) + len(target_unicode)
assert self._signature == 0xA0000007
assert size == 0x00000314
buf = BytesIO()
write_int(size, buf)
write_int(self._signature, buf)
buf.write(target_ansi)
buf.write(target_unicode)
return buf.getvalue()
def __str__(self):
target_ansi = self.target_ansi.replace('\x00', '')
target_unicode = self.target_unicode.replace('\x00', '')
s = f'IconEnvironmentDataBlock\n TargetAnsi: {target_ansi}\n TargetUnicode: {target_unicode}'
return s
def guid_to_str(guid):
ordered = [guid[3], guid[2], guid[1], guid[0], guid[5], guid[4],
guid[7], guid[6], guid[8], guid[9], guid[10], guid[11],
guid[12], guid[13], guid[14], guid[15]]
res = "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}" % tuple([x for x in ordered])
# print(guid, res)
return res
class TypedPropertyValue(object):
# types: [MS-OLEPS] section 2.15
def __init__(self, bytes=None, type=None, value=None):
self.type = type
self.value = value
if bytes:
self.type = read_short(BytesIO(bytes))
padding = bytes[2:4]
self.value = bytes[4:]
def set_string(self, value):
self.type = 0x1f
buf = BytesIO()
write_int(len(value)+2, buf)
buf.write(value.encode('utf-16-le'))
# terminator (included in size)
buf.write(b'\x00\x00\x00\x00')
# padding (not included in size)
if len(value) % 2:
buf.write(b'\x00\x00')
self.value = buf.getvalue()
@property
def bytes(self):
buf = BytesIO()
write_short(self.type, buf)
write_short(0x0000, buf)
buf.write(self.value)
return buf.getvalue()
def __str__(self):
value = self.value
if self.type == 0x1F:
size = value[:4]
value = value[4:].decode('utf-16-le')
if self.type == 0x15:
value = unpack('<Q', value)[0]
if self.type == 0x13:
value = unpack('<I', value)[0]
if self.type == 0x14:
value = unpack('<q', value)[0]
if self.type == 0x16:
value = unpack('<i', value)[0]
if self.type == 0x17:
value = unpack('<I', value)[0]
if self.type == 0x48:
value = guid_to_str(value)
if self.type == 0x40:
# FILETIME (Packet Version)
stream = BytesIO(value)
low = read_int(stream)
high = read_int(stream)
num = (high << 32) + low
value = convert_time_to_unix(num)
return '%s: %s' % (hex(self.type), value)
class PropertyStore:
def __init__(self, bytes=None, properties=None, format_id=None, is_strings=False):
self.is_strings = is_strings
self.properties = []
self.format_id = format_id
self._is_end = False
if properties:
self.properties = properties
if bytes:
self.read(bytes)
def read(self, bytes_io):
buf = bytes_io
size = read_int(buf)
assert size < len(buf.getvalue())
if size == 0x00000000:
self._is_end = True
return
version = read_int(buf)
assert version == 0x53505331
self.format_id = buf.read(16)
if self.format_id == b'\xD5\xCD\xD5\x05\x2E\x9C\x10\x1B\x93\x97\x08\x00\x2B\x2C\xF9\xAE':
self.is_strings = True
else:
self.is_strings = False
while True:
# assert lnk.tell() < (start + size)
value_size = read_int(buf)
if value_size == 0x00000000:
break
if self.is_strings:
name_size = read_int(buf)
reserved = read_byte(buf)
name = buf.read(name_size).decode('utf-16-le')
value = TypedPropertyValue(buf.read(value_size-9))
self.properties.append((name, value))
else:
value_id = read_int(buf)
reserved = read_byte(buf)
value = TypedPropertyValue(buf.read(value_size-9))
self.properties.append((value_id, value))
@property
def bytes(self):
size = 8 + len(self.format_id)
properties = BytesIO()
for name, value in self.properties:
value_bytes = value.bytes
if self.is_strings:
name_bytes = name.encode('utf-16-le')
value_size = 9 + len(name_bytes) + len(value_bytes)
write_int(value_size, properties)
name_size = len(name_bytes)
write_int(name_size, properties)
properties.write(b'\x00')
properties.write(name_bytes)
else:
value_size = 9 + len(value_bytes)
write_int(value_size, properties)
write_int(name, properties)
properties.write(b'\x00')
properties.write(value_bytes)
size += value_size
write_int(0x00000000, properties)
size += 4
buf = BytesIO()
write_int(size, buf)
write_int(0x53505331, buf)
buf.write(self.format_id)
buf.write(properties.getvalue())
return buf.getvalue()
def __str__(self):
s = ' PropertyStore'
s += '\n FormatID: %s' % guid_to_str(self.format_id)
for name, value in self.properties:
s += '\n %3s = %s' % (name, str(value))
return s.strip()
class ExtraData_PropertyStoreDataBlock(object):
def __init__(self, bytes=None, stores=None):
self._size = None
self._signature = 0xA0000009
self.stores = []
if stores:
self.stores = stores
if bytes:
self.read(bytes)
def read(self, bytes):
buf = BytesIO(bytes)
# self._size = read_int(buf)
# self._signature = read_int(buf)
# [MS-PROPSTORE] section 2.2
while True:
prop_store = PropertyStore(buf)
if prop_store._is_end:
break
self.stores.append(prop_store)
def bytes(self):
stores = b''
for prop_store in self.stores:
stores += prop_store.bytes
size = len(stores) + 8 + 4
assert self._signature == 0xA0000009
assert size >= 0x0000000C
buf = BytesIO()
write_int(size, buf)
write_int(self._signature, buf)
buf.write(stores)
write_int(0x00000000, buf)
return buf.getvalue()
def __str__(self):
s = 'PropertyStoreDataBlock'
for prop_store in self.stores:
s += '\n %s' % str(prop_store)
return s
class ExtraData_EnvironmentVariableDataBlock(object):
def __init__(self, bytes=None):
self._signature = 0xA0000001
self.target_ansi = None
self.target_unicode = None
if bytes:
self.read(bytes)
def read(self, bytes):
buf = BytesIO(bytes)
self.target_ansi = buf.read(260).decode()
self.target_unicode = buf.read(520).decode('utf-16-le')
def bytes(self):
target_ansi = padding(self.target_ansi.encode(), 260)
target_unicode = padding(self.target_unicode.encode('utf-16-le'), 520)
size = 8 + len(target_ansi) + len(target_unicode)
assert self._signature == 0xA0000001
assert size == 0x00000314
buf = BytesIO()
write_int(size, buf)
write_int(self._signature, buf)
buf.write(target_ansi)
buf.write(target_unicode)
return buf.getvalue()
def __str__(self):
target_ansi = self.target_ansi.replace('\x00', '')
target_unicode = self.target_unicode.replace('\x00', '')
s = f'EnvironmentVariableDataBlock\n TargetAnsi: {target_ansi}\n TargetUnicode: {target_unicode}'
return s
EXTRA_DATA_TYPES_CLASSES = {
'IconEnvironmentDataBlock': ExtraData_IconEnvironmentDataBlock,
'PropertyStoreDataBlock': ExtraData_PropertyStoreDataBlock,
'EnvironmentVariableDataBlock': ExtraData_EnvironmentVariableDataBlock,
}
class ExtraData(object):
# EXTRA_DATA = *EXTRA_DATA_BLOCK TERMINAL_BLOCK
def __init__(self, lnk=None, blocks=None):
self.blocks = []
if blocks:
self.blocks = blocks
if lnk is None:
return
while True:
size = read_int(lnk)
if size < 4: # TerminalBlock
break
signature = read_int(lnk)
bytes = lnk.read(size-8)
# lnk.seek(-8, 1)
block_type = EXTRA_DATA_TYPES[signature]
if block_type in EXTRA_DATA_TYPES_CLASSES:
block_class = EXTRA_DATA_TYPES_CLASSES[block_type]
block = block_class(bytes=bytes)
else:
block_class = ExtraData_Unparsed
block = block_class(bytes=bytes, signature=signature)
self.blocks.append(block)
@property
def bytes(self):
result = b''
for block in self.blocks:
result += block.bytes()
result += b'\x00\x00\x00\x00' # TerminalBlock
return result
def __str__(self):
s = ''
for block in self.blocks:
s += '\n' + str(block)
return s
class Lnk(object):
def __init__(self, f=None):
self.file = None
        if isinstance(f, str):
self.file = f
try:
f = open(self.file, 'rb')
except IOError:
self.file += ".lnk"
f = open(self.file, 'rb')
# defaults
self.link_flags = Flags(_LINK_FLAGS)
self.file_flags = Flags(_FILE_ATTRIBUTES_FLAGS)
self.creation_time = datetime.now()
self.access_time = datetime.now()
self.modification_time = datetime.now()
self.file_size = 0
self.icon_index = 0
self._show_command = WINDOW_NORMAL
self.hot_key = None
self._link_info = LinkInfo()
self.description = None
self.relative_path = None
self.work_dir = None
self.arguments = None
self.icon = None
self.extra_data = None
if f is not None:
assert_lnk_signature(f)
self._parse_lnk_file(f)
if self.file:
f.close()
def _read_hot_key(self, lnk):
low = read_byte(lnk)
high = read_byte(lnk)
key = _KEYS.get(low, '')
modifier = high and str(ModifierKeys(high)) or ''
return modifier + key
def _write_hot_key(self, hot_key, lnk):
        if not hot_key:
low = high = 0
else:
hot_key = hot_key.split('+')
try:
low = _KEY_CODES[hot_key[-1]]
except KeyError:
                raise InvalidKeyException("Cannot find key code for %s" % hot_key[-1])
modifiers = ModifierKeys()
for modifier in hot_key[:-1]:
modifiers[modifier.upper()] = True
high = modifiers.bytes
write_byte(low, lnk)
write_byte(high, lnk)
def _parse_lnk_file(self, lnk):
# SHELL_LINK_HEADER [LINKTARGET_IDLIST] [LINKINFO] [STRING_DATA] *EXTRA_DATA
# SHELL_LINK_HEADER
lnk.seek(20) # after signature and guid
self.link_flags.set_flags(read_int(lnk))
self.file_flags.set_flags(read_int(lnk))
self.creation_time = convert_time_to_unix(read_double(lnk))
self.access_time = convert_time_to_unix(read_double(lnk))
self.modification_time = convert_time_to_unix(read_double(lnk))
self.file_size = read_int(lnk)
self.icon_index = read_int(lnk)
show_command = read_int(lnk)
self._show_command = _SHOW_COMMANDS[show_command] if show_command in _SHOW_COMMANDS else _SHOW_COMMANDS[1]
self.hot_key = self._read_hot_key(lnk)
lnk.read(10) # reserved (0)
# LINKTARGET_IDLIST (HasLinkTargetIDList)
if self.link_flags.HasLinkTargetIDList:
shell_item_id_list_size = read_short(lnk)
self.shell_item_id_list = LinkTargetIDList(lnk.read(shell_item_id_list_size))
# LINKINFO (HasLinkInfo)
if self.link_flags.HasLinkInfo and not self.link_flags.ForceNoLinkInfo:
self._link_info = LinkInfo(lnk)
lnk.seek(self._link_info.start + self._link_info.size)
# STRING_DATA = [NAME_STRING] [RELATIVE_PATH] [WORKING_DIR] [COMMAND_LINE_ARGUMENTS] [ICON_LOCATION]
if self.link_flags.HasName:
self.description = read_sized_string(lnk, self.link_flags.IsUnicode)
if self.link_flags.HasRelativePath:
self.relative_path = read_sized_string(lnk, self.link_flags.IsUnicode)
if self.link_flags.HasWorkingDir:
self.work_dir = read_sized_string(lnk, self.link_flags.IsUnicode)
if self.link_flags.HasArguments:
self.arguments = read_sized_string(lnk, self.link_flags.IsUnicode)
if self.link_flags.HasIconLocation:
self.icon = read_sized_string(lnk, self.link_flags.IsUnicode)
# *EXTRA_DATA
self.extra_data = ExtraData(lnk)
def save(self, f: Optional[Union[str, IOBase]] = None, force_ext=False):
if f is None:
f = self.file
if f is None:
raise ValueError("File (name) missing for saving the lnk")
is_file = hasattr(f, 'write')
if not is_file:
            if not isinstance(f, str):
raise ValueError("Need a writeable object or a file name to save to, got %s" % f)
if force_ext:
if not f.lower().endswith('.lnk'):
f += '.lnk'
f = open(f, 'wb')
self.write(f)
# only close the stream if it's our own
if not is_file:
f.close()
def write(self, lnk):
lnk.write(_SIGNATURE)
lnk.write(_GUID)
write_int(self.link_flags.bytes, lnk)
write_int(self.file_flags.bytes, lnk)
write_double(convert_time_to_windows(self.creation_time), lnk)
write_double(convert_time_to_windows(self.access_time), lnk)
write_double(convert_time_to_windows(self.modification_time), lnk)
write_int(self.file_size, lnk)
write_int(self.icon_index, lnk)
write_int(_SHOW_COMMAND_IDS[self._show_command], lnk)
self._write_hot_key(self.hot_key, lnk)
lnk.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # reserved
if self.link_flags.HasLinkTargetIDList:
shell_item_id_list = self.shell_item_id_list.bytes
write_short(len(shell_item_id_list), lnk)
lnk.write(shell_item_id_list)
if self.link_flags.HasLinkInfo:
self._link_info.write(lnk)
if self.link_flags.HasName:
write_sized_string(self.description, lnk, self.link_flags.IsUnicode)
if self.link_flags.HasRelativePath:
write_sized_string(self.relative_path, lnk, self.link_flags.IsUnicode)
if self.link_flags.HasWorkingDir:
write_sized_string(self.work_dir, lnk, self.link_flags.IsUnicode)
if self.link_flags.HasArguments:
write_sized_string(self.arguments, lnk, self.link_flags.IsUnicode)
if self.link_flags.HasIconLocation:
write_sized_string(self.icon, lnk, self.link_flags.IsUnicode)
if self.extra_data:
lnk.write(self.extra_data.bytes)
else:
lnk.write(b'\x00\x00\x00\x00')
def _get_shell_item_id_list(self):
return self._shell_item_id_list
def _set_shell_item_id_list(self, shell_item_id_list):
self._shell_item_id_list = shell_item_id_list
self.link_flags.HasLinkTargetIDList = shell_item_id_list is not None
shell_item_id_list = property(_get_shell_item_id_list, _set_shell_item_id_list)
def _get_link_info(self):
return self._link_info
def _set_link_info(self, link_info):
self._link_info = link_info
self.link_flags.ForceNoLinkInfo = link_info is None
self.link_flags.HasLinkInfo = link_info is not None
link_info = property(_get_link_info, _set_link_info)
def _get_description(self):
return self._description
def _set_description(self, description):
self._description = description
self.link_flags.HasName = description is not None
description = property(_get_description, _set_description)
def _get_relative_path(self):
return self._relative_path
def _set_relative_path(self, relative_path):
self._relative_path = relative_path
self.link_flags.HasRelativePath = relative_path is not None
relative_path = property(_get_relative_path, _set_relative_path)
def _get_work_dir(self):
return self._work_dir
def _set_work_dir(self, work_dir):
self._work_dir = work_dir
self.link_flags.HasWorkingDir = work_dir is not None
work_dir = working_dir = property(_get_work_dir, _set_work_dir)
def _get_arguments(self):
return self._arguments
def _set_arguments(self, arguments):
self._arguments = arguments
self.link_flags.HasArguments = arguments is not None
arguments = property(_get_arguments, _set_arguments)
def _get_icon(self):
return self._icon
def _set_icon(self, icon):
self._icon = icon
self.link_flags.HasIconLocation = icon is not None
icon = property(_get_icon, _set_icon)
def _get_window_mode(self):
return self._show_command
def _set_window_mode(self, value):
if value not in list(_SHOW_COMMANDS.values()):
raise ValueError("Not a valid window mode: %s. Choose any of pylnk.WINDOW_*" % value)
self._show_command = value
window_mode = show_command = property(_get_window_mode, _set_window_mode)
@property
def path(self):
# lnk can contains several different paths at different structures
# here is some logic consistent with link properties at explorer (at least on test examples)
link_info_path = self._link_info.path if self._link_info and self._link_info.path else None
id_list_path = self._shell_item_id_list.get_path() if hasattr(self, '_shell_item_id_list') else None
env_var_path = None
if self.extra_data and self.extra_data.blocks:
for block in self.extra_data.blocks:
if type(block) == ExtraData_EnvironmentVariableDataBlock:
env_var_path = block.target_unicode.strip('\x00') or block.target_ansi.strip('\x00')
break
if id_list_path and id_list_path.startswith('%MY_COMPUTER%'):
# full local path has priority
return id_list_path[14:]
if id_list_path and id_list_path.startswith('%USERPROFILE%\\::'):
# path to KNOWN_FOLDER also has priority over link_info
return id_list_path[14:]
if link_info_path:
# local path at link_info_path has priority over network path at id_list_path
# full local path at link_info_path has priority over partial path at id_list_path
return link_info_path
if env_var_path:
# some links in Recent folder contains path only at ExtraData_EnvironmentVariableDataBlock
return env_var_path
return id_list_path
def specify_local_location(self, path, drive_type=None, drive_serial=None, volume_label=None):
self._link_info.drive_type = drive_type or DRIVE_UNKNOWN
self._link_info.drive_serial = drive_serial or ''
self._link_info.volume_label = volume_label or ''
self._link_info.local_base_path = path
self._link_info.local = True
self._link_info.make_path()
def specify_remote_location(self, network_share_name, base_name):
self._link_info.network_share_name = network_share_name
self._link_info.base_name = base_name
self._link_info.remote = True
self._link_info.make_path()
def __str__(self):
s = "Target file:\n"
s += str(self.file_flags)
s += "\nCreation Time: %s" % self.creation_time
s += "\nModification Time: %s" % self.modification_time
s += "\nAccess Time: %s" % self.access_time
s += "\nFile size: %s" % self.file_size
s += "\nWindow mode: %s" % self._show_command
s += "\nHotkey: %s\n" % self.hot_key
s += str(self._link_info)
if self.link_flags.HasLinkTargetIDList:
s += "\n%s" % self.shell_item_id_list
if self.link_flags.HasName:
s += "\nDescription: %s" % self.description
if self.link_flags.HasRelativePath:
s += "\nRelative Path: %s" % self.relative_path
if self.link_flags.HasWorkingDir:
s += "\nWorking Directory: %s" % self.work_dir
if self.link_flags.HasArguments:
s += "\nCommandline Arguments: %s" % self.arguments
if self.link_flags.HasIconLocation:
s += "\nIcon: %s" % self.icon
if self._link_info:
s += "\nUsed Path: %s" % self.path
if self.extra_data:
s += str(self.extra_data)
return s
# ---- convenience functions
def parse(lnk):
return Lnk(lnk)
def create(f=None):
lnk = Lnk()
lnk.file = f
return lnk
def for_file(
target_file, lnk_name=None, arguments=None, description=None, icon_file=None, icon_index=0,
work_dir=None, window_mode=None,
):
lnk = create(lnk_name)
lnk.link_flags.IsUnicode = True
lnk.link_info = None
if target_file.startswith('\\\\'):
# remote link
lnk.link_info = LinkInfo()
lnk.link_info.remote = 1
# extract server + share name from full path
path_parts = target_file.split('\\')
share_name, base_name = '\\'.join(path_parts[:4]), '\\'.join(path_parts[4:])
lnk.link_info.network_share_name = share_name.upper()
lnk.link_info.base_name = base_name
# somehow it requires EnvironmentVariableDataBlock & HasExpString flag
env_data_block = ExtraData_EnvironmentVariableDataBlock()
env_data_block.target_ansi = target_file
env_data_block.target_unicode = target_file
lnk.extra_data = ExtraData(blocks=[env_data_block])
lnk.link_flags.HasExpString = True
else:
# local link
levels = list(path_levels(target_file))
elements = [RootEntry(ROOT_MY_COMPUTER),
DriveEntry(levels[0])]
for level in levels[1:]:
segment = PathSegmentEntry.create_for_path(level)
elements.append(segment)
lnk.shell_item_id_list = LinkTargetIDList()
lnk.shell_item_id_list.items = elements
# lnk.link_flags.HasLinkInfo = True
if arguments:
lnk.link_flags.HasArguments = True
lnk.arguments = arguments
if description:
lnk.link_flags.HasName = True
lnk.description = description
if icon_file:
lnk.link_flags.HasIconLocation = True
lnk.icon = icon_file
lnk.icon_index = icon_index
if work_dir:
lnk.link_flags.HasWorkingDir = True
lnk.work_dir = work_dir
if window_mode:
lnk.window_mode = window_mode
if lnk_name:
lnk.save()
return lnk
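# Hedged usage sketch for for_file() above. The target path, shortcut name and
# the WINDOW_MAXIMIZED constant (assumed to be defined earlier in this module,
# next to WINDOW_NORMAL) are only illustrative; on a machine where the target
# exists, Lnk.path resolves back to the local path:
#   lnk = for_file('C:\\Windows\\notepad.exe', 'notepad.lnk',
#                  arguments='readme.txt', window_mode=WINDOW_MAXIMIZED)
#   lnk.path  -> 'C:\\Windows\\notepad.exe'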
def from_segment_list(data, lnk_name=None):
"""
Creates a lnk file from a list of path segments.
If lnk_name is given, the resulting lnk will be saved
to a file with that name.
    The expected list has the following format ("C:\\dir\\file.txt"):
['c:\\',
{'type': TYPE_FOLDER,
'size': 0, # optional for folders
'name': "dir",
'created': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476),
'modified': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476),
'accessed': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476)
},
{'type': TYPE_FILE,
'size': 823,
'name': "file.txt",
'created': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476),
'modified': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476),
'accessed': datetime.datetime(2012, 10, 12, 23, 28, 11, 8476)
}
]
For relative paths just omit the drive entry.
Hint: Correct dates really are not crucial for working lnks.
"""
if type(data) not in (list, tuple):
raise ValueError("Invalid data format, list or tuple expected")
lnk = Lnk()
entries = []
if is_drive(data[0]):
# this is an absolute link
entries.append(RootEntry(ROOT_MY_COMPUTER))
if not data[0].endswith('\\'):
data[0] += "\\"
drive = data.pop(0).encode("ascii")
entries.append(DriveEntry(drive))
for level in data:
segment = PathSegmentEntry()
segment.type = level['type']
if level['type'] == TYPE_FOLDER:
segment.file_size = 0
else:
segment.file_size = level['size']
segment.short_name = level['name']
segment.full_name = level['name']
segment.created = level['created']
segment.modified = level['modified']
segment.accessed = level['accessed']
entries.append(segment)
lnk.shell_item_id_list = LinkTargetIDList()
lnk.shell_item_id_list.items = entries
if data[-1]['type'] == TYPE_FOLDER:
lnk.file_flags.directory = True
if lnk_name:
lnk.save(lnk_name)
return lnk
def build_uwp(
    package_family_name, target, location=None, logo44x44=None, lnk_name=None,
) -> Lnk:
"""
:param lnk_name: ex.: crafted_uwp.lnk
:param package_family_name: ex.: Microsoft.WindowsCalculator_10.1910.0.0_x64__8wekyb3d8bbwe
:param target: ex.: Microsoft.WindowsCalculator_8wekyb3d8bbwe!App
:param location: ex.: C:\\Program Files\\WindowsApps\\Microsoft.WindowsCalculator_10.1910.0.0_x64__8wekyb3d8bbwe
:param logo44x44: ex.: Assets\\CalculatorAppList.png
"""
lnk = Lnk()
lnk.link_flags.HasLinkTargetIDList = True
lnk.link_flags.IsUnicode = True
lnk.link_flags.EnableTargetMetadata = True
lnk.shell_item_id_list = LinkTargetIDList()
elements = [
RootEntry(ROOT_UWP_APPS),
UwpSegmentEntry.create(
package_family_name=package_family_name,
target=target,
location=location,
logo44x44=logo44x44,
)
]
lnk.shell_item_id_list.items = elements
if lnk_name:
lnk.file = lnk_name
lnk.save()
return lnk
def get_prop(obj, prop_queue):
attr = getattr(obj, prop_queue[0])
if len(prop_queue) > 1:
return get_prop(attr, prop_queue[1:])
return attr
def cli():
parser = argparse.ArgumentParser(add_help=False)
subparsers = parser.add_subparsers(dest='action', metavar='{p, c, d}')
parser.add_argument('--help', '-h', action='store_true')
parser_parse = subparsers.add_parser('parse', aliases=['p'], help='read lnk file')
parser_parse.add_argument('filename', help='lnk filename to read')
parser_parse.add_argument('props', nargs='*', help='props path to read')
parser_create = subparsers.add_parser('create', aliases=['c'], help='create new lnk file')
parser_create.add_argument('target', help='target path')
parser_create.add_argument('name', help='lnk filename to create')
parser_create.add_argument('--arguments', '-a', nargs='?', help='additional arguments')
parser_create.add_argument('--description', '-d', nargs='?', help='description')
parser_create.add_argument('--icon', '-i', nargs='?', help='icon filename')
parser_create.add_argument('--icon-index', '-ii', type=int, default=0, nargs='?', help='icon index')
parser_create.add_argument('--workdir', '-w', nargs='?', help='working directory')
parser_create.add_argument('--mode', '-m', nargs='?', choices=['Maximized', 'Normal', 'Minimized'], help='window mode')
parser_dup = subparsers.add_parser('duplicate', aliases=['d'], help='read and write lnk file')
parser_dup.add_argument('filename', help='lnk filename to read')
parser_dup.add_argument('new_filename', help='new filename to write')
args = parser.parse_args()
if args.help or not args.action:
print('''
Tool for read or create .lnk files
usage: pylnk3.py [p]arse / [c]reate ...
Examples:
pylnk3 p filename.lnk
pylnk3 c c:\\prog.exe shortcut.lnk
pylnk3 c \\\\192.168.1.1\\share\\file.doc doc.lnk
pylnk3 create c:\\1.txt text.lnk -m Minimized -d "Description"
for more info use help for each action (ex.: "pylnk3 create -h")
'''.strip())
exit(1)
if args.action in ['create', 'c']:
for_file(
args.target, args.name, arguments=args.arguments,
description=args.description, icon_file=args.icon,
icon_index=args.icon_index, work_dir=args.workdir,
window_mode=args.mode,
)
elif args.action in ['parse', 'p']:
lnk = parse(args.filename)
props = args.props
if len(props) == 0:
print(lnk)
else:
for prop in props:
print(get_prop(lnk, prop.split('.')))
elif args.action in ['d', 'duplicate']:
lnk = parse(args.filename)
new_filename = args.new_filename
print(lnk)
lnk.save(new_filename)
print('saved')
if __name__ == '__main__':
cli()
```
|
{
"source": "jfcherng-sublime/ST-OpenUri",
"score": 2
}
|
#### File: plugin/helpers/image_processing.py
```python
from ..libs import png
from .settings import get_image_color
from .shared import global_get
from .utils import simple_decorator
from functools import lru_cache
from typing import List, Sequence
import base64
import io
import re
import sublime
@lru_cache
def get_colored_image_base64_by_color(img_name: str, rgba_code: str) -> str:
"""
@brief Get the colored image in base64 string by RGBA color code.
@param img_name The image name
@param rgba_code The color code in #RRGGBBAA
@return The image base64 string
"""
if not rgba_code:
return global_get(f"images.{img_name}.base64")
img_bytes: bytes = global_get(f"images.{img_name}.bytes")
img_bytes = change_png_bytes_color(img_bytes, rgba_code)
return base64.b64encode(img_bytes).decode()
def get_colored_image_base64_by_region(img_name: str, region: sublime.Region) -> str:
"""
@brief Get the colored image in base64 string by region.
@param img_name The image name
@param region The region
@return The image base64 string
"""
return get_colored_image_base64_by_color(img_name, get_image_color(img_name, region))
@lru_cache
def change_png_bytes_color(img_bytes: bytes, rgba_code: str) -> bytes:
"""
@brief Change all colors in the PNG bytes to the new color.
@param img_bytes The PNG image bytes
@param rgba_code The color code in the form of #RRGGBBAA
@return Color-changed PNG image bytes.
"""
if not rgba_code:
return img_bytes
if not re.match(r"#[0-9a-fA-F]{8}$", rgba_code):
raise ValueError("Invalid RGBA color code: " + rgba_code)
def render_pixel(rgba_src: Sequence[int], rgba_dst: Sequence[int], invert_gray: bool = False) -> List[int]:
gray = calculate_gray(rgba_src)
if invert_gray:
gray = 0xFF - gray
# ">> 8" is an approximation for "/ 0xFF" in following calculations
return [
int(rgba_dst[0] * gray) >> 8,
int(rgba_dst[1] * gray) >> 8,
int(rgba_dst[2] * gray) >> 8,
int(rgba_dst[3] * rgba_src[3]) >> 8,
]
    invert_gray = not is_img_light(img_bytes)  # invert for dark images so the recolored result keeps a solid look
rgba_dst = [int(rgba_code[i : i + 2], 16) for i in range(1, 9, 2)]
w, h, rows_src, img_info = png.Reader(bytes=img_bytes).asRGBA()
rows_dst: List[List[int]] = []
for row_src in rows_src:
row_dst: List[int] = []
for i in range(0, len(row_src), 4):
row_dst.extend(render_pixel(row_src[i : i + 4], rgba_dst, invert_gray))
rows_dst.append(row_dst)
buf = io.BytesIO()
png.from_array(rows_dst, "RGBA").write(buf)
return buf.getvalue()
def calculate_gray(rgb: Sequence[int]) -> int:
"""
@brief Calculate the gray scale of a color.
@see https://atlaboratary.blogspot.com/2013/08/rgb-g-rey-l-gray-r0.html
@param rgb The rgb color in list form
@return The gray scale.
"""
return int(rgb[0] * 38 + rgb[1] * 75 + rgb[2] * 15) >> 7
def is_img_light(img_bytes: bytes) -> bool:
"""
@brief Determine if image is light colored.
@param img_bytes The image bytes
@return True if image is light, False otherwise.
"""
w, h, rows, img_info = png.Reader(bytes=img_bytes).asRGBA()
gray_sum = 0
for row in rows:
for i in range(0, len(row), 4):
gray_sum += calculate_gray(row[i : i + 4])
return (gray_sum >> 7) > w * h
def add_alpha_to_rgb(color_code: str) -> str:
"""
@brief Add the alpha part to a valid RGB color code (#RGB, #RRGGBB, #RRGGBBAA)
@param color_code The color code
@return The color code in the form of #RRGGBBAA
"""
if not color_code:
return ""
rgb = color_code[1:9] # strip "#" and possible extra chars
# RGB to RRGGBB
if len(rgb) == 3:
rgb = rgb[0] * 2 + rgb[1] * 2 + rgb[2] * 2
return "#" + (rgb + "ff")[:8].lower()
@simple_decorator(add_alpha_to_rgb)
def color_code_to_rgba(color_code: str, region: sublime.Region) -> str:
"""
@brief Convert user settings color code into #RRGGBBAA form
@param color_code The color code string from user settings
@param region The scope-related region
@return The color code in the form of #RRGGBBAA
"""
if not color_code:
return ""
# "color_code" is a scope?
if not color_code.startswith("#"):
if view := sublime.active_window().active_view():
# "color" is guaranteed to be #RRGGBB or #RRGGBBAA
color = view.style_for_scope(view.scope_name(region.end() - 1)).get("foreground", "")
if color_code == "@scope":
return color
if color_code == "@scope_inverted":
# strip "#" and make color into RRGGBBAA int
rgba_int = int((color + "ff")[1:9], 16)
# invert RRGGBB, remain AA, strip "0x" prefix from hex and prepend 0s until 8 chars
return "#" + hex((~rgba_int & 0xFFFFFF00) | (rgba_int & 0xFF))[2:].zfill(8)
return ""
# now color code must starts with "#"
rgb = color_code[1:9] # strip "#" and possible extra chars
# RGB, RRGGBB, RRGGBBAA are legal
if len(rgb) in [3, 6, 8] and re.match(r"[0-9a-fA-F]+$", rgb):
return f"#{rgb}"
return ""
```
#### File: ST-OpenUri/plugin/OpenUriCommands.py
```python
from .helpers.functions import find_uri_regions_by_region
from .helpers.functions import find_uri_regions_by_regions
from .helpers.functions import open_uri_with_browser
from .helpers.settings import get_setting
import sublime
import sublime_plugin
class OpenUriFromCursorsCommand(sublime_plugin.TextCommand):
def run(self, edit: sublime.Edit, browser: str = "") -> None:
uris = map(
self.view.substr,
find_uri_regions_by_regions(self.view, self.view.sel(), get_setting("uri_search_radius")),
)
for uri in set(uris):
open_uri_with_browser(uri, browser)
class OpenUriFromViewCommand(sublime_plugin.TextCommand):
def run(self, edit: sublime.Edit, browser: str = "") -> None:
region = (0, self.view.size())
uris = map(
self.view.substr,
find_uri_regions_by_region(self.view, region, get_setting("uri_search_radius")),
)
for uri in set(uris):
open_uri_with_browser(uri, browser)
class SelectUriFromCursorsCommand(sublime_plugin.TextCommand):
def run(self, edit: sublime.Edit) -> None:
sel = self.view.sel()
if uri_regions := find_uri_regions_by_regions(self.view, sel, get_setting("uri_search_radius")):
sel.clear()
sel.add_all(uri_regions)
class SelectUriFromViewCommand(sublime_plugin.TextCommand):
def run(self, edit: sublime.Edit) -> None:
region = (0, self.view.size())
sel = self.view.sel()
if uri_regions := find_uri_regions_by_region(self.view, region, get_setting("uri_search_radius")):
sel.clear()
sel.add_all(uri_regions)
```
#### File: ST-OpenUri/plugin/OpenUri.py
```python
from .helpers.functions import find_uri_regions_by_region
from .helpers.functions import view_is_dirty_val
from .helpers.functions import view_last_typing_timestamp_val
from .helpers.phantom_set import delete_phantom_set
from .helpers.phantom_set import init_phantom_set
from .helpers.popup import show_popup
from .helpers.region_drawing import draw_uri_regions
from .helpers.settings import get_setting
from .helpers.settings import get_setting_show_open_button
from .helpers.settings import get_timestamp
from typing import List
import sublime
import sublime_plugin
class OpenUri(sublime_plugin.ViewEventListener):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
init_phantom_set(self.view)
view_last_typing_timestamp_val(self.view, 0)
def on_pre_close(self) -> None:
delete_phantom_set(self.view)
def on_load_async(self) -> None:
view_is_dirty_val(self.view, True)
def on_modified_async(self) -> None:
view_is_dirty_val(self.view, True)
view_last_typing_timestamp_val(self.view, get_timestamp())
def on_hover(self, point: int, hover_zone: int) -> None:
if hover_zone != sublime.HOVER_TEXT:
uri_regions: List[sublime.Region] = []
else:
uri_regions = find_uri_regions_by_region(self.view, point, get_setting("uri_search_radius"))
if uri_regions and get_setting_show_open_button(self.view) == "hover":
show_popup(self.view, uri_regions[0], point)
if get_setting("draw_uri_regions.enabled") == "hover":
draw_uri_regions(self.view, uri_regions)
```
|
{
"source": "jfcherng/Sublime-ToggleLoggings",
"score": 2
}
|
#### File: plugin/commands/console_loggings.py
```python
from abc import ABCMeta
from typing import Callable, Optional
import sublime
import sublime_plugin
ST_METHODS = set(dir(sublime))
class AbstractToggleConsoleLoggingCommand(sublime_plugin.ApplicationCommand, metaclass=ABCMeta):
@property
def logging_method_name(self) -> str:
# strips the leading "toggle_" from the command name
return self.name()[7:]
@property
def logging_method(self) -> Callable[..., None]:
return getattr(sublime, self.logging_method_name)
@property
def logging_status_method(self) -> Callable[[], bool]:
return getattr(sublime, f"get_{self.logging_method_name}")
def description(self) -> str:
# "toogle_log_fps" => "Toggle log fps"
return self.name().replace("_", " ").capitalize()
def is_checked(self) -> bool:
return (self.logging_status_method)()
def is_enabled(self) -> bool:
return self.logging_method_name in ST_METHODS
is_visible = is_enabled
def run(self, enable: Optional[bool] = None) -> None:
args = tuple() if enable is None else (enable,)
self.logging_method(*args)
class ToggleLogBuildSystemsCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_build_systems()`"""
...
class ToggleLogCommandsCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_commands()`"""
...
class ToggleLogControlTreeCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_control_tree()`"""
...
class ToggleLogFpsCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_fps()`"""
...
class ToggleLogIndexingCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_indexing()`"""
...
class ToggleLogInputCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_input()`"""
...
class ToggleLogResultRegexCommand(AbstractToggleConsoleLoggingCommand):
"""Toggle `sublime.log_result_regex()`"""
...
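# Hedged usage sketch: Sublime Text derives the command name from the class
# name, so ToggleLogCommandsCommand is invoked as "toggle_log_commands" and
# toggles sublime.log_commands()/sublime.get_log_commands(). A key binding
# could look like:
#   { "keys": ["ctrl+alt+l"], "command": "toggle_log_commands" }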
```
#### File: plugin/commands/open_sublime_text_dir.py
```python
from functools import lru_cache
from pathlib import Path
from typing import Dict
import sublime
import sublime_plugin
import tempfile
PACKAGE_NAME = __package__.partition(".")[0]
@lru_cache
def get_folder_map() -> Dict[str, str]:
cache_path = Path(sublime.cache_path())
packages_path = Path(sublime.packages_path())
return {
name: str(path.resolve())
for name, path in {
# from OS
"home": Path.home(),
"temp_dir": Path(tempfile.gettempdir()),
# from ST itself
"bin": Path(sublime.executable_path()).parent,
"cache": cache_path,
"data": packages_path / "..",
"index": cache_path / ".." / "Index",
"installed_packages": Path(sublime.installed_packages_path()),
"lib": packages_path / ".." / "Lib",
"local": packages_path / ".." / "Local",
"log": packages_path / ".." / "Log",
"packages": packages_path,
# from LSP
"package_storage": cache_path / ".." / "Package Storage",
}.items()
}
class OpenSublimeTextDirCommand(sublime_plugin.ApplicationCommand):
def run(self, folder: str, error_on_not_found: bool = True) -> None:
window = sublime.active_window()
path = Path(
sublime.expand_variables(
folder,
{
**window.extract_variables(), # type: ignore
**get_folder_map(),
},
)
)
if not path.is_dir():
if error_on_not_found:
sublime.error_message(f"[{PACKAGE_NAME}] Directory not found: `{path}`")
return
window.run_command("open_dir", {"dir": str(path)})
```
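A hedged usage sketch: the command name follows Sublime Text's convention (`OpenSublimeTextDirCommand` becomes `open_sublime_text_dir`), and the `${...}` placeholder is substituted by `sublime.expand_variables` with the keys from `get_folder_map`.
```python
# Run from Sublime Text's console; assumes the plugin is loaded.
import sublime

# Any key from get_folder_map() works, e.g. "data", "packages", "log", ...
sublime.run_command("open_sublime_text_dir", {"folder": "${packages}"})
```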
|
{
"source": "jfcherng/Sublime-VisualizeZeroWidthChars",
"score": 2
}
|
#### File: Sublime-VisualizeZeroWidthChars/plugin/PhatomSetsManager.py
```python
import sublime
from typing import Dict, Iterable, Optional
class PhatomSetsManager:
# class-level (shared across objects)
_phantom_sets = {
# phantom_set_id: PhantomSet object,
} # type: Dict[str, sublime.PhantomSet]
@classmethod
def get_phantom_set(cls, phantom_set_id: str) -> Optional[sublime.PhantomSet]:
return cls._phantom_sets.get(phantom_set_id)
@classmethod
def init_phantom_set(cls, view: sublime.View, phantom_set_id: str, phantom_set_key: str = "") -> None:
cls._phantom_sets[phantom_set_id] = sublime.PhantomSet(view, phantom_set_key)
@classmethod
def delete_phantom_set(cls, phantom_set_id: str) -> None:
cls._phantom_sets.pop(phantom_set_id, None)
@classmethod
def erase_phantom_set(cls, phantom_set_id: str) -> None:
if phantom_set_id in cls._phantom_sets:
cls._phantom_sets[phantom_set_id].update([])
@classmethod
def update_phantom_set(cls, phantom_set_id: str, phantoms: Iterable[sublime.Phantom]) -> None:
if phantom_set_id in cls._phantom_sets:
cls._phantom_sets[phantom_set_id].update(list(phantoms))
@classmethod
def clear(cls) -> None:
for phantom_set_id in list(cls._phantom_sets.keys()):
cls.delete_phantom_set(phantom_set_id)
cls._phantom_sets = {}
```
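A hedged usage sketch of the manager above; `view` stands for any valid `sublime.View` and the id/key strings are hypothetical.
```python
# Hypothetical plugin code using PhatomSetsManager; `view` is an existing sublime.View.
import sublime

PhatomSetsManager.init_phantom_set(view, "my_plugin:main", "my_plugin")
phantoms = [sublime.Phantom(sublime.Region(0, 0), "<b>hello</b>", sublime.LAYOUT_INLINE)]
PhatomSetsManager.update_phantom_set("my_plugin:main", phantoms)
PhatomSetsManager.erase_phantom_set("my_plugin:main")   # remove drawn phantoms, keep the set
PhatomSetsManager.delete_phantom_set("my_plugin:main")  # drop the set entirely
```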
#### File: Sublime-VisualizeZeroWidthChars/plugin/popup.py
```python
import sublime
from .functions import get_char_unicode_info
POPUP_TEMPLATE = """
<body id="visualize-zero-width-chars-popup">
<span>U+{code_point}: {name}</span>
</body>
"""
def generate_popup_html(view: sublime.View, char_region: sublime.Region) -> str:
char = view.substr(char_region)
char_info = get_char_unicode_info(char)
return POPUP_TEMPLATE.format_map(char_info)
def show_popup(view: sublime.View, char_region: sublime.Region, point: int) -> None:
view.show_popup(
generate_popup_html(view, char_region),
flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
location=point,
max_width=500,
)
```
#### File: Sublime-VisualizeZeroWidthChars/plugin/RepeatingTimer.py
```python
import threading
from typing import Callable, Optional
class RepeatingTimer:
def __init__(self, interval_ms: int, func: Callable, *args, **kwargs) -> None:
self.interval_s = interval_ms / 1000
self.func = func
self.args = args
self.kwargs = kwargs
self.timer = None # type: Optional[threading.Timer]
self.is_running = False
def set_func(self, func: Callable, *args, **kwargs) -> None:
self.func = func
self.args = args
self.kwargs = kwargs
def set_interval(self, interval_ms: int) -> None:
self.interval_s = interval_ms / 1000
def start(self) -> None:
self.timer = threading.Timer(self.interval_s, self._callback)
self.timer.start()
self.is_running = True
def cancel(self) -> None:
assert isinstance(self.timer, threading.Timer)
self.timer.cancel()
self.is_running = False
def _callback(self) -> None:
self.func(*self.args, **self.kwargs)
self.start()
```
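A minimal usage sketch of `RepeatingTimer`: print a heartbeat roughly every 500 ms, then stop. Each tick re-arms a fresh `threading.Timer`, and `cancel()` cancels the currently scheduled tick, stopping the loop.
```python
import time

timer = RepeatingTimer(500, print, "tick")
timer.start()
time.sleep(2)    # let it fire a few times
timer.cancel()   # stop re-arming; is_running becomes False
```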
|
{
"source": "jfchevrette/qontract-reconcile",
"score": 2
}
|
#### File: qontract-reconcile/reconcile/jenkins_job_builder.py
```python
import sys
import json
import logging
from reconcile.utils import gql
from reconcile import queries
from reconcile.utils.defer import defer
from reconcile.utils.jjb_client import JJB
from reconcile.utils.state import State
QUERY = """
{
jenkins_configs: jenkins_configs_v1 {
name
instance {
name
serverUrl
token {
path
field
}
deleteMethod
}
type
config
config_path
}
}
"""
QONTRACT_INTEGRATION = 'jenkins-job-builder'
GENERATE_TYPE = ['jobs', 'views']
def get_openshift_saas_deploy_job_name(saas_file_name, env_name, settings):
job_template_name = settings['saasDeployJobTemplate']
return f"{job_template_name}-{saas_file_name}-{env_name}"
def collect_saas_file_configs(settings, instance_name=None):
# collect a list of jobs per saas file per environment.
# each saas_file_config should have the structure described
# in the above query.
# to make things understandable, each variable used to form
# the structure will be called `jc_<variable>` (jenkins config).
saas_file_configs = []
repo_urls = set()
saas_files = queries.get_saas_files()
job_template_name = settings['saasDeployJobTemplate']
for saas_file in saas_files:
saas_file_name = saas_file['name']
jc_instance = saas_file['instance']
if instance_name is not None and jc_instance['name'] != instance_name:
continue
app_name = saas_file['app']['name']
# currently ignoring the actual Slack workspace
# as that is configured in Jenkins.
# revisit this if we support more than a single Slack workspace.
output = saas_file['slack'].get('output') or 'publish'
# if the output type is 'publish', we send notifications
# to the selected slack_channel
slack_channel = \
saas_file['slack']['channel'] \
if output == 'publish' \
else 'dev-null'
slack_notify_start = False
slack_notifications = saas_file['slack'].get('notifications')
if slack_notifications:
start = slack_notifications.get('start')
if start:
slack_notify_start = True
timeout = saas_file.get('timeout', None)
for resource_template in saas_file['resourceTemplates']:
url = resource_template['url']
repo_urls.add(url)
for target in resource_template['targets']:
env_name = target['namespace']['environment']['name']
upstream = target.get('upstream') or ''
final_job_template_name = \
f'{job_template_name}-with-upstream' if upstream \
else job_template_name
jc_name = get_openshift_saas_deploy_job_name(
saas_file_name, env_name, settings)
existing_configs = \
[c for c in saas_file_configs if c['name'] == jc_name]
if existing_configs:
# if upstream is defined - append it to existing upstreams
if upstream:
# should be exactly one
jc_data = existing_configs[0]['data']
project = jc_data['project']
# append upstream to existing upstreams
project['upstream'] += f',{upstream}'
# update job template name if needed
job_definition = project['jobs'][0]
if job_template_name in job_definition:
job_definition[final_job_template_name] = \
job_definition.pop(job_template_name)
continue
# each config is a list with a single item
# with the following structure:
# project:
# name: 'openshift-saas-deploy-{saas_file_name}-{env_name}'
# saas_file_name: '{saas_file_name}'
# env_name: '{env_name}'
# app_name: '{app_name}'
# slack_channel: '{slack_channel}'
# slack_notify_start: '{slack_notify_start}'
# jobs:
# - 'openshift-saas-deploy':
# display_name: display name of the job
jc_data = {
'project': {
'name': jc_name,
'saas_file_name': saas_file_name,
'env_name': env_name,
'app_name': app_name,
'slack_channel': slack_channel,
'slack_notify_start': slack_notify_start,
'upstream': upstream,
'jobs': [{
final_job_template_name: {
'display_name': jc_name
}
}]
}
}
if timeout:
jc_data['project']['timeout'] = timeout
saas_file_configs.append({
'name': jc_name,
'instance': jc_instance,
'type': 'jobs',
'data': jc_data
})
for saas_file_config in saas_file_configs:
jc_data = saas_file_config.pop('data')
saas_file_config['config'] = json.dumps([jc_data])
return saas_file_configs, repo_urls
def collect_configs(instance_name, config_name, settings):
gqlapi = gql.get_api()
raw_jjb_configs = gqlapi.query(QUERY)['jenkins_configs']
if instance_name is not None:
raw_jjb_configs = [n for n in raw_jjb_configs
if n['instance']['name'] == instance_name]
if config_name is not None:
raw_jjb_configs = [n for n in raw_jjb_configs
if n['type'] not in GENERATE_TYPE
or n['name'] == config_name]
if not raw_jjb_configs:
raise ValueError(f"config name {config_name} is not found")
return raw_jjb_configs, {}
saas_file_configs, saas_file_repo_urls = \
collect_saas_file_configs(settings, instance_name)
configs = raw_jjb_configs + saas_file_configs
if not configs:
raise ValueError(f"instance name {instance_name} is not found")
return configs, saas_file_repo_urls
def init_jjb(instance_name=None, config_name=None, print_only=False):
settings = queries.get_app_interface_settings()
configs, additional_repo_urls = \
collect_configs(instance_name, config_name, settings)
return JJB(configs, ssl_verify=False,
settings=settings, print_only=print_only), \
additional_repo_urls
def validate_repos_and_admins(jjb, additional_repo_urls):
jjb_repos = jjb.get_repos()
jjb_repos.update(additional_repo_urls)
app_int_repos = queries.get_repos()
missing_repos = [r for r in jjb_repos if r not in app_int_repos]
for r in missing_repos:
logging.error('repo is missing from codeComponents: {}'.format(r))
jjb_admins = jjb.get_admins()
app_int_users = queries.get_users()
app_int_bots = queries.get_bots()
external_users = queries.get_external_users()
github_usernames = \
[u.get('github_username') for u in app_int_users] + \
[b.get('github_username') for b in app_int_bots] + \
[u.get('github_username') for u in external_users]
unknown_admins = [a for a in jjb_admins if a not in github_usernames]
for a in unknown_admins:
logging.warning('admin is missing from users: {}'.format(a))
if missing_repos:
sys.exit(1)
@defer
def run(dry_run, io_dir='throughput/', print_only=False,
config_name=None, job_name=None, instance_name=None, defer=None):
if not print_only and config_name is not None:
raise Exception("--config-name must works with --print-only mode")
jjb, additional_repo_urls = \
init_jjb(instance_name, config_name, print_only)
defer(jjb.cleanup)
if print_only:
jjb.print_jobs(job_name=job_name)
if config_name is not None:
jjb.generate(io_dir, 'printout')
sys.exit(0)
accounts = queries.get_aws_accounts()
state = State(
integration=QONTRACT_INTEGRATION,
accounts=accounts,
settings=jjb.settings
)
if dry_run:
validate_repos_and_admins(jjb, additional_repo_urls)
jjb.generate(io_dir, 'desired')
jjb.overwrite_configs(state)
jjb.generate(io_dir, 'current')
jjb.print_diffs(io_dir, instance_name)
else:
jjb.update()
configs = jjb.get_configs()
for name, desired_config in configs.items():
state.add(name, value=desired_config, force=True)
```
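For readability, here is an illustrative (hypothetical) shape of one entry appended to `saas_file_configs` before the final loop serializes `data` into the `config` field; the concrete names are placeholders.
```python
# Hypothetical example of one collected saas-file config entry (values are placeholders).
example_entry = {
    'name': 'openshift-saas-deploy-my-saas-file-production',
    'instance': {'name': 'jenkins-instance'},  # the saas file's `instance` dict from the query
    'type': 'jobs',
    # `config` holds json.dumps([jc_data]), i.e. the project/jobs structure described above
    'config': '[{"project": {"name": "openshift-saas-deploy-my-saas-file-production", '
              '"jobs": [{"openshift-saas-deploy": {"display_name": "..."}}]}}]',
}
```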
|
{
"source": "JFChi/Understanding-and-Mitigating-Accuracy-Disparity-in-Regression",
"score": 4
}
|
#### File: JFChi/Understanding-and-Mitigating-Accuracy-Disparity-in-Regression/prep_law.py
```python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
'''
The bar passage study was initiated in 1991 by the Law School Admission Council as a national
longitudinal study. The dataset contains records for law students who took the bar exam.
The binary outcome indicates whether the student passed the bar exam or not. The features include
variables such as cluster, LSAT score, undergraduate GPA, zfyGPA, zGPA, full-time status, family
income, age, and also sensitive variables such as race and gender. The variable cluster is the result of a
clustering of similar law schools (done a priori) and is used to adjust for the effect of the type of
law school. zGPA is the z-score of the student's overall GPA and zfyGPA is the first-year GPA relative
to students at the same law school.
'''
def clean_dataset(dataset, attributes, centered):
df = pd.read_csv(dataset)
sens_df = pd.read_csv(attributes)
## Get and remove label Y
y_col = [str(c) for c in sens_df.columns if sens_df[c][0] == 2]
print('label feature: {}'.format(y_col))
if(len(y_col) > 1):
raise ValueError('More than 1 label column used')
if (len(y_col) < 1):
raise ValueError('No label column used')
y = df[y_col[0]]
## Do not use labels in rest of data
X = df.loc[:, df.columns != y_col[0]]
X = X.loc[:, X.columns != 'Unnamed: 0']
## Create X_prime, by getting protected attributes
sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
print('sensitive features: {}'.format(sens_cols))
sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
X, sens_dict = one_hot_code(X, sens_dict)
sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
print('there are {} sensitive features including derivative features'.format(len(sens_names)))
X_prime = X[sens_names]
if(centered):
X = center(X)
# normalize y to [0, 1]
y = ( y - np.min(y) ) / (np.max(y) - np.min(y) )
return X, X_prime, y
def center(X):
for col in X.columns:
X.loc[:, col] = ( X.loc[:, col]-np.mean(X.loc[:, col]) ) / np.std(X.loc[:, col])
# X.loc[:, col] = X.loc[:, col]-np.mean(X.loc[:, col])
return X
def one_hot_code(df1, sens_dict):
cols = df1.columns
for c in cols:
if isinstance(df1[c][0], str):
column = df1[c]
df1 = df1.drop(columns=c)
unique_values = list(set(column))
n = len(unique_values)
if n > 2:
for i in range(n):
col_name = '{}.{}'.format(c, i)
col_i = [1 if el == unique_values[i] else 0 for el in column]
df1[col_name] = col_i
sens_dict[col_name] = sens_dict[c]
del sens_dict[c]
else:
col_name = c
col = [1 if el == unique_values[0] else 0 for el in column]
df1[col_name] = col
return df1, sens_dict
if __name__ == "__main__":
# load data
data_path = "data/lawschool.csv"
centered = True
lawschool_attributes = "data/lawschool_protected.csv"
# save data summary to csv
law_school_df = pd.read_csv(data_path)
law_school_df.describe().to_csv("data/lawschool_summary.csv")
df_X, df_A, df_Y = clean_dataset(data_path, lawschool_attributes, centered)
# to numpy
X = df_X.values
Y = df_Y.values
A = df_A.values.astype(int).squeeze()
print("X.shape, Y.shape, A.shape", X.shape, Y.shape, A.shape)
print(np.min(X), np.max(X))
print(np.min(Y), np.max(Y))
print(np.min(A), np.max(A))
# train/test split: 0.8/0.2
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X, Y, A,
test_size=0.2,
random_state=0,
stratify=A)
print(A_train.shape, A_test.shape)
print(A_train.sum(), A_test.sum())
f_out_np = 'data/law_school.npz'
np.savez(f_out_np, x_train=X_train, x_test=X_test,
y_train=Y_train, y_test=Y_test,
attr_train=A_train, attr_test=A_test)
```
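A small follow-up sketch showing how the saved arrays can be loaded back for training (assumes the script above has been run and `data/law_school.npz` exists).
```python
import numpy as np

data = np.load('data/law_school.npz')
X_train, Y_train, A_train = data['x_train'], data['y_train'], data['attr_train']
X_test, Y_test, A_test = data['x_test'], data['y_test'], data['attr_test']
print(X_train.shape, X_test.shape)     # feature matrices
print(A_train.mean(), A_test.mean())   # protected-group ratio in each split
```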
|
{
"source": "jfckm/ocf",
"score": 2
}
|
#### File: tests/security/conftest.py
```python
import os
import sys
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int
)
from tests.utils import get_random_strings, get_random_ints
import pytest
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
@pytest.fixture(params=get_random_ints(c_uint16))
def c_uint16_randomize(request):
return request.param
@pytest.fixture(params=get_random_ints(c_uint32))
def c_uint32_randomize(request):
return request.param
@pytest.fixture(params=get_random_ints(c_uint64))
def c_uint64_randomize(request):
return request.param
@pytest.fixture(params=get_random_ints(c_int))
def c_int_randomize(request):
return request.param
@pytest.fixture(params=get_random_strings())
def string_randomize(request):
return request.param
```
#### File: tests/security/test_management_fuzzy.py
```python
import pytest
from pyocf.types.cache import Cache, CacheMode, CleaningPolicy, AlruParams, AcpParams
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.utils import Size as S
from tests.utils import generate_random_numbers
from pyocf.types.shared import OcfError, CacheLineSize, SeqCutOffPolicy
from ctypes import (
c_uint64,
c_uint32
)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_change_cache_mode(pyocf_ctx, cm, cls):
"""
Test whether it is possible to change cache mode to invalid value.
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Change cache mode to invalid one and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in CacheMode]:
continue
with pytest.raises(OcfError, match="Error changing cache mode"):
cache.change_cache_mode(i)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_cleaning_policy(pyocf_ctx, cm, cls):
"""
Test whether it is possible to change cleaning policy to invalid value
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Set cleaning policy to invalid one and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in CleaningPolicy]:
continue
with pytest.raises(OcfError, match="Error changing cleaning policy"):
cache.set_cleaning_policy(i)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_attach_cls(pyocf_ctx, cm, cls):
"""
Test whether it is possible to change cache line size to
invalid value while attaching cache device
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache(owner=cache_device.owner, cache_mode=cm, cache_line_size=cls)
cache.start_cache()
# Check whether it is possible to attach cache device with invalid cache line size
for i in generate_random_numbers(c_uint64):
if i in [item.value for item in CacheLineSize]:
continue
with pytest.raises(OcfError, match="Attaching cache device failed"):
cache.attach_device(cache_device, cache_line_size=i)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_cache_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
"""
Test whether it is possible to change cache seq cut-off policy to invalid value
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Create 2 core devices
core_device1 = Volume(S.from_MiB(10))
core1 = Core.using_device(core_device1)
core_device2 = Volume(S.from_MiB(10))
core2 = Core.using_device(core_device2)
# Add cores
cache.add_core(core1)
cache.add_core(core2)
# Change cache seq cut off policy to invalid one and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in SeqCutOffPolicy]:
continue
with pytest.raises(OcfError, match="Error setting cache seq cut off policy"):
cache.set_seq_cut_off_policy(i)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_core_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
"""
Test whether it is possible to change core seq cut-off policy to invalid value
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Create core device
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
# Add core
cache.add_core(core)
# Change core seq cut off policy to invalid one and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in SeqCutOffPolicy]:
continue
with pytest.raises(OcfError, match="Error setting core seq cut off policy"):
core.set_seq_cut_off_policy(i)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_alru_param(pyocf_ctx, cm, cls):
"""
Test whether it is possible to set invalid param for alru cleaning policy
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Change invalid alru param and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in AlruParams]:
continue
with pytest.raises(OcfError, match="Error setting cleaning policy param"):
cache.set_cleaning_policy_param(CleaningPolicy.ALRU, i, 1)
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_acp_param(pyocf_ctx, cm, cls):
"""
Test whether it is possible to set invalid param for acp cleaning policy
:param pyocf_ctx: basic pyocf context fixture
:param cm: cache mode we start with
:param cls: cache line size we start with
:return:
"""
# Start cache device
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(
cache_device, cache_mode=cm, cache_line_size=cls
)
# Change invalid acp param and check if failed
for i in generate_random_numbers(c_uint32):
if i in [item.value for item in AcpParams]:
continue
with pytest.raises(OcfError, match="Error setting cleaning policy param"):
cache.set_cleaning_policy_param(CleaningPolicy.ACP, i, 1)
```
#### File: functional/tests/utils.py
```python
import random
import string
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int,
c_uint
)
def generate_random_numbers(c_type, count=1000):
type_dict = {
c_uint16: [0, c_uint16(-1).value],
c_uint32: [0, c_uint32(-1).value],
c_uint64: [0, c_uint64(-1).value],
c_int: [int(-c_uint(-1).value / 2) - 1, int(c_uint(-1).value / 2)]
}
values = []
for i in range(0, count):
values.append(random.randint(type_dict[c_type][0], type_dict[c_type][1]))
return values
def generate_random_strings():
values = []
for t in [string.digits,
string.ascii_letters + string.digits,
string.ascii_lowercase,
string.ascii_uppercase,
string.printable,
string.punctuation,
string.hexdigits]:
for i in range(0, 100):
values.append(''.join(random.choice(t) for _ in range(random.randint(0, 20))))
return values
def get_random_ints(c_type):
for value in generate_random_numbers(c_type):
yield value
def get_random_strings():
for value in generate_random_strings():
yield value
```
|
{
"source": "jfckm/open-cas-linux",
"score": 2
}
|
#### File: utils_tests/opencas-py-tests/test_casadm_01.py
```python
import pytest
import subprocess
import mock
from opencas import casadm
from helpers import get_process_mock
@mock.patch("subprocess.Popen")
def test_run_cmd_01(mock_popen):
mock_popen.return_value = get_process_mock(0, "successes", "errors")
result = casadm.run_cmd(["casadm", "-L"])
assert result.exit_code == 0
assert result.stdout == "successes"
assert result.stderr == "errors"
mock_popen.assert_called_once_with(
["casadm", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
@mock.patch("subprocess.Popen")
def test_run_cmd_02(mock_popen):
mock_popen.return_value = get_process_mock(4, "successes", "errors")
with pytest.raises(casadm.CasadmError):
casadm.run_cmd(["casadm", "-L"])
@mock.patch("subprocess.Popen")
def test_get_version_01(mock_popen):
mock_popen.return_value = get_process_mock(0, "0.0.1", "errors")
result = casadm.get_version()
assert result.exit_code == 0
assert result.stdout == "0.0.1"
assert result.stderr == "errors"
mock_popen.assert_called_once_with(
[casadm.casadm_path, "--version", "--output-format", "csv"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
@mock.patch("subprocess.Popen")
def test_get_version_02(mock_popen):
mock_popen.return_value = get_process_mock(4, "successes", "errors")
with pytest.raises(casadm.CasadmError):
casadm.get_version()
```
|
{
"source": "jfckm/spdk",
"score": 2
}
|
#### File: scripts/rpc/env_dpdk.py
```python
def env_dpdk_get_mem_stats(client):
"""Dump the applications memory stats to a file.
Returns:
The path to the file where the stats are written.
"""
return client.call('env_dpdk_get_mem_stats')
```
#### File: scripts/rpc/ioat.py
```python
from .helpers import deprecated_alias
@deprecated_alias('ioat_scan_copy_engine')
@deprecated_alias('scan_ioat_copy_engine')
def ioat_scan_accel_engine(client):
"""Enable IOAT accel engine.
"""
return client.call('ioat_scan_accel_engine')
```
|
{
"source": "jfclere/AC2021",
"score": 3
}
|
#### File: AC2021/scripts/ssl.py
```python
import sys
def main():
print(f"Arguments count: {len(sys.argv)}")
for i, arg in enumerate(sys.argv):
filename = arg
print(f"Argument {i:>6}: {arg}")
with open(arg) as file:
msg = bytearray()
for line in file:
if line.endswith("|\n"):
start = " | "
end = "|\n"
string = (line.split(start))[1].split(end)[0]
data = string[6:-19]
data = data.replace('-', ' ')
print(data)
hexdata = bytearray.fromhex(data)
msg += hexdata
print(msg)
# analysis of the message
i = 3
if msg[0] == 0x16:
print("TLS handshake protocol")
print("Version: " + str(msg[1]) + str(msg[2]))
# length is the 2 next bytes
l = msg[3]*256 + msg[4]
print("Length: " + str(l) + ":" + str(len(msg)))
if l > len(msg):
raise SystemExit("not enough bytes in the trace")
i = 5
if msg[5] == 0x01:
print("client hello")
l = (msg[6]*256 + msg[7])*256 + msg[8]
print("Payload length: " + str(l))
# version again
i = 9
i += 2
# Client Random
i += 32
# Client Session
l = msg[i]
print("Session ID length: " + str(l))
i += l + 1
# Cipher Suites
l = msg[i]*256 + msg[i+1]
print("Cipher Suites length: " + str(l))
i += l + 2
# Compression Methods
l = msg[i]
print("Compression Methods length: " + str(l))
i += l + 1
# Extensions
l = msg[i]*256 + msg[i+1]
print("Extensions length: " + str(l))
i += 2
endext = i + l
while i < endext:
# the extensions are 2 bytes type + 2 bytes length + value
t = msg[i]*256 + msg[i+1]
i += 2
l = msg[i]*256 + msg[i+1]
i += 2
print("Extension: " + str(t) + " length: " + str(l))
if t == 0:
# the SNI extension has several pieces: 2-byte list length + 1-byte type + 2-byte name length
j = i + 3
snil = msg[j]*256 + msg[j+1]
j += 3
sni = msg[j:j+snil]
print("SNI: length: " + str(snil) + " value: " + str(sni))
if t == 41:
print("pre_shared_key")
if t == 65281:
print("65281: weird...")
i += l
print("client hello: " + str(i) + " bytes decoded")
if __name__=="__main__":
main()
```
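A side note on the byte arithmetic used throughout the parser: the record and payload lengths are big-endian integers, so the manual `*256` sums are equivalent to `int.from_bytes`. A tiny illustrative check (the header bytes are hypothetical):
```python
msg = bytes([0x16, 0x03, 0x01, 0x01, 0x2c])      # hypothetical TLS record header
length = msg[3] * 256 + msg[4]                   # as done in the script
assert length == int.from_bytes(msg[3:5], "big") == 300
```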
|
{
"source": "jf---/compas_occ",
"score": 2
}
|
#### File: compas_occ/scripts/nsided.py
```python
import math
from compas.geometry import Point, Polygon, Polyline, Rotation
from compas_occ.geometry.surfaces import BSplineSurface
from compas_occ.geometry.curves import BSplineCurve
from compas_view2.app import App
from OCC.Core.BRepFill import BRepFill_Filling
from OCC.Core.BRepBuilderAPI import BRepBuilderAPI_MakePolygon
from OCC.Core.GeomAbs import GeomAbs_C0
from OCC.Core.gp import gp_Pnt
from OCC.Extend.TopologyUtils import TopologyExplorer
polygon = Polygon.from_sides_and_radius_xy(5, 2).transformed(Rotation.from_axis_and_angle([1, 1, 0], math.radians(30)))
points = [gp_Pnt(* point) for point in polygon]
poly = BRepBuilderAPI_MakePolygon()
for point in points:
poly.Add(point)
poly.Build()
poly.Close()
edges = TopologyExplorer(poly.Wire()).edges()
nsided = BRepFill_Filling()
for edge in edges:
nsided.Add(edge, GeomAbs_C0)
nsided.Add(gp_Pnt(* (polygon.centroid + Point(0, 0, 0.2))))
nsided.Build()
face = nsided.Face()
surface = BSplineSurface.from_face(face)
viewer = App()
viewer.add(surface.to_vizmesh(resolution=100))
viewer.add(polygon, linewidth=5, linecolor=(1, 0, 0))
for edge in edges:
curve = BSplineCurve.from_edge(edge)
if curve:
viewer.add(Polyline(curve.to_locus(resolution=16)), linecolor=(1, 0, 0), linewidth=5)
viewer.run()
# def build_plate(polygon, points):
# '''
# build a surface from a constraining polygon(s) and point(s)
# @param polygon: list of polygons ( TopoDS_Shape)
# @param points: list of points ( gp_Pnt )
# '''
# # plate surface
# bpSrf = GeomPlate_BuildPlateSurface(3, 15, 2)
# # add curve constraints
# for poly in polygon:
# for edg in WireExplorer(poly).ordered_edges():
# c = BRepAdaptor_HCurve()
# c.ChangeCurve().Initialize(edg)
# constraint = BRepFill_CurveConstraint(c, 0)
# bpSrf.Add(constraint)
# # add point constraint
# for pt in points:
# bpSrf.Add(GeomPlate_PointConstraint(pt, 0))
# bpSrf.Perform()
# maxSeg, maxDeg, critOrder = 9, 8, 0
# tol = 1e-4
# dmax = max([tol, 10 * bpSrf.G0Error()])
# srf = bpSrf.Surface()
# plate = GeomPlate_MakeApprox(srf, tol, maxSeg, maxDeg, dmax, critOrder)
# uMin, uMax, vMin, vMax = srf.Bounds()
# return make_face(plate.Surface(), uMin, uMax, vMin, vMax, 1e-4)
# def build_geom_plate(edges):
# bpSrf = GeomPlate_BuildPlateSurface(3, 9, 12)
# # add curve constraints
# for edg in edges:
# c = BRepAdaptor_HCurve()
# print('edge:', edg)
# c.ChangeCurve().Initialize(edg)
# constraint = BRepFill_CurveConstraint(c, 0)
# bpSrf.Add(constraint)
# # add point constraint
# try:
# bpSrf.Perform()
# except RuntimeError:
# print('failed to build the geom plate surface ')
# srf = bpSrf.Surface()
# plate = GeomPlate_MakeApprox(srf, 0.01, 10, 5, 0.01, 0, GeomAbs_C0)
# uMin, uMax, vMin, vMax = srf.Bounds()
# face = make_face(plate.Surface(), uMin, uMax, vMin, vMax, 1e-6)
# return
```
|
{
"source": "jf---/compas",
"score": 2
}
|
#### File: compas_blender/artists/networkartist.py
```python
from functools import partial
import compas_blender
from compas_blender.artists._artist import BaseArtist
from compas.utilities import color_to_colordict
colordict = partial(color_to_colordict, colorformat='rgb', normalize=True)
__all__ = [
'NetworkArtist',
]
class NetworkArtist(BaseArtist):
"""Artist for COMPAS network objects.
Parameters
----------
network : :class:`compas.datastructures.Network`
A COMPAS network.
settings : dict, optional
A dict with custom visualisation settings.
Attributes
----------
network : :class:`compas.datastructures.Network`
The COMPAS network associated with the artist.
settings : dict
Default settings for color, scale, tolerance, ...
"""
def __init__(self, network):
super().__init__()
self._nodecollection = None
self._edgecollection = None
self._pathcollection = None
self._object_node = {}
self._object_edge = {}
self._object_path = {}
self.color_nodes = (1.0, 1.0, 1.0)
self.color_edges = (0.0, 0.0, 0.0)
self.show_nodes = True
self.show_edges = True
self.show_nodelabels = False
self.show_edgelabels = False
self.network = network
@property
def nodecollection(self):
path = f"{self.network.name}::Nodes"
if not self._nodecollection:
self._nodecollection = compas_blender.create_collections_from_path(path)[1]
return self._nodecollection
@property
def edgecollection(self):
path = f"{self.network.name}::Edges"
if not self._edgecollection:
self._edgecollection = compas_blender.create_collections_from_path(path)[1]
return self._edgecollection
@property
def pathcollection(self):
path = f"{self.network.name}::Paths"
if not self._pathcollection:
self._pathcollection = compas_blender.create_collections_from_path(path)[1]
return self._pathcollection
@property
def object_node(self):
if not self._object_node:
self._object_node = {}
return self._object_node
@object_node.setter
def object_node(self, values):
self._object_node = dict(values)
@property
def object_edge(self):
if not self._object_edge:
self._object_edge = {}
return self._object_edge
@object_edge.setter
def object_edge(self, values):
self._object_edge = dict(values)
@property
def object_path(self):
if not self._object_path:
self._object_path = {}
return self._object_path
@object_path.setter
def object_path(self, values):
self._object_path = dict(values)
def clear(self):
objects = list(self.object_node)
objects += list(self.object_edge)
objects += list(self.object_path)
compas_blender.delete_objects(objects, purge_data=True)
self._object_node = {}
self._object_edge = {}
self._object_path = {}
def draw(self):
"""Draw the network.
Returns
-------
list of :class:`bpy.types.Object`
The created Blender objects.
"""
self.clear()
if self.show_nodes:
self.draw_nodes()
if self.show_edges:
self.draw_edges()
return self.objects
def draw_nodes(self, nodes=None, color=None):
"""Draw a selection of nodes.
Parameters
----------
nodes : list of int, optional
A list of node identifiers.
Default is ``None``, in which case all nodes are drawn.
color : rgb-tuple or dict of rgb-tuples
The color specification for the nodes.
Returns
-------
list of :class:`bpy.types.Object`
"""
nodes = nodes or list(self.network.nodes())
node_color = colordict(color, nodes, default=self.color_nodes)
points = []
for node in nodes:
points.append({
'pos': self.network.node_coordinates(node),
'name': f"{self.network.name}.node.{node}",
'color': node_color[node],
'radius': 0.05})
objects = compas_blender.draw_points(points, self.nodecollection)
self.object_node = zip(objects, nodes)
return objects
def draw_edges(self, edges=None, color=None):
"""Draw a selection of edges.
Parameters
----------
edges : list
A list of edge keys (as uv pairs) identifying which edges to draw.
The default is ``None``, in which case all edges are drawn.
color : rgb-tuple or dict of rgb-tuples
The color specification for the edges.
Returns
-------
list of :class:`bpy.types.Object`
"""
edges = edges or list(self.network.edges())
edge_color = colordict(color, edges, default=self.color_edges)
lines = []
for edge in edges:
lines.append({
'start': self.network.node_coordinates(edge[0]),
'end': self.network.node_coordinates(edge[1]),
'color': edge_color[edge],
'name': f"{self.network.name}.edge.{edge[0]}-{edge[1]}",
'width': 0.02})
objects = compas_blender.draw_lines(lines, self.edgecollection)
self.object_edge = zip(objects, edges)
return objects
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
```
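A hedged usage sketch of the artist (to be run inside Blender with COMPAS installed; the toy network below is an assumption, any `Network` works).
```python
# Hypothetical usage inside Blender's Python environment.
from compas.datastructures import Network
from compas_blender.artists import NetworkArtist

network = Network.from_lines([([0, 0, 0], [1, 0, 0]), ([1, 0, 0], [1, 1, 0])])
artist = NetworkArtist(network)
artist.draw_nodes(color=(1.0, 0.0, 0.0))  # a dict mapping node -> rgb is also accepted
artist.draw_edges()
```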
#### File: geometry/icp/icp_numpy.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
from numpy import asarray
from numpy import argmin
from numpy.linalg import det
from scipy.spatial.distance import cdist
from scipy.linalg import svd
from scipy.linalg import norm
from compas.numerical import pca_numpy
from compas.numerical import normrow
from compas.geometry import Transformation
from compas.geometry import Frame
from compas.geometry import transform_points_numpy
__all__ = ['icp_numpy']
def bestfit_transform(A, B):
n, m = A.shape
Am = np.mean(A, axis=0)
Bm = np.mean(B, axis=0)
AA = A - Am
BB = B - Bm
# cross-covariance matrix
C = np.dot(AA.T, BB)
U, S, Vt = svd(C)
# rigid rotation of the data frames
R = np.dot(Vt.T, U.T)
# check for RotoReflection
if det(R) < 0:
Vt[m-1, :] *= -1
R = np.dot(Vt.T, U.T)
# translation that moves data set means to same location
# this can be done differently (by applying three transformations (T1, R, T2))
T = Bm.T - np.dot(R, Am.T)
X = np.identity(m+1)
X[:m, :m] = R
X[:m, m] = T
return X
def icp_numpy(source, target, tol=1e-3):
"""Align two point clouds using the Iterative Closest Point (ICP) method.
Parameters
----------
source : list of point
The source data.
target : list of point
The target data.
tol : float, optional
Tolerance for finding matches.
Default is ``1e-3``.
Returns
-------
The transformed source points and the last best-fit transformation.
Notes
-----
First we align the source with the target cloud using the frames resulting
from a PCA of each of the clouds, simply by calculating a frame-to-frame transformation.
This initial alignment is used to establish an initial correspondence between
the points of the two clouds.
Then we iteratively improve the alignment by computing successive "best-fit"
transformations using SVD of the cross-covariance matrix of the two data sets.
During this iterative process, we continuously update the correspondence
between the point clouds by finding the closest point in the target to each
of the source points.
The algorithm terminates when the alignment error is below a specified tolerance.
Examples
--------
>>>
"""
A = asarray(source)
B = asarray(target)
origin, axes, _ = pca_numpy(A)
A_frame = Frame(origin, axes[0], axes[1])
origin, axes, _ = pca_numpy(B)
B_frame = Frame(origin, axes[0], axes[1])
X = Transformation.from_frame_to_frame(A_frame, B_frame)
A = transform_points_numpy(A, X)
for i in range(20):
D = cdist(A, B, 'euclidean')
closest = argmin(D, axis=1)
if norm(normrow(A - B[closest])) < tol:
break
X = bestfit_transform(A, B[closest])
A = transform_points_numpy(A, X)
return A, X
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(globs=globals())
```
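A minimal usage sketch of `icp_numpy` (import paths assumed from the package layout shown here; the data is synthetic): align a transformed copy of a point cloud back onto the original.
```python
import math
import numpy as np
from compas.geometry import matrix_from_axis_and_angle
from compas.geometry import transform_points_numpy
from compas.geometry import icp_numpy   # assumed to be re-exported at package level

source = np.random.rand(100, 3).tolist()
T = matrix_from_axis_and_angle([0, 0, 1], math.radians(30), point=[1, 2, 0])
target = transform_points_numpy(source, T).tolist()

A, X = icp_numpy(source, target, tol=1e-3)  # A: aligned source points, X: last best-fit transform
```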
#### File: geometry/transformations/transformations_numpy.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from numpy import hstack
from numpy import ones
from numpy import vectorize
from numpy import tile
from scipy.linalg import solve
from compas.geometry import cross_vectors
__all__ = [
'transform_points_numpy',
'transform_vectors_numpy',
'homogenize_numpy',
'dehomogenize_numpy',
'homogenize_and_flatten_frames_numpy',
'dehomogenize_and_unflatten_frames_numpy',
'world_to_local_coordinates_numpy',
'local_to_world_coordinates_numpy',
]
def transform_points_numpy(points, T):
"""Transform multiple points with one Transformation using numpy.
Parameters
----------
points : list of :class:`Point` or list of list of float
A list of points to be transformed.
T : :class:`Transformation` or list of list of float
The transformation to apply.
Examples
--------
>>> points = [[1, 0, 0], [1, 2, 4], [4, 7, 1]]
>>> T = matrix_from_axis_and_angle([0, 2, 0], math.radians(45), point=[4, 5, 6])
>>> points_transformed = transform_points_numpy(points, T)
"""
T = asarray(T)
points = homogenize_numpy(points, w=1.0)
return dehomogenize_numpy(points.dot(T.T))
def transform_vectors_numpy(vectors, T):
"""Transform multiple vectors with one Transformation using numpy.
Parameters
----------
vectors : list of :class:`Vector`
A list of vectors to be transformed.
T : :class:`Transformation`
The transformation to apply.
Examples
--------
>>> vectors = [[1, 0, 0], [1, 2, 4], [4, 7, 1]]
>>> T = matrix_from_axis_and_angle([0, 2, 0], math.radians(45), point=[4, 5, 6])
>>> vectors_transformed = transform_vectors_numpy(vectors, T)
"""
T = asarray(T)
vectors = homogenize_numpy(vectors, w=0.0)
return dehomogenize_numpy(vectors.dot(T.T))
def transform_frames_numpy(frames, T):
"""Transform multiple frames with one Transformation usig numpy.
Parameters
----------
frames : list of :class:`Frame`
A list of frames to be transformed.
T : :class:`Transformation`
The transformation to apply on the frames.
Examples
--------
>>> frames = [Frame([1, 0, 0], [1, 2, 4], [4, 7, 1]), Frame([0, 2, 0], [5, 2, 1], [0, 2, 1])]
>>> T = matrix_from_axis_and_angle([0, 2, 0], math.radians(45), point=[4, 5, 6])
>>> transformed_frames = transform_frames_numpy(frames, T)
"""
T = asarray(T)
points_and_vectors = homogenize_and_flatten_frames_numpy(frames)
return dehomogenize_and_unflatten_frames_numpy(points_and_vectors.dot(T.T))
def world_to_local_coordinates_numpy(frame, xyz):
"""Convert global coordinates to local coordinates.
Parameters
----------
frame : :class:`Frame` or [point, xaxis, yaxis]
The local coordinate system.
xyz : array-like
The global coordinates of the points to convert.
Returns
-------
array
The coordinates of the given points in the local coordinate system.
Examples
--------
>>> import numpy as np
>>> frame = Frame([0, 1, 0], [3, 4, 1], [1, 5, 9])
>>> xyz = [Point(2, 3, 5)]
>>> rst = world_to_local_coordinates_numpy(frame, xyz)
>>> np.allclose(rst, [[3.726, 4.088, 1.550]], rtol=1e-3)
True
"""
origin = frame[0]
uvw = [frame[1], frame[2], cross_vectors(frame[1], frame[2])]
uvw = asarray(uvw).T
xyz = asarray(xyz).T - asarray(origin).reshape((-1, 1))
rst = solve(uvw, xyz)
return rst.T
def local_to_world_coordinates_numpy(frame, rst):
"""Convert local coordinates to global (world) coordinates.
Parameters
----------
frame : :class:`Frame` or [point, xaxis, yaxis]
The local coordinate system.
rst : array-like
The coordinates of the points wrt the local coordinate system.
Returns
-------
array
The world coordinates of the given points.
Notes
-----
``origin`` and ``uvw`` together form the frame of local coordinates.
Examples
--------
>>> frame = Frame([0, 1, 0], [3, 4, 1], [1, 5, 9])
>>> rst = [Point(3.726, 4.088, 1.550)]
>>> xyz = local_to_world_coordinates_numpy(frame, rst)
>>> numpy.allclose(xyz, [[2.000, 3.000, 5.000]], rtol=1e-3)
True
"""
origin = frame[0]
uvw = [frame[1], frame[2], cross_vectors(frame[1], frame[2])]
uvw = asarray(uvw).T
rst = asarray(rst).T
xyz = uvw.dot(rst) + asarray(origin).reshape((-1, 1))
return xyz.T
# ==============================================================================
# helping helpers
# ==============================================================================
def homogenize_numpy(points, w=1.0):
"""Dehomogenizes points or vectors.
Parameters
----------
points: list of :class:`Points` or list of :class:`Vectors`
Returns
-------
:class:`numpy.ndarray`
Examples
--------
>>> points = [[1, 1, 1], [0, 1, 0], [1, 0, 0]]
>>> res = homogenize_numpy(points, w=1.0)
>>> numpy.allclose(res, [[1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, -0.0, 0.0, 1.0]])
True
"""
points = asarray(points)
points = hstack((points, w * ones((points.shape[0], 1))))
return points
def dehomogenize_numpy(points):
"""Dehomogenizes points or vectors.
Parameters
----------
points: list of :class:`Points` or list of :class:`Vectors`
Returns
-------
:class:`numpy.ndarray`
Examples
--------
>>> points = [[1, 1, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]
>>> res = dehomogenize_numpy(points)
>>> numpy.allclose(res, [[1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [1.0, -0.0, 0.0]])
True
"""
def func(a):
return a if a else 1.
func = vectorize(func)
points = asarray(points)
return points[:, :-1] / func(points[:, -1]).reshape((-1, 1))
def homogenize_and_flatten_frames_numpy(frames):
"""Homogenize a list of frames and flatten the 3D list into a 2D list using numpy.
The frame consists of a point and 2 orthonormal vectors.
Parameters
----------
frames: list of :class:`Frame`
Returns
-------
:class:`numpy.ndarray`
An array of points and vectors.
Examples
--------
>>> frames = [Frame((1, 1, 1), (0, 1, 0), (1, 0, 0))]
>>> res = homogenize_and_flatten_frames_numpy(frames)
>>> numpy.allclose(res, [[1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [1.0, -0.0, 0.0, 0.0]])
True
"""
n = len(frames)
frames = asarray(frames).reshape(n * 3, 3)
extend = tile(asarray([1, 0, 0]).reshape(3, 1), (n, 1))
return hstack((frames, extend))
def dehomogenize_and_unflatten_frames_numpy(points_and_vectors):
"""Dehomogenize a list of vectors and unflatten the 2D list into a 3D list.
Parameters
----------
points_and_vectors: list of list of float
Homogenized points and vectors.
Returns
-------
:class:`numpy.ndarray`
The frames.
Examples
--------
>>> points_and_vectors = [(1., 1., 1., 1.), (0., 1., 0., 0.), (1., 0., 0., 0.)]
>>> res = dehomogenize_and_unflatten_frames_numpy(points_and_vectors)
>>> numpy.allclose(res, [[1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
True
"""
frames = dehomogenize_numpy(points_and_vectors)
return frames.reshape((int(frames.shape[0]/3.), 3, 3))
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
import numpy # noqa: F401
import math # noqa: F401
from compas.geometry import Frame # noqa: F401
from compas.geometry import Point # noqa: F401
from compas.geometry import matrix_from_axis_and_angle # noqa: F401
doctest.testmod(globs=globals())
```
#### File: compas_rhino/artists/polyhedronartist.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from ._shapeartist import ShapeArtist
class PolyhedronArtist(ShapeArtist):
"""Artist for drawing polyhedron shapes.
Parameters
----------
shape : :class:`compas.geometry.Polyhedron`
A COMPAS polyhedron.
Notes
-----
See :class:`compas_rhino.artists.ShapeArtist` for all other parameters.
Examples
--------
.. code-block:: python
import random
from compas.geometry import Pointcloud
from compas.geometry import Polyhedron
from compas.geometry import Translation
from compas.utilities import i_to_rgb
import compas_rhino
from compas_rhino.artists import PolyhedronArtist
pcl = Pointcloud.from_bounds(10, 10, 10, 100)
tpl = Polyhedron.from_platonicsolid(12)
compas_rhino.clear_layer("Test::PolyhedronArtist")
for point in pcl.points:
polyhedron = tpl.transformed(Translation.from_vector(point))
artist = PolyhedronArtist(polyhedron, color=i_to_rgb(random.random()), layer="Test::PolyhedronArtist")
artist.draw()
"""
def draw(self, show_vertices=False, show_edges=False, show_faces=True, join_faces=True):
"""Draw the polyhedron associated with the artist.
Parameters
----------
show_vertices : bool, optional
Default is ``False``.
show_edges : bool, optional
Default is ``False``.
show_faces : bool, optional
Default is ``True``.
join_faces : bool, optional
Default is ``True``.
Returns
-------
list
The GUIDs of the objects created in Rhino.
"""
vertices = [list(vertex) for vertex in self.shape.vertices]
guids = []
if show_vertices:
points = [{'pos': point, 'color': self.color, 'name': str(index)} for index, point in enumerate(vertices)]
guids += compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)
if show_edges:
edges = self.shape.edges
lines = [{'start': vertices[i], 'end': vertices[j], 'color': self.color} for i, j in edges]
guids += compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
if show_faces:
faces = self.shape.faces
if join_faces:
guid = compas_rhino.draw_mesh(vertices, faces, layer=self.layer, name=self.name, color=self.color, disjoint=True)
guids.append(guid)
else:
polygons = [{'points': [vertices[index] for index in face], 'color': self.color} for face in faces]
guids += compas_rhino.draw_faces(polygons, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
```
#### File: compas_rhino/objects/_select.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
```
|
{
"source": "jfcorbett/data-driven-web-apps-with-flask",
"score": 3
}
|
#### File: pypi_org/data/package.py
```python
import datetime
from typing import Iterable
import sqlalchemy as sa
import sqlalchemy.orm as orm
from pypi_org.data.modelbase import SqlAlchemyBase
from pypi_org.data.releases import Release
class Package(SqlAlchemyBase):
# This class will be associated to one table in the db.
# Inherits from singleton base class. (One base class for one database.)
# Style: db table names are the same as the class name, but lowercase and plural
__tablename__ = 'packages'
# The class' fields are each associated to a column in the db table (col name = field var name)
id = sa.Column(sa.String, primary_key=True) # primary keys automatically get an index
created = sa.Column(sa.DateTime, default=datetime.datetime.now, index=True)
summary = sa.Column(sa.String, nullable=False)
description = sa.Column(sa.String, nullable=True)
homepage = sa.Column(sa.String)
docs_url = sa.Column(sa.String)
package_url = sa.Column(sa.String)
author_name = sa.Column(sa.String)
author_email = sa.Column(sa.String, index=True)  # an index improves performance for sorting, lookups, ...
license = sa.Column(sa.String, index=True)
# releases relationship
# Set up this field's (db column's) relationship to some other db table
releases: Iterable[Release] = orm.relation("Release", order_by=[
Release.major_ver.desc(),
Release.minor_ver.desc(),
Release.build_ver.desc(),
], back_populates='package') # user can now access relationship via Release.package
def __repr__(self):
return f'<Package {self.id}>'
```
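A hedged sketch of how this model and its ordered `releases` relationship are typically queried; `session` is assumed to be a SQLAlchemy session bound to the same engine as `SqlAlchemyBase`.
```python
import sqlalchemy.orm as orm

def latest_release_of(session: orm.Session, package_id: str):
    # Primary-key lookup; returns None if the package does not exist.
    package = session.query(Package).filter(Package.id == package_id).first()
    if package is None:
        return None
    # `releases` is ordered newest-first by the relationship's order_by clause above.
    releases = list(package.releases)
    return releases[0] if releases else None
```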
#### File: flasktut/tests/test_package.py
```python
import datetime
import unittest.mock
from flask import Response
from tests.client_for_tests import flask_app, client
def test_package_details_success():
# Arrange
from pypi_org.views.package_views import package_details
from pypi_org.data.package import Package
from pypi_org.data.releases import Release
test_package = Package()
test_package.id = 'sqlalchemy'
test_package.description = "TDB"
test_package.releases = [
Release(created_date=datetime.datetime.now(), major_ver=1, minor_ver=2, build_ver=200),
Release(created_date=datetime.datetime.now() - datetime.timedelta(days=10)),
]
# Act
with unittest.mock.patch('pypi_org.services.package_service.get_package_by_id',
return_value=test_package):
with flask_app.test_request_context(path='/project/' + test_package.id):
resp: Response = package_details(test_package.id)
# Assert
assert b'sqlalchemy 1.2.200' in resp.data
def test_package_details_404(client):
# Arrange
bad_package_url = 'sqlalchemy_missing'
# Act
with unittest.mock.patch('pypi_org.services.package_service.get_package_by_id',
return_value=None):
resp: Response = client.get(bad_package_url)
assert resp.status_code == 404
```
|
{
"source": "jfcorbett/flask-sspi",
"score": 2
}
|
#### File: flask-sspi/flask_sspi_fake/_stubs.py
```python
import base64
import logging
logger = logging.getLogger(__name__)
from flask import Response
from flask import _request_ctx_stack as stack
from flask import make_response
from flask import request, session, g
from functools import wraps
from socket import gethostname
import os
import datetime
import uuid
_PKG_NAME = 'NTLM'
_sessions = {}
def _user_context_processor():
if hasattr(g, "current_user") and g.current_user is not None:
return dict(current_user=g.current_user)
else:
return {}
def init_sspi(app, service='HTTP', hostname=gethostname(), package='NTLM', add_context_processor=True):
'''
Configure the SSPI service, and validate the presence of the
appropriate information if necessary.
:param app: a flask application
:type app: flask.Flask
:param service: GSSAPI service name
:type service: str
:param hostname: hostname the service runs under
:type hostname: str
:param package: package the service runs under ('NTLM') ('Negotiate' is not yet implemented)
:type package: str
'''
global _SERVICE_NAME
_SERVICE_NAME = "%s@%s" % (service, hostname)
_PKG_NAME = package
if add_context_processor:
app.context_processor(_user_context_processor)
def _get_user_name():
return os.getlogin()
def _init_session():
logger.debug("Init session")
session['uuid'] = uuid.uuid4().bytes
_sessions[session['uuid']] = {}
_sessions[session['uuid']]['sa'] = 'sspi.ServerAuth(_PKG_NAME)' # one per session
# TODO cleanup other entries
def _sspi_handler(session):
global _sessions
if 'uuid' not in session or session['uuid'] not in _sessions:
_init_session()
if 'username' in _sessions[session['uuid']]:
        if 30 * 60 < (datetime.datetime.now() - _sessions[session['uuid']]['last_access']).total_seconds():
logger.debug('timed out.')
del _sessions[session['uuid']]
_init_session()
else:
logger.debug('Already authenticated')
_sessions[session['uuid']]['last_access'] = datetime.datetime.now()
return None
logger.debug("Negotiation complete")
return None
class Impersonate():
'''
    Class that creates a context for the impersonation of the client user.
    May be used to get their name or group membership. Could also be used
    to make trusted connections with databases (not tested).
Preferred usage:
with Impersonate():
...
'''
def open(self):
'''
        Start of the impersonation
'''
uuid = session['uuid']
self._sa = _sessions[uuid]['sa']
def close(self):
'''
        End of the impersonation
'''
if self._sa:
self._sa = None
def __del__(self):
if self._sa:
self.close()
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, tb):
self.close()
def requires_authentication(function):
'''
Require that the wrapped view function only be called by users
    authenticated with SSPI. The view function will have the authenticated
    user's principal passed to it as its first argument.
:param function: flask view function
:type function: function
:returns: decorated function
:rtype: function
'''
@wraps(function)
def decorated(*args, **kwargs):
ret = _sspi_handler(session)
if ret is not None:
return ret
else:
uuid = session['uuid']
if 'username' not in _sessions[session['uuid']]:
                # get username through impersonation
with Impersonate():
current_user = _get_user_name()
g.current_user = current_user
_sessions[uuid]['username'] = current_user
_sessions[uuid]['last_access'] = datetime.datetime.now()
else:
g.current_user = _sessions[uuid]['username']
# call route function
response = function(g.current_user, *args, **kwargs)
response = make_response(response)
return response
return decorated
def authenticate(function):
'''
Require that the wrapped view function only be called by users
authenticated with SSPI.
:param function: flask view function
:type function: function
:returns: decorated function
:rtype: function
'''
@wraps(function)
def decorated(*args, **kwargs):
ret = _sspi_handler(session)
if ret is not None:
return ret
else:
uuid = session['uuid']
if 'username' not in _sessions[session['uuid']]:
                # get username through impersonation
with Impersonate():
current_user = _get_user_name()
g.current_user = current_user
_sessions[uuid]['username'] = current_user
_sessions[uuid]['last_access'] = datetime.datetime.now()
else:
g.current_user = _sessions[uuid]['username']
# call route function
response = function(*args, **kwargs)
if response:
response = make_response(response)
return response
return decorated
```
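A minimal sketch of how these decorators would typically be wired into a Flask app (the app setup and route below are illustrative, not part of the stub module). `requires_authentication` passes the resolved user name as the view's first argument, while `authenticate` only populates `g.current_user`.
```python
# Illustrative wiring only; assumes the stub module above is importable.
from flask import Flask

app = Flask(__name__)
app.secret_key = 'change-me'  # the stub caches the authenticated user in the Flask session
init_sspi(app)

@app.route('/whoami')
@requires_authentication
def whoami(current_user):
    # The decorator injects the authenticated user name as the first argument.
    return f'Hello, {current_user}'
```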
|
{
"source": "jfcorsini/aviguardx",
"score": 4
}
|
#### File: aviguardx/FRCNN_predict/predict.py
```python
from FRCNN_predict.frcnn import FRCNN
from PIL import Image
frcnn = FRCNN()
def run_prediction(image_path, show=False):
image = Image.open('./results/' + image_path + '.jpeg')
try:
image = image.convert("RGB")
    except Exception:
        print('Open Error! Try again!')
else:
r_image, prediction = frcnn.detect_image(image)
r_image.save('./results/' + image_path + '_predicted.jpeg')
if show:
r_image.show()
if len(prediction) == 0:
print("No object")
else:
print('left, top, right, bottom coordinates:')
print(prediction)
return prediction
```
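A hedged driver example for `run_prediction`: it assumes an image has already been written to `./results/<name>.jpeg`, as the path convention above requires, and that the FRCNN weights load successfully. The image name is a placeholder.
```python
# Illustrative invocation only; 'frame_0001' stands in for a real result image name.
if __name__ == '__main__':
    boxes = run_prediction('frame_0001', show=False)
    print(f'{len(boxes)} detection(s): {boxes}')
```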
#### File: processing/passiveRadar/config.py
```python
import yaml
import fractions
import numpy as np
def getConfiguration():
'''sets up parameters for passive radar processing'''
config_file = open('processing/PRconfig.yaml', 'r')
config = yaml.safe_load(config_file)
config_file.close()
# get the cpi length in samples - should be a power of 2 for computational
# efficiency.
config['cpi_samples'] = nextpow2(config['input_sample_rate']
* config['cpi_seconds_nominal'])
    # The IF sample rate is taken directly from the input sample rate (no resampling is applied here)
config['IF_sample_rate'] = config['input_sample_rate']
# as a result of the approximate rational resampling, the actual cpi
# duration differs from the nominal value by a small amount.
config['cpi_seconds_actual'] = config['cpi_samples'] \
/ config['input_sample_rate']
config['doppler_cell_width'] = 1 / config['cpi_seconds_actual']
# the width of each range cell in km
config['range_cell_width'] = 2.998e5 / config['IF_sample_rate']
    # number of range cells needed to obtain the desired range
config['num_range_cells'] = round(config['max_range_nominal']
/ config['range_cell_width'])
# true bistatic range
config['max_range_actual'] = config['num_range_cells'] \
* config['range_cell_width']
# number of doppler cells - is a power of 2 for computational efficiency
config['num_doppler_cells'] = nearestpow2(2 * config['max_doppler_nominal']
* config['cpi_seconds_actual'])
# actual maximum doppler shift
config['max_doppler_actual'] = config['num_doppler_cells'] \
/ (2 * config['cpi_seconds_actual'])
# get the chunk sizes to be used for processing. This depends on whether
# the CPI sections overlap
if config['overlap_cpi']:
config['input_chunk_length'] = int(np.floor(config['cpi_samples']))
if config['input_chunk_length'] % 2 != 0:
config['input_chunk_length'] -= 1
config['output_chunk_length'] = config['cpi_samples'] // 2
config['window_overlap'] = config['cpi_samples'] // 4
config['frame_interval'] = config['cpi_seconds_actual'] / 2
else:
config['input_chunk_length'] = int(np.floor(config['cpi_samples']) * 2)
config['output_chunk_length'] = config['cpi_samples']
config['frame_interval'] = config['cpi_seconds_actual']
config['range_doppler_map_fname'] = config['output_fname'] + '.zarr'
config['meta_fname'] = config['output_fname'] + '.npz'
return config
def nextpow2(i):
n = 1
while n < i:
n *= 2
return n
def nearestpow2(i):
nextp2 = nextpow2(i)
prevp2 = nextp2 // 2
if (nextp2 - i) < (i - prevp2):
return nextp2
else:
return prevp2
```
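A quick illustration of the two power-of-two helpers above; the numbers are arbitrary and only demonstrate the rounding behaviour.
```python
# nextpow2 rounds up to the next power of two; nearestpow2 picks whichever power is closer.
assert nextpow2(1_000_000) == 1_048_576       # 2**20
assert nearestpow2(700_000) == 524_288        # 2**19 is closer than 2**20
assert nearestpow2(800_000) == 1_048_576      # 2**20 is closer than 2**19
```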
#### File: processing/passiveRadar/data_processor.py
```python
import numpy as np
import zarr
import dask.array as da
import scipy.signal as signal
from processing.passiveRadar.signal_utils import find_channel_offset, \
deinterleave_IQ, preprocess_kerberossdr_input
from processing.passiveRadar.clutter_removal import LS_Filter_Multiple
from processing.passiveRadar.range_doppler_processing import fast_xambg
def process_data(config, folder_name):
file1 = './data/' + folder_name + '/output1'
file2 = './data/' + folder_name + '/output2'
refInputFile = preprocess_kerberossdr_input(np.fromfile(
open(file1), dtype=np.float32))
svrInputFile = preprocess_kerberossdr_input(np.fromfile(
open(file2), dtype=np.float32))
# get the first few hundred thousand samples of data and use it to
# estimate the offset between the channels
refc1 = refInputFile[0:20*config['cpi_samples']]
srvc1 = svrInputFile[0:20*config['cpi_samples']]
offset = find_channel_offset(refc1, srvc1, 1, 5000000)
# Convert to dask array after de-interleave IQ samples
if offset > 0:
ref_data = da.from_array(deinterleave_IQ(refInputFile[offset:]),
chunks=(config['input_chunk_length']//2,))
srv_data = da.from_array(deinterleave_IQ(svrInputFile[:-offset]),
chunks=(config['input_chunk_length']//2,))
elif offset < 0:
ref_data = da.from_array(deinterleave_IQ(refInputFile[:offset]),
chunks=(config['input_chunk_length']//2,))
srv_data = da.from_array(deinterleave_IQ(svrInputFile[-offset:]),
chunks=(config['input_chunk_length']//2,))
else:
ref_data = da.from_array(deinterleave_IQ(refInputFile),
chunks=(config['input_chunk_length']//2,))
srv_data = da.from_array(deinterleave_IQ(svrInputFile),
chunks=(config['input_chunk_length']//2,))
print(f"Corrected a sample offset of {offset} samples between channels")
# trim the data to an integer number of block lengths
N_chunks_ref = ref_data.shape[0] // (config['input_chunk_length']//2)
N_chunks_srv = srv_data.shape[0] // (config['input_chunk_length']//2)
N_chunks = min(N_chunks_ref, N_chunks_srv, config['num_frames']) - 1
ref_data = ref_data[0:N_chunks*config['input_chunk_length']//2]
srv_data = srv_data[0:N_chunks*config['input_chunk_length']//2]
# apply the block least squares filter
srv_cleaned = da.map_blocks(LS_Filter_Multiple,
ref_data,
srv_data,
config['num_range_cells'],
config['IF_sample_rate'],
# remove clutter at 0Hz, +/-1Hz, +/-2Hz
[0, 1, -1, 2, -2],
dtype=np.complex64,
chunks=(config['output_chunk_length'],))
if config['overlap_cpi']:
# pad chunks with overlapping sections
ref_data = da.overlap.overlap(
ref_data, depth=config['window_overlap'], boundary=0)
srv_cleaned = da.overlap.overlap(
srv_cleaned, depth=config['window_overlap'], boundary=0)
# Compute Dask operation and convert to Numpy array
ref_data = ref_data.compute()
srv_cleaned = srv_cleaned.compute()
# reshape to N_chunks, chunk_length
ref_data = ref_data.reshape(N_chunks, -1)
srv_cleaned = srv_cleaned.reshape(N_chunks, -1)
window = signal.get_window(('kaiser', 5.0), config['cpi_samples'])
# use the cross-ambiguity function to compute range-doppler maps
xambg = fast_xambg(ref_data,
srv_cleaned,
config['num_range_cells'],
config['num_doppler_cells'],
config['cpi_samples'],
window)
return xambg
```
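The channel-alignment step above discards `offset` samples from the start of one channel and from the end of the other so that both streams stay the same length. A tiny NumPy-only illustration of that slicing (unrelated to the actual radar data):
```python
import numpy as np

ref = np.arange(10)
srv = np.arange(10)
offset = 3  # toy example: one channel leads the other by 3 samples
ref_aligned, srv_aligned = ref[offset:], srv[:-offset]
assert len(ref_aligned) == len(srv_aligned) == 7
```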
|
{
"source": "jfcoz/azure-cli",
"score": 2
}
|
#### File: command_modules/acr/build_task.py
```python
from msrest.exceptions import ValidationError
from msrestazure.azure_exceptions import CloudError
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands import LongRunningOperation
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
BuildTask,
SourceRepositoryProperties,
SourceControlAuthInfo,
PlatformProperties,
DockerBuildStep,
BuildTaskBuildRequest,
BuildTaskUpdateParameters,
SourceRepositoryUpdateParameters,
DockerBuildStepUpdateParameters,
OsType
)
from ._utils import validate_managed_registry
from ._stream_utils import stream_logs
from ._build_polling import get_build_with_polling
logger = get_logger(__name__)
BUILD_TASKS_NOT_SUPPORTED = 'Build Tasks are only supported for managed registries.'
DEFAULT_TOKEN_TYPE = 'PAT'
def acr_build_task_create(cmd, # pylint: disable=too-many-locals
client,
build_task_name,
registry_name,
repository_url,
image_names,
git_access_token,
alias=None,
status='Enabled',
os_type=OsType.linux.value,
cpu=2,
timeout=3600,
commit_trigger_enabled=True,
branch='master',
no_push=False,
no_cache=False,
docker_file_path="Dockerfile",
build_arg=None,
secret_build_arg=None,
base_image_trigger='Runtime',
resource_group_name=None):
registry, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
source_control_type = 'VisualStudioTeamService'
if 'GITHUB.COM' in repository_url.upper():
source_control_type = 'GitHub'
build_task_create_parameters = BuildTask(
location=registry.location,
alias=alias if alias else build_task_name,
source_repository=SourceRepositoryProperties(
source_control_type=source_control_type,
repository_url=repository_url,
is_commit_trigger_enabled=commit_trigger_enabled,
source_control_auth_properties=SourceControlAuthInfo(
token=git_access_token,
token_type=DEFAULT_TOKEN_TYPE,
refresh_token='',
scope='repo',
expires_in=1313141
)
),
platform=PlatformProperties(os_type=os_type, cpu=cpu),
status=status,
timeout=timeout
)
try:
build_task = LongRunningOperation(cmd.cli_ctx)(
client.create(resource_group_name=resource_group_name,
registry_name=registry_name,
build_task_name=build_task_name,
build_task_create_parameters=build_task_create_parameters))
except ValidationError as e:
raise CLIError(e)
from ._client_factory import cf_acr_build_steps
client_build_steps = cf_acr_build_steps(cmd.cli_ctx)
docker_build_step = DockerBuildStep(
branch=branch,
image_names=image_names,
is_push_enabled=not no_push,
no_cache=no_cache,
docker_file_path=docker_file_path,
build_arguments=(build_arg if build_arg else []) + (secret_build_arg if secret_build_arg else []),
base_image_trigger=base_image_trigger
)
try:
build_step = LongRunningOperation(cmd.cli_ctx)(
client_build_steps.create(resource_group_name=resource_group_name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=_get_build_step_name(build_task_name),
properties=docker_build_step))
setattr(build_task, 'properties', build_step.properties)
except ValidationError as e:
raise CLIError(e)
return build_task
def acr_build_task_show(cmd,
client,
build_task_name,
registry_name,
with_secure_properties=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
build_task = client.get(resource_group_name, registry_name, build_task_name)
from ._client_factory import cf_acr_build_steps
client_build_steps = cf_acr_build_steps(cmd.cli_ctx)
try:
build_step = client_build_steps.get(resource_group_name,
registry_name,
build_task_name,
_get_build_step_name(build_task_name))
setattr(build_task, 'properties', build_step.properties)
except CloudError as e:
if e.status_code != 404:
raise
logger.warning("Could not get build task details. Build task basic information is printed.")
if not with_secure_properties:
return build_task
try:
source_repository = client.list_source_repository_properties(resource_group_name,
registry_name,
build_task_name)
setattr(getattr(build_task, 'source_repository'),
'source_control_auth_properties',
getattr(source_repository, 'source_control_auth_properties'))
except CloudError as e:
if e.status_code != 403:
raise
logger.warning("No permission to get source repository secure properties.")
try:
build_arguments = client_build_steps.list_build_arguments(resource_group_name=resource_group_name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=_get_build_step_name(build_task_name))
setattr(getattr(build_task, 'properties'), 'buildArguments', list(build_arguments))
except CloudError as e:
if e.status_code != 403:
raise
logger.warning("No permission to get secure build arguments.")
return build_task
def acr_build_task_list(cmd,
client,
registry_name,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
return client.list(resource_group_name, registry_name)
def acr_build_task_delete(cmd,
client,
build_task_name,
registry_name,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
return client.delete(resource_group_name, registry_name, build_task_name)
def acr_build_task_update(cmd, # pylint: disable=too-many-locals
client,
build_task_name,
registry_name,
resource_group_name=None,
# build task parameters
alias=None,
status=None,
os_type=None,
cpu=None,
timeout=None,
repository_url=None,
commit_trigger_enabled=None,
git_access_token=None,
# build step parameters
branch=None,
image_names=None,
no_push=None,
no_cache=None,
docker_file_path=None,
build_arg=None,
secret_build_arg=None,
base_image_trigger=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
build_task = client.get(resource_group_name, registry_name, build_task_name)
# pylint: disable=too-many-boolean-expressions
if alias or status or os_type or cpu or timeout or repository_url or commit_trigger_enabled or git_access_token:
build_task_update_parameters = BuildTaskUpdateParameters()
build_task_update_parameters.alias = alias
build_task_update_parameters.status = status
build_task_update_parameters.platform = PlatformProperties(os_type=os_type or build_task.platform.os_type,
cpu=cpu)
build_task_update_parameters.timeout = timeout
build_task_update_parameters.source_repository = SourceRepositoryUpdateParameters(
source_control_auth_properties=SourceControlAuthInfo(
token=git_access_token, token_type=DEFAULT_TOKEN_TYPE) if git_access_token else None,
is_commit_trigger_enabled=commit_trigger_enabled)
build_task = LongRunningOperation(cmd.cli_ctx)(
client.update(resource_group_name=resource_group_name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=_get_build_step_name(build_task_name),
build_task_update_parameters=build_task_update_parameters))
from ._client_factory import cf_acr_build_steps
client_build_steps = cf_acr_build_steps(cmd.cli_ctx)
build_step = None
# pylint: disable=too-many-boolean-expressions
if branch or image_names or no_push is not None or no_cache is not None or \
docker_file_path or build_arg or secret_build_arg or base_image_trigger:
build_step_update_parameters = DockerBuildStepUpdateParameters()
build_step_update_parameters.branch = branch
build_step_update_parameters.image_names = image_names
build_step_update_parameters.is_push_enabled = (not no_push) if no_push is not None else None
build_step_update_parameters.no_cache = no_cache
build_step_update_parameters.docker_file_path = docker_file_path
build_step_update_parameters.build_arguments = build_arg + secret_build_arg \
if build_arg and secret_build_arg else build_arg or secret_build_arg
build_step_update_parameters.base_image_trigger = base_image_trigger
try:
build_step = LongRunningOperation(cmd.cli_ctx)(
client_build_steps.update(resource_group_name=resource_group_name,
registry_name=registry_name,
build_task_name=build_task_name,
step_name=_get_build_step_name(build_task_name),
properties=build_step_update_parameters))
except CloudError as e:
if e.status_code != 404:
raise
logger.warning("Could not update build task details. Build task basic information is updated.")
# If build step is not updated, get it here
if not build_step:
try:
build_step = client_build_steps.get(resource_group_name,
registry_name,
build_task_name,
_get_build_step_name(build_task_name))
except CloudError as e:
if e.status_code != 404:
raise
if build_step:
setattr(build_task, 'properties', build_step.properties)
return build_task
def acr_build_task_update_build(cmd,
client,
build_id,
registry_name,
no_archive=None,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
is_archive_enabled = not no_archive if no_archive is not None else None
return client.update(resource_group_name=resource_group_name,
registry_name=registry_name,
build_id=build_id,
is_archive_enabled=is_archive_enabled)
def _get_build_step_name(build_task_name):
return '{}StepName'.format(build_task_name)
def acr_build_task_run(cmd,
client, # cf_acr_builds
build_task_name,
registry_name,
no_format=False,
no_logs=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
from ._client_factory import cf_acr_registries_builds
client_registries = cf_acr_registries_builds(cmd.cli_ctx)
queued_build = LongRunningOperation(cmd.cli_ctx)(
client_registries.queue_build(resource_group_name,
registry_name,
BuildTaskBuildRequest(build_task_name=build_task_name)))
build_id = queued_build.build_id
logger.warning("Queued a build with ID: %s", build_id)
logger.warning("Waiting for agent...")
if no_logs:
return get_build_with_polling(client, build_id, registry_name, resource_group_name)
return stream_logs(client, build_id, registry_name, resource_group_name, no_format, True)
def acr_build_task_show_build(cmd,
client, # cf_acr_builds
build_id,
registry_name,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
return client.get(resource_group_name, registry_name, build_id)
def acr_build_task_list_builds(cmd,
client, # cf_acr_builds
registry_name,
top=15,
build_task_name=None,
build_status=None,
image=None,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
filter_str = None
filter_str = _add_build_filter(filter_str, 'BuildTaskName', build_task_name, 'eq')
filter_str = _add_build_filter(filter_str, 'Status', build_status, 'eq')
if image:
from .repository import get_image_digest
try:
repository, _, manifest = get_image_digest(cmd.cli_ctx, registry_name, image)
filter_str = _add_build_filter(
filter_str, 'OutputImageManifests', '{}@{}'.format(repository, manifest), 'contains')
except CLIError as e:
raise CLIError("Could not find image '{}'. {}".format(image, e))
return client.list(resource_group_name, registry_name, filter=filter_str, top=top)
def _add_build_filter(orig_filter, name, value, operator):
if not value:
return orig_filter
if operator == 'contains':
new_filter_str = "contains({}, '{}')".format(name, value)
elif operator == 'eq':
new_filter_str = "{} eq '{}'".format(name, value)
else:
raise ValueError("Allowed filter operator: {}".format(['contains', 'eq']))
return "{} and {}".format(orig_filter, new_filter_str) if orig_filter else new_filter_str
def acr_build_task_logs(cmd,
client, # cf_acr_builds
registry_name,
build_id=None,
build_task_name=None,
image=None,
no_format=False,
resource_group_name=None):
_, resource_group_name = validate_managed_registry(
cmd.cli_ctx, registry_name, resource_group_name, BUILD_TASKS_NOT_SUPPORTED)
if not build_id:
# show logs for the last build
paged_builds = acr_build_task_list_builds(cmd,
client,
registry_name,
top=1,
build_task_name=build_task_name,
image=image)
try:
build_id = paged_builds.get(0)[0].build_id
logger.warning(_get_list_builds_message(base_message="Showing logs of the last created build",
build_task_name=build_task_name,
image=image))
logger.warning("Build ID: %s", build_id)
except (AttributeError, KeyError, TypeError, IndexError):
raise CLIError(_get_list_builds_message(base_message="Could not find the last created build",
build_task_name=build_task_name,
image=image))
return stream_logs(client, build_id, registry_name, resource_group_name, no_format)
def _get_list_builds_message(base_message, build_task_name=None, image=None):
if build_task_name:
base_message = "{} for build task '{}'".format(base_message, build_task_name)
if image:
base_message = "{} for image '{}'".format(base_message, image)
return "{}.".format(base_message)
```
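`_add_build_filter` composes OData-style filter strings incrementally; a quick illustration of the strings it produces (the values are made up):
```python
f = _add_build_filter(None, 'BuildTaskName', 'nightly', 'eq')
f = _add_build_filter(f, 'Status', 'Succeeded', 'eq')
assert f == "BuildTaskName eq 'nightly' and Status eq 'Succeeded'"
```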
#### File: command_modules/acs/_format.py
```python
from collections import OrderedDict
def aks_list_table_format(results):
""""Format a list of managed clusters as summary results for display with "-o table"."""
return [_aks_table_format(r) for r in results]
def osa_list_table_format(results):
""""Format a list of OpenShift managed clusters as summary results for display with "-o table"."""
return [_osa_table_format(r) for r in results]
def aks_show_table_format(result):
"""Format a managed cluster as summary results for display with "-o table"."""
return [_aks_table_format(result)]
def _aks_table_format(result):
from jmespath import compile as compile_jmes, Options
parsed = compile_jmes("""{
name: name,
location: location,
resourceGroup: resourceGroup,
kubernetesVersion: kubernetesVersion,
provisioningState: provisioningState,
fqdn: fqdn
}""")
# use ordered dicts so headers are predictable
return parsed.search(result, Options(dict_cls=OrderedDict))
def _osa_table_format(result):
from jmespath import compile as compile_jmes, Options
parsed = compile_jmes("""{
name: name,
location: location,
resourceGroup: resourceGroup,
openShiftVersion: openShiftVersion,
provisioningState: provisioningState,
fqdn: fqdn
}""")
# use ordered dicts so headers are predictable
return parsed.search(result, Options(dict_cls=OrderedDict))
def aks_upgrades_table_format(result):
"""Format get-upgrades results as a summary for display with "-o table"."""
from jmespath import compile as compile_jmes, Options
# This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.
parsed = compile_jmes("""{
name: name,
resourceGroup: resourceGroup,
masterVersion: controlPlaneProfile.kubernetesVersion || `unknown`,
nodePoolVersion: agentPoolProfiles[0].kubernetesVersion || `unknown`,
upgrades: controlPlaneProfile.upgrades || [`None available`] | sort_versions(@) | join(`, `, @)
}""")
# use ordered dicts so headers are predictable
return parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions()))
def aks_versions_table_format(result):
"""Format get-versions results as a summary for display with "-o table"."""
from jmespath import compile as compile_jmes, Options
parsed = compile_jmes("""orchestrators[].{
kubernetesVersion: orchestratorVersion,
upgrades: upgrades[].orchestratorVersion || [`None available`] | sort_versions(@) | join(`, `, @)
}""")
# use ordered dicts so headers are predictable
results = parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions()))
return sorted(results, key=lambda x: version_to_tuple(x.get('kubernetesVersion')), reverse=True)
def version_to_tuple(v):
"""Quick-and-dirty sort function to handle simple semantic versions like 1.7.12 or 1.8.7."""
return tuple(map(int, (v.split('.'))))
def _custom_functions():
from jmespath import functions
class CustomFunctions(functions.Functions): # pylint: disable=too-few-public-methods
@functions.signature({'types': ['array']})
def _func_sort_versions(self, s): # pylint: disable=no-self-use
"""Custom JMESPath `sort_versions` function that sorts an array of strings as software versions."""
try:
return sorted(s, key=version_to_tuple)
except (TypeError, ValueError): # if it wasn't sortable, return the input so the pipeline continues
return s
return CustomFunctions()
```
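The custom `sort_versions` JMESPath function relies on `version_to_tuple` so that versions sort numerically rather than lexicographically; a quick illustration:
```python
versions = ['1.10.3', '1.9.11', '1.2.200']
assert sorted(versions) == ['1.10.3', '1.2.200', '1.9.11']                      # plain string sort is wrong
assert sorted(versions, key=version_to_tuple) == ['1.2.200', '1.9.11', '1.10.3']
```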
#### File: tests/latest/test_configure.py
```python
import unittest
class TestConfigure(unittest.TestCase):
def test_configure_output_options(self):
from azure.cli.core._output import AzOutputProducer
from azure.cli.core.mock import DummyCli
from azure.cli.command_modules.configure._consts import OUTPUT_LIST
output_producer = AzOutputProducer(DummyCli())
cli_output_options = set(output_producer._FORMAT_DICT.keys())
configure_output_options = set(item["name"] for item in OUTPUT_LIST)
self.assertEqual(cli_output_options, configure_output_options,
"\n{}'s output options: {}\ndon't match az configure's output options ({})."
.format(AzOutputProducer.__name__, cli_output_options, configure_output_options))
if __name__ == '__main__':
unittest.main()
```
#### File: command_modules/eventgrid/custom.py
```python
import re
from six.moves.urllib.parse import quote # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.tools import parse_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.eventgrid.models import (
EventSubscription,
EventSubscriptionUpdateParameters,
WebHookEventSubscriptionDestination,
RetryPolicy,
EventHubEventSubscriptionDestination,
StorageQueueEventSubscriptionDestination,
HybridConnectionEventSubscriptionDestination,
StorageBlobDeadLetterDestination,
EventSubscriptionFilter)
logger = get_logger(__name__)
EVENTGRID_NAMESPACE = "Microsoft.EventGrid"
RESOURCES_NAMESPACE = "Microsoft.Resources"
SUBSCRIPTIONS = "subscriptions"
RESOURCE_GROUPS = "resourcegroups"
EVENTGRID_TOPICS = "topics"
WEBHOOK_DESTINATION = "webhook"
EVENTHUB_DESTINATION = "eventhub"
STORAGEQUEUE_DESTINATION = "storagequeue"
HYBRIDCONNECTION_DESTINATION = "hybridconnection"
GLOBAL = "global"
def cli_topic_list(
client,
resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def cli_topic_create_or_update(
client,
resource_group_name,
topic_name,
location,
tags=None):
async_topic_create = client.create_or_update(
resource_group_name,
topic_name,
location,
tags)
created_topic = async_topic_create.result()
return created_topic
def cli_eventgrid_event_subscription_create( # pylint: disable=too-many-locals
cmd,
client,
event_subscription_name,
endpoint,
resource_id=None,
source_resource_id=None,
resource_group_name=None,
topic_name=None,
endpoint_type=WEBHOOK_DESTINATION,
included_event_types=None,
subject_begins_with=None,
subject_ends_with=None,
is_subject_case_sensitive=False,
max_delivery_attempts=30,
event_ttl=1440,
deadletter_endpoint=None,
labels=None):
scope = _get_scope_for_event_subscription(
cli_ctx=cmd.cli_ctx,
source_resource_id=source_resource_id,
resource_id=resource_id,
topic_name=topic_name,
resource_group_name=resource_group_name)
# Construct RetryPolicy based on max_delivery_attempts and event_ttl
max_delivery_attempts = int(max_delivery_attempts)
event_ttl = int(event_ttl)
_validate_retry_policy(max_delivery_attempts, event_ttl)
retry_policy = RetryPolicy(max_delivery_attempts=max_delivery_attempts, event_time_to_live_in_minutes=event_ttl)
destination = _get_endpoint_destination(endpoint_type, endpoint)
event_subscription_filter = EventSubscriptionFilter(
subject_begins_with=subject_begins_with,
subject_ends_with=subject_ends_with,
included_event_types=included_event_types,
is_subject_case_sensitive=is_subject_case_sensitive)
deadletter_destination = None
if deadletter_endpoint is not None:
deadletter_destination = _get_deadletter_destination(deadletter_endpoint)
event_subscription_info = EventSubscription(
destination=destination,
filter=event_subscription_filter,
labels=labels,
retry_policy=retry_policy,
dead_letter_destination=deadletter_destination)
_warn_if_manual_handshake_needed(endpoint_type, endpoint)
return client.create_or_update(
scope,
event_subscription_name,
event_subscription_info).result()
def cli_eventgrid_event_subscription_delete(
cmd,
client,
event_subscription_name,
resource_id=None,
source_resource_id=None,
resource_group_name=None,
topic_name=None):
scope = _get_scope_for_event_subscription(
cli_ctx=cmd.cli_ctx,
source_resource_id=source_resource_id,
resource_id=resource_id,
topic_name=topic_name,
resource_group_name=resource_group_name)
async_event_subscription_delete = client.delete(
scope,
event_subscription_name)
return async_event_subscription_delete.result()
def event_subscription_setter(
cmd,
client,
parameters,
event_subscription_name,
source_resource_id=None,
resource_id=None,
resource_group_name=None,
topic_name=None):
scope = _get_scope_for_event_subscription(
cli_ctx=cmd.cli_ctx,
source_resource_id=source_resource_id,
resource_id=resource_id,
topic_name=topic_name,
resource_group_name=resource_group_name)
async_event_subscription_update = client.update(
scope,
event_subscription_name,
parameters)
updated_event_subscription = async_event_subscription_update.result()
return updated_event_subscription
def cli_eventgrid_event_subscription_get(
cmd,
client,
event_subscription_name,
source_resource_id=None,
resource_id=None,
resource_group_name=None,
topic_name=None,
include_full_endpoint_url=False):
scope = _get_scope_for_event_subscription(
cli_ctx=cmd.cli_ctx,
source_resource_id=source_resource_id,
resource_id=resource_id,
topic_name=topic_name,
resource_group_name=resource_group_name)
retrieved_event_subscription = client.get(scope, event_subscription_name)
destination = retrieved_event_subscription.destination
if include_full_endpoint_url and isinstance(destination, WebHookEventSubscriptionDestination):
full_endpoint_url = client.get_full_url(scope, event_subscription_name)
destination.endpoint_url = full_endpoint_url.endpoint_url
return retrieved_event_subscription
def cli_event_subscription_list( # pylint: disable=too-many-return-statements
client,
resource_id=None,
source_resource_id=None,
topic_name=None,
resource_group_name=None,
location=None,
topic_type_name=None):
if source_resource_id is not None:
# If Source Resource ID is specified, we need to list event subscriptions for that particular resource.
# Since a full resource ID is specified, it should override all other defaults such as default location and RG
# No other parameters must be specified
if (topic_type_name is not None or resource_id is not None):
raise CLIError('usage error: Since --source-resource-id is specified, none of the other parameters must '
'be specified.')
return _list_event_subscriptions_by_resource_id(client, source_resource_id)
if resource_id is not None:
# DEPRECATED
# If resource ID is specified, we need to list event subscriptions for that particular resource.
# Since a full resource ID is specified, it should override all other defaults such as default location and RG
# No other parameters must be specified
if topic_type_name is not None:
raise CLIError('usage error: Since --resource-id is specified, none of the other parameters must '
'be specified.')
return _list_event_subscriptions_by_resource_id(client, resource_id)
if topic_name:
# DEPRECATED
if resource_group_name is None:
raise CLIError('Since --topic-name is specified, --resource-group must also be specified.')
return client.list_by_resource(
resource_group_name,
EVENTGRID_NAMESPACE,
EVENTGRID_TOPICS,
topic_name)
if location is None:
        # Since resource-id was not specified, location must be specified (e.g. "westus2" or "global"); if not, raise an error.
raise CLIError('usage error: --source-resource-id ID | --location LOCATION'
' [--resource-group RG] [--topic-type-name TOPIC_TYPE_NAME]')
if topic_type_name is None:
# No topic-type is specified: return event subscriptions across all topic types for this location.
if location.lower() == GLOBAL.lower():
if resource_group_name:
return client.list_global_by_resource_group(resource_group_name)
return client.list_global_by_subscription()
if resource_group_name:
return client.list_regional_by_resource_group(resource_group_name, location)
return client.list_regional_by_subscription(location)
# Topic type name is specified
if location.lower() == GLOBAL.lower():
if not _is_topic_type_global_resource(topic_type_name):
raise CLIError('Invalid usage: Global cannot be specified for the location '
'as the specified topic type is a regional topic type with '
'regional event subscriptions. Specify a location value such '
'as westus. Global can be used only for global topic types: '
'Microsoft.Resources.Subscriptions and Microsoft.Resources.ResourceGroups.')
if resource_group_name:
return client.list_global_by_resource_group_for_topic_type(resource_group_name, topic_type_name)
return client.list_global_by_subscription_for_topic_type(topic_type_name)
if resource_group_name:
return client.list_regional_by_resource_group_for_topic_type(resource_group_name, location, topic_type_name)
return client.list_regional_by_subscription_for_topic_type(location, topic_type_name)
def _get_scope(
cli_ctx,
resource_group_name,
provider_namespace,
resource_type,
resource_name):
subscription_id = get_subscription_id(cli_ctx)
if provider_namespace == RESOURCES_NAMESPACE:
if resource_group_name:
scope = (
'/subscriptions/{}/resourceGroups/{}'
.format(quote(subscription_id),
quote(resource_group_name)))
else:
scope = (
'/subscriptions/{}'
.format(quote(subscription_id)))
else:
scope = (
'/subscriptions/{}/resourceGroups/{}/providers/{}/{}/{}'
.format(quote(subscription_id),
quote(resource_group_name),
quote(provider_namespace),
quote(resource_type),
quote(resource_name)))
return scope
def _get_scope_for_event_subscription(
cli_ctx,
resource_id,
source_resource_id,
topic_name,
resource_group_name):
if all([resource_id, source_resource_id]):
raise CLIError('usage error: specify either "--resource-id" or "--source-resource-id", not both.')
if all([resource_id, topic_name]):
raise CLIError('usage error: specify either "--topic-name" or "--resource-id", not both.')
if all([source_resource_id, topic_name]):
raise CLIError('usage error: specify either "--topic-name" or "--source-resource-id", not both.')
# A default resource Group Name could have been configured
# but if --resource-id or --source-resource-id is provided, it always overrides it.
if source_resource_id:
# Source Resource ID is provided, use that as the scope for the event subscription.
# This is the latest non-deprecated way of specifying the source resource.
scope = source_resource_id
elif resource_id:
# Deprecated
scope = resource_id
elif topic_name:
# DEPRECATED: Topic name is provided, use the topic and resource group to build a scope for the user topic
if resource_group_name is None:
raise CLIError("When --topic-name is specified, the --resource-group-name must also be specified.")
scope = _get_scope(cli_ctx, resource_group_name, EVENTGRID_NAMESPACE, EVENTGRID_TOPICS, topic_name)
elif resource_group_name:
# DEPRECATED: Event subscription to a resource group.
scope = _get_scope(cli_ctx, resource_group_name, RESOURCES_NAMESPACE, RESOURCE_GROUPS, resource_group_name)
else:
# DEPRECATED
logger.warning('This default option uses Azure subscription as the source resource.'
' This is deprecated and will be removed in a future release.'
' Use `--source-resource-id /subscriptions/{subid}` instead.')
scope = _get_scope(cli_ctx, None, RESOURCES_NAMESPACE, SUBSCRIPTIONS, get_subscription_id(cli_ctx))
return scope
def event_subscription_getter(
cmd,
client,
event_subscription_name,
source_resource_id=None,
resource_id=None,
resource_group_name=None,
topic_name=None):
scope = _get_scope_for_event_subscription(
cli_ctx=cmd.cli_ctx,
source_resource_id=source_resource_id,
resource_id=resource_id,
topic_name=topic_name,
resource_group_name=resource_group_name)
return client.get(scope, event_subscription_name)
def update_event_subscription(
instance,
endpoint=None,
endpoint_type=WEBHOOK_DESTINATION,
subject_begins_with=None,
subject_ends_with=None,
included_event_types=None,
labels=None,
deadletter_endpoint=None):
event_subscription_destination = None
deadletter_destination = None
event_subscription_labels = instance.labels
event_subscription_filter = instance.filter
retry_policy = instance.retry_policy
if endpoint_type.lower() != WEBHOOK_DESTINATION.lower() and endpoint is None:
raise CLIError('Invalid usage: Since --endpoint-type is specified, a valid endpoint must also be specified.')
if endpoint is not None:
event_subscription_destination = _get_endpoint_destination(endpoint_type, endpoint)
if deadletter_endpoint is not None:
deadletter_destination = _get_deadletter_destination(deadletter_endpoint)
if subject_begins_with is not None:
event_subscription_filter.subject_begins_with = subject_begins_with
if subject_ends_with is not None:
event_subscription_filter.subject_ends_with = subject_ends_with
if included_event_types is not None:
event_subscription_filter.included_event_types = included_event_types
if labels is not None:
event_subscription_labels = labels
params = EventSubscriptionUpdateParameters(
destination=event_subscription_destination,
filter=event_subscription_filter,
labels=event_subscription_labels,
retry_policy=retry_policy,
dead_letter_destination=deadletter_destination
)
return params
def _get_endpoint_destination(endpoint_type, endpoint):
if endpoint_type.lower() == WEBHOOK_DESTINATION.lower():
destination = WebHookEventSubscriptionDestination(endpoint_url=endpoint)
elif endpoint_type.lower() == EVENTHUB_DESTINATION.lower():
destination = EventHubEventSubscriptionDestination(resource_id=endpoint)
elif endpoint_type.lower() == HYBRIDCONNECTION_DESTINATION.lower():
destination = HybridConnectionEventSubscriptionDestination(resource_id=endpoint)
elif endpoint_type.lower() == STORAGEQUEUE_DESTINATION.lower():
destination = _get_storage_queue_destination(endpoint)
return destination
def _get_storage_queue_destination(endpoint):
# Supplied endpoint would be in the following format:
# /subscriptions/.../storageAccounts/sa1/queueServices/default/queues/{queueName}))
# and we need to break it up into:
# /subscriptions/.../storageAccounts/sa1 and queueName
queue_items = re.split(
"/queueServices/default/queues/", endpoint, flags=re.IGNORECASE)
if len(queue_items) != 2 or queue_items[0] is None or queue_items[1] is None:
raise CLIError('Argument Error: Expected format of --endpoint for storage queue is:' +
'/subscriptions/id/resourceGroups/rg/providers/Microsoft.Storage/' +
'storageAccounts/sa1/queueServices/default/queues/queueName')
return StorageQueueEventSubscriptionDestination(resource_id=queue_items[0], queue_name=queue_items[1])
def _get_deadletter_destination(deadletter_endpoint):
blob_items = re.split(
"/blobServices/default/containers/", deadletter_endpoint, flags=re.IGNORECASE)
if len(blob_items) != 2 or blob_items[0] is None or blob_items[1] is None:
raise CLIError('Argument Error: Expected format of --deadletter-endpoint is:' +
'/subscriptions/id/resourceGroups/rg/providers/Microsoft.Storage/' +
'storageAccounts/sa1/blobServices/default/containers/containerName')
return StorageBlobDeadLetterDestination(resource_id=blob_items[0], blob_container_name=blob_items[1])
def _validate_retry_policy(max_delivery_attempts, event_ttl):
if max_delivery_attempts < 1 or max_delivery_attempts > 30:
raise CLIError('--max-delivery-attempts should be a number between 1 and 30.')
if event_ttl < 1 or event_ttl > 1440:
raise CLIError('--event-ttl should be a number between 1 and 1440.')
def _warn_if_manual_handshake_needed(endpoint_type, endpoint):
# If the endpoint belongs to a service that we know implements the subscription validation
# handshake, there's no need to show this message, hence we check for those services
# before showing this message. This list includes Azure Automation, EventGrid Trigger based
# Azure functions, and Azure Logic Apps.
if endpoint_type.lower() == WEBHOOK_DESTINATION.lower() and \
"azure-automation" not in endpoint.lower() and \
"eventgridextension" not in endpoint.lower() and \
"logic.azure" not in endpoint.lower():
logger.warning('If the provided endpoint does not support subscription validation '
'handshake, navigate to the validation URL that you receive in the '
'subscription validation event, in order to complete the event '
'subscription creation or update. For more details, '
'please visit http://aka.ms/esvalidation')
def _list_event_subscriptions_by_resource_id(client, resource_id):
# parse_resource_id doesn't handle resource_ids for Azure subscriptions and RGs
# so, first try to look for those two patterns.
if resource_id is not None:
id_parts = list(filter(None, resource_id.split('/')))
if len(id_parts) < 5:
# Azure subscriptions or Resource group
if id_parts[0].lower() != "subscriptions":
raise CLIError('The specified value for resource-id is not in the'
' expected format. It should start with /subscriptions.')
subscription_id = id_parts[1]
_validate_subscription_id_matches_default_subscription_id(
default_subscription_id=client.config.subscription_id,
provided_subscription_id=subscription_id)
if len(id_parts) == 2:
return client.list_global_by_subscription_for_topic_type("Microsoft.Resources.Subscriptions")
if len(id_parts) == 4 and id_parts[2].lower() == "resourcegroups":
resource_group_name = id_parts[3]
if resource_group_name is None:
raise CLIError('The specified value for resource-id is not'
' in the expected format. A valid value for'
' resource group must be provided.')
return client.list_global_by_resource_group_for_topic_type(
resource_group_name,
"Microsoft.Resources.ResourceGroups")
id_parts = parse_resource_id(resource_id)
subscription_id = id_parts.get('subscription')
_validate_subscription_id_matches_default_subscription_id(
default_subscription_id=client.config.subscription_id,
provided_subscription_id=subscription_id)
rg_name = id_parts.get('resource_group')
resource_name = id_parts.get('name')
namespace = id_parts.get('namespace')
resource_type = id_parts.get('type')
if (subscription_id is None or rg_name is None or resource_name is None or
namespace is None or resource_type is None):
raise CLIError('The specified value for resource-id is not'
' in the expected format.')
# Invoke the standard list_by_resource
return client.list_by_resource(
rg_name,
namespace,
resource_type,
resource_name)
def _is_topic_type_global_resource(topic_type_name):
# TODO: Add here if any other global topic types get added in the future.
TOPIC_TYPE_AZURE_SUBSCRIPTIONS = "Microsoft.Resources.Subscriptions"
TOPIC_TYPE_AZURE_RESOURCE_GROUP = "Microsoft.Resources.ResourceGroups"
TOPIC_TYPE_MAPS_ACCOUNTS = "Microsoft.Maps.Accounts"
if (topic_type_name.lower() == TOPIC_TYPE_AZURE_SUBSCRIPTIONS.lower() or
            topic_type_name.lower() == TOPIC_TYPE_MAPS_ACCOUNTS.lower() or
topic_type_name.lower() == TOPIC_TYPE_AZURE_RESOURCE_GROUP.lower()):
return True
return False
def _validate_subscription_id_matches_default_subscription_id(
default_subscription_id,
provided_subscription_id):
# The CLI/SDK infrastructure doesn't support overriding the subscription ID.
# Hence, we validate that the provided subscription ID is the same as the default
# configured subscription.
if provided_subscription_id.lower() != default_subscription_id.lower():
raise CLIError('The subscription ID in the specified resource-id'
' does not match the default subscription ID. To set the default subscription ID,'
' use az account set ID_OR_NAME, or use the global argument --subscription ')
```
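An illustration of the storage-queue endpoint parsing above; the resource ID is a made-up placeholder containing the expected `/queueServices/default/queues/` segment.
```python
endpoint = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1'
            '/providers/Microsoft.Storage/storageAccounts/sa1'
            '/queueServices/default/queues/myqueue')
dest = _get_storage_queue_destination(endpoint)
assert dest.queue_name == 'myqueue'
assert dest.resource_id.endswith('/storageAccounts/sa1')
```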
#### File: command_modules/kusto/commands.py
```python
from azure.cli.command_modules.kusto._client_factory import cf_cluster, cf_database
from azure.cli.command_modules.kusto._validators import validate_cluster_args, validate_database_args
def load_command_table(self, _):
from azure.cli.core.commands import CliCommandType
clusters_operations = CliCommandType(
operations_tmpl='azure.mgmt.kusto.operations.clusters_operations#ClustersOperations.{}',
client_factory=cf_cluster)
database_operations = CliCommandType(
operations_tmpl='azure.mgmt.kusto.operations.databases_operations#DatabasesOperations.{}',
client_factory=cf_database)
with self.command_group('kusto cluster',
clusters_operations,
client_factory=cf_cluster) as g:
g.custom_command('create', 'cluster_create', supports_no_wait=True, validator=validate_cluster_args)
g.custom_command('stop', 'cluster_stop', supports_no_wait=True)
g.custom_command('start', 'cluster_start', supports_no_wait=True)
g.command('list', 'list_by_resource_group')
g.show_command('show', 'get')
g.command('delete', 'delete', confirmation=True)
g.generic_update_command('update', custom_func_name='update_kusto_cluster')
g.wait_command('wait')
with self.command_group('kusto database',
database_operations,
client_factory=cf_database) as g:
g.custom_command('create', 'database_create', supports_no_wait=True, validator=validate_database_args)
g.command('delete', 'delete', confirmation=True)
g.generic_update_command('update', custom_func_name='update_kusto_database', validator=validate_database_args, supports_no_wait=True)
g.command('list', 'list_by_cluster')
g.show_command('show', 'get')
g.wait_command('wait')
```
#### File: command_modules/kusto/_validators.py
```python
from knack.util import CLIError
def validate_database_args(namespace):
if namespace.hot_cache_period:
hot_cache_period_in_days = round_hot_cache_to_days(namespace.hot_cache_period)
if hot_cache_period_in_days < 0:
raise CLIError('hot_cache_period must be a valid time')
if namespace.soft_delete_period:
soft_delete_period_in_days = round_soft_delete_to_days(namespace.soft_delete_period)
if soft_delete_period_in_days < 0:
raise CLIError('soft_delete_period must be a valid time')
def validate_cluster_args(namespace):
max_name_length = 22
name_length = len(namespace.cluster_name)
if name_length > max_name_length:
        raise CLIError('name cannot be longer than ' + str(max_name_length) + ' characters')
def round_hot_cache_to_days(time):
return round_timedelta_to_days(time, 'hot_cache_period')
def round_soft_delete_to_days(time):
return round_timedelta_to_days(time, 'soft_delete_period')
def round_timedelta_to_days(time, parameter_name):
try:
splitted = time.split(":")
numberOfDays = int(splitted[0])
if int(splitted[1]) > 0:
numberOfDays += 1
return numberOfDays
    except (ValueError, IndexError):
        raise CLIError(parameter_name + ' must be a valid time format')
```
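The rounding helpers appear to expect timespans in `D:HH:MM:SS` form (only the first two fields are inspected); a quick illustration of the round-up behaviour:
```python
assert round_timedelta_to_days('3:00:00:00', 'soft_delete_period') == 3
assert round_timedelta_to_days('3:12:00:00', 'soft_delete_period') == 4  # any partial day rounds up
```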
#### File: command_modules/security/_params.py
```python
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
def load_arguments(self, _):
for scope in ['alert',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['auto-provisioning-setting update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'auto_provision',
validator=validate_auto_provisioning_toggle,
arg_type=auto_provisioning_auto_provision_arg_type)
for scope in ['contact create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'email',
arg_type=contact_email_arg_type)
c.argument(
'phone',
arg_type=contact_phone_arg_type)
c.argument(
'alert_notifications',
arg_type=contact_alert_notifications_arg_type)
c.argument(
'alerts_admins',
arg_type=contact_alerts_admins_arg_type)
for scope in ['pricing create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'tier',
validator=validate_pricing_tier,
arg_type=pricing_tier_arg_type)
for scope in ['workspace-setting create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'target_workspace',
arg_type=workspace_setting_target_workspace_arg_type)
```
#### File: tests/latest/test_tasks_scenario.py
```python
from azure.cli.testsdk import ScenarioTest
from azure_devtools.scenario_tests import AllowLargeResponse
import re
class SecurityCenterTasksTests(ScenarioTest):
@AllowLargeResponse()
def test_security_tasks(self):
tasks = self.cmd('az security task list').get_output_in_json()
assert len(tasks) >= 0
rg_task = next(task for task in tasks if "resourceGroups" in task["id"])
match = re.search('resourceGroups/([^/]+)', rg_task["id"])
task = self.cmd('az security task show -g ' + match.group(1) + ' -n ' + rg_task["name"]).get_output_in_json()
assert task is not None
subscription_task = next(task for task in tasks if "resourceGroups" not in task["id"])
task = self.cmd('az security task show -n ' + subscription_task["name"]).get_output_in_json()
assert task is not None
```
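The regex in the test above pulls the resource-group segment out of a task resource ID; a quick standalone illustration with a made-up ID:
```python
import re

task_id = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg'
           '/providers/Microsoft.Security/locations/centralus/tasks/taskName')
match = re.search('resourceGroups/([^/]+)', task_id)
assert match.group(1) == 'myRg'
```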
#### File: command_modules/storage/_validators.py
```python
from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage._client_factory import get_storage_data_service_client
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
"""Query the storage account key. This is used when the customer doesn't offer account key but name."""
rg, scf = _query_account_rg(cli_ctx, account_name)
t_storage_account_keys = get_sdk(
cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
if t_storage_account_keys:
return scf.storage_accounts.list_keys(rg, account_name).key1
# of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
return scf.storage_accounts.list_keys(rg, account_name).keys[0].value # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
"""Query the storage account's resource group, which the mgmt sdk requires."""
scf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
if acc:
from msrestazure.tools import parse_resource_id
return parse_resource_id(acc.id)['resource_group'], scf
raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
from knack.cli import EVENT_CLI_POST_EXECUTE
TokenCredential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
token_credential = TokenCredential()
updater = TokenUpdater(token_credential, cli_ctx)
def _cancel_timer_event_handler(_, **__):
updater.cancel()
cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
return token_credential
# region PARAMETER VALIDATORS
def process_resource_group(cmd, namespace):
"""Processes the resource group parameter from the account name"""
if namespace.account_name and not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def validate_table_payload_format(cmd, namespace):
t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
if namespace.accept:
formats = {
'none': t_table_payload.JSON_NO_METADATA,
'minimal': t_table_payload.JSON_MINIMAL_METADATA,
'full': t_table_payload.JSON_FULL_METADATA
}
namespace.accept = formats[namespace.accept.lower()]
def validate_bypass(namespace):
if namespace.bypass:
namespace.bypass = ', '.join(namespace.bypass) if isinstance(namespace.bypass, list) else namespace.bypass
def validate_client_parameters(cmd, namespace):
""" Retrieves storage connection parameters from environment variables and parses out connection string into
account name and key """
n = namespace
def get_config_value(section, key, default):
return cmd.cli_ctx.config.get(section, key, default)
if hasattr(n, 'auth_mode'):
auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
del n.auth_mode
if not n.account_name:
n.account_name = get_config_value('storage', 'account', None)
if auth_mode == 'login':
n.token_credential = _create_token_credential(cmd.cli_ctx)
# give warning if there are account key args being ignored
account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
n.connection_string and "--connection-string"]
account_key_args = [arg for arg in account_key_args if arg]
if account_key_args:
logger.warning('In "login" auth mode, the following arguments are ignored: %s',
' ,'.join(account_key_args))
return
if not n.connection_string:
n.connection_string = get_config_value('storage', 'connection_string', None)
# if connection string supplied or in environment variables, extract account key and name
if n.connection_string:
conn_dict = validate_key_value_pairs(n.connection_string)
n.account_name = conn_dict.get('AccountName')
n.account_key = conn_dict.get('AccountKey')
n.sas_token = conn_dict.get('SharedAccessSignature')
# otherwise, simply try to retrieve the remaining variables from environment variables
if not n.account_name:
n.account_name = get_config_value('storage', 'account', None)
if not n.account_key:
n.account_key = get_config_value('storage', 'key', None)
if not n.sas_token:
n.sas_token = get_config_value('storage', 'sas_token', None)
    # strip the '?' from the sas token. the portal and the command line return sas tokens in
    # different forms
if n.sas_token:
n.sas_token = n.sas_token.lstrip('?')
# if account name is specified but no key, attempt to query
if n.account_name and not n.account_key and not n.sas_token:
n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
def process_blob_source_uri(cmd, namespace):
"""
Validate the parameters referenced to a blob source and create the source URI from them.
"""
from .util import create_short_lived_blob_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
else:
# simplest scenario--no further processing necessary
return
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
# determine if the copy will happen in the same storage account
if not source_account_name and source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
elif not source_account_name and not source_account_key:
        # neither the source account name nor the key is given; assume the user intends to copy a
        # blob within the same account
source_account_name = ns.get('account_name', None)
source_account_key = ns.get('account_key', None)
elif source_account_name and not source_account_key:
if source_account_name == ns.get('account_name', None):
# the source account name is same as the destination account name
source_account_key = ns.get('account_key', None)
else:
# the source account is different from destination account but the key is missing
# try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# else: both source account name and key are given by user
if not source_account_name:
raise ValueError(usage_string.format('Storage account name not found'))
if not sas:
sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if sas:
query_params.append(sas)
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
cmd.cli_ctx.cloud.suffixes.storage_endpoint,
container,
blob,
'?' if query_params else '',
'&'.join(query_params))
namespace.copy_source = uri
def validate_source_uri(cmd, namespace): # pylint: disable=too-many-statements
from .util import create_short_lived_blob_sas, create_short_lived_file_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri [--source-sas]' \
'\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
'\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
'\n\tOR --source-share --source-path' \
'\n\tOR --source-share --source-path [--source-account-name & sas]' \
'\n\tOR --source-share --source-path [--source-account-name & key]'
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source as file
share = ns.pop('source_share', None)
path = ns.pop('source_path', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
source_sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, snapshot, share, path, source_account_name,
source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
if source_sas:
source_sas = source_sas.lstrip('?')
uri = '{}{}{}'.format(uri, '?', source_sas)
namespace.copy_source = uri
return
# ensure either a file or blob source is specified
valid_blob_source = container and blob and not share and not path
valid_file_source = share and path and not container and not blob and not snapshot
if not valid_blob_source and not valid_file_source:
        raise ValueError(usage_string.format('Neither a valid blob nor file source is specified'))
elif valid_blob_source and valid_file_source:
raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
'specified'))
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
if not source_account_name:
if source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
# assume that user intends to copy blob in the same account
source_account_name = ns.get('account_name', None)
# determine if the copy will happen in the same storage account
same_account = False
if not source_account_key and not source_sas:
if source_account_name == ns.get('account_name', None):
same_account = True
source_account_key = ns.get('account_key', None)
source_sas = ns.get('sas_token', None)
else:
            # the source account is different from the destination account but the key is missing; try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# Both source account name and either key or sas (or both) are now available
if not source_sas:
# generate a sas token even in the same account when the source and destination are not the same kind.
if valid_file_source and (ns.get('container_name', None) or not same_account):
import os
dir_name, file_name = os.path.split(path) if path else (None, '')
source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
dir_name, file_name)
elif valid_blob_source and (ns.get('share_name', None) or not same_account):
source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if source_sas:
query_params.append(source_sas.lstrip('?'))
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
source_account_name,
'blob' if valid_blob_source else 'file',
container if valid_blob_source else share,
encode_for_url(blob if valid_blob_source else path),
'?' if query_params else '',
'&'.join(query_params),
cmd.cli_ctx.cloud.suffixes.storage_endpoint)
namespace.copy_source = uri
def validate_blob_type(namespace):
if not namespace.blob_type:
namespace.blob_type = 'page' if namespace.file_path.endswith('.vhd') else 'block'
def validate_storage_data_plane_list(namespace):
if namespace.num_results == '*':
namespace.num_results = None
else:
namespace.num_results = int(namespace.num_results)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
def _class_name(class_type):
return class_type.__module__ + "." + class_type.__class__.__name__
def validator(cmd, namespace):
t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
'blob.baseblobservice#BaseBlobService',
'file#FileService',
'blob.models#ContentSettings',
'file.models#ContentSettings')
# must run certain validators first for an update
if update:
validate_client_parameters(cmd, namespace)
if update and _class_name(settings_class) == _class_name(t_file_content_settings):
get_file_path_validator()(namespace)
ns = vars(namespace)
# retrieve the existing object properties for an update
if update:
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
if _class_name(settings_class) == _class_name(t_blob_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx,
t_base_blob_service,
account,
key,
cs,
sas)
container = ns.get('container_name')
blob = ns.get('blob_name')
lease_id = ns.get('lease_id')
props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
elif _class_name(settings_class) == _class_name(t_file_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
share = ns.get('share_name')
directory = ns.get('directory_name')
filename = ns.get('file_name')
props = client.get_file_properties(share, directory, filename).properties.content_settings
# create new properties
new_props = settings_class(
content_type=ns.pop('content_type', None),
content_disposition=ns.pop('content_disposition', None),
content_encoding=ns.pop('content_encoding', None),
content_language=ns.pop('content_language', None),
content_md5=ns.pop('content_md5', None),
cache_control=ns.pop('content_cache_control', None)
)
# if update, fill in any None values with existing
if update:
for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language', 'content_md5',
'cache_control']:
if getattr(new_props, attr) is None:
setattr(new_props, attr, getattr(props, attr))
else:
if guess_from_file:
new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)
ns['content_settings'] = new_props
return validator
def validate_custom_domain(namespace):
if namespace.use_subdomain and not namespace.custom_domain:
raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
"""
Builds up the encryption services object for storage account operations based on the list of services passed in.
"""
if namespace.encryption_services:
t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
'EncryptionServices', 'EncryptionService', mod='models')
services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}
namespace.encryption_services = t_encryption_services(**services)
def validate_encryption_source(cmd, namespace):
ns = vars(namespace)
key_name = ns.pop('encryption_key_name', None)
key_version = ns.pop('encryption_key_version', None)
key_vault_uri = ns.pop('encryption_key_vault', None)
if namespace.encryption_key_source == 'Microsoft.Keyvault' and not (key_name and key_version and key_vault_uri):
raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are required '
'when --encryption-key-source=Microsoft.Keyvault is specified.')
if key_name or key_version or key_vault_uri:
if namespace.encryption_key_source != 'Microsoft.Keyvault':
raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
'applicable when --encryption-key-source=Microsoft.Keyvault is not specified.')
KeyVaultProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE, 'KeyVaultProperties',
mod='models')
if not KeyVaultProperties:
return
kv_prop = KeyVaultProperties(key_name=key_name, key_version=key_version, key_vault_uri=key_vault_uri)
namespace.encryption_key_vault_properties = kv_prop
def validate_entity(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
RowKey and PartitionKey are converted to the correct case and included. """
values = dict(x.split('=', 1) for x in namespace.entity)
keys = values.keys()
for key in keys:
if key.lower() == 'rowkey':
val = values[key]
del values[key]
values['RowKey'] = val
elif key.lower() == 'partitionkey':
val = values[key]
del values[key]
values['PartitionKey'] = val
keys = values.keys()
missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
missing_keys = '{}PartitionKey'.format(missing_keys) \
if 'PartitionKey' not in keys else missing_keys
if missing_keys:
import argparse
raise argparse.ArgumentError(
None, 'incorrect usage: entity requires: {}'.format(missing_keys))
def cast_val(key, val):
""" Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
can be queried correctly. """
if key in ['PartitionKey', 'RowKey']:
return val
def try_cast(to_type):
try:
return to_type(val)
except ValueError:
return None
return try_cast(int) or try_cast(float) or val
# ensure numbers are converted from strings so querying will work correctly
values = {key: cast_val(key, val) for key, val in values.items()}
namespace.entity = values
def validate_marker(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
nextrowkey and nextpartitionkey are included. """
if not namespace.marker:
return
marker = dict(x.split('=', 1) for x in namespace.marker)
expected_keys = {'nextrowkey', 'nextpartitionkey'}
for key in marker:
new_key = key.lower()
if new_key in expected_keys:
expected_keys.remove(key.lower())
val = marker[key]
del marker[key]
marker[new_key] = val
if expected_keys:
import argparse
raise argparse.ArgumentError(
None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))
namespace.marker = marker
def get_file_path_validator(default_file_param=None):
""" Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
Allows another path-type parameter to be named which can supply a default filename. """
def validator(namespace):
import os
if not hasattr(namespace, 'path'):
return
path = namespace.path
dir_name, file_name = os.path.split(path) if path else (None, '')
if default_file_param and '.' not in file_name:
dir_name = path
file_name = os.path.split(getattr(namespace, default_file_param))[1]
dir_name = None if dir_name in ('', '.') else dir_name
namespace.directory_name = dir_name
namespace.file_name = file_name
del namespace.path
return validator
def validate_included_datasets(cmd, namespace):
if namespace.include:
include = namespace.include
if set(include) - set('cmsd'):
help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
t_blob_include = cmd.get_models('blob#Include')
namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
def validate_key(namespace):
namespace.key_name = storage_account_key_options[namespace.key_name]
def validate_metadata(namespace):
if namespace.metadata:
namespace.metadata = dict(x.split('=', 1) for x in namespace.metadata)
def get_permission_help_string(permission_class):
allowed_values = [x.lower() for x in dir(permission_class) if not x.startswith('__')]
return ' '.join(['({}){}'.format(x[0], x[1:]) for x in allowed_values])
def get_permission_validator(permission_class):
allowed_values = [x.lower() for x in dir(permission_class) if not x.startswith('__')]
allowed_string = ''.join(x[0] for x in allowed_values)
def validator(namespace):
if namespace.permission:
if set(namespace.permission) - set(allowed_string):
help_string = get_permission_help_string(permission_class)
raise ValueError(
'valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = permission_class(_str=namespace.permission)
return validator
def table_permission_validator(cmd, namespace):
""" A special case for table because the SDK associates the QUERY permission with 'r' """
t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
if namespace.permission:
if set(namespace.permission) - set('raud'):
help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
from .sdkutil import get_container_access_type
t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')
if namespace.public_access:
namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())
if hasattr(namespace, 'signed_identifiers'):
# must retrieve the existing ACL to simulate a patch operation because these calls
# are needlessly conflated
ns = vars(namespace)
validate_client_parameters(cmd, namespace)
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
container = ns.get('container_name')
lease_id = ns.get('lease_id')
ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_select(namespace):
if namespace.select:
namespace.select = ','.join(namespace.select)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
"""
    Create the second file service or blob service client for batch copy commands, which is used to
    list the source files or blobs. If both the source account and source URI are omitted, it
    indicates that the user wants to copy files or blobs within the same storage account; in that
    case the source client will be set to None and the command will use the destination client.
"""
t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
usage_string = 'invalid usage: supply only one of the following argument sets:' + \
'\n\t --source-uri [--source-sas]' + \
'\n\tOR --source-container' + \
'\n\tOR --source-container --source-account-name --source-account-key' + \
'\n\tOR --source-container --source-account-name --source-sas' + \
'\n\tOR --source-share --source-account-name --source-account-key' + \
'\n\tOR --source-share --source-account-name --source-account-sas'
ns = vars(namespace)
source_account = ns.pop('source_account_name', None)
source_key = ns.pop('source_account_key', None)
source_uri = ns.pop('source_uri', None)
source_sas = ns.get('source_sas', None)
source_container = ns.get('source_container', None)
source_share = ns.get('source_share', None)
if source_uri and source_account:
raise ValueError(usage_string)
if not source_uri and bool(source_container) == bool(source_share): # must be container or share
raise ValueError(usage_string)
if (not source_account) and (not source_uri):
        # Set the source_client to None if neither source_account nor source_uri is given. This
        # tells the command that the source file share or blob container is in the same storage
        # account as the destination file share or blob container.
#
# The command itself should create the source service client since the validator can't
# access the destination client through the namespace.
#
        # A few argument checks are made as well to avoid ambiguity.
if source_key or source_sas:
raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
' same as the destination account. Do not provide --source-sas or --source-account-key')
ns['source_client'] = None
if 'token_credential' not in ns: # not using oauth
return
# oauth is only possible through destination, must still get source creds
source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']
if source_account:
if not (source_key or source_sas):
            # when neither a storage account key nor a SAS token is given, try to fetch the key in
            # the current subscription
source_key = _query_account_key(cmd.cli_ctx, source_account)
if source_container:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_share:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_uri:
if source_key or source_container or source_share:
raise ValueError(usage_string)
from .storage_url_helpers import StorageResourceIdentifier
if source_sas:
source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
nor_container_or_share = not identifier.container and not identifier.share
if not identifier.is_url():
raise ValueError('incorrect usage: --source-uri expects a URI')
elif identifier.blob or identifier.directory or \
identifier.filename or nor_container_or_share:
raise ValueError('incorrect usage: --source-uri has to be blob container or file share')
if identifier.sas_token:
ns['source_sas'] = identifier.sas_token
else:
source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)
if identifier.container:
ns['source_container'] = identifier.container
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
elif identifier.share:
ns['source_share'] = identifier.share
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
def _update_progress(current, total):
hook = cmd.cli_ctx.get_progress_controller(det=True)
if total:
hook.add(message='Alive', value=current, total_val=total)
if total == current:
hook.end()
if not namespace.no_progress:
namespace.progress_callback = _update_progress
del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
"""Process the parameters for storage container delete command"""
# check whether to use mgmt or data-plane
if namespace.bypass_immutability_policy:
# use management-plane
namespace.processed_account_name = namespace.account_name
namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
cmd.cli_ctx, namespace.account_name)
del namespace.auth_mode
else:
# use data-plane, like before
validate_client_parameters(cmd, namespace)
def process_blob_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage blob download command"""
import os
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and container name from source string
_process_blob_batch_container_parameters(cmd, namespace)
# 3. Call validators
add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
"""Process the source and destination of storage blob upload command"""
import os
# 1. quick check
if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be an existing directory')
# 2. try to extract account name and container name from destination string
_process_blob_batch_container_parameters(cmd, namespace, source=False)
# 3. collect the files to be uploaded
namespace.source = os.path.realpath(namespace.source)
namespace.source_files = [c for c in glob_files_locally(namespace.source, namespace.pattern)]
# 4. determine blob type
if namespace.blob_type is None:
vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
# when all the listed files are vhd files use page
namespace.blob_type = 'page'
elif any(vhd_files):
# source files contain vhd files but not all of them
from knack.util import CLIError
            raise CLIError("""Failed to guess the required blob type. The types of the files to be
            uploaded are not consistent. The default blob type for .vhd files is "page", while
            others are "block". You can solve this problem by either explicitly setting the blob
            type or ensuring the pattern matches a consistent set of files.""")
else:
namespace.blob_type = 'block'
# 5. call other validators
validate_metadata(namespace)
t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
_process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
"""Process the container parameters for storage blob batch commands before populating args from environment."""
if source:
container_arg, container_name_arg = 'source', 'source_container_name'
else:
# destination
container_arg, container_name_arg = 'destination', 'destination_container_name'
# try to extract account name and container name from source string
from .storage_url_helpers import StorageResourceIdentifier
container_arg_val = getattr(namespace, container_arg) # either a url or name
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)
if not identifier.is_url():
setattr(namespace, container_name_arg, container_arg_val)
elif identifier.blob:
raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
else:
setattr(namespace, container_name_arg, identifier.container)
if namespace.account_name is None:
namespace.account_name = identifier.account_name
elif namespace.account_name != identifier.account_name:
raise ValueError('The given storage account name is not consistent with the '
'account name in the destination URL')
# if no sas-token is given and the container url contains one, use it
if not namespace.sas_token and identifier.sas_token:
namespace.sas_token = identifier.sas_token
# Finally, grab missing storage connection parameters from environment variables
validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
"""Process the parameters of storage file batch upload command"""
import os
# 1. quick check
if not os.path.exists(namespace.source):
raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))
if not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be a directory')
# 2. try to extract account name and container name from destination string
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: destination must be a file share url')
namespace.destination = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
namespace.source = os.path.realpath(namespace.source)
def process_file_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage file batch download command"""
import os
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and share name from source string
process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: source should be either share URL or name')
namespace.source = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
def process_file_download_namespace(namespace):
import os
get_file_path_validator()(namespace)
dest = namespace.file_path
if not dest or os.path.isdir(dest):
namespace.file_path = os.path.join(dest, namespace.file_name) \
if dest else namespace.file_name
def process_metric_update_namespace(namespace):
import argparse
namespace.hour = namespace.hour == 'true'
namespace.minute = namespace.minute == 'true'
namespace.api = namespace.api == 'true' if namespace.api else None
if namespace.hour is None and namespace.minute is None:
raise argparse.ArgumentError(
None, 'incorrect usage: must specify --hour and/or --minute')
if (namespace.hour or namespace.minute) and namespace.api is None:
raise argparse.ArgumentError(
None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
subnet = namespace.subnet
subnet_is_id = is_valid_resource_id(subnet)
vnet = namespace.vnet_name
if (subnet_is_id and not vnet) or (not subnet and not vnet):
return
elif subnet and not subnet_is_id and vnet:
namespace.subnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet,
child_type_1='subnets',
child_name_1=subnet)
else:
from knack.util import CLIError
raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def get_datetime_type(to_string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
from datetime import datetime
def datetime_type(string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
if to_string:
return datetime.strptime(string, form).strftime(form)
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
return datetime_type
def ipv4_range_type(string):
""" Validates an IPv4 address or address range. """
import re
ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
if not re.match("^{}$".format(ip_format), string):
if not re.match("^{}-{}$".format(ip_format, ip_format), string):
raise ValueError
return string
def resource_type_type(loader):
""" Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. """
def impl(string):
t_resources = loader.get_models('common.models#ResourceTypes')
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl
def services_type(loader):
""" Returns a function which validates that services string contains only a combination of blob, queue, table,
and file. Their shorthand representations are b, q, t, and f. """
def impl(string):
t_services = loader.get_models('common.models#Services')
if set(string) - set("bqtf"):
raise ValueError
return t_services(_str=''.join(set(string)))
return impl
def get_char_options_validator(types, property_name):
def _validator(namespace):
service_types = set(getattr(namespace, property_name, list()))
if not service_types:
raise ValueError('Missing options --{}.'.format(property_name.replace('_', '-')))
if service_types - set(types):
raise ValueError(
'--{}: only valid values are: {}.'.format(property_name.replace('_', '-'), ', '.join(types)))
setattr(namespace, property_name, service_types)
return _validator
def page_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'page' and namespace.tier:
raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
try:
namespace.tier = getattr(cmd.get_models('blob.models#PremiumPageBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier'))))
def block_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'block' and namespace.tier:
raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
try:
namespace.tier = getattr(cmd.get_models('blob.models#StandardBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier'))))
def blob_tier_validator(cmd, namespace):
if namespace.blob_type == 'page':
page_blob_tier_validator(cmd, namespace)
elif namespace.blob_type == 'block':
block_blob_tier_validator(cmd, namespace)
else:
raise ValueError('Blob tier is only applicable to block or page blob.')
```
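For orientation, the validators above all mutate an argparse namespace in place. The following standalone sketch is not part of azure-cli; it uses a plain `SimpleNamespace` instead of the real command namespace and only mimics the `key=value` parsing and numeric casting performed by `validate_metadata` and `validate_entity`.

```python
# Illustrative sketch only: mirrors the key=value parsing done by
# validate_metadata / validate_entity above, without the Azure CLI machinery.
from types import SimpleNamespace


def parse_key_value_pairs(pairs):
    """Split 'key=value' strings into a dict; values may themselves contain '='."""
    return dict(p.split('=', 1) for p in pairs)


ns = SimpleNamespace(metadata=['owner=data-team', 'env=prod'],
                     entity=['PartitionKey=pk1', 'RowKey=rk1', 'count=3'])

ns.metadata = parse_key_value_pairs(ns.metadata)

entity = parse_key_value_pairs(ns.entity)
# numeric values (other than the keys) are cast so table queries compare correctly,
# loosely following cast_val() inside validate_entity
entity = {k: (v if k in ('PartitionKey', 'RowKey') or not v.isdigit() else int(v))
          for k, v in entity.items()}

print(ns.metadata)  # {'owner': 'data-team', 'env': 'prod'}
print(entity)       # {'PartitionKey': 'pk1', 'RowKey': 'rk1', 'count': 3}
```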
#### File: cli_linter/rules/help_rules.py
```python
from ..rule_decorators import help_file_entry_rule
from ..linter import RuleError
from ..util import LinterError
import shlex
import mock
from knack.log import get_logger
logger = get_logger(__name__)
@help_file_entry_rule
def unrecognized_help_entry_rule(linter, help_entry):
if help_entry not in linter.commands and help_entry not in linter.command_groups:
raise RuleError('Not a recognized command or command-group')
@help_file_entry_rule
def faulty_help_type_rule(linter, help_entry):
if linter.get_help_entry_type(help_entry) != 'group' and help_entry in linter.command_groups:
raise RuleError('Command-group should be of help-type `group`')
elif linter.get_help_entry_type(help_entry) != 'command' and help_entry in linter.commands:
raise RuleError('Command should be of help-type `command`')
@help_file_entry_rule
def unrecognized_help_parameter_rule(linter, help_entry):
if help_entry not in linter.commands:
return
param_help_names = linter.get_help_entry_parameter_names(help_entry)
violations = []
for param_help_name in param_help_names:
if not linter.is_valid_parameter_help_name(help_entry, param_help_name):
violations.append(param_help_name)
if violations:
raise RuleError('The following parameter help names are invalid: {}'.format(' | '.join(violations)))
@help_file_entry_rule
def faulty_help_example_rule(linter, help_entry):
violations = []
for index, example in enumerate(linter.get_help_entry_examples(help_entry)):
if 'az '+ help_entry not in example.get('text', ''):
violations.append(str(index))
if violations:
raise RuleError('The following example entry indices do not include the command: {}'.format(
' | '.join(violations)))
@help_file_entry_rule
def faulty_help_example_parameters_rule(linter, help_entry):
parser = linter.command_parser
violations = []
for example in linter.get_help_entry_examples(help_entry):
max_profile = example.get('max_profile')
if max_profile and max_profile != 'latest':
logger.warning("\n\tSKIPPING example: {}\n\tas its max profile is {}, instead of latest.".format(example['text'], example['max_profile']))
continue
example_text = example.get('text','')
commands = _extract_commands_from_example(example_text)
while commands:
command = commands.pop()
violation, nested_commands = _lint_example_command(command, parser)
commands.extend(nested_commands) # append commands that are the source of any arguments
if violation:
violations.append(violation)
if violations:
num_err = len(violations)
violation_str = "\n\n".join(violations)
violation_msg = "\n\tThere is a violation:\n{}.".format(violation_str) if num_err == 1 else \
"\n\tThere are {} violations:\n{}".format(num_err, violation_str)
raise RuleError(violation_msg + "\n\n")
### Faulty help example parameters rule helpers
@mock.patch("azure.cli.core.parser.AzCliCommandParser._check_value")
@mock.patch("argparse.ArgumentParser._get_value")
@mock.patch("azure.cli.core.parser.AzCliCommandParser.error")
def _lint_example_command(command, parser, mocked_error_method, mocked_get_value, mocked_check_value):
def get_value_side_effect(action, arg_string):
return arg_string
mocked_error_method.side_effect = LinterError # mock call of parser.error so usage won't be printed.
mocked_get_value.side_effect = get_value_side_effect
violation = None
nested_commands = []
try:
command_args = shlex.split(command)[1:]
command_args, nested_commands = _process_command_args(command_args)
parser.parse_args(command_args)
except ValueError as e: # handle exception thrown by shlex.
if str(e) == "No closing quotation":
violation = '\t"{}"\n\thas no closing quotation. Tip: to continue an example ' \
'command on the next line, use a "\\" followed by a newline.\n\t' \
'If needed, you can escape the "\\", like so "\\\\"'.format(command)
else:
raise e
except LinterError: # handle parsing failure due to invalid option
violation = '\t"{}" is not a valid command'.format(command)
if mocked_error_method.called:
call_args = mocked_error_method.call_args
violation = "{}.\n\t{}".format(violation, call_args[0][0])
return violation, nested_commands
# return list of commands in the example text
def _extract_commands_from_example(example_text):
# fold commands spanning multiple lines into one line. Split commands that use pipes
example_text = example_text.replace("\\\n", " ")
example_text = example_text.replace("\\ ", " ")
example_text = example_text.replace(" | ", "\n")
commands = example_text.splitlines()
processed_commands = []
for command in commands: # filter out commands
if command.startswith("az"):
processed_commands.append(command)
elif "az " in command: # some commands start with "$(az ..." and even "`az in one case"
idx = command.find("az ")
command = command[idx:]
processed_commands.append(command)
return processed_commands
def _process_command_args(command_args):
result_args = []
new_commands = []
unwanted_chars = "$()`"
control_operators = ["&&","||"]
for arg in command_args: # strip unnecessary punctuation, otherwise arg validation could fail.
if arg in control_operators: # handle cases where multiple commands are connected by control operators.
idx = command_args.index(arg)
maybe_new_command = " ".join(command_args[idx:])
idx = maybe_new_command.find("az ")
if idx != -1:
new_commands.append(maybe_new_command[idx:]) # remaining command is in fact a new command / commands.
break
arg = arg.strip(unwanted_chars)
if arg.startswith("az "): # store any new commands
new_commands.append(arg)
result_args.append(arg)
return result_args, new_commands
```
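To make the example-linting helpers above more concrete, here is a standalone sketch (with an invented example command, outside the azure-cli parser) of how a help example containing a control operator is split into a primary command plus a nested command, in the spirit of `_extract_commands_from_example` and `_process_command_args`.

```python
# Standalone sketch of the command-splitting behaviour; the example command is invented.
import shlex

command = "az group create -n MyRG -l westus && az group delete -n MyRG --yes"
args = shlex.split(command)[1:]                  # drop the leading "az"

cleaned, nested = [], []
for arg in args:
    if arg in ("&&", "||"):                      # everything after a control operator
        rest = " ".join(args[args.index(arg):])  # is treated as a new command
        idx = rest.find("az ")
        if idx != -1:
            nested.append(rest[idx:])
        break
    cleaned.append(arg.strip("$()`"))            # strip shell substitution punctuation

print(cleaned)   # ['group', 'create', '-n', 'MyRG', '-l', 'westus']
print(nested)    # ['az group delete -n MyRG --yes']
```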
|
{
"source": "jfcrenshaw/GausSED",
"score": 3
}
|
#### File: GausSED/gaussed/bandpasses.py
```python
import numpy as np
from numbers import Number
import os
class Bandpass:
    r"""
Class to handle instrument filter bandpass.
Parameters
----------
filename : str
Location and name of file where bandpass is stored.
Must be a file with two columns: wavelength and bandpass throughput.
name : str
Name of bandpass
dlambda : float
Width of wavelength grid on which bandpass will be sampled.
Default = 10
Attributes
----------
wavelen : numpy.ndarray
Bandpass wavelength grid.
Range determined from the wavelengths in the provided file.
Resampled on wavelength grid with width dlambda.
T : numpy.ndarray
Bandpass throughput from the provided file.
R : numpy.ndarray
Bandpass normalized response function, defined
:math:`R = \lambda T(\lambda) / \int(\lambda T(\lambda) d\lambda)`
mean_wavelen: float
Mean wavelength of the bandpass, defined
:math:`\lambda_{\mathrm{mean}} = \int \lambda R(\lambda) d\lambda`
eff_width: float
Effective width of the bandpass, defined
:math:`W_{\mathrm{eff}} = \max{[R(\lambda)]}^{-1}`
"""
def __init__(self, filename: str, name: str = None, dlambda: float = 10):
# validate inputs
if not isinstance(filename, str):
raise ValueError("filename should be a string")
if not os.path.isfile(filename):
raise OSError(f"Cannot find bandpass file '{filename}'")
if name is not None and not isinstance(name, str):
raise ValueError("name should be a string")
if not isinstance(dlambda, Number):
raise ValueError("dlambda should be a number")
elif dlambda <= 0:
raise ValueError("dlambda must be a positive number")
# load system response from file
wavelen, T = np.loadtxt(filename, unpack=True)
# resample wavelen and calculate R
self.wavelen = np.arange(min(wavelen), max(wavelen) + dlambda, dlambda)
self.T = np.interp(self.wavelen, wavelen, T, left=0, right=0)
self.R = self.T * self.wavelen
self.R /= (self.R * dlambda).sum()
del wavelen, T
# calculate mean wavelength and effective width
self.mean_wavelen = (self.wavelen * self.R * dlambda).sum()
self.eff_width = (self.R * dlambda).sum() / max(self.R)
# set the name
name = filename.split("/")[-1].split(".")[0] if name is None else name
self.name = name
def flux(self, seds) -> np.ndarray:
"""
Return flux through the bandpass for the provided sed(s).
Parameters
----------
seds : SED or iterable of SEDs
Returns
-------
np.ndarray
Array of fluxes.
If only a single SED is given, array is squeezed.
"""
raise NotImplementedError
def __repr__(self):
return f"Bandpass({self.name})"
```
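As a quick numerical illustration of the definitions in the `Bandpass` docstring, the sketch below builds the normalized response `R`, the mean wavelength, and the effective width from a synthetic top-hat throughput (no bandpass file involved).

```python
# Toy illustration of the R, mean_wavelen, and eff_width definitions above,
# using a synthetic top-hat throughput instead of a bandpass file.
import numpy as np

dlambda = 10.0
wavelen = np.arange(5000.0, 7000.0 + dlambda, dlambda)
T = np.where((wavelen > 5500) & (wavelen < 6500), 0.8, 0.0)   # top-hat throughput

R = T * wavelen
R /= (R * dlambda).sum()                       # normalized response: integrates to 1

mean_wavelen = (wavelen * R * dlambda).sum()   # ~6000 AA for this symmetric top-hat
eff_width = (R * dlambda).sum() / R.max()      # equals 1 / max(R)

print(round(mean_wavelen, 1), round(eff_width, 1))
```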
#### File: GausSED/tests/test_bandpasses.py
```python
import os
import numpy as np
import pytest
from gaussed.bandpasses import Bandpass
filename = os.path.join(os.getcwd(), "tests/test_files/lsst-r.dat")
@pytest.mark.parametrize(
"filename,name,dlambda,exception",
[
(1, "N", 1, ValueError("filename should be a string")),
("filename", "N", 1, OSError("Cannot find bandpass file 'filename'")),
(filename, 1, 1, ValueError("name should be a string")),
(filename, "N", "a", ValueError("dlambda should be a number")),
(filename, "N", -2, ValueError("dlambda must be a positive number")),
],
)
def test_bandpass_bad_inputs(filename, name, dlambda, exception):
try:
Bandpass(filename, name=name, dlambda=dlambda)
except (ValueError, OSError) as err:
pass
assert isinstance(err, type(exception))
assert err.args == exception.args
else:
pytest.fail("Expected error but found none")
def test_bandpass_creation():
# check auto-naming
bandpass = Bandpass(filename)
assert bandpass.name == "lsst-r"
# check specified name and dlambda
name = "Name"
dlambda = 1
bandpass = Bandpass(filename, dlambda=dlambda, name=name)
assert bandpass.name == name
diff = np.diff(bandpass.wavelen)
assert all([d == dlambda for d in diff])
# check that T and R are sensible
assert isinstance(bandpass.T, np.ndarray)
assert isinstance(bandpass.R, np.ndarray)
Rsum = (bandpass.R * np.diff(bandpass.wavelen)[0]).sum()
assert np.isclose(Rsum, 1)
# check mean wavelen and eff width against known values
r_mean_wavelen = 6257.74
assert np.isclose(bandpass.mean_wavelen, r_mean_wavelen)
r_eff_width = 1206.92
assert np.isclose(bandpass.eff_width, r_eff_width)
assert bandpass.__repr__() == "Bandpass(Name)"
```
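For comparison, the same bad-input checks can be written with `pytest.raises`; this is only an alternative sketch, not code from the repository, and it assumes the same test-file path used in the module above.

```python
# Alternative formulation of the bad-input checks using pytest.raises (sketch only).
import os

import pytest

from gaussed.bandpasses import Bandpass

filename = os.path.join(os.getcwd(), "tests/test_files/lsst-r.dat")


@pytest.mark.parametrize(
    "bad_filename,name,dlambda,exc,msg",
    [
        (1, "N", 1, ValueError, "filename should be a string"),
        ("filename", "N", 1, OSError, "Cannot find bandpass file"),
        (filename, "N", -2, ValueError, "dlambda must be a positive number"),
    ],
)
def test_bandpass_bad_inputs_alt(bad_filename, name, dlambda, exc, msg):
    with pytest.raises(exc, match=msg):
        Bandpass(bad_filename, name=name, dlambda=dlambda)
```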
|
{
"source": "jfcrenshaw/LGSM",
"score": 3
}
|
#### File: LGSM/lgsm/losses.py
```python
import elegy
import jax.numpy as jnp
import numpy as np
from jax import random
class KLDiv(elegy.Loss):
"""Kullback-Leibler Divergence for a Gaussian distribution over latent variables."""
def __init__(self, alpha: float = 0):
super().__init__()
self.alpha = alpha
def call(self, y_pred: dict) -> np.ndarray:
logvar = 2 * jnp.log(y_pred["latent_std"])
mean = y_pred["latent_mean"]
return (
-(1 - self.alpha)
/ 2
* jnp.mean((1 + logvar) - mean ** 2 - jnp.exp(logvar), axis=-1)
)
class MMD(elegy.Loss):
"""Maximum-Mean Discrepancy, calculated using the method found in this tutorial
https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/
"""
def __init__(self, alpha: float = 0, beta: float = 1e3, nsamples: int = 200):
super().__init__()
self.alpha = alpha
self.beta = beta
self.nsamples = nsamples
@staticmethod
def kernel(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Computes mean of the Gaussian kernel matrix
"""
# set the hyperparam to dim / 2
dim = x.shape[1]
sigma_sq = float(dim / 2)
# compute the matrix of square distances
sq_dist_matrix = jnp.sum((x[:, None, ...] - y[None, :, ...]) ** 2, axis=-1)
# compute and return mean of the kernel matrix
return jnp.exp(-sq_dist_matrix / (2 * sigma_sq)).mean()
def call(self, y_pred: dict) -> np.ndarray:
# get the latent samples from the training set
train_latent_samples = y_pred["intrinsic_latents"]
# generate latent samples from the true latent distribution
PRNGKey = random.PRNGKey(train_latent_samples[0, 0].astype(int))
true_latent_samples = random.normal(
PRNGKey, shape=[self.nsamples, train_latent_samples.shape[1]]
)
# calculate MMD
E_xx = self.kernel(train_latent_samples, train_latent_samples)
E_yy = self.kernel(true_latent_samples, true_latent_samples)
E_xy = self.kernel(train_latent_samples, true_latent_samples)
mmd = E_xx + E_yy - 2 * E_xy
return (self.alpha + self.beta - 1) * mmd
class PhotometryMSE(elegy.losses.MeanSquaredError):
"""Mean Square Error for photometry.
Note I added a factor of 0.5 for the VAE definition, and 1/0.05^2 to hardcode
an error of 0.05 mags for all photometry. Will change this later.
"""
def call(self, x: np.ndarray, y_pred: dict) -> np.ndarray:
return (
0.5
/ 0.05 ** 2
* super().call(y_true=x[:, 1:], y_pred=y_pred["predicted_photometry"])
)
class ColorMSE(elegy.Loss):
"""Mean Square Error for the colors.
Note I added a factor of 0.5 for the VAE definition, and I hardcoded
an error of 0.05 mags for all photometry.
"""
def __init__(self, ref_idx: int):
super().__init__()
self.ref_idx = ref_idx # index of the reference band
def call(self, x: np.ndarray, y_pred: dict) -> np.ndarray:
mag_SE = (
1
/ 0.05 ** 2
* (
x[..., self.ref_idx + 1]
- y_pred["predicted_photometry"][..., self.ref_idx]
)
** 2
)
color_SE = (
1
/ (2 * 0.05 ** 2)
* (
jnp.diff(x[:, 1:], axis=-1)
- jnp.diff(y_pred["predicted_photometry"], axis=-1)
)
** 2
).sum(axis=-1)
MSE = mag_SE + color_SE / y_pred["predicted_photometry"].shape[-1]
return 0.5 * MSE
class SlopeLoss(elegy.Loss):
"""Penalty on differences in neighboring bins.
I also hardcoded an error of 0.05 mags here
"""
def __init__(self, eta: float):
super().__init__()
self.eta = eta
def call(self, y_pred: dict) -> np.ndarray:
return (
1
/ 0.05 ** 2
* self.eta
* jnp.mean(jnp.diff(y_pred["sed_mag"], axis=-1) ** 2)
)
class SpectralLoss(elegy.Loss):
"""Calculates loss w.r.t. latent SEDs, assuming a fraction of true
SEDs are known exactly.
"""
def __init__(self, eta: float, frac: int):
super().__init__()
self.eta = eta
self.frac = frac
def call(self, y_true: np.ndarray, y_pred: dict) -> np.ndarray:
N = int(self.frac * y_pred["amplitude"].shape[0])
assert N > 0, "With this batch size, the given frac results in no spectra."
true_sed = y_true[:N]
pred_sed = y_pred["amplitude"][:N] + y_pred["sed_mag"][:N]
return self.eta * jnp.mean((true_sed - pred_sed) ** 2)
```
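As a standalone sanity check on the MMD term described above, the Gaussian-kernel mean can be evaluated directly on small random arrays; when both sample sets come from the same distribution the resulting MMD should be near zero. This sketch reuses the same kernel formula outside the elegy `Loss` wrapper.

```python
# Standalone check of the Gaussian-kernel mean used in MMD.kernel above.
import jax.numpy as jnp
from jax import random


def kernel_mean(x, y):
    sigma_sq = float(x.shape[1] / 2)                       # bandwidth set to dim / 2
    sq_dist = jnp.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return jnp.exp(-sq_dist / (2 * sigma_sq)).mean()


key1, key2 = random.split(random.PRNGKey(0))
x = random.normal(key1, (200, 4))                          # "training" latents
y = random.normal(key2, (200, 4))                          # samples from N(0, I)

mmd = kernel_mean(x, x) + kernel_mean(y, y) - 2 * kernel_mean(x, y)
print(mmd)   # close to 0 when both sets come from the same distribution
```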
#### File: LGSM/lgsm/sed_utils.py
```python
import jax.numpy as jnp
import numpy as np
def mag_to_fnu(
mag: np.ndarray, wave: None = None # pylint: disable=unused-argument
) -> np.ndarray:
"""Convert AB magnitudes to F_nu in erg/s/Hz/cm^2.
In the AB magnitude system, an object with flux density F_nu = 3631 Jansky
has a magnitude of zero in all bands (note: 1 Jansky = 1e-23 erg/s/Hz/cm^2).
Thus, to convert from AB magnitude to F_nu, we do the following:
F_nu = (3631e-23 erg/s/Hz/cm^2) * 10^(m_AB / -2.5).
"""
fnu = 3631e-23 * 10 ** (mag / -2.5)
return fnu
def fnu_to_flambda(fnu: np.ndarray, wave: np.ndarray) -> np.ndarray:
"""Convert F_nu in erg/s/Hz/cm^2 to F_lambda in erg/s/AA/cm^2.
To convert from F_nu to F_lambda, we use the formula
F_lambda = c / lambda^2 * F_nu,
resulting in an SED with units of erg/s/cm^2/AA.
"""
flambda = 2.998e18 / wave ** 2 * fnu
return flambda
def mag_to_flambda(mag: np.ndarray, wave: np.ndarray) -> np.ndarray:
"""Convert AB magnitudes to F_lambda in erg/s/AA/cm^2.
In the AB magnitude system, an object with flux density F_nu = 3631 Jansky
has a magnitude of zero in all bands (note: 1 Jansky = 1e-23 erg/s/Hz/cm^2).
Thus, to convert from AB magnitude to F_nu, we do the following:
F_nu = (3631e-23 erg/s/Hz/cm^2) * 10^(m_AB / -2.5).
To convert from F_nu to F_lambda, we use the formula
F_lambda = c / lambda^2 * F_nu,
resulting in an SED with units of erg/s/cm^2/AA.
"""
fnu = mag_to_fnu(mag)
flambda = fnu_to_flambda(fnu, wave)
return flambda
def flambda_to_fnu(flambda: np.ndarray, wave: np.ndarray) -> np.ndarray:
"""Convert F_lambda in erg/s/AA/cm^2 to F_nu in erg/s/Hz/cm^2.
To convert from F_lambda to F_nu, we use the formula
F_nu = lambda^2 / c * F_lambda,
resulting in an SED with units of erg/s/Hz/cm^2.
"""
fnu = wave ** 2 / 2.998e18 * flambda
return fnu
def fnu_to_mag(
fnu: np.ndarray, wave: None = None # pylint: disable=unused-argument
) -> np.ndarray:
"""Convert F_nu in erg/s/Hz/cm^2 to AB magnitudes.
In the AB magnitude system, an object with flux density F_nu = 3631 Jansky
has a magnitude of zero in all bands (note: 1 Jansky = 1e-23 erg/s/Hz/cm^2).
Thus, to convert from F_nu to AB magnitude, we do the following:
m_AB = -2.5 * log10(F_nu / 3631e-23 erg/s/Hz/cm^2).
"""
mag = -2.5 * jnp.log10(fnu / 3631e-23)
return mag
def flambda_to_mag(flambda: np.ndarray, wave: np.ndarray) -> np.ndarray:
"""Convert F_lambda in erg/s/AA/cm^2 to AB magnitudes.
To convert from F_lambda to F_nu, we use the formula
F_nu = lambda^2 / c * F_lambda,
resulting in an SED with units of erg/s/Hz/cm^2.
In the AB magnitude system, an object with flux density F_nu = 3631 Jansky
has a magnitude of zero in all bands (note: 1 Jansky = 1e-23 erg/s/Hz/cm^2).
Thus, to convert from F_nu to AB magnitude, we do the following:
m_AB = -2.5 * log10(F_nu / 3631e-23 erg/s/Hz/cm^2).
"""
fnu = flambda_to_fnu(flambda, wave)
mag = fnu_to_mag(fnu)
return mag
def wave_to_freq(wave: np.ndarray) -> np.ndarray:
"""Convert wavelength in Angstroms (AA) to frequency in Hertz (Hz)."""
freq = 2.998e18 / wave
return freq
def freq_to_wave(freq: np.ndarray) -> np.ndarray:
"""Convert frequency in Hertz (Hz) to wavelength in Angstroms (AA)."""
wave = 2.998e18 / freq
return wave
def setup_wave_grid(wave_min: float, wave_max: float, wave_bins: int) -> np.ndarray:
"""Setup an evenly spaced wavelength grid."""
    dwave = (wave_max - wave_min) / (wave_bins - 1)
wave_grid = jnp.arange(wave_min, wave_max + dwave, dwave)
return wave_grid
```
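A quick round-trip check of the conversion formulas above (AB magnitude to F_nu to F_lambda and back), assuming the module is importable as `lgsm.sed_utils`.

```python
# Round-trip check of the unit conversions defined above.
import jax.numpy as jnp

from lgsm.sed_utils import flambda_to_mag, fnu_to_flambda, mag_to_fnu, setup_wave_grid

wave = setup_wave_grid(4000.0, 7000.0, 4)      # [4000, 5000, 6000, 7000] AA
mag = jnp.array([20.0, 21.0, 22.0, 23.0])

fnu = mag_to_fnu(mag)                          # erg/s/Hz/cm^2
flambda = fnu_to_flambda(fnu, wave)            # erg/s/AA/cm^2
mag_back = flambda_to_mag(flambda, wave)

print(jnp.allclose(mag, mag_back))             # True (up to float precision)
```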
|
{
"source": "jfcrenshaw/ml-aos",
"score": 3
}
|
#### File: ml-aos/ml_aos/david_net.py
```python
import numpy as np
import torch
from torch import nn
class DavidNet(nn.Module):
"""Network to predict wavefront Zernike coefficients from donut images.
Consists of a DonutNet that creates image features from the donut image.
These are concatenated with a set of meta parameters (usually the donut's
location on the focal plane), which is then passed to the MetaNet, which
predicts a set of Zernike coefficients.
"""
def __init__(self, n_meta_layers: int) -> None:
"""Create a WaveNet to predict Zernike coefficients for donut images.
Parameters
----------
n_meta_layers: int
Number of fully connected layers in the MetaNet.
"""
super().__init__()
self.donut_net = DonutNet()
self.meta_net = MetaNet(n_meta_layers)
def forward(
self,
image: torch.Tensor,
fx: torch.Tensor,
fy: torch.Tensor,
intra: torch.Tensor,
) -> torch.Tensor:
"""Predict Zernike coefficients for the donut image.
Parameters
----------
image: torch.Tensor
The donut image
fx: torch.Tensor
The x angle of the source with respect to the optic axis
fy: torch.Tensor
The y angle of the source with respect to the optic axis
intra: torch.Tensor
Boolean indicating whether the donut is intra or extra focal
Returns
-------
torch.Tensor
Array of Zernike coefficients
"""
image_features = self.donut_net(image)
features = torch.cat([image_features, fx, fy, intra], axis=1)
return self.meta_net(features)
class DonutNet(nn.Module):
"""Network encodes donut image as latent_dim dimensional latent vector.
Takes batches of 1x256x256 donut images as input and produces a
(1 x 1024) dimensional representation.
"""
def __init__(self) -> None:
"""Create the donut encoder network."""
super().__init__()
# first apply a convolution that maintains the image dimensions
# but increases the channels from 1 to 8
self.layers = nn.ModuleList(
[
nn.Conv2d(1, 8, 3, stride=1, padding=1),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
]
)
# now apply a series of DownBlocks that increases the number of
# channels by a factor of 2, while decreasing height and width
# by a factor of 2.
for i in range(7):
in_channels = 2 ** (i + 3)
out_channels = 2 ** (i + 3 + 1)
self.layers.append(DownBlock(in_channels, out_channels))
# a final down block that doesn't increase the number of channels
self.layers.append(DownBlock(2 ** 10, 2 ** 10))
# Finally, flatten the output
self.layers.append(nn.Flatten())
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Return latent space encoding of the donut image.
Parameters
----------
x: torch.Tensor
Input images of shape (batch x 256 x 256)
Returns
-------
torch.Tensor
Latent space encoding of shape (batch x 1024)
"""
for layer in self.layers:
x = layer(x)
return x
class DownBlock(nn.Module):
"""Convolutional block that decreases height and width by factor of 2.
Consists of a convolutional residual/skip layer, followed by a regular
convolutional layer that decreases the dimensions by a factor of 2.
"""
def __init__(self, in_channels: int, out_channels: int) -> None:
"""Create a downblock that reduces image dimensions.
Parameters
----------
in_channels: int
The number of input channels
out_channels: int
The number of output channels
"""
super().__init__()
# create the list of layers
self.layers = nn.ModuleList(
[
# residual layer with convolution that preserves dimensions
SkipBlock(in_channels),
# this convolution decreases height and width by factor of 2
nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return a convolved image with half the height and width.
Parameters
----------
x: torch.Tensor
Input image of shape (batch x in_channels x height x width)
Returns
-------
torch.Tensor
Output image of shape (batch x out_channels x height/2 x width/2)
"""
for layer in self.layers:
x = layer(x)
return x
class SkipBlock(nn.Module):
"""Convolutional layer with a residual/skip connection."""
def __init__(self, channels: int) -> None:
"""Create a convolution layer with a skip connection.
Parameters
----------
channels: int
The number of input and output channels for the convolution.
"""
super().__init__()
# layers to compute dx
self.layers = nn.Sequential(
nn.Conv2d(channels, channels, 3, stride=1, padding="same"),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=True),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Convolve image and add to original via the skip connection.
Parameters
----------
x: torch.Tensor
Input image of shape (batch x channels x height x width)
Returns
-------
torch.Tensor
Output image of shape (batch x channels x height x width)
"""
dx = self.layers(x)
return x + dx
class MetaNet(nn.Module):
"""Network that maps image features and meta parameters onto Zernikes.
Consists of several fully connected layers.
"""
# number of Zernike coefficients to predict
N_ZERNIKES = 18
# number of meta parameters to use in prediction
N_METAS = 3
    # the dimension of the image features. This is determined by looking
    # at the dimension of the outputs from DonutNet
IMAGE_DIM = 1024
def __init__(self, n_layers: int) -> None:
"""Create a MetaNet to map image features and meta params to Zernikes.
Parameters
----------
n_layers: int
The number of layers in the MetaNet.
"""
super().__init__()
# set number of nodes in network layers using a geometric series
n_nodes = np.geomspace(
self.IMAGE_DIM + self.N_METAS,
self.N_ZERNIKES,
n_layers + 1,
dtype=int,
)
# create the hidden layers, which all have BatchNorm and ReLU
self.layers = nn.ModuleList()
for i in range(n_layers - 1):
self.layers.extend(
[
nn.Linear(n_nodes[i], n_nodes[i + 1]),
nn.BatchNorm1d(n_nodes[i + 1]),
nn.ReLU(inplace=True),
]
)
# we will add dropout to the first layer for regularization
if i == 0:
self.layers.append(nn.Dropout(0.1))
# create the output layer, which doesn't have BatchNorm or ReLU
self.layers.append(nn.Linear(n_nodes[-2], n_nodes[-1]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Map image features and meta parameters onto Zernikes.
Parameters
----------
x: torch.Tensor
Input vector of image features and meta parameters.
Returns
-------
torch.Tensor
Array of Zernike coefficients. Size = cls.N_ZERNIKES
"""
for layer in self.layers:
x = layer(x)
return x
```
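A minimal shape check for the network described above, with random placeholder inputs and assuming the module is importable as `ml_aos.david_net`: a batch of 256x256 donut images plus the three meta parameters should map to 18 Zernike coefficients.

```python
# Shape sanity check for DavidNet; all inputs are random placeholders.
import torch

from ml_aos.david_net import DavidNet

model = DavidNet(n_meta_layers=3).eval()       # eval() so BatchNorm uses running stats

image = torch.randn(2, 1, 256, 256)            # batch of donut images
fx = torch.randn(2, 1)                         # field angle x
fy = torch.randn(2, 1)                         # field angle y
intra = torch.ones(2, 1)                       # 1 = intrafocal, 0 = extrafocal

with torch.no_grad():
    zernikes = model(image, fx, fy, intra)

print(zernikes.shape)                          # torch.Size([2, 18])
```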
#### File: ml-aos/ml_aos/lightning.py
```python
from typing import Dict, Tuple
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
import wandb
from torch.utils.data import DataLoader
from ml_aos.dataloader import Donuts
from ml_aos.david_net import DavidNet as TorchDavidNet
class DonutLoader(pl.LightningDataModule):
"""Pytorch Lightning wrapper for the simulated Donuts DataSet."""
def __init__(
self,
background: bool = True,
badpix: bool = True,
dither: int = 5,
max_blend: float = 0.50,
center_brightest: bool = True,
normalize_pixels: bool = True,
convert_zernikes: bool = True,
mask_buffer: int = 0,
nval: int = 2 ** 16,
ntest: int = 2 ** 16,
split_seed: int = 0,
batch_size: int = 64,
num_workers: int = 16,
persistent_workers: bool = True,
pin_memory: bool = True,
data_dir: str = "/epyc/users/jfc20/thomas_aos_sims/",
) -> None:
"""Load the simulated Donuts data.
Parameters
----------
background: bool, default=True
Whether to add the sky background to the donut images.
badpix: bool, default=True
Whether to simulate bad pixels and columns.
dither: int, default=5
Maximum number of pixels to dither in both directions.
This simulates mis-centering.
max_blend: float, default=0.50
Maximum fraction of the central star to be blended. For images
with many blends, only the first handful of stars will be drawn,
stopping when the next star would pass this blend threshold.
center_brightest: bool, default=True
Whether to center the brightest star in blended images.
normalize_pixels: bool, default=True
Whether to normalize the pixel values using the mean and std
of the single-donut pixels.
convert_zernikes: bool, default=True
Whether to convert Zernike coefficients from units of r band
wavelength to quadrature contribution to PSF FWHM.
mask_buffer: int, default=0
The number of buffer pixels to add to outside of masks.
        nval: int, default=2 ** 16
            Number of donuts in the validation set.
        ntest: int, default=2 ** 16
            Number of donuts in the test set.
split_seed: int, default=0
Random seed for training set/test set/validation set selection.
batch_size: int, default=64
The batch size for SGD.
num_workers: int, default=16
The number of workers for parallel loading of batches.
        persistent_workers: bool, default=True
            Whether to keep the worker processes alive after the dataset has
            been consumed once.
pin_memory: bool, default=True
Whether to automatically put data in pinned memory (recommended
whenever using a GPU).
data_dir: str, default=/epyc/users/jfc20/thomas_aos_sims/
Location of the data directory. The default location is where
I stored the simulations on epyc.
"""
super().__init__()
self.save_hyperparameters()
def _build_loader(self, mode: str, shuffle: bool = False) -> DataLoader:
return DataLoader(
Donuts(
mode=mode,
background=self.hparams.background,
badpix=self.hparams.badpix,
dither=self.hparams.dither,
max_blend=self.hparams.max_blend,
center_brightest=self.hparams.center_brightest,
normalize_pixels=self.hparams.normalize_pixels,
convert_zernikes=self.hparams.convert_zernikes,
mask_buffer=self.hparams.mask_buffer,
nval=self.hparams.nval,
ntest=self.hparams.ntest,
split_seed=self.hparams.split_seed,
data_dir=self.hparams.data_dir,
),
batch_size=self.hparams.batch_size,
num_workers=self.hparams.num_workers,
persistent_workers=self.hparams.persistent_workers,
pin_memory=self.hparams.pin_memory,
shuffle=shuffle,
)
def train_dataloader(self) -> DataLoader:
"""Return the training DataLoader."""
return self._build_loader("train", shuffle=True)
def val_dataloader(self) -> DataLoader:
"""Return the validation DataLoader."""
return self._build_loader("val")
def test_dataloader(self) -> DataLoader:
"""Return the testing DataLoader."""
return self._build_loader("test")
class DavidNet(TorchDavidNet, pl.LightningModule):
"""Pytorch Lightning wrapper for training DavidNet."""
def __init__(self, n_meta_layers: int = 3) -> None:
"""Create the DavidNet.
Parameters
----------
n_meta_layers: int, default=3
Number of layers in the MetaNet inside the DavidNet. These
are the linear layers that map image features plus field
position to Zernike coefficients.
"""
# set up the DavidNet implemented in torch,
# as well as the LightningModule boilerplate
super().__init__(n_meta_layers=n_meta_layers)
# save the hyperparams in the log
self.save_hyperparameters()
def _predict(
self, batch: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Make predictions for a batch of donuts."""
# unpack the data
img = batch["image"]
z_true = batch["zernikes"]
fx = batch["field_x"]
fy = batch["field_y"]
intra = batch["intrafocal"]
# predict the zernikes
z_pred = self(img, fx, fy, intra)
return z_pred, z_true
def training_step(
self, batch: Dict[str, torch.Tensor], batch_idx: int
) -> torch.Tensor:
"""Calculate the loss of the training step."""
# calculate the MSE for the batch
z_pred, z_true = self._predict(batch)
mse = calc_mse(z_pred, z_true)
# log the mean rmse
self.log("train_rmse", torch.sqrt(mse).mean())
# loss = mean mse
loss = mse.mean()
self.log("train_loss", loss)
return loss
def validation_step(
self, batch: Dict[str, torch.Tensor], batch_idx: int
) -> Tuple[torch.Tensor, ...]:
"""Perform validation step."""
# calculate the MSE for the validation sample
z_pred, z_true = self._predict(batch)
mse = calc_mse(z_pred, z_true)
# log the mean rmse
self.log("val_rmse", torch.sqrt(mse).mean())
# log the loss
self.log("val_loss", mse.mean())
# for the first batch of the validation set, plot the Zernikes
if batch_idx == 0 and wandb.run is not None:
# draw the Zernike figure and convert to wandb image for logging
fig = wandb.Image(plot_zernikes(z_true.cpu(), z_pred.cpu()))
# log the image
wandb.log(
{"zernikes": fig, "global_step": self.trainer.global_step}
)
del fig
# calculate distance from the center of focal plane in meters
x = batch["focal_x"]
y = batch["focal_y"]
dist_rads = torch.sqrt(x ** 2 + y ** 2) # distance in radians
dist_arcsecs = dist_rads * 206_265 # distance in arcsecs
dist_microns = dist_arcsecs * 5 # distance in microns
dist_meters = dist_microns / 1e6
# get the fraction blended
frac_blended = batch["fraction_blended"]
val_outputs = torch.hstack((dist_meters, frac_blended, mse))
return val_outputs
def validation_epoch_end(self, val_outputs: torch.Tensor) -> None:
"""Compute metrics for the whole validation epoch."""
# extract the validation outputs
val_outputs = torch.stack(val_outputs).reshape(-1, 3)
frac_blended = val_outputs[:, 1]
mse = val_outputs[:, 2]
# compute the validation loss for the unblended stars
unblended_idx = torch.where(frac_blended < 0.01)
self.log("val_rmse_unblended", torch.sqrt(mse[unblended_idx]).mean())
self.log("val_loss_unblended", mse[unblended_idx].mean())
# compute the validation loss for the blended stars
blended_idx = torch.where(frac_blended >= 0.01)
self.log("val_rmse_blended", torch.sqrt(mse[blended_idx]).mean())
self.log("val_loss_blended", mse[blended_idx].mean())
def calc_mse(pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
"""Calculate the MSE for the predicted values.
Parameters
----------
pred: torch.Tensor
Array of predicted values
true: torch.Tensor
Array of true values
Returns
-------
torch.Tensor
Array of MSE values
"""
    return torch.mean((pred - true) ** 2, dim=1, keepdim=True)
def plot_zernikes(z_true: torch.Tensor, z_pred: torch.Tensor) -> plt.Figure:
"""Plot true and predicted zernikes (up to 8).
Parameters
----------
z_true: torch.Tensor
2D Array of true Zernike coefficients
z_pred: torch.Tensor
2D Array of predicted Zernike coefficients
Returns
-------
plt.Figure
Figure containing the 8 axes with the true and predicted Zernike
coefficients plotted together.
"""
# create the figure
fig, axes = plt.subplots(
2,
4,
figsize=(12, 5),
constrained_layout=True,
dpi=150,
sharex=True,
sharey=True,
)
# loop through the axes/zernikes
for ax, zt, zp in zip(axes.flatten(), z_true, z_pred):
ax.plot(zt, label="True")
ax.plot(zp, label="Predicted")
axes[0, 0].set(xticks=[]) # remove x ticks
axes[0, 0].legend() # add legend to first panel
# set axis labels
for ax in axes[:, 0]:
ax.set_ylabel("Arcsec FWHM")
for ax in axes[1, :]:
ax.set_xlabel("Zernike number (Noll)")
return fig
def plot_loss_vs_blended(
frac_blended: torch.Tensor, val_loss: torch.Tensor
) -> plt.Figure:
"""Plot validation loss vs the fraction blended
Parameters
----------
frac_blended: torch.Tensor
Array of blend fraction.
val_loss: torch.Tensor
Array of validation losses
Returns
-------
plt.Figure
Figure containing plot of validation loss vs fraction blended
"""
fig, ax = plt.subplots(constrained_layout=True, dpi=150)
ax.scatter(frac_blended[:100], val_loss[:100], marker=".", rasterized=True)
ax.set(xlabel="Fraction blended", ylabel="Validation loss [arcsec FWHM]")
return fig
def plot_loss_vs_distance(
distance: torch.Tensor, val_loss: torch.Tensor
) -> plt.Figure:
"""Plot validation loss vs the distance from the center of the focal
plane, in meters.
Parameters
----------
distance: torch.Tensor
Array of distances from the center of the focal plane, in meters
val_loss: torch.Tensor
Array of validation losses
Returns
-------
plt.Figure
Figure containing plot of validation loss vs distance from center
of the focal plane
"""
fig, ax = plt.subplots(constrained_layout=True, dpi=150)
ax.scatter(distance, val_loss, marker=".")
ax.set(
xlabel="Dist. from center of focal plane [m]",
ylabel="Validation loss [arcsec FWHM]",
)
return fig
```
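A hedged sketch of how DonutLoader and the Lightning DavidNet wrapper might be wired into a training run; the import path, the data directory, and the Trainer options are assumptions rather than anything fixed by the code above:
```python
# Sketch only: assumes the ml_aos package is installed and the simulated
# donut data live at the (placeholder) path below.
import pytorch_lightning as pl
from ml_aos.lightning import DonutLoader, DavidNet

dm = DonutLoader(batch_size=64, data_dir="/path/to/thomas_aos_sims/")  # placeholder path
model = DavidNet(n_meta_layers=3)
# add accelerator/device options appropriate for your Lightning version and hardware
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, datamodule=dm)
```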
|
{
"source": "jfcrenshaw/python-package",
"score": 2
}
|
#### File: python-package/package/module.py
```python
def f(x):
    """Return twice the input value."""
    return 2 * x
```
|
{
"source": "jfcrenshaw/pzflow",
"score": 2
}
|
#### File: pzflow/pzflow/flow.py
```python
import itertools
from typing import Any, Callable, Sequence, Tuple
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
from jax import grad, jit, ops, random
from jax.experimental.optimizers import Optimizer, adam
from pzflow import distributions
from pzflow.bijectors import Bijector_Info, InitFunction, Pytree
from pzflow.utils import build_bijector_from_info, gaussian_error_model
class Flow:
"""A normalizing flow that models tabular data.
Attributes
----------
data_columns : tuple
List of DataFrame columns that the flow expects/produces.
conditional_columns : tuple
List of DataFrame columns on which the flow is conditioned.
info : Any
Object containing any kind of info included with the flow.
Often describes the data the flow is trained on.
latent
The latent distribution of the normalizing flow.
        Has its own sample and log_prob methods.
"""
def __init__(
self,
data_columns: Sequence[str] = None,
bijector: Tuple[InitFunction, Bijector_Info] = None,
conditional_columns: Sequence[str] = None,
latent=None,
data_error_model: Callable = None,
condition_error_model: Callable = None,
autoscale_conditions: bool = True,
seed: int = 0,
info: Any = None,
file: str = None,
_dictionary: dict = None,
):
"""Instantiate a normalizing flow.
Note that while all of the init parameters are technically optional,
you must provide either data_columns and bijector OR file.
In addition, if a file is provided, all other parameters must be None.
Parameters
----------
data_columns : Sequence[str], optional
Tuple, list, or other container of column names.
These are the columns the flow expects/produces in DataFrames.
bijector : Bijector Call, optional
A Bijector call that consists of the bijector InitFunction that
initializes the bijector and the tuple of Bijector Info.
Can be the output of any Bijector, e.g. Reverse(), Chain(...), etc.
conditional_columns : Sequence[str], optional
Names of columns on which to condition the normalizing flow.
latent : distribution, optional
The latent distribution for the normalizing flow. Can be any of
the distributions from pzflow.distributions. If not provided,
a normal distribution is used with the number of dimensions
inferred.
data_error_model : Callable, optional
A callable that defines the error model for data variables.
data_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of data variables, where the order
of variables matches the order of the columns in data_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
data_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If data_error_model is not provided, a Gaussian error model is assumed.
condition_error_model : Callable, optional
A callable that defines the error model for conditional variables.
condition_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of conditional variables, where the order
of variables matches the order of the columns in conditional_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
condition_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If condition_error_model is not provided, a Gaussian error model is assumed.
autoscale_conditions : bool, default=True
Sets whether or not conditions are automatically standard scaled when
passed to a conditional flow. I recommend you leave this as True.
seed : int, default=0
The random seed for initial parameters
info : Any, optional
An object to attach to the info attribute.
file : str, optional
Path to file from which to load a pretrained flow.
If a file is provided, all other parameters must be None.
"""
# validate parameters
if (
data_columns is None
and bijector is None
and file is None
and _dictionary is None
):
raise ValueError("You must provide data_columns and bijector OR file.")
if data_columns is not None and bijector is None:
raise ValueError("Please also provide a bijector.")
if data_columns is None and bijector is not None:
raise ValueError("Please also provide data_columns.")
if any(
(
data_columns is not None,
bijector is not None,
conditional_columns is not None,
latent is not None,
data_error_model is not None,
condition_error_model is not None,
info is not None,
)
):
if file is not None:
raise ValueError(
"If providing a file, please do not provide any other parameters."
)
if _dictionary is not None:
raise ValueError(
"If providing a dictionary, please do not provide any other parameters."
)
if file is not None and _dictionary is not None:
raise ValueError("Only provide file or _dictionary, not both.")
# if file or dictionary is provided, load everything from it
if file is not None or _dictionary is not None:
save_dict = self._save_dict()
if file is not None:
with open(file, "rb") as handle:
save_dict.update(pickle.load(handle))
else:
save_dict.update(_dictionary)
if save_dict["class"] != self.__class__.__name__:
raise TypeError(
f"This save file isn't a {self.__class__.__name__}."
+ f"It is a {save_dict['class']}"
)
# load columns and dimensions
self.data_columns = save_dict["data_columns"]
self.conditional_columns = save_dict["conditional_columns"]
self._input_dim = len(self.data_columns)
self.info = save_dict["info"]
# load the latent distribution
self._latent_info = save_dict["latent_info"]
self.latent = getattr(distributions, self._latent_info[0])(
*self._latent_info[1]
)
# load the error models
self.data_error_model = save_dict["data_error_model"]
self.condition_error_model = save_dict["condition_error_model"]
# load the bijector
self._bijector_info = save_dict["bijector_info"]
init_fun, _ = build_bijector_from_info(self._bijector_info)
_, self._forward, self._inverse = init_fun(
random.PRNGKey(0), self._input_dim
)
self._params = save_dict["params"]
# load the conditional means and stds
self._condition_means = save_dict["condition_means"]
self._condition_stds = save_dict["condition_stds"]
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = save_dict["autoscale_conditions"]
# if no file is provided, use provided parameters
else:
self.data_columns = tuple(data_columns)
self._input_dim = len(self.data_columns)
self.info = info
if conditional_columns is None:
self.conditional_columns = None
self._condition_means = None
self._condition_stds = None
else:
self.conditional_columns = tuple(conditional_columns)
self._condition_means = np.zeros(len(self.conditional_columns))
self._condition_stds = np.ones(len(self.conditional_columns))
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = autoscale_conditions
# set up the latent distribution
if latent is None:
self.latent = distributions.Normal(self._input_dim)
else:
self.latent = latent
self._latent_info = self.latent.info
# set up the error models
if data_error_model is None:
self.data_error_model = gaussian_error_model
else:
self.data_error_model = data_error_model
if condition_error_model is None:
self.condition_error_model = gaussian_error_model
else:
self.condition_error_model = condition_error_model
# set up the bijector with random params
init_fun, self._bijector_info = bijector
bijector_params, self._forward, self._inverse = init_fun(
random.PRNGKey(seed), self._input_dim
)
self._params = (self.latent._params, bijector_params)
def _get_conditions(self, inputs: pd.DataFrame) -> np.ndarray:
"""Return an array of the bijector conditions."""
# if this isn't a conditional flow, just return empty conditions
if self.conditional_columns is None:
conditions = np.zeros((inputs.shape[0], 1))
# if this a conditional flow, return an array of the conditions
else:
columns = list(self.conditional_columns)
conditions = np.array(inputs[columns].values)
conditions = (conditions - self._condition_means) / self._condition_stds
return conditions
def _get_err_samples(
self,
key,
inputs: pd.DataFrame,
err_samples: int,
type: str = "data",
skip: str = None,
) -> np.ndarray:
"""Draw error samples for each row of inputs. """
X = inputs.copy()
# get list of columns
if type == "data":
columns = list(self.data_columns)
error_model = self.data_error_model
elif type == "conditions":
if self.conditional_columns is None:
return np.zeros((err_samples * X.shape[0], 1))
else:
columns = list(self.conditional_columns)
error_model = self.condition_error_model
else:
raise ValueError("type must be `data` or `conditions`.")
# make sure all relevant variables have error columns
for col in columns:
# if errors not provided for the column, fill in zeros
if f"{col}_err" not in inputs.columns and col != skip:
X[f"{col}_err"] = np.zeros(X.shape[0])
# if we are skipping this column, fill in nan's
elif col == skip:
X[col] = np.nan * np.zeros(X.shape[0])
X[f"{col}_err"] = np.nan * np.zeros(X.shape[0])
# pull out relevant columns
err_columns = [col + "_err" for col in columns]
X, Xerr = np.array(X[columns].values), np.array(X[err_columns].values)
# generate samples
Xsamples = error_model(key, X, Xerr, err_samples)
Xsamples = Xsamples.reshape(X.shape[0] * err_samples, X.shape[1])
# delete the column corresponding to skip
if skip is not None:
idx = columns.index(skip)
Xsamples = np.delete(Xsamples, idx, axis=1)
# if these are samples of conditions, standard scale them!
if type == "conditions":
Xsamples = (Xsamples - self._condition_means) / self._condition_stds
return Xsamples
def _log_prob(
self, params: Pytree, inputs: np.ndarray, conditions: np.ndarray
) -> np.ndarray:
"""Log prob for arrays."""
# calculate log_prob
u, log_det = self._forward(params[1], inputs, conditions=conditions)
log_prob = self.latent.log_prob(params[0], u) + log_det
# set NaN's to negative infinity (i.e. zero probability)
log_prob = np.nan_to_num(log_prob, nan=np.NINF)
return log_prob
def log_prob(
self, inputs: pd.DataFrame, err_samples: int = None, seed: int = None
) -> np.ndarray:
"""Calculates log probability density of inputs.
Parameters
----------
inputs : pd.DataFrame
Input data for which log probability density is calculated.
Every column in self.data_columns must be present.
If self.conditional_columns is not None, those must be present
as well. If other columns are present, they are ignored.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the log_prob calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0],).
"""
if err_samples is None:
# convert data to an array with columns ordered
columns = list(self.data_columns)
X = np.array(inputs[columns].values)
# get conditions
conditions = self._get_conditions(inputs)
# calculate log_prob
return self._log_prob(self._params, X, conditions)
else:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# get Gaussian samples
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
X = self._get_err_samples(key, inputs, err_samples, type="data")
C = self._get_err_samples(key, inputs, err_samples, type="conditions")
# calculate log_probs
log_probs = self._log_prob(self._params, X, C)
probs = np.exp(log_probs.reshape(-1, err_samples))
return np.log(probs.mean(axis=1))
def posterior(
self,
inputs: pd.DataFrame,
column: str,
grid: np.ndarray,
marg_rules: dict = None,
normalize: bool = True,
err_samples: int = None,
seed: int = None,
batch_size: int = None,
nan_to_zero: bool = True,
) -> np.ndarray:
"""Calculates posterior distributions for the provided column.
Calculates the conditional posterior distribution, assuming the
data values in the other columns of the DataFrame.
Parameters
----------
inputs : pd.DataFrame
Data on which the posterior distributions are conditioned.
Must have columns matching self.data_columns, *except*
for the column specified for the posterior (see below).
column : str
Name of the column for which the posterior distribution
is calculated. Must be one of the columns in self.data_columns.
However, whether or not this column is one of the columns in
`inputs` is irrelevant.
grid : np.ndarray
Grid on which to calculate the posterior.
marg_rules : dict, optional
Dictionary with rules for marginalizing over missing variables.
The dictionary must contain the key "flag", which gives the flag
that indicates a missing value. E.g. if missing values are given
the value 99, the dictionary should contain {"flag": 99}.
The dictionary must also contain {"name": callable} for any
variables that will need to be marginalized over, where name is
the name of the variable, and callable is a callable that takes
            the row of variables and returns a grid over which to marginalize
the variable. E.g. {"y": lambda row: np.linspace(0, row["x"], 10)}.
Note: the callable for a given name must *always* return an array
of the same length, regardless of the input row.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the posterior calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
batch_size : int, default=None
Size of batches in which to calculate posteriors. If None, all
posteriors are calculated simultaneously. Simultaneous calculation
is faster, but memory intensive for large data sets.
normalize : boolean, default=True
Whether to normalize the posterior so that it integrates to 1.
nan_to_zero : bool, default=True
Whether to convert NaN's to zero probability in the final pdfs.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0], grid.size).
"""
# get the index of the provided column, and remove it from the list
columns = list(self.data_columns)
idx = columns.index(column)
columns.remove(column)
nrows = inputs.shape[0]
batch_size = nrows if batch_size is None else batch_size
# make sure indices run 0 -> nrows
inputs = inputs.reset_index(drop=True)
if err_samples is not None:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# set the seed
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
# empty array to hold pdfs
pdfs = np.zeros((nrows, len(grid)))
# if marginalization rules were passed, we will loop over the rules
# and repeatedly call this method
if marg_rules is not None:
# if the flag is NaN, we must use np.isnan to check for flags
if onp.isnan(marg_rules["flag"]):
def check_flags(data):
return onp.isnan(data)
# else we use np.isclose to check for flags
else:
def check_flags(data):
return onp.isclose(data, marg_rules["flag"])
# first calculate pdfs for unflagged rows
unflagged_idx = inputs[
~check_flags(inputs[columns]).any(axis=1)
].index.tolist()
unflagged_pdfs = self.posterior(
inputs=inputs.iloc[unflagged_idx],
column=column,
grid=grid,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# save these pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[unflagged_idx, :],
unflagged_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# we will keep track of all the rows we've already calculated
# posteriors for
already_done = unflagged_idx
# now we will loop over the rules in marg_rules
for name, rule in marg_rules.items():
# ignore the flag, because that's not a column in the data
if name == "flag":
continue
# get the list of new rows for which we need to calculate posteriors
flagged_idx = inputs[check_flags(inputs[name])].index.tolist()
flagged_idx = list(set(flagged_idx).difference(already_done))
# if flagged_idx is empty, move on!
if len(flagged_idx) == 0:
continue
# get the marginalization grid for each row
marg_grids = (
inputs.iloc[flagged_idx]
.apply(rule, axis=1, result_type="expand")
.values
)
# make a new data frame with the marginalization grids replacing
# the values of the flag in the column
marg_inputs = pd.DataFrame(
np.repeat(
inputs.iloc[flagged_idx].values, marg_grids.shape[1], axis=0
),
columns=inputs.columns,
)
marg_inputs[name] = marg_grids.reshape(marg_inputs.shape[0], 1)
# remove the error column if it's present
marg_inputs.drop(f"{name}_err", axis=1, inplace=True, errors="ignore")
# calculate posteriors for these
marg_pdfs = self.posterior(
inputs=marg_inputs,
column=column,
grid=grid,
marg_rules=marg_rules,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# sum over the marginalized dimension
marg_pdfs = marg_pdfs.reshape(
len(flagged_idx), marg_grids.shape[1], grid.size
)
marg_pdfs = marg_pdfs.sum(axis=1)
# save the new pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[flagged_idx, :],
marg_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# add these flagged indices to the list of rows already done
already_done += flagged_idx
# now for the main posterior calculation loop
else:
# loop through batches
for batch_idx in range(0, nrows, batch_size):
# get the data batch
                # and, if this is a conditional flow, the corresponding conditions
batch = inputs.iloc[batch_idx : batch_idx + batch_size]
# if not drawing samples, just grab batch and conditions
if err_samples is None:
conditions = self._get_conditions(batch)
batch = np.array(batch[columns].values)
# if only drawing condition samples...
elif len(self.data_columns) == 1:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = np.repeat(batch[columns].values, err_samples, axis=0)
# if drawing data and condition samples...
else:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = self._get_err_samples(
key, batch, err_samples, skip=column, type="data"
)
# make a new copy of each row for each value of the column
# for which we are calculating the posterior
batch = np.hstack(
(
np.repeat(batch[:, :idx], len(grid), axis=0,),
np.tile(grid, len(batch))[:, None],
np.repeat(batch[:, idx:], len(grid), axis=0,),
)
)
# make similar copies of the conditions
conditions = np.repeat(conditions, len(grid), axis=0)
# calculate probability densities
log_prob = self._log_prob(self._params, batch, conditions).reshape(
(-1, len(grid))
)
prob = np.exp(log_prob)
# if we were Gaussian sampling, average over the samples
if err_samples is not None:
prob = prob.reshape(-1, err_samples, len(grid))
prob = prob.mean(axis=1)
# add the pdfs to the bigger list
pdfs = ops.index_update(
pdfs,
ops.index[batch_idx : batch_idx + batch_size, :],
prob,
indices_are_sorted=True,
unique_indices=True,
)
if normalize:
# normalize so they integrate to one
pdfs = pdfs / np.trapz(y=pdfs, x=grid).reshape(-1, 1)
if nan_to_zero:
# set NaN's equal to zero probability
pdfs = np.nan_to_num(pdfs, nan=0.0)
return pdfs
def sample(
self,
nsamples: int = 1,
conditions: pd.DataFrame = None,
save_conditions: bool = True,
seed: int = None,
) -> pd.DataFrame:
"""Returns samples from the normalizing flow.
Parameters
----------
nsamples : int, default=1
The number of samples to be returned.
conditions : pd.DataFrame, optional
If this is a conditional flow, you must pass conditions for
each sample. nsamples will be drawn for each row in conditions.
save_conditions : bool, default=True
If true, conditions will be saved in the DataFrame of samples
that is returned.
seed : int, optional
Sets the random seed for the samples.
Returns
-------
pd.DataFrame
Pandas DataFrame of samples.
"""
# validate nsamples
assert isinstance(nsamples, int), "nsamples must be a positive integer."
assert nsamples > 0, "nsamples must be a positive integer."
if self.conditional_columns is not None and conditions is None:
raise ValueError(
f"Must provide the following conditions\n{self.conditional_columns}"
)
# if this isn't a conditional flow, get empty conditions
if self.conditional_columns is None:
conditions = np.zeros((nsamples, 1))
# otherwise get conditions and make `nsamples` copies of each
else:
conditions = self._get_conditions(conditions)
conditions = np.repeat(conditions, nsamples, axis=0)
# draw from latent distribution
u = self.latent.sample(self._params[0], conditions.shape[0], seed)
# take the inverse back to the data distribution
x = self._inverse(self._params[1], u, conditions=conditions)[0]
# if not conditional, or save_conditions is False, this is all we need
if self.conditional_columns is None or save_conditions is False:
x = pd.DataFrame(x, columns=self.data_columns)
# but if conditional and save_conditions is True,
# save conditions with samples
else:
            # unscale the conditions
conditions = conditions * self._condition_stds + self._condition_means
x = pd.DataFrame(
np.hstack((x, conditions)),
columns=self.data_columns + self.conditional_columns,
)
# return the samples!
return x
def _save_dict(self):
"""Returns the dictionary of all flow params to be saved."""
save_dict = {"class": self.__class__.__name__}
keys = [
"data_columns",
"conditional_columns",
"condition_means",
"condition_stds",
"data_error_model",
"condition_error_model",
"autoscale_conditions",
"info",
"latent_info",
"bijector_info",
"params",
]
for key in keys:
try:
save_dict[key] = getattr(self, key)
except AttributeError:
try:
save_dict[key] = getattr(self, "_" + key)
except AttributeError:
save_dict[key] = None
return save_dict
def save(self, file: str):
"""Saves the flow to a file.
Pickles the flow and saves it to a file that can be passed as
the `file` argument during flow instantiation.
WARNING: Currently, this method only works for bijectors that are
implemented in the `bijectors` module. If you want to save a flow
with a custom bijector, you either need to add the bijector to that
module, or handle the saving and loading on your end.
Parameters
----------
file : str
Path to where the flow will be saved.
Extension `.pkl` will be appended if not already present.
"""
save_dict = self._save_dict()
with open(file, "wb") as handle:
pickle.dump(save_dict, handle, recurse=True)
def train(
self,
inputs: pd.DataFrame,
epochs: int = 50,
batch_size: int = 1024,
optimizer: Optimizer = None,
loss_fn: Callable = None,
convolve_errs: bool = False,
seed: int = 0,
verbose: bool = False,
) -> list:
"""Trains the normalizing flow on the provided inputs.
Parameters
----------
inputs : pd.DataFrame
Data on which to train the normalizing flow.
Must have columns matching self.data_columns.
epochs : int, default=50
Number of epochs to train.
batch_size : int, default=1024
Batch size for training.
optimizer : jax Optimizer, default=adam(step_size=1e-3)
An optimizer from jax.experimental.optimizers.
loss_fn : Callable, optional
A function to calculate the loss: loss = loss_fn(params, x).
If not provided, will be -mean(log_prob).
convolve_errs : bool, default=False
Whether to draw new data from the error distributions during
each epoch of training. Assumes errors are Gaussian, and method
will look for error columns in `inputs`. Error columns must end
in `_err`. E.g. the error column for the variable `u` must be
`u_err`. Zero error assumed for any missing error columns.
seed : int, default=0
A random seed to control the batching and the (optional)
error sampling.
verbose : bool, default=False
If true, print the training loss every 5% of epochs.
Returns
-------
list
List of training losses from every epoch.
"""
# validate epochs
if not isinstance(epochs, int) or epochs <= 0:
raise ValueError("epochs must be a positive integer.")
# if no loss_fn is provided, use the default loss function
if loss_fn is None:
@jit
def loss_fn(params, x, c):
return -np.mean(self._log_prob(params, x, c))
# initialize the optimizer
optimizer = adam(step_size=1e-3) if optimizer is None else optimizer
opt_init, opt_update, get_params = optimizer
opt_state = opt_init(self._params)
# define the training step function
@jit
def step(i, opt_state, x, c):
params = get_params(opt_state)
gradients = grad(loss_fn)(params, x, c)
return opt_update(i, gradients, opt_state)
# get list of data columns
columns = list(self.data_columns)
# if this is a conditional flow, and autoscale_conditions == True
# save the means and stds of the conditional columns
if self.conditional_columns is not None and self._autoscale_conditions:
self._condition_means = np.array(
inputs[list(self.conditional_columns)].values.mean(axis=0)
)
condition_stds = np.array(
inputs[list(self.conditional_columns)].values.std(axis=0)
)
self._condition_stds = np.where(condition_stds != 0, condition_stds, 1)
# define a function to return batches
if convolve_errs:
def get_batch(sample_key, x, type):
return self._get_err_samples(sample_key, x, 1, type=type)
else:
def get_batch(sample_key, x, type):
if type == "conditions":
return self._get_conditions(x)
else:
return np.array(x[columns].values)
# get random seed for training loop
key = random.PRNGKey(seed)
if verbose:
print(f"Training {epochs} epochs \nLoss:")
# save the initial loss
X = np.array(inputs[columns].values)
C = self._get_conditions(inputs)
losses = [loss_fn(self._params, X, C)]
if verbose:
print(f"(0) {losses[-1]:.4f}")
# loop through training
itercount = itertools.count()
for epoch in range(epochs):
# new permutation of batches
permute_key, sample_key, key = random.split(key, num=3)
idx = random.permutation(permute_key, inputs.shape[0])
X = inputs.iloc[idx]
# loop through batches and step optimizer
for batch_idx in range(0, len(X), batch_size):
# if sampling from the error distribution, this returns a
# Gaussian sample of the batch. Else just returns batch as a
# jax array
batch = get_batch(
sample_key, X.iloc[batch_idx : batch_idx + batch_size], type="data"
)
batch_conditions = get_batch(
sample_key,
X.iloc[batch_idx : batch_idx + batch_size],
type="conditions",
)
opt_state = step(next(itercount), opt_state, batch, batch_conditions,)
# save end-of-epoch training loss
params = get_params(opt_state)
losses.append(
loss_fn(params, np.array(X[columns].values), self._get_conditions(X),)
)
if verbose and (
epoch % max(int(0.05 * epochs), 1) == 0 or (epoch + 1) == epochs
):
print(f"({epoch+1}) {losses[-1]:.4f}")
# update the flow parameters with the final training state
self._params = get_params(opt_state)
return losses
```
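For orientation before the tests, a short hedged sketch of typical Flow usage, mirroring the calls exercised in the tests below (the column names and toy data here are made up):
```python
# Sketch only: a two-column toy flow using the Reverse bijector, as in the tests below.
import jax.numpy as np
import pandas as pd
from pzflow import Flow
from pzflow.bijectors import Reverse

data = pd.DataFrame(
    np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]), columns=("redshift", "y")
)
flow = Flow(data_columns=("redshift", "y"), bijector=Reverse())
losses = flow.train(data, epochs=20, verbose=True)  # list of per-epoch losses
samples = flow.sample(4, seed=0)                    # DataFrame of 4 new samples
log_p = flow.log_prob(data)                         # log density for each row
pdfs = flow.posterior(data, column="redshift", grid=np.arange(0, 2.1, 0.12))
```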
#### File: pzflow/tests/test_examples.py
```python
import jax.numpy as np
import pandas as pd
from pzflow import examples
from pzflow import Flow
def test_get_twomoons_data():
data = examples.get_twomoons_data()
assert isinstance(data, pd.DataFrame)
assert data.shape == (10000, 2)
def test_get_galaxy_data():
data = examples.get_galaxy_data()
assert isinstance(data, pd.DataFrame)
assert data.shape == (100000, 7)
def test_get_city_data():
data = examples.get_city_data()
assert isinstance(data, pd.DataFrame)
assert data.shape == (47966, 5)
def test_get_example_flow():
flow = examples.get_example_flow()
assert isinstance(flow, Flow)
assert isinstance(flow.info, str)
samples = flow.sample(2)
flow.log_prob(samples)
grid = np.arange(0, 2.5, 0.5)
flow.posterior(samples, column="redshift", grid=grid)
```
#### File: pzflow/tests/test_flow.py
```python
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
import pytest
from jax import random
from pzflow import Flow
from pzflow.bijectors import Reverse, RollingSplineCoupling
from pzflow.distributions import *
@pytest.mark.parametrize(
"data_columns,bijector,info,file,_dictionary",
[
(None, None, None, None, None),
(("x", "y"), None, None, None, None),
(None, Reverse(), None, None, None),
(("x", "y"), None, None, "file", None),
(None, Reverse(), None, "file", None),
(None, None, "fake", "file", None),
(("x", "y"), Reverse(), None, None, "dict"),
(None, None, None, "file", "dict"),
],
)
def test_bad_inputs(data_columns, bijector, info, file, _dictionary):
with pytest.raises(ValueError):
Flow(
data_columns,
bijector=bijector,
info=info,
file=file,
_dictionary=_dictionary,
)
@pytest.mark.parametrize(
"flow",
[
Flow(("redshift", "y"), Reverse(), latent=Normal(2)),
Flow(("redshift", "y"), Reverse(), latent=Tdist(2)),
Flow(("redshift", "y"), Reverse(), latent=Uniform((-3, 3), (-3, 3))),
Flow(("redshift", "y"), Reverse(), latent=CentBeta(2)),
],
)
def test_returns_correct_shape(flow):
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=("redshift", "y"))
conditions = flow._get_conditions(x)
xfwd, xfwd_log_det = flow._forward(flow._params, xarray, conditions=conditions)
assert xfwd.shape == x.shape
assert xfwd_log_det.shape == (x.shape[0],)
xinv, xinv_log_det = flow._inverse(flow._params, xarray, conditions=conditions)
assert xinv.shape == x.shape
assert xinv_log_det.shape == (x.shape[0],)
nsamples = 4
assert flow.sample(nsamples).shape == (nsamples, x.shape[1])
assert flow.log_prob(x).shape == (x.shape[0],)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid, batch_size=2)
assert pdfs.shape == (x.shape[0], grid.size)
assert len(flow.train(x, epochs=11, verbose=True)) == 12
assert len(flow.train(x, epochs=11, verbose=True, convolve_errs=True)) == 12
@pytest.mark.parametrize("flag", [99, onp.nan,])
def test_posterior_with_marginalization(flag):
flow = Flow(("a", "b", "c", "d"), Reverse())
# test posteriors with marginalization
x = pd.DataFrame(np.arange(16).reshape(-1, 4), columns=("a", "b", "c", "d"))
grid = np.arange(0, 2.1, 0.12)
marg_rules = {
"flag": flag,
"b": lambda row: np.linspace(0, 1, 2),
"c": lambda row: np.linspace(1, 2, 3),
}
x["b"] = flag * np.ones(x.shape[0])
pdfs = flow.posterior(x, column="a", grid=grid, marg_rules=marg_rules)
assert pdfs.shape == (x.shape[0], grid.size)
x["c"] = flag * np.ones(x.shape[0])
pdfs = flow.posterior(x, column="a", grid=grid, marg_rules=marg_rules)
assert pdfs.shape == (x.shape[0], grid.size)
@pytest.mark.parametrize(
"flow,x,x_with_err",
[
(
Flow(("redshift", "y"), RollingSplineCoupling(2), latent=Normal(2)),
pd.DataFrame(
np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
columns=("redshift", "y"),
),
pd.DataFrame(
np.array(
[[1.0, 2.0, 0.1, 0.2], [3.0, 4.0, 0.2, 0.3], [5.0, 6.0, 0.1, 0.2]]
),
columns=("redshift", "y", "redshift_err", "y_err"),
),
),
(
Flow(
("redshift", "y"),
RollingSplineCoupling(2, n_conditions=2),
latent=Normal(2),
conditional_columns=("a", "b"),
),
pd.DataFrame(
np.array([[1.0, 2.0, 10, 20], [3.0, 4.0, 30, 40], [5.0, 6.0, 50, 60]]),
columns=("redshift", "y", "a", "b"),
),
pd.DataFrame(
np.array(
[
[1.0, 2.0, 10, 20, 0.1, 0.2, 1, 2],
[3.0, 4.0, 30, 40, 0.2, 0.3, 3, 4],
[5.0, 6.0, 50, 60, 0.1, 0.2, 5, 6],
]
),
columns=(
"redshift",
"y",
"a",
"b",
"redshift_err",
"y_err",
"a_err",
"b_err",
),
),
),
(
Flow(
("redshift", "y"),
RollingSplineCoupling(2, n_conditions=1),
latent=Normal(2),
conditional_columns=("a",),
),
pd.DataFrame(
np.array([[1.0, 2.0, 10], [3.0, 4.0, 30], [5.0, 6.0, 50]]),
columns=("redshift", "y", "a"),
),
pd.DataFrame(
np.array(
[
[1.0, 2.0, 10, 0.1, 0.2, 1],
[3.0, 4.0, 30, 0.2, 0.3, 3],
[5.0, 6.0, 50, 0.1, 0.2, 5],
]
),
columns=("redshift", "y", "a", "redshift_err", "y_err", "a_err",),
),
),
(
Flow(
("y",),
RollingSplineCoupling(1, n_conditions=2),
latent=Normal(1),
conditional_columns=("a", "b"),
),
pd.DataFrame(
np.array([[1.0, 10, 20], [3.0, 30, 40], [5.0, 50, 60]]),
columns=("y", "a", "b"),
),
pd.DataFrame(
np.array(
[
[1.0, 10, 20, 0.1, 1, 2],
[3.0, 30, 40, 0.2, 3, 4],
[5.0, 50, 60, 0.1, 5, 6],
]
),
columns=("y", "a", "b", "y_err", "a_err", "b_err",),
),
),
],
)
def test_error_convolution(flow, x, x_with_err):
assert flow.log_prob(x, err_samples=10).shape == (x.shape[0],)
assert np.allclose(flow.log_prob(x, err_samples=10, seed=0), flow.log_prob(x),)
assert ~np.allclose(
flow.log_prob(x_with_err, err_samples=10, seed=0), flow.log_prob(x_with_err),
)
assert np.allclose(
flow.log_prob(x_with_err, err_samples=10, seed=0),
flow.log_prob(x_with_err, err_samples=10, seed=0),
)
assert ~np.allclose(
flow.log_prob(x_with_err, err_samples=10, seed=0),
flow.log_prob(x_with_err, err_samples=10, seed=1),
)
assert ~np.allclose(
flow.log_prob(x_with_err, err_samples=10),
flow.log_prob(x_with_err, err_samples=10),
)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid, err_samples=10)
assert pdfs.shape == (x.shape[0], grid.size)
assert np.allclose(
flow.posterior(x, column="y", grid=grid, err_samples=10, seed=0),
flow.posterior(x, column="y", grid=grid),
rtol=1e-4,
)
assert np.allclose(
flow.posterior(x_with_err, column="y", grid=grid, err_samples=10, seed=0),
flow.posterior(x_with_err, column="y", grid=grid, err_samples=10, seed=0),
)
def test_posterior_batch():
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1, 2], [3, 4], [5, 6]])
x = pd.DataFrame(xarray, columns=columns)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid)
pdfs_batched = flow.posterior(
x.iloc[:, 1:], column="redshift", grid=grid, batch_size=2
)
assert np.allclose(pdfs, pdfs_batched)
def test_flow_bijection():
columns = ("x", "y")
flow = Flow(columns, Reverse())
x = np.array([[1, 2], [3, 4]])
xrev = np.array([[2, 1], [4, 3]])
assert np.allclose(flow._forward(flow._params, x)[0], xrev)
assert np.allclose(
flow._inverse(flow._params, flow._forward(flow._params, x)[0])[0], x
)
assert np.allclose(
flow._forward(flow._params, x)[1], flow._inverse(flow._params, x)[1]
)
def test_load_flow(tmp_path):
columns = ("x", "y")
flow = Flow(columns, Reverse(), info=["random", 42])
file = tmp_path / "test-flow.pzflow.pkl"
flow.save(str(file))
file = tmp_path / "test-flow.pzflow.pkl"
flow = Flow(file=str(file))
x = np.array([[1, 2], [3, 4]])
xrev = np.array([[2, 1], [4, 3]])
assert np.allclose(flow._forward(flow._params, x)[0], xrev)
assert np.allclose(
flow._inverse(flow._params, flow._forward(flow._params, x)[0])[0], x
)
assert np.allclose(
flow._forward(flow._params, x)[1], flow._inverse(flow._params, x)[1]
)
assert flow.info == ["random", 42]
with open(str(file), "rb") as handle:
save_dict = pickle.load(handle)
save_dict["class"] = "FlowEnsemble"
with open(str(file), "wb") as handle:
pickle.dump(save_dict, handle, recurse=True)
with pytest.raises(TypeError):
Flow(file=str(file))
def test_control_sample_randomness():
columns = ("x", "y")
flow = Flow(columns, Reverse())
assert onp.all(~onp.isclose(flow.sample(2), flow.sample(2)))
assert onp.allclose(flow.sample(2, seed=0), flow.sample(2, seed=0))
@pytest.mark.parametrize(
"epochs,loss_fn,", [(-1, None), (2.4, None), ("a", None),],
)
def test_train_bad_inputs(epochs, loss_fn):
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=columns)
with pytest.raises(ValueError):
flow.train(
x, epochs=epochs, loss_fn=loss_fn,
)
def test_conditional_sample():
flow = Flow(("x", "y"), Reverse(), conditional_columns=("a", "b"))
x = np.arange(12).reshape(-1, 4)
x = pd.DataFrame(x, columns=("x", "y", "a", "b"))
conditions = flow._get_conditions(x)
assert conditions.shape == (x.shape[0], 2)
with pytest.raises(ValueError):
flow.sample(4)
samples = flow.sample(4, conditions=x)
assert samples.shape == (4 * x.shape[0], 4)
samples = flow.sample(4, conditions=x, save_conditions=False)
assert samples.shape == (4 * x.shape[0], 2)
def test_train_no_errs_same():
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=columns)
losses1 = flow.train(x, convolve_errs=True)
losses2 = flow.train(x, convolve_errs=False)
assert np.allclose(np.array(losses1), np.array(losses2))
def test_get_err_samples():
rng = random.PRNGKey(0)
# check Gaussian data samples
columns = ("x", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1.0, 2.0, 0.1, 0.2], [3.0, 4.0, 0.3, 0.4]])
x = pd.DataFrame(xarray, columns=("x", "y", "x_err", "y_err"))
samples = flow._get_err_samples(rng, x, 10)
assert samples.shape == (20, 2)
# test skip
xarray = np.array([[1.0, 2.0, 0, 0]])
x = pd.DataFrame(xarray, columns=("x", "y", "x_err", "y_err"))
samples = flow._get_err_samples(rng, x, 10, skip="y")
assert np.allclose(samples, np.ones((10, 1)))
samples = flow._get_err_samples(rng, x, 10, skip="x")
assert np.allclose(samples, 2 * np.ones((10, 1)))
# check Gaussian conditional samples
flow = Flow(("x"), Reverse(), conditional_columns=("y"))
samples = flow._get_err_samples(rng, x, 10, type="conditions")
assert np.allclose(samples, 2 * np.ones((10, 1)))
# check incorrect type
with pytest.raises(ValueError):
flow._get_err_samples(rng, x, 10, type="wrong")
# check constant shift data samples
columns = ("x", "y")
shift_err_model = lambda key, X, Xerr, nsamples: np.repeat(
X + Xerr, nsamples, axis=0
).reshape(X.shape[0], nsamples, X.shape[1])
flow = Flow(columns, Reverse(), data_error_model=shift_err_model)
xarray = np.array([[1.0, 2.0, 0.1, 0.2], [3.0, 4.0, 0.3, 0.4]])
x = pd.DataFrame(xarray, columns=("x", "y", "x_err", "y_err"))
samples = flow._get_err_samples(rng, x, 10)
assert samples.shape == (20, 2)
assert np.allclose(
samples, shift_err_model(None, xarray[:, :2], xarray[:, 2:], 10).reshape(20, 2),
)
# check constant shift conditional samples
flow = Flow(
("x"),
Reverse(),
conditional_columns=("y"),
condition_error_model=shift_err_model,
)
samples = flow._get_err_samples(rng, x, 10, type="conditions")
assert np.allclose(samples, np.repeat(np.array([[2.2], [4.4]]), 10, axis=0))
def test_train_w_conditions():
xarray = np.array(
[[1.0, 2.0, 0.1, 0.2], [3.0, 4.0, 0.3, 0.4], [5.0, 6.0, 0.5, 0.6]]
)
x = pd.DataFrame(xarray, columns=("redshift", "y", "a", "b"))
flow = Flow(
("redshift", "y"), Reverse(), latent=Normal(2), conditional_columns=("a", "b")
)
assert len(flow.train(x, epochs=11)) == 12
print("------->>>>>")
print(flow._condition_stds, "\n\n")
print(xarray[:, 2:].std(axis=0))
assert np.allclose(flow._condition_means, xarray[:, 2:].mean(axis=0))
assert np.allclose(flow._condition_stds, xarray[:, 2:].std(axis=0))
```
|
{
"source": "jfcrenshaw/uw_stempals_demos",
"score": 3
}
|
#### File: uw_stempals_demos/uw_stempals_demos/pandemic_sim.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
# set animation backends
rc("animation", html="jshtml")
rc("animation", embed_limit=60)
def pandemicSim(
N: int = 50,
Nsick: int = 1,
pVacc: float = 0.0,
pMask: float = 50.0,
qMask: float = 75.0,
c: float = 1.0,
v: float = 1.0,
L: float = 1.0,
tRecover: int = 200,
tTotal: int = 550,
seed: int = 0,
) -> animation.FuncAnimation:
"""
Parameters
----------
N : int, default=50
Total number of people in the simulation. Must be a positive integer.
Nsick : int, default=1
Number of people sick at the beginning. Must be a positive integer < N.
pVacc : float, default=0
Percent of people vaccinated at the beginning. Must be a float between 0 and 100.
pMask : float, default=50
Percent of people wearing masks. Must be a float between 0 and 100.
qMask : float, default=75
Quality of the masks in percent reduction of contagiousness. Must be a float
between 0 and 100.
c : float, default=1
The contagiousness of the disease. Must be a positive float.
v : float, default=1
Speed of the people. Must be a positive float
L : float, default=1
Side length of the box the people move in. Must be a positive float.
        Note that technically the people move on a flat 2-torus (periodic boundary
        conditions), which has no isometric embedding in R^3. L is the torus
        circumference in both directions.
tRecover : int, default=200
The time to recover from the disease. Must be a positive integer.
tTotal : int, default=550
The total time of the simulation. Must be a positive integer.
seed : int, default=0
The random seed that determines the initial conditions and the paths walked
by the people during the simulation.
Returns
-------
anim : matplotlib.animation.FuncAnimation
"""
# --------------------------
# validate parameter values
# --------------------------
assert isinstance(N, int) & (N > 0), "N must be a positive integer."
assert isinstance(Nsick, int) & (
Nsick >= 0
), "Nsick must be a non-negative integer."
assert (pVacc >= 0) & (pVacc <= 100), "pVacc must be between 0 and 100."
assert (pMask >= 0) & (pMask <= 100), "pMask must be between 0 and 100."
assert (qMask >= 0) & (qMask <= 100), "qMask must be between 0 and 100."
assert c >= 0, "c must be non-negative."
assert v >= 0, "v must be non-negative."
assert L > 0, "L must be positive."
assert isinstance(tRecover, int) & (
tRecover >= 0
), "tRecover must be a non-negative integer."
assert isinstance(tTotal, int) & (tTotal > 0), "tTotal must be a positive integer."
assert isinstance(seed, int) & (seed >= 0), "seed must be non-negative."
# -----------------------
# set initial conditions
# -----------------------
rng = np.random.default_rng(seed)
x = rng.uniform(0, L, size=(N, 2)) # initial positions
phi = rng.uniform(0, 2 * np.pi, size=N) # initial directions
Nvacc = int(pVacc / 100 * N) # number of vaccinated people
Nnormal = N - Nsick - Nvacc # number of normal people
Nmasked = int(pMask / 100 * N) # number of masked people
status = Nnormal * ["normal"] + Nsick * ["sick"] + Nvacc * ["vaccinated"]
status = rng.permutation(np.array(status, dtype="<U10")) # initial statuses
masked, unmasked = np.split(rng.permutation(np.arange(N)), [Nmasked])
recover_counter = np.zeros(N)
recover_counter[status == "sick"] = 1 # days since infected for each person
# -----------------
# setup the figure
# -----------------
fig_inches = 8
wpad, hpad, mpad = 0.1, 0.15, 0.1
wsubplot = (1 - 2 * wpad - mpad) / 2
hsubplot = 1 - 2 * hpad
fig = plt.figure(figsize=(fig_inches, fig_inches / 2), dpi=150)
ax1 = fig.add_axes(
[wpad, hpad, wsubplot, hsubplot], xlim=(0, L), ylim=(0, L), xticks=[], yticks=[]
)
ax2 = fig.add_axes(
[wpad + wsubplot + mpad, hpad, wsubplot, hsubplot],
xlim=(0, tTotal),
ylim=(0, N),
)
ax2.set(xlabel="Time", ylabel="Number of sick people")
# bubble colors for each status
colors = {
"normal": "cornflowerblue",
"sick": "tomato",
"vaccinated": "orange",
"recovered": "mediumseagreen",
}
# set sizes of plotting elements
r_bbl = 0.04 * np.sqrt(c) * np.ones(N) # radius of the unmasked bubbles
r_bbl[masked] *= np.sqrt(1 - qMask / 100) # radius of masked bubbles
s_bbl = (72 * 2 * r_bbl * fig_inches * wsubplot / L) ** 2 * np.ones(N)
s_unmasked_ppl = 3 / L ** 2
s_masked_ppl = 10 / L ** 2
# bubbles around people
bbl = ax1.scatter(
x[:, 0], x[:, 1], c=np.vectorize(colors.get)(status), s=s_bbl, alpha=0.5
)
# markers for people
unmasked_ppl = ax1.scatter(x[unmasked, 0], x[unmasked, 1], c="k", s=s_unmasked_ppl)
masked_ppl = ax1.scatter(
x[masked, 0], x[masked, 1], c="k", s=s_masked_ppl, marker="x"
)
# track number of sick people
N_currently_sick, N_total_sick = [Nsick], [Nsick]
(currently_sick_ppl,) = ax2.plot(N_currently_sick, c="C3", label="Currently")
(total_sick_ppl,) = ax2.plot(N_total_sick, c="k", ls=":", label="Total")
ax2.legend(loc="upper left")
plt.close()
# ------------------------------------------------------
# define a function to perform a step of the simulation
# ------------------------------------------------------
def step(n):
# step forward
nonlocal x, phi
phi = rng.vonmises(phi, 10)
V = v / 100 * np.vstack([np.cos(phi), np.sin(phi)])
x = (x + V.T) % L
# new infections
if "normal" in status:
normal = np.where(status == "normal")[0]
sick = np.where(status == "sick")[0]
# calculate square distances of sick people to normal people
d2 = np.sum((x[sick, None] - x[normal]) ** 2, axis=-1)
# infect!
too_close = (
((d2 > 0) & (d2 < r_bbl[sick, None] ** 2))
.squeeze()
.astype(int)
.reshape(-1, len(normal))
)
if too_close.ndim > 1:
too_close = too_close.sum(axis=0)
new_cases = normal[too_close > 0]
status[new_cases] = "sick"
# record the number of sick people
N_currently_sick.append(np.where(status == "sick")[0].size)
N_total_sick.append(
np.where((status == "sick") | (status == "recovered"))[0].size
)
# update the plots
bbl.set_offsets(x)
bbl.set_color(np.vectorize(colors.get)(status))
unmasked_ppl.set_offsets(x[unmasked])
masked_ppl.set_offsets(x[masked])
currently_sick_ppl.set_data(np.arange(len(N_currently_sick)), N_currently_sick)
total_sick_ppl.set_data(np.arange(len(N_total_sick)), N_total_sick)
# recovery
status[recover_counter > tRecover] = "recovered"
recover_counter[status == "sick"] += 1
return bbl, unmasked_ppl, masked_ppl, currently_sick_ppl, total_sick_ppl
# ---------------------
# create the animation
# ---------------------
anim = animation.FuncAnimation(fig, step, frames=tTotal, interval=50, blit=True)
return anim
```
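A hedged example of driving the simulation; pandemicSim returns a matplotlib FuncAnimation, so it can be rendered inline in a notebook via the "jshtml" backend set above, or written to disk. The import path and parameter values are assumptions:
```python
# Sketch only: illustrative parameter values and an assumed import path.
from uw_stempals_demos.pandemic_sim import pandemicSim

anim = pandemicSim(N=100, Nsick=2, pVacc=30.0, pMask=60.0, seed=1)
# In a notebook, simply evaluating `anim` displays the animation.
# To save it instead (requires an available writer such as pillow or ffmpeg):
anim.save("pandemic.gif", writer="pillow", fps=20)
```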
|
{
"source": "jfcsantos/python-threads",
"score": 3
}
|
#### File: jfcsantos/python-threads/onu_logging.py
```python
import json
logBuffer = ""
class OnuLogging(object):
    """Thin wrapper object; the module-level helpers below do the actual logging."""
    def __init__(self, arg):
        super(OnuLogging, self).__init__()
        self.arg = arg
def logMessage(message):
    """Print a message and append it to the HTML log buffer."""
    global logBuffer
    print(message)
    logBuffer += "<p>" + message + "</p>"
def logErrorMessage(message, lineNumber):
    """Log an error message together with the offending line number."""
    logMessage("Line " + str(lineNumber))
    logMessage(message)
def logJsonMessage(message, jsonObj):
    """Log a message followed by a pretty-printed JSON object."""
    jsonString = json.dumps(jsonObj, sort_keys=True,
                            indent=4, separators=(',', ': '))
    logMessage(message)
    logMessage(jsonString)
def returnLog():
    """Return the accumulated HTML log buffer."""
    global logBuffer
    return logBuffer
```
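A short hedged sketch of how these helpers might be used; the module name follows the file path above, and the messages are made up:
```python
# Sketch only: builds up the HTML log buffer and reads it back.
import onu_logging

onu_logging.logMessage("Starting provisioning run")
onu_logging.logJsonMessage("Device record:", {"serial": "ABC123", "status": "online"})
html_log = onu_logging.returnLog()
print(html_log)  # "<p>Starting provisioning run</p><p>Device record:</p>..."
```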
|
{
"source": "jfdahl/Advent-of-Code-2019",
"score": 4
}
|
#### File: Advent-of-Code-2019/day04/code2.py
```python
import re
import numpy as np
start = 168630
stop = 718098
double = re.compile(r'(\d)\1')
triple = re.compile(r'(\d)\1\1')
def is_decreasing(num):
previous = None
for digit in str(num):
if not previous:
previous = digit
continue
if previous > digit:
return True
previous = digit
return False
v_is_decreasing = np.vectorize(is_decreasing)
def has_doubles(num):
return bool(double.search(str(num)))
v_has_doubles = np.vectorize(has_doubles)
def remove_triples(num):
    num = str(num)
    dbs = set(double.findall(num))
    tps = set(triple.findall(num))
    # keep only numbers with at least one double that is not part of a longer run
    return bool(dbs - tps)
v_remove_triples = np.vectorize(remove_triples)
data = np.arange(start, stop) # Create the initial data set
data = data[~v_is_decreasing(data)] # Remove the items containing decreasing sequences
data = data[v_has_doubles(data)] # Remove the items not containing doubles
print(f'Part 1: {len(data)}')
data = data[v_remove_triples(data)] # Keep only the items with a double that is not part of a longer run
print(f'Part 2: {len(data)}')
```
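A few hedged spot checks of the predicates above, which could be appended to the script or run interactively after the function definitions:
```python
# Spot checks of the predicates defined above.
assert is_decreasing(223450)       # 5 -> 0 decreases, so this candidate is rejected
assert not is_decreasing(111123)   # digits never decrease
assert has_doubles(122345)         # "22" is an adjacent pair
assert not remove_triples(123444)  # the only pair is part of a triple, so it fails part 2
assert remove_triples(111122)      # "22" stands alone, so it passes part 2
```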
|
{
"source": "jfdelgad/etherscanAPI",
"score": 2
}
|
#### File: etherscanAPI/etherscanAPI/etherscanAPI.py
```python
import requests
class etherscan:
def __init__(self, apikey, network):
self.network = network
self.apikey = apikey
if network=='mainnet':
self.apipath = 'https://api.etherscan.io/api?'
else:
self.apipath = 'https://api' + '-' + network + '.etherscan.io/api?'
# Acounts API
def getBalance(self, address):
payload = {'module':'account', 'action':'balance', 'address':address, 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getBalanceMulti(self, address):
payload = {'module':'account', 'action':'balancemulti', 'address':','.join(address), 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTransactions(self, address, fromblock, toblock):
payload = {'module':'account', 'action':'txlist', 'address':address, 'startblock':fromblock, 'endblock':toblock, 'sort':'asc', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getInternalTransactionsByAddress(self, address, fromblock, toblock):
payload = {'module':'account', 'action':'txlistinternal', 'address':address, 'startblock':fromblock, 'endblock':toblock, 'sort':'asc', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getInternalTransactionsByTxHash(self, txhash, fromblock, toblock):
payload = {'module':'account', 'action':'txlistinternal', 'txhash':txhash, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getERC20TransfersByAddress(self, address, fromblock, toblock):
payload = {'module':'account', 'action':'tokentx', 'address':address, 'startblock':fromblock, 'endblock':toblock, 'sort':'asc', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getERC20TransfersByContract(self, contractAddress, address, fromblock, toblock):
payload = {'module':'account', 'action':'tokentx', 'address':address, 'contractaddress':contractAddress, 'startblock':fromblock, 'endblock':toblock, 'sort':'asc', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getMinedBlocks(self, address):
payload = {'module':'account', 'action':'getminedblocks', 'address':address, 'blocktype':'blocks', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# Contracts API
def getContractABI(self, address):
payload = {'module':'contract', 'action':'getabi', 'address':address, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getSourceCode(self, address):
payload = {'module':'contract', 'action':'getsourcecode', 'address':address, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# Transactions API
def getReceiptStatus(self, txhash):
payload = {'module':'transaction', 'action':'gettxreceiptstatus', 'txhash':txhash, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getContractTxStatus(self, txhash):
payload = {'module':'transaction', 'action':'getstatus', 'txhash':txhash, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# Blocks
def getBlockRewards(self, blockNumber):
payload = {'module':'block', 'action':'getblockreward', 'blockno':blockNumber, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# GETH/PARITY Proxy API
def getBlockNumber(self):
        payload = {'module':'proxy', 'action':'eth_blockNumber', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getBlockByNumber(self, number):
payload = {'module':'proxy', 'action':'eth_getBlockByNumber', 'tag':hex(number), 'boolean':'true', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getBlockTransactionCountByNumber(self, number):
payload = {'module':'proxy', 'action':'eth_getBlockTransactionCountByNumber', 'tag':hex(number), 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getUncleByBlockNumberAndIndex(self, number, index):
payload = {'module':'proxy', 'action':'eth_getUncleByBlockNumberAndIndex', 'tag':hex(number), 'index':hex(index), 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTransactionByHash(self, txhash):
payload = {'module':'proxy', 'action':'eth_getTransactionByHash', 'txhash':txhash, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTransactionByBlockNumberAndIndex(self, number, index):
payload = {'module':'proxy', 'action':'eth_getTransactionByBlockNumberAndIndex', 'tag':hex(number), 'index':hex(index), 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTransactionCount(self, address):
payload = {'module':'proxy', 'action':'eth_getTransactionCount', 'address':address,'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def sendRawTransaction(self, signedTx):
payload = {'module':'proxy', 'action':'eth_sendRawTransaction', 'hex':signedTx, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTransactionReceipt(self, txhash):
payload = {'module':'proxy', 'action':'eth_getTransactionReceipt', 'txhash':txhash, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def call(self, to, data):
        payload = {'module':'proxy', 'action':'eth_call', 'to':to, 'data':data, 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getCode(self, address):
payload = {'module':'proxy', 'action':'eth_getCode', 'address':address, 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getStorageAt(self, address, position):
payload = {'module':'proxy', 'action':'eth_getStorageAt', 'address':address, 'position':hex(position), 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def gasPrice(self):
payload = {'module':'proxy', 'action':'eth_gasPrice', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def estimateGas(self, to, value, gasprice, gas):
payload = {'module':'proxy', 'action':'eth_estimateGas', 'to':to, 'value':hex(value), 'gasPrice':hex(gasprice), 'gas':hex(gas), 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# EVENT logs
def getLogs(self, fromBlock, toBlock, address, topics, topicsOperator):
payload = {'module':'logs', 'action':'getLogs', 'fromBlock':fromBlock, 'toBlock':toBlock, 'apikey':self.apikey}
payload.update(dict(topics,**topicsOperator))
return requests.get(self.apipath, params=payload).json()['result']
# Token
def getTokenTotalSupply(self, contractAddress):
payload = {'module':'account', 'action':'tokensupply', 'contractaddress':contractAddress, 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getTokenBalance(self, address, contractAddress):
payload = {'module':'account', 'action':'tokenbalance', 'contractaddress':contractAddress, 'address':address, 'tag':'latest', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
# Stats
def getEtherSupply(self):
payload = {'module':'stats', 'action':'ethsupply', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
def getEtherPrice(self):
payload = {'module':'stats', 'action':'ethprice', 'apikey':self.apikey}
return requests.get(self.apipath, params=payload).json()['result']
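    # Hedged usage sketch (not part of the original library; 'YourApiKeyToken' is a placeholder):
    #   client = etherscan('YourApiKeyToken', 'mainnet')
    #   print(client.getEtherPrice())
    #   print(client.getBalance('0x0000000000000000000000000000000000000000'))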
```
|
{
"source": "JFDesigner/MTG",
"score": 2
}
|
#### File: scripts/mtg/mtgGUI.py
```python
import os
import sys
import time
import subprocess
from functools import partial as par
import threading
import Queue
try:
import maya.cmds as cmds
import maya.OpenMaya as Om
import maya.mel as mel
import maya.utils as mu
except:
pass
import mayaSnippet.mayaFuncs as Mf
import mtg.terrainWave as Tw
import mtg.mtgMain as Main
CLIFF_COLOUR = (0.41, 0.311468, 0.26937)
GRASS_COLOUR = (0.15478, 0.494, 0.138814)
SNOW_COLOUR = (1, 1, 1)
if sys.platform == 'win32':
def open_file(fileDir):
"""Used to open a file with the default program on Windows32 OS system
Parameter:
fileDir [str] : The directory of the file to be opened.
On Exit:
Opens the file with the default program on the system."""
try:
os.startfile(fileDir)
return True, None, None
except WindowsError:
return False, 0, fileDir
except Exception as e:
return False, None, e.args
elif sys.platform == "darwin":
def open_file(file_directory):
"""Used to open a file with the default program on a Darwin OS system
Parameter:
file_directory [str] : The directory of the file to be opened.
On Exit:
Opens the file with the default program on the system."""
        try:
            subprocess.call(["open", file_directory])
            return True, None, None
        except Exception as e:
            return False, None, e.args
else:
def open_file(file_directory):
"""Used to open a file with the default program on a Linux OS system
Parameter:
file_directory [str] : The directory of the file to be opened.
On Exit:
Opens the file with the default program on the system."""
        try:
            subprocess.call(["xdg-open", file_directory])
            return True, None, None
        except Exception as e:
            return False, None, e.args
class GenerateTerrainThread(threading.Thread):
def __init__(self, queue, songInfo, deformMag, pObjectNam, axis, sSelect, falloffCurve, falloffMode, falloffRadius,
negativeValues, separateDeformDirection, reverseSong, refresh):
threading.Thread.__init__(self)
self.daemon = False
self.q = queue
self.signal = False
self.songInfo = songInfo
self.deformMag = deformMag
self.pObjectNam = pObjectNam
self.axis = axis
self.sSelect = sSelect
self.falloffCurve = falloffCurve
self.falloffMode = falloffMode
self.falloffRadius = falloffRadius
self.negativeValues = negativeValues
self.separateDeformDir = separateDeformDirection
self.reverse = reverseSong
self.refresh = refresh
def run(self):
mu.executeInMainThreadWithResult(Main.music_displace, self.songInfo, self.deformMag, self.pObjectNam, self.axis, self.sSelect, self.falloffCurve,
self.falloffMode, self.falloffRadius, self.negativeValues, self.separateDeformDir,
self.reverse, self.refresh, self.q)
class MTGGui:
"""Creates the Music Terrain Generator
Parameters:
winID [str] : The window name. This is to ensure that no two of the
same window exist at the same time.
Attributes:
win [str] : The actual name of the newly created
window.
wavFilter [str] : The string used to filter files in the
open file dialog.
currentSong [str] : The name of the current song loaded into
the program.
currentSongDir [None][str] : The current song's directory on the disc
drive.
songInfo [None][object] : The song info class object from
terrainWave.TerrainWaveFile.
fileTextures [dict] : Stores all the files from the file
directories. The string name
'cliffTextures' and 'grassTextures'
are the only values currently used.
cliffTypeFolders [list] : The list of all the folders in the
main.CLIFF_TEX folder
self.newFileJob [str] : The name of the script job so to close the
script when a new file is created to
remove the chance of errors
polygonObjTFGrp [str] : The name of the polygon object text field
group
songText [str] : The name of the currently loaded song text
label
musicLoadGrp [str] : The name of the Music Location text field
button group
loadMusicB [str] : The name of the load music button
playMusicB [str] : The name of the play music button
reloadMusicB [str] : The name of the reload music button
clearMusicB [str] : The name of the clear music button
soundScrb [str] : The name of the Sound Scrubber preview
control
soundCtrl [str] : Then name of the sound control to change the
Sound Scrubber
deformMagFSLGrp [str] : The name of the Deform Magnitude Float
slider group
deformDirCBGrp [str] : The name of the Deform Direction Check Box
group
otherOptCBGrp [str] : The name of the Other Options Check Box
group
sSelectCB [str] : The name of the Soft Select check box
sSelectReset [str] : The name of the Soft Select reset button
falloffModeOMGrp [str] : The name of the soft select Falloff mode
Option Menu group
falloffRadFSlGrp [str] : The name of the soft select Falloff radius
float slider group
falloffCurveRow [str] : The name of the soft select falloff curve
row
falloffCurveCtrl [str] : The name of the soft select falloff curve
graph control
interpolationOMGrp [str] : The name of the curve interpolation option
menu group
curvePresetsRow [str] : The name of the row containing all the curve
preset curves
generateTerrainB [str] : The name of the Generate Terrain Button
randomCTexRow [str] : The name of the random cliff texture row
randomCTexCB [str] : The name of the random cliff texture check
button
cliffTypesOMGrp [str] : The name of the cliff types option menu
group
nOfCliffTexIF [str] : The name of the cliff texture integer field
texRepeatU [str] : The name of the texture repeat U float
slider
texRepeatV [str] : The name of the texture repeat V float
slider
texNoiseU [str] : The name of the texture noise U float
slider
texNoiseV [str] : The name of the texture noise V float
slider
bDepth [str] : The name of the bump depth float slider
texPosFrame [str] : The name of the texture positioning frame
ramp [str] : The name of the current ramp preview name in
the interface
rampPreviewImg [str] : The name of the ramp preview swatch display
rampType [str] : The name of the ramp type option menu group
rampInterpol [str] : The name of the ramp interpolation option
menu group
rampCliffTex [str] : The name of the ramp cliff texture colour
port
resetButton [str] : The name of the ramp reset button
entryTypeOMGrp [str] : The name of the ramp entry type option menu
cliffEntry [str] : The name of the cliff entry menu item of
entryTypeOMGrp
snowEntry [str] : The name of the snow entry menu item of
entryTypeOMGrp
grassEntry [str] : The name of the grass entry menu item of
entryTypeOMGrp
createEntryButton [str] : The name of the create entry button for the
ramp preview
uWaveSl [str] : The name of the ramp U Wave float slider
vWaveSl [str] : The name of the ramp V Wave float slider
noiseSl [str] : The name of the ramp noise float slider
freqSl [str] : The name of the ramp noise frequency slider
generateTerrainB [str] : The name of the generate terrain button
"""
def __init__(self, winID='mtgScriptWindow'):
if cmds.window(winID, exists=True):
cmds.deleteUI(winID)
self.win = cmds.window(winID, title='MTG :: Music Terrain Generator',
iconName='MTG', widthHeight=(570,770))
self.wavFilter = "WAV Files (*.wav);;All Files (*.*)"
self.currentSong = '...'
self.currentSongDir = None
self.songInfo = None
self.fileTextures = {}
self.create_interface()
self.newFileJob = cmds.scriptJob(event=['deleteAll', self.end],
protected=True)
self.queue = Queue.Queue()
self.complete = False
cmds.showWindow(self.win)
def end(self):
"""Used to, when a new file is created, to close the window and end
the scriptJob which causes it to do this.
On Exit:
Closes the window on a new file creation and kills the script job
that allows it.
"""
if cmds.window(self.win, exists=True):
cmds.deleteUI(self.win)
cmds.scriptJob(kill=self.newFileJob, force=True)
def error_message(self, errNo=None, value=''):
"""Used to create error messages or notifications to the user about a
specific error.
Parameters:
errNo [None][int] : The number pertaining to the error message.
value [str] : The value pertaining to the error message.
Usually the cause of the error.
On Exit:
Creates a error message dialog for the user to be notified of the
problem with the program.
"""
if errNo == 0:
cmds.confirmDialog(title='Error',
                               message='The system cannot find the file "%s".'\
' The song will not be played.' % value,
icon="warning")
elif errNo == 1:
cmds.confirmDialog(title='Error',
message='No value has been entered into the '\
'"Music Location" field.\nPlease enter a WAV '\
'file directory to load.', icon="warning")
elif errNo == 2:
cmds.confirmDialog(title='Error',
message='The file location you have entered '\
'into the "Music Location" field is incorrect.'\
'\nPlease enter a valid WAV file directory to '\
'load.', icon="warning")
elif errNo == 3:
cmds.confirmDialog(title='Error',
message='The file location for the song "%s" '\
'has been moved or deleted.\nThe song will now'\
' be unloaded from the program.' % value,
icon="warning")
elif errNo == 4:
cmds.confirmDialog(title='Error',
message='No object has the name "%s" in the '\
'scene.\nPlease select another object.' % value,
icon="warning")
elif errNo == 5:
cmds.confirmDialog(title='Error',
message='You have not selected a direction for'\
' the terrain to be deformed in.\nPlease '\
'select one to continue.', icon="warning")
elif errNo == 6:
cmds.confirmDialog(title='Error',
message='The current song you have loaded has'\
' been moved or removed from the hard drive. '\
'\n Please try loading a new song. This file'\
' will be unloaded', icon="warning")
elif errNo == 7:
cmds.confirmDialog(title='Error',
message='You have not loaded a song into the'\
' program.\nPlease do this before continuing.',
icon="warning")
else:
if 'RIFF' in value[0]:
cmds.confirmDialog(title='Error',
message='This is not a WAV file.\nPlease '\
'select a valid WAV file.', icon="warning")
else:
cmds.confirmDialog(title='Unknown Error',
message='This is an unknown error. Here is'\
' the message:\n%s' % value, icon="warning")
def music_browse(self, *args):
"""Creates a open file dialog window to find the location of the song
file on the disc drive.
Parameters:
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
Finds a wave file on the disc drive and sets the text field to the
found files location.
"""
filename = cmds.fileDialog2(fileMode=1, caption="Import Music",
fileFilter=self.wavFilter)
if filename != None:
cmds.textFieldButtonGrp(self.musicLoadGrp, e=True,
fileName=filename[0])
def load_song(self, *args):
"""Loads the song in the musicLoadGrp field and loads it into maya and
creates a TerrainWaveFile with it.
Parameters:
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
Loads the song into the program and stores the information into
the self.songInfo variable. It also activates the buttons related
to the newly loaded song.
"""
filename = cmds.textFieldButtonGrp(self.musicLoadGrp, q=True,
fileName=True)
if filename == '':
self.error_message(1, filename)
else:
self.clear_song()
try:
self.songInfo = Tw.TerrainWaveFile(os.path.abspath(filename), self.queue)
except IOError:
self.error_message(2, filename)
except Exception as e:
self.error_message(value=str(e.args))
else:
self.currentSong = mel.eval('doSoundImportArgList ("1", {"%s","0"})'
% filename)
self.currentSongDir = filename
endFrame = cmds.getAttr('%s.endFrame' % self.currentSong)
cmds.soundControl(self.soundScrb, e=True,
sound=self.currentSong, maxTime=endFrame)
cmds.text(self.songText, e=True, label=self.currentSong)
self.enable_disable_widgets((self.reloadMusicB,
self.playMusicB,
self.clearMusicB), enable=True)
def reload_song(self, *args):
"""Reloads the currently loaded song.
Parameters:
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
Reloads the song into the program and stores the information into
the self.songInfo variable.
"""
try:
self.songInfo = Tw.TerrainWaveFile(self.currentSongDir, self.queue)
except IOError:
self.error_message(3, self.currentSong)
self.clear_song()
except:
self.error_message(6)
self.clear_song()
self.currentSong = mel.eval('doSoundImportArgList ("1", {"%s","0"})'
% self.currentSongDir)
def play_song(self, *args):
"""Attempts to open the chosen song file using the systems default
music player.
Parameters:
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
On success, will open the file and play the song in the default
music player.
"""
filename = cmds.textFieldButtonGrp(self.musicLoadGrp, q=True,
fileName=True)
check, errNo, msg = open_file(filename)
if not(check):
self.error_message(errNo, msg)
def clear_song(self, *args):
"""Clears the currently loaded song in the program.
Parameters:
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
Clears the song from the program aswell as the songInfo stored in
the program. It also disables the related widgets with a loaded
song.
"""
if self.currentSong != '...':
cmds.delete(self.currentSong)
self.currentSong = '...'
self.currentSongDir = None
cmds.text(self.songText, e=True, label=self.currentSong)
self.songInfo.close()
self.songInfo = None
self.enable_disable_widgets((self.reloadMusicB, self.playMusicB,
self.clearMusicB), enable=False)
def enable_disable_widgets(self, widgets, enable=True):
"""This command is used to disable the widgets/controls passed to the
function.
Parameters:
widgets [str][list] : The widget(s) that will be either enabled or
disabled dependent on the 'enable' parameter.
enable [bool] : If True, the widgets will be enabled, else
disabled
On Exit:
The listed widgets/controls will be disabled or enabled.
"""
if isinstance(widgets, (str,unicode)):
cmds.control(widgets, e=True, enable=enable)
else:
for widg in widgets:
cmds.control(widg, e=True, enable=enable)
def visbile_invisible_widgets(self, widgets, visible=True):
"""This command is used to change the visibility of the
widgets/controls passed to the function.
Parameters:
widgets [str][list] : The widget(s) that will be either enabled or
disabled dependent on the 'enable' parameter.
visible [bool] : If True, the widgets will be visible, else
invisible
On Exit:
The listed widgets/controls will be visible or invisible.
"""
if isinstance(widgets, (str,unicode)):
cmds.control(widgets, e=True, visible=visible)
else:
for widg in widgets:
cmds.control(widg, e=True, visible=visible)
def toggle_sselect_widgets(self, state, *args):
"""Used to change the enable state of the soft select widgets/controls.
Parameters:
state [bool] : The state for which the widgets will be turned to.
args [tuple] : Ignore value. The value is returned by the button
and is unused.
On Exit:
The soft select widgets/controls will be enabled or disabled.
"""
widgets = (self.falloffModeOMGrp, self.falloffRadFSlGrp,
self.falloffCurveRow, self.interpolationOMGrp,
self.curvePresetsRow)
self.enable_disable_widgets(widgets, state)
def select_obj(self):
"""Used for the polygonObjTFGrp. Stores the first currently selected
object in the Maya scene if it is a polygon object.
Parameters:
An object selection in Maya.
On Exit:
Either, changes the field to be the name of a selected poly object
or clears the field.
"""
obj = cmds.ls(selection=True)
if len(obj) != 0:
obj = obj[0]
if Mf.poly_check(obj):
cmds.textFieldButtonGrp(self.polygonObjTFGrp, e=True, text=obj)
else:
cmds.textFieldButtonGrp(self.polygonObjTFGrp, e=True, text='')
def default_falloff_curve(self):
"""Sets up the option variable in Maya for the interfaces soft select
curve graph.
On Exit:
Either stores the current graph values of the default falloff curve
for soft select or re-uses the values already stored in the option
variable.
"""
if cmds.optionVar(q="softSelectCurve") == 0:
graphValues = Mf.setup_graph_values(cmds.softSelect(q=True,
softSelectCurve=True))
self.setup_falloff_curve(values=graphValues)
else:
pass
def reset_falloff_curve(self):
"""Used to reset the soft select falloff curve curve to the default
variable in the option variable.
On Exit:
Sets the current falloff curve to the graph values in the soft
select option variable.
"""
graphValues = cmds.optionVar(q='softSelectCurveOptionVar')
self.change_falloff_curve_prest(values=graphValues)
def setup_falloff_curve(self, values, stringValues=('softSelectCurve',
'softSelectCurveOptionVar')):
"""Sets up the falloff curve option variables with values.
Parameters:
values [list] : Values used to set the option variables
storing the soft select falloff curves
stringValues [str][tuple] : The name of the option variables to
which the values will change for.
On Exit:
Stores the values in the option variables passed in 'stringValues'
"""
if isinstance(stringValues, (str, unicode)):
stringValues = (stringValues,)
for optVal in stringValues:
cmds.optionVar(stringValue=[optVal, values[0]])
for optVal in stringValues:
for val in values[1:]:
cmds.optionVar(stringValueAppend=[optVal, val])
def falloff_curve_change_key(self, *args):
"""Updates the interpolation of the selected soft select curve graph in
the interpolation option menu to match the graph curve..
Parameters:
args [tuple] : Ignore value. The value is returned by the control
and is unused.
On Exit:
Changes the interpolation option menu to the one set on the curve
"""
curKeyInterpVal = cmds.gradientControlNoAttr(self.falloffCurveCtrl,
q=True, civ=True)
cmds.optionMenuGrp(self.interpolationOMGrp, e=True,
select=curKeyInterpVal+1)
def change_falloff_key_interp(self, *args):
"""Changes the currently selected key of the soft select curve graph to
the value in the interpolationOMGrp.
Parameters:
args [tuple] : Ignore value. The value is returned by the control
and is unused.
On Exit:
Sets the currently selected point on the soft select curve to the
interpolation value in the interpolationOMGrp.
"""
curKey = cmds.gradientControlNoAttr(self.falloffCurveCtrl, q=True,
ck=True)
newInterpVal = cmds.optionMenuGrp(self.interpolationOMGrp, q=True,
select=True)
cmds.gradientControlNoAttr(self.falloffCurveCtrl, e=True, ck=curKey,
civ=newInterpVal-1)
self.falloff_curve_change_key()
def change_falloff_curve_prest(self, preset=None, values=None):
"""Changes the falloff graph curve to one of the selected presets or
to the values passed to the function.
Parameters:
preset [str][int][None] : Can either be the name of a preset or the
number of the preset as the key for the
mf.SSELECT_CURVES.
values [str][None] : Can be used instead of preset to set the
value of the curve.
On Exit:
Sets the value of the falloff curve graph by setting the value
of the option variable 'softSelectCurve'.
"""
pare = cmds.gradientControlNoAttr(self.falloffCurveCtrl, q=True,
parent=True)
cmds.deleteUI(self.falloffCurveCtrl)
if preset != None:
self.setup_falloff_curve(Mf.setup_graph_values(Mf.SSELECT_CURVES[preset]),
'softSelectCurve')
elif values != None:
self.setup_falloff_curve(values, 'softSelectCurve')
else:
raise ValueError('No Value has been specified. Either "preset" or'\
' "values" should be a non None value.')
self.falloffCurveCtrl = cmds.gradientControlNoAttr('falloffCurveGradient',
h=90,
optionVar='softSelectCurve',
width=200,
ckc=self.falloff_curve_change_key,
parent=pare)
def reset_soft_select_options(self, *args):
cmds.optionMenuGrp(self.falloffModeOMGrp, e=True, select=1)
cmds.floatSliderGrp(self.falloffRadFSlGrp, e=True, value=5)
self.reset_falloff_curve()
def cycle_preview_img(self, texVar, fNode, *args):
curImgPath = cmds.getAttr('%s.fileTextureName' % fNode)
curImgNo = self.fileTextures[texVar].index(curImgPath)
if curImgNo >= len(self.fileTextures[texVar])-1:
newImgNo = 0
else:
newImgNo = curImgNo+1
self.set_preview(newImgNo, texVar, fNode)
def set_preview(self, num, texVar, fNode):
imgPath = self.fileTextures[texVar][num]
cmds.setAttr('%s.fileTextureName' % fNode,imgPath, type='string')
def update_file_node_swatch(self, fNode):
mel.eval('updateFileNodeSwatch("%s")' % fNode)
def update_preview(self, texVar, imgDir, fNode, all=False, *args):
if all:
self.fileTextures[texVar] = []
for root, _, filenames in os.walk(imgDir):
if os.path.split(root)[1] == '.mayaSwatches':
continue
self.fileTextures[texVar].extend([os.path.join(root,f).replace('\\','/') \
for f in filenames])
else:
value = cmds.optionMenuGrp(args[0], q=True, value=True)
texPath = os.path.join(imgDir,value)
self.fileTextures[texVar] = [os.path.join(texPath,f).replace('\\','/') \
for f in os.listdir(texPath) \
if os.path.isfile(os.path.join(texPath,f))]
self.set_preview(0, texVar, fNode)
def toggle_randomtex(self, texVar, imgDir, field, fNode, *args):
state=cmds.checkBox(args[0], q=True, value=True)
self.update_preview(texVar, imgDir, fNode, all=True)
self.enable_disable_widgets(field, not(state))
def toggle_visble_grasstex(self, state, *args):
self.visbile_invisible_widgets(self.grassTexturesFrame, state)
self.check_other_tex_states()
def check_other_tex_states(self, *args):
snow = cmds.checkBox(self.snowTexCB, q=True, value=1)
grass = cmds.checkBox(self.grassTexCB, q=True, value=1)
if snow or grass:
self.visbile_invisible_widgets(self.texPosFrame, True)
else:
self.visbile_invisible_widgets(self.texPosFrame, False)
if not(snow):
self.replace_ramp_entry_type(self.ramp, SNOW_COLOUR)
if not(grass):
self.replace_ramp_entry_type(self.ramp, GRASS_COLOUR)
if not(grass) and not(snow):
self.clear_ramp(self.ramp)
cmds.menuItem(self.snowEntry, e=True, enable=snow)
cmds.menuItem(self.grassEntry, e=True, enable=grass)
cmds.optionMenuGrp(self.entryTypeOMGrp, e=True, select=1)
def setup_preview_file(self, name):
if cmds.optionVar(exists='mtg_%s' % name) and cmds.objExists(cmds.optionVar(q='mtg_%s' % name)):
fileNode = cmds.optionVar(q='mtg_%s' % name)
else:
fileNode = cmds.createNode('file', name='mtg_%s' % name, skipSelect=True)
cmds.lockNode(fileNode)
cmds.optionVar(stringValue=('mtg_%s' % name, fileNode))
return fileNode
def setup_texture_pos_ramp(self):
if cmds.optionVar(exists='mtg_texPositionRamp') and cmds.objExists(cmds.optionVar(q='mtg_texPositionRamp')):
self.ramp = cmds.optionVar(q='mtg_texPositionRamp')
else:
self.ramp = cmds.createNode('ramp', name='mtg_texPositionRamp', skipSelect=True)
cmds.lockNode(self.ramp)
cmds.optionVar(stringValue=('mtg_texPositionRamp', self.ramp))
self.reset_ramp(self.ramp, self.snowTexCB, self.grassTexCB)
def get_tex_ramp_info(self, ramp):
rampColPos = {'ramp': ramp}
for index in cmds.getAttr('%s.colorEntryList' % ramp, mi=True):
rampColPos[index] = (cmds.getAttr('%s.colorEntryList[%d].position'
% (rampColPos['ramp'], index)),
cmds.getAttr('%s.colorEntryList[%d].color'
% (rampColPos['ramp'], index))[0])
return rampColPos
def reset_ramp_colours(self, ramp):
graphInfo = self.get_tex_ramp_info(ramp)
for key, value in graphInfo.items():
if key == 'ramp':
continue
closestColour = Mf.closest_colour(value[1], (GRASS_COLOUR,CLIFF_COLOUR,SNOW_COLOUR))
if closestColour == GRASS_COLOUR:
cmds.setAttr(*('%s.colorEntryList[%d].color' % (self.ramp, key),)
+ GRASS_COLOUR, type='double3')
elif closestColour == CLIFF_COLOUR:
cmds.setAttr(*('%s.colorEntryList[%d].color' % (self.ramp, key),)
+ CLIFF_COLOUR, type='double3')
elif closestColour == SNOW_COLOUR:
cmds.setAttr(*('%s.colorEntryList[%d].color' % (self.ramp, key),)
+ SNOW_COLOUR, type='double3')
def replace_ramp_entry_type(self, ramp, remove):
graphInfo = self.get_tex_ramp_info(ramp)
for key, value in graphInfo.items():
if key == 'ramp':
continue
closestColour = Mf.closest_colour(value[1],
(GRASS_COLOUR,CLIFF_COLOUR,SNOW_COLOUR))
if closestColour == remove:
cmds.removeMultiInstance('%s.colorEntryList[%d]' % (ramp,key))
self.reset_ramp_colours(ramp)
def clear_ramp(self,ramp):
entriesLs = cmds.getAttr('%s.colorEntryList' % ramp, mi=True)
for eNum in entriesLs:
cmds.removeMultiInstance('%s.colorEntryList[%d]' % (ramp,eNum))
cmds.setAttr(*('%s.colorEntryList[0].color' % ramp,) + CLIFF_COLOUR,
type='double3')
def reset_ramp(self, ramp, snowCtrl, grassCtrl, *args):
cmds.setAttr(*('%s.colorEntryList[0].color' % ramp,) + CLIFF_COLOUR,
type='double3')
cmds.setAttr('%s.colorEntryList[0].position' % ramp, 0.5)
snow = cmds.checkBox(snowCtrl, q=True, value=True)
grass = cmds.checkBox(grassCtrl, q=True, value=True)
entriesLs = cmds.getAttr('%s.colorEntryList' % ramp, mi=True)
for eNum in entriesLs:
if eNum == 0:
continue
cmds.removeMultiInstance('%s.colorEntryList[%d]' % (ramp,eNum))
if snow:
cmds.setAttr(*('%s.colorEntryList[1].color' % ramp,) + SNOW_COLOUR,
type='double3')
cmds.setAttr('%s.colorEntryList[1].position' % ramp, 1)
if grass:
cmds.setAttr(*('%s.colorEntryList[2].color' % ramp,) + GRASS_COLOUR,
type='double3')
cmds.setAttr('%s.colorEntryList[2].position' % ramp, 0)
def create_entry(self, entryType, ramp, *args):
eType = cmds.optionMenuGrp(entryType, q=True, value=True)
entriesLs = cmds.getAttr('%s.colorEntryList' % ramp, mi=True)
newEntryNum = Mf.find_empty_entry_value(entriesLs)
if eType == 'Cliff':
color = CLIFF_COLOUR
elif eType == 'Snow':
color = SNOW_COLOUR
elif eType == 'Grass':
color = GRASS_COLOUR
else:
color = (0,0,0)
cmds.setAttr(*('%s.colorEntryList[%d].color' % (ramp,newEntryNum),)+color,
type='double3')
cmds.setAttr('%s.colorEntryList[%d].position' % (ramp,newEntryNum), 0.5)
def generate_terrain(self, *args):
checkBoxOpt = {'axis': ''}
check = True
pObjectNam = cmds.textFieldButtonGrp(self.polygonObjTFGrp, q=True,
tx=True)
deformMag = cmds.floatSliderGrp(self.deformMagFSlGrp, q=True,
v=True)
for val, axis in enumerate(('x', 'y', 'z', 'n'), 1):
if cmds.checkBoxGrp(self.deformDirCBGrp,
**{"q": True, "v%d" % val: True}):
checkBoxOpt['axis'] += axis
for val, option in enumerate(('negativeValues',
'separateDeformDirection',
'reverseSong', 'refresh'), 1):
checkBoxOpt[option] = cmds.checkBoxGrp(self.otherOptCBGrp,
**{"q": True, "v%d" % val: True})
sSelect = cmds.checkBox(self.sSelectCB, q=True, v=True)
falloffMode = cmds.optionMenuGrp(self.falloffModeOMGrp, q=True,
sl=True)-1
falloffRadius = cmds.floatSliderGrp(self.falloffRadFSlGrp, q=True,
v=True)
falloffCurve = ",".join(cmds.optionVar(q='softSelectCurve'))
if self.currentSongDir is not None and not(os.path.exists(self.currentSongDir)):
self.error_message(6)
check = False
if self.currentSongDir is None:
self.error_message(7)
check = False
if not(cmds.objExists(pObjectNam)):
self.error_message(4, pObjectNam)
check = False
if checkBoxOpt['axis'] == '':
self.error_message(5)
check = False
if check:
msg = "Starting"
ratio = 1
currentProgress = 0
completeMessage = "{}: {}%".format(msg, currentProgress)
cmds.progressWindow(title='Generating Terrain', progress=currentProgress,
status=completeMessage, isInterruptable=False)
thread = GenerateTerrainThread(self.queue, self.songInfo, deformMag, pObjectNam,
checkBoxOpt['axis'], sSelect, falloffCurve, falloffMode, falloffRadius,
checkBoxOpt['negativeValues'], checkBoxOpt['separateDeformDirection'],
checkBoxOpt['reverseSong'], checkBoxOpt['refresh'])
mu.processIdleEvents()
thread.start()
self.complete = False
while not self.complete:
while not self.queue.empty():
getM = self.queue.get(block=False)
if type(getM) == tuple:
ratio = 100.0/getM[1]
msg = getM[0]
currentProgress = 0
completeMessage = "{}: {}%".format(msg, currentProgress)
elif type(getM) == str and getM.lower() == "complete":
self.complete=True
elif getM == None:
pass
else:
currentProgress = int(getM * ratio)
completeMessage = "{}: {}%".format(msg, currentProgress)
cmds.progressWindow(edit=True, progress=currentProgress, status=completeMessage)
cmds.pause(seconds=0.001)
mu.processIdleEvents()
cmds.progressWindow(endProgress=1)
def generate_texture(self, *args):
check=True
pObjectNam = cmds.textFieldButtonGrp(self.polygonObjTFGrp, q=True, tx=True)
cRandomTex = cmds.checkBox(self.randomCTexCB, q=True, value=True)
cTexType = cmds.optionMenuGrp(self.cliffTypesOMGrp, q=True, value=True)
cNumOfTex = cmds.intField(self.nOfCliffTexIF, q=True, value=True)
snowTex = cmds.checkBox(self.snowTexCB, q=True, value=True)
grassTex = cmds.checkBox(self.grassTexCB, q=True, value=True)
gRandomTex = cmds.checkBox(self.randomGTexCB, q=True, value=True)
gTexType = cmds.optionMenuGrp(self.grassTypesOMGrp, q=True, value=True)
gNumofTex = cmds.intField(self.nOfGrassTexIF, q=True, value=True)
texRepU = cmds.floatSliderGrp(self.texRepeatU, q=True, value=True)
texRepV = cmds.floatSliderGrp(self.texRepeatV, q=True, value=True)
        texNoiseU = cmds.floatSliderGrp(self.texNoiseU, q=True, value=True)
texNoiseV = cmds.floatSliderGrp(self.texNoiseV, q=True, value=True)
imgColSpace = cmds.optionMenuGrp(self.imageColOMGrp, q=True, select=True)
renColSpace = cmds.optionMenuGrp(self.renderColOMGrp, q=True, select=True)
bDepth = cmds.floatSliderGrp(self.bDepth, q=True, value=True)
rampType = cmds.getAttr('%s.type' % self.ramp)
rampInterp = cmds.getAttr('%s.interpolation' % self.ramp)
rampUWave = cmds.getAttr('%s.uWave' % self.ramp)
rampVWave = cmds.getAttr('%s.vWave' % self.ramp)
rampNoise = cmds.getAttr('%s.noise' % self.ramp)
rampNoiseFreq = cmds.getAttr('%s.noiseFreq' % self.ramp)
self.reset_ramp_colours(self.ramp)
rampInfo = self.get_tex_ramp_info(self.ramp)
cliffPos = []
grassPos = []
snowPos = []
for key,value in rampInfo.items():
if key == 'ramp':
continue
v = Mf.closest_colour(value[1], (GRASS_COLOUR,CLIFF_COLOUR,SNOW_COLOUR))
if v == CLIFF_COLOUR:
cliffPos.append(value[0])
elif v == GRASS_COLOUR:
grassPos.append(value[0])
elif v == SNOW_COLOUR:
snowPos.append(value[0])
else:
raise ValueError('Something has gone wrong with ramp colours')
if not(cmds.objExists(pObjectNam)):
self.error_message(4, pObjectNam)
check = False
if check:
tInfo = Main.create_texture(cTexType, nOfCTex=cNumOfTex,
cRandTexs=cRandomTex, cliffPos=cliffPos,
snow=snowTex, snowPos=snowPos,
grass=grassTex, grassPos=grassPos,
grassType=gTexType, nOfGTex=gNumofTex,
gRandTexs=gRandomTex, uRep=texRepU,
vRep=texRepV, uNoise=texNoiseU,
vNoise=texNoiseV, bDepth=bDepth,
rampType=rampType, rampInterp=rampInterp,
rampUWave=rampUWave, rampVWave=rampVWave,
rampNoise=rampNoise, rampFreq=rampNoiseFreq,
colSpace=imgColSpace, rendSpace=renColSpace)
Main.assign_terrain_shader(tInfo['lambert'][1], pObjectNam,
tInfo['placements'])
cmds.select(tInfo['lambert'][0])
def create_interface(self):
mainForm = cmds.formLayout()
bannerPane = cmds.paneLayout(height=140,
bgc=(0.247059, 0.278431, 0.305882))
imageLayout = cmds.formLayout()
bannerImg = cmds.image( image=os.path.join(Main.MTG_DIRECTORY,
'banner.jpg'))
cmds.formLayout(imageLayout, e=True, attachForm=[(bannerImg, 'left', 0),
(bannerImg, 'right', 0),
(bannerImg, 'top', 0),
(bannerImg, 'bottom', 0)])
cmds.setParent(mainForm)
self.polygonObjTFGrp = cmds.textFieldButtonGrp(label='Polygon Object:',
buttonLabel='Select',
bc=self.select_obj,
adj=2, cw=[(1,100)],
cat=[(2,'left', 5)])
cmds.setParent('..')
tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5,
scr=True, cr=True)
cmds.formLayout(mainForm, e=True,
attachForm=[(bannerPane, 'left', 5),
(bannerPane, 'top', 5),
(bannerPane, 'right', 5),
(tabs, 'left', 5),
(tabs, 'right', 5),
(tabs, 'bottom', 5),
(self.polygonObjTFGrp, 'left', 5),
(self.polygonObjTFGrp, 'right', 5)],
attachControl=[(tabs, 'top', 2, self.polygonObjTFGrp),
(self.polygonObjTFGrp, 'top', 2, bannerPane)])
mainTerrainTab = cmds.columnLayout(adjustableColumn=True)
cmds.frameLayout(label='Load Music', borderStyle='in', cll=True)
cmds.rowLayout(numberOfColumns=2, adjustableColumn=2,
columnWidth2=(80, 75),
columnAlign=[(1, 'right'), (2, 'left')],
columnAttach=[(1, 'both', 0), (2, 'both', 0)])
cmds.text(label='Current Song:')
self.songText = cmds.text(label=self.currentSong, font="boldLabelFont")
cmds.setParent('..')
cmds.columnLayout(adjustableColumn=True)
self.musicLoadGrp = cmds.textFieldButtonGrp(label='Music Location',
buttonLabel='Browse',
buttonCommand=self.music_browse,
adj=2,
columnWidth3=(80, 75, 150))
musicForm = cmds.formLayout(numberOfDivisions=100)
self.loadMusicB = cmds.button(label='Load', command=self.load_song,
width=70)
self.playMusicB = cmds.button(label='Play', command=self.play_song,
width=70, enable=False)
self.reloadMusicB = cmds.button(label='Reload', command=self.reload_song,
width=70, enable=False)
self.clearMusicB = cmds.button(label='Clear', command=self.clear_song,
width=70, enable=False)
cmds.formLayout(musicForm, edit=True,
attachForm=[(self.loadMusicB, "bottom", 5),
(self.playMusicB, "bottom", 5),
(self.reloadMusicB, "bottom", 5),
(self.clearMusicB, "bottom", 5)],
attachControl=[(self.loadMusicB, 'right', 5, self.playMusicB),
(self.playMusicB, 'right', 5, self.reloadMusicB),
(self.reloadMusicB, 'right', 5, self.clearMusicB)],
attachPosition=[(self.loadMusicB, "left", 5, 15),
(self.playMusicB, "left", 5, 34),
(self.reloadMusicB, "left", 5, 53),
(self.clearMusicB, "left", 5, 71),
(self.clearMusicB, "right", 5, 90)],
attachNone=[(self.loadMusicB, "top"),
(self.playMusicB, "top"),
(self.reloadMusicB, "top"),
(self.clearMusicB, "top")])
cmds.setParent('..')
cmds.frameLayout(label='Sound Scrubber', borderStyle='in', cll=True)
cmds.columnLayout(adjustableColumn=True)
self.soundScrb = cmds.soundControl(height=45, displaySound=True,
waveform='both')
self.soundCtrl = cmds.rangeControl(minRange=1, maxRange=25, height=20)
cmds.setParent(mainTerrainTab)
cmds.frameLayout(label='Terrain Generator', borderStyle='in', cll=True)
terrainOptColLayout = cmds.columnLayout(adjustableColumn=True)
self.deformMagFSlGrp = cmds.floatSliderGrp(label='Deform Magnitude:',
field=True, minValue=0,
fieldMaxValue=100000,
value=10, adj=3,
cw=[(1,100)],
cat=[(2,'left', 5)])
self.deformDirCBGrp = cmds.checkBoxGrp(numberOfCheckBoxes=4,
label='Deform Direction:',
labelArray4=['X', 'Y', 'Z', 'N'],
cw=[(1,100),(2,50),(3,50),(4,50)],
cat=[(2,'left', 7)], value2=True)
self.otherOptCBGrp = cmds.checkBoxGrp(numberOfCheckBoxes=4,
label='Other Options:',
labelArray4=['Negative Values',
'Separate Deform Direction',
'Reverse Song', 'Refresh on Deform'],
height=23, cw=[(1,100),(3,150),(4,90)],
cat=[(2,'left', 7)])
cmds.frameLayout(label='Soft Select Options', borderStyle='in',
cll=True)
cmds.rowLayout(numberOfColumns=3, columnWidth3=(100, 150, 75),
columnAlign=[(1, 'right'), (2, 'left'), (3, 'left')],
columnAttach=[(1, 'right', 0), (2, 'left', 5), (3, 'left', 5)],
height=25)
cmds.text(label='Soft Select:')
self.sSelectCB = cmds.checkBox(label='', cc=self.toggle_sselect_widgets,
value=1)
self.sSelectResetB = cmds.button('Reset', width=50,
command=self.reset_soft_select_options)
cmds.setParent('..')
self.falloffModeOMGrp = cmds.optionMenuGrp(label='Falloff Mode:',
cw=[(1,100),(3,150)],
cat=[(2,'left', 5)])
cmds.menuItem(label='Volume')
cmds.menuItem(label='Surface')
cmds.menuItem(label='Global')
cmds.menuItem(label='Object')
self.falloffRadFSlGrp = cmds.floatSliderGrp(label='Falloff radius:',
field=True, minValue=0,
fieldMaxValue=100000,
value=5, precision=2,
adj=3, cw=[(1,100)],
cat=[(2,'left', 5)])
self.falloffCurveRow = cmds.rowLayout(numberOfColumns=2,
columnWidth2=(100, 200),
columnAlign=[(1, 'right'),
(2, 'left')],
columnAttach=[(1, 'both', 0),
(2, 'left', 5)])
self.default_falloff_curve()
cmds.text(label='Falloff curve:')
self.falloffCurveCtrl = cmds.gradientControlNoAttr(h=90,
optionVar='softSelectCurve',
width=200)
cmds.setParent('..')
self.interpolationOMGrp = cmds.optionMenuGrp(label='Interpolation:',
cw=[(1,100),(3,150)],
cat=[(2,'left', 5)],
changeCommand=self.change_falloff_key_interp)
cmds.menuItem(label='None')
cmds.menuItem(label='Linear')
cmds.menuItem(label='Smooth')
cmds.menuItem(label='Spline')
cmds.gradientControlNoAttr(self.falloffCurveCtrl, e=True,
ckc=self.falloff_curve_change_key)
self.curvePresetsRow = cmds.rowLayout(numberOfColumns=10,
columnWidth=[(1,100)],
columnAlign=[(1, 'right')],
columnAttach=[(1, 'right', 0),
(2, 'left', 5)],
height=25)
cmds.text(label='Curve presets:')
cmds.iconTextButton( style='iconOnly', image1='softCurveProfile.png',
c=par(self.change_falloff_curve_prest,'soft'))
cmds.iconTextButton( style='iconOnly', image1='mediumCurveProfile.png',
c=par(self.change_falloff_curve_prest,'medium'))
cmds.iconTextButton( style='iconOnly', image1='linearCurveProfile.png',
c=par(self.change_falloff_curve_prest,'linear'))
cmds.iconTextButton( style='iconOnly', image1='hardCurveProfile.png',
c=par(self.change_falloff_curve_prest,'hard'))
cmds.iconTextButton( style='iconOnly', image1='craterCurveProfile.png',
c=par(self.change_falloff_curve_prest,'crater'))
cmds.iconTextButton( style='iconOnly', image1='waveCurveProfile.png',
c=par(self.change_falloff_curve_prest,'wave'))
cmds.iconTextButton( style='iconOnly', image1='stairsCurveProfile.png',
c=par(self.change_falloff_curve_prest,'stairs'))
cmds.iconTextButton( style='iconOnly', image1='ringCurveProfile.png',
c=par(self.change_falloff_curve_prest,'ring'))
cmds.iconTextButton( style='iconOnly', image1='sineCurveProfile.png',
c=par(self.change_falloff_curve_prest,'sine'))
cmds.setParent(mainTerrainTab)
self.generateTerrainB = cmds.button(label='Generate Terrain!',
height=29,
command=self.generate_terrain)
cmds.setParent(tabs)
##########################################
mainTexturingTab = cmds.columnLayout(adjustableColumn=True)
cmds.frameLayout(label='Cliff Textures', borderStyle='in', cll=True)
cliffTypesForm = cmds.formLayout()
self.randomCTexRow = cmds.rowLayout(numberOfColumns=2,
columnWidth2=(115, 15),
columnAlign=[(1, 'right'),
(2, 'left')],
columnAttach=[(1, 'right', 0),
(2, 'left', 5)],
height=25)
cmds.text(label='Random Textures:')
self.randomCTexCB = cmds.checkBox(label='', value=0)
cmds.setParent('..')
self.cliffTypesOMGrp = cmds.optionMenuGrp(label='Cliff Types:',
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.cliffTypeFolders = [f for f in os.listdir(Main.CLIFF_TEX_DIR) \
if os.path.isdir(os.path.join(Main.CLIFF_TEX_DIR,f))]
for cliff in self.cliffTypeFolders:
cmds.menuItem(label=cliff)
nOfCTexRow = cmds.rowLayout(nc=2, cw=[(1,110)], cal=[(1,'right')],
cat=[(1,'right',0),(2,'left', 5)])
cmds.text(label='Number of Textures:')
self.nOfCliffTexIF = cmds.intField(v=3, min=1)
cmds.setParent('..')
cliffPrevText = cmds.text('Cliff Preview:')
cliffPreviewPL = cmds.paneLayout(w=65,h=65)
cliffFileNode = self.setup_preview_file('cliffTexturePreview')
cliffPreviewImg = cmds.swatchDisplayPort(wh=(65,65), sn=cliffFileNode,
pc=par(self.update_file_node_swatch, cliffFileNode))
self.update_preview('cliffTextures', Main.CLIFF_TEX_DIR, cliffFileNode,
False, self.cliffTypesOMGrp)
cmds.setParent('..')
self.cycleCliffPrevB = cmds.button(label='Cycle Images',
command=par(self.cycle_preview_img,
'cliffTextures',
cliffFileNode))
cmds.checkBox(self.randomCTexCB, e=True,
cc=par(self.toggle_randomtex,'cliffTextures',
Main.CLIFF_TEX_DIR, self.cliffTypesOMGrp,
cliffFileNode, self.randomCTexCB))
cmds.optionMenuGrp(self.cliffTypesOMGrp, e=True,
cc=par(self.update_preview, 'cliffTextures',
Main.CLIFF_TEX_DIR, cliffFileNode, False,
self.cliffTypesOMGrp))
cmds.formLayout(cliffTypesForm, e=True,
attachForm=[(self.randomCTexRow, 'left', 0),
(self.randomCTexRow, 'top', 5),
(self.cliffTypesOMGrp, 'left', 5),
(nOfCTexRow, 'left', 5),
(cliffPreviewPL, 'top', 5),
(cliffPreviewPL, 'bottom', 5)],
attachControl=[(cliffPrevText, 'left', 100, self.cliffTypesOMGrp),
(cliffPreviewPL, 'left', 5, cliffPrevText),
(nOfCTexRow, 'top', 5, self.cliffTypesOMGrp),
(self.cliffTypesOMGrp, 'top', 5, self.randomCTexRow),
(self.cycleCliffPrevB, 'left', 5, cliffPreviewPL)],
attachNone=[(nOfCTexRow,'bottom'),
(self.randomCTexRow, 'right'),
(cliffPrevText, 'bottom'),
(self.cycleCliffPrevB, 'right')],
attachPosition=[(self.cycleCliffPrevB, 'top', 0, 40),
(cliffPrevText, 'top', 0, 45)])
cmds.setParent(mainTexturingTab)
cmds.frameLayout(label='Other Textures', borderStyle='in', cll=True)
self.snowTexRow = cmds.rowLayout(numberOfColumns=2,
columnWidth2=(115, 15),
columnAlign=[(1, 'right'),
(2, 'left')],
columnAttach=[(1, 'right', 0),
(2, 'left', 5)],
height=25)
cmds.text(label='Snow Texture:')
self.snowTexCB = cmds.checkBox(label='', value=1,
cc=self.check_other_tex_states)
cmds.setParent('..')
self.grassTexRow = cmds.rowLayout(numberOfColumns=2,
columnWidth2=(115, 15),
columnAlign=[(1, 'right'),
(2, 'left')],
columnAttach=[(1, 'right', 0),
(2, 'left', 5)],
height=25)
cmds.text(label='Grass Texture:')
self.grassTexCB = cmds.checkBox(label='', value=1,
cc=self.toggle_visble_grasstex)
cmds.setParent('..')
self.grassTexturesFrame = cmds.frameLayout(label='Grass Textures',
borderStyle='in', cll=True)
grassTypesForm = cmds.formLayout()
self.randomGTexRow = cmds.rowLayout(numberOfColumns=2,
columnWidth2=(115, 15),
columnAlign=[(1, 'right'),
(2, 'left')],
columnAttach=[(1, 'right', 0),
(2, 'left', 5)],
height=25)
cmds.text(label='Random Textures:')
self.randomGTexCB = cmds.checkBox(label='', value=0)
cmds.setParent('..')
self.grassTypesOMGrp = cmds.optionMenuGrp(label='Grass Types:',
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.grassTypeFolders = [f for f in os.listdir(Main.GRASS_TEX_DIR) \
if os.path.isdir(os.path.join(Main.GRASS_TEX_DIR,f))]
for cliff in self.grassTypeFolders:
cmds.menuItem(label=cliff)
nOfGTexRow = cmds.rowLayout(nc=2, cw=[(1,110)], cal=[(1,'right')],
cat=[(1,'right',0),(2,'left', 5)])
cmds.text(label='Number of Textures:')
self.nOfGrassTexIF = cmds.intField(v=3, min=1)
cmds.setParent('..')
self.grassPrevText = cmds.text('Grass Preview:')
self.grassPreviewPL = cmds.paneLayout(width=65, height=65)
self.grassFileNode = self.setup_preview_file('grassTexturePreview')
self.grassPreviewImg = cmds.swatchDisplayPort(wh=(65,65),
sn=self.grassFileNode,
pc=par(self.update_file_node_swatch,
self.grassFileNode))
self.update_preview('grassTextures', Main.GRASS_TEX_DIR,
self.grassFileNode , False, self.grassTypesOMGrp)
cmds.setParent('..')
cycleGrassPrevB = cmds.button(label='Cycle Images',
command=par(self.cycle_preview_img,
'grassTextures',
self.grassFileNode))
cmds.checkBox(self.randomGTexCB, e=True,
cc=par(self.toggle_randomtex,'grassTextures',
Main.GRASS_TEX_DIR, self.grassTypesOMGrp,
self.grassFileNode, self.randomGTexCB))
cmds.optionMenuGrp(self.grassTypesOMGrp, e=True,
cc=par(self.update_preview, 'grassTextures',
Main.GRASS_TEX_DIR, self.grassFileNode,
False, self.grassTypesOMGrp))
cmds.formLayout(grassTypesForm, e=True,
attachForm=[(self.randomGTexRow, 'left', 0),
(self.randomGTexRow, 'top', 5),
(self.grassTypesOMGrp, 'left', 5),
(nOfGTexRow, 'left', 5),
(self.grassPreviewPL, 'top', 5),
(self.grassPreviewPL, 'bottom', 5)],
attachControl=[(self.grassPrevText, 'left', 85, self.grassTypesOMGrp),
(self.grassPreviewPL, 'left', 5, self.grassPrevText),
(nOfGTexRow, 'top', 5, self.grassTypesOMGrp),
(self.grassTypesOMGrp, 'top', 5, self.randomGTexRow),
(cycleGrassPrevB, 'left', 5, self.grassPreviewPL)],
attachNone=[(nOfGTexRow,'bottom'),
(self.randomGTexRow, 'right'),
(self.grassPrevText, 'bottom'),
(cycleGrassPrevB, 'right')],
attachPosition=[(cycleGrassPrevB, 'top', 0, 40),
(self.grassPrevText, 'top', 0, 45)])
cmds.setParent(mainTexturingTab)
otherOptions = cmds.frameLayout(label='Other Options',
borderStyle='in', cll=True)
self.texRepeatU = cmds.floatSliderGrp(label='Texture Repetition U:',
field=True, minValue=0,
maxValue=5, fieldMaxValue=100000,
value=1.25, precision=3, adj=3,
cw=[(1,110)], cat=[(2,'left', 5)])
self.texRepeatV = cmds.floatSliderGrp(label='Texture Repetition V:',
field=True, minValue=0, maxValue=5,
fieldMaxValue=100000, value=1.25,
precision=3, adj=3, cw=[(1,110)],
cat=[(2,'left', 5)])
self.texNoiseU = cmds.floatSliderGrp(label='Texture Noise U:',
field=True, minValue=0,
maxValue=5, fieldMaxValue=100000,
value=0.01, precision=3, adj=3,
cw=[(1,110)], cat=[(2,'left', 5)])
self.texNoiseV = cmds.floatSliderGrp(label='Texture Noise V:',
field=True, minValue=0,
maxValue=5, fieldMaxValue=100000,
value=0.01, precision=3, adj=3,
cw=[(1,110)], cat=[(2,'left', 5)])
self.bDepth = cmds.floatSliderGrp(label='Bump Depth:', field=True,
minValue=-5, maxValue=5,
fieldMaxValue=100000,
fieldMinValue=-100000, value=0.4,
precision=3, adj=3, cw=[(1,110)],
cat=[(2,'left', 5)])
cmds.rowLayout(nc=2, cw=[(1, 210)], cal=[(1, 'right')], cat=[(1, 'right', 0), (2, 'left', 5)])
self.imageColOMGrp = cmds.optionMenuGrp(label='Image Colour Space:',
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
cmds.menuItem('sRGB')
cmds.menuItem('Linear sRGB')
self.renderColOMGrp = cmds.optionMenuGrp(label='Render Colour Space:',
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
cmds.menuItem('sRGB')
cmds.menuItem('Linear sRGB')
cmds.setParent('..')
cmds.setParent('..')
self.texPosFrame = cmds.frameLayout(label='Texture Positioning',
borderStyle='in', cll=True)
self.setup_texture_pos_ramp()
sampleRow = cmds.rowLayout(numberOfColumns=2, cw=[(1,225)],
cal=[(1,'right')], cat=[(1,'right',5)])
cmds.text('Sample')
cmds.paneLayout(width=65, height=65)
self.rampPreviewImg = cmds.swatchDisplayPort(wh=(65, 65),
sn=self.ramp,
pc=par(self.update_file_node_swatch,
self.ramp))
cmds.setParent(self.texPosFrame)
cmds.columnLayout(adj=True)
self.rampType = cmds.attrEnumOptionMenuGrp(label='Type',
attribute='%s.type' % self.ramp,
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.rampInterpol = cmds.attrEnumOptionMenuGrp(label='Interpolation',
attribute='%s.interpolation' % self.ramp,
cw=[(1,110),(3,150)], cat=[(2,'left', 5)])
rampRow = cmds.rowLayout(nc=2, cw=[(1,275)], cal=[(1,'right')],
cat=[(1,'right',5)])
self.rampCliffTex = cmds.rampColorPort(node=self.ramp)
self.resetButton = cmds.button(label='Reset',
command=par(self.reset_ramp, self.ramp,
self.snowTexCB,
self.grassTexCB))
cmds.setParent('..')
entryTypRow = cmds.rowLayout(nc=2)
self.entryTypeOMGrp = cmds.optionMenuGrp(label='Entry Type:',
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.cliffEntry = cmds.menuItem(label='Cliff')
self.snowEntry = cmds.menuItem(label='Snow')
self.grassEntry = cmds.menuItem(label='Grass')
self.createEntryButton = cmds.button(label='Create Entry',
command=par(self.create_entry,
self.entryTypeOMGrp,
self.ramp))
cmds.setParent('..')
selectedPosition = cmds.attrFieldSliderGrp(label='Selected Position',
adj=3, cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
cmds.rampColorPort(self.rampCliffTex, e=True, sp= selectedPosition)
self.uWaveSl = cmds.attrFieldSliderGrp(at='%s.uWave' % self.ramp,
columnWidth=(4,0), adj=3,
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.vWaveSl = cmds.attrFieldSliderGrp(at='%s.vWave' % self.ramp,
columnWidth=(4,0), adj=3,
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.noiseSl = cmds.attrFieldSliderGrp(at='%s.noise' % self.ramp,
columnWidth=(4,0), adj=3,
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
self.freqSl = cmds.attrFieldSliderGrp(at='%s.noiseFreq' % self.ramp,
columnWidth=(4,0), adj=3,
cw=[(1,110),(3,150)],
cat=[(2,'left', 5)])
cmds.setParent(mainTexturingTab)
self.generateTerrainB = cmds.button(label='Generate Texture!',
height=29,
command=self.generate_texture)
cmds.tabLayout(tabs, edit=True,
tabLabel=((mainTerrainTab, 'Terrain'),
(mainTexturingTab, 'Texturing')) )
def run():
win = MTGGui('MTGWindow')
return win
if __name__ == "__main__":
window = run()
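# Hedged usage note (not part of the original script): inside Maya's script editor
# one would typically do something like
#   import mtg.mtgGUI as mtgGUI
#   win = mtgGUI.run()
# which builds the window and returns the MTGGui instance.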
```
|
{
"source": "j-fdion/cyme",
"score": 3
}
|
#### File: cyme/electrolyse/bt_vehicule.py
```python
from .. import bt
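# Behaviour-tree leaf tasks for the electrolysis vehicle: each run() returns
# bt.Task.SUCCES, bt.Task.ECHEC or bt.Task.RUNNING, and the shared state lives on
# the vehicule object (objectif_aller / objectif_retour targets, cab / ben flags, mobile).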
class ScanObjectifs(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def trouve_objectif_aller(self):
print("cibles_aller", len(self.vehicule.cibles_aller))
while True:
try:
noeud = self.vehicule.cibles_aller.pop()
except IndexError:
return None
else:
if noeud.allee.need_cab_ea or noeud.allee.need_ben_vide:
print(len(self.vehicule.cibles_aller))
return noeud
def trouve_objectif_retour(self):
print("cibles_retour", len(self.vehicule.cibles_retour))
while True:
try:
noeud = self.vehicule.cibles_retour.pop()
except IndexError:
return None
else:
if noeud.allee.has_cab_megot or noeud.allee.has_ben_pleine:
print(len(self.vehicule.cibles_retour))
return noeud
def run(self):
if self.vehicule.objectif_aller is None and self.vehicule.objectif_retour is None:
self.vehicule.objectif_aller = self.trouve_objectif_aller()
self.vehicule.objectif_retour = self.trouve_objectif_retour()
if self.vehicule.objectif_aller is not None or self.vehicule.objectif_retour is not None:
#print("ScanObjectifs", bt.Task.SUCCES)
return bt.Task.SUCCES
else:
print("ScanObjectifs", bt.Task.ECHEC, self.vehicule.objectif_aller, self.vehicule.objectif_retour)
return bt.Task.ECHEC
class ObjectifAllerExiste(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_aller is not None else bt.Task.ECHEC
        if self.vehicule.objectif_aller is not None:
            self.vehicule.objectif_aller.box.color = (1, 0, 0)
print("ObjectifAllerExiste", self.vehicule.objectif_aller, value == bt.Task.SUCCES)
return value
class ObjectifRetourExiste(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_retour is not None else bt.Task.ECHEC
print("ObjectifRetourExiste", value == bt.Task.SUCCES)
return value
class ContientCabBen(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.cab or self.vehicule.ben else bt.Task.ECHEC
print("ContientCabBen", self.vehicule.cab, self.vehicule.ben, value == bt.Task.SUCCES)
return value
class MoveToLocation(bt.Task):
""" Demande au composant mobile de se déplacer vers location
"""
def __init__(self, vehicule, location):
super().__init__()
self.vehicule = vehicule
self.location = location
def run(self):
"""
        :return: ``bt.Task.SUCCES`` if the vehicle is positioned over its target
"""
if self.vehicule.mobile.target != self.location:
self.vehicule.mobile.target = self.location
value = bt.Task.SUCCES if self.vehicule.mobile.target == self.vehicule.mobile.npos else bt.Task.ECHEC
print("MoveToLocation", self.location, value == bt.Task.SUCCES)
return value
class MoveToObjectifAller(bt.Task):
""" Demande au composant mobile de se déplacer vers l'objectif courant
"""
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
"""
:return: ``bt.Task.SUCCES`` si le vehicule est positionné au dessus de sa cible
"""
if self.vehicule.mobile.target != self.vehicule.objectif_aller:
self.vehicule.mobile.target = self.vehicule.objectif_aller
value = bt.Task.SUCCES if self.vehicule.mobile.target == self.vehicule.mobile.npos else bt.Task.RUNNING
print("MoveToObjectifAller", self.vehicule.objectif_aller, value == bt.Task.SUCCES)
return value
class MoveToObjectifRetour(bt.Task):
""" Demande au composant mobile de se déplacer vers l'objectif courant
"""
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
"""
:return: ``bt.Task.SUCCES`` si le vehicule est positionné au dessus de sa cible
"""
if self.vehicule.mobile.target != self.vehicule.objectif_retour:
self.vehicule.mobile.target = self.vehicule.objectif_retour
value = bt.Task.SUCCES if self.vehicule.mobile.target == self.vehicule.mobile.npos else bt.Task.RUNNING
print("MoveToObjectifRetour", self.vehicule.objectif_aller, value == bt.Task.SUCCES)
return value
class IsBenVide(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_aller.allee.need_ben_vide else bt.Task.ECHEC
print("IsBenVide", value == bt.Task.SUCCES)
return value
class IsBenPleine(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_retour.allee.has_ben_pleine else bt.Task.ECHEC
print("IsBenPleine", value == bt.Task.SUCCES)
return value
class DeposerBenPleine(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("DeposerBenPleine")
self.vehicule.ben = False
self.vehicule.objectif_retour = None
return bt.Task.SUCCES
class RamasserBenPleine(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("RamasserBenPleine")
self.vehicule.objectif_retour.allee.has_ben_pleine = False
self.vehicule.objectif_retour.allee.ben = -1
self.vehicule.ben = True
return bt.Task.SUCCES
class DeposerBenVide(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("DeposerBenVide")
self.vehicule.objectif_aller.allee.need_ben_vide = False
self.vehicule.objectif_aller.allee.ben = 0
self.vehicule.objectif_aller = None
self.vehicule.ben = False
return bt.Task.SUCCES
class RamasserBenVide(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("RamasserBenVide")
self.vehicule.ben = True
return bt.Task.SUCCES
class DeposerCabEa(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("DeposerCabEa")
self.vehicule.objectif_aller.allee.need_cab_ea = False
self.vehicule.objectif_aller.allee.cab = 3
self.vehicule.objectif_aller = None
self.vehicule.cab = False
return bt.Task.SUCCES
class RamasserCabEa(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("RamasserCabEa")
self.vehicule.cab = True
return bt.Task.SUCCES
class DeposerCabMegot(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("DeposerCabMegot")
self.vehicule.cab = False
self.vehicule.objectif_retour = None
return bt.Task.SUCCES
class RamasserCabMegot(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
print("RamasserCabMegot")
self.vehicule.objectif_retour.allee.has_cab_megot = False
self.vehicule.objectif_retour.allee.megot = 0
self.vehicule.cab = True
return bt.Task.SUCCES
class IsCabEa(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_aller.allee.need_cab_ea else bt.Task.ECHEC
print("IsCabEa", value == bt.Task.SUCCES)
return value
class IsCabMegot(bt.Task):
def __init__(self, vehicule):
super().__init__()
self.vehicule = vehicule
def run(self):
value = bt.Task.SUCCES if self.vehicule.objectif_retour.allee.has_cab_megot else bt.Task.ECHEC
print("IsCabMegot", value == bt.Task.SUCCES)
return value
```
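These tasks lean on a small behaviour-tree `bt` module (a `Task` base class with `SUCCES`, `ECHEC` and `RUNNING` statuses) that lives elsewhere in the package. As a minimal illustration of the scan pattern used by `ScanObjectifs.trouve_objectif_aller`, here is a standalone sketch with stand-in objects; the status constants and the `SimpleNamespace` targets are placeholders, not the real cyme types.

```python
# Minimal stand-alone sketch of the "pop until a target needs service" pattern
# used by ScanObjectifs.trouve_objectif_aller. Names are illustrative only.
from types import SimpleNamespace

SUCCES, ECHEC = 1, 0  # stand-ins for bt.Task.SUCCES / bt.Task.ECHEC

def trouve_objectif(cibles):
    """Pop nodes until one needs an anode tray or an empty bin, else None."""
    while cibles:
        noeud = cibles.pop()
        if noeud.allee.need_cab_ea or noeud.allee.need_ben_vide:
            return noeud
    return None

if __name__ == "__main__":
    cibles = [SimpleNamespace(allee=SimpleNamespace(need_cab_ea=False, need_ben_vide=False)),
              SimpleNamespace(allee=SimpleNamespace(need_cab_ea=True, need_ben_vide=False))]
    objectif = trouve_objectif(cibles)
    print(SUCCES if objectif is not None else ECHEC)  # -> 1 (the last node needs a tray)
```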
#### File: cyme/electrolyse/centre.py
```python
import re
import math
from kivy.graphics.context_instructions import Color, PushMatrix, Rotate, PopMatrix, Translate
from kivy.graphics.vertex_instructions import Rectangle, Line
from kivy.core.text import Label as CoreLabel
from .. import simulation
from .. import ecs
class Secteur(ecs.Component):
"""Le secteur de production est une entité qui formé d'une liste de cuves,
de tâches à faire et d'un pont qui y est affecté. On regrouppe souvent les secteurs
en famille.
"""
def __init__(self, cuve_debut, allee_debut, nb, famille, nom, num_quart_depart=0):
"""
:param Cuve cuve_debut: première cuve du secteur
:param Allee allee_debut: première allée du secteur (peut-être None si pas d'allée)
:param int nb: nombre de cuve du secteur
:param str famille: nom de la famille avec ce secteur
:param str nom: nom du secteur
:param int num_quart_depart: numéro du quart de départ des opération dans les listes de tâches (0 par défaut)
"""
self.famille = famille
self.nom = nom
self.cuve_debut = cuve_debut # handle sur la premiere cuve du secteur
self.allee_debut = allee_debut
self.nbcuve = nb # nombre de cuves d'un secteur
if cuve_debut:
self.cuve_debut = cuve_debut # handle sur la premiere cuve du secteur
c = self.cuve_debut.noeud
c.cuve.secteur = self
self.noeuds_cuves = [c]
for i in range(nb - 1):
c = c.next()
c.cuve.secteur = self
self.noeuds_cuves.append(c)
self.cuve_fin = c.cuve
if allee_debut:
self.allee_debut = allee_debut
a = self.allee_debut.noeud
a.allee.secteur = self
self.noeuds_allees = [a]
for i in range(nb - 1):
a = a.next()
a.allee.secteur = self
self.noeuds_allees.append(a)
self.allee_fin = a
self.num_quart = num_quart_depart
#kanbans est une liste de liste de kanbans, chaque liste représente un quart (ou poste) de travail
self.kanbans = None
# pont actuellement affecté aux tâches (kanbans) dans ce secteur
self.pont = None
self.quota_pause = 0
def set_kanbans(self, kanbans):
""" Set la liste de listes de kanban (tâches a faire dans ce secteur).
A priori, il y a une liste de tâches par quart (poste), avec cycle.
Typiquement, on cycle a tous les 2 ou 8 quarts, dépendamment du centre d'électrolyse.
C'est le nombre de liste de kanban dans kanbans qui fixe le cycle. """
self.kanbans = kanbans
def incr_num_quart(self):
""" On avance d'une liste de tâche quand on change de quart (poste). """
if len(self.kanbans) > 0:
self.num_quart = (self.num_quart + 1) % len(self.kanbans)
def get_kanbans_for_current_quart(self):
""" Return la liste de kanban du quart courant. """
return self.kanbans[self.num_quart]
def num_quart_max(self):
""" Nombre de listes de kanban, c'est-à-dire le nombre de postes du cycle. """
return len(self.kanbans)
def __repr__(self):
""" Nom du secteur via repr() """
return self.nom
class Allee(ecs.Component):
""" Allee voisine des cuves pour poser les cabarets d'anodes et les bennes. """
def __init__(self, n):
self.noeud = n
self.noeud.allee = self
self.need_cab_ea = False
self.need_ben_vide = False
self.has_cab_megot = False
self.has_ben_pleine = False
self.ea = 0
self.megot = 0
self.ben = -1
def is_vide(self):
if self.ea <= 0 and self.megot <= 0 and self.ben < 0:
return True
else:
return False
def is_occupee(self):
if self.ea > 0 or self.megot > 0 or self.ben > -1:
return True
else:
return False
class Cuve(ecs.Component):
"""Aspect cuve électrolytique d'un ``Noeud`` du graphe. Attributs:
* kA (int): courant sur cette cuve
* Faraday (float): facteur efficacite electrique en % (fonction du kA)
* R (float): cte de rendement en kg/(kA*24h)
* nbanode: nombre d'anodes dans la cuve
* cycle: cycle anodique en jours (float)
* na (float): nombre d'anodes a changer selon le cycle (variable d'état)
* metal (float): quantité de métal liquide siphonnable en kg (variable d'état)
"""
def __init__(self, n, nbanode, cycle=600, ka=400):
""":param n: on garde un handle vers le composant noeud frère
:type n: :class:`Noeud`
:param int nanode: nombre d'anodes (ea) dans une cuve
:param int cycle: cycle anodique en heures (defaut: 600h, i.e. 25j)
:param int ka: courant electrique en ka (defaut: 400kA)
"""
self.noeud=n # handle sur le noeud
self.noeud.cuve=self # handle du noeud vers sa cuve
self.nbanode=nbanode # nombre d'anodes dans la cuve
self.metal=3380.0 # kg de metal siphonnable
self.megot=0 # nb de ea a changer (megots)
self.R=8.0534 # cte standard de production en kg/(kA*24h) (voir doc)
self.set_kA(ka) # set le courant et le rendement en kA
self.set_cycle(cycle) # set le cycle anodique
self.nch=1 # nb d'anodes a changer au prochain changement d'anodes
self.nsi=1 # 1 s'il y a du metal a siphonner, 0 sinon (NB: dans le futur ce sera le nb de kg)
def set_cycle(self,cycle):
""" Set le cycle anodique. """
self.cycle = cycle # cycle de changement des anodes en heures
# facteur production de megots par 60 secondes
self.factMegot = self.nbanode / (self.cycle * 60.0)
def set_kA(self, ka):
"""Set la valeur du courant et du rendement Faraday (typiquement entre 90% et 97%)."""
self.kA = ka
self.Faraday = 0.93 # efficacite electrique en % (depend generalement du kA)
# facteur production de metal par 60 secondes
self.factMetal = 60.0 * self.R * self.Faraday * self.kA / 86400.0
def productionMetal(self, periode):
"""Calcul la quantité de métal total produit par la cuve en ``periode`` minutes."""
return self.factMetal * periode
def productionMegot(self, periode):
"""Calcul la quantité de mégots total produit par la cuve en ``periode`` minutes."""
return self.factMegot * periode
def update(self):
"""Update de l'etat de la cuve apres 60 secondes."""
self.metal += self.factMetal
self.megot += self.factMegot
if self.megot >= 1.0:
self.nch += 1
self.megot -= 1
class RenderCuve(ecs.System):
"""Systeme pour le rendering des cuves."""
def __init__(self, canvas):
super().__init__()
self.canvas = canvas
def init(self):
pass
def reset(self):
pass
def update(self, dt):
with self.canvas:
for entity, cuve in self.entity_manager.pairs_for_type(Cuve):
box = self.entity_manager.component_for_entity(entity, simulation.graphe.Box)
# draw rectangle with texture
Color(box.color[0], box.color[1], box.color[2], box.alpha)
Rectangle(pos=box.pos, size=box.size)
Color(box.contour_color[0], box.contour_color[1], box.contour_color[2], box.alpha)
Line(rectangle=box.pos + box.size)
# texte
#PushMatrix()
#my_label=CoreLabel(text=str(cuve.nch), font_size=10) # affichage du nb d'anodes a changer
#my_label=CoreLabel(text=" {0:.0f} {1:.1f}".format(cuve.metal,cuve.megot), font_size=10) # affichage du metal et du nb d'anode
#my_label.refresh() # force label to draw itself
#x_texture=my_label.texture # use the label texture
#Translate(float(box.ptxt[0]-3), float(box.ptxt[1]), 0)
#Rotate(90, 0, 0, 1)
#Rectangle(size=x_texture.size, pos=(0,0), texture=x_texture)
#PopMatrix()
class RenderAllee(ecs.System):
"""Systeme pour le rendering des cuves."""
def __init__(self, canvas):
super().__init__()
self.canvas = canvas
def init(self):
pass
def reset(self):
pass
def update(self, dt):
with self.canvas:
for entity, allee in self.entity_manager.pairs_for_type(Allee):
box = self.entity_manager.component_for_entity(entity, simulation.graphe.Box)
# draw rectangle with texture
Color(box.color[0], box.color[1], box.color[2], box.alpha)
if allee.need_cab_ea:
Color(0,1,0, box.alpha)
if allee.need_ben_vide:
Color(0, 0, 1, box.alpha)
if allee.has_cab_megot:
Color(0.2, 1, 0.2, box.alpha)
if allee.has_ben_pleine:
Color(0.2, 0.2, 1, box.alpha)
Rectangle(pos=box.pos, size=box.size)
Color(0, 0, 0) # toujours un coutour noir
Line(rectangle=box.pos + box.size)
#PushMatrix()
#my_label = CoreLabel(text=str(allee.secteur.nom), font_size=12)
## my_label=CoreLabel(text=" {0:.0f} {1:.1f}".format(cuve.metal,cuve.megot), font_size=10)
#my_label.refresh() # force label to draw itself
#x_texture = my_label.texture # use the label texture
#Translate(float(box.ptxt[0]), float(box.ptxt[1])+5,0)
#Rotate(90, 0, 0, 1)
#Rectangle(size=x_texture.size, pos=(0, 0), texture=x_texture)
#PopMatrix()
```
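As a quick numeric check of the production factors set up in `Cuve.set_kA` and `Cuve.set_cycle`, the snippet below recomputes the per-minute metal and anode-butt rates for the defaults used above (R=8.0534 kg/(kA·24h), Faraday=0.93, 400 kA, 600 h cycle); the 32-anode count is only an assumed value for illustration.

```python
# Reproduces the per-60-seconds production factors from Cuve (constants from the file above).
R, faraday, kA = 8.0534, 0.93, 400               # kg/(kA*24h), electrical efficiency, current
fact_metal = 60.0 * R * faraday * kA / 86400.0   # kg of liquid metal per simulated minute
nbanode, cycle_h = 32, 600                       # nbanode=32 is an assumption for illustration
fact_megot = nbanode / (cycle_h * 60.0)          # anode butts produced per simulated minute

print(round(fact_metal, 3))         # ~2.08 kg of metal per minute
print(round(fact_metal * 1440, 1))  # ~2995.9 kg per day, i.e. R * Faraday * kA
print(round(fact_megot * 1440, 2))  # 1.28 anodes to change per day (32 anodes / 25 days)
```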
#### File: cyme/electrolyse/gestionnaire_ea.py
```python
from collections import defaultdict, deque
from .. import ecs
from . import centre
from . import vehicule
class GestionnaireEa(ecs.System):
def __init__(self, bd):
super().__init__()
self.bd = bd
self.secteurs = []
self.vehicules = []
self.ordres = {}
def create_famille(self, groupe_nom):
groupe_secteur = []
for g in groupe_nom:
s1 = None
s2 = None
s3 = None
s4 = None
for entity, secteur in self.entity_manager.pairs_for_type(centre.Secteur):
if secteur.nom == g[0]:
s1 = secteur
if secteur.nom == g[1]:
s2 = secteur
if secteur.nom == g[2]:
s3 = secteur
if secteur.nom == g[3]:
s4 = secteur
groupe_secteur.append((s1, s2, s3, s4))
return groupe_secteur
def create_objectifs_famille(self, groupe_secteur, ordres):
objectifs_famille = defaultdict(list)
for i, g in enumerate(groupe_secteur):
for ordre in ordres:
objectifs = []
for o in ordre:
index = o[0]
debut = o[1]
fin = o[2]
if debut < fin:
n = g[index].noeuds_allees[debut:fin + 1]
objectifs.extend(n)
else:
n = g[index].noeuds_allees[fin:debut][::-1]
objectifs.extend(n)
objectifs_famille[i].append(deque(reversed(objectifs)))
print(len(objectifs_famille[i]))
return objectifs_famille
def init(self):
groupe_nom = [('T1', 'T2', 'T7', 'T8'), ('G1', 'G2', 'G7', 'G8'), ('B3', 'B4', 'B5', 'B6')]
groupe_secteur = self.create_famille(groupe_nom)
ordres = [[(2, 21, 35), (0, 19, 0), (0, 19, 35), (1, 0, 20)],
[(1, 21, 35), (3, 19, 0), (3, 19, 35), (2, 0, 20)]]
objectifs_famille = self.create_objectifs_famille(groupe_secteur, ordres)
for entity, v in self.entity_manager.pairs_for_type(vehicule.VehiculeEa):
v.gestionnaire = self
self.vehicules.append(v)
for i, v in enumerate(self.vehicules):
v.cibles_aller = objectifs_famille[i][0]
v.cibles_retour = objectifs_famille[i][1]
print(objectifs_famille[i][0])
for objectif in list(v.cibles_aller):
objectif.box.color = (1,1,0)
for objectif in list(v.cibles_retour):
objectif.box.color = (0,1,1)
def reset(self):
pass
def update(self, dt):
pass
```
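Each `(index, debut, fin)` tuple in `create_objectifs_famille` selects a slice of a sector's aisle nodes: forward and inclusive of `fin` when `debut < fin`, otherwise backward and excluding `debut`. The following sketch reproduces that rule with plain integers standing in for aisle nodes.

```python
# Dummy illustration of the slicing rule used in create_objectifs_famille.
noeuds_allees = list(range(36))  # stand-in for a sector's 36 aisle nodes

def tranche(noeuds, debut, fin):
    """Forward slice [debut, fin] when debut < fin, otherwise reversed [fin, debut)."""
    if debut < fin:
        return noeuds[debut:fin + 1]
    return noeuds[fin:debut][::-1]

print(tranche(noeuds_allees, 21, 35)[:3])  # [21, 22, 23]  (forward, fin included)
print(tranche(noeuds_allees, 19, 0)[:3])   # [18, 17, 16]  (backward, debut excluded)
```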
#### File: cyme/flux/bt_machine.py
```python
from .. import bt
class preTache(bt.Task):
def __init__(self, machine, k=1):
super().__init__()
self.machine=machine
self.k=k # nb traite
def run(self):
#print("action: machinePreTache")
return bt.Task.SUCCES
class machineTache(bt.Task):
def __init__(self, machine, k=1):
super().__init__()
self.machine=machine
self.k=k # nb traite
def run(self):
#print("action: machineTache")
return bt.Task.SUCCES
class postTache(bt.Task):
def __init__(self, machine):
""":param machine: un composant avec un handle sur un ``Noeud``. """
super().__init__()
self.machine=machine
def run(self):
#print("action: postTache")
err = False
ok = True
# on revalide que le materiel est encore dispo en entree
for idx in range(len(self.machine.noeud.ina)):
accumulateur = self.machine.noeud.ina[idx].accumulateur
if not accumulateur.dispo()>=self.machine.qin[idx]: ok = False
if not ok: # zut... on a perdu du temps :(
err=True
else: # le materiel est encore present en entree et il y a de la place en sortie
for idx in range(len(self.machine.noeud.ina)):
accumulateur = self.machine.noeud.ina[idx].accumulateur
done = accumulateur.rm(self.machine.qin[idx])
if not done:
print("Erreur", type(self), "rm item(s) non-existant?")
err=True
for idx in range(len(self.machine.noeud.oua)):
accumulateur = self.machine.noeud.oua[idx].accumulateur
done = accumulateur.add(self.machine.qout[idx])
if not done:
print("Erreur", type(self), "echec add item(s)?")
err=True
if err:
self.machine.travail_perdu += self.machine.tcycle
return bt.Task.ECHEC
else:
self.machine.x+=self.machine.qin_total_par_cycle
self.machine.xout+=self.machine.qout_total_par_cycle
return bt.Task.SUCCES
class entreeTest_i(bt.Task):
""" True si au moins k items dispo sur le idx-ème accumulateur en entree. """
def __init__(self, machine, accumulateur, k=1):
""":param machine: un composant avec un handle sur un ``Noeud``
:param accumulateur: accumulateur a tester
:param k: nombre minimal d'éléments """
super().__init__()
self.machine=machine
self.accumulateur=accumulateur
self.k=k # nb items
def run(self):
if self.accumulateur.dispo()>=self.k:
return bt.Task.SUCCES
return bt.Task.ECHEC
class sortieTest_i(bt.Task):
""" True si au moins k places dispo sur le idx-ème accumulateur en sortie. """
def __init__(self, machine, accumulateur, k=1):
""":param machine: un composant avec un handle sur un ``Noeud``
:param accumulateur: accumulateur a tester
:param k: nombre minimal de places """
super().__init__()
self.machine=machine
self.accumulateur=accumulateur
self.k=k # nb places
def run(self):
if not self.accumulateur.full(self.k):
return bt.Task.SUCCES
return bt.Task.ECHEC
```
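`postTache`, `entreeTest_i` and `sortieTest_i` assume accumulator objects exposing `dispo()`, `rm(k)`, `add(k)` and `full(k)`; that class is not shown in this file, so the sketch below uses a hypothetical bounded counter with the same interface to illustrate the validate/consume/produce sequence.

```python
# Hypothetical bounded-counter accumulator matching the interface used above
# (dispo/rm/add/full); the real cyme implementation may differ.
class Accumulateur:
    def __init__(self, capacite, contenu=0):
        self.capacite, self.contenu = capacite, contenu
    def dispo(self):              # items currently available
        return self.contenu
    def full(self, k=1):          # True if there is no room for k more items
        return self.contenu + k > self.capacite
    def rm(self, k=1):            # consume k items, False if not enough available
        if self.contenu < k:
            return False
        self.contenu -= k
        return True
    def add(self, k=1):           # produce k items, False if that would exceed capacity
        if self.full(k):
            return False
        self.contenu += k
        return True

entree, sortie = Accumulateur(10, contenu=3), Accumulateur(10, contenu=9)
if entree.dispo() >= 2 and not sortie.full(1):   # the checks done by entreeTest_i / sortieTest_i
    ok = entree.rm(2) and sortie.add(1)          # the consume/produce step done by postTache
    print(ok, entree.contenu, sortie.contenu)    # True 1 10
```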
#### File: cyme/simulation/base.py
```python
import csv
import math
import datetime
from collections import OrderedDict, defaultdict, namedtuple
class Pheromone:
""" Une phéromone est en fait un spot pour avoir une phéromone active.
C'est une condition True a l'update de l'état du spot qui renforce ou crée
la phéromone en tant que telle.
Exemple d'utilisation:
.. code-block:: python
p = base.Pheromone(60)
for i in range(600):
p.update(i==50 or i==150 or i==180)
p.show()
"""
def __init__(self,vie):
"""Création de la phéromone non-active. C'est l'update avec une variable qui l'active
ou la tiens en vie. Après vie unités de temps sans reactivation, elle se déactive.
D'un point de vue, l'objet est plutôt un spot pour recevoir une phéromone.
:param vie: durée de vie en unité de temps selon update
"""
self.vie=vie # parametre de vie lors de l'activation
self._vieRestante = 0 # tracking de la sante, i.e. dissipation de la pheromone
self.dt = 0 # duree de vie totale (age) de la pheromone courante
# statistiques
self.n = 0 # stat du nb de phéromones distinctes
self.dtmoy = 0 # duree de vie moyenne d'une pheromone
self.dtmax = 0 # duree de vie max d'une pheromone
def _emettre(self):
""" Emettre une nouvelle phéromone ou reforcer une existante. """
if self._vieRestante == 0: self.n += 1 # on emet une nouvelle
elif self._vieRestante>0:
self.dt+=self.vie-self._vieRestante # on update l'age
self._vieRestante = self.vie # dans tous les cas, on fixe la vie restante a vie
def update(self,depot=False):
""" Update (dissipation) de l'état de la phéromone.
:param depot: si vrai on emet ou renforce une phéromone
"""
if self._vieRestante > 0:
self._vieRestante -= 1
if self._vieRestante == 0: # viens de mourrir...
self.dt+=self.vie
if self.dt>self.dtmax: self.dtmax=self.dt
self.dtmoy = ((self.n-1)*self.dtmoy+self.dt)/self.n
self.dt=0 # reset de la duree de vie totale
if depot: self._emettre()
def show(self):
""" Print les stats. """
print("n",self.n,"age moy",self.dtmoy,"age max",self.dtmax)
class Monitor:
""" Cette classe gère une liste de données à meme la classe. C'est-à-dire que
la liste est un attribut de classe et non d'objet. Typiquement, le but est d'offrir
un mécanisme simple et léger pour collecter une trace globale d'exécution qu'on
peut afficher ou consulter avec un chiffrier. On suppose que chaque donnée est
un tuple structurée comme une ligne csv. Toute les méthodes sont statiques.
Exemple d'utilisation pour une entête et lignes de données:
.. code-block:: python
base.Monitor.add( ("Heure","X","Y") ) # ligne d'entete
base.Monitor.add( ("20h15",3,13) ) # premiere ligne de data
base.Monitor.add( ("21h35",4,19) ) # seconde ligne de data
base.Monitor.show() # affichage a l'ecran
base.Monitor.dump() # dump dans datadex.csv avec ; comme separateur
"""
_datadex = [] # les donnees
@staticmethod
def add(data):
""" Ajoute une ligne de données dans la liste du monitor. """
Monitor._datadex.append(data)
@staticmethod
def show():
""" Affiche a l'écran toutes les lignes de données du monitor. """
for data in Monitor._datadex:
print(data)
@staticmethod
def dump():
""" Dump dans datadex.csv avec ; comme séparateur. """
with open('datadex.csv', 'a', newline='') as fp:
z = csv.writer(fp, delimiter=';')
z.writerows(Monitor._datadex)
class Debug:
""" Permet de print un message concernant un obj lorsque le
flag `is_debug` existe dans l'objet cible.
Exemple d'utilisation:
.. code-block:: python
base.Debug.set_debug_print_on(obj)
base.Debug.print(obj, "Le flag is_debug est dans l'objet obj")
"""
@staticmethod
def set_debug_print_on(obj):
"""Insert l'attribut `is_debug` dans l'objet cible.
:param obj: objet cible
"""
obj.is_debug = True
@staticmethod
def set_debug_print_off(obj):
"""Delete l'attribut `is_debug` dans l'objet cible.
:param obj: objet cible
"""
delattr(obj, 'is_debug')
@staticmethod
def is_debug_print(obj):
"""Détermine si l'attribut 'is_debug' existe dans l'objet cible.
:param obj: objet cible
"""
return 'is_debug' in dir(obj)
@staticmethod
def print(obj, *args):
"""Print un message contenu dans 'args'
:param obj: objet cible
:param args: message à print
"""
if Debug.is_debug_print(obj):
print(*args)
class Blackboard:
"""Simple blackboard (bb) to store shared information. On crée des entrées dans le
bb en y insérant des valeurs directement ou avec `set`. On peut retrouver et
modifier les valeur directement, mais il est préférable d'utiliser `set` et `get`
car ils s'assurent que l'attribut est bien présent dans le bb.
Exemple d'utilisation:
.. code-block:: python
bb=simulation.base.Blackboard()
bb.x=2
bb.set('y',3)
"""
def show_content(self):
"""Liste du contenu du bb excluant les items privés (i.e. contenant ``__``)."""
print([x for x in dir(self) if x.find('__') < 0])
def delete(self, attr):
"""Delete l'attribut `attr` du blackboard.
:param attr: nom de l'attribut à retirer (une string)
"""
if attr in dir(self): delattr(self, attr)
def get(self, attr):
"""Get la valeur de l'attribut `attr` en s'assurant qu'il
existe dans le blackboard.
:param attr: nom de l'attribut à retrouver
"""
return getattr(self, attr)
def set(self, attr, value):
"""Set la valeur de l'attribut `attr` à `value` en s'assurant qu'il
existe dans le blackboard.
:param attr: nom de l'attribut à retrouver (une string)
:param value: valeur à affecter à l'attribut
"""
setattr(self, attr, value)
def has(self, attr):
"""Test si l'object blackboard possède l'attribut `attr`.
:param attr: nom de l'attribut à rechercher (une string)
"""
return attr in dir(self)
class Publisher:
"""Déclenche un événement chez les subscribers lorsque un message est reçu. """
instance = None
@staticmethod
def get_instance():
"""Recupère l'instance unique du publisher, crée l'instance lorsqu'elle n'existe pas.
"""
if Publisher.instance is None:
Publisher.instance = Publisher()
return Publisher.instance
def __init__(self):
self.events = defaultdict(lambda: defaultdict())
def get_subscribers(self, event):
"""Retourne les subscribers inscrit à 'event'
:param event: nom de l'event (une string)
"""
return self.events[event]
def register(self, event, who, callback=None):
"""Enregistre 'who' à l'événement 'event' celui-ci appelera la fonction 'callback' lorsque déchlenché.
:param event: nom de l'événement (une string)
:param who: l'objet inscrit à l'événement
:param callback: fonction à déclencher, par défaut 'notify()'
"""
if callback is None:
callback = getattr(who, 'notify')
self.get_subscribers(event)[who] = callback
def unregister(self, event, who):
"""Retire 'who' de l'événement 'event'
:param event: nom de l'événement (une string)
:param who: l'objet inscrit à l'événement
"""
del self.get_subscribers(event)[who]
def dispatch(self, event, sender, message):
"""Déclenche l'évenement 'event' envoit un message (peut être un objet)
:param event: nom de l'événement (une string)
:param sender: objet déclencher de l'événement
:param message: message à envoyer (peut être un objet)
"""
for subscriber, callback in self.get_subscribers(event).items():
callback(subscriber, sender, message)
class TripleManager:
"""Provide database-like access to triple based on a key: (object,property)."""
def __init__(self,interpreteCF=False):
"""Creation de 2 dictionnaires utilisant une clé (object,property).
Un est pour les valeurs, l'autre pour des commentaires. On peut interpréter
un commentaire comme un facteur de confiance dans [-1,1]. Dans ce cas,
le dictionnaire de commentaire contient le facteur de confiance.
"""
self.interpreteCF = interpreteCF # active l'interprétation des comments comme CF
self._bdv = OrderedDict() # values for (object,property) pairs
self._bdc = OrderedDict() # comments or CF for (object,property) pairs
def add(self, anobject, aproperty, avalue, acomment=""):
"""Add a triple to the database: (anobject,aproperty,avalue) and acomment or CF.
Si la clef existe déjà, on remplace le contenu par le plus récent.
Il est recommandé d'utiliser getv pour tester si la clef existe lorsque c'est utile.
Par convention, on traite les étoiles comme des separateurs pour generer des tuples d'entiers.
Exemple: '23*-40' devient le couple (23,-40).
:param anobject: premier membre du couple de la clé
:param aproperty: second membre du couple de la clé
:param avalue: item a mettre dans le dictionnaire des valeurs
:param acomment: texte optionnel ou CF a mettre dans le dictionnaire des commentaires
Les types sont arbitraires, par exemple ``avalue`` peut être un objet, un tuple, etc.
"""
if isinstance(avalue, str) and avalue.find('*') >= 0:
try:
avalue = tuple(int(i) for i in avalue.split('*'))
except ValueError:
avalue = tuple(str(i) for i in avalue.split('*'))
self._bdv[(anobject, aproperty)] = avalue
if self.interpreteCF:
if not isinstance(acomment,float): acomment=1.0
self._bdc[(anobject, aproperty)] = acomment
def getv(self, pair):
"""Return la valeur associée à la clé
:param pair: couple (objet,propriete) qui est une clé de la bd des valeurs
:return: la valeur associée à la clé
:rtype: quelconque ou None si pas dans le dict
"""
try:
return self._bdv[pair]
except KeyError:
return None
def getc(self, pair):
"""Return le commentaire associé à la clé
:param pair: couple (objet,propriete) qui est une clé de la bd des commentaires
:return: le commentaire associé à la clé
:rtype: une string ou None si pas dans le dict
"""
try:
return self._bdc[pair]
except KeyError:
return None
def to_blackboard(self):
def num(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
except TypeError:
return s
root_bb = Blackboard()
for k in self._bdv:
key0 = k[0].replace('-', '_')
key1 = k[1].replace('-', '_')
if root_bb.has(key0):
bb = root_bb.get(key0)
bb.set(key1, num(self.getv(k)))
root_bb.set(key0, bb)
else:
bb = Blackboard()
bb.set(key1, num(self.getv(k)))
root_bb.set(key0, bb)
return root_bb
def load(self, nom, bypass_title_line_one=True):
"""Load la BD des valeurs et des commentaires via le fichier csv ``nom``."""
with open(nom, newline='') as csvfile:
dat = csv.reader(csvfile, delimiter=';', quotechar='|')
if bypass_title_line_one:
dat.__next__() # saute la premiere ligne (les titres)
for row in dat:
if len(row) > 2:
acomment=row[3]
if self.interpreteCF:
try:
acomment=float(row[3])
except:
acomment=1.0
self.add(row[0], row[1], row[2], acomment)
def dump(self):
"""Dump la BD des valeurs et des commentaires dans le fichier csv ``dump.csv``.
On y trouve les entrées initiales et celles ajoutées. L'ordre est arbitraire.
Rien ne garantie que load(dump()) reconstruit correctement. En fait, les tuples
ne sont pas reconvertis avec des * et les nombres sont mis en string. """
with open('dump.csv', 'w', newline='') as fp:
a = csv.writer(fp, delimiter=';')
data = []
for k in self._bdv:
li = [k[0], k[1]]
li.append(self.getv(k))
li.append(self.getc(k))
data.append(li)
a.writerows(data)
def show(self):
"""Dump la BD des valeurs et des commentaires a la sortie standard."""
for k in self._bdv:
li = [k[0], k[1]]
li.append(self.getv(k))
li.append(self.getc(k))
print(li)
class Moment:
"""Gestionnaire de temps en système 24 heures."""
instance = None
def __init__(self, t0):
"""Cree une instance du gestionnaire de temps. A priori, il doit être unique.
:param int t0: seconde de référence d'une journée d'opération, exemple: 7*60*60 pour 7h.
"""
self.dt = 1 # pas de temps en seconde
self.t0 = t0 # heure de reference du depart en secondes
self.t = 0 # nombre de seconde brut (principale variable d'etat)
self.tnow = t0 # t now (somme de t0 et t) module 24h
self.trel = 0 # t module 12h (temps relatif au quart en cours)
self.ticks_reset() # reset des ticks
self.nbHeureQuart = 12
self.set_instance()
def set_instance(self):
if Moment.instance is None:
Moment.instance = self
@staticmethod
def get_instance():
if Moment.instance is not None:
return Moment.instance
else:
Moment.instance = Moment(0)
return Moment.instance
def update(self):
"""Avance de dt dans le temps et update des variables associees."""
self.t += self.dt
self.tnow = (self.t0 + self.t) % 86400
self.trel = self.t % 43200
self.ticks_reset()
self.ticks_set()
def ticks_reset(self):
"""Reset des ticks."""
self.tickQ = False # quart
self.tickM = False # minute
self.tickH = False # heure
self.tickJ = False # jour
def ticks_set(self):
"""Activation des ticks. Un tick est True durant une seconde selon la fréquence
d'activation. Les principaux tick disponibles sont:
* tickQ: activation à chaque quart (de 12 heures)
* tickM: activation à chaque minute
* tickH: activation à chaque heure
* tickJ: activation à chaque jour
"""
self.tickQ = self.t % 43200 == 0
self.tickM = self.t % 60 == 0
self.tickH = self.t % 3600 == 0
self.tickJ = self.t % 86400 == 0
def nbj(self):
""" Nb de jours depuis le debut de la simulation. """
return self.t//86400
def getJHMS(self):
"""Transforme le temps interne en jour h:m:s tenant compte du t0 de référence.
:return: une string en format jour:heure:min:sec
"""
x = self.t0 + self.t
q = self.t // 43200
h = x // 3600
j = h // 24
h = h % 24
x = x - j * 86400
h = x // 3600 # heure du jour
s = x % 3600 # secondes restantes
m = s // 60
s = s % 60
#d = 'D' if 7 <= h < 19 else 'N'
return '{0:02d}j{1:02d}h{2:02d}m{3:02d}s ({4})'.format(j, h, m, s, q+1)
@staticmethod
def seconds_to_hhmmss(seconds):
h = seconds // 3600
s = seconds % 3600
m = s // 60
s = s % 60
return '{0:02d}h{1:02d}m{2:02d}s'.format(h, m, s)
def get_t_as_hhmmss(self):
return Moment.seconds_to_hhmmss(self.t)
class MomentRT:
""" Gestionnaire de temps real-time. """
def __init__(self, _dt):
"""Cree une instance du gestionnaire de temps RT.
:param int _dt: nb de secondes RT pour trigger elapsed.
"""
self.dt = datetime.timedelta(0,_dt) # delta de temps RT en seconde
# temps de reference du trigger precedent (init a now-180jours)
self.stamp = datetime.datetime.now()-datetime.timedelta(180)
def elapsed(self):
"""True if dt real-time seconds have passed since the last trigger."""
e = datetime.datetime.now() - self.stamp
if e.total_seconds() >= self.dt.total_seconds():
self.stamp = datetime.datetime.now()
return True
else:
return False
class Vecteur2D:
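"""Small 2D vector helpers. Note: mag() and dist() return squared values; use magsqrt()/distsqrt() for true lengths."""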
@staticmethod
def proj(a, b):
d = Vecteur2D.dot(a, b)
m = Vecteur2D.mag(b)
t = d / m
return t, (t * b[0], t * b[1])
@staticmethod
def comp(a, b):
d = Vecteur2D.dot(a, b)
m = Vecteur2D.mag(a)
return d / m
@staticmethod
def magsqrt(a):
return math.sqrt(a[0] * a[0] + a[1] * a[1])
@staticmethod
def mag(a):
return a[0] * a[0] + a[1] * a[1]
@staticmethod
def sub(a, b):
return a[0] - b[0], a[1] - b[1]
@staticmethod
def add(a, b):
return a[0] + b[0], a[1] + b[1]
@staticmethod
def dist(a, b):
d = Vecteur2D.sub(a, b)
return d[0] * d[0] + d[1] * d[1]
@staticmethod
def distsqrt(a, b):
return math.sqrt(Vecteur2D.dist(a, b))
@staticmethod
def dot(a, b):
return a[0] * b[0] + a[1] * b[1]
class Math:
@staticmethod
def safe_scalar_div(x, y):
if y == 0:
return 0
return x / y
@staticmethod
def clamp(n, smallest, largest):
return max(smallest, min(n, largest))
class RGB(namedtuple('RGB', 'r, g, b')):
@staticmethod
def rgb_to_float(r, g, b):
return r/255.0, g/255.0, b/255.0
def to_float(self):
return self.r/255.0, self.g/255.0, self.b/255.0
class Palette():
# Color Contants
ALICEBLUE = RGB(240, 248, 255)
ANTIQUEWHITE = RGB(250, 235, 215)
ANTIQUEWHITE1 = RGB(255, 239, 219)
ANTIQUEWHITE2 = RGB(238, 223, 204)
ANTIQUEWHITE3 = RGB(205, 192, 176)
ANTIQUEWHITE4 = RGB(139, 131, 120)
AQUA = RGB(0, 255, 255)
AQUAMARINE1 = RGB(127, 255, 212)
AQUAMARINE2 = RGB(118, 238, 198)
AQUAMARINE3 = RGB(102, 205, 170)
AQUAMARINE4 = RGB(69, 139, 116)
AZURE1 = RGB(240, 255, 255)
AZURE2 = RGB(224, 238, 238)
AZURE3 = RGB(193, 205, 205)
AZURE4 = RGB(131, 139, 139)
BANANA = RGB(227, 207, 87)
BEIGE = RGB(245, 245, 220)
BISQUE1 = RGB(255, 228, 196)
BISQUE2 = RGB(238, 213, 183)
BISQUE3 = RGB(205, 183, 158)
BISQUE4 = RGB(139, 125, 107)
BLACK = RGB(0, 0, 0)
BLANCHEDALMOND = RGB(255, 235, 205)
BLUE = RGB(0, 0, 255)
BLUE2 = RGB(0, 0, 238)
BLUE3 = RGB(0, 0, 205)
BLUE4 = RGB(0, 0, 139)
BLUEVIOLET = RGB(138, 43, 226)
BRICK = RGB(156, 102, 31)
BROWN = RGB(165, 42, 42)
BROWN1 = RGB(255, 64, 64)
BROWN2 = RGB(238, 59, 59)
BROWN3 = RGB(205, 51, 51)
BROWN4 = RGB(139, 35, 35)
BURLYWOOD = RGB(222, 184, 135)
BURLYWOOD1 = RGB(255, 211, 155)
BURLYWOOD2 = RGB(238, 197, 145)
BURLYWOOD3 = RGB(205, 170, 125)
BURLYWOOD4 = RGB(139, 115, 85)
BURNTSIENNA = RGB(138, 54, 15)
BURNTUMBER = RGB(138, 51, 36)
CADETBLUE = RGB(95, 158, 160)
CADETBLUE1 = RGB(152, 245, 255)
CADETBLUE2 = RGB(142, 229, 238)
CADETBLUE3 = RGB(122, 197, 205)
CADETBLUE4 = RGB(83, 134, 139)
CADMIUMORANGE = RGB(255, 97, 3)
CADMIUMYELLOW = RGB(255, 153, 18)
CARROT = RGB(237, 145, 33)
CHARTREUSE1 = RGB(127, 255, 0)
CHARTREUSE2 = RGB(118, 238, 0)
CHARTREUSE3 = RGB(102, 205, 0)
CHARTREUSE4 = RGB(69, 139, 0)
CHOCOLATE = RGB(210, 105, 30)
CHOCOLATE1 = RGB(255, 127, 36)
CHOCOLATE2 = RGB(238, 118, 33)
CHOCOLATE3 = RGB(205, 102, 29)
CHOCOLATE4 = RGB(139, 69, 19)
COBALT = RGB(61, 89, 171)
COBALTGREEN = RGB(61, 145, 64)
COLDGREY = RGB(128, 138, 135)
CORAL = RGB(255, 127, 80)
CORAL1 = RGB(255, 114, 86)
CORAL2 = RGB(238, 106, 80)
CORAL3 = RGB(205, 91, 69)
CORAL4 = RGB(139, 62, 47)
CORNFLOWERBLUE = RGB(100, 149, 237)
CORNSILK1 = RGB(255, 248, 220)
CORNSILK2 = RGB(238, 232, 205)
CORNSILK3 = RGB(205, 200, 177)
CORNSILK4 = RGB(139, 136, 120)
CRIMSON = RGB(220, 20, 60)
CYAN2 = RGB(0, 238, 238)
CYAN3 = RGB(0, 205, 205)
CYAN4 = RGB(0, 139, 139)
DARKGOLDENROD = RGB(184, 134, 11)
DARKGOLDENROD1 = RGB(255, 185, 15)
DARKGOLDENROD2 = RGB(238, 173, 14)
DARKGOLDENROD3 = RGB(205, 149, 12)
DARKGOLDENROD4 = RGB(139, 101, 8)
DARKGRAY = RGB(169, 169, 169)
DARKGREEN = RGB(0, 100, 0)
DARKKHAKI = RGB(189, 183, 107)
DARKOLIVEGREEN = RGB(85, 107, 47)
DARKOLIVEGREEN1 = RGB(202, 255, 112)
DARKOLIVEGREEN2 = RGB(188, 238, 104)
DARKOLIVEGREEN3 = RGB(162, 205, 90)
DARKOLIVEGREEN4 = RGB(110, 139, 61)
DARKORANGE = RGB(255, 140, 0)
DARKORANGE1 = RGB(255, 127, 0)
DARKORANGE2 = RGB(238, 118, 0)
DARKORANGE3 = RGB(205, 102, 0)
DARKORANGE4 = RGB(139, 69, 0)
DARKORCHID = RGB(153, 50, 204)
DARKORCHID1 = RGB(191, 62, 255)
DARKORCHID2 = RGB(178, 58, 238)
DARKORCHID3 = RGB(154, 50, 205)
DARKORCHID4 = RGB(104, 34, 139)
DARKSALMON = RGB(233, 150, 122)
DARKSEAGREEN = RGB(143, 188, 143)
DARKSEAGREEN1 = RGB(193, 255, 193)
DARKSEAGREEN2 = RGB(180, 238, 180)
DARKSEAGREEN3 = RGB(155, 205, 155)
DARKSEAGREEN4 = RGB(105, 139, 105)
DARKSLATEBLUE = RGB(72, 61, 139)
DARKSLATEGRAY = RGB(47, 79, 79)
DARKSLATEGRAY1 = RGB(151, 255, 255)
DARKSLATEGRAY2 = RGB(141, 238, 238)
DARKSLATEGRAY3 = RGB(121, 205, 205)
DARKSLATEGRAY4 = RGB(82, 139, 139)
DARKTURQUOISE = RGB(0, 206, 209)
DARKVIOLET = RGB(148, 0, 211)
DEEPPINK1 = RGB(255, 20, 147)
DEEPPINK2 = RGB(238, 18, 137)
DEEPPINK3 = RGB(205, 16, 118)
DEEPPINK4 = RGB(139, 10, 80)
DEEPSKYBLUE1 = RGB(0, 191, 255)
DEEPSKYBLUE2 = RGB(0, 178, 238)
DEEPSKYBLUE3 = RGB(0, 154, 205)
DEEPSKYBLUE4 = RGB(0, 104, 139)
DIMGRAY = RGB(105, 105, 105)
DODGERBLUE1 = RGB(30, 144, 255)
DODGERBLUE2 = RGB(28, 134, 238)
DODGERBLUE3 = RGB(24, 116, 205)
DODGERBLUE4 = RGB(16, 78, 139)
EGGSHELL = RGB(252, 230, 201)
EMERALDGREEN = RGB(0, 201, 87)
FIREBRICK = RGB(178, 34, 34)
FIREBRICK1 = RGB(255, 48, 48)
FIREBRICK2 = RGB(238, 44, 44)
FIREBRICK3 = RGB(205, 38, 38)
FIREBRICK4 = RGB(139, 26, 26)
FLESH = RGB(255, 125, 64)
FLORALWHITE = RGB(255, 250, 240)
FORESTGREEN = RGB(34, 139, 34)
GAINSBORO = RGB(220, 220, 220)
GHOSTWHITE = RGB(248, 248, 255)
GOLD1 = RGB(255, 215, 0)
GOLD2 = RGB(238, 201, 0)
GOLD3 = RGB(205, 173, 0)
GOLD4 = RGB(139, 117, 0)
GOLDENROD = RGB(218, 165, 32)
GOLDENROD1 = RGB(255, 193, 37)
GOLDENROD2 = RGB(238, 180, 34)
GOLDENROD3 = RGB(205, 155, 29)
GOLDENROD4 = RGB(139, 105, 20)
GRAY = RGB(128, 128, 128)
GRAY1 = RGB(3, 3, 3)
GRAY10 = RGB(26, 26, 26)
GRAY11 = RGB(28, 28, 28)
GRAY12 = RGB(31, 31, 31)
GRAY13 = RGB(33, 33, 33)
GRAY14 = RGB(36, 36, 36)
GRAY15 = RGB(38, 38, 38)
GRAY16 = RGB(41, 41, 41)
GRAY17 = RGB(43, 43, 43)
GRAY18 = RGB(46, 46, 46)
GRAY19 = RGB(48, 48, 48)
GRAY2 = RGB(5, 5, 5)
GRAY20 = RGB(51, 51, 51)
GRAY21 = RGB(54, 54, 54)
GRAY22 = RGB(56, 56, 56)
GRAY23 = RGB(59, 59, 59)
GRAY24 = RGB(61, 61, 61)
GRAY25 = RGB(64, 64, 64)
GRAY26 = RGB(66, 66, 66)
GRAY27 = RGB(69, 69, 69)
GRAY28 = RGB(71, 71, 71)
GRAY29 = RGB(74, 74, 74)
GRAY3 = RGB(8, 8, 8)
GRAY30 = RGB(77, 77, 77)
GRAY31 = RGB(79, 79, 79)
GRAY32 = RGB(82, 82, 82)
GRAY33 = RGB(84, 84, 84)
GRAY34 = RGB(87, 87, 87)
GRAY35 = RGB(89, 89, 89)
GRAY36 = RGB(92, 92, 92)
GRAY37 = RGB(94, 94, 94)
GRAY38 = RGB(97, 97, 97)
GRAY39 = RGB(99, 99, 99)
GRAY4 = RGB(10, 10, 10)
GRAY40 = RGB(102, 102, 102)
GRAY42 = RGB(107, 107, 107)
GRAY43 = RGB(110, 110, 110)
GRAY44 = RGB(112, 112, 112)
GRAY45 = RGB(115, 115, 115)
GRAY46 = RGB(117, 117, 117)
GRAY47 = RGB(120, 120, 120)
GRAY48 = RGB(122, 122, 122)
GRAY49 = RGB(125, 125, 125)
GRAY5 = RGB(13, 13, 13)
GRAY50 = RGB(127, 127, 127)
GRAY51 = RGB(130, 130, 130)
GRAY52 = RGB(133, 133, 133)
GRAY53 = RGB(135, 135, 135)
GRAY54 = RGB(138, 138, 138)
GRAY55 = RGB(140, 140, 140)
GRAY56 = RGB(143, 143, 143)
GRAY57 = RGB(145, 145, 145)
GRAY58 = RGB(148, 148, 148)
GRAY59 = RGB(150, 150, 150)
GRAY6 = RGB(15, 15, 15)
GRAY60 = RGB(153, 153, 153)
GRAY61 = RGB(156, 156, 156)
GRAY62 = RGB(158, 158, 158)
GRAY63 = RGB(161, 161, 161)
GRAY64 = RGB(163, 163, 163)
GRAY65 = RGB(166, 166, 166)
GRAY66 = RGB(168, 168, 168)
GRAY67 = RGB(171, 171, 171)
GRAY68 = RGB(173, 173, 173)
GRAY69 = RGB(176, 176, 176)
GRAY7 = RGB(18, 18, 18)
GRAY70 = RGB(179, 179, 179)
GRAY71 = RGB(181, 181, 181)
GRAY72 = RGB(184, 184, 184)
GRAY73 = RGB(186, 186, 186)
GRAY74 = RGB(189, 189, 189)
GRAY75 = RGB(191, 191, 191)
GRAY76 = RGB(194, 194, 194)
GRAY77 = RGB(196, 196, 196)
GRAY78 = RGB(199, 199, 199)
GRAY79 = RGB(201, 201, 201)
GRAY8 = RGB(20, 20, 20)
GRAY80 = RGB(204, 204, 204)
GRAY81 = RGB(207, 207, 207)
GRAY82 = RGB(209, 209, 209)
GRAY83 = RGB(212, 212, 212)
GRAY84 = RGB(214, 214, 214)
GRAY85 = RGB(217, 217, 217)
GRAY86 = RGB(219, 219, 219)
GRAY87 = RGB(222, 222, 222)
GRAY88 = RGB(224, 224, 224)
GRAY89 = RGB(227, 227, 227)
GRAY9 = RGB(23, 23, 23)
GRAY90 = RGB(229, 229, 229)
GRAY91 = RGB(232, 232, 232)
GRAY92 = RGB(235, 235, 235)
GRAY93 = RGB(237, 237, 237)
GRAY94 = RGB(240, 240, 240)
GRAY95 = RGB(242, 242, 242)
GRAY97 = RGB(247, 247, 247)
GRAY98 = RGB(250, 250, 250)
GRAY99 = RGB(252, 252, 252)
GREEN = RGB(0, 128, 0)
GREEN1 = RGB(0, 255, 0)
GREEN2 = RGB(0, 238, 0)
GREEN3 = RGB(0, 205, 0)
GREEN4 = RGB(0, 139, 0)
GREENYELLOW = RGB(173, 255, 47)
HONEYDEW1 = RGB(240, 255, 240)
HONEYDEW2 = RGB(224, 238, 224)
HONEYDEW3 = RGB(193, 205, 193)
HONEYDEW4 = RGB(131, 139, 131)
HOTPINK = RGB(255, 105, 180)
HOTPINK1 = RGB(255, 110, 180)
HOTPINK2 = RGB(238, 106, 167)
HOTPINK3 = RGB(205, 96, 144)
HOTPINK4 = RGB(139, 58, 98)
INDIANRED = RGB(205, 92, 92)
INDIANRED1 = RGB(255, 106, 106)
INDIANRED2 = RGB(238, 99, 99)
INDIANRED3 = RGB(205, 85, 85)
INDIANRED4 = RGB(139, 58, 58)
INDIGO = RGB(75, 0, 130)
IVORY1 = RGB(255, 255, 240)
IVORY2 = RGB(238, 238, 224)
IVORY3 = RGB(205, 205, 193)
IVORY4 = RGB(139, 139, 131)
IVORYBLACK = RGB(41, 36, 33)
KHAKI = RGB(240, 230, 140)
KHAKI1 = RGB(255, 246, 143)
KHAKI2 = RGB(238, 230, 133)
KHAKI3 = RGB(205, 198, 115)
KHAKI4 = RGB(139, 134, 78)
LAVENDER = RGB(230, 230, 250)
LAVENDERBLUSH1 = RGB(255, 240, 245)
LAVENDERBLUSH2 = RGB(238, 224, 229)
LAVENDERBLUSH3 = RGB(205, 193, 197)
LAVENDERBLUSH4 = RGB(139, 131, 134)
LAWNGREEN = RGB(124, 252, 0)
LEMONCHIFFON1 = RGB(255, 250, 205)
LEMONCHIFFON2 = RGB(238, 233, 191)
LEMONCHIFFON3 = RGB(205, 201, 165)
LEMONCHIFFON4 = RGB(139, 137, 112)
LIGHTBLUE = RGB(173, 216, 230)
LIGHTBLUE1 = RGB(191, 239, 255)
LIGHTBLUE2 = RGB(178, 223, 238)
LIGHTBLUE3 = RGB(154, 192, 205)
LIGHTBLUE4 = RGB(104, 131, 139)
LIGHTCORAL = RGB(240, 128, 128)
LIGHTCYAN1 = RGB(224, 255, 255)
LIGHTCYAN2 = RGB(209, 238, 238)
LIGHTCYAN3 = RGB(180, 205, 205)
LIGHTCYAN4 = RGB(122, 139, 139)
LIGHTGOLDENROD1 = RGB(255, 236, 139)
LIGHTGOLDENROD2 = RGB(238, 220, 130)
LIGHTGOLDENROD3 = RGB(205, 190, 112)
LIGHTGOLDENROD4 = RGB(139, 129, 76)
LIGHTGOLDENRODYELLOW = RGB(250, 250, 210)
LIGHTGREY = RGB(211, 211, 211)
LIGHTPINK = RGB(255, 182, 193)
LIGHTPINK1 = RGB(255, 174, 185)
LIGHTPINK2 = RGB(238, 162, 173)
LIGHTPINK3 = RGB(205, 140, 149)
LIGHTPINK4 = RGB(139, 95, 101)
LIGHTSALMON1 = RGB(255, 160, 122)
LIGHTSALMON2 = RGB(238, 149, 114)
LIGHTSALMON3 = RGB(205, 129, 98)
LIGHTSALMON4 = RGB(139, 87, 66)
LIGHTSEAGREEN = RGB(32, 178, 170)
LIGHTSKYBLUE = RGB(135, 206, 250)
LIGHTSKYBLUE1 = RGB(176, 226, 255)
LIGHTSKYBLUE2 = RGB(164, 211, 238)
LIGHTSKYBLUE3 = RGB(141, 182, 205)
LIGHTSKYBLUE4 = RGB(96, 123, 139)
LIGHTSLATEBLUE = RGB(132, 112, 255)
LIGHTSLATEGRAY = RGB(119, 136, 153)
LIGHTSTEELBLUE = RGB(176, 196, 222)
LIGHTSTEELBLUE1 = RGB(202, 225, 255)
LIGHTSTEELBLUE2 = RGB(188, 210, 238)
LIGHTSTEELBLUE3 = RGB(162, 181, 205)
LIGHTSTEELBLUE4 = RGB(110, 123, 139)
LIGHTYELLOW1 = RGB(255, 255, 224)
LIGHTYELLOW2 = RGB(238, 238, 209)
LIGHTYELLOW3 = RGB(205, 205, 180)
LIGHTYELLOW4 = RGB(139, 139, 122)
LIMEGREEN = RGB(50, 205, 50)
LINEN = RGB(250, 240, 230)
MAGENTA = RGB(255, 0, 255)
MAGENTA2 = RGB(238, 0, 238)
MAGENTA3 = RGB(205, 0, 205)
MAGENTA4 = RGB(139, 0, 139)
MANGANESEBLUE = RGB(3, 168, 158)
MAROON = RGB(128, 0, 0)
MAROON1 = RGB(255, 52, 179)
MAROON2 = RGB(238, 48, 167)
MAROON3 = RGB(205, 41, 144)
MAROON4 = RGB(139, 28, 98)
MEDIUMORCHID = RGB(186, 85, 211)
MEDIUMORCHID1 = RGB(224, 102, 255)
MEDIUMORCHID2 = RGB(209, 95, 238)
MEDIUMORCHID3 = RGB(180, 82, 205)
MEDIUMORCHID4 = RGB(122, 55, 139)
MEDIUMPURPLE = RGB(147, 112, 219)
MEDIUMPURPLE1 = RGB(171, 130, 255)
MEDIUMPURPLE2 = RGB(159, 121, 238)
MEDIUMPURPLE3 = RGB(137, 104, 205)
MEDIUMPURPLE4 = RGB(93, 71, 139)
MEDIUMSEAGREEN = RGB(60, 179, 113)
MEDIUMSLATEBLUE = RGB(123, 104, 238)
MEDIUMSPRINGGREEN = RGB(0, 250, 154)
MEDIUMTURQUOISE = RGB(72, 209, 204)
MEDIUMVIOLETRED = RGB(199, 21, 133)
MELON = RGB(227, 168, 105)
MIDNIGHTBLUE = RGB(25, 25, 112)
MINT = RGB(189, 252, 201)
MINTCREAM = RGB(245, 255, 250)
MISTYROSE1 = RGB(255, 228, 225)
MISTYROSE2 = RGB(238, 213, 210)
MISTYROSE3 = RGB(205, 183, 181)
MISTYROSE4 = RGB(139, 125, 123)
MOCCASIN = RGB(255, 228, 181)
NAVAJOWHITE1 = RGB(255, 222, 173)
NAVAJOWHITE2 = RGB(238, 207, 161)
NAVAJOWHITE3 = RGB(205, 179, 139)
NAVAJOWHITE4 = RGB(139, 121, 94)
NAVY = RGB(0, 0, 128)
OLDLACE = RGB(253, 245, 230)
OLIVE = RGB(128, 128, 0)
OLIVEDRAB = RGB(107, 142, 35)
OLIVEDRAB1 = RGB(192, 255, 62)
OLIVEDRAB2 = RGB(179, 238, 58)
OLIVEDRAB3 = RGB(154, 205, 50)
OLIVEDRAB4 = RGB(105, 139, 34)
ORANGE = RGB(255, 128, 0)
ORANGE1 = RGB(255, 165, 0)
ORANGE2 = RGB(238, 154, 0)
ORANGE3 = RGB(205, 133, 0)
ORANGE4 = RGB(139, 90, 0)
ORANGERED1 = RGB(255, 69, 0)
ORANGERED2 = RGB(238, 64, 0)
ORANGERED3 = RGB(205, 55, 0)
ORANGERED4 = RGB(139, 37, 0)
ORCHID = RGB(218, 112, 214)
ORCHID1 = RGB(255, 131, 250)
ORCHID2 = RGB(238, 122, 233)
ORCHID3 = RGB(205, 105, 201)
ORCHID4 = RGB(139, 71, 137)
PALEGOLDENROD = RGB(238, 232, 170)
PALEGREEN = RGB(152, 251, 152)
PALEGREEN1 = RGB(154, 255, 154)
PALEGREEN2 = RGB(144, 238, 144)
PALEGREEN3 = RGB(124, 205, 124)
PALEGREEN4 = RGB(84, 139, 84)
PALETURQUOISE1 = RGB(187, 255, 255)
PALETURQUOISE2 = RGB(174, 238, 238)
PALETURQUOISE3 = RGB(150, 205, 205)
PALETURQUOISE4 = RGB(102, 139, 139)
PALEVIOLETRED = RGB(219, 112, 147)
PALEVIOLETRED1 = RGB(255, 130, 171)
PALEVIOLETRED2 = RGB(238, 121, 159)
PALEVIOLETRED3 = RGB(205, 104, 137)
PALEVIOLETRED4 = RGB(139, 71, 93)
PAPAYAWHIP = RGB(255, 239, 213)
PEACHPUFF1 = RGB(255, 218, 185)
PEACHPUFF2 = RGB(238, 203, 173)
PEACHPUFF3 = RGB(205, 175, 149)
PEACHPUFF4 = RGB(139, 119, 101)
PEACOCK = RGB(51, 161, 201)
PINK = RGB(255, 192, 203)
PINK1 = RGB(255, 181, 197)
PINK2 = RGB(238, 169, 184)
PINK3 = RGB(205, 145, 158)
PINK4 = RGB(139, 99, 108)
PLUM = RGB(221, 160, 221)
PLUM1 = RGB(255, 187, 255)
PLUM2 = RGB(238, 174, 238)
PLUM3 = RGB(205, 150, 205)
PLUM4 = RGB(139, 102, 139)
POWDERBLUE = RGB(176, 224, 230)
PURPLE = RGB(128, 0, 128)
PURPLE1 = RGB(155, 48, 255)
PURPLE2 = RGB(145, 44, 238)
PURPLE3 = RGB(125, 38, 205)
PURPLE4 = RGB(85, 26, 139)
RASPBERRY = RGB(135, 38, 87)
RAWSIENNA = RGB(199, 97, 20)
RED1 = RGB(255, 0, 0)
RED2 = RGB(238, 0, 0)
RED3 = RGB(205, 0, 0)
RED4 = RGB(139, 0, 0)
ROSYBROWN = RGB(188, 143, 143)
ROSYBROWN1 = RGB(255, 193, 193)
ROSYBROWN2 = RGB(238, 180, 180)
ROSYBROWN3 = RGB(205, 155, 155)
ROSYBROWN4 = RGB(139, 105, 105)
ROYALBLUE = RGB(65, 105, 225)
ROYALBLUE1 = RGB(72, 118, 255)
ROYALBLUE2 = RGB(67, 110, 238)
ROYALBLUE3 = RGB(58, 95, 205)
ROYALBLUE4 = RGB(39, 64, 139)
SALMON = RGB(250, 128, 114)
SALMON1 = RGB(255, 140, 105)
SALMON2 = RGB(238, 130, 98)
SALMON3 = RGB(205, 112, 84)
SALMON4 = RGB(139, 76, 57)
SANDYBROWN = RGB(244, 164, 96)
SAPGREEN = RGB(48, 128, 20)
SEAGREEN1 = RGB(84, 255, 159)
SEAGREEN2 = RGB(78, 238, 148)
SEAGREEN3 = RGB(67, 205, 128)
SEAGREEN4 = RGB(46, 139, 87)
SEASHELL1 = RGB(255, 245, 238)
SEASHELL2 = RGB(238, 229, 222)
SEASHELL3 = RGB(205, 197, 191)
SEASHELL4 = RGB(139, 134, 130)
SEPIA = RGB(94, 38, 18)
SGIBEET = RGB(142, 56, 142)
SGIBRIGHTGRAY = RGB(197, 193, 170)
SGICHARTREUSE = RGB(113, 198, 113)
SGIDARKGRAY = RGB(85, 85, 85)
SGIGRAY12 = RGB(30, 30, 30)
SGIGRAY16 = RGB(40, 40, 40)
SGIGRAY32 = RGB(81, 81, 81)
SGIGRAY36 = RGB(91, 91, 91)
SGIGRAY52 = RGB(132, 132, 132)
SGIGRAY56 = RGB(142, 142, 142)
SGIGRAY72 = RGB(183, 183, 183)
SGIGRAY76 = RGB(193, 193, 193)
SGIGRAY92 = RGB(234, 234, 234)
SGIGRAY96 = RGB(244, 244, 244)
SGILIGHTBLUE = RGB(125, 158, 192)
SGILIGHTGRAY = RGB(170, 170, 170)
SGIOLIVEDRAB = RGB(142, 142, 56)
SGISALMON = RGB(198, 113, 113)
SGISLATEBLUE = RGB(113, 113, 198)
SGITEAL = RGB(56, 142, 142)
SIENNA = RGB(160, 82, 45)
SIENNA1 = RGB(255, 130, 71)
SIENNA2 = RGB(238, 121, 66)
SIENNA3 = RGB(205, 104, 57)
SIENNA4 = RGB(139, 71, 38)
SILVER = RGB(192, 192, 192)
SKYBLUE = RGB(135, 206, 235)
SKYBLUE1 = RGB(135, 206, 255)
SKYBLUE2 = RGB(126, 192, 238)
SKYBLUE3 = RGB(108, 166, 205)
SKYBLUE4 = RGB(74, 112, 139)
SLATEBLUE = RGB(106, 90, 205)
SLATEBLUE1 = RGB(131, 111, 255)
SLATEBLUE2 = RGB(122, 103, 238)
SLATEBLUE3 = RGB(105, 89, 205)
SLATEBLUE4 = RGB(71, 60, 139)
SLATEGRAY = RGB(112, 128, 144)
SLATEGRAY1 = RGB(198, 226, 255)
SLATEGRAY2 = RGB(185, 211, 238)
SLATEGRAY3 = RGB(159, 182, 205)
SLATEGRAY4 = RGB(108, 123, 139)
SNOW1 = RGB(255, 250, 250)
SNOW2 = RGB(238, 233, 233)
SNOW3 = RGB(205, 201, 201)
SNOW4 = RGB(139, 137, 137)
SPRINGGREEN = RGB(0, 255, 127)
SPRINGGREEN1 = RGB(0, 238, 118)
SPRINGGREEN2 = RGB(0, 205, 102)
SPRINGGREEN3 = RGB(0, 139, 69)
STEELBLUE = RGB(70, 130, 180)
STEELBLUE1 = RGB(99, 184, 255)
STEELBLUE2 = RGB(92, 172, 238)
STEELBLUE3 = RGB(79, 148, 205)
STEELBLUE4 = RGB(54, 100, 139)
TAN = RGB(210, 180, 140)
TAN1 = RGB(255, 165, 79)
TAN2 = RGB(238, 154, 73)
TAN3 = RGB(205, 133, 63)
TAN4 = RGB(139, 90, 43)
TEAL = RGB(0, 128, 128)
THISTLE = RGB(216, 191, 216)
THISTLE1 = RGB(255, 225, 255)
THISTLE2 = RGB(238, 210, 238)
THISTLE3 = RGB(205, 181, 205)
THISTLE4 = RGB(139, 123, 139)
TOMATO1 = RGB(255, 99, 71)
TOMATO2 = RGB(238, 92, 66)
TOMATO3 = RGB(205, 79, 57)
TOMATO4 = RGB(139, 54, 38)
TURQUOISE = RGB(64, 224, 208)
TURQUOISE1 = RGB(0, 245, 255)
TURQUOISE2 = RGB(0, 229, 238)
TURQUOISE3 = RGB(0, 197, 205)
TURQUOISE4 = RGB(0, 134, 139)
TURQUOISEBLUE = RGB(0, 199, 140)
VIOLET = RGB(238, 130, 238)
VIOLETRED = RGB(208, 32, 144)
VIOLETRED1 = RGB(255, 62, 150)
VIOLETRED2 = RGB(238, 58, 140)
VIOLETRED3 = RGB(205, 50, 120)
VIOLETRED4 = RGB(139, 34, 82)
WARMGREY = RGB(128, 128, 105)
WHEAT = RGB(245, 222, 179)
WHEAT1 = RGB(255, 231, 186)
WHEAT2 = RGB(238, 216, 174)
WHEAT3 = RGB(205, 186, 150)
WHEAT4 = RGB(139, 126, 102)
WHITE = RGB(255, 255, 255)
WHITESMOKE = RGB(245, 245, 245)
YELLOW1 = RGB(255, 255, 0)
YELLOW2 = RGB(238, 238, 0)
YELLOW3 = RGB(205, 205, 0)
YELLOW4 = RGB(139, 139, 0)
```
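`Moment` keeps its whole state as raw seconds and derives days and shifts by integer arithmetic (86400 s per day, 43200 s per 12 h shift). The standalone function below mirrors the arithmetic of `getJHMS` so the formatting can be checked without instantiating the class; the 7h reference comes from the example in the class docstring.

```python
# Standalone rendition of Moment.getJHMS's integer arithmetic (t0 = 7h, as in the class docstring).
def jhms(t, t0=7 * 3600):
    x = t0 + t
    quart = t // 43200          # number of completed 12 h shifts
    jour = x // 86400           # day index
    reste = x - jour * 86400
    heure, reste = reste // 3600, reste % 3600
    minute, seconde = reste // 60, reste % 60
    return '{0:02d}j{1:02d}h{2:02d}m{3:02d}s ({4})'.format(jour, heure, minute, seconde, quart + 1)

print(jhms(0))       # 00j07h00m00s (1)
print(jhms(43200))   # 00j19h00m00s (2)  -- one full 12 h shift after a 7h start
```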
#### File: cyme/simulation/horaire.py
```python
from .. import ecs
from . import stochastique
class Horaire(ecs.Component):
"""Horaire de base se répétant a intervalle fixe."""
def __init__(self, mom, cible, mtags, periode):
"""Configure l'horaire. La var d'état clef est ``actif`` et est dans un composant target.
C'est le target qui est responsable de l'initialisation.
C'est le target qui est responsable de l'initialisation. Cette version de horaire peut
être mise-à-jour à la seconde ou à la minute, ça ne change rien car les tags sont en minutes.
:param mom: on garde un handle vers le moment
:type mom: :class:`sim.base.Moment`
:param cible: un composant target avec la var d'état ``actif``
:param mtags: la liste de triplet (j,h,m) de changement d'état, j=0 est le jour actuel
:param periode: un triplet (j,h,m) pour la periode de cycle chaque j jours, h heures et m min
"""
self.mom=mom # instance de Moment
self.target=cible # la target avec un horaire (doit avoir une var d'etat actif)
self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
# les tags de changement (en secondes) d'etat de self.target.actif sur 24h
self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
self.nextTagIdx=0 # on suppose partir de mom.t0 et que self.tags[0]>mom.t0
if not self.tags:
print("Attention! Horaire: Pas de tags")
def update(self):
""" Update la var d'état ``actif`` dans le target selon la minute actuelle et les tags."""
if self.tags and (self.mom.t%self.periode)==self.tags[self.nextTagIdx]:
self.target.actif=not self.target.actif
self.nextTagIdx+=1
if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
class HoraireSto(ecs.Component):
"""Horaire de base se répétant a intervalle fixe."""
def __init__(self, mom, cible, mtags, periode, mtbf, mttr, mttrMin=-1, mttrAlpha=-1):
"""Configure l'horaire. La var d'état clef est ``actif`` et est dans un composant target.
C'est le target qui est responsable de l'initialisation. Cette version de horaire peut
être mise-à-jour à la seconde ou à la minute, ça ne change rien car les tags sont en minutes.
Il faut fournir les mtbf et mttr en minutes.
Par defaut, la durée d'un bris est fixée à mttf. Cependant, si on donne des valeurs
acceptable pour mttrMin et mttrAlpha, on utilise une loi triangulaire. En outre, il
faut respecter mttrMin<mttr, 0.0<=mttrAlpha<=1.0. On fixe mttrMode=mttrMin+mttrAlpha*(mttr-mttrMin).
Ainsi, avec mttrAlpha=0 la loi s'étire au maximum vers les grandes valeurs et avec mttrAlpha=1
la loi est centrée symétrique sur la moyenne.
:param mom: on garde un handle vers le moment
:type mom: :class:`sim.base.Moment`
:param cible: un composant target avec la var d'état ``actif``
:param mtags: la liste de triplet (j,h,m) de changement d'état, j=0 est le jour actuel
:param periode: un triplet (j,h,m) pour la periode de cycle chaque j jours, h heures et m min
:param mtbf: mean time b4 failure en minutes wallclock
:param mttr: mean time to repair en minutes wallclock
:param mttrMin: mttr min pour loi triangulaire
:param mttrAlpha: facteur dans [0,1] pour le décentrement de la loi triangulaire (1=centré)
"""
self.mom=mom # instance de Moment
self.target=cible # la target avec un horaire (doit avoir une var d'etat actif)
self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
# les tags de changement (en secondes) d'etat de self.target.actif sur 24h
self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
self.nextTagIdx=0 # on suppose partir de mom.t0 et que self.tags[0]>mom.t0
# partie stochastique
self.triggerFreq=stochastique.TriggerFrequence(1.0/mtbf)
self.mttr=stochastique.ConstantValue(mttr)
if 0<mttrMin and mttrMin<mttr and 0<=mttrAlpha and mttrAlpha<=1:
mttrMode = int(mttrMin + mttrAlpha * (mttr - mttrMin))
mttrMax=3*mttr-mttrMin-mttrMode
if mttr<mttrMax:
print("HoraireSto avec loi triangulaire (min,mode,moy,max)=",mttrMin,mttrMode,mttr,mttrMax)
self.mttr=stochastique.TriangularDistributionSample(mttrMin,mttrMax,mttrMode)
self.actif_horaire=self.target.actif # set l'etat horaire selon le target
self.duree_arret=0 # en minute
self.new_trigger=False
def update(self):
""" Update la var d'état ``actif`` dans le target selon la minute actuelle et les tags."""
self.new_trigger=False
if self.nextTagIdx>=len(self.tags):
pass # pas de tag, donc pas de changement d'etat
#print("Erreur HoraireJour: Pas de tags")
elif (self.mom.t%self.periode)==self.tags[self.nextTagIdx]:
self.actif_horaire=not self.actif_horaire # etat de l'horaire
self.nextTagIdx+=1
if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
if self.mom.tickM: # tick a la minute
self.duree_arret-=1
if self.triggerFreq.get():
self.new_trigger=True
#print(" *** trigger!",self.mom.getJHMS())
self.duree_arret=self.mttr.get()
#if self.duree_arret==1: print(" === fin trigger!",self.mom.getJHMS())
if self.duree_arret>0: self.target.actif=False
else: self.target.actif=self.actif_horaire
class HoraireStoTAP(ecs.Component):
"""Horaire stochastique special pour une TAP, avec répétition."""
def __init__(self, mom, cible, mtags, periode, arretplan, freq=None, arretnonplan=None):
"""Configure l'horaire. La var d'état clef est ``actif`` et est dans un composant target.
On force l'initialisation a True. Les tags imposent les debuts des arrets planifies.
:param mom: on garde un handle vers l'entité père
:type mom: :class:`sim.base.Moment`
:param cible: un composant target avec la var d'état ``actif``
:param mtags: la liste de triplet (j,h,m) de changement d'état, j=0 est le jour actuel
:param periode: un triplet (j,h,m) pour la periode de cycle chaque j jours, h heures et m min
:param arretplan: est un objet avec la methode ``get`` pour obtenir la duree des arrets dans mtags (en secondes)
:param freq: est un objet qui trigger un arrets non-planifies
:param arretnonplan: est un objet avec la methode ``get`` pour obtenir la duree des arrets non-planifies genere via freq (en secondes)
"""
self.mom=mom # instance de Moment
self.target=cible # la target avec un horaire (doit avoir une var d'etat actif)
self.periode=60*(periode[0]*1440+periode[1]*60+periode[2])
# les tags de changement (en secondes) d'etat de self.target.actif sur 24h
self.tags=[60*(x[0]*1440+x[1]*60+x[2])-self.mom.t0 for x in mtags]
self.nextTagIdx=0 # on suppose partir de mom.t0 et que self.tags[0]>mom.t0
self.arretplan=arretplan # objet avec methode get de la duree d'un arret plan
self.freq=freq # objet frequence avec methode get de trigger d'un arret non-plan
self.arretnonplan=arretnonplan # objet avec methode get de la duree d'un arret non-plan
self.target.actif=True # init a True
self.duree=-1 # duree de l'arret en cours
self.trigger=False # trigger d'un arret non-plan
def update(self):
""" Update la var d'état ``actif`` dans le target selon la minute actuelle et les tags."""
if self.freq is not None and not self.trigger:
self.trigger=self.freq.get() # test de trigger d'un arret nonplan, si on en a pas deja un
self.duree-=1 # decroit la duree d'un arret
if self.nextTagIdx>=len(self.tags):
print("Erreur HoraireStoTAP: Pas de tags")
elif (self.mom.t%self.periode)==self.tags[self.nextTagIdx]: # debut d'un arret planifie
self.duree=self.arretplan.get() # duree stochastique de l'arret planifie
#print("Arret planifie (sec):",self.duree)
self.nextTagIdx+=1
if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
if self.duree<=0 and self.trigger: # si pas en arret, mais qu'on a un trigger d'arret nonplan
self.duree=self.arretnonplan.get() # duree de l'arret nonplan
#print(" Arret non-planifie (sec):",self.duree)
self.trigger=False # reset du trigger
# cas special pour entrepot plein (on suppose qu'on a un handle sur l'entrepot)
# le handle doit etre mis dans modele
if self.duree<=0 and not self.entrepot.place4crue():
self.duree=24*3600 # 24h pause because the warehouse is full (was 48h before the 28 Oct update)
#print(self.mom.getJHMS()," Pause TAP pour 48h car entrepot plein, duree de (sec)",self.duree)
# update de actif
if self.duree>0: self.target.actif=False # si en arret, alors non actif
else: self.target.actif=True
class HoraireJour(ecs.Component):
"""Horaire de base pour une journée (24h), se répétant. ATTENTION: desuet et non-supporte. """
def __init__(self, mom, cible, mtags):
"""Configure l'horaire. La var d'état clef est ``actif`` et est dans un composant target.
C'est le target qui est responsable de l'initialisation.
:param mom: on garde un handle vers l'entité père
:type mom: :class:`sim.base.Moment`
:param cible: un composant target avec la var d'état ``actif``
:param mtags: la liste de minutes de changement d'état, des entiers entre 0-1439
"""
        self.mom=mom # Moment instance
        self.target=cible # the target driven by a schedule (must expose an ``actif`` state variable)
        # state-change tags (in seconds) for self.target.actif over 24h
        self.tags=[x*60 for x in mtags]
        self.nextTagIdx=0 # assume we start at mom.t0 and that self.tags[0]>mom.t0
def update(self):
""" Update la var d'état ``actif`` dans le target selon la minute actuelle et les tags."""
if self.nextTagIdx>=len(self.tags):
print("Erreur HoraireJour: Pas de tags")
elif self.mom.tnow==self.tags[self.nextTagIdx]:
self.target.actif=not self.target.actif
self.nextTagIdx+=1
if self.nextTagIdx>=len(self.tags): self.nextTagIdx=0
```
#### File: cyme/simulation/kanban.py
```python
import re
class MetaOperation(type):
"""
Metaclasse qui permet l'enregistrement de tous les classes qui héritent de la
classe Operation. À noter qu'il faut les importer avant MetaOperation pour qu'elles
soient disponibles dans le registre
"""
registry = {}
def __new__(cls, clsname, bases, attrs):
newclass = super(MetaOperation, cls).__new__(cls, clsname, bases, attrs)
newclass.name = clsname
MetaOperation.register(newclass)
return newclass
@staticmethod
def register(cls):
MetaOperation.registry[cls.__name__] = cls
return cls
@staticmethod
def get(clsname):
try:
return MetaOperation.registry[clsname]
except KeyError:
raise OperationNotImplemented("{" + clsname + "}")
class Operation(metaclass=MetaOperation):
"""
Classe de base de pour les opérations des ponts.
Les méthodes precondtion, pretache, tache, postcondition et posttache
retournent par défaut true pour ne pas empêcher l'exécution du bt si
elles n'ont pas été implémenté dans les classes qui héritent de cette classe.
"""
duree = 0
color = (0,0,0)
@classmethod
def get_duree(cls, context=None):
return cls.duree
@classmethod
def get_color(cls):
return cls.color
@classmethod
def precondition(cls, context=None):
return True
@classmethod
def pretache(cls, pont):
pont.is_operation = True
return True
@classmethod
def tache(cls, kanban):
kanban.noeud.box.contour_color = (0, 0, 0)
kanban.noeud.box.color = kanban.operation.get_color()
kanban.set_next_target()
return True
@classmethod
def postcondition(cls, context=None):
return True
@classmethod
def posttache(cls, pont):
pont.is_operation = False
return True
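# Registry sketch (illustration only; the subclass name is hypothetical): simply defining a
# subclass of Operation registers it through MetaOperation, so it can be looked up by name.
#
#   class Anode(Operation):
#       duree = 120
#       color = (200, 50, 50)
#
#   MetaOperation.get("Anode")   # -> the Anode class, or raises OperationNotImplemented if unknown
#   Kanban("Anode")              # a Kanban resolves its operation from this class-name token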
class Kanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.croissant = True
self.debut = 0
self.cuve_max = 24
self.cuve_courante = 0
self.noeud = None
self.extra = None
self.pont = None
        self.actif = True  # True: the pont (crane) may start as soon as the kanban is received
self.actif_defaut = True
self.completed = False
self.temps_termine = 0
self.temps_restant = 0
def reset(self):
self.cuve_courante = 0
self.temps_termine = 0
self.pont = None
self.completed = False
self.actif = self.actif_defaut
def init_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def get_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def set_next_target(self):
if self.cuve_courante < self.cuve_max:
self.cuve_courante += 1
if self.cuve_courante >= self.cuve_max:
self.completed = True
def is_completed(self):
return self.completed
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
class KanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_kanbans_list(s):
kanbans_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = kanbans_str.split(",")
kanbans = []
for token1 in tokens:
preprocessed_tokens = KanbanParser.preprocess_token(token1)
for token2 in preprocessed_tokens:
kanbans.append(KanbanParser.process_token(token2))
return kanbans
@staticmethod
def preprocess_token(token):
match = re.match(KanbanParser.regex_pattern, token, re.I)
if match:
operation_type = "NONE"
begin = 0
count = 0
extra = ""
items = match.groups()
preprocessed_tokens = []
if items[0]:
operation_type = items[0]
if items[1]:
begin = int(items[1])
if items[2]:
count = int(items[2])
else:
count = 1
if items[3]:
extra = str(items[3]).upper()
for i in range(count):
if extra == "":
preprocessed_tokens.append("{type}{begin}_{number_to_visit}".format(type=operation_type, begin=begin+i,
number_to_visit=1))
else:
preprocessed_tokens.append("{type}{begin}_{number_to_visit}_{extra}".format(type=operation_type, begin=begin+i,
number_to_visit=1, extra=extra))
return preprocessed_tokens
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
@staticmethod
def process_token(token):
kanban = None
match = re.match(KanbanParser.regex_pattern, token, re.I)
        if match:
            items = match.groups()
            if items[0]:
                kanban = Kanban(items[0])
                # Ascending ("croissant") order when the operation token is upper-case
                kanban.croissant = items[0].isupper()
if items[1]:
kanban.debut = int(items[1]) - 1
if items[2]:
kanban.cuve_max = int(items[2])
else:
kanban.cuve_max = 1
if items[3]:
kanban.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return kanban
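# Token format sketch, derived from regex_pattern above (the concrete tokens are only examples):
#   "<op letters><begin>[_<count>][_<extra>]", e.g. "A3", "A3_2" or "A3_2_x".
# preprocess_token("A3_2") expands to ["A3_1", "A4_1"], one single-cuve token per cuve, and
# process_token("A3_1") then builds a Kanban with that operation, debut=2 (0-based) and cuve_max=1.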
class DelayedKanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.duree = 0
self.t_trigger = 0
self.pont = None
self.completed = False
self.temps_termine = 0
self.extra = None
self.temps_restant = 0
def reset(self):
self.temps_termine = 0
self.pont = None
self.completed = False
def is_completed(self):
return self.completed
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
class DelayedKanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_list(s):
pauses_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = pauses_str.split(",")
pauses = []
for token1 in tokens:
pauses.append(DelayedKanbanParser.process_token(token1))
return pauses
@staticmethod
def process_token(token):
pause = None
match = re.match(DelayedKanbanParser.regex_pattern, token, re.I)
if match:
items = match.groups()
if items[0]:
pause = DelayedKanban(items[0])
if items[1]:
pause.duree = int(items[1])
if items[2]:
pause.t_trigger = int(items[2])
else:
pause.t_trigger = 0
if items[3]:
pause.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return pause
class DeltaKanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.debut = 0
self.cuve_max = 1
self.cuve_courante = 0
self.delta = 0
self.pont = None
self.completed = False
self.temps_termine = 0
self.noeud = None
self.extra = None
self.temps_restant = 0
def is_completed(self):
return self.completed
def reset(self):
self.cuve_courante = 0
self.temps_termine = 0
self.pont = None
self.completed = False
def init_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def get_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def set_next_target(self):
if self.cuve_courante < self.cuve_max:
self.cuve_courante += 1
if self.cuve_courante >= self.cuve_max:
self.completed = True
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
class DeltaKanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_list(s):
dk_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = dk_str.split(",")
dk = []
for token1 in tokens:
dk.append(DeltaKanbanParser.process_token(token1))
return dk
@staticmethod
def process_token(token):
dk = None
match = re.match(DeltaKanbanParser.regex_pattern, token, re.I)
if match:
items = match.groups()
if items[0]:
dk = DeltaKanban(items[0])
if items[1]:
dk.debut = int(items[1]) - 1
if items[2]:
dk.delta = int(items[2])
else:
dk.delta = 0
if items[3]:
dk.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return dk
class ParsingException(Exception):
pass
class OperationNotImplemented(Exception):
pass
```
|
{
"source": "jfdion/mine-sweeper",
"score": 3
}
|
#### File: mine-sweeper/minesweeper/board.py
```python
from minesweeper.grid import Grid
class Board:
def __init__(self, width, height, number_of_mines=10):
self.__width = width
self.__height = height
self.__number_of_mines = number_of_mines
self.__grid = Grid(self.__width, self.__height, self.__number_of_mines)
def width(self):
return self.__width
def height(self):
return self.__height
def print(self):
print(self.__grid.to_string())
def win(self):
return self.__grid.check_win()
def loose(self):
return self.__grid.check_mine_revealed()
def reveal(self, x, y):
self.__grid.reveal(x, y)
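# Minimal usage sketch (not part of the original module): build a small board, reveal one cell
# and print the result; the board size and mine count below are arbitrary examples.
if __name__ == "__main__":
    board = Board(9, 9, number_of_mines=10)
    board.reveal(0, 0)
    board.print()
    print("won:", board.win(), "lost:", board.loose())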
```
#### File: mine-sweeper/minesweeper/cell.py
```python
class Cell:
"""
| (-1, -1) | (0, -1) | (1, -1) |
| (-1, 0) | x | (1, 0) |
| (-1, 1) | (0, 1) | (1, 1) |
"""
__positions = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
"""
| | (0, -1) | |
| (-1, 0) | x | (1, 0) |
| | (0, 1) | |
"""
__cross_positions = [(0, -1), (-1, 0), (1, 0), (0, 1)]
def __init__(self, x, y, grid, is_mine=False):
self.__x = x
self.__y = y
self.__grid = grid
self.__is_mine = is_mine
self.__is_revealed = False
self.__neighbor_mines = 0
def to_string(self):
if not self.__is_revealed:
return "?"
elif self.__is_mine:
return "!"
elif self.__neighbor_mines > 0:
return str(self.__neighbor_mines)
else:
return " "
def revealed(self):
return self.__is_revealed
def is_mine(self):
return self.__is_mine
def neighbor_count(self):
return self.__neighbor_mines
def update_neighbor_count(self):
self.__neighbor_mines = 0
if self.__is_mine:
return
for (x, y) in self.__positions:
if self.__grid.exists(self.__x + x, self.__y + y) and \
self.__grid.is_mine(self.__x + x, self.__y + y):
self.__neighbor_mines += 1
def reveal(self):
if self.__is_revealed:
return
self.__is_revealed = True
if not self.__is_mine and not self.__neighbor_mines > 0:
for (x, y) in self.__cross_positions:
self.__grid.reveal(self.__x + x, self.__y + y)
```
#### File: mine-sweeper/minesweeper/grid.py
```python
from random import randint
from minesweeper.cell import Cell
class Grid:
def __init__(self, width, height, number_of_mines):
self.__width = width
self.__height = height
self.__mines_locations = self._build_mines_locations(number_of_mines)
self.__grid = [[Cell(x, y, self, is_mine=(x, y) in self.__mines_locations)
for x in range(0, self.__width)]
for y in range(0, self.__height)]
[[cell.update_neighbor_count() for cell in row] for row in self.__grid]
    def init_grid_reveal_cache(self):
        # Build each row independently; multiplying a list of lists would alias the same row object
        return [[False] * self.__width for _ in range(self.__height)]
def _build_mines_locations(self, number_of_mines):
mines_locations = []
i = 0
while i < number_of_mines:
x = randint(0, self.__width - 1)
y = randint(0, self.__height - 1)
if (x, y) not in mines_locations:
mines_locations.append((x, y))
i += 1
return mines_locations
def check_win(self):
return sum([sum([0 if cell.revealed() else 1 for cell in row]) for row in self.__grid]) == len(
self.__mines_locations)
def check_mine_revealed(self):
return sum([sum([1 if cell.revealed() and cell.is_mine() else 0 for cell in row]) for row in self.__grid]) > 0
def exists(self, x, y):
return 0 <= x < self.__width and 0 <= y < self.__height
def is_mine(self, x, y):
if self.exists(x, y):
return self.__grid[y][x].is_mine()
def reveal(self, x, y):
if self.exists(x, y) and not self.revealed(x, y):
self.__grid[y][x].reveal()
def revealed(self, x, y):
if self.exists(x, y):
return self.__grid[y][x].revealed()
return True
def has_neighbor(self, x, y):
if self.exists(x, y):
return self.__grid[y][x].neighbor_count() > 0
def to_string(self):
out = "| " + (len(str(self.__height)) * " ") + " |"
for i in range(0, self.__width):
out += " " + (self.__calculate_padding(str(i + 1), len(str(self.__width)), 1) * " ")
out += str(i + 1)
out += (self.__calculate_padding(str(i + 1), len(str(self.__width)), -1) * " ") + " |"
out += "\n"
j = 1
for row in self.__grid:
out += "| " + str(j) + ((len(str(self.__height)) - len(str(j))) * " ") + " |"
for (x, cell) in enumerate(row):
out += " "
out += (self.__calculate_padding(cell.to_string(), len(str(self.__width)), 1) * " ")
out += cell.to_string()
out += (self.__calculate_padding(cell.to_string(), len(str(self.__width)), -1) * " ") + " |"
out += "\n"
j += 1
return out
def __calculate_padding(self, text, total_length, modifier):
size = abs(total_length - len(text))
if size == 0:
return size
if modifier > 0:
modifier = 1
else:
modifier = -1
if size % 2 == 0:
return int(size / 2)
else:
return int((size + modifier) / 2)
```
|
{
"source": "jfdong/ocm",
"score": 3
}
|
#### File: python/scripts/parameter_test.py
```python
import argparse
import re
import os
from termcolor import colored
def mode_validation(mode):
    regex = re.compile(r'[@_!#$%^&*()<>?/\|}{~:]')
    if mode == "":
        print("{0}".format(colored("Mode Is Empty", "red")))
    elif " " in mode:
        print("{0}: {1} ".format(colored("Mode name Contain Spaces", "red"), mode))
    elif regex.search(mode) != None:
        print("{0}: {1}".format(colored("Mode name Contain Special Characters", "red"), mode))
    else:
        print("Mode is ok")
def device_validation(device):
    regex = re.compile(r'[@_!#$%^&*()<>?/\|}{~:]')
    device_list = ["CPU", "GPU", "MYRIAD", "HDDL"]
    if device in device_list:
        if device == "":
            print("{0}".format(colored("Device List is found Empty", "red")))
        elif " " in device:
            print("{0}: {1} ".format(colored("Device Name Contains Spaces", "red"), device))
        elif regex.search(device) != None:
            print("{0}: {1}".format(colored("Device Name Contain Special Characters", "red"), device))
        else:
            print("Devices is ok")
    else:
        print("{0}".format(colored("Please Choose Device", "red")))
def modelpath_validation(model_path):
if not os.path.exists(model_path) and not model_path=="None":
print("{0}".format(colored("Model Path does not exists or Please give None in model path", "red")))
else:
print("Path is ok")
def testlist_validation(test_list):
    # Validate the path first, then read the size of the file that was actually passed in
    if not os.path.isfile(test_list):
        print("{0}".format(colored("Path doesn't Exists", "red")))
        return
    try:
        file_size = os.path.getsize(test_list)
    except FileNotFoundError:
        print("{0}".format(colored("File is not Available", "red")))
        return
    if file_size == 0:
        print("{0}".format(colored("File is found empty", "red")))
    else:
        print("Test List is ok")
```
|
{
"source": "JFDonovan/Kive",
"score": 2
}
|
#### File: app/backend/index.py
```python
from whoosh.index import create_in, open_dir
from whoosh.writing import BufferedWriter, AsyncWriter
from whoosh.fields import *
from whoosh import analysis
from whoosh.support.charset import accent_map
from extract_text import scrape_paths
from datetime import datetime
import os
import time
import sys
import multiprocessing
import config
def get_schema():
'''
Specifies what fields are stored in the index and returns to be passed to newly created index.
'''
# analyzer = analysis.SpaceSeparatedTokenizer() | analysis.LowercaseFilter() | analysis.CharsetFilter(accent_map)
analyzer = analysis.StandardAnalyzer(stoplist=None, minsize=1) | analysis.CharsetFilter(accent_map)
return Schema(name=TEXT(analyzer=analyzer, stored=True),
path=TEXT(stored=True),
content=TEXT(analyzer=analyzer, stored=True),
legacy_ingest=TEXT,
ingest=TEXT,
last_accessed=TEXT,
media_files=TEXT(analyzer=analyzer, stored=True),
indexed_time=DATETIME(stored=True),
id=ID(stored=True, unique=True))
def index_docs(json_lst, operation, workspace_guid):
'''
Writes, updates, or deletes documents to/from the index based on the specified operation.
'''
try:
# The schema specifies the fields of documents in an index
schema = get_schema()
# Index directory has to be made before index gets written
if len(os.listdir(config.app_data_path + '/workspace_repo/{}/index_dir'.format(workspace_guid))) == 0:
# Create index based on schema
ix = create_in(config.app_data_path + '/workspace_repo/{}/index_dir'.format(workspace_guid), schema)
else:
# Open existing index
ix = open_dir(config.app_data_path + '/workspace_repo/{}/index_dir'.format(workspace_guid))
# Prepare to write paths to index
writer = ix.writer(procs=os.cpu_count(), multisegment=True)
searcher = ix.searcher()
id_check = ""
if operation == 'add': # Used if files are imported into workspace
update_node_list = []
index_lst = scrape_paths(json_lst)
for entry, content, media_files, update_url_node in index_lst:
if update_url_node is not None:
update_node_list.append(update_url_node)
path = entry['path']
name = entry['name']
legacy_ingest = entry['legacy_ingest']
ingest = entry['ingest']
last_accessed = entry['last_accessed']
indexed_time = datetime.utcnow()
id = entry['id']
writer.add_document(name=name,
path=path,
content=content,
legacy_ingest=legacy_ingest,
ingest=ingest,
last_accessed=last_accessed,
media_files=media_files,
indexed_time=indexed_time,
id=id)
writer.commit()
return update_node_list
elif operation == 'update': # Used if names or dates get updated in workspace
# index_lst = scrape_paths(json_lst)
for entry in json_lst:
path = entry['path']
name = entry['name']
legacy_ingest = entry['legacy_ingest']
ingest = entry['ingest']
last_accessed = entry['last_accessed']
indexed_time = datetime.utcnow()
id = entry['id']
old_doc = searcher.document(id=id)
content = old_doc['content']
media_files = old_doc['media_files']
# id_check = id
writer.update_document(name=name,
path=path,
content=content,
legacy_ingest=legacy_ingest,
ingest=ingest,
last_accessed=last_accessed,
media_files=media_files,
indexed_time=indexed_time,
id=id)
elif operation == 'delete': # Used if files get deleted from workspace
for entry in json_lst:
id = entry['id']
writer.delete_by_term('id', id)
writer.commit()
except Exception as e:
writer.commit()
raise e
```
#### File: app/backend/parse_nonlegacy.py
```python
from datetime import datetime
from selectolax.parser import HTMLParser
import os
import uuid
'''
Non-legacy directory ingest workflow.
'''
def find_files(folder):
'''
Walks through the specified directory to find all web pages and create JSON objects for them.
'''
def find_files_helper(folder, json_obj):
folder_list = os.listdir(folder)
folders = []
html_files = set()
for entity in folder_list:
if os.path.isdir(folder + "/" + entity):
folders.append(folder + "/" + entity)
else:
# Get file extension
ext = os.path.splitext(entity)
# Only handle html or htm files
if ext[1] in ('.html', '.htm'):
html_files.add(folder + "/" + entity.split(".htm")[0] + "_files")
# Create new JSON object that represents a web page and its metadata
new_file_obj = {
'type': 'file',
'name': os.path.basename(folder + "/" + entity),
'legacy_ingest': '',
'ingest': datetime.now().strftime('%Y%m%d'),
'last_accessed': datetime.fromtimestamp(os.path.getmtime(folder + '/' + entity)).strftime('%Y%m%d'),
'path': folder + "/" + entity,
'source': '',
'icon': '',
'id': str(uuid.uuid4()),
'children': []
}
json_obj['children'].append(new_file_obj)
json_lst.append(new_file_obj)
for f in folders:
go_in = True
if f in html_files:
go_in = False
if go_in:
new_folder_obj = {
'type': 'folder',
'name': os.path.basename(f),
'id': str(uuid.uuid4()),
'children': []
}
json_obj['children'].append(new_folder_obj)
find_files_helper(f, new_folder_obj)
# Flat list of web page JSON nodes that are passed to the indexing function
json_lst = []
# Provides the root folder for the JSON tree
json_tree = {
'type': 'folder',
'name': os.path.basename(folder),
'id': str(uuid.uuid4()),
'children': []
}
try:
find_files_helper(folder, json_tree)
except Exception as e:
return None, None
return [json_tree], json_lst
```
#### File: app/backend/parse_sb.py
```python
from html.parser import HTMLParser
from datetime import datetime
import json
import sys
import os
import uuid
'''
ScrapBook directory ingest workflow.
'''
#Declaring global variables needed to be accessed everywhere
folder_path = ''
child_dict = {}
json_dict = {}
class MyRDFParser(HTMLParser):
# Declaring class variable
global child_dict
global json_dict
current_parent = ''
# Helper method to get ScrapBook name from 'RDF:about' attribute
def get_sb_name(self, full_string):
return full_string.split(':')[2]
# Method to handle behavior upon reading various tags
def handle_starttag(self, tag, attrs):
# 'RDF:Description' tag creates a new json object (file or folder) to be stored in json_dict
if (tag == 'rdf:description'):
name = self.get_sb_name(attrs[0][1])
sb_date = name.split('item')[1]
date = datetime.today().strftime('%Y%m%d')
file_path = folder_path + sb_date + '/index.html'
# Fields that will be read from RDF attributes
item_type = ''
source_url = ''
icon_src = ''
title = ''
# Loop through attributes to get desired data
for attr in attrs:
if attr[0] == 'ns1:type':
if attr[1] == 'folder':
item_type = 'folder'
file_path = ''
else:
item_type = 'file'
elif attr[0] == 'ns1:title':
title = attr[1]
elif attr[0] == 'ns1:source':
source_url = attr[1]
elif attr[0] == 'ns1:icon':
full_icon_str = attr[1]
if full_icon_str.startswith('resource://scrapbook'):
icon_src = folder_path + full_icon_str.split('data/')[1]
else:
icon_src = full_icon_str
# Creating new JSON object with desired fields
new_json = {
"type": item_type,
"legacy_ingest": sb_date[0:8],
"ingest": date,
"last_accessed": date,
"source": source_url,
"icon": icon_src,
"name": title,
"path": file_path,
"id": str(uuid.uuid4()),
"children": [],
}
# Adding new JSON to json_dict
json_dict[name] = new_json
#'RDF:Seq' tag adds folder name to child_dict keys and makes it current_parent
if (tag == 'rdf:seq'):
name = self.get_sb_name(attrs[0][1])
MyRDFParser.current_parent = name
child_dict[name] = []
#'RDF:li' tag adds current resource to current_parrent's field in child_dict
if (tag == 'rdf:li'):
name = self.get_sb_name(attrs[0][1])
child_dict[MyRDFParser.current_parent].append(name)
# Method to handle behavior on 'RDF:Seq' end tag
def handle_endtag(self, tag):
if (tag == 'rdf:seq'):
MyRDFParser.current_parent = ''
# Method to build JSON object from global variables
def build_tree_json():
global child_dict
global json_dict
final_json = []
file_json_list = []
for item in json_dict.values():
if (item['type'] == 'file'):
file_json_list.append(item)
# Iterate over every folder, represented by keys in 'child_dict'
for parent in child_dict:
# Check if parent is root, we will take care of root children later
if (parent != 'root'):
parent_json = json_dict[parent]
# Iterate over children of parent in top level loop
for child in child_dict[parent]:
# Append child JSON to parent JSON
parent_json['children'].append(json_dict[child])
# Update parent JSON in json_dict
json_dict[parent] = parent_json
# Add 'root' children to top level 'final_json'
for child in child_dict['root']:
final_json.append(json_dict[child])
# Return newly built nested JSON object respresenting full tree
return final_json, file_json_list
# Method to be called from outside scripts.
# Accepts filepath pointing to 'scrapbook.rdf' file in a valid ScrapBook repository
def parse_rdf(filepath):
global child_dict
global json_dict
child_dict = {}
json_dict = {}
# Open given filepath as 'rdf' and turn it into a string to be fed to MyRDFParser
with open (filepath, 'r', encoding='utf-8', errors='replace') as rdf:
rdf_str = rdf.read()
# Updating global folder path to be accessed in MyRDFParser
global folder_path
folder_path = filepath.split('scrapbook.rdf')[0] + 'data/'
# Checking if 'data/' folder exists
if (os.path.exists(folder_path)):
# Creating MyRDFParser object and feeding it 'rdf_str'
parser = MyRDFParser()
parser.feed(rdf_str)
parser.close()
# Build tree JSON and return it
return build_tree_json()
else:
# 'data/' folder doesn't exist, reset global 'folder_path' and return None
folder_path = ""
raise Exception('ScrapBook directory could not be imported.')
```
|
{
"source": "jfdupuis/garage-door-controller",
"score": 2
}
|
#### File: jfdupuis/garage-door-controller/garage_controller.py
```python
import datetime
import time
import syslog
import smtplib
import RPi.GPIO as gpio
import json
from email.mime.text import MIMEText
from email.utils import formatdate
from email.utils import make_msgid
import sys
# urlencode lives in a different module on Python 3, so import it alongside the http client
if sys.version_info < (3,):
    import httplib as httpclient
    from urllib import urlencode
else:
    import http.client as httpclient
    from urllib.parse import urlencode
class Door(object):
last_action = None
last_action_time = None
alert_sent = False
confirm_close = False
pb_iden = None
def __init__(self, doorId, config):
self.id = doorId
self.name = config['name']
self.in_sentence = config['in_sentence']
self.relay_pin = config['relay_pin']
self.state_pin = config['state_pin']
self.state_pin_closed_value = config.get('state_pin_closed_value', 0)
self.time_to_close = config.get('approx_time_to_close', 10)
self.time_to_open = config.get('approx_time_to_open', 10)
self.openhab_name = config.get('openhab_name')
self.open_time = time.time()
self.alert_sent_time = time.time()
gpio.setup(self.relay_pin, gpio.OUT)
gpio.setup(self.state_pin, gpio.IN, pull_up_down=gpio.PUD_UP)
gpio.output(self.relay_pin, True)
def get_state(self):
if gpio.input(self.state_pin) == self.state_pin_closed_value:
return 'closed'
elif self.last_action == 'open':
if time.time() - self.last_action_time >= self.time_to_open:
return 'open'
else:
return 'opening'
elif self.last_action == 'close':
if time.time() - self.last_action_time >= self.time_to_close:
return 'open' # This state indicates a problem
else:
return 'closing'
else:
return 'open'
def toggle_relay(self):
state = self.get_state()
if (state == 'open'):
self.last_action = 'close'
self.last_action_time = time.time()
elif state == 'closed':
self.last_action = 'open'
self.last_action_time = time.time()
else:
self.last_action = None
self.last_action_time = None
gpio.output(self.relay_pin, False)
time.sleep(0.2)
gpio.output(self.relay_pin, True)
class Controller(object):
def __init__(self, config):
self.init_gpio()
self.updateHandler = None
self.config = config
self.doors = [Door(n, c) for (n, c) in list(config['doors'].items())]
for door in self.doors:
door.last_state = 'unknown'
door.last_state_time = time.time()
self.use_alerts = config['config']['use_alerts']
self.alert_type = config['alerts']['alert_type']
self.time_to_wait = config['alerts']['time_to_wait']
self.time_btw_alert_repeat = config['alerts']['time_btw_alert_repeat']
self.open_time_to_alert = config.get('open_time_to_alert', 30)
if self.alert_type == 'smtp':
self.use_smtp = False
smtp_params = ("smtphost", "smtpport", "smtp_tls", "username",
"password", "<PASSWORD>", "time_to_wait")
self.use_smtp = ('smtp' in config['alerts']) and set(
smtp_params) <= set(config['alerts']['smtp'])
syslog.syslog("we are using SMTP")
elif self.alert_type == 'pushbullet':
self.pushbullet_access_token = config['alerts']['pushbullet']['access_token']
syslog.syslog("we are using Pushbullet")
elif self.alert_type == 'pushover':
self.pushover_user_key = config['alerts']['pushover']['user_key']
syslog.syslog("we are using Pushover")
else:
self.alert_type = None
syslog.syslog("No alerts configured")
def init_gpio(self):
gpio.setwarnings(False)
gpio.cleanup()
gpio.setmode(gpio.BCM)
def set_update_handler(self, update_handler):
self.updateHandler = update_handler
def status_check(self):
for door in self.doors:
new_state = door.get_state()
if (door.last_state != new_state):
door.last_state = new_state
door.last_state_time = time.time()
door.alert_sent = False
self.notify_state_change(door, new_state)
send_open_alert = False
if (new_state == 'open') and door.alert_sent and (
time.time() - door.alert_sent_time >= self.time_btw_alert_repeat):
send_open_alert = True
if (new_state == 'open' and not door.alert_sent and time.time(
) - door.open_time >= self.time_to_wait + door.time_to_open):
send_open_alert = True
if send_open_alert:
if self.use_alerts:
elapsed_time = int(time.time() - door.open_time)
title = "%s%s%s" % (door.name, door.in_sentence, new_state)
message = "%s%shas been open for %s" % (
door.name, door.in_sentence, format_seconds(elapsed_time))
self.send_alert(door, title, message)
door.alert_sent = True
door.confirm_close = True
door.alert_sent_time = time.time()
if new_state == 'closed':
if self.use_alerts:
if door.confirm_close is True:
elapsed_time = int(time.time() - door.open_time)
title = "%s%s%s" % (
door.name, door.in_sentence, new_state)
message = "%s%sis now closed being open for %s " % (
door.name, door.in_sentence, format_seconds(elapsed_time))
self.send_alert(door, title, message)
door.open_time = time.time()
door.confirm_close = False
door.alert_sent = False
def notify_state_change(self, door, new_state):
syslog.syslog('%s: %s => %s' % (door.name, door.last_state, new_state))
if self.updateHandler is not None:
self.updateHandler.handle_updates()
if self.config['config']['use_openhab'] and (
new_state == "open" or new_state == "closed"):
self.update_openhab(door.openhab_name, new_state)
def send_alert(self, door, title, message):
if self.alert_type == 'smtp':
self.send_email(title, message)
elif self.alert_type == 'pushbullet':
self.send_pushbullet(door, title, message)
elif self.alert_type == 'pushover':
self.send_pushover(door, title, message)
def send_email(self, title, message):
try:
if self.use_smtp:
syslog.syslog("Sending email message")
config = self.config['alerts']['smtp']
message = MIMEText(message)
message['Date'] = formatdate()
message['From'] = config["username"]
message['To'] = config["to_email"]
message['Subject'] = config["subject"]
message['Message-ID'] = make_msgid()
server = smtplib.SMTP(config["smtphost"], config["smtpport"])
if (config["smtp_tls"] == "True"):
server.starttls()
server.login(config["username"], config["password"])
server.sendmail(config["username"],
config["to_email"], message.as_string())
server.close()
except Exception as inst:
syslog.syslog("Error sending email: " + str(inst))
def send_pushbullet(self, door, title, message):
try:
syslog.syslog("Sending pushbutton message")
config = self.config['alerts']['pushbullet']
if door.pb_iden is not None:
for token in config['access_token']:
conn = httpclient.HTTPSConnection("api.pushbullet.com:443")
conn.request("DELETE", '/v2/pushes/' + door.pb_iden, "",
{'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json'})
conn.getresponse()
conn.close()
door.pb_iden = None
for token in config['access_token']:
conn = httpclient.HTTPSConnection("api.pushbullet.com:443")
conn.request("POST", "/v2/pushes",
json.dumps({
"type": "note",
"title": title,
"body": message,
}), {'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json'})
response = conn.getresponse().read()
door.pb_iden = json.loads(response.decode('utf-8'))['iden']
conn.close()
except Exception as inst:
syslog.syslog("Error sending to pushbullet: " + str(inst))
def send_pushover(self, door, title, message):
try:
syslog.syslog("Sending Pushover message")
config = self.config['alerts']['pushover']
conn = httpclient.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
                         urlencode({
"token": config['api_key'],
"user": config['user_key'],
"title": title,
"message": message,
}), {"Content-type": "application/x-www-form-urlencoded"})
conn.getresponse()
conn.close()
except Exception as inst:
syslog.syslog("Error sending to pushover: " + str(inst))
def update_openhab(self, item, state):
try:
syslog.syslog("Updating openhab")
config = self.config['openhab']
conn = httpclient.HTTPConnection(
"%s:%s" % (config['server'], config['port']))
conn.request("PUT", "/rest/items/%s/state" % item, state)
conn.getresponse()
conn.close()
except Exception as inst:
syslog.syslog("Error updating openhab: " + str(inst))
def toggle(self, doorId):
for d in self.doors:
if d.id == doorId:
syslog.syslog('%s: toggled' % d.name)
d.toggle_relay()
return
def get_updates(self, lastupdate):
updates = []
for d in self.doors:
if d.last_state_time >= lastupdate:
updates.append((d.id, d.last_state, d.last_state_time))
return updates
def format_seconds(num_seconds):
return str(datetime.timedelta(seconds=num_seconds))
```
#### File: garage-door-controller/test/test_garage_server.py
```python
import unittest
import json
import sys
sys.modules['RPi'] = __import__('simRPi')
from garage_controller import Controller # noqa
import garage_server # noqa
class UptimeHandlerTest(unittest.TestCase):
def setUp(self):
config_file = open('config.json')
self.config = json.load(config_file)
config_file.close()
def test_uptime(self):
controller = Controller(self.config)
uptime_handler = garage_server.UptimeHandler(controller)
uptime = uptime_handler.getUptime()
# Check that all field returned a numerical value greater or equal to zero
uptime_fields = uptime.split(b":")
for field in uptime_fields:
self.assertGreaterEqual(float(field), 0.0)
```
|
{
"source": "jfdur/durham-year1-archive",
"score": 4
}
|
#### File: durham-year1-archive/ADS/q1.py
```python
def hash_quadratic(l):
#Create a new list filled with - to represent blanks
table = ['-']*19
#Iterate through the provided list
for i in range(0, len(l)):
#Determine the position the key should go to (if there is space) in hash table
pos = ((6 * l[i] + 3) % 19)
finished = False
step = 0
"""
While a finished condition hasn't been reached, try placing the element
at the next available location, increasing quadratically.
"""
while not finished:
newPos = ((pos + step ** 2) % 19)
#If the new found position is available, use it and finish.
if(table[newPos] == '-'):
table[newPos] = l[i]
finished = True
#If we've gone through the whole hash table and there is no space, exit
elif(step >= 19):
finished = True
#Increase the step so next quadratic number is tried on next run (if it occurs)
step += 1
return table
def hash_double(l):
#Create a new list filled with - to represent blanks
table = ['-']*19
#Iterate through the provided list
for i in range(0, len(l)):
#Generate hash value using first hash function
h1 = ((6 * l[i] + 3) % 19)
#If there is space at that loc, place the key
if(table[h1] == '-'):
table[h1] = l[i]
else:
#If there is no space, generate the secondary function
h2 = 11 - (l[i] % 11)
finished = False
step = 1
#Try place at next available location, using secondary hash function and a step.
while not finished:
pos = (h1 + step * h2) % 19
if(table[pos] == '-'):
table[pos] = l[i]
finished = True
#If all positions have been tried, give up
elif(step >= 19):
finished = True
step += 1
return table
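# Quick illustrative check (not part of the original exercise): keys are hashed with
# h(k) = (6k + 3) mod 19; collisions are resolved quadratically or with the second hash
# h2(k) = 11 - (k mod 11). The key list below is an arbitrary example.
if __name__ == "__main__":
    keys = [3, 10, 22, 29, 41, 60]
    print(hash_quadratic(keys))
    print(hash_double(keys))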
```
#### File: durham-year1-archive/ADS/q6c.py
```python
def MergeSort(m):
"""
If the length is less than or equal to 1, just return
since we don't even need to run SelectionSort
"""
if len(m) <= 1:
return m
#Calculate the midpoint and cast to int so there is no decimal
middle = int(len(m) / 2)
#Init some empty arrays. ls/rs are sorted
l = []
r = []
ls = []
rs = []
#The left half must go from 0 to the midpoint (exclusive)
l = m[0:middle]
#The right half must go from the midpoint and capture all elements to the end
r = m[middle:]
"""
If the length is less than or equal to 4, use SelectionSort, as specified in the Q6a brief
If not, use MergeSort
"""
if len(l) <= 4:
ls = SelectionSort(l)
else:
ls = MergeSort(l)
if len(r) <= 4:
rs = SelectionSort(r)
else:
rs = MergeSort(r)
#Merge the parts together
return Merge(ls, rs)
def Merge(l, r):
result = []
#While one of the halves is not empty...
while(len(l) > 0 or len(r) > 0):
#If they're both not empty...
if len(l) > 0 and len(r) > 0:
#Check if first element in left is greater or equal than right
if l[0] >= r[0]:
#If it is, append it to result
result.append(l[0])
#Remove the element that has been added to result
l = l[1:]
else:
#If not, first element in right must be greater so use it instead
result.append(r[0])
r = r[1:]
elif len(l) > 0:
"""
If only left has something
Add whatever remaining items are on left to result and clear left
"""
result += l
l = []
else:
"""
If only right has something
Add whatever remaining items are on right to result and clear right
"""
result += r
r = []
return result
def SelectionSort(listPart):
#Location of max has not yet been found
maxLoc = -1
#Iterate through list
for i in range(len(listPart)):
#Set maxLoc to be the current element
maxLoc = i
#Iterate through remaining elements of list in front of i
for j in range(i + 1, len(listPart)):
#If the j element is greater than the one at current maxLoc, set maxLoc to be j
if listPart[j] > listPart[maxLoc]:
maxLoc = j
#Switch the element at i with the one at maxLoc.
listPart[i], listPart[maxLoc] = listPart[maxLoc], listPart[i]
return listPart
```
#### File: CT/bioinformatics/q1.py
```python
import time
import sys
# YOUR FUNCTIONS GO HERE -------------------------------------
# 1. Populate the scoring matrix and the backtracking matrix
# ------------------------------------------------------------
def calculateMatrices(seq1, seq2):
    # Global alignment (Needleman-Wunsch style): s is the scoring matrix and b the backtracking
    # matrix, where 'D' = diagonal (match/mismatch), 'U' = up (gap in seq1), 'L' = left (gap in seq2)
    s = [[0]*(len(seq1) + 1) for _ in range(len(seq2) + 1)]
    b = [['-']*(len(seq1) + 1) for _ in range(len(seq2) + 1)]
for i in range(0, len(s)):
newI = -2 * i
seq2Acc = i - 1
for j in range(0, len(s[i])):
if i == j == 0:
continue
elif j == 0:
s[i][j] = newI
b[i][j] = 'U'
elif i == 0:
s[i][j] = -2 * j
b[i][j] = 'L'
else:
c = -3
seq1Acc = j - 1
if seq1[seq1Acc] == seq2[seq2Acc]:
if seq1[seq1Acc] == 'A':
c = 4
elif seq1[seq1Acc] == 'C':
c = 3
elif seq1[seq1Acc] == 'G':
c = 2
elif seq1[seq1Acc] == 'T':
c = 1
largest = c + s[seq2Acc][seq1Acc]
b[i][j] = 'D'
nextR = s[seq2Acc][j] - 2
if nextR > largest:
largest = nextR
b[i][j] = 'U'
nextR = s[i][seq1Acc] - 2
if nextR > largest:
largest = nextR
b[i][j] = 'L'
s[i][j] = largest
i = len(seq2)
j = len(seq1)
bestScore = s[i][j]
alSeq1 = ''
alSeq2 = ''
while(not(i == j == 0)):
v = b[i][j]
if v == 'L':
alSeq1 = seq1[j - 1] + alSeq1
alSeq2 = '-' + alSeq2
j -= 1
elif v == 'U':
alSeq2 = seq2[i - 1] + alSeq2
alSeq1 = '-' + alSeq1
i -= 1
elif v == 'D':
alSeq1 = seq1[j - 1] + alSeq1
alSeq2 = seq2[i - 1] + alSeq2
i -= 1
j -= 1
response = {'alignment':[alSeq1, alSeq2], 'bestScore':bestScore}
return response
# DO NOT EDIT ------------------------------------------------
# Given an alignment, which is two strings, display it
def displayAlignment(alignment):
string1 = alignment[0]
string2 = alignment[1]
string3 = ''
for i in range(min(len(string1),len(string2))):
if string1[i]==string2[i]:
string3=string3+"|"
else:
string3=string3+" "
print('Alignment ')
print('String1: '+string1)
print(' '+string3)
print('String2: '+string2+'\n\n')
# ------------------------------------------------------------
# DO NOT EDIT ------------------------------------------------
# This opens the files, loads the sequences and starts the timer
file1 = open(sys.argv[1], 'r')
seq1=file1.read()
file1.close()
file2 = open(sys.argv[2], 'r')
seq2=file2.read()
file2.close()
start = time.time()
#-------------------------------------------------------------
# YOUR CODE GOES HERE ----------------------------------------
# The sequences are contained in the variables seq1 and seq2 from the code above.
# Intialise the scoring matrix and backtracking matrix and call the function to populate them
# Use the backtracking matrix to find the optimal alignment
# To work with the printing functions below the best alignment should be called best_alignment and its score should be called best_score.
result = calculateMatrices(seq1, seq2)
best_alignment = result.get('alignment')
best_score = result.get('bestScore')
#-------------------------------------------------------------
# DO NOT EDIT (unless you want to turn off displaying alignments for large sequences)------------------
# This calculates the time taken and will print out useful information
stop = time.time()
time_taken=stop-start
# Print out the best
print('Time taken: '+str(time_taken))
print('Best (score '+str(best_score)+'):')
displayAlignment(best_alignment)
#-------------------------------------------------------------
```
#### File: CT/bioinformatics/q3.py
```python
import networkx as nx
import matplotlib as plt
import time
import copy
def wpgma(fileName):
f = open(fileName, 'r')
m = []
species = []
first = True
for line in f:
lineTokens = line.strip().split(' ')
lineTokensNoFirst = lineTokens[1:]
if first:
species = lineTokensNoFirst
first = False
continue
m.append([float(x) for x in lineTokensNoFirst])
f.close()
originalSpecies = copy.copy(species)
G = nx.Graph()
level = 0
print(species)
for i in m:
print(i)
while(len(m) > 1):
print()
r = reduceMatrix(m, species, G, originalSpecies, level)
m = r[0]
species = r[1]
level = r[2]
nx.draw(G, with_labels=True)
plt.pyplot.draw()
plt.pyplot.savefig(fileName + '.png')
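# Expected input sketch (an assumption inferred from the parsing above, not a documented format):
# a single-space-separated distance matrix whose first row and first column hold labels, e.g.
#   X A B C
#   A 0.0 4.0 8.0
#   B 4.0 0.0 6.0
#   C 8.0 6.0 0.0
# The leading token of each line is skipped; the remaining tokens give the species names
# (first line) and the pairwise distances (following lines).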
def reduceMatrix(m, species, G, originalSpecies, level):
currentSpecies = species
minRow = -1
minCol = -1
minVal = -1
for i in range(0, len(m)):
col, val = min(enumerate(m[i]), key=lambda x: x[1] if x[1] > 0 else float('inf'))
if val != 0 and (minVal == -1 or val < minVal):
minRow = i
minCol = col
minVal = val
for i in range(0, len(m)):
for j in range(0, len(m[i])):
if ((i == minRow or i == minCol) and j != minRow and j != minCol):
m[i][j] = (m[minRow][j] + m[minCol][j]) / 2
elif ((j == minRow or j == minCol) and i != minRow and i != minCol):
m[i][j] = (m[i][minRow] + m[i][minCol]) / 2
speciesGroup = '(' + currentSpecies[minRow] + ',' + currentSpecies[minCol] + ')'
if not G.has_node(currentSpecies[minRow]):
G.add_node(currentSpecies[minRow])
if not G.has_node(currentSpecies[minCol]):
G.add_node(currentSpecies[minCol])
if not G.has_node(speciesGroup):
G.add_node(speciesGroup)
G.add_edge(currentSpecies[minRow], speciesGroup)
G.add_edge(currentSpecies[minCol], speciesGroup)
currentSpecies[minRow] = speciesGroup
currentSpecies.pop(minCol)
print(currentSpecies)
m.pop(minCol)
for i in m:
del i[minCol]
print(i)
return [m, currentSpecies, level + 1]
start = time.time()
wpgma('matrix2(1).txt')
stop = time.time()
print('Time taken to calculate matrices and draw phylogenetic tree: ' + str(stop - start))
```
#### File: CT/error-correcting-codes/src.py
```python
def hammingGeneratorMatrix(r):
n = 2**r-1
#construct permutation pi
pi = []
for i in range(r):
pi.append(2**(r-i-1))
for j in range(1,r):
for k in range(2**j+1,2**(j+1)):
pi.append(k)
#construct rho = pi^(-1)
rho = []
for i in range(n):
rho.append(pi.index(i+1))
#construct H'
H = []
for i in range(r,n):
H.append(decimalToVector(pi[i],r))
#construct G'
GG = [list(i) for i in zip(*H)]
for i in range(n-r):
GG.append(decimalToVector(2**(n-r-i-1),n-r))
#apply rho to get Gtranpose
G = []
for i in range(n):
G.append(GG[rho[i]])
#transpose
G = [list(i) for i in zip(*G)]
return G
#function decimalToVector
#input: numbers n and r (0 <= n<2**r)
#output: a string v of r bits representing n
def decimalToVector(n,r):
v = []
for s in range(r):
v.insert(0,n%2)
n //= 2
return v
def message(a):
l = len(a)
#If the input is invalid, discard. No message can be calculated
if not type(a) == list or l < 1:
return []
m = []
#Determine the r value based on message length
r = 2
while(2**r - 2 * r - 1 < l):
r += 1
k = 2 ** r - r - 1
#Convert the input length to a binary list of length r
m += decimalToVector(l, r)
#Append the input to the message
m += a
#Add the required 0 padding to make a valid message
m += [0]*(k - len(m))
return m
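# Worked example: message([1]) -> the smallest r with 2**r - 2*r - 1 >= 1 is r = 3, so k = 4;
# the length prefix is decimalToVector(1, 3) = [0, 0, 1], giving m = [0, 0, 1, 1]
# (already of length k, so no zero padding is appended).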
def hammingEncoder(m):
#Calculate r value for the message
r = 2
while(2**r - r - 1 < len(m)):
r += 1
#Get the generator matrix
g = hammingGeneratorMatrix(r)
#If the length of the message does not match length of generator matrix,
#for the calculated r value, it is invalid.
if len(g) != len(m):
return []
c = []
#Encode the message using multiplication mod 2
#Need to iterate for as many times as there are rows in gen matrix
for i in range(len(g[0])):
total = 0
for j in range(len(g)):
total += (g[j][i] * m[j]) % 2
c.append(total % 2)
return c
def hammingDecoder(v):
r = 2
while(2 ** r - 1 < len(v)):
r += 1
H = []
#Determine the parity check matrix
for i in range(1, 2 ** r):
H += [decimalToVector(i, r)]
#If the length of encoded message does not match parity check mat length, invalid
if len(v) != len(H):
return []
vHT = [0]*len(H[0])
#Determine v * H transpose using mod 2 multiplication and addition
for i in range(len(H[0])):
for j in range(len(v)):
vHT[i] = (vHT[i] + ((v[j] * H[j][i]) % 2)) % 2
#If the v multiplied by H transpose is 0 vector, v is a codeword so return it
isZero = True
for i in range(len(vHT)):
if vHT[i] != 0:
isZero = False
if isZero:
return v
#Determine offset number
i = 0
for j in range(len(vHT)):
i += (2 ** j * vHT[len(vHT) - j - 1])
c = v
#Flip the offset bit (could also do this with mod 2 addition)
if c[i - 1] == 1:
c[i - 1] = 0
else:
c[i - 1] = 1
return c
def messageFromCodeword(c):
r = 2
while(2 ** r - 1 < len(c)):
r += 1
#Get the generator matrix to perform an easy validity check
g = hammingGeneratorMatrix(r)
m = []
if len(c) != len(g[0]):
return []
#Get the bits from every 2^n element. Use bitwise AND to do this
for i in range(len(c)):
pos = i + 1
if (pos & (pos - 1)) != 0:
m.append(c[i])
return m
def dataFromMessage(m):
r = 2
while(2 ** r - r - 1 < len(m)):
r += 1
l = []
#Get the list part that represents data length
l += m[0:r]
l.reverse()
n = 0
#Determine data length
for i in range(len(l)):
n += (2 ** i) * l[i]
#If there is less array left than message length, invalid
if len(m[r:]) < n:
return []
#Check that end of message is only padded with zeros
if m[r + n:].count(0) != len(m[r + n:]):
return []
#Strip off trailing 0s after data
return m[r:r + n]
def repetitionEncoder(m, n):
if len(m) != 1:
return []
return [m[0]] * n
def repetitionDecoder(v):
#Count 0s and 1s. Return larger count. If equal, invalid
c0 = v.count(0)
c1 = v.count(1)
if c1 > c0:
return [1]
elif c0 > c1:
return [0]
return []
```
#### File: CT/modelling-with-graphs/depth_first_pair_nodes.py
```python
import networkx as nx
import graph6
import graph7
import graph8
import graph9
import graph10
### count the length of the path between two pre-specified vertices a and b, using Depth-First-Search
def dfs(G,a,b,u):
print(u)
n = len(G.nodes())
G.node[u]['visited'] = 'yes'
if u == a:
G.node[a]['label'] = 0
if u == b:
return
neighbors = list(G.neighbors(u))
neighbors.sort()
for adj in neighbors:
if G.node[adj]['visited'] == 'no':
G.node[adj]['label'] = G.node[u]['label'] + 1
dfs(G,a,b,adj)
if adj == b:
return
print()
G6=graph6.Graph()
a=12
b=40
print('Depth-First-Search visited the following nodes of G6 in this order:')
dfs(G6,a,b,a) ### count the DFS-path from a to b, starting at a
print('Depth-First Search found a path in G6 between vertices', a, 'and', b, 'of length', G6.node[b]['label'])
print()
G7=graph7.Graph()
a=5
b=36
print('Depth-First-Search visited the following nodes of G7 in this order:')
dfs(G7,a,b,a) ### count the DFS-path from a to b, starting at a
print('Depth-First Search found a path in G7 between vertices', a, 'and', b, 'of length', G7.node[b]['label'])
print()
G8=graph8.Graph()
a=15
b=40
print('Depth-First-Search visited the following nodes of G8 in this order:')
dfs(G8,a,b,a) ### count the DFS-path from a to b, starting at a
print('Depth-First Search found a path in G8 between vertices', a, 'and', b, 'of length', G8.node[b]['label'])
print()
G9=graph9.Graph()
a=1
b=19
print('Depth-First-Search visited the following nodes of G9 in this order:')
dfs(G9,a,b,a) ### count the DFS-path from a to b, starting at a
print('Depth-First Search found a path in G9 between vertices', a, 'and', b, 'of length', G9.node[b]['label'])
print()
G10=graph10.Graph()
a=6
b=30
print('Depth-First-Search visited the following nodes of G10 in this order:')
dfs(G10,a,b,a) ### count the DFS-path from a to b, starting at a
print('Depth-First Search found a path in G10 between vertices', a, 'and', b, 'of length', G10.node[b]['label'])
print()
```
|
{
"source": "jfdur/durham-year2-archive",
"score": 3
}
|
#### File: durham-year2-archive/ML/classifier.py
```python
import numpy as np
import pandas as pd
import scipy as sc
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder, StandardScaler, RobustScaler
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
#Load the data from given path string, and read CSV into pandas DataFrame
def loadData(path):
p = Path(path)
return pd.read_csv(p)
#Wrapper to load data from the datasets folder more quickly
def loadStudentDataCSV(file):
print('Attempting to load ' + file + '.csv')
return loadData('anonymisedData/' + file + '.csv')
"""Remove rows from the pandas DataFrame series where a column has a specified value
Do the replacement inplace"""
def dropRows(data, key, value):
data.drop(data[data[key] == value].index, inplace=True)
"""Convert (possibly dirty) percentage ranges to regular numerical ranges
in the format 00-00 using a simple regex"""
def pctRangeToNumRange(data, key):
data[key] = data[key].str.replace('[^0-9\\-]', '')
"""Fill a pandas DataFrame series null values with the specified value
Do the replacement inplace"""
def fillNa(data, key, value):
data[key].fillna(value, inplace=True)
def getData(studentVle, studentInfo):
#Set the keys to join on
keyColumns = ['code_module', 'code_presentation', 'id_student']
#Group the vle data by the number of clicks (aggregate number of clicks)
studentVleGrouped = studentVle.groupby(keyColumns)['sum_click'].sum()
#Merge the student general info with the vle data
mergedStudentVleInfo = studentInfo.merge(pd.DataFrame({'sum_click': studentVleGrouped}), left_on=keyColumns, right_index=True)
#Ditch any withdrawn students. Data for these will be incomplete and we only care about pass/fail
dropRows(mergedStudentVleInfo, 'final_result', 'Withdrawn')
#Do some cleanup on the imd_band which has some missing % symbols
pctRangeToNumRange(mergedStudentVleInfo, 'imd_band')
#Return the data with some simple cleaning
return mergedStudentVleInfo
#Print a subset of the data rows
def dataPeek(data, fields, start=15, end=20):
print(data[fields][start:end])
"""Run encoder transformations for given fields. We need DataFrames for analysis which is why we don't use
the pipelines. Don't use this function for generating ML model features or labels."""
def analysisTransform(dataStore, encoder, fields):
for field in fields:
#Run the encoder on the field. Flatten the resulting numpy ndarray
dataStore[field] = encoder.fit_transform(dataStore[[field]]).flatten()
return dataStore
"""
Generate some basic analysis information such as correlation and quartiles for the data.
Need to use encoders to make ordinals numeric
"""
def dataAnalysis(dataStore):
ds = dataStore.copy()
allFields = ['imd_band', 'age_band', 'gender', 'region', 'disability', 'highest_education', 'final_result', 'sum_click']
ds = ds[allFields]
oe = NullSafeOrdinalEncoder(strategy='median')
me = MappedEncoder(categories={'Distinction': 1, 'Pass': 1, 'Fail': 0})
qe = NullSafeOrdinalEncoder(strategy='median', categories=[[
'No Formal quals',
'Lower Than A Level',
'A Level or Equivalent',
'HE Qualification',
'Post Graduate Qualification',
]])
ds = analysisTransform(ds, oe, ['imd_band', 'age_band', 'gender', 'region', 'disability'])
ds = analysisTransform(ds, qe, ['highest_education'])
ds = analysisTransform(ds, me, ['final_result'])
correlation = ds.corr()
print(correlation['final_result'].sort_values(ascending=False))
print('\n')
print(ds.describe())
#Return the score for the given model
def scoreModel(model, XTest, yTest, name):
print("Score for " + name + " is " + str(model.score(XTest, yTest) * 100) + "%")
#Plot the confusion matrix for the model using the sklearn metrics
def plotConfusionMatrix(model, XTest, yTest, name):
p = plot_confusion_matrix(
model,
XTest,
yTest,
display_labels=['Fail', 'Pass'],
cmap=plt.cm.Blues,
normalize='true')
p.ax_.set_title('Confusion matrix for ' + name)
plt.show()
#Fit the given model, then score and plot confusion matrix
def fitAndPlot(model, XTrain, yTrain, XTest, yTest, name):
print("Running fitAndPlot for: " + name)
model.fit(XTrain, yTrain)
scoreModel(model, XTest, yTest, name)
plotConfusionMatrix(model, XTest, yTest, name)
"""
Run a grid search on the given model and plot the tuned result.
Experimentation has shown that we have a large number of false positives so we attempt to tune for precision
"""
def tune(model, params, XTrain, yTrain, XTest, yTest, name):
classifier = model.__class__
clf = GridSearchCV(classifier(), params, cv=5, verbose=True, n_jobs=-1, scoring='precision')
fitAndPlot(clf, XTrain, yTrain, XTest, yTest, name + ' Tuned')
print('Precision optimised params are: ' + str(clf.best_params_))
#Generate a complete model. First a basic version using the defaults, then try to tune
def model(model, params, XTrain, yTrain, XTest, yTest, name):
fitAndPlot(model, XTrain, yTrain, XTest, yTest, name)
tune(model, params, XTrain, yTrain, XTest, yTest, name)
"""
A custom version of the OrdinalEncoder that can handle NaN values in data.
This currently only supports one column to be passed at a time. We could fix this later, but don't need to at the moment
"""
class NullSafeOrdinalEncoder(BaseEstimator, TransformerMixin):
def __init__(self, strategy, categories='auto'):
self.strategy = strategy
self.categories = categories
def fit(self, X, y=None):
return self
def transform(self, X):
#Remove every row with a NaN value and get both the set with and without NaNs
nullRemoved = X.dropna()
nullOnly = X[~X.index.isin(nullRemoved.index)]
#Create encoder for categories
oe = OrdinalEncoder(self.categories)
#Run the encoder on the safe (no NaN) data and store in a new DataFrame with same indexing
encoded = pd.DataFrame(oe.fit_transform(nullRemoved), index=nullRemoved.index)
#Concat the encoded data with the null-containing data
result = pd.concat([encoded, nullOnly])
#Resort the keys or everything ends up out of order
result.sort_index(inplace=True)
#Fill the blanks and return the ndarray
imputer = SimpleImputer(strategy=self.strategy)
return imputer.fit_transform(result)
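# Usage sketch for NullSafeOrdinalEncoder (illustrative only; the column values are made up):
# the non-null rows are ordinally encoded first and only then are the gaps imputed, e.g.
#   df = pd.DataFrame({'imd_band': ['0-10', None, '20-30', '0-10']})
#   NullSafeOrdinalEncoder(strategy='median').fit_transform(df[['imd_band']])
#   # -> a numpy array in which the missing row is filled with the median of the encoded values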
"""
Simple custom encoder for ordinals using a specific ordering where the categories don't follow
a lexicographic ordering that can be automatically detected and give the desired result
"""
class MappedEncoder(BaseEstimator, TransformerMixin):
def __init__(self, categories={}):
self.categories = categories
def fit(self, X, y=None):
return self
def transform(self, X):
Z = pd.DataFrame()
for column in X:
Z[column] = X[column].map(self.categories)
return Z.to_numpy()
def getPipelines(scaler):
stdNumPipeline = Pipeline([
('imputer', SimpleImputer(strategy='median')),
('std_scaler', scaler),
])
stdCatPipeline = Pipeline([
('encoder', NullSafeOrdinalEncoder(strategy='median')),
('std_scaler', scaler),
])
qualCatPipeline = Pipeline([
('encoder', NullSafeOrdinalEncoder(strategy='median', categories=[[
'No Formal quals',
'Lower Than A Level',
'A Level or Equivalent',
'HE Qualification',
'Post Graduate Qualification',
]])),
('std_scaler', scaler),
])
disCatPipeline = Pipeline([
('encoder', NullSafeOrdinalEncoder(strategy='median', categories=[[
'Y',
'N'
]])),
('std_scaler', scaler),
])
stdOutPipeline = Pipeline([
('encoder', MappedEncoder(categories={'Distinction': 1, 'Pass': 1, 'Fail': 0}))
])
labelPipeline = ColumnTransformer([
('stdOut', stdOutPipeline, ['final_result']),
])
featurePipeline = ColumnTransformer([
('stdNum', stdNumPipeline, ['sum_click']),
('stdCat', stdCatPipeline, ['imd_band']),
('qualCat', qualCatPipeline, ['highest_education']),
])
return featurePipeline, labelPipeline
def getFeaturesAndLabels(scaler, trainSet, testSet):
featurePipeline, labelPipeline = getPipelines(scaler)
"""Run transforms on the features and labels of both sets. We need to flatten labels since most
transforms return a numpy ndarray and we only want one column for labels"""
trainSetFeatures = featurePipeline.fit_transform(trainSet)
trainSetLabels = labelPipeline.fit_transform(trainSet).flatten()
testSetFeatures = featurePipeline.fit_transform(testSet)
testSetLabels = labelPipeline.fit_transform(testSet).flatten()
return trainSetFeatures, trainSetLabels, testSetFeatures, testSetLabels
print('Starting... Please wait while datasets are loaded\n')
#Load the data
studentVle = loadStudentDataCSV('studentVle')
studentInfo = loadStudentDataCSV('studentInfo')
dataPeek(studentInfo, ['imd_band', 'final_result'])
print('\n')
#Do some basic preprocessing such as merging and removing anything not of interest
dataStore = getData(studentVle, studentInfo)
#Look at some interesting features of the data
dataAnalysis(dataStore)
print('\n')
#Split our training and test set in 80:20 ratio. Seed the random index generator
trainSet, testSet = train_test_split(dataStore, test_size=0.2, random_state=42)
trainSetFeatures, trainSetLabels, testSetFeatures, testSetLabels = getFeaturesAndLabels(StandardScaler(), trainSet, testSet)
#Run a logistic classifier, then optimise it
paramGrid = [{'penalty' : ['elasticnet'], 'C' : np.logspace(-4, 4, 20), 'solver' : ['saga'], 'l1_ratio': np.linspace(0, 1, 20)}]
model(LogisticRegression(penalty='l1', solver='saga'), paramGrid, trainSetFeatures, trainSetLabels, testSetFeatures, testSetLabels, 'Logistic Classifier')
#Use a different pipeline with a different scaler since we can get better performance with a RobustScaler for an SVC
trainSetFeatures, trainSetLabels, testSetFeatures, testSetLabels = getFeaturesAndLabels(RobustScaler(), trainSet, testSet)
#Run an SVC, then optimise it. THIS MAY TAKE A COUPLE OF MINUTES. Tested on a 4C/8T CPU
paramGrid = [{'kernel': ['rbf'], 'gamma': [0.001, 0.0001], 'C': [1, 10, 100, 1000]}]
model(SVC(gamma='auto'), paramGrid, trainSetFeatures, trainSetLabels, testSetFeatures, testSetLabels, 'SVC')
#Cleanup just in case since the loaded data often remained in RAM for a while
del studentVle
del studentInfo
```
|
{
"source": "jfear/fly_tie",
"score": 3
}
|
#### File: stringtie-wf/scripts/fastq_dump.py
```python
from pathlib import Path
from tempfile import TemporaryDirectory
import pandas as pd
from snakemake import shell
# Objects passed in by snakemake
srx = snakemake.wildcards.sample
sampletable = pd.read_csv(snakemake.config['sampletable'], sep='\t', index_col=0)
oname1 = snakemake.output.r1
oname2 = snakemake.output.r2
# Download in a tmpdir
TMPDIR = TemporaryDirectory()
def main():
srrs = get_srrs()
fnames = [fastq_dump(srr) for srr in srrs]
r1 = [read[0] for read in fnames]
r2 = [read[1] for read in fnames]
cat_files(r1, oname1)
cat_files(r2, oname2)
def get_srrs():
srrs = sampletable.loc[srx, 'Run']
if isinstance(srrs, str):
return [srrs]
return srrs.tolist()
def fastq_dump(srr):
shell(
'cd {TMPDIR.name} '
'&& fastq-dump '
'{srr} '
'--gzip '
'--split-files '
'&& touch {srr}_1.fastq.gz '
'&& touch {srr}_2.fastq.gz '
)
return f'{srr}_1.fastq.gz', f'{srr}_2.fastq.gz'
def cat_files(fnames, oname):
# remove file if it is there
if Path(oname).exists():
shell('rm {oname}')
# Iterate over FASTQs and concat
for fname in fnames:
shell('cat {TMPDIR.name}/{fname} >> {oname}')
if __name__ == '__main__':
main()
```
|
{
"source": "jfear/larval_gonad",
"score": 3
}
|
#### File: larval_gonad/bin/make_tsne.py
```python
import os
import sys
from pathlib import Path
import string
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Project level imports
from larval_gonad.plotting import TSNEPlot
from larval_gonad.logging import logger
REF = os.environ['REFERENCES_DIR']
plt.style.use(['common', 'paper-wide'])
def sanitize_fname(fname):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join([x for x in fname if x in valid_chars])
def plot_gene(data, fbgn, symbol, output, **kwargs):
symbol = sanitize_fname(symbol)
fname = str(Path(output, f'{fbgn}_{symbol}.png'))
if Path(fname).exists():
return
df = data[['tSNE_1', 'tSNE_2', fbgn]]
fig, (ax1, ax2) = plt.subplots(1, 2,
gridspec_kw={'width_ratios': [1.3, 1]})
TSNEPlot(data=df, hue=fbgn, s=8, ax=ax1,
title='Normalized Expression\n(Continuous)', **kwargs)
try:
base_color = kwargs['palette'][0]
except KeyError:
base_color = 'w'
TSNEPlot(data=df, hue=df[fbgn] > 0,
cmap={
'0': base_color,
'1': 'k',
}, s=8, ax=ax2, title='Normalized Expression\n(Binary)',
**kwargs)
fig.suptitle(f'{symbol} ({fbgn})')
plt.tight_layout(rect=[0, 0, .9, .9])
plt.savefig(fname)
plt.close()
if __name__ == '__main__':
# gene annotations
fbgn2symbol = pd.read_csv(
str(Path(REF, 'dmel/r6-16/fb_annotation/dmel_r6-16.fb_annotation')),
sep='\t', usecols=['gene_symbol', 'primary_FBgn'],
index_col='primary_FBgn'
).fillna('nan').to_dict()['gene_symbol']
# Colors
colors = sns.color_palette('Reds')
color2 = sns.color_palette('Greys')
colors[0] = color2[1]
# # testes1
# logger.info('Plotting Testes Rep 1')
# FIGS = '../output/figures/testis1_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/testis1_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# # testes2
# logger.info('Plotting Testes Rep 2')
# FIGS = '../output/figures/testis2_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/testis2_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# # testes3
# logger.info('Plotting Testes Rep 3')
# FIGS = '../output/figures/testis3_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/testis3_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# # ovary1
# logger.info('Plotting Ovary Rep 1')
# FIGS = '../output/figures/ovary1_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/ovary1_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# # ovary2
# logger.info('Plotting Ovary Rep 2')
# FIGS = '../output/figures/ovary2_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/ovary2_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# # ovary3
# logger.info('Plotting Ovary Rep 3')
# FIGS = '../output/figures/ovary3_tsne'
# Path(FIGS).mkdir(exist_ok=True)
#
# DAT = '../output/ovary3_scRNAseq'
# tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
# norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
# data = tsne.join(norm.T)
#
# for fbgn in data.columns[2:]:
# symbol = fbgn2symbol[fbgn]
# plot_gene(data, fbgn, symbol, FIGS, palette=colors)
#
# combined
logger.info('Plotting Combined')
FIGS = '../output/figures/combined_tsne_force_png'
Path(FIGS).mkdir(exist_ok=True)
DAT = '../output/scrnaseq_combine_force'
tsne = pd.read_csv(Path(DAT, 'tsne.tsv'), sep='\t')
norm = pd.read_csv(Path(DAT, 'normalized_read_counts.tsv'), sep='\t')
data = tsne.join(norm.T)
for fbgn in data.columns[2:]:
symbol = fbgn2symbol[fbgn]
plot_gene(data, fbgn, symbol, FIGS, palette=colors)
```
#### File: bulk-rnaseq-wf/scripts/plot_demas_x_and_4th.py
```python
import os
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import seaborn as sns
from larval_gonad.io import shelve_load
from larval_gonad.plotting.stats import add_pvals
from larval_gonad.plotting.demasculinization import demasculinization
def main():
db = shelve_load(snakemake.input[0])
df = db["data"]
male = db["male_qval"].fillna(1) # If p-vals are NaN set to 1
# Change female Y location to come from top.
female = db["female_qval"].fillna(1)
female.y = 1 - female.y
fig, ax = plt.subplots(figsize=plt.figaspect(2))
demasculinization(df, ax=ax, title=f"larval (GO)", color=snakemake.params.colors)
add_pvals(male.x.values, male.y.values, male['q-value'].values, ax)
add_pvals(female.x.values, female.y.values, female['q-value'].values, ax)
plt.savefig(snakemake.output[0])
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="bulk-rnaseq-wf",
input="../output/bulk-rnaseq-wf/testis_bias_by_muller_x_and_4th.dat",
params=dict(colors=["blue", "lightgray", "red"]),
)
plt.style.use("../config/figure_styles.mplstyle")
main()
```
#### File: cellselection-wf/scripts/raw_expression_matrix.py
```python
from pathlib import Path
import pandas as pd
import scipy.io
INPUT_FILES = snakemake.input
OUTPUT_FILE = snakemake.output[0]
def main():
df = pd.concat((create_matrix(mtx) for mtx in INPUT_FILES), axis=1)
df.reset_index().to_feather(OUTPUT_FILE)
def create_matrix(mtx):
genes = pd.read_csv(
Path(mtx).parent / "genes.tsv", sep="\t", header=None, index_col=0
).index
genes.name = "FBgn"
barcodes = pd.read_csv(
Path(mtx).parent / "barcodes.tsv", sep="\t", header=None, index_col=0
).index
barcodes.name = "cell_id"
matrix = scipy.io.mmread(mtx)
return pd.DataFrame(matrix.todense(), index=genes, columns=barcodes)
if __name__ == "__main__":
main()
```
#### File: expression-atlas-wf/scripts/dmel_tau_housekeeping.py
```python
import os
from functools import partial
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
# Load mapping of YOgn to FBgn
annot = pickle_load(snakemake.input.annot[0])
pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male)
pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female)
def intersect_fbgns(file_names, annot):
return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names))))
def convert_to_fbgn(file_name, annot):
return set(
[
fbgn
for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name))
if fbgn is not None
]
)
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input=dict(
male=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl",
],
female=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl",
],
annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl",
),
)
main()
```
#### File: expression-atlas-wf/scripts/FBgn_to_muller.py
```python
import os
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
muller = pickle_load(snakemake.input.muller)
ortholog = pickle_load(snakemake.input.ortholog)
dat = pd.Series({
ortholog[k]: v
for k, v in muller.items()
if k in ortholog
}, name=snakemake.wildcards.species)
dat.index.name = "FBgn"
dat.to_pickle(snakemake.output[0])
if __name__ == '__main__':
if os.getenv('SNAKE_DEBUG', False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir='expression-atlas-wf',
input=dict(
muller="../output/expression-atlas-wf/YOgn_to_muller/dmel.pkl",
ortholog="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl"
),
wildcards=dict(species="dmel")
)
main()
```
#### File: expression-atlas-wf/scripts/sex_bias_by_muller_all_genes.py
```python
import os
import pandas as pd
from larval_gonad.io import pickle_load, shelve_dump
from larval_gonad.stats import run_chisq
def main():
deg = read_deg(snakemake.input.deg, float(snakemake.params.alpha))
bias_counts_by_muller = count_by_muller(snakemake.input.muller, deg)
muller_proportions = calc_proprotions(bias_counts_by_muller)
enrichment = enrichment_analysis(bias_counts_by_muller)
# Pull out q-values with locations for easy plotting.
male_qval = pull_out_qval(enrichment, muller_proportions, "male")
female_qval = pull_out_qval(enrichment, muller_proportions, "female")
shelve_dump(
snakemake.output[0], data=muller_proportions, male_qval=male_qval, female_qval=female_qval
)
def read_deg(file_deg: str, alpha: float = 0.01) -> pd.DataFrame:
"""Read deg and make bias flags.
Make a flag for male, female, and NS.
"""
deg = pd.read_csv(file_deg, sep="\t").set_index("YOgn").fillna(1).assign(bias="NS")
sig_mask = deg.padj <= alpha
deg.loc[sig_mask & (deg.log2FoldChange > 0), "bias"] = "male"
deg.loc[sig_mask & (deg.log2FoldChange < 0), "bias"] = "female"
return deg
def count_by_muller(file_muller: str, deg: pd.DataFrame) -> pd.DataFrame:
# Read muller element
yo2muller = pd.Series(pickle_load(file_muller), name="muller").rename_axis("YOgn")
# Merge and count Male/Female/NS by muller
return (
deg.join(yo2muller, how="outer")
.assign(bias=lambda df: df.bias.fillna("NS"))
.groupby("muller")
.bias.value_counts()
.unstack()
.fillna(0)
.loc[["A", "B", "C", "D", "E", "F"], ["male", "female", "NS"]]
)
def calc_proprotions(counts_by_muller: pd.DataFrame) -> pd.DataFrame:
col_order = ["male", "NS", "female"]
return counts_by_muller.pipe(lambda x: x.div(x.sum(axis=1), axis=0)).loc[:, col_order]
def enrichment_analysis(counts_by_muller: pd.DataFrame) -> pd.DataFrame:
return (
run_chisq(counts_by_muller.T)
.loc[(["male", "female"], "fdr q-value"), :]
.droplevel("type")
.T
)
def pull_out_qval(enrichment: pd.DataFrame, proportions: pd.DataFrame, sex: str):
return (
enrichment[sex]
.rename("q-value")
.to_frame()
.assign(x=lambda x: range(x.shape[0]))
.join(proportions[sex].rename("y"))
)
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input={
"muller": "../output/expression-atlas-wf/YOgn_to_muller/dmel.pkl",
"deg": "../output/expression-atlas-wf/sex_biased_expression/orgR_AC.tsv",
},
params=dict(alpha=0.01),
)
main()
```
#### File: expression-atlas-wf/scripts/tau_housekeeping_yogns.py
```python
import os
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from larval_gonad.io import pickle_dump
def main():
tau = pd.read_feather(snakemake.input[0]).set_index("YOgn").dropna(how="all")
# Plot distribution
ax = tau.plot.kde()
ax.axvline(snakemake.params[0], color="k", ls="--")
ax.set(xlim=(-1.5, 1.5))
plt.savefig(snakemake.output.svg)
# Housekeeping genes
flag_housekeeping = tau.fillna(999) <= snakemake.params[0]
# Save list of housekeeping genes
pickle_dump(flag_housekeeping[flag_housekeeping.male].index.tolist(), snakemake.output.male)
pickle_dump(flag_housekeeping[flag_housekeeping.female].index.tolist(), snakemake.output.female)
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input="../output/expression-atlas-wf/tau/w1118.feather",
params=0.5,
)
main()
```
#### File: fish-wf/scripts/fish_sphere_boxplot.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from scipy.stats import ttest_rel
from larval_gonad import plotting
def main():
plt.style.use(["1c", "science_base"])
width = plt.rcParams["figure.figsize"][0]
plt.rcParams["figure.figsize"] = (width, width)
sphere = pd.read_csv(snakemake.input[0])
ax = sns.boxplot(
"chrom",
"um3",
data=sphere.melt(var_name="chrom", value_name="um3"),
palette=snakemake.params.colors,
notch=True
)
# Clean up plot
ax.set(ylabel=r"$\Psi$", xlabel="")
sns.despine(ax=ax)
# Paired t-test between X and 2L; annotate the p-value when it is significant
pval = np.round(ttest_rel(sphere["X"], sphere["2L"])[1], 3)
if pval <= 0.05:
# Extend the y-axis and add the p-value annotation.
_max = sphere.max().max() + 0.05
ax.set_ylim(None, _max)
ax.text(0.5, 0.99, f"p = {pval}", transform=ax.transAxes, va="top", ha="center")
l = plt.Line2D([0.3, 0.7], [0.94, 0.94], transform=ax.transAxes, color="k", lw=0.8, ls="-")
ax.add_line(l)
plt.savefig(snakemake.output[0])
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="fish-wf",
input="../data/external/miriam/oligopaint_sphere.csv",
params=dict(colors=["red", "grey"]),
)
main()
```
#### File: fish-wf/scripts/fish_volume_boxplot.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_rel
import larval_gonad.plotting # pylint: disable=unused-import
plt.style.use("minimal")
BOXPLOT_KWS = dict(order=["X", "2L"], palette=snakemake.params.colors, notch=True)
def main():
volume = pd.read_csv(snakemake.input[0])
stacked = pd.concat(
[
stack_data("X", "2L", volume, "um3"),
stack_data("X_scaled_probe", "2L_scaled_probe", volume, "um3_scaled_probe"),
stack_data(
"X_scaled_probe_count",
"2L_scaled_probe_count",
volume,
"um3_scaled_probe_count",
),
],
ignore_index=True,
)
pvals = pd.concat(
[
run_stats("X", "2L", volume, "um3"),
run_stats("X_scaled_probe", "2L_scaled_probe", volume, "um3_scaled_probe"),
run_stats(
"X_scaled_probe_count",
"2L_scaled_probe_count",
volume,
"um3_scaled_probe_count",
),
],
ignore_index=True,
)
g = sns.FacetGrid(stacked, col="metric", sharey=False)
g.map(sns.boxplot, "chrom", "volume", **BOXPLOT_KWS)
g.set_titles("{col_name}")
g.set_xlabels("")
add_pvals(g, pvals)
plt.savefig(snakemake.output[0])
def stack_data(c1: str, c2: str, df: pd.DataFrame, name: str) -> pd.DataFrame:
_df = df[[c1, c2]]
_df.columns = ["X" if col.startswith("X") else "2L" for col in _df.columns]
return _df.melt(var_name="chrom", value_name="volume").assign(metric=name)
def run_stats(c1: str, c2: str, df: pd.DataFrame, name: str) -> pd.DataFrame:
base_mean = df[[c1, c2]].sum().sum() / df.values.ravel().size
lfc = np.log2(df[c1].mean() / df[c2].mean())
pval = ttest_rel(df[c1], df[c2])[1]
return pd.DataFrame(
{
"metric": [name],
"baseMean": [base_mean],
"log2FoldChange": [lfc],
"p_value": [pval],
}
)
def add_pvals(g: sns.FacetGrid, pvals: pd.DataFrame):
for ax in g.axes.flat:
metric = ax.get_title()
pval = pvals.query("metric == @metric")["p_value"].values[0]
if pval <= 0.05:
# Extend axis and add p-value.
_, _max = ax.get_ylim()
ax.set_ylim(None, _max + (0.05 * _max))
ax.text(
0.5,
0.99,
f"p = {pval:0.4f}",
transform=ax.transAxes,
va="center",
ha="center",
)
l = plt.Line2D(
[0.3, 0.7],
[0.93, 0.93],
transform=ax.transAxes,
color="k",
lw=0.9,
ls="-",
)
ax.add_line(l)
if __name__ == "__main__":
main()
```
#### File: larval_gonad/notebook/2019-07-05_table_for_galletta.py
```python
import os
import yaml
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from larval_gonad.normalization import tpm as lgTPM
from larval_gonad.normalization import rpkm as lgRPKM
os.chdir('notebook')
#%% [markdown]
# ## Making outputs for <NAME>
#
# Brian asked for an update from the latest iteration for his paper. Here I make all
# of the different outputs that he has asked for. Including:
#
# * Updated tSNE and UMAP plots (SVGs)
# * Update TPM and RPKM aggregated to cluster level (excel workbook)
# * Differential expression results (excel workbook)
#%%
fbgn2symbol = (
pd.read_feather('../references/gene_annotation_dmel_r6-26.feather', columns=['FBgn', 'gene_symbol'])
.set_index('FBgn').squeeze()
)
# Brian's Target Gene List
brians_list = pd.read_csv('../data/external/galletta/trail_list_20190705.txt', sep='\t').FBGN.tolist()
#%% [markdown]
# ## Normalized Read Counts
#%%
## TPM By Cluster
def make_tpm():
fbgn2length = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather', columns=['FBgn', 'length']).set_index('FBgn').squeeze()
raw = (
pd.read_feather('../output/paper_submission/raw_by_cluster_rep.feather')
.groupby(['FBgn', 'cluster']).sum()
.squeeze()
.unstack(level='cluster')
)
tpm = lgTPM(raw, fbgn2length).dropna()
tpm.index = fbgn2symbol.reindex(tpm.index).to_frame().set_index('gene_symbol', append=True).index
return tpm
tpm = make_tpm()
## RPKM By Cluster
def make_rpkm():
fbgn2length = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather', columns=['FBgn', 'length']).set_index('FBgn').squeeze()
raw = (
pd.read_feather('../output/paper_submission/raw_by_cluster_rep.feather')
.groupby(['FBgn', 'cluster']).sum()
.squeeze()
.unstack(level='cluster')
)
rpkm = lgRPKM(raw, fbgn2length).dropna()
rpkm.index = fbgn2symbol.reindex(rpkm.index).to_frame().set_index('gene_symbol', append=True).index
return rpkm
rpkm = make_rpkm()
# Save to excel
with pd.ExcelWriter('../output/notebook/2019-07-05_table_for_galletta.xlsx') as writer:
# Full TPM
tpm.to_excel(writer, sheet_name='TPM')
# Brian's Genes TPM
tpm.query(f'FBgn == {brians_list}').to_excel(writer, sheet_name='TPM (Genes of Interest)')
# Full RPKM
rpkm.to_excel(writer, sheet_name='RPKM')
# Brian's Genes RPKM
rpkm.query(f'FBgn == {brians_list}').to_excel(writer, sheet_name='RPKM (Genes of Interest)')
#%% [markdown]
# ## Differential Expression
#%%
## Gonia vs Primary Spermatocytes
gvc = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather')
.set_index(['FBgn', 'gene_symbol'])
# .query('p_val_adj <= 0.01')
.sort_values('avg_logFC')
)
gvc['direction up regulated'] = 'SP'
gvc.loc[gvc['avg_logFC'] < 0, 'direction up regulated'] = 'EPS|PS1|PS2|PS3'
gvc.loc[gvc['p_val_adj'] > 0.01, 'direction up regulated'] = 'NS'
## Gonia vs Early Primary Spermatocytes
gve = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_gonia_vs_eps.feather')
.set_index(['FBgn', 'gene_symbol'])
.sort_values('avg_logFC')
)
gve['direction up regulated'] = 'SP'
gve.loc[gve['avg_logFC'] < 0, 'direction up regulated'] = 'EPS'
gve.loc[gve['p_val_adj'] > 0.01, 'direction up regulated'] = 'NS'
## Early Primary Spermatocytes vs Later Primary Spermatocytes
evp = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_eps_vs_ps.feather')
.set_index(['FBgn', 'gene_symbol'])
.sort_values('avg_logFC')
)
evp['direction up regulated'] = 'EPS'
evp.loc[evp['avg_logFC'] < 0, 'direction up regulated'] = 'PS1|PS2|PS3'
evp.loc[evp['p_val_adj'] > 0.01, 'direction up regulated'] = 'NS'
# Save to excel
with pd.ExcelWriter('../output/notebook/2019-07-05_table_for_galletta_diff_expression.xlsx') as writer:
# G vs Cytes
gvc.query(f'FBgn == {brians_list}').to_excel(writer, sheet_name='SP vs Primary Spermatocytes')
# G vs Early Primary Spermatocytes
gve.query(f'FBgn == {brians_list}').to_excel(writer, sheet_name='S vs EPS')
# Early Primary Spermatocytes vs Later Primary Spermatocytes
evp.query(f'FBgn == {brians_list}').to_excel(writer, sheet_name='EPS vs PS1|PS2|PS3')
#%% [markdown]
# ## Plots
#%%
# Get colors scheme
config = yaml.safe_load(open('../config/common.yaml'))
cluster_annot = config['cluster_annot']
cluster_order = config['cluster_order']
colors = dict(zip(cluster_order, yaml.full_load(open('../config/colors.yaml'))['clusters']))
# Get cell id to cluster id
cell_annotation = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_metadata.feather', columns=['cell_id', 'cluster']).set_index('cell_id')
.assign(cluster=lambda df: df.cluster.cat.rename_categories(cluster_annot))
.assign(cluster=lambda df: df.cluster.cat.reorder_categories(cluster_order))
.squeeze()
)
#%% [markdown]
# ### UMAP
#%%
umap = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_umap.feather').set_index('cell_id')
.join(cell_annotation)
.assign(color=lambda df: df.cluster.map(colors))
)
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
umap.UMAP_1,
umap.UMAP_2,
c=umap.color,
s=3,
linewidth=0.02,
edgecolor="k",
rasterized=True
)
for clus, row in umap.groupby("cluster").agg({"UMAP_1": "mean", "UMAP_2": "mean"}).iterrows():
ax.text(
row.UMAP_1,
row.UMAP_2,
clus,
bbox=dict(facecolor=(1, 1, 1, 0.8), edgecolor="none", pad=0.2),
ha="center",
va="center",
fontweight="bold",
)
# clean up plot
plt.setp(
ax,
xlabel="UMAP 1",
ylabel="UMAP 2",
aspect="equal",
xmargin=0,
ymargin=0,
)
plt.savefig('../output/notebook/2019-07-05_table_for_galletta_umap.svg', bbox_inches='tight')
#%% [markdown]
# ### tSNE
#%%
tsne = (
pd.read_feather('../output/notebook/2019-07-05_output_tsne.feather').set_index('cell_id')
.join(cell_annotation)
.assign(color=lambda df: df.cluster.map(colors))
)
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
tsne.tSNE_1,
tsne.tSNE_2,
c=tsne.color,
s=3,
linewidth=0.02,
edgecolor="k",
rasterized=True
)
for clus, row in tsne.groupby("cluster").agg({"tSNE_1": "mean", "tSNE_2": "mean"}).iterrows():
ax.text(
row.tSNE_1,
row.tSNE_2,
clus,
bbox=dict(facecolor=(1, 1, 1, 0.8), edgecolor="none", pad=0.2),
ha="center",
va="center",
fontweight="bold",
)
# clean up plot
plt.setp(
ax,
xlabel="tSNE 1",
ylabel="tSNE 2",
aspect="equal",
xmargin=0,
ymargin=0,
)
plt.savefig('../output/notebook/2019-07-05_table_for_galletta_tsne.svg', bbox_inches='tight')
```
|
{
"source": "jfear/larval_gonad_ovary",
"score": 3
}
|
#### File: larval_gonad_ovary/larval_gonad_ovary/bulk.py
```python
from pathlib import Path
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
import matplotlib.pyplot as plt
from lcdblib.plotting import maPlot, PairGrid, corrfunc
from .cell_selection import filter_gene_counts_by_barcode
BULK = [
'B9_OCP',
'B10_OCP',
'B11_OCP',
'B12_OCP',
]
def read_bulk(path, filter=None, pattern='*/*.featurecounts.txt'):
"""Read in a folder of feature count data.
Using the lcdb-wf, featurecounts are organized in a set of sub-folders for
each sample. Given a path will read in the data and return a dataframe.
Optionally a list of sample names can be given to filter by.
Parameters
----------
path : str
Directory path to output from the lcdb-wf.
filter : None | list
List of sample names to include. Defaults to use the TCP libraries.
pattern : str
Glob pattern for finding the featurecounts files.
Example
-------
>>> df = read_bulk('../bulk-rnaseq-wf/data/rnaseq_samples',
filter=['B5_TCP', 'B6_TCP'])
"""
bulk = Path(path)
dfs = []
for fname in bulk.glob(pattern):
sname = fname.parent.name
if (filter is not None) and (sname in filter):
dat = pd.read_csv(fname, sep='\t', comment='#',
index_col=[0]).iloc[:, -1]
dat.name = sname
dfs.append(dat)
bulk_dat = pd.concat(dfs, axis=1)
bulk_dat = bulk_dat[bulk_dat.columns.sort_values()]
return bulk_dat
def read_bulk_for_lengths(path, filter=None, pattern='*/*.featurecounts.txt'):
"""Read in a folder of feature count data to get gene lengths.
Using the lcdb-wf, featurecounts are organized in a set of sub-folders for
each sample. Given a path will read in the data and return a dataframe.
Optionally a list of sample names can be given to filter by.
Parameters
----------
path : str
Directory path to output from the lcdb-wf.
filter : None | list
List of sample names to include. Defaults to use the TCP libraries.
pattern : str
Glob pattern for finding the featurecounts files.
Example
-------
>>> df = read_bulk('../bulk-rnaseq-wf/data/rnaseq_samples',
filter=['B5_TCP', 'B6_TCP'])
"""
bulk = Path(path)
dfs = []
for fname in bulk.glob(pattern):
sname = fname.parent.name
if (filter is not None) and (sname in filter):
dat = pd.read_csv(fname, sep='\t', comment='#',
index_col=[0]).iloc[:, -2]
dat.name = 'length'
dfs.append(dat)
bulk_dat = pd.concat(dfs, axis=0)
return bulk_dat.to_frame().reset_index().drop_duplicates().set_index('Geneid').length
def plot_bulk_pairwise_corr(bulk_dat, subplots_kws=None, scatter_kws=None,
corrfunc_kws=None):
"""Plot a pairgrid of RNA-seq data.
The upper triangle is the scatter plot and spearman correlation. The lower
triangle is a common MA-Plot. The diagonal is the density.
bulk_dat : pd.DataFrame
DataFrame with RNA-seq data (genes, samples)
"""
if subplots_kws is None:
subplots_kws = {}
if scatter_kws is None:
scatter_kws = {}
if corrfunc_kws is None:
corrfunc_kws = {}
subplots_default = {
'sharex': False,
'sharey': False
}
subplots_default.update(subplots_kws)
scatter_default = {
's': 10
}
scatter_default.update(scatter_kws)
corrfunc_default = {
}
corrfunc_default.update(corrfunc_kws)
g = PairGrid(bulk_dat, subplots_kws=subplots_default)
g.map_lower(maPlot, scatter_kws=scatter_default)
g.map_upper(plt.scatter, **scatter_default)
g.map_upper(corrfunc, **corrfunc_default)
g.map_diag(sns.kdeplot)
return g
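# Illustrative usage (a minimal sketch; the path follows the docstring examples above
# and is an assumption):
#   bulk_dat = read_bulk('../bulk-rnaseq-wf/data/rnaseq_samples', filter=BULK)
#   g = plot_bulk_pairwise_corr(bulk_dat)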
def scRNAseq_corr_distribution(umi, raw, bulk_dat, start=200,
interval=100, stop=10000):
"""Calculate the correlation distribution between scRNASeq and Bulk.
Iterate by intervals of cells and calculate the correlation of summed
scRNASeq vs Bulk RNA-Seq.
Parameters
----------
umi : pd.DataFrame
DataFrame of UMI counts by Cell (tidy)
raw : CellRangerCounts
A named tuple of CellRangerCounts.
bulk_dat : pd.DataFrame
DataFrame of bulk RNA-seq data (genes, samples)
start : int
Number of cells to start with [default 200]
interval : int
Number of cells to add each iteration [default 100]
stop : int
Number of cells to stop at [default 10,000]
Returns
-------
pd.DataFrame
Rows are the number of UMI sorted cells. Columns are Bulk RNASeq
samples. Values are Spearman r coefficients.
"""
_umi = umi.sort_values(by='umi_count', ascending=False)
res = []
loc = start
while loc < stop:
dat = filter_gene_counts_by_barcode(_umi.index[:loc], raw).sum(axis=1)
corrs = []
for col in bulk_dat.columns:
corrs.append(spearmanr(bulk_dat[col], dat).correlation)
res.append([loc, *corrs])
loc += interval
col_names = ['Cell Number']
col_names.extend(bulk_dat.columns)
df = pd.DataFrame(res, columns=col_names)
return df.set_index('Cell Number')
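# Illustrative usage (a minimal sketch; `umi`, `raw`, and `bulk_dat` are assumed to be
# built as described in the docstring above):
#   corr = scRNAseq_corr_distribution(umi, raw, bulk_dat, start=500, interval=500, stop=5000)
#   plot_corr_distribution(corr)   # defined below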
def plot_corr_distribution(corr):
fig, axes = plt.subplots(2, 2, sharex=True)
for col, ax in zip(corr.columns, axes.flatten()):
ax.plot(corr[col])
ax.set_title(col)
ax.set_ylabel('Spearman r')
ax.set_xlabel('Cells')
plt.tight_layout()
def scRNAseq_corr_distribution_random(umi, raw, bulk_dat, interval=100,
stop=10000, random_state=42):
"""Calculate the correlation distribution between scRNASeq and Bulk.
Iterate by intervals of cells and calculate the correlation of summed
scRNASeq vs Bulk RNA-Seq.
Parameters
----------
umi : pd.DataFrame
DataFrame of UMI counts by Cell (tidy)
raw : CellRangerCounts
A named tuple of CellRangerCounts.
bulk_dat : pd.DataFrame
DataFrame of bulk RNA-seq data (genes, samples)
interval : int
Number of cells to add each iteration [default 100]
stop : int
Number of cells to stop at [default 10,000]
random_state : None | int
Random state to use for sampling. Set to None if you want full random
with each iteration.
Returns
-------
pd.DataFrame
Rows are the number of UMI sorted cells. Columns are Bulk RNASeq
samples. Values are Spearman r coefficients.
"""
res = []
loc = interval
while loc < stop:
idx = umi.sample(n=loc, random_state=random_state).index
dat = filter_gene_counts_by_barcode(idx, raw).sum(axis=1)
corrs = []
for col in bulk_dat.columns:
corrs.append(spearmanr(bulk_dat[col], dat).correlation)
res.append([loc, *corrs])
loc += interval
col_names = ['Cell Number']
col_names.extend(bulk_dat.columns)
df = pd.DataFrame(res, columns=col_names)
return df.set_index('Cell Number')
```
|
{
"source": "jfear/larval_gonad",
"score": 2
}
|
#### File: paper_submission/scripts/correlation_table_bulk_sum_single_cell_pvals.py
```python
import os
import pandas as pd
from scipy.stats import spearmanr
from larval_gonad.normalization import tpm
from larval_gonad.constants import L3_BULK, L3_SC
def main():
df = pd.concat(
[read_l3_sc(), read_l3_bulk()], sort=True, axis=1, join="inner"
) # type: pd.DataFrame
_, pval = spearmanr(df.values, axis=0)
pd.DataFrame(pval, index=df.columns, columns=df.columns).rename_axis("Spearman pval").to_csv(
snakemake.output[0], sep="\t"
)
def read_l3_sc() -> pd.DataFrame:
gene_lengths = pd.read_feather(snakemake.input.gene_annot).set_index("FBgn")["length"].squeeze()
raw = (
pd.read_feather(snakemake.input.larval_scrnaseq)
.groupby(["FBgn", "rep"])
.Count.sum()
.unstack()
.rename(columns=L3_SC)
)
norm = tpm(raw, gene_lengths).dropna()
return norm
def read_l3_bulk() -> pd.DataFrame:
return (
pd.read_csv(snakemake.input.larval_bulk, sep="\t", index_col=0)
.rename_axis("FBgn")
.rename(columns=L3_BULK)
)
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
input=dict(
gene_annot="../../references/gene_annotation_dmel_r6-26.feather",
larval_scrnaseq="../../output/seurat3-cluster-wf/aggegated_gene_counts.feather",
larval_bulk="../../output/bulk-rnaseq-wf/rnaseq_aggregation/tpm_gene_level_counts.tsv",
)
)
main()
```
#### File: paper_submission/scripts/plot_autosome_ratios.py
```python
from string import ascii_uppercase
from typing import List
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from larval_gonad.io import shelve_load
from larval_gonad.plotting.stats import add_pvals
plt.style.use(["2c", "science_base"])
def main():
df = (
load_genes()
.pipe(add_gene_set_annotation)
.pipe(add_total_expression)
.pipe(add_y_expression)
.pipe(add_ratios, snakemake.input.expressed_ratios, "All Genes")
.pipe(
add_ratios,
snakemake.input.widely_expressed_ratios,
"Widely Expressed Genes",
)
.pipe(tidy)
)
g = sns.FacetGrid(
df,
row="row",
col="gene_set",
col_order=["All Genes", "Widely Expressed Genes"],
sharey="row",
sharex=True,
aspect=1.2,
gridspec_kws=dict(wspace=0.15),
)
g.map(
sns.boxplot,
"cluster",
"value",
order=snakemake.params.cluster_order,
palette=snakemake.params.cluster_color,
notch=True,
linewidth=0.5,
showfliers=False,
)
tweak_axes(g)
add_x_stats(g.axes[1])
add_4th_stats(g.axes[2])
add_y_stats(g.axes[3])
g.savefig(snakemake.output[0])
def load_genes() -> pd.DataFrame:
return pd.read_feather(
snakemake.input.gene_annot, columns=["FBgn", "FB_chrom"]
).rename(columns={"FB_chrom": "chrom"})
def add_gene_set_annotation(df: pd.DataFrame) -> pd.DataFrame:
expressed = joblib.load(snakemake.input.expressed_fbgns)
widely_expressed = joblib.load(snakemake.input.widely_expressed_fbgns)
return pd.concat(
[
(df.query("FBgn in @expressed").assign(gene_set="All Genes")),
(
df.query("FBgn in @widely_expressed").assign(
gene_set="Widely Expressed Genes"
)
),
],
ignore_index=True,
sort=False,
)
def _load_log_tpm() -> pd.DataFrame:
return np.log10(
pd.read_feather(snakemake.input.tpm).set_index(["FBgn", "cluster"]).squeeze()
+ 1
).reset_index()
def add_total_expression(df: pd.DataFrame) -> pd.DataFrame:
tpm = _load_log_tpm().rename(columns={"TPM": "Total Expression\nLog(TPM)"})
return df.merge(tpm, on="FBgn", how="outer")
def add_y_expression(df: pd.DataFrame) -> pd.DataFrame:
y_genes = (
pd.read_feather(snakemake.input.gene_annot, columns=["FBgn", "FB_chrom"])
.query("FB_chrom == 'Y'")
.FBgn.to_list()
)
tpm = (
_load_log_tpm()
.query("FBgn in @y_genes")
.rename(columns={"TPM": "Y Expression\nLog(TPM)"})
)
return df.merge(tpm, on=["FBgn", "cluster"], how="outer")
def add_ratios(df: pd.DataFrame, gene_set_db, gene_set_name) -> pd.DataFrame:
ratios = (
shelve_load(gene_set_db)["data"]
.query("ratio_type != 'y_to_a_ratio'")
.reset_index()
.drop("rep", axis=1)
.set_index(["cell_id", "cluster", "ratio_type"])
.squeeze()
.unstack()
.reset_index()
.rename(columns={"x_to_a_ratio": "X/AA", "fourth_to_a_ratio": "44/AA"})
.assign(gene_set=gene_set_name)
)
return pd.concat([df, ratios], ignore_index=True, sort=False)
def tidy(df: pd.DataFrame) -> pd.DataFrame:
return pd.melt(
df,
id_vars=["gene_set", "cluster"],
value_vars=[
"Total Expression\nLog(TPM)",
"X/AA",
"44/AA",
"Y Expression\nLog(TPM)",
],
var_name="row",
value_name="value",
).dropna()
def tweak_axes(g: sns.FacetGrid):
g.set_titles("{col_name}")
g.set_xlabels("")
# Make Row Name the y-axis Label
for row_name, row in zip(g.row_names, g.axes):
row[0].set_ylabel(row_name)
for label, ax in zip(ascii_uppercase, g.axes.ravel()):
ax.text(
0.01,
1,
label,
ha="left",
va="top",
transform=ax.transAxes,
fontweight="bold",
fontsize=12,
)
def _add_horizontal_line(axes: List[plt.Axes], loc: float):
for ax in axes:
ax.axhline(loc, color="gray", ls="--")
def _add_ratio_pvalues(shelve: str, ratio_type: str, ax: plt.Axes):
pvals = shelve_load(shelve)["pvalues"].query("ratio_type == @ratio_type")
add_pvals(pvals.x, pvals.y, pvals.pvalue, ax, fontsize=12, fontweight="bold")
def add_x_stats(axes: List[plt.Axes]):
_add_ratio_pvalues(snakemake.input.expressed_ratios, "x_to_a_ratio", axes[0])
_add_ratio_pvalues(snakemake.input.widely_expressed_ratios, "x_to_a_ratio", axes[1])
_add_horizontal_line(axes, 1)
def add_4th_stats(axes: List[plt.Axes]):
_add_ratio_pvalues(snakemake.input.expressed_ratios, "fourth_to_a_ratio", axes[0])
_add_ratio_pvalues(
snakemake.input.widely_expressed_ratios, "fourth_to_a_ratio", axes[1]
)
_add_horizontal_line(axes, 1)
def add_y_stats(axes: List[plt.Axes]):
# From: notebook/2020-01-03-y_stats.ipynb
add_pvals([1], [1.45], [0.0017], axes[0], fontsize=12, fontweight="bold")
if __name__ == "__main__":
main()
```
#### File: paper_submission/scripts/plot_gene_umap.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import larval_gonad.plotting # pylint: disable=unused-import
plt.style.use("minimal")
sns.set_style("dark")
FBGNS = [
"FBgn0039044", # p53
"FBgn0243486", # rdo
"FBgn0026573", # ADD1
"FBgn0083963", # Nlg3
"FBgn0011206", # bol
"FBgn0264953", # Piezo
]
SYMBOLS = ["p53", "rdo", "ADD1", "Nlg3", "bol", "Piezo"]
def main():
df = (
pd.read_feather(snakemake.input.zscores)
.query("FBgn in @FBGNS")
.pipe(add_gene_symbol)
.pipe(add_umap)
.sort_values("z-score")
)
scatter_defaults = dict(
s=3, linewidth=0, rasterized=True, vmin=-3, vmax=3, cmap="viridis"
)
g = sns.FacetGrid(
data=df,
col="gene_symbol",
col_wrap=2,
col_order=SYMBOLS,
sharex=True,
sharey=True,
)
g.map(facet_scatter, "UMAP_1", "UMAP_2", "z-score", **scatter_defaults)
add_colorbar(g, scatter_defaults)
tweak_axes(g)
g.savefig(snakemake.output[0])
def add_gene_symbol(df: pd.DataFrame) -> pd.DataFrame:
fbgn2symbol = pd.read_feather(
snakemake.input.gene_annot, columns=["FBgn", "gene_symbol"]
)
return df.merge(fbgn2symbol)
def add_umap(df: pd.DataFrame) -> pd.DataFrame:
umap = pd.read_feather(snakemake.input.umap)
return df.melt(
id_vars=["FBgn", "gene_symbol"], var_name="cell_id", value_name="z-score"
).merge(umap)
def facet_scatter(x, y, c, **kwargs):
"""Draw scatterplot with point colors from a faceted DataFrame columns."""
kwargs.pop("color")
plt.scatter(x, y, c=c, **kwargs)
def add_colorbar(g: sns.FacetGrid, scatter_defaults: dict):
g.fig.subplots_adjust(bottom=0.2)
# cax = g.fig.add_axes([0.94, 0.25, 0.02, 0.6])
cax = g.fig.add_axes([0.20, 0.10, 0.60, 0.02])
points = plt.scatter([], [], c=[], **scatter_defaults)
g.fig.colorbar(points, cax=cax, label="Z-Score (TPM)", orientation="horizontal")
return g
def tweak_axes(g: sns.FacetGrid):
g.set_titles("{col_name}", fontstyle="italic")
g.set_xlabels("UMAP 1")
g.set_ylabels("UMAP 2")
g.despine(left=True, bottom=True)
for ax in g.axes.ravel():
ax.set(xticks=[-10, 0, 10], yticks=[-10, 0, 10], aspect=1)
return g
if __name__ == "__main__":
main()
```
#### File: paper_submission/scripts/stats_gsea_pairwise_comparisons_of_cell_types.py
```python
import pandas as pd
import seaborn as sns
from larval_gonad.stats import PairwisePermutationTest
THREADS = snakemake.threads
def main():
df = pd.read_feather(snakemake.input[0])
pw = PairwisePermutationTest(
"cluster", "male", data=df, threads=THREADS, order=snakemake.params.order
).fit()
(
pw.results.sort_values(["name1", "name2"])
.set_index(["name1", "name2"])
.to_csv(snakemake.output[0], sep="\t")
)
if __name__ == "__main__":
main()
```
#### File: paper_submission/scripts/table_scRNASeq_cluster_deg.py
```python
from textwrap import dedent
import pandas as pd
from larval_gonad.config import config
from common import fbgn2symbol
def _add_sheet(writer : pd.ExcelWriter,
sheet_name : str,
fname : str,
cell_format,
alpha=0.01,
comment=None,
headers=None):
sheet = writer.book.add_worksheet(sheet_name)
writer.sheets[sheet_name] = sheet
sheet.set_column(0, 1, 20)
df = pd.read_csv(fname, sep='\t').query(f'p_val_adj <= {alpha}')
df.sort_values(by='avg_logFC', ascending=False, inplace=True)
if 'cluster' in df.columns:
df.sort_values(by='cluster', inplace=True)
df.cluster.replace(config['cluster_annot'], inplace=True)
idx = df.columns.tolist().index('cluster')
sheet.set_column(idx, idx, 20)
if headers:
df.rename({'pct.1': headers[0], 'pct.2': headers[1]}, inplace=True, axis=1)
df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=1, freeze_panes=(2, 2))
if comment:
sheet.set_row(0, 100, cell_format)
sheet.merge_range('A1:G1', dedent(comment))
def _add_data(writer : pd.ExcelWriter,
sheet_name : str,
fname : str,
cell_format,
comment=None):
sheet = writer.book.add_worksheet(sheet_name)
writer.sheets[sheet_name] = sheet
sheet.set_column(0, 1, 20)
df = pd.read_parquet(fname)[config['sel_cluster_order_w_rep']]
df['gene'] = df.index.map(lambda x: fbgn2symbol[x])
df.set_index('gene', append=True, inplace=True)
df.to_excel(writer, sheet_name=sheet_name, index=True, startrow=1, freeze_panes=(2, 2))
if comment:
sheet.set_row(0, 100, cell_format)
sheet.merge_range('A1:G1', dedent(comment))
def main():
writer = pd.ExcelWriter(snakemake.output[0])
cell_format = writer.book.add_format({'valign': 'top'})
cell_format.set_text_wrap()
comment = "Identification of Bio markers. " \
"Here we compare expression of each cluster to all other cells. " \
"This creates a list of genes that are up-regulated in each cluster. " \
"This table is grouped by clusters and sorted by avg_logFC."
_add_sheet(writer, 'One vs Rest (biomarkers)', snakemake.input.biomarkers, cell_format, comment=comment)
comment = "Differential expression between germ cell and somatic cell clusters. " \
"I combine all of the germ cell clusters (6, 3, 2, 0) vs all of the somatic cell "\
"clusters (5, 1, 4, 7, 8).\n" \
"Positive avg_logFC are germ biased genes.\n" \
"Negative avg_logFC are soma biased genes.\n"
_add_sheet(writer, 'Germ Cells vs Somatic Cells', snakemake.input.germ_soma, cell_format, comment=comment,
headers=('pct.germ', 'pct.soma'))
comment = "Differential expression of spermatogonia vs 1º spermatocytes. " \
"Spermatogonia cluster and compared it to all spermatocyte clusters combined together.\n\n" \
"Positve avg_logFC are spermatogonia biased genes.\n" \
"Negative avg_logFC are 1º spermatocyte biased genes."
_add_sheet(writer, 'Gonia vs Cytes', snakemake.input.gonia_cytes, cell_format, comment=comment,
headers=('pct.gonia', 'pct.cytes'))
comment = "Differential expression of Early 1º spermatocytes vs Mid and Late 1º spermatocytes.\n" \
"Positve avg_logFC are early 1º spermatocyte biased genes.\n" \
"Negative avg_logFC are mid and late 1º spermatocyte biased genes."
_add_sheet(writer, 'Early cytes vs Mid and Late', snakemake.input.gonia_early, cell_format, comment=comment,
headers=('pct.early', 'pct.midLate'))
comment = "Differential expression of Mid 1º spermatocytes vs Early and Late 1º spermatocytes.\n" \
"Positve avg_logFC are mid 1º spermatocyte biased genes.\n" \
"Negative avg_logFC are early and late 1º spermatocyte biased genes."
_add_sheet(writer, 'Mid cytes vs Early and Late', snakemake.input.gonia_mid, cell_format, comment=comment,
headers=('pct.mid', 'pct.earlyLate'))
comment = "Differential expression of Late 1º spermatocytes vs Early and Mid 1º spermatocytes.\n" \
"Positve avg_logFC are late 1º spermatocyte biased genes.\n" \
"Negative avg_logFC are early and mid 1º spermatocyte biased genes."
_add_sheet(writer, 'Late cytes vs Early and Mid', snakemake.input.gonia_late, cell_format, comment=comment,
headers=('pct.late', 'pct.earlyMid'))
comment = "These are raw counts aggregated by Cluster:Rep using Sum."
_add_data(writer, 'Raw Counts (Sum)', snakemake.input.raw, cell_format, comment=comment)
comment = "These are tpm normalized counts by Cluster:Rep."
_add_data(writer, 'TPM', snakemake.input.tpm, cell_format, comment=comment)
comment = "These are z-scores of tpm normalized counts by Cluster:Rep."
_add_data(writer, 'Z-scores', snakemake.input.zscore, cell_format, comment=comment)
writer.close()
if __name__ == '__main__':
main()
```
#### File: response-to-review-wf/scripts/commonly_expressed_cluster_proportion.py
```python
import joblib
import pandas as pd
def _debug():
from larval_gonad.mock import MockSnake
snakemake = MockSnake(
input=dict(
annotation="../../references/gene_annotation_dmel_r6-26.feather",
commonly_expressed="../../output/cellselection-wf/commonly_expressed_genes.pkl",
clusters="../../output//seurat3-cluster-wf/combined_n3_clusters.feather",
raw="../../output/cellselection-wf/raw.feather",
tau="../../output/expression-atlas-wf/dmel_tau.feather",
tsps="../../output/expression-atlas-wf/dmel_tsps.feather",
)
)
def main():
expression = commonly_expressed_by_cluster()
prop_cells_from_cluster = cluster_proportion(expression)
fbgn_to_tau_tsps = load_tau_tsps()
prop_cells_from_cluster.join(fbgn_to_tau_tsps).to_csv(snakemake.output[0], sep="\t")
def commonly_expressed_by_cluster() -> pd.DataFrame:
annotation = pd.read_feather(snakemake.input.annotation)[
["FBgn", "gene_symbol", "FB_chrom"]
].rename(columns={"FB_chrom": "chromosome"})
common_fbgns = joblib.load(snakemake.input.commonly_expressed)
clusters = pd.read_feather(snakemake.input.clusters)[["cell_id", "cluster"]]
raw = (
pd.read_feather(snakemake.input.raw)
.query("FBgn in @common_fbgns")
.melt(id_vars="FBgn", var_name="cell_id", value_name="UMI")
.merge(clusters)
.merge(annotation)
)
return raw
def cluster_proportion(expression: pd.DataFrame) -> pd.DataFrame:
num_cells_with_expression_by_cluster = (
expression.query("UMI > 0")
.groupby(["FBgn", "gene_symbol", "chromosome"])
.cluster.value_counts()
.unstack()
.loc[:, expression.cluster.cat.categories]
.assign(total=lambda df: df.sum(axis=1))
.set_index("total", append=True)
)
return num_cells_with_expression_by_cluster.div(
num_cells_with_expression_by_cluster.index.get_level_values("total"), axis=0
)
def load_tau_tsps() -> pd.DataFrame:
tissue_specificity_scores = pd.concat(
[
pd.read_feather(snakemake.input.tau).set_index("FBgn").male_tau,
pd.read_feather(snakemake.input.tsps).set_index("FBgn").male_tsps,
],
axis=1,
)
return tissue_specificity_scores
if __name__ == "__main__":
main()
```
#### File: seurat3-cluster-wf/scripts/individual_gene_feature_plot.py
```python
import os
from pathlib import Path
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import seaborn as sns
def main():
zscores = pd.read_feather(snakemake.input.zscores).set_index("FBgn")
zscore = zscores.loc[snakemake.wildcards.FBgn, :]
umap = pd.read_feather(snakemake.input.umap).set_index("cell_id")
df = umap.join(zscore.rename("zscore"))
plot(df, snakemake.wildcards.symbol, snakemake.output[0])
def plot(umap, symbol, output_file):
# set up cmap
cmap = plt.get_cmap("viridis", 512)
norm = matplotlib.colors.Normalize(-3, 3)
# Plot
ax = sns.scatterplot(
x="UMAP_1",
y="UMAP_2",
data=umap.sort_values("zscore"),
hue="zscore",
hue_norm=norm,
palette=cmap,
s=8,
linewidth=0,
rasterized=True,
legend=False,
)
sns.despine(ax=ax)
ax.set_title(f"{symbol}", fontstyle="italic")
sm = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
plt.colorbar(sm)
plt.savefig(output_file)
plt.close()
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="seurat3-cluster-wf",
input=dict(
zscores="../output/seurat3-cluster-wf/zscore_by_cell.feather",
umap="../output/seurat3-cluster-wf/combined_n3_umap.feather",
gene_annot="../references/gene_annotation_dmel_r6-26.feather",
),
wildcards=dict(symbol="bol", FBgn="FBgn0011206")
)
main()
```
#### File: seurat3-cluster-wf/scripts/rpkm_by_cluster.py
```python
import pandas as pd
from larval_gonad.io import feather_to_cluster_matrix, melt_cluster_matrix
from larval_gonad.normalization import rpkm
RAW_AGG = snakemake.input["raw_agg"]
GENE_ANNOTATION = snakemake.input["gene_metadata"]
OUTPUT_FILE = snakemake.output[0]
def main():
fbgn2len = (
pd.read_feather(GENE_ANNOTATION, columns=["FBgn", "length"])
.set_index("FBgn")
.length
)
raw_agg = feather_to_cluster_matrix(RAW_AGG).dropna(axis=1, how="all")
_rpkm = rpkm(raw_agg, fbgn2len).dropna()
melt_cluster_matrix(_rpkm, name='RPKM').to_feather(OUTPUT_FILE)
if __name__ == "__main__":
main()
```
#### File: src/larval_gonad/config.py
```python
import os
from pathlib import Path
import yaml
from joblib import Memory
def read_config(fname, keepers=None):
"""Reads a YAML file.
If a list of keepers is provided, will look through the YAML and only
return those keys.
"""
with open(fname, "r", encoding="utf8") as fh:
c = yaml.full_load(fh)
if keepers is None:
return c
if isinstance(keepers, str):
return c.get(keepers, None)
config = {}
for k in keepers:
v = c.get(k, None)
if v is not None:
config[k] = v
return config
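# Illustrative usage (a minimal sketch; the keys are ones used elsewhere in this project
# and are assumed to exist in the YAML files; CONFIG_DIR is defined just below):
#   common = read_config(Path(CONFIG_DIR, "common.yaml"), keepers=["cluster_annot", "cluster_order"])
#   cluster_colors = read_config(Path(CONFIG_DIR, "colors.yaml"), keepers="clusters")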
# Useful directories
PROJECT_DIR = Path(__file__).absolute().parents[2].as_posix()
CONFIG_DIR = Path(PROJECT_DIR, "config").as_posix()
CACHE_DIR = Path("~/.cache").expanduser().as_posix()
# Make missing dirs
Path(CACHE_DIR).mkdir(exist_ok=True, parents=True)
Path(CONFIG_DIR).mkdir(exist_ok=True, parents=True)
# Load config file
config = read_config(Path(CONFIG_DIR, "common.yaml"))
config.update({"colors": read_config(Path(CONFIG_DIR, "colors.yaml"))})
REFERENCES_DIR = config.get("references_dir", os.environ.get("REFERENCES_DIR", None))
# Turn on caching
memory = Memory(cachedir=CACHE_DIR, verbose=0)
```
#### File: src/larval_gonad/io.py
```python
import os
import shelve
from collections import namedtuple
import pickle
import numpy as np
import pandas as pd
import re
import scipy.sparse as sp_sparse
import tables
NUCS = ["A", "C", "G", "T"]
NUCS_INVERSE = {"A": 0, "C": 1, "G": 2, "T": 3}
CellRangerCounts = namedtuple("CellRangerCounts", ["matrix", "gene_ids", "barcodes"])
def safe_gene_name(symbol):
"""Normalize gene symbols for use as file names."""
return (
symbol.replace("(", "")
.replace(")", "")
.replace(":", "")
.replace("&", "")
.replace("|", "")
.replace(".", "")
.replace(" ", "")
)
def pickle_dump(obj: object, file_name: str):
with open(file_name, "wb") as handler:
pickle.dump(obj, handler)
def pickle_load(file_name: str):
with open(file_name, "rb") as handler:
return pickle.load(handler)
def shelve_dump(file_name: str, **kwargs):
"""Save a set of objects to a shelve.
Parameters
----------
file_name: The name of one of the files from a shelve.
**kwargs: Any number of key word arguments to save to the shelve.
"""
with shelve.open(os.path.splitext(file_name)[0]) as db:
for k, v in kwargs.items():
db[k] = v
def shelve_load(file_name: str, *args):
"""Load a set of objects from a shelve.
Parameters
----------
file_name: The name of one of the files from a shelve.
*args: depreciated, does nothing.
"""
res = {}
with shelve.open(os.path.splitext(file_name)[0]) as db:
for k, v in db.items():
res[k] = v
return res
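# Illustrative round trip (a minimal sketch; the file name and keys are hypothetical):
#   shelve_dump("ratios.dat", data=df, male_qval=qvals)
#   db = shelve_load("ratios.dat")
#   db["data"], db["male_qval"]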
def compress_seq(s: str):
""" Pack a DNA sequence (no Ns!) into a 2-bit format, in a 64-bit uint
Most significant bit is set if there was an error
Based on code from: https://github.com/10XGenomics/cellranger
cellranger/lib/python/cellranger/utils.py
"""
bits = 64
assert len(s) <= (bits / 2 - 1)
result = 0
for nuc in s:
if nuc not in NUCS_INVERSE:
return 1 << (bits - 1)
result = result << 2
result = result | NUCS_INVERSE[nuc]
return result
def decompress_seq(x: int, length=16):
""" Un-pack a DNA sequence from a 2-bit format
Based on code from: https://github.com/10XGenomics/cellranger
cellranger/lib/python/cellranger/utils.py
Parameters
----------
x : int
Number sequence to be decoded.
length : int
Length of the barcode. This can be found in the molecular info hdf5
file from 10x genome.
molInfo.get_node_attr('/metrics', 'chemistry_barcode_read_length')
"""
bits = 64
x = np.uint64(x)
assert length <= (bits / 2 - 1)
if x & (1 << (bits - 1)):
return "N" * length
result = bytearray(length)
for i in range(length):
result[(length - 1) - i] = bytearray(NUCS[x & np.uint64(0b11)].encode())[0]
x = x >> np.uint64(2)
return result.decode()
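# Worked example of the 2-bit round trip (values checked by hand):
#   compress_seq("ACGT")          -> 0b00011011 == 27
#   decompress_seq(27, length=4)  -> "ACGT"
# Any sequence containing a non-ACGT character compresses to 1 << 63, which
# decompress_seq renders as a run of "N"s.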
def two_bit_mapper(iterable):
"""Return a dictionary mapping 2bit encoded Seqs.
Parameters
----------
iterable : list-like
Unique list of 2bit encoded sequences.
Returns
-------
dict : Mapper from encoded to decoded
"""
return {k: decompress_seq(k) for k in iterable}
def decode_cell_names(iterable):
"""Use two_bit_mapper to decode cell names.
iterable : np.array
An array of twobit encoded cell names.
"""
mapper = two_bit_mapper(np.unique(iterable))
return [mapper[x] for x in iterable]
def cellranger_umi(fname):
with tables.open_file(fname, "r") as f:
group = f.get_node("/")
barcodes = getattr(group, "barcodes").read()
barcode_idx = getattr(group, "barcode_idx").read()
umi = getattr(group, "umi").read()
read_cnts = getattr(group, "count").read()
cell_ids = np.char.decode(barcodes[barcode_idx])
return pd.DataFrame(dict(cell_id=cell_ids, umi=umi, read_cnt=read_cnts))
def cellranger_counts(fname, genome="matrix"):
"""Import cell ranger counts.
Cell ranger stores it counts tables in a hdf5 formatted file. This reads
this file and outputs them as a named tuple.
Parameters
----------
fname : str
Name of hdf5 store.
genome : str
Group where data is stored.
barcodes : list of int
Encoded barcodes names to filter by
Returns
-------
namedtuple: matrix, gene_ids, barcodes
"""
with tables.open_file(fname, "r") as f:
try:
group = f.get_node(f.root, genome)
except tables.NoSuchNodeError:
print("That genome does not exist in this file.")
return None
gene_ids = getattr(group, "features/id").read()
barcodes = getattr(group, "barcodes").read()
data = getattr(group, "data").read()
indices = getattr(group, "indices").read()
indptr = getattr(group, "indptr").read()
shape = getattr(group, "shape").read()
matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)
gene_ids = np.array([x.decode() for x in gene_ids])
barcodes = np.array([x.decode().replace("-1", "") for x in barcodes])
return CellRangerCounts(matrix, gene_ids, barcodes)
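# Illustrative usage (a minimal sketch; the file name is hypothetical):
#   counts = cellranger_counts("filtered_feature_bc_matrix.h5")
#   counts.matrix    # scipy CSC sparse matrix (features x barcodes)
#   counts.gene_ids  # array of gene ID strings
#   counts.barcodes  # array of cell barcodes with the "-1" suffix stripped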
def feather_to_cluster_rep_matrix(fname):
"""Helper function to building a cluster rep matrix from a feather"""
return (
pd.read_feather(fname)
.set_index(["FBgn", "cluster", "rep"])
.iloc[:, 0]
.unstack(level=[1, 2])
)
def feather_to_cluster_matrix(fname):
"""Helper function to building a cluster matrix from a feather"""
return (
pd.read_feather(fname)
.set_index(["FBgn", "cluster"])
.iloc[:, 0]
.unstack()
)
def melt_cluster_rep_matrix(df, name="count"):
"""Helper function to melt a cluster rep matrix for saving as feather"""
return df.T.reset_index().melt(id_vars=["cluster", "rep"], var_name="FBgn", value_name=name)
def melt_cluster_matrix(df, name="count"):
"""Helper function to melt a cluster matrix for saving as feather"""
return df.T.reset_index().melt(id_vars="cluster", var_name="FBgn", value_name=name)
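# Illustrative round trip (a minimal sketch; the file names are hypothetical):
#   mat = feather_to_cluster_matrix("tpm.feather")                 # FBgn x cluster matrix
#   melt_cluster_matrix(mat, name="TPM").to_feather("out.feather")
# melt_cluster_rep_matrix is the analogous inverse of feather_to_cluster_rep_matrix.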
class GffRow(object):
def __init__(self, row):
self.seqid, self.source, self.type, self.start, self.end, self.score, self.strand, self.phase, self.attributes = row.strip().split(
"\t"
)
self.is_gene = self.type == "gene"
self.parsed_attributes = self.parse_attributes()
def parse_attributes(self):
parsed_attributes = {}
for attr in self.attributes.split(";"):
mm = re.search(r'(?P<key>.*?)\s+"(?P<value>.*?)"', attr)
if mm:
parsed_attributes[mm.group("key").strip()] = mm.group("value").strip()
return parsed_attributes
def __getitem__(self, key):
return self.parsed_attributes[key]
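# Illustrative usage (a minimal sketch; the field values are made up and the attributes
# follow the GTF-style `key "value"` format that parse_attributes expects):
#   row = GffRow('2L\tFlyBase\tgene\t7529\t9484\t.\t+\t.\tgene_id "FBgn0031208"; gene_symbol "CG11023";\n')
#   row.is_gene     -> True
#   row["gene_id"]  -> "FBgn0031208"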
if __name__ == "__main__":
main()
```
#### File: src/larval_gonad/mock.py
```python
from snakemake.io import Namedlist, InputFiles, OutputFiles, Wildcards, Params, Log
class MockSnake:
"""Mock the snakemake class used inside of python scripts."""
def __init__(
self,
input=None,
output=None,
params=None,
wildcards=None,
log="",
config=None,
threads=None,
):
self.input = InputFiles(self.make_namedlist(input))
self.output = OutputFiles(self.make_namedlist(output))
self.params = Params(self.make_namedlist(params))
self.wildcards = Wildcards(self.make_namedlist(wildcards))
self.log = Log(log)
self.config = config or {}
self.threads = threads or 1
self.rulename = "mock"
def make_namedlist(self, item, first_lvl=True):
if isinstance(item, list):
return Namedlist(item)
elif isinstance(item, dict):
return Namedlist(
fromdict={k: self.make_namedlist(v, False) for k, v in item.items()}
)
elif isinstance(item, (str, int, float)):
if first_lvl:
return Namedlist([item])
else:
return item
return Namedlist([])
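# Usage sketch: inject a MockSnake as the module-level `snakemake` object when
# running a workflow script outside of Snakemake (paths and keys are hypothetical):
#
#   snakemake = MockSnake(
#       input={"counts": "counts.feather"},
#       output="plot.svg",
#       params=dict(title="Test"),
#       wildcards=dict(cluster="SP"),
#   )
#   # The script can then access snakemake.input.counts, snakemake.params.title, etc.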
```
#### File: larval_gonad/plotting/demasculinization.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
def demasculinization(data, ax=None, title=None, legend=False, **kwargs):
"""Stacked barplot common for display of demasculinization.
Parameters
----------
data : pd.DataFrame
        DataFrame organized with chromosome as index, columns as Up/NS/Down,
and values as proportions.
ax : plt.Axes, optional
Alternative axes to draw the plot, by default None
title : str, optional
Title to add to the plot, by default None
legend : bool, optional
        Whether to keep the legend, by default False
Returns
-------
plt.Axes
Matplotlib axes with the stacked bar plot.
Example
-------
>>> data = pd.DataFrame({"Up": [.1, .3], "NS": [.7, .4], "Down": [.2, .3]}, index=["X", "A"])
>>> demasculinization(data)
"""
if ax is None:
fig, ax = plt.subplots()
plot_defaults = dict(
stacked=True,
color=["red", "lightgrey", "blue"],
width=0.9,
edgecolor="k",
linewidth=0.2,
)
plot_defaults.update(kwargs)
data.plot.bar(ax=ax, **plot_defaults)
# Clean up X
plt.setp(ax.get_xticklabels(), rotation=0)
ax.set_xlabel("")
# Clean up Y
ax.set_ylim(0, 1)
ax.set_ylabel("")
# Clean up other parts
if title is not None:
ax.set_title(title)
if not legend:
ax.legend_ = None
sns.despine(ax=ax, left=True, bottom=True)
return ax
```
#### File: larval_gonad/plotting/umap.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def plot_umap_panel(umap_data: str, cluster_data: str, colors: list, axes: list = None):
"""Plot panel of UMAP for each replicate.
Example
-------
>>> umap_data = "output/seurat3-cluster-wf/combined_n3_umap.feather"
>>> cluster_data = "output/seurat3-cluster-wf/combined_n3_clusters.feather"
>>> from larval_gonad.config import read_config
>>> color_config = read_config("config/colors.yaml")
>>> colors = color_config["clusters"]
"""
umap = pd.read_feather(umap_data).set_index("cell_id")
clusters = pd.read_feather(cluster_data).set_index("cell_id")
df = umap.join(clusters)
if axes is None:
_, axes = plt.subplots(1, 4, figsize=plt.figaspect(1 / 4), sharex=True, sharey=True)
_make_scatter_panel(df, axes, colors)
_add_labels(df, axes[-1])
_cleanup_axes(axes)
_add_axes_labels(plt.gcf())
def _make_scatter_panel(df, axes, colors):
defaults = dict(
x="UMAP_1",
y="UMAP_2",
hue="cluster",
palette=colors,
s=2,
linewidth=0.02,
edgecolor="k",
rasterized=True,
legend=False,
)
# Plot each replicate
for (rep, subset), ax in zip(df.groupby("rep"), axes.flat[:-1]):
sns.scatterplot(data=subset, ax=ax, **defaults)
ax.set(title=rep)
# Plot combined
ax_combined = axes.flat[-1]
sns.scatterplot(data=df, ax=ax_combined, **defaults)
ax_combined.set(title="Combined")
def _add_labels(df, ax):
"""Add cluster labels to the combined panel"""
for clus, row in df.groupby("cluster")[["UMAP_1", "UMAP_2"]].mean().iterrows():
ax.text(
row.UMAP_1,
row.UMAP_2,
clus,
bbox=dict(facecolor=(1, 1, 1, 0.8), edgecolor="none", pad=0.2),
ha="center",
va="center",
fontweight="bold",
)
def _cleanup_axes(axes):
for ax in axes:
sns.despine(ax=ax)
ax.set(xticks=[-10, 10], yticks=[-10, 10], xlabel="", ylabel="", aspect="equal")
def _add_axes_labels(fig):
defaults = dict(transform=fig.transFigure, ha="center", va="center")
fig.text(0.5, 0.05, "Profile Similarity (UMAP-1)", **defaults)
fig.text(0.1, 0.5, "Profile Similarity (UMAP-2)", rotation=90, **defaults)
```
#### File: x-to-a-wf/scripts/boxplot_autosome_ratios.py
```python
import os
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from larval_gonad.io import shelve_load
from larval_gonad.plotting.stats import add_pvals
TXT = "Normalized by Number of Genes"
YLABELS = {
"x_to_a_ratio": f"X / A\n{TXT}",
"fourth_to_a_ratio": f"4 / A\n{TXT}",
"y_to_a_ratio": f"Y / A\n{TXT}",
}
def main():
db = shelve_load(snakemake.input[0])
df = db["data"].pipe(lambda x: x[x.ratio_type == snakemake.wildcards.ratio_type])
pvals = db["pvalues"].pipe(lambda x: x[x.ratio_type == snakemake.wildcards.ratio_type])
ylabel = YLABELS[snakemake.wildcards.ratio_type]
ax = sns.boxplot(
x="cluster",
y="ratio",
data=df,
palette=snakemake.params.cluster_color,
order=snakemake.params.cluster_order,
notch=True
)
ax.set(xlabel="Cluster ", ylabel=ylabel, title=snakemake.params.title)
add_pvals(pvals.x, pvals.y, pvals.pvalue, ax)
plt.savefig(snakemake.output[0])
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
from larval_gonad.config import read_config
config = read_config("config/common.yaml")
color_config = read_config("config/colors.yaml")
snakemake = snakemake_debug(
workdir="x-to-a-wf",
input="../output/x-to-a-wf/db/ovary.bak",
params=dict(
cluster_color=color_config["clusters"],
cluster_order=config["cluster_order"],
title="Test",
),
wildcards=dict(fbgns="ovary", ratio_type="x_to_a_ratio"),
)
plt.style.use("../config/figure_styles.mplstyle")
plt.rcParams.update({
'figure.figsize': (4, 2)
})
main()
```
|
{
"source": "jfear/lcdb-references",
"score": 3
}
|
#### File: lcdb-references/scripts/gene_annotation.py
```python
import re
from lcdblib.utils.chrom_convert import import_conversion
GTF = "/home/fearjm/data/LCDB/lcdb-references/dmel/r6-32/gtf/dmel_r6-32.gtf"
OUTNAME = "../gene_annotation_dmel_r6-32.tsv"
class GffRow(object):
def __init__(self, row):
self.seqid, self.source, self.type, self.start, self.end, self.score, self.strand, self.phase, self.attributes = row.strip().split(
"\t"
)
self.is_gene = self.type == "gene"
self.parsed_attributes = self.parse_attributes()
def parse_attributes(self):
parsed_attributes = {}
for attr in self.attributes.split(";"):
            mm = re.search(r'(?P<key>.*?)\s+"(?P<value>.*?)"', attr)
if mm:
parsed_attributes[mm.group("key").strip()] = mm.group("value").strip()
return parsed_attributes
def __getitem__(self, key):
return self.parsed_attributes[key]
def main():
fb_mapper = import_conversion("UCSC", "FlyBase")
with open(GTF) as fh, open(OUTNAME, "w") as fo:
fo.write(
"FBgn\tgene_symbol\tUCSC_chrom\tFB_chrom\tstart\tend\tlength\tstrand\n"
)
for row in fh.readlines():
grow = GffRow(row)
if grow.is_gene:
fo.write(
"\t".join(
[
grow["gene_id"],
grow["gene_symbol"],
grow.seqid,
fb_mapper[grow.seqid],
grow.start,
grow.end,
str(int(grow.end) - int(grow.start)),
grow.strand,
]
)
+ "\n"
)
if __name__ == "__main__":
main()
```
|
{
"source": "jfear/sra2mongo",
"score": 3
}
|
#### File: sra2mongo/tests/test_xml_helpers.py
```python
import sramongo.xml_helpers
from pathlib import Path
def test_xml_to_root_from_file_handler():
fname = Path(__file__).parent / "data/ERR1662611.xml"
with open(fname) as fh:
root = sramongo.xml_helpers.xml_to_root(fh)
experiment = root.find("EXPERIMENT")
assert experiment.attrib["accession"] == "ERX1732932"
def test_xml_to_root_from_string():
fname = Path(__file__).parent / "data/ERR1662611.xml"
with open(fname) as fh:
xml = fh.read()
root = sramongo.xml_helpers.xml_to_root(xml)
experiment = root.find("EXPERIMENT")
assert experiment.attrib["accession"] == "ERX1732932"
def test_xml_get_text(sra_xml_root):
root = sra_xml_root
srx = sramongo.xml_helpers.get_xml_text(root, "EXPERIMENT/IDENTIFIERS/PRIMARY_ID")
assert srx == "SRX971855"
def test_xml_get_text_invalid_path(sra_xml_root):
root = sra_xml_root
result = sramongo.xml_helpers.get_xml_text(
root, "EXPERIMENT/IDENTIFIERS/PRIMARY_ID/NOT_REALLY_HERE"
)
assert result == ""
```
|
{
"source": "jfear/tada_gonad",
"score": 3
}
|
#### File: carl-wf/scripts/combine_result_tables.py
```python
import re
import pandas as pd
def main():
with pd.ExcelWriter(snakemake.output[0]) as writer:
for file_name in snakemake.input.files:
sheet_name = make_sheet_name(file_name)
df = pd.read_csv(file_name, sep="\t", index_col=0).query("padj <= 0.05")
if snakemake.input.get("annot", False):
df = df.join(read_annot(snakemake.input.annot)).set_index("gene_symbol", append=True)
df.to_excel(writer, sheet_name=sheet_name)
def make_sheet_name(file_name):
res = re.findall(r".*_(?P<tissue>.*)_(?P<driver>.*)\.(?P<ext>\d+)\.tsv$", file_name)[0]
return "{}_{}_{}".format(*res)
def read_annot(file_name):
return (
pd.read_csv(file_name, sep="\t", usecols=["gene_symbol", "primary_FBgn"])
.set_index("primary_FBgn")
.squeeze()
.rename_axis("FBgn")
)
if __name__ == "__main__":
main()
```
|
{
"source": "jfear/vscode_demo",
"score": 4
}
|
#### File: vscode_demo/scripts/make_db.py
```python
import sqlite3 as db
import pandas as pd
def main():
df = pd.DataFrame({"one": [1, 2, 3], "two": [4, 5, 7]})
con = db.connect("/tmp/my_sql_test.sqlite")
df.to_sql("pd2sql", con)
con.close()
# TODO: pd.read_csv intellisense
# TODO: def snippet
# TODO: Show how to make your own snippets
if __name__ == "__main__":
main()
```
#### File: src/tests/test_io.py
```python
from pathlib import Path
from my_cool_pkg import io
def test_hello_world():
assert io.hello_world() == "Hello World!"
def test_current_path():
assert isinstance(io.current_path(), Path)
def test_fooBar():
assert io.fooBar()[0] == "Foo"
```
|
{
"source": "Jfeatherstone/ColorGlass",
"score": 3
}
|
#### File: ColorGlass/cgc/ArbColors.py
```python
from .Wavefunction import Wavefunction
from .LinAlg import expm, get_basis
import numba
import numpy as np
from scipy.fft import ifft2, fft2
class Nucleus(Wavefunction):
# Upon calling wilsonLine() or adjointWilsonLine(), these are properly defined
_wilsonLine = None
_adjointWilsonLine = None
# Some variables to keep track of what has been calculated/generated so far
# allowing us to avoid redundant computations
_wilsonLineExists = False
_adjointWilsonLineExists = False
def __init__(self, colorCharges, N, delta, mu, M=.5, g=1, Ny=100, rngSeed=None):
r"""
Dense object to be used in an instance of `cgc.Collision.Collision`.
Implements calculation of the Wilson Line using the generalized basis matrix set.
Parameters
----------
colorCharges : positive integer
The number of possible color charges; also the dimensionality of the special unitary group.
N : positive integer
The size of the square lattice to simulate.
delta : positive float
The distance between adjacent lattice sites.
mu : positive float
The scaling for the random gaussian distribution that generates the color charge density.
M : float (default=.5)
Infrared regulator parameter to regularize the Poisson equation for the gauge field.
g : float (default=1)
Parameter in the Poisson equation for the gauge field.
Ny : positive integer (default=100)
The longitudinal extent (in layers) of the nucleus object.
rngSeed : int (default=None)
Seed for the random number generator to initialize the color charge field
"""
super().__init__(colorCharges, N, delta, mu, M, g, rngSeed) # Super constructor
self._basis = get_basis(colorCharges)
self.Ny = Ny
# Modify the gaussian width to account for the multiple longitudinal layers
self.gaussianWidth = self.mu / self.delta / np.sqrt(self.Ny)
def colorChargeField(self, forceCalculate=False, verbose=0):
r"""
Generates the color charge density field according to a gaussian distribution. Differs
from super class implementation in that it generates the numerous fields according
to `Ny`. That is, the field \(\rho\) satisfies:
$$ \langle \rho_{a}^{(t)}(i^-,\vec i_{\perp}) \rho_{b}^{(t)}({j^-},\vec j_{\perp}) \rangle = g^2\mu_t^2 \frac{ 1 }{N_y \Delta^2} ~\delta_{ab}~\delta_{i_{\perp,1}\ j_{\perp,1}}~\delta_{i_{\perp,2} \ j_{\perp,2}} ~\delta_{i^- \ {j^-}} $$
If the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
colorChargeField : array(Ny, N, N, `colorCharges`**2 - 1)
"""
if self._colorChargeFieldExists and not forceCalculate:
return self._colorChargeField
if verbose > 0:
print(f'Generating {type(self).__name__} color charge field' + '.'*10, end='')
# To compare to old results
#self._colorChargeField = self.rng.normal(scale=self.gaussianWidth, size=(self.Ny, self.gluonDOF, self.N, self.N))
#self._colorChargeField = self._colorChargeField.swapaxes(1, 2)
#self._colorChargeField = self._colorChargeField.swapaxes(2, 3)
        # Randomly generate the initial color charge density using a gaussian distribution
self._colorChargeField = self.rng.normal(scale=self.gaussianWidth, size=(self.Ny, self.N, self.N, self.gluonDOF))
# Make sure we don't regenerate this field since it already exists on future calls
self._colorChargeFieldExists = True
if verbose > 0:
print('finished!')
return self._colorChargeField
def gaugeField(self, forceCalculate=False, verbose=0):
r"""
Calculates the gauge field for all longitudinal layers and charge distributions by solving the (modified)
Poisson equation involving the color charge field
$$g \frac{1 } {\partial_\perp^2 - m^2 } \rho_a(i^-, \vec {i}_\perp )$$
via Fourier method.
If the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
gaugeField : array(Ny, N, N, `colorCharges`**2 - 1)
"""
if self._gaugeFieldExists and not forceCalculate:
return self._gaugeField
# Make sure the charge field has already been generated (if not, this will generate it)
self.colorChargeField(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} gauge field' + '.'*10, end='')
# Compute the fourier transform of the charge field
# Note that the normalization is set to 'backward', which for scipy means that the
# ifft2 is scaled by 1/n (where n = N^2)
chargeDensityFFTArr = fft2(self._colorChargeField, axes=(1,2), norm='backward')
# Absorb the numerator constants in the equation above into the charge density
chargeDensityFFTArr = -self.delta2 * self.g / 2 * chargeDensityFFTArr
# Calculate the individual elements of the gauge field in fourier space
# Note here that we have to solve the gauge field for each layer and for each gluon degree of freedom
# This method is defined at the bottom of this file; see there for more information
gaugeFieldFFTArr = _calculateGaugeFFTOpt(self.gluonDOF, self.N, self.Ny, self.poissonReg, chargeDensityFFTArr);
# Take the inverse fourier transform to get the actual gauge field
self._gaugeField = np.real(ifft2(gaugeFieldFFTArr, axes=(1,2), norm='backward'))
# Make sure this process isn't repeated unnecessarily by denoting that it has been done
self._gaugeFieldExists = True
if verbose > 0:
print('finished!')
return self._gaugeField
def wilsonLine(self, forceCalculate=False, verbose=0):
"""
Calculate the Wilson line using the gauge field and the appropriate basis matrices.
If the line already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
        wilsonLine : array(N, N, `colorCharges`, `colorCharges`)
"""
if self._wilsonLineExists and not forceCalculate:
return self._wilsonLine
# Make sure the gauge field has already been calculated
self.gaugeField(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} wilson line' + '.'*10, end='')
# We now combine all of the longitudinal layers into the single wilson line
# Optimized method is defined at the end of this file; see there for more information
self._wilsonLine = _calculateWilsonLineOpt(self.N, self.Ny, self.colorCharges, self._basis, self._gaugeField)
self._wilsonLineExists = True
if verbose > 0:
print('finished!')
return self._wilsonLine
def adjointWilsonLine(self, forceCalculate=False, verbose=0):
"""
Calculate the Wilson line in the adjoint representation.
If the line already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
adjointWilsonLine : array(N, N, `colorCharges`**2 - 1, `colorCharges`**2 - 1)
"""
if self._adjointWilsonLineExists and not forceCalculate:
return self._adjointWilsonLine
# Make sure the wilson line has already been calculated
self.wilsonLine(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} adjoint wilson line' + '.'*10, end='')
# Calculation is optimized with numba, as with the previous calculations
# See bottom of the file for more information
self._adjointWilsonLine = _calculateAdjointWilsonLineOpt(self.gluonDOF, self.N, self._basis, self._wilsonLine)
self._adjointWilsonLineExists = True
if verbose > 0:
print('finished!')
return self._adjointWilsonLine
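# Usage sketch for the Nucleus class (parameter values below are illustrative only):
#
#   nucleus = Nucleus(colorCharges=3, N=128, delta=0.1, mu=2, Ny=50, rngSeed=0)
#   V = nucleus.wilsonLine(verbose=1)          # shape (N, N, 3, 3)
#   U = nucleus.adjointWilsonLine(verbose=1)   # shape (N, N, 8, 8)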
# Since we want to speed up the calculation, we define the computation of the Fourier elements of
# the gauge field using a numba-compiled method
# This has to be defined outside of the Nucleus class since numba doesn't play well with custom classes
@numba.jit(nopython=True, cache=True)
def _calculateGaugeFFTOpt(gluonDOF, N, Ny, poissonReg, chargeDensityFFTArr):
r"""
Calculate the elements of the gauge field in fourier space.
This method is optimized using numba.
"""
gaugeFieldFFTArr = np.zeros_like(chargeDensityFFTArr, dtype='complex')
# Precompute for speed
two_pi_over_N = 2 * np.pi / N
for l in range(Ny):
for k in range(gluonDOF):
for i in range(N):
for j in range(N):
gaugeFieldFFTArr[l,i,j,k] = chargeDensityFFTArr[l,i,j,k]/(2 - np.cos(two_pi_over_N*i) - np.cos(two_pi_over_N*j) + poissonReg)
return gaugeFieldFFTArr
# Same deal as the above method, we have to define it outside the class so
# numba doesn't get confused
@numba.jit(nopython=True, cache=True)
def _calculateWilsonLineOpt(N, Ny, colorCharges, basis, gaugeField):
r"""
Calculate the elements of the wilson line.
This method is optimized using numba.
"""
wilsonLine = np.zeros((N, N, colorCharges, colorCharges), dtype='complex')
gluonDOF = colorCharges**2 - 1
    # Slightly different ordering of indexing than in other places in the code,
    # due to the fact that we have to sum over the gluonDOF and Ny axes
for i in range(N):
for j in range(N):
# Create the unit matrix for each point since we are multiplying
# the wilson line as we go (so we need to start with the identity)
for c in range(colorCharges):
wilsonLine[i,j,c,c] = 1
# The path ordered exponential becomes a product of exponentials for each layer
for l in range(Ny):
# Evaluate the argument of the exponential first
# We multiply the elements of the gauge field for each gluon degree of freedom
# by the respective basis matrix and sum them together
expArgument = np.zeros((colorCharges, colorCharges), dtype='complex') # Same shape as basis matrices
for k in range(gluonDOF):
expArgument = expArgument + gaugeField[l,i,j,k] * basis[k]
# Now actually calculate the exponential with our custom defined expm method
# that can properly interface with numba (scipy's can't)
exponential = np.ascontiguousarray(expm(-1.j * expArgument))
wilsonLine[i,j] = np.dot(wilsonLine[i,j], exponential)
return wilsonLine
@numba.jit(nopython=True, cache=True)
def _calculateAdjointWilsonLineOpt(gluonDOF, N, basis, wilsonLine):
r"""
Calculate the wilson line in the adjoint representation.
This method is optimized using numba
"""
    # Wilson line is always real in the adjoint representation, so no need for dtype='complex' as with the others
adjointWilsonLine = np.zeros((N, N, gluonDOF, gluonDOF), dtype='double')
for a in range(gluonDOF):
for b in range(gluonDOF):
for i in range(N):
for j in range(N):
V = wilsonLine[i,j]
Vdag = np.conjugate(np.transpose(V))
adjointWilsonLine[i,j,a,b] = 2 * np.real(np.trace(np.dot(np.dot(basis[a], V), np.dot(basis[b], Vdag))))
return adjointWilsonLine
```
#### File: ColorGlass/cgc/Collision.py
```python
from .Wavefunction import Wavefunction
import numpy as np
from scipy.fft import ifft2, fft2
import numba
CACHE_OPTIMIZATIONS = True
class Collision():
targetWavefunction = None # Implements wilson line
incidentWavefunction = None # Doesn't (have to) implement wilson line
_omega = None
_omegaFFT = None
_particlesProduced = None
_particlesProducedDeriv = None
_momentaMagSquared = None
_momentaComponents = None
_thetaInFourierSpace = None
_momentaBins = None
_fourierHarmonics = None # This will be initialized as an empty dict to store harmonics (see __init__)
_omegaExists = False
_omegaFFTExists = False
_momentaComponentsExist = False
_particlesProducedExists = False
_particlesProducedDerivExists = False
_momentaBinsExists = False
def __init__(self, wavefunction1: Wavefunction, wavefunction2: Wavefunction):
r"""
Initialize a collision with two wavefunctions, presumably a nucleus and a proton. One must implement
the wilson line, though the order of the arguments does not matter.
In the case that both wavefunctions implement the wilson line, the first (wavefunction1) will be used as such.
In the case that neither implement the wilson line, an exception will be raised.
Parameters
----------
wavefunction1 : Wavefunction (or child)
The first wavefunction
wavefunction2 : Wavefunction (or child)
The second wavefunction
"""
# Make sure that at least one has a wilson line
wilsonLineExists1 = callable(getattr(wavefunction1, "wilsonLine", None))
wilsonLineExists2 = callable(getattr(wavefunction2, "wilsonLine", None))
if not wilsonLineExists1 and not wilsonLineExists2:
raise Exception("Neither of the wavefunctions passed to Collision(Wavefunction, Wavefunction) implement the wilsonLine() method; at least one is required to.")
if wilsonLineExists1 and not wilsonLineExists2:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
elif wilsonLineExists2 and not wilsonLineExists1:
self.targetWavefunction = wavefunction2
self.incidentWavefunction = wavefunction1
else:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
# Make sure that both use the same number of colors
if self.targetWavefunction.gluonDOF != self.incidentWavefunction.gluonDOF:
raise Exception(f"Wavefunctions implement different gluon degrees of freedom (number of color charges): {self.incidentWavefunction.gluonDOF} vs. {self.targetWavefunction.gluonDOF}")
        # Probably some other checks that need to be done to make sure the two wavefunctions are compatible, but this is fine for now
# Carry over some variables so we don't have to call through the wavefunctions so much
self.N = self.targetWavefunction.N
self.length = self.targetWavefunction.length
self.gluonDOF = self.targetWavefunction.gluonDOF
self.delta = self.targetWavefunction.delta
self.delta2 = self.targetWavefunction.delta2
#print(self.targetWavefunction)
#print(self.incidentWavefunction)
# Variables to do with binning the momenta later on
self.binSize = 4*np.pi/self.length
self.kMax = 2/self.delta
self.numBins = int(self.kMax/self.binSize)
# This has to be initialized as an empty dict within the constructor
# because otherwise it can retain information across separate objects
# (no idea how, but this fixes it)
self._fourierHarmonics = {}
def omega(self, forceCalculate=False, verbose=0):
r"""
Calculate the field omega at each point on the lattice.
If the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
omega : array(N, N, 2, 2, `colorCharges`**2 - 1)
"""
if self._omegaExists and not forceCalculate:
return self._omega
self.incidentWavefunction.gaugeField(verbose=verbose)
self.targetWavefunction.adjointWilsonLine(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} omega' + '.'*10, end='')
self._omega = _calculateOmegaOpt(self.N, self.gluonDOF, self.delta, self.incidentWavefunction.gaugeField(), self.targetWavefunction.adjointWilsonLine())
self._omegaExists = True
if verbose > 0:
print('finished!')
return self._omega
def omegaFFT(self, forceCalculate=False, verbose=0):
r"""
Compute the fourier transform of the field omega on the lattice.
If the fft of the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
omegaFFT : array(N, N, 2, 2, `colorCharges`**2 - 1)
"""
if self._omegaFFTExists and not forceCalculate:
return self._omegaFFT
# Make sure omega exists
self.omega(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} omega fourier transform' + '.'*10, end='')
# We want to do the normalization explicitly, but scipy doesn't offer no
# normalization as an option, so we just set it to be the opposite of whatever
# we are doing (forward for ifft, backward for fft)
# (we had some issues with scipy changing its default mode)
self._omegaFFT = self.delta2 * fft2(self._omega, axes=(0,1), norm='backward')
self._omegaFFTExists = True
if verbose > 0:
print('finished!')
return self._omegaFFT
def momentaBins(self, forceCalculate=False, verbose=0):
r"""
Compute the range of momenta at which particles will be created based on the dimensions of the lattice.
The exact values are:
- \( k_{max} = 2 / \Delta\)
- \( w_k = 4 \pi / L \)
If the bins already exist, they are simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaBins : array(numBins = L / (delta 2 pi))
"""
if self._momentaBinsExists and not forceCalculate:
return self._momentaBins
if verbose > 0:
print(f'Calculating {type(self).__name__} momentum bins' + '.'*10, end='')
self._momentaBins = [i*self.binSize for i in range(self.numBins)]
self._momentaBinsExists = True
if verbose > 0:
print('finished!')
return self._momentaBins
def momentaComponents(self, forceCalculate=False, verbose=0):
r"""
Compute the components of the momentum at each point on the lattice, according to:
$$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
where \(i\) and \(j\) index the \(x\) and \(y\) directions in real space, respectively.
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaComponents : array(N, N, 2)
"""
if self._momentaComponentsExist and not forceCalculate:
return self._momentaComponents
if verbose > 0:
print(f'Calculating {type(self).__name__} momentum components' + '.'*10, end='')
self._momentaComponents, self._thetaInFourierSpace = _calculateMomentaOpt(self.N, self.delta)
self._momentaMagSquared = np.linalg.norm(self._momentaComponents, axis=2)**2
self._momentaComponentsExist = True
if verbose > 0:
print('finished!')
return self._momentaComponents
def momentaMagnitudeSquared(self, forceCalculate=False, verbose=0):
r"""
Compute the magnitude of the momentum at each point on the lattice, according to:
$$ |k| = \sqrt{k_x^2 + k_y^2} $$
$$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
where \(i\) and \(j\) index the \(x\) and \(y\) directions in real space, respectively.
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaComponents : array(N, N)
"""
if self._momentaComponentsExist and not forceCalculate:
return self._momentaMagSquared
if verbose > 0:
print(f'Calculating {type(self).__name__} momenta magnitude squared' + '.'*10, end='')
self._momentaComponents, self._thetaInFourierSpace = _calculateMomentaOpt(self.N, self.delta)
self._momentaMagSquared = np.linalg.norm(self._momentaComponents, axis=2)**2
self._momentaComponentsExist = True
if verbose > 0:
print('finished!')
return self._momentaMagSquared
def particlesProducedDeriv(self, forceCalculate=False, verbose=0):
r"""
Compute the derivative of particles produced (\( \frac{d^2 N}{d^2 k} \)) at each point on the lattice
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProducedDeriv : array(N, N)
"""
if self._particlesProducedDerivExists and not forceCalculate:
return self._particlesProducedDeriv
# Make sure these quantities exist
self.omegaFFT(verbose=verbose)
self.momentaMagnitudeSquared(verbose=verbose) # This also calculates thetaInFourierSpace and momentaComponents
if verbose > 0:
print(f'Calculating {type(self).__name__} derivative of particles produced' + '.'*10, end='')
self._particlesProducedDeriv = _calculateParticlesProducedDerivOpt(self.N, self.gluonDOF, self._momentaMagSquared, self._omegaFFT)
if verbose > 0:
print('finished!')
self._particlesProducedDerivExists = True
return self._particlesProducedDeriv
def particlesProduced(self, forceCalculate=False, verbose=0):
r"""
Compute the number of particles produced \(N(|k|)\) as a function of momentum. Note that this
is technically the zeroth fourier harmonic, so this actually just calls the
cgc.Collision.fourierHarmonic() function.
The particles are binned according to cgc.Collision.momentaBins().
Most likely will be plotted against cgc.Collision.momentaBins().
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProduced : array(numBins = L / (delta 2 pi))
"""
# This one is strictly real, so we should make sure that is updated
self._fourierHarmonics[0] = np.real(self.fourierHarmonic(0, forceCalculate, verbose))
return self._fourierHarmonics[0]
def fourierHarmonic(self, harmonic: int, forceCalculate=False, verbose=0):
r"""
Calculate the fourier harmonic of the particle production as:
$$ v_n = \frac{ \sum_{(i,j)\in [k, k+ \Delta k]} |k| \frac{d^2 N}{d^2 k} e^{i n \theta }} { \sum_{(i,j)\in [k, k+ \Delta k]} |k| } $$
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
harmonic : int
The fourier harmonic to calculate. All odd harmonics should be zero, and the zeroth harmonic
will be equal to cgc.Collision.particlesProduced()
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProduced : array(numBins = L / (delta 2 pi))
"""
# First, see if we have already calculated this harmonic
if harmonic in self._fourierHarmonics.keys() and not forceCalculate:
return self._fourierHarmonics[harmonic]
# For actually calculating the harmonic, we first have to make sure we've calculated
# the derivative, dN/d^2k
# This makes sure that _momentaMagSquared, _thetaInFourierSpace and _particlesProducedDeriv
# all exist
self.particlesProducedDeriv(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} fourier harmonic: {harmonic}' + '.'*10, end='')
# Drop all of our arrays into long 1D structure, since we will want to bin them
vectorizedParticleDerivs = np.reshape(self._particlesProducedDeriv, [self.N*self.N])
vectorizedTheta = np.reshape(self._thetaInFourierSpace, [self.N*self.N])
vectorizedMomentaMag = np.reshape(np.sqrt(self._momentaMagSquared), [self.N*self.N])
# The number of particles that are produced in each bin
# These bins are actually just thin rings in momentum space
self._fourierHarmonics[harmonic] = np.zeros(self.numBins, dtype='complex')
        # The bin sizes/bounds are calculated elsewhere
self.momentaBins()
        # Ideally, these rings should only have a thickness dk (infinitesimal)
# but since we have a discrete lattice, we weight the particles by their momentum
# (which may slightly vary) and then properly normalize
# Go through each bin and calculate (for all points in that bin):
# 1. Sum over |k| * dN/d^2k * exp(i * harmonic * theta)
# 2. Sum over |k|
# 3. Divide 1./2.
for i in range(self.numBins):
# Find which places on the lattice fall into this particular momentum bin
# Note the use of element-wise (or bitwise) and, "&"
particleDerivsInRing = vectorizedParticleDerivs[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
momentaMagInRing = vectorizedMomentaMag[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
thetaInRing = vectorizedTheta[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
# Note that multiplication is done element-wise by default
numeratorSum = np.sum(particleDerivsInRing * momentaMagInRing * np.exp(1.j * harmonic * thetaInRing))
denominatorSum = np.sum(momentaMagInRing)
self._fourierHarmonics[harmonic][i] = numeratorSum / denominatorSum
if verbose > 0:
print('finished!')
return self._fourierHarmonics[harmonic]
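# Usage sketch (assumes `nucleus` and `proton` are Wavefunction instances, at least
# one of which implements wilsonLine(), e.g. cgc.ArbColors.Nucleus):
#
#   col = Collision(nucleus, proton)
#   k = col.momentaBins()
#   N_k = col.particlesProduced(verbose=1)   # zeroth harmonic, binned over |k|
#   v2 = col.fourierHarmonic(2)              # second harmonic in the same bins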
# Using custom functions within other jitted functions can cause some issues,
# so we define the signatures explicitly for these two functions.
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _x_deriv(matrix, i, j, N, delta):
return (matrix[i,(j+1)%N] - matrix[i,j-1]) / (2 * delta)
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _y_deriv(matrix, i, j, N, delta):
return (matrix[(i+1)%N,j] - matrix[i-1,j]) / (2 * delta)
# Because of the same issue described above, we can't cache this function
# This function gives a warning because numba only experimentally supports
# treating functions as objects (the list derivs).
@numba.jit(nopython=True)
def _calculateOmegaOpt(N, gluonDOF, delta, incidentGaugeField, targetAdjointWilsonLine):
"""
Calculate the field omega at each point on the lattice.
If the field already exists, it is simply returned and no calculation is done.
Returns
-------
numpy.array : shape=(N, N, 2, 2, `colorCharges`**2 - 1)
"""
# 2,2 is for the 2 dimensions, x and y
omega = np.zeros((N, N, 2, 2, gluonDOF), dtype='complex') # 2 is for two dimensions, x and y
derivs = [_x_deriv, _y_deriv]
for i in range(N):
for j in range(N):
for k in range(gluonDOF):
for l in range(2): # 2 is number of dimensions
for n in range(2): # 2 is number of dimensions
omega[i,j,l,n,k] = np.sum(np.array([derivs[l](incidentGaugeField[:,:,m], i, j, N, delta) * derivs[n](targetAdjointWilsonLine[:,:,k,m], i, j, N, delta) for m in range(gluonDOF)]))
return omega
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateMomentaOpt(N, delta):
"""
    Optimized (via numba) function to calculate the position (momentum) in Fourier space of each point
Parameters
----------
N : int
Size of the lattice
delta : double
Spacing between each point
Returns
-------
(momentaComponents, theta)
momentaComponents : array(N, N, 2)
x and y components of the momentum at each point
theta : array(N, N)
Relationship between x and y components at each point, or atan2(k_y, k_x)
"""
momentaComponents = np.zeros((N, N, 2))
theta = np.zeros((N, N))
for i in range(N):
for j in range(N):
# Note that these components are of the form:
# k_x = 2/a sin(k_x' a / 2)
# Though the argument of the sin is simplified a bit
momentaComponents[i,j] = [2/delta * np.sin(np.pi*i/N) * np.sign(np.sin(2*np.pi*i/N)), 2/delta * np.sin(np.pi*j/N) * np.sign(np.sin(2*np.pi*j/N))]
theta[i,j] = np.arctan2(momentaComponents[i,j,1], momentaComponents[i,j,0])
return momentaComponents, theta
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateParticlesProducedDerivOpt(N, gluonDOF, momentaMagSquared, omegaFFT):
"""
Optimized (via numba) function to calculate dN/d^2k
Parameters
----------
N : int
The system size
gluonDOF : int
The number of gluon degrees of freedom ((possible color charges)^2 - 1)
momentaMagSquared : array(N, N)
The magnitude of the momentum at each point, likely calculated (in part) with _calculateMomentaOpt()
    omegaFFT : array(N, N, 2, 2, gluonDOF)
Previously calculated omega array
Returns
-------
particleProduction : array(N, N)
The number of particles produced at each point on the momentum lattice
"""
# Where we will calculate dN/d^2k
particleProduction = np.zeros((N,N))
    # 2D Levi-Civita symbol
    LCS = np.array([[0,1],[-1,0]])
    # 2D Kronecker delta
    KDF = np.array([[1,0],[0,1]])
# Note that unlike in the rest of the code, i and j *do not* refer to the
    # spatial indices here: x and y do (too many indices... :/ )
for y in range(N):
for x in range(N):
# To prevent any divide by zero errors
if momentaMagSquared[y,x] == 0:
continue
# All of these 2s are for our two dimensions, x and y
for i in range(2):
for j in range(2):
for l in range(2):
for m in range(2):
for a in range(gluonDOF):
particleProduction[y,x] += np.real(2/(2*np.pi)**3 / momentaMagSquared[y,x] * (
(KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])) * (
omegaFFT[y,x,i,j,a] * np.conj(omegaFFT[y,x,l,m,a])))
return particleProduction
```
|
{
"source": "Jfeatherstone/pepe",
"score": 4
}
|
#### File: pepe/analysis/BrightnessGSquared.py
```python
import numpy as np
import cv2
import numba
from pepe.preprocess.Image import checkImageType
def averageBrightness(frame):
r"""
Compute the average brightness (pixel intensity) of a frame.
Parameters
----------
frame : np.ndarray[H,W] or str
The frame for which to compute the average brightness. Can also
be a path to an image.
Returns
-------
averageBrightness : float
The average brightness of the image.
"""
return np.mean(checkImageType(frame))
def varianceBrightness(frame):
r"""
Compute the standard deviation in brightness (pixel intensity) of a frame.
Parameters
----------
frame : np.ndarray[H,W] or str
The frame for which to compute the average brightness. Can also
be a path to an image.
Returns
-------
varianceBrightness: float
The standard deviation of the brightness of the image.
"""
return np.var(checkImageType(frame))
@numba.jit(nopython=True)
def gSquared(properFrame):
"""
The gradient squared at each pixel of the image, also known as the convolution
of a Laplacian kernel across the image.
Optimized via `numba`.
Edge values are padded with values of 0.
Parameters
----------
properFrame : np.ndarray[H,W]
An array representing a single channel of an image.
Returns
-------
gSqr : np.ndarray[H,W]
The gradient squared at every point.
References
----------
[1] DanielsLab Matlab implementation, https://github.com/DanielsNonlinearLab/Gsquared
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2019). Enlightening force chains: A review of photoelasticimetry in granular matter. Granular
Matter, 21(4), 83. [10.1007/s10035-019-0942-2](https://doi.org/10.1007/s10035-019-0942-2)
"""
# Take the full size of the image, though know that the outermost row and
# column of pixels will be 0
gSquared = np.zeros_like(properFrame)
# Iterate over every pixel
# Regardless of whether we have a good region, G^2 needs a buffer of 1 pixel on each
# side, so we have to crop down more
for j in range(1, np.shape(properFrame)[0]-1):
for k in range(1, np.shape(properFrame)[1]-1):
# I've put a little picture of which pixels we are comparing
# for each calculation (O is the current pixel, X are the
# ones we are calculating)
# - - -
# X O X
# - - -
g1 = float(properFrame[j, k-1]) - float(properFrame[j, k+1])
# - X -
# - O -
# - X -
g2 = float(properFrame[j-1, k]) - float(properFrame[j+1, k])
# - - X
# - O -
# X - -
g3 = float(properFrame[j-1, k+1]) - float(properFrame[j+1, k-1])
# X - -
# - O -
# - - X
g4 = float(properFrame[j-1, k-1]) - float(properFrame[j+1, k+1])
gSquared[j,k] = (g1*g1/4.0 + g2*g2/4.0 + g3*g3/8.0 + g4*g4/8.0)/4.0
return gSquared
def averageGSquared(frame):
r"""
Compute the average local gradient squared, or \(G^2\), of a frame.
If multichannel image is provided, will convert to grayscale by averaging
over the channels.
Parameters
----------
frame : np.ndarray[H,W] or str
The frame for which to compute the average gradient squared. Can also
be a path to an image.
Returns
-------
averageGSquared : float
The average gradient squared of the image.
References
----------
[1] DanielsLab Matlab implementation, https://github.com/DanielsNonlinearLab/Gsquared
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2019). Enlightening force chains: A review of photoelasticimetry in granular matter. Granular
Matter, 21(4), 83. [10.1007/s10035-019-0942-2](https://doi.org/10.1007/s10035-019-0942-2)
"""
# This will load in the image if the method is passed an image
# file path
properFrame = checkImageType(frame)
# Make sure that our image is grayscale
if properFrame.ndim == 3:
properFrame = np.mean(properFrame, axis=-1)
    # Use the optimized gSquared method so it is fast
return np.mean(gSquared(properFrame))
def varianceGSquared(frame):
r"""
Compute the variance in the local gradient squared, or \(G^2\), of a frame.
If multichannel image is provided, will convert to grayscale by averaging
over the channels.
Parameters
----------
frame : np.ndarray[H,W] or str
The frame for which to compute the variance of the gradient squared. Can also
be a path to an image.
Returns
-------
varianceGSquared : float
The variance of the gradient squared of the image.
References
----------
[1] DanielsLab Matlab implementation, https://github.com/DanielsNonlinearLab/Gsquared
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
(2019). Enlightening force chains: A review of photoelasticimetry in granular matter. Granular
Matter, 21(4), 83. [10.1007/s10035-019-0942-2](https://doi.org/10.1007/s10035-019-0942-2)
"""
# This will load in the image if the method is passed an image
# file path
properFrame = checkImageType(frame)
# Make sure that our image is grayscale
if properFrame.ndim == 3:
properFrame = np.mean(properFrame, axis=-1)
    # Use the optimized gSquared method so it is fast
return np.var(gSquared(properFrame))
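# Usage sketch for the brightness / G^2 helpers above (the image path is hypothetical):
#
#   frame = checkImageType("frame_0001.png")
#   gray = np.mean(frame, axis=-1) if frame.ndim == 3 else frame
#   g2_field = gSquared(gray)                      # per-pixel gradient squared
#   print(averageBrightness(frame), averageGSquared(frame))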
```
#### File: pepe/analysis/D2Min.py
```python
import numpy as np
from numpy.linalg import inv
# For calculating which neighbors are within a certain distance
from sklearn.neighbors import KDTree
def calculateD2Min(initialCenters, finalCenters, refParticleIndex=0, interactionRadius=None, interactionNeighbors=None, normalize=True):
"""
Calculate the d2min between an initial and final state of particles;
a measure of how non-affine the transformation is, as originally described
in [1].
Parameters
----------
initialCenters : np.ndarray[N,d] or list
List of initial center points of N particles in d dimensions.
finalCenters : np.ndarray[N,d] or list
List of final center points of N particles in d dimensions. Must be
in the same order as initialCenters.
refParticleIndex : int
The index of the particle to treat as the reference particle (r_0 in Falk & Langer
1998 eq. 2.11). If set to None, will calculate the D2min N times using each particle
as the reference (and return types will have an extra first dimension N). Simiarly, can
be a list of indices for which to calculate as the refernce indices.
interactionRadius : float
The maximum distance between particles that can be considered neighbors. Recommended to
be set to around 1.1 - 1.5 times the mean particle radius of the system.
If set to None, all other particles in the system will be considered neighbors. See interactionNeighbors
for specifying a fixed number of neighbors. In the case that neither a radius or number of
neighbors are specified, calculation will default to using all other particles as neighbors.
interactionNeighbors : int
As opposed to using an interactionRadius to define neighbors, a fixed number of neighbors can
be specified here. This number of neighbors will be found using a kd-tree for the reference point(s).
In the case that neither a radius or number of neighbors are specified, calculation will default
to using all other particles as neighbors.
normalize : bool
Whether to divide the d2min by the number of neighbors used to calculate it (True) or not (False).
        For heterogeneous systems where the number of neighbors can vary significantly, it is recommended to set this to True.
        Will make little difference if a fixed number of neighbors (see interactionNeighbors) is used.
Returns
-------
d2min : float
The minimum value of D2 for the transition from the initial to final state. Units
are a squared distance dimensionally the same as the initial and final centers (likely
a pixel^2 value if tracked from images). Changing units to particle diameters afterwards
may be necessary.
epsilon : numpy.ndarray[d,d]
The uniform strain tensor that minimizes D2; equation 2.14 in [1].
In the case that `refParticleIndex=None`, the return will instead be a tuple of numpy arrays
containing the same information but for every particle:
d2minArr : np.ndarray[N]
The minimum value of D2 for the transition from the initial to final state for
every possible configuration.
epsilonArr : np.ndarray[N,d,d]
The uniform strain tensor that minimizes D2 for every possible configuration
Examples
--------
See `test` folder in [standalone repository](https://github.com/Jfeatherstone/D2min).
References
----------
[1] <NAME>., & <NAME>. (1998). Dynamics of viscoplastic deformation in amorphous solids. Physical Review E, 57(6), 7192–7205.
[https://doi.org/10.1103/PhysRevE.57.7192](https://doi.org/10.1103/PhysRevE.57.7192)
"""
# The number of particles and spatial dimension
N, d = np.shape(initialCenters)
# In the case that a single reference particle is defined, we just calculate exactly as described in the paper
if not isinstance(refParticleIndex, list) and not isinstance(refParticleIndex, np.ndarray) and refParticleIndex != None:
# Determine which particles are neighbors using the parameters supplied on the function call
initialNeighbors = None
finalNeighbors = None
# If an interaction radius is supplied, we have to find the particles that are closest
# in the initial state using a kd-tree
if interactionRadius != None:
kdTree = KDTree(initialCenters)
ind = kdTree.query_radius([initialCenters[refParticleIndex]], interactionRadius)
            # Remove the reference particle itself from its own neighbor list
            # (query_radius does not sort by distance, so filter by index rather than dropping the first element)
            # And we have to sort so we don't mess up the order
            ind = np.sort(ind[0][ind[0] != refParticleIndex])
# Make sure we actually found some particles
if len(ind) == 0:
print('Warning: no neighbors found within supplied interaction radius! Defaulting to all other particles')
else:
initialNeighbors = initialCenters[ind]
finalNeighbors = finalCenters[ind]
# If a fixed number of neighbors is provided instead, we find those particles again with
# a kd-tree
elif interactionNeighbors != None:
kdTree = KDTree(initialCenters)
# Make sure we don't look for more particles than are in our system
dist, ind = kdTree.query([initialCenters[refParticleIndex]], min(interactionNeighbors+1, N))
# We ignore the first element since it will always be the particle itself
            # And we have to sort so we don't mess up the order
ind = np.sort(ind[0][1:])
initialNeighbors = initialCenters[ind]
finalNeighbors = finalCenters[ind]
# If no information is supplied, or we ran into issues, use every other particle
if not isinstance(initialNeighbors, list) and not isinstance(initialNeighbors, np.ndarray):
initialNeighbors = initialCenters[np.arange(N) != refParticleIndex]
finalNeighbors = finalCenters[np.arange(N) != refParticleIndex]
# Now onto the actual D2min calculation
# Bin's original code defined the differences between centers in
# Falk & Langer eq. 2.11 - 2.13 as "bonds"
# We first calculate these bonds using our reference particle index
# Do this by subtracting the ref particle center from the center of every other particle
# Note that you could technically leave in the ref bond, since it will be 0 and not contribute,
# but it is cleaner to just remove it
initialBonds = initialNeighbors - initialCenters[refParticleIndex]
finalBonds = finalNeighbors - finalCenters[refParticleIndex]
# More succinct notation for doing the calculation, from Bin's original code
# Converting to numpy matrices makes matrix multiplication happen automatically
b0 = np.mat(initialBonds)
b = np.mat(finalBonds)
# Calculate the two functions used to minimize D2, X and Y (eq. 2.12 and 2.13 respectively)
X = b0.transpose() * b
Y = b0.transpose() * b0
# Calculate the uniform strain tensor that minimizes D2 (eq. 2.14)
# Note that we don't include the kronecker delta function since it will
# be cancelled out when plugged into the D2min equation (eq. 2.11).
        # Also note that this is actually the transpose of the strain tensor as
# it is defined in the paper, since it makes the matrix multiplication easier
# in the next step
epsilon = inv(Y) * X
# Non-affine part, or the terms that are squared and summed over in eq. 2.11
non_affine = b - b0*epsilon
# The final value
d2min = np.sum(np.square(non_affine))
if normalize:
d2min /= len(initialNeighbors)
# Since we have been working with the transpose of the strain tensor,
# we have to transpose to get the proper one
return (d2min, np.array(epsilon.transpose()))
# If we don't have a reference particle, or we are given multiple, we calculate for each of those
if not isinstance(refParticleIndex, list) and not isinstance(refParticleIndex, np.ndarray):
refParticleIndex = np.arange(N)
# Now calculate for all of those possibilities
d2minArr = np.zeros(len(refParticleIndex))
epsilonArr = np.zeros([len(refParticleIndex), d, d])
for i in range(len(refParticleIndex)):
d2min, epsilon = calculateD2Min(initialCenters, finalCenters, refParticleIndex[i],
interactionRadius, interactionNeighbors, normalize)
d2minArr[i] = d2min
epsilonArr[i] = epsilon
return (d2minArr, epsilonArr)
def vonMisesStrain(uniformStrainTensor):
"""
WIP.
"""
# The number of spatial dimensions
dimension = np.shape(uniformStrainTensor)[0]
# Lagrangian strain matrix
eta = 0.5 * (uniformStrainTensor * uniformStrainTensor.transpose() - np.eye(dimension))
# von-Mises strain
eta_m = 1.0/np.double(dimension) * np.trace(eta)
tmp = eta - eta_m * np.eye(dimension)
eta_s = np.sqrt(0.5*np.trace(tmp*tmp))
return eta_s
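# Usage sketch for calculateD2Min (assumes `initial` and `final` are (N, 2) arrays of
# particle centers tracked between two frames, in matching order):
#
#   d2min, strain = calculateD2Min(initial, final, refParticleIndex=0,
#                                  interactionNeighbors=6)
#   d2min_all, strain_all = calculateD2Min(initial, final, refParticleIndex=None)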
```
#### File: pepe/analysis/ForceSolve.py
```python
import numpy as np
from pepe.preprocess import circularMask, rectMask, ellipticalMask, mergeMasks, upsample, downsample
from pepe.analysis import gSquared, adjacencyMatrix, testForceBalance
from pepe.simulate import genSyntheticResponse
from pepe.utils import outerSubtract
import numba
import copy
from lmfit import minimize, Parameters, fit_report
from lmfit.minimizer import AbortFitException
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
# This dictionary defines the argument data types for
# forceOptimize(), which is used for reading in settings
# from a file
forceOptimizeArgDTypes = {"parametersToFit": list,
"method": str,
"maxEvals": int,
"forceBounds": tuple,
"betaBounds": tuple,
"alphaBounds": tuple,
"forceTolerance": float,
"betaTolerance": float,
"alphaTolerance": float,
"useTolerance": bool,
"returnOptResult": bool,
"allowAddForces": bool,
"allowRemoveForces": bool,
"minForceThreshold": float,
"contactMaskRadius": int,
"newBetaMinSeparation": float,
"newBetaG2Height": float,
"missingForceChiSqrThreshold": float,
"imageScaleFactor": float,
"localizeAlphaOptimization": bool,
"forceBalanceWeighting": float,
"debug": bool}
def initialForceSolve(photoelasticSingleChannel, centers, radii, fSigma, pxPerMeter, contactPadding=10, g2EdgePadding=1, contactG2Threshold=.1, contactMaskRadius=40, neighborEvaluations=4, boundaryMask=None, ignoreBoundary=True, brightfield=False, boundaryDetectionOptions={}, g2Cal=None):
"""
Calculate the approximate forces on each particle based on the photoelastic response.
No optimization/gradient descent is done in this method; this should be used either where
rough estimates of the forces are fine, or as an initial condition for a fitting function.
Heavily based on <NAME>t's PhD thesis (2012) and <NAME>'s PeGS:
https://github.com/jekollmer/PEGS
<NAME>., <NAME>., & <NAME>. (2017). Photoelastic force
measurements in granular materials. Review of Scientific Instruments, 88(5),
051808. https://doi.org/10.1063/1.4983049
Outline of the process:
0. Track particles to get centers and radii (external)
1. Calculate average gradient squared for each particle
2. Find which particles are in contact with each other
3. Determine if there are any boundary contacts (optional)
4. Calculate the position of each (boundary or interparticle) force for each particle
5. Use the average gradient from step 1 to estimate the magnitude of force at each position
Intended to be used as the initial condition for optimization process; see
`pepe.analysis.forceOptimize()` for more information.
When significant noise is present in the images, this method will often way overestimate
the magnitudes of forces (because it is based on the gradient squared). If the guesses
are much too high, it can be better to just set all force magnitudes to a reasonable
flat value, like `.1`.
Parameters
----------
    photoelasticSingleChannel : np.uint8[H,W]
A single channel image in array form of the photoelastic response of particles.
centers : np.ndarray[N,2]
A list of N centers of format [y, x].
radii : np.ndarray[N]
A list of N radii, corresponding to each particle center
contactPadding : int
Maximum difference between distance and sum of radii for which
two particles will still be considered in contact.
g2EdgePadding : int or float
Number of pixels to ignore at the edge of each particle when calculating the average
G^2. If float value < 1 is passed, gradient mask radius will be taken as that percent
of the full particle radius. A value of 0 means no padding is included.
contactG2Threshold : float
The neighbor weight value under which edges will be removed from the network,
as they would be considered to weak to represent anything physical. This will
help remove particle neighbors that are only barely touching, but not transmitting
any real force.
contactMaskRadius : float
The radius of the circular mask applied over a contact to estimate the magnitude
of the force there. Should be smaller than the radius, usually 25% of the radius is fine.
        For higher forces, a larger mask may be needed.
    neighborEvaluations : int
        How many of the closest points to find via the kd tree and test
        as potential contacts. For homogeneous circles, or approximately
        homogeneous (< 2:1 size ratio), 4 should be plenty.
boundaryMask : np.uint8[H,W]
        Optional mask removing unnecessary parts of the image that is used
to detect boundaries. Locations with a value of 0 will be assumed to be
solid walls, that a particle could potentially be in contact with. If
not provided, or set to None, system will be assumed to be in a rectangular
box defined the most extreme top/bottom/left/right particles.
ignoreBoundary : bool
Whether to skip searching for contacts with the boundary. This takes effect
regardless of whether a boundary mask is passed as a parameter.
brightfield : bool
Whether the photoelastic response is seen through a brightfield (True)
polariscope or darkfield (False) polariscope.
boundaryDetectionOptions : kwargs
Dictionary of kwargs to be passed to the `detectWallContacts` method, assuming
boundaries are being included in the solver. If none are provided, relevant
values are carried over from the kwargs for this method (`contactPadding`,
`g2EdgePadding`, `contactMaskRadius`, `contactG2Threshold`).
g2Cal : float
Gradient squared to force calibration value. When calling this function many
times, it is recommended to calculate this value once externally
and pass it in. If left as None, it will be recalculated internally.
Returns
-------
forceGuessArr : np.object[N,F(N)]
Triangular array of force magnitude guesses for N particles with F(N) forces each
(could be different number of forces per particle).
alphaGuessArr : np.object[N,F(N)]
Triangular array of alpha angle guesses for N particles with F(N) forces each
(could be different number of forces per particle).
betaGuessArr : np.object[N,F(N)]
Triangular array of beta angle guesses for N particles with F(N) forces each
(could be different number of forces per particle).
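Examples
--------
A minimal usage sketch; `peImage`, `centers`, and `radii` are assumed to come from
earlier preprocessing/tracking steps, and the `fSigma` and `pxPerMeter` values below
are placeholders rather than recommended settings:
    forceGuess, alphaGuess, betaGuess = initialForceSolve(
        peImage, centers, radii, fSigma=140., pxPerMeter=10000.,
        contactPadding=10, contactMaskRadius=40, brightfield=False)
The returned triangular arrays are intended to be refined, particle by particle,
with `pepe.analysis.forceOptimize()`.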
"""
# We are passed centers and radii
numParticles = len(centers)
# This should not include any channels, and should just be h x w
imageSize = photoelasticSingleChannel.shape
# Triangular arrays are a little wonky, but this type of initialization works
betaGuessArr = [np.empty(0) for i in range(numParticles)]
# Find neighbors
# (This is the unweighted adjacency matrix, so a 1 if two particles are neighbors, 0 otherwise)
# np.eye is the identity, which removes all diagonal "contacts" (a particle with itself)
adjMat = adjacencyMatrix(centers, radii, contactPadding, neighborEvaluations) - np.eye(numParticles)
# This includes duplicates, but that is ok because as long as we keep
# the index order consistent, each force will show up only once for each particle
# (but twice overall, which is what we want)
contacts = np.transpose(np.where(adjMat))
# Each pair of indices
for p in contacts:
betaGuessArr[p[0]] = np.append(betaGuessArr[p[0]], np.arctan2(centers[p[1]][1] - centers[p[0]][1], centers[p[1]][0] - centers[p[0]][0]))
# Find potential wall contacts (optional)
if not ignoreBoundary:
# If no boundary is given, but ignoreBoundary = False, we just make a box around
# our system
if boundaryMask is None:
# Working in image conventions, so top left corner is 0,0
# So y value increases as you move downwards
topLeftCorner = np.array([np.min(centers[:,0] - radii), np.min(centers[:,1] - radii)])
dimensions = np.array([np.max(centers[:,0] + radii) - topLeftCorner[0],
np.max(centers[:,1] + radii) - topLeftCorner[1]])
boundaryMask = rectMask(photoelasticSingleChannel.shape, topLeftCorner, dimensions, channels=None)
# We want to carry a few arguments over to the boundary detection
# if they haven't been explicitly provided
# Copy first so we don't mutate the caller's dict (or the shared default value)
boundaryDetectionOptions = dict(boundaryDetectionOptions)
if 'contactPadding' not in boundaryDetectionOptions:
    boundaryDetectionOptions["contactPadding"] = contactPadding
if 'g2EdgePadding' not in boundaryDetectionOptions:
    boundaryDetectionOptions["g2EdgePadding"] = g2EdgePadding
if 'contactMaskRadius' not in boundaryDetectionOptions:
    boundaryDetectionOptions["contactMaskRadius"] = contactMaskRadius
if 'contactG2Threshold' not in boundaryDetectionOptions:
    boundaryDetectionOptions["contactG2Threshold"] = contactG2Threshold
# See detectWallContacts for more information
numWallContacts, wallBetaArr, wallG2AvgArr = detectWallContacts(centers, radii, boundaryMask, photoelasticSingleChannel, **boundaryDetectionOptions)
# Now merge these contacts with the interparticle ones
for i in range(numParticles):
for j in range(numWallContacts[i]):
betaGuessArr[i] = np.append(betaGuessArr[i], wallBetaArr[i][j])
# Alpha is very easy: we just assume all forces are radial
# This creates an empty array the same shape as the beta one,
# but with all of the values set to 0 (again, triangular arrays are wonky)
alphaGuessArr = np.zeros(len(betaGuessArr), dtype='object')
for i in range(len(alphaGuessArr)):
alphaGuessArr[i] = np.zeros(len(betaGuessArr[i]))
# Initialize force the same way, but this will end up with
# actual values
forceGuessArr = np.zeros(len(betaGuessArr), dtype='object')
for i in range(len(forceGuessArr)):
forceGuessArr[i] = np.zeros(len(betaGuessArr[i]))
# Calculate G^2 of the image
gSqr = gSquared(photoelasticSingleChannel)
# Find G2 calibration value
# Mean of the radii should be fine, though TODO would be a good idea to have options here
if g2Cal is None:
g2Cal = g2ForceCalibration(fSigma, np.mean(radii), pxPerMeter, brightfield=brightfield)
# Figure out how much of the particle we will be calculating the g2 over
if g2EdgePadding < 1. and g2EdgePadding > 0:
g2MaskRadii = radii * g2EdgePadding
elif g2EdgePadding >= 1:
g2MaskRadii = radii - g2EdgePadding
else:
g2MaskRadii = radii
for i in range(numParticles):
g2Mask = circularMask(photoelasticSingleChannel.shape, centers[i], g2MaskRadii[i])[:,:,0]
# This is the average g2 of the whole particle
avgGSqr = np.sum(gSqr * g2Mask) / np.sum(g2Mask)
# Now allocate this force to each contact
contactG2Arr = np.zeros(len(forceGuessArr[i]))
# We have to find the average gsqr at each contact
for j in range(len(forceGuessArr[i])):
contactMask = circularMask(photoelasticSingleChannel.shape,
centers[i] + radii[i]*np.array([np.cos(betaGuessArr[i][j]), np.sin(betaGuessArr[i][j])]),
contactMaskRadius)[:,:,0]
# Now make sure it stays within the particle
contactMask = np.int16((contactMask + g2Mask) == 2)
# This is average g2 of each contact
contactG2Arr[j] = np.sum(gSqr * contactMask) / np.sum(contactMask)
forceGuessArr[i][:] = avgGSqr * contactG2Arr / np.sum(contactG2Arr) / g2Cal
# while (!solved)
# Few iterations of least squares for each remaining particle
# TODO
# Find the ones with the least error, and perform a many more iterations
# TODO
# Apply force balance to those particles (optional)
# TODO
# Fix those contacts, and carry over the force to their neighbors
# TODO
# Remove fixed particles from the queue
# TODO
return forceGuessArr, alphaGuessArr, betaGuessArr
def forceOptimize(forceGuessArr, betaGuessArr, alphaGuessArr, radius, center, realImage, fSigma, pxPerMeter, brightfield, parametersToFit=['f', 'a'], method='nelder', maxEvals=300, forceBounds=(0, 5), betaBounds=(-np.pi, np.pi), alphaBounds=(-np.pi/2, np.pi/2), forceTolerance=.5, betaTolerance=.2, alphaTolerance=.1, useTolerance=True, returnOptResult=False, allowAddForces=True, allowRemoveForces=True, minForceThreshold=.01, contactMaskRadius=30, newBetaMinSeparation=.4, newBetaG2Height=.0005, missingForceChiSqrThreshold=2.1e8, imageScaleFactor=1, localizeAlphaOptimization=True, forceBalanceWeighting=.2, debug=False):
"""
Optimize an initial guess for the forces acting on a particle using
a nonlinear minimization function.
Parameters
----------
forceGuessArr : np.ndarray[Z]
The initial guess for magnitudes of each force that is acting on the particle.
betaGuessArr : np.ndarray[Z]
The initial guess for the positional angles where each force is acting (in radians).
alphaGuessArr : np.ndarray[Z]
The initial guess for the directional angles for each force acting on the particle (in radians).
radius : float
The radius of the particle, in pixels.
center : [int, int]
The coordinates of the center, [y,x], of the particle.
realImage : np.ndarray[H,W]
The photoelastic channel that the optimizer will compare against. Can be a composite image
including other particles, since a mask will be applied around the given particle.
parametersToFit : ['f'[[, 'b'], 'a']]
Which parameters should be allowed to be varied in the process of minimizing the function; 'f' for
force magnitude, 'b' for beta positional angle, 'a' for alpha directional angle. Must include at least one.
Can also specify to perform multiple optimizations on different variables by providing a 2D list of
parameters keys (any of 'f', 'a', 'b') e.g. [ ['f', 'b'], ['f', 'a'] ] will first optimize the force
and beta values, then optimize the force and alpha values. When performing multiple optimizations,
the result of the previous one is used as initial condition for the following one, though the residual
array includes information about all optimizations.
Values for `maxEvals`, `forceTolerance`, `betaTolerance`, `alphaTolerance`, and `method` can be provided as lists of
acceptable values to specify different options for each optimization e.g. `method=['nelder', 'cobyla']`
will use the Nelder-Mead optimization scheme on the first pass, and then COBYLA on the second one. If
a single value is provided for any of these parameters, that will be used for all optimizations.
method : str
The method to use for optimization. See wiki page on Force Solver for more information on selecting
an appropriate one. Generally any method from [lmfit](https://lmfit.github.io/lmfit-py/fitting.html#the-minimize-function)
can be used, but the following tend to give the best results:
- `lm` - Levenberg-Marquardt
- `least_squares` - Trust region reflective
- `nelder` - Nelder-Mead
Can be provided as a list of values when performing multiple optimizations; see `parametersToFit`.
maxEvals : int
The maximum number of function evaluations before the minimizer exits. Convergence can occur before
this number of evaluations, but process will be interrupted at this value regardless of whether
or not the result has converged.
Can be provided as a list of values when performing multiple optimizations; see `parametersToFit`.
forceBounds : (float, float)
The upper and lower limits for the values of force magnitudes that the minimizer can explore,
assuming that `useTolerance` is set to False.
betaBounds : (float, float)
The upper and lower limits for the values of beta that the minimizer can explore,
assuming that `useTolerance` is set to False.
alphaBounds : (float, float)
The upper and lower limits for the values of alpha that the minimizer can explore,
assuming that `useTolerance` is set to False.
forceTolerance : float
If `useTolerance` is set to True, this value will be used to calculate the bounds for each
force individually, as the initial value minus this tolerance, and the initial value plus
this tolerance.
Can be provided as a list of values when performing multiple optimizations; see `parametersToFit`.
betaTolerance : float
If `useTolerance` is set to True, this value will be used to calculate the bounds for each
beta individually, as the initial value minus this tolerance, and the initial value plus
this tolerance.
Can be provided as a list of values when performing multiple optimizations; see `parametersToFit`.
alphaTolerance : float
If `useTolerance` is set to True, this value will be used to calculate the bounds for each
alpha individually, as the initial value minus this tolerance, and the initial value plus
this tolerance.
Can be provided as a list of values when performing multiple optimizations; see `parametersToFit`.
useTolerance : bool
Whether to calculate the bounds for each parameters as the initial value plus/minus a
certain tolerance (True) or to use the set intervals provided via the `forceBounds`,
`betaBounds`, and `alphaBounds` parameters (False).
returnOptResult : bool
Whether to return the optimizer result object, which includes all parameters values and
potentially uncertainties (True) or just the optimized values of the forces, betas, alphas,
and residuals (False). Note that if multiple optimizations are being performed, only the
final result object will be returned.
allowAddForces : bool
Whether or not to allow the optimization routine to attempt to detect missing forces,
and add them in. This detection is done by examining the gradient squared of the
photoelastic image provided to the method (`realImage`). If a force is added, the
last optimization routine will be run again, with the added ability to vary the
contact position, beta (if this was not already the case).
allowRemoveForces : bool
Whether or not to allow the optimization routine to remove forces that have a
magnitude below a certain threshold (see `minForceThreshold`).
minForceThreshold : float
The minimizer will automatically remove any forces whose magnitude is lower than
this value between fittings and after the final one.
contactMaskRadius : float
The radius in pixels of the circular mask applied over contacts, either during localized
alpha optimization (see `localizeAlphaOptimization`) or detection of missing
contacts (see `allowAddForces`).
newBetaMinSeparation : float
The minimum separation for a potential new contact (see `allowAddForces`) to be
considered as a real force to be added. Larger values can prevent doubling up
on forces when there is really only a single, extended contact point.
newBetaG2Height : float
The threshold for average gradient squared value in a localized region around
a potential new contact.
missingForceChiSqrThreshold : float
The minimum chi squared value (as calculated by the optimization routine; analogous to error)
above which the method will attempt to detect missing forces.
Found by experimentation.
localizeAlphaOptimization : bool
When optimizing for the incident angle (alpha) alone, the effect of varying
this value can be very subtle across the entire particle, with the largest
effect appearing in a small region around the contact. If this value is `True`,
optimizations only for alpha (done by passing `parametersToFit=['a']`, or similar)
will mask the particle except for a small region around the contact.
imageScaleFactor : int > 1 or positive float < 1
Factor by which to scale the real image when optimizing. An integer larger than 1 will
lead to the image being upsampled, and a float less than 1 (but greater than 0) will lead
to the image being downsampled.
Must correspond to an integer up- or downscale factor; for upscaling this is not an issue, but
downscale factors will be rounded if they are not the exact inverse of an integer, e.g. a value of `.35`
will result in downsampling by a factor of 2 (since `int(1/.35) = 2`), not 2.86.
forceBalanceWeighting : float
If set to some non-zero and non-None value, the optimization cost will include a
contribution depending on how badly the ensemble of forces satisfy force balance.
debug : bool
Whether or not to print out status statements as the optimization is performed.
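Examples
--------
A sketch of refining the initial guesses for a single particle `i`, using the output
of `initialForceSolve()`; the numerical values below are placeholders:
    optForces, optBetas, optAlphas, res = forceOptimize(
        forceGuess[i], betaGuess[i], alphaGuess[i], radii[i], centers[i],
        peImage, fSigma=140., pxPerMeter=10000., brightfield=False,
        parametersToFit=[['f'], ['f', 'a']], method='nelder', maxEvals=300)
Passing a 2D list for `parametersToFit` runs two optimization passes here: first over
the force magnitudes alone, then over magnitudes and alphas together.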
"""
# If we are passed a 2d list of parametersToFit, that means we
# are to perform multiple minimizations, likely with different
# parameters being optimized each time.
# It is easier to set up this method to run for an arbitrary number
# of optimizations, even though most of the time that number of
# optimizations will be one
# So if we aren't given a 2d list structure, we have to make one
if type(parametersToFit[0]) is not list:
parametersToFitList = [parametersToFit]
else:
parametersToFitList = parametersToFit
numFits = len(parametersToFitList)
# Different numbers of evaluations can be provided for each optimization
# or just the one value can be used for all of them
if type(maxEvals) is not list:
maxEvalsList = [maxEvals for i in range(numFits)]
else:
maxEvalsList = maxEvals
# The different minimizations can use different methods
if type(method) is not list:
methodList = [method for i in range(numFits)]
else:
methodList = method
# Same deal for all of the tolerances (we don't do bound intervals, since tolerances
# are the recommended way to handle parameters bounds)
if type(forceTolerance) is not list:
forceTolList = [forceTolerance for i in range(numFits)]
else:
forceTolList = forceTolerance
if type(betaTolerance) is not list:
betaTolList = [betaTolerance for i in range(numFits)]
else:
betaTolList = betaTolerance
if type(alphaTolerance) is not list:
alphaTolList = [alphaTolerance for i in range(numFits)]
else:
alphaTolList = alphaTolerance
# Make sure everything is the same length
assert len(parametersToFitList) == numFits, 'Invalid parametersToFit provided'
assert len(maxEvalsList) == numFits, 'Invalid maxEvals provided'
assert len(methodList) == numFits, 'Invalid method provided'
assert len(forceTolList) == numFits, 'Invalid forceTol provided'
assert len(betaTolList) == numFits, 'Invalid betaTol provided'
assert len(alphaTolList) == numFits, 'Invalid alphaTol provided'
# We need to keep track of the best set of parameters externally from
# the optimization routine, in the case that the minimize() function
# fails.
global residuals, bestResidual, bestParams
residuals = []
bestResidual = 1e16 # Arbitrary large number that will be overwritten
bestParams = None
if imageScaleFactor > 1:
# Make sure we have an integer factor
realScaleFactor = int(imageScaleFactor)
scaledImage = upsample(realImage, realScaleFactor)
elif imageScaleFactor < 1:
# Make sure we have the inverse of an integer factor
realScaleFactor = 1/float(int(1/imageScaleFactor))
scaledImage = downsample(realImage, int(1/imageScaleFactor))
else:
scaledImage = realImage
realScaleFactor = 1
scaledRadius = np.int64(radius * realScaleFactor)
scaledCenter = np.array(center * realScaleFactor, dtype=np.int64)
scaledPxPerMeter = np.int64(pxPerMeter * realScaleFactor)
scaledContactMaskRadius = np.int64(contactMaskRadius * realScaleFactor)
scaledParticleArea = scaledRadius**2 * np.pi
# Setup our function based on what parameters we are fitting
# We want to avoid any if statements within the function itself, since
# that will be evaluated many many times.
# lmfit has a nice setup in that you can denote whether a variable can be
# changed or not, which means we don't actually have to change which variables
# are passed to the function.
def objectiveFunction(params, trueImage, z, radius, center, mask):
global residuals, bestResidual, bestParams
forceArr = np.array([params[f"f{j}"] for j in range(z)])
betaArr = np.array([params[f"b{j}"] for j in range(z)])
alphaArr = np.array([params[f"a{j}"] for j in range(z)])
synImage = genSyntheticResponse(forceArr, alphaArr, betaArr, fSigma, scaledRadius, scaledPxPerMeter, brightfield, imageSize=scaledImage.shape, center=scaledCenter, mask=mask)
# Error component for the response image matching up with the real one
diffResidual = np.sum((synImage - trueImage)**2)
# Error component for the forces being balanced
# We scale by the particle area here, since the one above will be proportional to that
# and we want the two to be somewhat comparable
balanceResidual = np.sum(testForceBalance(forceArr, betaArr, alphaArr)**2) * scaledParticleArea * forceBalanceWeighting
# Save residual for tracking error
residuals.append(diffResidual + balanceResidual)
# Save best configuration outside of minimization
if residuals[-1] < bestResidual:
bestResidual = residuals[-1]
# Deep copy to make sure we aren't just assigning by reference
bestParams = copy.deepcopy(params)
return residuals[-1]
# Mask our real image
particleMask = circularMask(scaledImage.shape, scaledCenter, scaledRadius)[:,:,0]
maskedImage = scaledImage * particleMask
# Since we may be doing multiple fits, we want to set up the initial conditions
# such that each fit uses the result of the previous one (and the first one
# of course uses what is provided to the function)
forceArr = np.array(forceGuessArr.copy())
betaArr = np.array(betaGuessArr.copy())
alphaArr = np.array(alphaGuessArr.copy())
# Now that we have all of that bookkeeping done, we can actually get on
# to doing the minimization
result = None
# Whether we should vary beta only for the next run, in case
# we have added a force
tempVaryBeta = False
i = 0
# We use a while loop here because we may want to repeat a fit if
# we add a new force, which isn't possible in a for loop
while i < numFits:
z = len(forceArr)
# If we lose all of our forces in the optimization,
# the next optimization will fail.
if z == 0:
break
# Our fitting parameters
# if vary kwarg is false, that value won't be fit
params = Parameters()
for j in range(z):
if useTolerance:
# Have to make sure that certain values aren't allowed to go negative, but
# otherwise the bounds are just the initial value +/- the tolerances
params.add(f'f{j}', value=forceArr[j], vary='f' in parametersToFitList[i], min=max(forceArr[j]-forceTolList[i], 0), max=forceArr[j]+forceTolList[i])
params.add(f'b{j}', value=betaArr[j], vary='b' in parametersToFitList[i] or tempVaryBeta, min=max(betaArr[j]-betaTolList[i], -np.pi), max=min(betaArr[j]+betaTolList[i], np.pi))
params.add(f'a{j}', value=alphaArr[j], vary='a' in parametersToFitList[i], min=alphaArr[j]-alphaTolList[i], max=alphaArr[j]+alphaTolList[i])
else:
params.add(f'f{j}', value=forceArr[j], vary='f' in parametersToFitList[i], min=forceBounds[0], max=forceBounds[1])
params.add(f'b{j}', value=betaArr[j], vary='b' in parametersToFitList[i] or tempVaryBeta, min=betaBounds[0], max=betaBounds[1])
params.add(f'a{j}', value=alphaArr[j], vary='a' in parametersToFitList[i], min=alphaBounds[0], max=alphaBounds[1])
# Reset the vary beta bool so we only do it this round
tempVaryBeta = False
# If alpha is the only parameter being adjusted, it can help to look only in a
# localized region around each contact, since changing alpha has such a subtle
# effect on the overall pattern.
if localizeAlphaOptimization and 'a' in parametersToFitList[i] and not 'b' in parametersToFitList[i] and not 'f' in parametersToFitList[i]:
# The *10 is to make sure that all points end up inside of the particle
# eg. if you have two close contacts, and just look for places >= 2, we could
# get a region that satisfies that condition, but isn't actually in the particle
localizedMask = np.copy(particleMask) * 10
for j in range(len(betaArr)):
contactPoint = scaledCenter + scaledRadius * np.array([np.cos(betaArr[j]), np.sin(betaArr[j])])
# Create a mask just over the small area inside of the particle
localizedMask += circularMask(scaledImage.shape, contactPoint, scaledContactMaskRadius)[:,:,0]
# >= 10 + 1 such that the points must be inside the particle *and* near a contact.
# The value 10 is mostly arbitrary, but makes it very unlikely
# that 10 separate contacts would overlap.
localizedMask = (localizedMask >= 11).astype(np.uint8)
else:
# Otherwise we just set value to none, which will be handled by pepe.simulate.genSyntheticResponse()
localizedMask = None
# Sometimes the optimization raises an error when the max number of
# evaluations is reached (and sometimes it doesn't), so rather than using an
# iteration callback, we wrap the call in a try block and fall back on the
# best set of parameters that we track ourselves inside objectiveFunction().
try:
# Now do the optimization
# The fatol kwarg is the minimum change between iterations before the
# fit is considered to have converged. This value is chosen based on an educated guess,
# and it seems to work (worst case, you can just set it really small and run the optimization
# for the max number of evals every time).
result = minimize(objectiveFunction, params,
args=(maskedImage, z, scaledRadius, scaledCenter, localizedMask),
method=methodList[i], max_nfev=maxEvalsList[i], nan_policy='omit', options={"fatol": 1e-2})
# Copy over the new values of the forces, alphas, and betas
for j in range(z):
forceArr[j] = result.params[f"f{j}"]
betaArr[j] = result.params[f"b{j}"]
alphaArr[j] = result.params[f"a{j}"]
# If we run out of function evaluations, that's not too huge a deal,
# so we just grab the best value and move on. If we run into an actual error,
# it shouldn't be swallowed here, since it might be serious, so we re-throw it.
except Exception as e:
if not isinstance(e, AbortFitException):
raise e
if debug:
print(e)
# Otherwise, we take the last good value (since we kept track outside of
# the optimization function)
if bestParams is not None:
for j in range(z):
forceArr[j] = bestParams[f"f{j}"]
betaArr[j] = bestParams[f"b{j}"]
alphaArr[j] = bestParams[f"a{j}"]
else:
# If we don't have any good parameter values, just return the initial
# guess. The arrays are initialized as this value, so we don't need
# to do anything in this case.
pass
# ---------------------
# Detect missing forces
# ---------------------
# If the code detects there is a missing force (no idea how yet)
if result is not None and result.chisqr > missingForceChiSqrThreshold and allowAddForces:
# We sweep around the edge of the particle to see if there
# are any regions that look like they could have a force
# (denoted by a particularly high g2 value, or rather a peak)
testBetaCount = 30
avgG2Arr = np.zeros(testBetaCount)
newBetaArr = np.linspace(-np.pi, np.pi, testBetaCount)
# Calculate all of the g2s around the edge of the particle
gSqr = gSquared(scaledImage)
for j in range(testBetaCount):
contactPoint = scaledCenter + scaledRadius * np.array([np.cos(newBetaArr[j]), np.sin(newBetaArr[j])])
# Create a mask just over the small area inside of the particle
contactMask = circularMask(scaledImage.shape, contactPoint, scaledContactMaskRadius)[:,:,0]
contactMask = (contactMask + particleMask) == 2
avgG2Arr[j] = np.sum(contactMask * gSqr) / np.sum(contactMask)
# Identify any peaks in the average g2s
peakIndArr = find_peaks(avgG2Arr, height=newBetaG2Height)[0]
peakIndArr = np.sort(peakIndArr)
# Make sure that there aren't any artifacts of periodicity
# Usually this isn't actually a problem, because the peak
# finding algorithm requires a proper peak, which can only
# be on one side (but we'll leave it here just in case)
if np.any(np.isin(np.arange(3), peakIndArr)) and np.any(np.isin(np.arange(len(avgG2Arr)-3, len(avgG2Arr)), peakIndArr)):
# Remove last entry
peakIndArr = peakIndArr[:-1]
peakBetaArr = newBetaArr[peakIndArr]
# Now we have a list of likely points, we need to see if our original
# list is missing any of these.
differenceArr = np.abs(np.subtract.outer(peakBetaArr, betaArr))
# Check to see if there is a new peak that doesn't have
# a previous force close to it
for j in range(len(peakBetaArr)):
if np.min(differenceArr[j]) > newBetaMinSeparation and np.max(differenceArr[j]) < 2*np.pi - newBetaMinSeparation:
# Add the new force
betaArr = np.append(betaArr, peakBetaArr[j])
forceArr = np.append(forceArr, .1) # Value isn't too important here
alphaArr = np.append(alphaArr, 0.)
# If we have added a force, we should run the optimization again, and see if it improves
if len(forceArr) > z:
if debug:
print(f'Added {len(forceArr) - z} force(s).')
# We also want to make sure we're allowed to vary beta on the next iteration
tempVaryBeta = True
# This skips the i += 1 at the end of the loop, and makes the optimization run again
continue
# ------------------------------------
# Remove forces that don't do anything
# ------------------------------------
if len(forceArr[forceArr < minForceThreshold]) > 0 and allowRemoveForces:
# Remove forces that aren't actually doing anything
betaArr = betaArr[forceArr > minForceThreshold]
alphaArr = alphaArr[forceArr > minForceThreshold]
# This one has to be done last for the other indexing to work
forceArr = forceArr[forceArr > minForceThreshold]
if debug:
print(f'Removed {z - len(forceArr)} force(s).')
# Iterate (since we have a while not a for)
i += 1
if returnOptResult:
return result
else:
return forceArr, betaArr, alphaArr, residuals
@numba.jit(nopython=True)
def g2ForceCalibration(fSigma, radius, pxPerMeter=1., g2Padding=1, alphaArr=np.array([0., 0.]), betaArr=np.array([0., -np.pi]), forceSteps=100, forceBounds=np.array([.01, 1.]), brightfield=True):
"""
Use synthetic photoelastic response to fit the conversion constant between
gradient squared value and force (in Newtons), assuming a linear relationship.
Note that this computes the least squares of force (N) vs **average** gradient
squared. The Matlab just uses the sum of gradient squared, but this is not invariant
under changes of resolution, so I have opted to use the average. Because of this, a
calibration value calculated in Matlab will **not** work here unless you divide out
the number of points it is summed over first.
Parameters
----------
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light
and other material property (C).
radius : float
Radius of the particle that is being simulated in pixels. If pxPerMeter is
not provided (or set to 1), this value will be assumed to already have been converted to meters.
pxPerMeter : float
The number of pixels per meter for the simulated image. If not provided, or set to 1, the radius
value will be assumed to already have been converted to meters.
g2Padding : int
Number of pixels to ignore at the edge of the particle. We don't expect any boundary
artifacts in our synthetic data, but we will eventually do this for the real data,
so it is important to keep the size of the particles the same throughout.
alphaArr : np.ndarray[Z]
Array of angles representing force contact angles.
betaArr : np.ndarray[Z]
Array of angles representing force contact positions.
forceSteps : int
The number of points to use for fitting our line of g^2 vs. force.
forceBounds : [float, float]
The minimum and maximum value of force applied to calculate the calibration value.
brightfield : bool
Whether the intensity should be simulated as seen through a brightfield (True)
polariscope or darkfield (False) polariscope.
Returns
-------
g2ForceSlope : float
Slope found via linear regression to convert average g^2 to force.
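Examples
--------
A sketch of computing the calibration value once and reusing it across frames
(numerical values are placeholders):
    g2Cal = g2ForceCalibration(fSigma=140., radius=np.mean(radii),
                               pxPerMeter=10000., brightfield=False)
The result can then be passed to `initialForceSolve()` via its `g2Cal` argument, so
it is not recomputed on every call.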
"""
# The magnitude of the forces that will be acting at each step
forceValues = np.linspace(forceBounds[0], forceBounds[1], forceSteps)
gSqrAvgArr = np.zeros(forceSteps)
imageSize = (np.int16(radius*2)+11, np.int16(radius*2)+11)
center = np.array([imageSize[0]/2, imageSize[1]/2], dtype=np.int64)
particleMask = circularMask(imageSize, center, radius - g2Padding)[:,:,0]
# The contact mask is a circle placed over the edge of the particle where the force is applied
#contactMask1 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[0]), imageSize[1]/2 + radius*np.sin(betaArr[0])]),
# contactMaskRadius)[:,:,0]
#contactMask2 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[1]), imageSize[1]/2 + radius*np.sin(betaArr[1])]),
# contactMaskRadius)[:,:,0]
# Get rid of the parts outside of the circle
#contactMask1 = contactMask1 * particleMask
#contactMask2 = contactMask2 * particleMask
# Add them together
#contactMask = contactMask1 + contactMask2
# To divide out the number of points
#numPoints = np.sum(contactMask)
numPoints = np.sum(particleMask)
for i in range(forceSteps):
# Assume two forces acting on the particle with equal magnitude
forceArr = np.array([forceValues[i], forceValues[i]])
# Create a synthetic photoelastic response
particleImg = genSyntheticResponse(forceArr, alphaArr, betaArr, fSigma, radius, pxPerMeter, brightfield, imageSize, center)
# Calculate the gradient
gSqr = gSquared(particleImg)
# Multiply by the mask to avoid weird edge effects
gSqrAvgArr[i] = np.sum(gSqr * particleMask) / numPoints
# Now fit a straight line to the data
# Create col vector of forces
# Note that we multiply by 2 here
# Since we are calculating g2 over the entire particle, we should
# compare it to the force on the entire particle, not just one side.
forceColMat = forceValues.reshape((forceSteps, 1)) * 2
# Perform least squares
solution = np.linalg.lstsq(forceColMat, gSqrAvgArr)
return solution[0][0]
def g2ForceCalibrationDebug(fSigma, radius, pxPerMeter, alphaArr=np.array([0., 0.]), betaArr=np.array([0., -np.pi]), forceSteps=100, forceBounds=np.array([.01, 1.]), brightfield=True):
# TODO: Also find the point at which the linear fit breaks down, so we can
# return the max amount of force that can be converted using this method
# before it stops working
"""
Use synthetic photoelastic response to fit the conversion constant between
gradient squared value and force (in Newtons), assuming a linear relationship.
Returns the X and Y values that a line would be fit to, instead of the
fit parameters themselves. See `pepe.analysis.g2ForceCalibration()`.
Note that this computes the least squares of force (N) vs **average** gradient
squared. The Matlab implementation just uses the sum of gradient squared, but this is not invariant
under changes of resolution, so I have opted to use the average. Because of this, a
calibration value calculated in Matlab will **not** work here unless you divide out
the number of points it is summed over first.
Parameters
----------
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light
and other material property (C).
radius : float
Radius of the particle that is being simulated in pixels. If pxPerMeter is
not provided (or set to 1), this value will be assumed to already have been converted to meters.
pxPerMeter : float
The number of pixels per meter for the simulated image. If not provided, or set to 1, the radius
value will be assumed to already have been converted to meters.
alphaArr : np.ndarray[Z]
Array of angles representing force contact angles.
betaArr : np.ndarray[Z]
Array of angles representing force contact positions.
forceSteps : int
The number of points to use for fitting our line of g^2 vs. force.
forceBounds : [float, float]
The minimum and maximum value of force applied to calculate the calibration value.
brightfield : bool
Whether the intensity should be simulated as seen through a brightfield (True)
polariscope or darkfield (False) polariscope.
Returns
-------
forceArr : np.ndarray[forceSteps]
Force applied (in Newtons) at each point
g2AvgArr : np.ndarray[forceSteps]
Resultant average gradient squared across the particle at each point.
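Examples
--------
A sketch of visually checking how linear the g^2 vs. force relation is
(placeholder values; plotting requires matplotlib):
    import matplotlib.pyplot as plt
    forceVals, g2AvgVals = g2ForceCalibrationDebug(fSigma=140., radius=80.,
                                                   pxPerMeter=10000., brightfield=False)
    plt.plot(forceVals, g2AvgVals, 'o')
    plt.xlabel('Total force [N]')
    plt.ylabel('Average $G^2$')
    plt.show()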
"""
# The magnitude of the forces that will be acting at each step
forceValues = np.linspace(forceBounds[0], forceBounds[1], forceSteps)
gSqrAvgArr = np.zeros(forceSteps)
imageSize = np.array([int(radius*2.2), int(radius*2.2)]) # np.int is removed in newer numpy versions
particleMask = circularMask(imageSize, imageSize/2, radius)[:,:,0]
# The contact mask is a circle placed over the edge of the particle where the force is applied
#contactMask1 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[0]), imageSize[1]/2 + radius*np.sin(betaArr[0])]),
# contactMaskRadius)[:,:,0]
#contactMask2 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[1]), imageSize[1]/2 + radius*np.sin(betaArr[1])]),
# contactMaskRadius)[:,:,0]
# Get rid of the parts outside of the circle
#contactMask1 = contactMask1 * particleMask
#contactMask2 = contactMask2 * particleMask
# Add them together
#contactMask = contactMask1 + contactMask2
# To divide out the number of points
#numPoints = np.sum(contactMask)
numPoints = np.sum(particleMask)
for i in range(forceSteps):
# Assume two forces acting on the particle with equal magnitude
forceArr = np.array([forceValues[i], forceValues[i]])
# Create a synthetic photoelastic response
particleImg = genSyntheticResponse(forceArr, alphaArr, betaArr, fSigma, radius, pxPerMeter, brightfield, imageSize, imageSize/2)
# Calculate the gradient
gSqr = gSquared(particleImg)
# Multiply by the mask to avoid weird edge effects
gSqrAvgArr[i] = np.sum(gSqr * particleMask) / numPoints
# We multiply by 2 because that is the total force acting on the particle
return 2*forceValues, gSqrAvgArr
@numba.jit(nopython=True)
def detectWallContacts(centers, radii, boundaryMask, photoelasticSingleChannel=None, contactPadding=10, g2EdgePadding=.95, contactG2Threshold=1e-4, angleClusterThreshold=.2, contactMaskRadius=50, extendedContacts=False, maxContactExtent=.75):
"""
Detect potential particle contacts with the wall.
Parameters
----------
centers : np.ndarray[N,2]
A list of N centers of format [y, x].
radii : np.ndarray[N]
A list of N radii, corresponding to each particle center
boundaryMask : np.uint8[H,W]
Mask removing unnecessary parts of the image, which is used
to detect boundaries. Locations with a value of 0 will be assumed to be
solid walls that a particle could potentially be in contact with.
photoelasticSingleChannel : np.uint8[H,W]
A single channel image in array form of the photoelastic response of particles.
Is used to determine if a contact is actually force bearing or not. If not
provided, all potential contacts will be returned.
contactPadding : int
Maximum distance (in pixels) between a particle's edge and the wall
that will still be tested as a wall contact.
g2EdgePadding : int or float
Number of pixels to ignore at the edge of each particle when calculating the average
G^2. If float value < 1 is passed, gradient mask radius will be taken as that percent
of the full particle radius. A value of 0 means no padding is included.
contactG2Threshold : float
The neighbor weight value under which edges will be removed from the network,
as they would be considered too weak to represent anything physical. This will
help remove particle neighbors that are only barely touching, but not transmitting
any real force.
angleClusterThreshold : float
The minimum difference in consecutive angle (beta) for which two wall contacts
will be considered unique (and not merged into the same contact).
contactMaskRadius : int
The size of the circular mask that is used to determine average gradient squared
value around detected contacts.
extendedContacts : bool
Whether or not to break up larger potential contacts in multiple. See `maxContactExtent`.
maxContactExtent : float
The maximum range of angles that can be included in a single cluster. If any particular
cluster exceeds this value, it will be divided up into multiple new clusters.
Returns
-------
numWallContacts : np.ndarray[N]
The number of wall contacts for each particle (denoted as Z_i in other return values).
betaArr : np.ndarray[N, Z_i]
A list of angles for each particle in which there likely is a wall contact.
contactG2Arr : np.ndarray[N, Z_i]
A list of average gradient squared values for each wall contact, to be
multiplied by the g2 calibration value to get the initial force magnitude guess.
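Examples
--------
A sketch of finding likely wall contacts given a precomputed boundary mask;
`boundaryMask` and `peImage` are assumed to already exist:
    numWallContacts, wallBetas, wallG2 = detectWallContacts(
        centers, radii, boundaryMask, photoelasticSingleChannel=peImage,
        contactPadding=10, contactMaskRadius=50)
`wallBetas[i]` then gives the angular positions of the likely wall contacts for
particle `i`, which `initialForceSolve()` merges with the interparticle contacts.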
"""
# Figure out how much of the particle we will be calculating the g2 over
g2MaskRadii = radii.astype(np.float64)
if g2EdgePadding < 1. and g2EdgePadding > 0:
g2MaskRadii = (radii.astype(np.float64) * g2EdgePadding)
elif g2EdgePadding >= 1:
g2MaskRadii = (radii - g2EdgePadding).astype(np.float64)
# Things that will be returned
numWallContacts = np.zeros(len(centers), dtype=np.int16)
betaArr = []
contactG2Arr = []
# Iterate over each particle
for i in range(len(centers)):
# Create a mask slightly larger than the particle
wallContactMask = circularMask(boundaryMask.shape, centers[i], radii[i] + contactPadding)
# Find the overlap (similarity) of this enlarged particle mask and the boundary mask
invBoundaryMask = 1 - boundaryMask
similarity = np.floor((invBoundaryMask + wallContactMask).astype(np.double)/2).astype(np.uint8)[:,:,0]
# Determine if there is a chance of a wall contact
if np.sum(similarity) > 0:
# ------------------------------
# Find every point that overlaps
# ------------------------------
# Normally we could just do this:
#points = np.transpose(np.where(cMask > 0))
# but numba doesn't quite like this way, so we have to be
# a little more creative
whereIndices = np.where(similarity > 0)
points = np.zeros((len(whereIndices[0]), 2), dtype=np.int16)
# There is a chance that these indices are backwards, but
# because we have rotational symmetry, it doesn't really matter...
# BUT if there is ever some weird anisotropy bug or something,
# try switching these indices
points[:,0] = whereIndices[0]
points[:,1] = whereIndices[1]
# ------------------------------
# Cluster them based on position
# ------------------------------
# Convert to angles
angles = np.arctan2(points[:,1] - centers[i][1], points[:,0] - centers[i][0])
# Sort the angles, since they may not exactly be in order
sortedIndices = np.argsort(angles)
angles = np.sort(angles)
# To apply it to the original points, we would have to reverse the sorting
# we did earlier, or much easier, just sort the original points the same way
points = points[sortedIndices]
# Calculate the difference between angles
# There should be a jump once we move to a new cluster
dAngle = angles[1:] - angles[:-1]
# Add 1 because dAngle[k] is the jump between angles[k] and angles[k+1],
# so a new cluster starts at index k+1
# These are the indices of places that have large jumps in angle
clusterSeparation = np.where(dAngle > angleClusterThreshold)[0].astype(np.int16) + 1
# Generate an array with labels for each angle
numClusters = len(clusterSeparation) + 1
labels = np.zeros(len(points))
startingIndex = 0
for j in range(numClusters-1):
labels[startingIndex:clusterSeparation[j]] = j
startingIndex = clusterSeparation[j]
# And the final cluster
labels[startingIndex:] = numClusters-1
# Check to see if we have any artifacts of the periodicity of the angle
# (and fix them by setting the last cluster to be equal to the first one)
#print(angles[0], angles[-1])
if numClusters > 1 and (abs(angles[0] - angles[-1]) < angleClusterThreshold or abs(abs(angles[0] - angles[-1]) - 2*np.pi) < angleClusterThreshold):
labels[labels == np.max(labels)] = 0
numClusters -= 1
# --------------------------------------
# Calculate the centroid of each cluster
# --------------------------------------
clusterCentroids = np.zeros((numClusters, 2))
for j in range(numClusters):
clusterCentroids[j] = [np.mean(points[labels == j][:,0]), np.mean(points[labels == j][:,1])]
# --------------------------------------------------------------
# Calculate the angle with respect to the particle center (beta)
# --------------------------------------------------------------
clusterBetas = np.zeros(numClusters)
for j in range(numClusters):
clusterBetas[j] = np.arctan2(clusterCentroids[j,1] - centers[i][1], clusterCentroids[j,0] - centers[i][0])
# -------------------------------------
# Divide up big clusters (if necessary)
# -------------------------------------
if extendedContacts:
newBetas = np.zeros(0)
for j in range(numClusters):
# First, calculate the extent of the cluster
# This isn't as simple as subtract max from min, because
# of the periodicity, so the most reliable method is as follows
# Locate every unique angle in this cluster (in order)
uniqueBetas = np.sort(np.unique(angles[labels == j]))
# If you only have 1 beta, then clearly we don't need to divide
# this cluster up
if len(uniqueBetas) < 2:
newBetas = np.append(newBetas, clusterBetas[j])
continue
clusterBounds = np.array([np.max(np.array([uniqueBetas[0], uniqueBetas[-1]])), np.min(np.array([uniqueBetas[0], uniqueBetas[-1]]))])
clusterExtent = clusterBounds[0] - clusterBounds[1]
# This is usually a good way to identify that the region
# passes across the top of the circle
if (clusterExtent < .01 or 2*np.pi - clusterExtent < .01) and np.min(uniqueBetas) < 0 and np.max(uniqueBetas) > 0:
#if (clusterBetas[j] < clusterBounds[0] and clusterBetas[j] > clusterBounds[1]):
clusterBounds = [np.max(uniqueBetas[uniqueBetas < 0]), np.min(uniqueBetas[uniqueBetas > 0])]
clusterExtent = 2*np.pi - (clusterBounds[1] - clusterBounds[0])
if clusterExtent > maxContactExtent:
numNewClusters = np.int16(np.ceil(clusterExtent / maxContactExtent))
dBeta = clusterExtent/numNewClusters
newBetas = np.append(newBetas, np.linspace(clusterBetas[j] + clusterExtent/2., clusterBetas[j] - clusterExtent/2., numNewClusters))
else:
newBetas = np.append(newBetas, clusterBetas[j])
#print(newBetas)
clusterBetas = newBetas.copy()
numClusters = len(clusterBetas)
# Now we want to recalculate our centroids, since there are
# potentially some new ones
clusterCentroids = np.zeros((numClusters, 2))
for j in range(numClusters):
clusterCentroids[j] = centers[i] + radii[i] * np.array([np.cos(clusterBetas[j]), np.sin(clusterBetas[j])])
# --------------------------------------------------------------------------
# Apply a mask to get the magnitude of the average g2 value for that contact
# --------------------------------------------------------------------------
# We only do this if we are provided a photoelastic image
clusterAvgG2 = np.zeros(numClusters)
if photoelasticSingleChannel is not None:
# Calculate G2
gSqr = gSquared(photoelasticSingleChannel)
for j in range(numClusters):
# Create an elliptical mask around the area the photoelastic response would
# be in. This is actually more a half ellipse, since we want a reasonably
# wide area around the contact itself, and only a small bit of the center
# of the particle.
#majorAxisDir = clusterCentroids[j] - centers[i]
#majorAxisDir /= np.sqrt(majorAxisDir[0]**2 + majorAxisDir[1]**2)
#ellipseEndpoint = majorAxisDir * 2 * radii[i] + centers[i]
#contactMask = ellipticalMask(photoelasticSingleChannel.shape, centers[i], ellipseEndpoint, radii[i]/2)
# Just kidding, a circular mask seems to work better
contactMask = circularMask(photoelasticSingleChannel.shape, clusterCentroids[j], contactMaskRadius)
# Now cut off the part outside of the particle
contactMask = (contactMask + circularMask(photoelasticSingleChannel.shape, centers[i], g2MaskRadii[i]))[:,:,0] == 2
# Calculate average G2
clusterAvgG2[j] = np.sum(contactMask * gSqr) / np.sum(contactMask)
# ---------------------------
# Save all of the information
# ---------------------------
numWallContacts[i] = len(clusterBetas)
betaArr.append(clusterBetas)
contactG2Arr.append(clusterAvgG2)
else:
# No possible wall contacts
# numWallContacts is initialized at 0, so don't need to set it
# Numba doesn't allow untyped lists, so this is a little trick
# to get an empty list into this array
betaArr.append(np.array([np.double(x) for x in range(0)]))
contactG2Arr.append(np.array([np.double(x) for x in range(0)]))
pass
return numWallContacts, betaArr, contactG2Arr
```
#### File: pepe/analysis/LinearRegions.py
```python
import numpy as np
from scipy.stats import linregress
def determineLinearRegions(data, minLength=.1, minR2=.96, maxSlopeInterceptDiff=.75):
"""
Determine regions of a plot that are approximately linear by performing
linear least-squares on a rolling window.
Parameters
----------
data : array_like
Data within which linear regions are to be identified
minLength : int or float
The minimum length of a linear segment, either as an
integer number of indices, or as a float fraction of the
overall data length.
minR2 : float
The minimum r-squared value for a region to be
considered linear.
maxSlopeInterceptDiff : float
The float percentage difference allowed between slopes
and intercepts of adjacent slices for them to be
considered the same region.
Returns
-------
regionIndices : np.ndarray[N,2]
The start and end indices for the N detected regions.
slopes : np.ndarray[N]
The slope of each region.
intercepts : np.ndarray[N]
The intercept of each region.
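Examples
--------
A small, self-contained sketch on synthetic data made of two linear segments:
    import numpy as np
    data = np.concatenate([np.linspace(0, 1, 100), np.linspace(1, 3, 100)])
    regions, slopes, intercepts = determineLinearRegions(data, minLength=.1)
    # regions[k] gives the [start, end] indices of the k-th detected segment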
"""
if minLength < 1:
minLinSteps = int(len(data)*minLength)
else:
minLinSteps = int(minLength)
inLinearRegion = False
linearRegions = []
slopes = []
intercepts = []
# Perform least squares on a rolling window
i = 0
while i < len(data) - minLinSteps:
xArr = np.arange(i, i+minLinSteps)
# Note: linregress returns the correlation coefficient r, so we square it to
# compare against the documented r-squared threshold
slope, intercept, rValue, p_value, std_err = linregress(xArr, data[i:i+minLinSteps])
if rValue**2 > minR2:
if inLinearRegion:
# Calculate how different new slope is from old one
if np.abs((np.mean(slopes[-1]) - slope) / np.mean(slopes[-1])) < maxSlopeInterceptDiff and np.abs((np.mean(intercepts[-1]) - intercept) / np.mean(intercepts[-1])) < maxSlopeInterceptDiff:
# This is still the same linear region, so we extend the bounds
linearRegions[-1][1] = i+minLinSteps
# And average in the slopes and intercepts
slopes[-1] += [slope]
intercepts[-1] += [intercept]
else:
# Otherwise, we have a new linear region, which we start
# at the end of the other one
i = linearRegions[-1][1]
inLinearRegion = False
continue
else:
# New linear region
linearRegions.append([i, i+minLinSteps])
slopes.append([slope])
intercepts.append([intercept])
inLinearRegion = True
else:
inLinearRegion = False
i += 1
slopes = np.array([np.mean(s) for s in slopes])
intercepts = np.array([np.mean(inter) for inter in intercepts])
return np.array(linearRegions), slopes, intercepts
```
#### File: pepe/auto/ForceSolve.py
```python
import numpy as np
import os
import cv2
import time
import pickle
import inspect
import ast
import tqdm
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from IPython.display import clear_output
import pepe
from pepe.preprocess import checkImageType, lightCorrectionDiff, circularMask
from pepe.analysis import initialForceSolve, forceOptimize, gSquared, g2ForceCalibration, singleParticleForceBalance, forceOptimizeArgDTypes
from pepe.tracking import houghCircle, convCircle, angularConvolution, circleTrackArgDTypes
from pepe.simulate import genSyntheticResponse
from pepe.utils import preserveOrderArgsort, rectangularizeForceArrays, explicitKwargs, parseList
from pepe.visualize import genColors, visCircles, visForces, visContacts, visRotation
from pepe.topology import findPeaksMulti
# All of the dtypes of the args for the below method
# The following args are not included, because they are not
# important: progressBarOffset, progressBarTitle
forceSolveArgDTypes = {"imageDirectory": str,
"imageExtension": str,
"imageEndIndex": int,
"imageStartIndex": int,
"carryOverAlpha": bool,
"carryOverForce": bool,
"showProgressBar": bool,
"lightCorrectionImage": str,
"lightCorrectionVerticalMask": str,
"lightCorrectionHorizontalMask": str,
"g2CalibrationImage": str,
"g2CalibrationCutoffFactor": float,
"maskImage": str,
"cropXMin": int,
"cropXMax": int,
"circleDetectionMethod": str,
"guessRadius": float,
"fSigma": float,
"pxPerMeter": float,
"brightfield": bool,
"contactPadding": int,
"g2MaskPadding": int,
"contactMaskRadius": int,
"peBlurKernel": int,
"requireForceBalance": bool,
"circleTrackingChannel": int,
"circleTrackingKwargs": dict,
"photoelasticChannel": int,
"optimizationKwargs": dict,
"maxBetaDisplacement": float,
"forceNoiseWidth": float,
"alphaNoiseWidth": float,
"saveMovie": bool,
"pickleArrays": bool,
"outputRootFolder": str,
"outputExtension": str,
"genFitReport": bool,
"performOptimization": bool,
"inputSettingsFile": str,
"debug": bool}
# Decorator that allows us to identify which keyword arguments were explicitly
# passed to the function, and which were left as default values. See beginning
# of method code for more information/motivation.
@explicitKwargs()
def forceSolve(imageDirectory, guessRadius=0.0, fSigma=0.0, pxPerMeter=0.0, brightfield=True, contactPadding=15, g2MaskPadding=2, contactMaskRadius=30, lightCorrectionImage=None, lightCorrectionHorizontalMask=None, lightCorrectionVerticalMask=None, g2CalibrationImage=None, g2CalibrationCutoffFactor=.9, maskImage=None, cropXMin=None, cropXMax=None, peBlurKernel=3, imageExtension='bmp', requireForceBalance=False, imageStartIndex=None, imageEndIndex=None, carryOverAlpha=True, carryOverForce=True, circleDetectionMethod='convolution', circleTrackingKwargs={}, circleTrackingChannel=0, maxBetaDisplacement=.5, photoelasticChannel=1, forceNoiseWidth=.03, alphaNoiseWidth=.01, optimizationKwargs={}, performOptimization=True, debug=False, showProgressBar=True, progressBarOffset=0, progressBarTitle=None, saveMovie=False, outputRootFolder='./', inputSettingsFile=None, pickleArrays=True, genFitReport=True, outputExtension=''):
"""
Complete pipeline to solve for forces and particle positions for all image files
in a directory. Results will be returned and potentially written to various files.
See `Returns` section for more information
Expects all particles to be the same (or very similar) sizes. This assumption is made
by the calculation of the gradient squared calibration value, which is computed just
once using the guessed radii. This should not be a problem if the radii vary only
slightly (on the order of ~10 pixels), but beyond that errors will begin
to accumulate.
This method has **a lot** of arguments; it is intended to be used once reasonable
values for all of these have already been found. While the `debug` option for this function
is very helpful, it is recommended to utilize the various notebooks/examples to find good
choices for parameters first.
The output readme file can also serve as a cache of the parameter values/settings, which
can be passed back to future calls of this method using the `inputSettingsFile` argument.
Parameters
----------
imageDirectory : str
An absolute or relative path to the directory containing the images that are
to be analyzed. The names of the images need not follow any particular naming
scheme, but they should be such that sorting the list alphabetically will give
the images in their proper order.
guessRadius : float
The radius of the particles to be detected, in pixels. As of now, this value will
be taken as the particle radius, but future versions may be able to vary this to
find the optimal value.
Currently no support for particles of different sizes.
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light, and
other material properties (denoted as C in most of the literature).
pxPerMeter : float
The number of pixels per meter in the images. Depends on the camera, lens, and
zoom settings used to capture the images.
Note that this is **not** the inverse (meters per pixel), as is used
in some of the other force-solving implementations.
brightfield : bool
Whether the images are captured using a brightfield polariscope (`True`) or
a darkfield polariscope (`False`).
contactPadding : int
Maximum distance (in pixels) between a particle's edge and the wall or the edges of two
particles that will still be considered a potential force-bearing contact.
g2MaskPadding : int or float
Number of pixels to ignore at the edge of each particle when calculating the average G^2.
If float value < 1 is passed, gradient mask radius will be taken as that percent of the full
particle radius. A value of 0 means no padding is included.
contactMaskRadius : float
The radius of the circular mask that will be constructed around each contact to estimate
the magnitude of the force using the gradient squared in that region.
lightCorrectionImage : str or np.ndarray[H,W[,C]]
The path to an image (or an array representing an image) that contains no particles, and
can be used to correct for any light gradients present in the actual data.
lightCorrectionHorizontalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the horizontal light correction should be calculated. Can also be a path to an image.
lightCorrectionVerticalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the vertical light correction should be calculated. Can also be a path to an image.
g2CalibrationImage : str or np.ndarray[H,W,C]
An image with a single particle (or at least one particle) that has no force acting on it.
Used to determine the base level of gradient squared (due to noise) for a free particle. Can
also be a path to an image.
g2CalibrationCutoffFactor : float
The factor that is multiplied by the mean gradient squared value of the particles in the
calibration image. Any particle that has an average gradient squared value below the
calibration value multiplied by this factor will be assumed to have no forces acting on it.
maskImage : str or np.ndarray[H,W,C]
A mask array, containing values of `0` or `1`, with the latter representing the regions of
importance for the image. Used in detecting particles, generating initial guesses, and
calculating error during non-linear optimization. Can also be a path to an image.
cropXMin : int or None
Left bound to crop down the image in the x direction.
cropXMax : int or None
Right bound to crop down the image in the x direction.
peBlurKernel : int
The kernel size that will be used for blurring the photoelastic channel of each image, to
reduce noise. Should be an odd integer.
imageExtension : str
The extension of the image files that will be read in from `imageDirectory`. Should not include
the '.' before the extension.
requireForceBalance : bool
Whether to impose particle-wise force balance at each step (`True`) or to take the results of
the optimization process as they are (`False`).
Currently WIP, and does not do anything.
forceBalanceWeighting : float
If a non-zero positive value, adds a contribution to the optimization cost
pertaining to how well the ensemble of forces satisfy force balance.
imageStartIndex : int or None
The index of which image to start at when analyzing the files in `imageDirectory`. Value
of `None` will start at the first (alphabetically sorted) image.
imageEndIndex : int or None
The index of which image to end at when analyzing the files in `imageDirectory`. Value
of `None` will end at the last (alphabetically sorted) image.
circleDetectionMethod : ['convolution' or 'hough']
Whether to use the convolution or hough circle detection method to identify particles.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingKwargs : **kwargs
Keyword arguments to be passed to the selected circle tracking function.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingChannel : int
The channel of the image that will be used to track the particles. `0` for red, `1` for
green, and `2` for blue.
maxBetaDisplacement : float
The maximum distance (angle) that a force can move between frames and still be identified
as the same force. If a force moves more than this value, it will still be recorded as a force,
but will be considered a new and independent force from any of the ones in the previous frame.
photoelasticChannel : int
The channel of the image that will be used to gauge the photoelastic response. `0` for red, `1` for
green, and `2` for blue.
forceNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
force guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
alphaNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
alpha guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
optimizationKwargs : **kwargs
Keyword arguments to be passed to the optimization process.
For more information, see `pepe.analysis.forceOptimize()`.
performOptimization : bool
Whether or not to perform optimization on the particles.
Mostly included as a debug option, but any real data analysis should
utilize the optimization, as the initial guessing is often not nearly
accurate enough to get any real results.
debug : bool
Whether to print progress updates for each frame to the screen (`True`) or not (`False`).
showProgressBar : bool
Whether to show a progress bar throughout the analysis (`True`) or not (`False`). Uses
`tqdm` library.
progressBarOffset : int
The number of lines to offset the progress bar by. Generally an internal variable
used when multiple threads are active.
progressBarTitle : str
The text to be written to the left of the progress bar. Generally an internal variable
controlled by some solving script.
saveMovie : bool
Whether to save a compiled gif of the reconstructed forces at each frame at the end (`True`)
or not (`False`).
outputRootFolder : str
The location where the output folder (potentially containing the movie, pickle files, readme, etc.)
will be created. Output folder itself will be named after the `imageDirectory`, with '_Synthetic'
appended to the end.
pickleArrays : bool
Whether to save the forces, betas, alphas, centers, and radii as pickle files (`True`) or not (`False`).
Files will be located in the output folder (see `outputRootFolder`).
inputSettingsFile : str
Path to a readme file containing parameters for the solving process, likely generated from
a previous iteration of the program. Explicitly passed arguments will override those that
are included in the settings file.
Currently WIP and does not do anything.
genFitReport : bool
Whether or not to generate a fit report of the results, including errors per frame,
examinations of all particles/forces, and settings, compiled in a latex pdf.
Will generate both the compiled file 'FitReport.pdf' and the source directory
'FitReport_src/'.
Returns
-------
rectForceArr : list[P](np.ndarray[F,T])
A list of arrays representing the force magnitude for each force on each particle.
rectAlphaArr : list[P](np.ndarray[F,T])
A list of arrays representing the alpha angle for each force on each particle.
rectBetaArr : list[P](np.ndarray[F,T])
A list of arrays representing the beta angle for each force on each particle.
rectCenterArr : np.ndarray[P,T,2]
Particle centers for each timestep. Elements take on a value of `[np.nan, np.nan]`
if the particle does not exist for a given timestep.
rectRadiusArr : np.ndarray[P,T]
Particle radii for each timestep. Elements take on a value of `np.nan` if the particle
does not exist for a given timestep.
rectAngleArr : np.ndarray[P,T]
Particle rotation angles for each timestep, measured relative to each particle's reference
frame. Elements take on a value of `np.nan` if the angle could not be determined for a
given timestep.
Depending on kwarg values, several files may also be created in the output
folder, which will be located in `outputRootFolder` and named according
to: '<`imageDirectory`>_Synthetic/'.
"""
overallStartTime = time.perf_counter()
# For the sake of saving the options to a readme file (and potentially)
# reading them back out, it is easiest to keep all of the settings in a
# dictionary
# We have 3 layers of precedence for reading in settings:
# 1. Explicitly passed kwarg
# 2. Read in from settings file
# 3. Default value of a kwarg
# So we assign the elements of our settings dict in opposite order
# 3. All of the default values
# The following variables are not present:
# progressBarOffset, progressBarTitle
# This is because we don't care about saving them
settings = {"imageDirectory": os.path.abspath(imageDirectory) + '/', # Convert to absolute path
"imageExtension": imageExtension,
"imageEndIndex": imageEndIndex,
"imageStartIndex": imageStartIndex,
"carryOverAlpha": carryOverAlpha,
"carryOverForce": carryOverForce,
"lightCorrectionImage": lightCorrectionImage,
"lightCorrectionVerticalMask": lightCorrectionVerticalMask,
"lightCorrectionHorizontalMask": lightCorrectionHorizontalMask,
"g2CalibrationImage": g2CalibrationImage,
"g2CalibrationCutoffFactor": g2CalibrationCutoffFactor,
"maskImage": maskImage,
"cropXMin": cropXMin,
"cropXMax": cropXMax,
"circleDetectionMethod": circleDetectionMethod,
"guessRadius": guessRadius,
"fSigma": fSigma,
"pxPerMeter": pxPerMeter,
"brightfield": brightfield,
"contactPadding": contactPadding,
"g2MaskPadding": g2MaskPadding,
"contactMaskRadius": contactMaskRadius,
"peBlurKernel": peBlurKernel,
"requireForceBalance": requireForceBalance,
"circleTrackingChannel": circleTrackingChannel,
"photoelasticChannel": photoelasticChannel,
"maxBetaDisplacement": maxBetaDisplacement,
"forceNoiseWidth": forceNoiseWidth,
"alphaNoiseWidth": alphaNoiseWidth,
"showProgressBar": showProgressBar,
"saveMovie": saveMovie,
"pickleArrays": pickleArrays,
"outputRootFolder": outputRootFolder,
"outputExtension": outputExtension,
"genFitReport": genFitReport,
"performOptimization": performOptimization,
"debug": debug}
# For the next step, we will need to know all of the data types of each
# argument (to properly cast). Because certain arguments have None as a default
# value, we can't automatically generate this information.
# See above this method for the list of these, since they are also used
# in the TrialObject file
# We need to do the same thing for the kwargs for both
# circle tracking and optimization
# These have both been moved to either the tracking/DTypes.py
# or the analysis/ForceSolve.py files, respectively
# Now add all the dictionaries together
argDTypes = forceSolveArgDTypes.copy()
argDTypes.update(circleTrackArgDTypes)
argDTypes.update(forceOptimizeArgDTypes)
# 2. Anything read in from a settings file
# Note that it works to our advantage that we already have values for most entries,
# since the settings file doesn't include type information, so we need the old
# values to cast properly. 1. is actually contained in here as well, because
# we can just check to see if that variable was explicitly passed before overwriting it.
# 1. The kwargs that are explicitly passed
# This one is a little tricky, because there isn't a super great way by default
# to differentiate whether a kwarg is explicitly passed or is its default value
# (without just keeping a list of default values). I also don't want to
# replace the entire function signature with (*args, **kwargs) because then the
# documentation would not be as good (I think). So the solution here is to define
# a decorator that has the (*args, **kwargs) signature, and to create an attribute
# of this method that is a list of the kwargs that are explicitly passed to the
# decorator. See `pepe.utils.explicitKwargs()` for more info.
if inputSettingsFile is not None:
if os.path.exists(inputSettingsFile):
fileObj = open(inputSettingsFile, 'r')
for line in fileObj:
# Check each line and see if it looks like a dictionary value
split = line.split(':')
# Read settings into the master settings file
if len(split) == 2 and split[0].strip() in argDTypes.keys() and not split[0].strip() in forceSolve.explicit_kwargs:
# Cast to the type of the value already in the dict
if split[1].strip() == 'None':
settings[split[0].strip()] = None
else:
if '[' in split[1]:
settings[split[0].strip()] = parseList(split[1].strip(), dtype=argDTypes[split[0].strip()])
else:
# Bools need a special condition
if argDTypes[split[0].strip()] is bool:
val = split[1].strip() == 'True'
else:
val = argDTypes[split[0].strip()](split[1].strip())
settings[split[0].strip()] = val
else:
print(f'Warning: provided settings file does not exist! Attempting to run regardless...')
# While the following variables all have a default value of 0, they cannot actually
# be left as this value. The reason they have a default value is so that if these
# values are indicated by a settings file, we don't want to have to enter them again.
# So here, we make sure we have values for them all, either explicitly passed or read in.
requiredVars = ["guessRadius", "fSigma", "pxPerMeter"]
for r in requiredVars:
assert settings[r] != 0, f'Error: {r} value not supplied explicitly or implicitly!'
# Now carry over the kwargs that are sent to the optimization procedure into that
# dictionary. We can find the names of arguments by using the `inspect` library
possibleOptimKwargs = list(inspect.signature(forceOptimize).parameters.keys())
for pkw in possibleOptimKwargs:
if pkw in settings.keys():
optimizationKwargs[pkw] = settings[pkw]
# We want to do the same thing for the circle tracking function, but we don't
# yet know which circle tracking function we are using yet, so we'll carry
# that over a bit later.
# Find all images in the directory
imageFiles = os.listdir(settings["imageDirectory"])
# This goes before the sorting/extension filtering so we can get more specific
# error messages (and we have another one of these below)
if len(imageFiles) < 1:
print(f'Error: directory {imageDirectory} contains no files!')
return None
imageFiles = np.sort([img for img in imageFiles if img[-len(settings["imageExtension"]):] == settings["imageExtension"]])
# We have to do the end index first, so it doesn't mess up the start one
if settings["imageEndIndex"] is not None:
imageFiles = imageFiles[:min(settings["imageEndIndex"], len(imageFiles))]
if settings["imageStartIndex"] is not None:
imageFiles = imageFiles[max(settings["imageStartIndex"], 0):]
# Make sure we still have some proper images
if len(imageFiles) < 1:
print(f'Error: directory \'{settings["imageDirectory"]}\' contains no files with extension \'{settings["imageExtension"]}\'!')
return None
xB = [settings["cropXMin"], settings["cropXMax"]]
imageSize = checkImageType(settings["imageDirectory"] + imageFiles[0])[:,xB[0]:xB[1],0].shape
# This will calculate the light correction across the images
if settings["lightCorrectionImage"] is not None:
# Convert to absolute paths if they are paths
if type(settings["lightCorrectionImage"]) is str:
settings["lightCorrectionImage"] = os.path.abspath(settings["lightCorrectionImage"])
if type(settings["lightCorrectionVerticalMask"]) is str:
settings["lightCorrectionVerticalMask"] = os.path.abspath(settings["lightCorrectionVerticalMask"])
if type(settings["lightCorrectionHorizontalMask"]) is str:
settings["lightCorrectionHorizontalMask"] = os.path.abspath(settings["lightCorrectionHorizontalMask"])
cImageProper = checkImageType(settings["lightCorrectionImage"])[:,xB[0]:xB[1]]
vMask = checkImageType(settings["lightCorrectionVerticalMask"])[:,xB[0]:xB[1]]
hMask = checkImageType(settings["lightCorrectionHorizontalMask"])[:,xB[0]:xB[1]]
if vMask.ndim == 3:
vMask = vMask[:,:,0]
if hMask.ndim == 3:
hMask = hMask[:,:,0]
lightCorrection = lightCorrectionDiff(cImageProper, vMask, hMask)
trackCorrection = lightCorrection[:,:,settings["circleTrackingChannel"]]
peCorrection = lightCorrection[:,:,settings["photoelasticChannel"]]
else:
# It probably isn't great hygiene to have this variable flip between a single
# value and an array, but you can always add a scalar to a numpy array, so
# this is the easiest way (since we haven't loaded any images yet)
trackCorrection = 0
peCorrection = 0
# Load up the mask image, which will be used to remove parts of the images
# that we don't care about, and also potentially indicate which particles
# are close to the boundary.
if settings["maskImage"] is not None:
maskArr = checkImageType(settings["maskImage"])[:,xB[0]:xB[1]]
ignoreBoundary = False
else:
# Same deal as above: scalar multiplication functions exactly how we want
# in the case that we don't have a mask, so it's just easier to do this.
maskArr = 1
ignoreBoundary = True
# Which method we will be using to detect circles
if settings["circleDetectionMethod"] == 'convolution':
circFunc = convCircle
elif settings["circleDetectionMethod"] == 'hough':
circFunc = houghCircle
else:
print(f'Error: circle detection option \'{settings["circleDetectionMethod"]}\' not recognized!')
return None
# Now that we have a circle tracking function, we can carry over any possible kwargs
possibleCircleKwargs = list(inspect.signature(circFunc).parameters.keys())
for pkw in possibleCircleKwargs:
if pkw in settings.keys():
circleTrackingKwargs[pkw] = settings[pkw]
# Calculate the lowest g2 value that we care about, so we can throw everything
# that is below that away when solving (optional)
checkMinG2 = False
if settings["g2CalibrationImage"] is not None:
g2CalImage = checkImageType(settings["g2CalibrationImage"])[:,xB[0]:xB[1]]
g2CalPEImage = cv2.blur((g2CalImage[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# Locate particles
centers, radii = circFunc((g2CalImage[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# There should only be 1 particle in the calibration image
if len(centers) < 1:
print(f'Warning: Gradient-squared calibration image does not contain any particles! Ignoring...')
else:
particleMask = circularMask(g2CalPEImage.shape, centers[0], radii[0])[:,:,0]
gSqr = gSquared(g2CalPEImage)
minParticleG2 = np.sum(gSqr * particleMask) / np.sum(particleMask) * settings["g2CalibrationCutoffFactor"]
checkMinG2 = True
# The arrays that we will be building for each timestep. It is better to just
# use an untyped list since the arrays are all triangular and whatnot.
centersArr = []
radiiArr = []
forceArr = []
betaArr = []
alphaArr = []
imageArr = []
errorArr = []
# For keeping track of time (though it will only be displayed if debug=True)
trackingTimes = np.zeros(len(imageFiles))
initialGuessTimes = np.zeros(len(imageFiles))
optimizationTimes = np.zeros(len(imageFiles))
miscTimes = np.zeros(len(imageFiles))
totalFailedParticles = 0
errorMsgs = []
if settings["showProgressBar"]:
bar = tqdm.tqdm(total=len(imageFiles)+1, position=progressBarOffset, desc=progressBarTitle)
# Calculate the gradient-squared-to-force calibration value
g2Cal = g2ForceCalibration(settings["fSigma"], settings["guessRadius"], settings["pxPerMeter"])
# The big loop that iterates over every image
for i in range(len(imageFiles)):
image = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1]]
# Convert to floats on the domain [0,1], so we can compare to the output of
# genSyntheticResponse()
peImage = cv2.blur((image[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# -------------
# Track circles
# -------------
start = time.perf_counter()
centers, radii = circFunc((image[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# We do some indexing using the centers/radii, so it is helpful
# to have them as an integer type
centers = centers.astype(np.int64)
radii = radii.astype(np.int64)
# We want to keep the order of particles constant, so we make sure
# that they are (to whatever extent possible) in the same order
# as the previous frame. This involves finding the closest neighbor
# from the previous frame.
if len(centersArr) > 0:
centerOrder = preserveOrderArgsort(centersArr[-1], centers, padMissingValues=False)
centers = centers[centerOrder]
radii = radii[centerOrder]
trackingTimes[i] = time.perf_counter() - start
# ----------------------
# Generate initial guess
# ----------------------
# We run the initial guess regardless of whether we are going to overwrite
# with values from the previous frame. This is because the beta values
# are calculated via the contact network, which should not be carried over
# (since the particles are moving).
forceGuessArr, alphaGuessArr, betaGuessArr = initialForceSolve(peImage,
centers, radii, settings["fSigma"], settings["pxPerMeter"],
settings["contactPadding"], settings["g2MaskPadding"],
contactMaskRadius=settings["contactMaskRadius"],
boundaryMask=maskArr, ignoreBoundary=ignoreBoundary, g2Cal=g2Cal)
if len(centersArr) > 0:
# If we have added/lost particles, we want to carry over the previous values where
# possible, and otherwise take the results of initialForceSolve
# Note that this is the complement to the center order calculated previously:
# this orders the old centers according to the new ones.
# We make the assumption that a particle cannot travel more than its radius in a single frame
oldCenterOrder = preserveOrderArgsort(centers, centersArr[-1], padMissingValues=True, maxDistance=settings["guessRadius"])
# Now find each new particle's old counterpart (if it exists), and then
# line up the forces using the value of beta, such that we can (optionally)
# carry over force magnitudes and alpha values.
for j in range(len(betaGuessArr)):
if oldCenterOrder[j] is None:
continue
# maxBetaDisplacement should be an angle value (in radians) that a force would
# never move in a single frame, but is large enough to not lose a force if it
# moves because of noise/small fluctuations.
forceOrder = preserveOrderArgsort(betaGuessArr[j], betaArr[-1][oldCenterOrder[j]], padMissingValues=True, maxDistance=settings["maxBetaDisplacement"])
#print(f'frame {i}, particle {j}: {forceOrder}')
for k in range(len(forceGuessArr[j])):
if forceOrder[k] is not None:
if settings["carryOverForce"]:
forceGuessArr[j][k] = forceArr[-1][oldCenterOrder[j]][forceOrder[k]]
if settings["carryOverAlpha"]:
alphaGuessArr[j][k] = alphaArr[-1][oldCenterOrder[j]][forceOrder[k]]
# In this case, we want to add a small randomly generated contribution
# so that the algorithm doesn't get stuck in some incorrect loop and so that it
# explores a little more of the parameter space to find a nice minimum at each step
if settings["forceNoiseWidth"] is not None:
forceGuessArr = [np.abs(np.array(f) + np.random.normal(0, settings["forceNoiseWidth"], size=len(f))) for f in forceGuessArr]
if settings["alphaNoiseWidth"] is not None:
alphaGuessArr = [np.abs(np.array(a) + np.random.normal(0, settings["alphaNoiseWidth"], size=len(a))) for a in alphaGuessArr]
initialGuessTimes[i] = time.perf_counter() - trackingTimes[i] - start
# -------------------------------
# Optimize each particle's forces
# -------------------------------
optimizedForceArr = []
optimizedBetaArr = []
optimizedAlphaArr = []
failed = [False for i in range(len(centers))]
# Drop forces on any particles whose g2 is lower than the min value
skipParticles = [False for i in range(len(centers))]
if checkMinG2:
gSqr = gSquared(peImage)
for j in range(len(centers)):
cMask = circularMask(peImage.shape, centers[j], radii[j])[:,:,0]
avgG2 = np.sum(gSqr * cMask) / np.sum(cMask)
skipParticles[j] = avgG2 < minParticleG2
# Mostly just a debug option, so we can test particle tracking
if not settings["performOptimization"]:
optimizedForceArr = forceGuessArr
optimizedAlphaArr = alphaGuessArr
optimizedBetaArr = betaGuessArr
else:
# This is what should run the majority of the time
for j in range(len(centers)):
if not skipParticles[j]:
try:
# We don't need to pass fSigma, pxPerMeter, or brightfield to the method
# because they will get added to optimizationKwargs automatically.
optForceArr, optBetaArr, optAlphaArr, res = forceOptimize(forceGuessArr[j], betaGuessArr[j], alphaGuessArr[j], radii[j], centers[j], peImage,
#settings["fSigma"], settings["pxPerMeter"], settings["brightfield"],
**optimizationKwargs)
optimizedForceArr.append(optForceArr)
optimizedBetaArr.append(optBetaArr)
optimizedAlphaArr.append(optAlphaArr)
except Exception as ex:
print(ex)
errorMsgs.append(f'File {imageFiles[i]}: ' + str(ex) + '\n')
failed[j] = True
totalFailedParticles += 1
# Append empty lists (ie say there are no forces)
#optimizedForceArr.append(forceGuessArr[j])
#optimizedBetaArr.append(betaGuessArr[j])
#optimizedAlphaArr.append(alphaGuessArr[j])
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
else:
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
# If necessary, impose force balance on all particles
if requireForceBalance:
for j in range(len(centers)):
optimizedForceArr[j], optimizedAlphaArr[j] = singleParticleForceBalance(optimizedForceArr[j], optimizedAlphaArr[j], optimizedBetaArr[j])
optimizationTimes[i] = time.perf_counter() - initialGuessTimes[i] - trackingTimes[i] - start
# Save all of our values
forceArr.append(optimizedForceArr)
betaArr.append(optimizedBetaArr)
alphaArr.append(optimizedAlphaArr)
centersArr.append(centers)
radiiArr.append(radii)
if settings["debug"] or settings["saveMovie"] or settings["genFitReport"]:
estimatedPhotoelasticChannel = np.zeros_like(peImage, dtype=np.float64)
for j in range(len(centers)):
estimatedPhotoelasticChannel += genSyntheticResponse(np.array(forceGuessArr[j]),
np.array(alphaGuessArr[j]),
np.array(betaGuessArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
optimizedPhotoelasticChannel = np.zeros(peImage.shape)
for j in range(len(centers)):
optimizedPhotoelasticChannel += genSyntheticResponse(np.array(optimizedForceArr[j]),
np.array(optimizedAlphaArr[j]),
np.array(optimizedBetaArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
# Root-sum-squared error between the optimized reconstruction and the real photoelastic image
errorArr.append(np.sqrt(np.sum((optimizedPhotoelasticChannel - peImage)**2)))
imgArr = np.zeros((*optimizedPhotoelasticChannel.shape, 3))
img = Image.fromarray(optimizedPhotoelasticChannel*255)
img = img.convert('RGB')
drawObj = ImageDraw.Draw(img)
for j in range(len(centers)):
leftUpPoint = (centers[j][1]-radii[j], centers[j][0]-radii[j])
rightDownPoint = (centers[j][1]+radii[j], centers[j][0]+radii[j])
twoPointList = [leftUpPoint, rightDownPoint]
color = '#FF0000' if failed[j] else '#00AAAA'
drawObj.ellipse(twoPointList, outline=color, fill=None, width=3)
if settings["debug"]:
clear_output(wait=True)
fig, ax = plt.subplots(1, 3, figsize=(12,4))
ax[0].imshow(maskArr * image)
ax[0].set_title('Tracked Particles')
for j in range(len(centers)):
c = plt.Circle(centers[j][::-1], radii[j], label='Detected particles', color='teal', fill=False, linewidth=1)
ax[0].add_artist(c)
# Now add contacts
for k in range(len(betaGuessArr[j])):
contactPoint = centers[j] + radii[j] * np.array([np.cos(betaGuessArr[j][k]), np.sin(betaGuessArr[j][k])])
cc = plt.Circle(contactPoint[::-1], 12, color='red', fill=False, linewidth=1)
ax[1].add_artist(cc)
# Now plot past center positions
#for k in range(len(centersArr)):
# if len(centersArr[k]) >= j:
# cc = plt.Circle(centersArr[k][j][::-1], 5, color=centerColors[j], fill=True)
# ax[0].add_artist(cc)
ax[1].imshow(estimatedPhotoelasticChannel)
ax[1].set_title('Initial Guess for Optimizer\n(known forces)')
ax[2].imshow(img)
ax[2].set_title('Optimized Forces\n(known forces)')
fig.suptitle(imageFiles[i])
fig.tight_layout()
plt.show()
if settings["saveMovie"]:
imageArr.append(img)
miscTimes[i] = time.perf_counter() - optimizationTimes[i] - initialGuessTimes[i] - trackingTimes[i] - start
if settings["debug"]:
print(f'Took {time.perf_counter() - start:.5}s to solve frame:')
print(f'{5*" "}Tracking: {trackingTimes[i]:.3}s')
print(f'{5*" "}Initial guess: {initialGuessTimes[i]:.3}s')
print(f'{5*" "}Optimization: {optimizationTimes[i]:.3}s')
print(f'{5*" "}Misc. processes: {miscTimes[i]:.3}s')
if settings["showProgressBar"]:
bar.update()
# Restructure the arrays to make them more friendly, and to track forces/particles across timesteps
rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr = rectangularizeForceArrays(forceArr, alphaArr, betaArr, centersArr, radiiArr)
# --------------
# Track rotation
# --------------
# We choose to do this after the actual solving because it helps
# to have the rectangular force arrays.
padding = settings["guessRadius"] + 5
# First, we generate our reference images, which are the first
# time a particle is completely in frame.
refImages = [None] * len(rectCenterArr)
for i in range(len(refImages)):
for j in range(len(imageFiles)):
if not True in np.isnan(rectCenterArr[i][j]):
# Continue to the next frame if this one is partially offscreen
if True in ((rectCenterArr[i][j] - padding) < 0) or True in ((rectCenterArr[i][j] - np.array(imageSize) + padding) > 0):
continue
# Otherwise, this is a good frame, so we save it
refImageFull = checkImageType(settings["imageDirectory"] + imageFiles[j])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
refImageFull *= circularMask(refImageFull.shape, rectCenterArr[i][j], rectRadiusArr[i][j])[:,:,0]
refImages[i] = refImageFull[int(rectCenterArr[i][j][0] - padding):int(rectCenterArr[i][j][0] + padding), int(rectCenterArr[i][j][1] - padding):int(rectCenterArr[i][j][1] + padding)]
# And move onto the next particle
break
# Same shape as the radius array: 1 value for each timestep, for each particle
rectAngleArr = np.zeros(rectRadiusArr.shape)
# Set all values to be np.nan initially
rectAngleArr[:,:] = np.nan
# Now we compare that reference particle to each subsequent frame
# (probably not best practice that I've switched the indices
# with respect to the previous statements, but :/)
for i in range(len(imageFiles)):
currentImageFull = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
for j in range(len(refImages)):
# Make sure we have a reference image, and the particle is in full view
if True in np.isnan(rectCenterArr[j][i]):
continue
if True in ((rectCenterArr[j][i] - padding) < 0) or True in ((rectCenterArr[j][i] - np.array(imageSize) + padding) > 0):
continue
# Crop out around the particle and mask it
currImage = (circularMask(currentImageFull.shape, rectCenterArr[j][i], rectRadiusArr[j][i])[:,:,0] * currentImageFull)[int(rectCenterArr[j][i][0] - padding):int(rectCenterArr[j][i][0] + padding), int(rectCenterArr[j][i][1] - padding):int(rectCenterArr[j][i][1] + padding)]
# Which is the kernel and which is the reference image doesn't really matter
# (as long as we are consistent)
# We can choose our bounds based on the previous value of the rotation
if i >= 1 and not np.isnan(rectAngleArr[j,i-1]):
rotationBounds = (rectAngleArr[j,i-1] - .1, rectAngleArr[j,i-1] + .1)
else:
# If either i=0 or the previous rotation value is nan, we should start around 0
# anyway (since we define 0 arbitrarily)
rotationBounds = (-.2, .2)
# .003 was chosen based on the data presented in the wiki
# https://github.com/Jfeatherstone/pepe/wiki/Angular-Convolution
thetaArr, convArr = angularConvolution(refImages[j], currImage, dTheta=.003, angleBounds=rotationBounds)
rectAngleArr[j,i] = thetaArr[findPeaksMulti(convArr)[0][0][0]]
# Reuse the name of the folder the images come from as a part of
# the output folder name
# [-2] element for something of form 'path/to/final/folder/' will be 'folder'
# If we are missing the final /, you have to take just the [-1] element
if settings["imageDirectory"][-1] == '/':
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-2] + f'_Synthetic{settings["outputExtension"]}/'
else:
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-1] + f'_Synthetic{settings["outputExtension"]}/'
if not os.path.exists(outputFolderPath):
os.mkdir(outputFolderPath)
if settings["saveMovie"]:
imageArr[0].save(outputFolderPath + 'Synthetic.gif', save_all=True, append_images=imageArr[1:], duration=30, optimize=False, loop=0)
# Write a readme file that contains all of the parameters that the solving used
lines = ['#####################\n',
'# README FILE #\n',
'#####################\n']
lines += [f'Generated: {time.ctime()}\n\n']
lines += ['Note: this file was autogenerated by the `pepe.auto.forceSolve()` function\n',
' and it is not recommended to be manually edited. To reuse the settings\n',
' and parameters that were used here, the path of this file\n',
f' (\'{outputFolderPath}readme.txt\') \n',
' can be passed via the \'settingsFile\' keyword argument of `pepe.auto.forceSolve()`.\n',
' In this case, explicitly passed arguments will override the values in the settings file.\n']
lines += ['\n## Runtime Information\n',
f'Version: pepe {pepe.__version__}\n',
f'Total runtime: {time.perf_counter() - overallStartTime:.6}s\n',
f'Mean tracking time: {np.mean(trackingTimes):.4}s\n',
f'Mean guess generation time: {np.mean(initialGuessTimes):.4}s\n',
f'Mean optimization time: {np.mean(optimizationTimes):.4}s\n',
f'Mean misc. time: {np.mean(miscTimes):.4}s\n',
f'Number of failed particles: {totalFailedParticles}\n']
settings.update(circleTrackingKwargs)
settings.update(optimizationKwargs)
lines += ['\n## Settings\n']
for k,v in settings.items():
lines += [f'{k}: {v}\n']
lines += ['\n## Errors\n']
if len(errorMsgs) > 0:
lines += errorMsgs
else:
lines += ['None :)']
with open(outputFolderPath + 'readme.txt', 'w') as readmeFile:
readmeFile.writelines(lines)
# Save the arrays to pickle files (optional)
if settings["pickleArrays"]:
with open(outputFolderPath + 'forces.pickle', 'wb') as f:
pickle.dump(rectForceArr, f)
with open(outputFolderPath + 'alphas.pickle', 'wb') as f:
pickle.dump(rectAlphaArr, f)
with open(outputFolderPath + 'betas.pickle', 'wb') as f:
pickle.dump(rectBetaArr, f)
with open(outputFolderPath + 'centers.pickle', 'wb') as f:
pickle.dump(rectCenterArr, f)
with open(outputFolderPath + 'radii.pickle', 'wb') as f:
pickle.dump(rectRadiusArr, f)
with open(outputFolderPath + 'angles.pickle', 'wb') as f:
pickle.dump(rectAngleArr, f)
# Save the raw arrays too, since I think I have a bug in my rectangularization process
# if settings["pickleArrays"]:
# with open(outputFolderPath + 'forces_raw.pickle', 'wb') as f:
# pickle.dump(forceArr, f)
#
# with open(outputFolderPath + 'alphas_raw.pickle', 'wb') as f:
# pickle.dump(alphaArr, f)
#
# with open(outputFolderPath + 'betas_raw.pickle', 'wb') as f:
# pickle.dump(betaArr, f)
#
# with open(outputFolderPath + 'centers_raw.pickle', 'wb') as f:
# pickle.dump(centersArr, f)
#
# with open(outputFolderPath + 'radii_raw.pickle', 'wb') as f:
# pickle.dump(radiiArr, f)
# Generate a fit report (optional)
# This includes information about the error for each frame, all of the forces/alphas/betas/
# centers/radii for each particle at each timestep, and all settings in a nicely compiled
# (via latex) pdf.
if settings["genFitReport"]:
# Make the source directory
if not os.path.exists(outputFolderPath + 'FitReport_src'):
os.mkdir(outputFolderPath + 'FitReport_src')
# First, generate a plot of the error
fig, ax = plt.subplots()
ax.plot(errorArr)
ax.set_xlabel('Frame')
ax.set_ylabel('Mean-squared error')
ax.set_title('Difference Between Optimized Result and Real Image')
fig.savefig(outputFolderPath + 'FitReport_src/error.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/error.png')
plt.close(fig)
# Draw all of the circles, with their labeled numbers
fig, ax = plt.subplots(1, 2, figsize=(8,3))
# First timestep
visCircles([rectCenterArr[i][0] for i in range(len(rectCenterArr))], [rectRadiusArr[i][0] for i in range(len(rectRadiusArr))],
ax=ax[0], annotations=np.arange(len(rectCenterArr)), setBounds=True)
# Last timestep
visCircles([rectCenterArr[i][-1] for i in range(len(rectCenterArr))], [rectRadiusArr[i][-1] for i in range(len(rectRadiusArr))],
ax=ax[1], annotations=np.arange(len(rectCenterArr)), setBounds=True)
for i in range(2):
ax[i].set_xlabel('X [px]')
ax[i].set_ylabel('Y [px]')
ax[i].invert_yaxis()
ax[0].set_title('First Frame')
ax[1].set_title('Last Frame')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.png')
plt.close(fig)
# Next, draw the forces/betas/alphas/centers for each particle
# through time
for i in range(len(rectForceArr)):
fig, ax = visForces(rectForceArr[i], rectAlphaArr[i], rectBetaArr[i], rectCenterArr[i], rectAngleArr[i])
fig.suptitle(f'Particle {i}')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.pdf')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.png')
plt.close(fig)
# Create a gif of the particle orientation through time, overlaid
# on the original images
visRotation([settings["imageDirectory"] + f for f in imageFiles],
rectCenterArr, rectRadiusArr, rectAngleArr, outputFolderPath + 'FitReport_src/', (0, cropXMin))
# Create gifs of the contacts
forceColors = genColors(len(rectBetaArr))
# The list comprehension is to make sure that we index a particle that actually has forces acting
# on it.
tSteps = len(imageFiles)#len([b for b in rectBetaArr if len(b) > 0][0])
contactPointImages = [None for i in range(tSteps)]
contactAngleImages = [None for i in range(tSteps)]
for i in range(tSteps):
# Have to do this, because the settings variable could be None
startI = settings["imageStartIndex"] if settings["imageStartIndex"] is not None else 0
# First, just the contact points
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactPointImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
# Now the one with angles
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex], alphaArr=rectAlphaArr[particleIndex][:,i])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactAngleImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
contactPointImages[0].save(outputFolderPath + 'FitReport_src/contact_points.gif', save_all=True,
append_images=contactPointImages[1:], duration=20, optimize=True, loop=0)
contactAngleImages[0].save(outputFolderPath + 'FitReport_src/contact_angles.gif', save_all=True,
append_images=contactAngleImages[1:], duration=20, optimize=True, loop=0)
if settings["showProgressBar"]:
bar.update()
bar.close()
return rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr, rectAngleArr
```
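For orientation, a minimal call to `forceSolve()` might look like the following sketch. The directory path and calibration numbers are placeholders rather than values from any real dataset, and it is assumed that the image directory is the first positional argument; only `imageDirectory`, `guessRadius`, `fSigma`, and `pxPerMeter` must always be supplied, as enforced by the `requiredVars` check above.
```python
# Hypothetical usage sketch; the path and numeric values are placeholders.
from pepe.auto import forceSolve

results = forceSolve('/path/to/images/',   # directory of photoelastic frames
                     guessRadius=160,      # approximate particle radius [px]
                     fSigma=140,           # stress optic coefficient
                     pxPerMeter=10000,     # camera calibration
                     brightfield=False,    # darkfield polariscope
                     showProgressBar=True)

# Unpack the rectangularized arrays described in the Returns section above
forceArr, alphaArr, betaArr, centerArr, radiusArr, angleArr = results
```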
#### File: pepe/preprocess/Image.py
```python
import numpy as np
from PIL import Image
import cv2
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
def checkImageType(frame):
"""
Make sure that the image is a proper image, and not a path
Parameters
----------
frame : str or numpy.ndarray
Either a path to an image, or an image array
Returns
-------
numpy.ndarray : The image array
"""
if isinstance(frame, str):
# I don't want to overwrite the image itself, so create a new var for that
newFrame = np.array(Image.open(frame), dtype=np.uint8)
else:
newFrame = frame
return newFrame
def lightCorrectionDiff(calibrationImage, verticalMask=None, horizontalMask=None, smoothCorrection=True, debug=False, smoothingKernel=31, channel=0, rectify=False):
"""
"""
calImg = checkImageType(calibrationImage)
# Slightly different behavior depending on whether we are passed a multichannel
# image vs a grayscale one. For multichannel, we calculate a correction for each
# channel separately.
if calImg.ndim == 3:
imgSize = calImg.shape[:2]
numChannels = calImg.shape[-1]
else:
imgSize = calImg.shape
numChannels = 1
# Add a third dim, so we can treat multi/single channel images
# exactly the same way
calImg = calImg[:,:,None]
if verticalMask is None:
verticalMask = np.ones(imgSize)
if horizontalMask is None:
horizontalMask = np.ones(imgSize)
fullLightCorrection = np.zeros((*imgSize, numChannels))
for i in range(numChannels):
verticallyMasked = calImg[:,:,i] * verticalMask
horizontallyMasked = calImg[:,:,i] * horizontalMask
# If there are no non-zero pixels, we just move on to the next channel
# (and leave this correction as an array of zeros)
if len(np.where(verticallyMasked != 0)[0]) == 0:
continue
# This excludes all values of zero, so that we get an actual pixel value we can directly add
brightnessByRow = np.nanmean(np.where(verticallyMasked != 0, verticallyMasked, np.nan), axis=1)
brightnessByColumn = np.nanmean(np.where(horizontallyMasked != 0, horizontallyMasked, np.nan), axis=0)
# Now smooth the two curves
# Can't say I know much about this filter, but it seems to work pretty well
if smoothCorrection:
smoothedBrightnessByColumn = savgol_filter(brightnessByColumn, smoothingKernel, 1)
smoothedBrightnessByRow = savgol_filter(brightnessByRow, smoothingKernel, 1)
else:
smoothedBrightnessByColumn = brightnessByColumn
smoothedBrightnessByRow = brightnessByRow
# Now calculate the correction
horizontalCorrection = np.mean(smoothedBrightnessByColumn) - smoothedBrightnessByColumn
verticalCorrection = np.mean(smoothedBrightnessByRow) - smoothedBrightnessByRow
# This object will have the same size as the image, and can just be added to
# any similar image to correct detected light gradients
fullLightCorrection[:,:,i] = np.add.outer(verticalCorrection, horizontalCorrection)
if rectify:
fullLightCorrection[:,:,i] -= np.min(fullLightCorrection[:,:,i])
# If we have a single channel image originally, we want to keep the same shape
# for our return value -- so that the return can immediately be added to the
# original image -- so we remove the last channel dimension
if numChannels == 1:
fullLightCorrection = fullLightCorrection[:,:,0]
if debug:
if numChannels > 1:
fig, ax = plt.subplots(2, 3, figsize=(12, 8))
channelNames = ['Red', 'Green', 'Blue']
for i in range(3):
ax[0,i].imshow(calImg[:,:,i])
ax[0,i].set_title(f'Original {channelNames[i]} Channel')
ax[1,i].imshow(calImg[:,:,i] + fullLightCorrection[:,:,i])
ax[1,i].set_title(f'Corrected {channelNames[i]} Channel')
else:
fig, ax = plt.subplots(1, 2, figsize=(8,4))
ax[0].imshow(calImg[:,:,0])
ax[0].set_title('Original Image')
ax[1].imshow(calImg[:,:,0] + fullLightCorrection)
ax[1].set_title('Corrected Image')
fig.tight_layout()
plt.show()
return fullLightCorrection
```
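A short sketch of using `lightCorrectionDiff()` on its own. The image paths are placeholders, and the import path assumes these helpers are exported from `pepe.preprocess`, as they are used elsewhere in the package.
```python
# Hypothetical usage sketch; the image paths are placeholders.
import numpy as np
from pepe.preprocess import checkImageType, lightCorrectionDiff

# Compute an additive correction from a particle-free calibration frame
correction = lightCorrectionDiff('/path/to/empty_frame.png', smoothingKernel=31)

# Apply it to a data frame taken under the same lighting conditions
rawImage = checkImageType('/path/to/data_frame.png').astype(np.float64)
correctedImage = rawImage + correction
```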
#### File: pepe/test/test_auto.py
```python
import inspect
import numpy as np
from pepe.auto import forceSolve, forceSolveArgDTypes
def test_forceSolve_DTypeCoverage():
"""
Make sure that all of the dtypes for the arguments in
pepe.auto.forceSolve are defined for the purpose of
reading in values from a settings file.
"""
args, counts = np.unique(list(inspect.signature(forceSolve).parameters.keys()) + list(forceSolveArgDTypes.keys()), return_counts=True)
missingArgs = args[counts == 1]
assert len(missingArgs) == 0, f"Missing data types for args: {missingArgs}"
```
#### File: pepe/visualize/Colors.py
```python
import numpy as np
import matplotlib.colors as mcolors
from sklearn.neighbors import KDTree
# Some manually selected colors that are good
# Taken from xkcd survey: https://blog.xkcd.com/2010/05/03/color-survey-results/
MANUAL_COLORS = ['#7E1E9C', # Purple
'#15B01A', # Green
'#0343DF', # Blue
'#FF88C0', # Pink
'#653700', # Brown
'#E50000', # Red
'#029386', # Teal
'#F97306', # Orange
'#033500', # Dark green
'#00035B', # Dark blue
'#650021', # Maroon
'#BF77F6', # Light purple
'#929591', # Gray
'#6E750E', # Olive
'#00FFFF', # Cyan
]
def rgbToHex(rgbC):
return '%02x%02x%02x' % rgbC
def hexToRgb(hexC):
return [int(hexC[i:i+2], 16) for i in (0, 2, 4)]
def genColors(size, seed=21):
"""
Generate a set of colors for visualization by a hierarchy of methods:
1. First, select from MANUAL_COLORS defined above.
2. Use genRandomDistancedColors()
3. Use genRandomColors()
"""
if size <= len(MANUAL_COLORS):
return [MANUAL_COLORS[i] for i in range(size)]
else:
# genRandomDistanceColors will already supplement with full random
# colors if size is too big, so no need to deal with it here.
return MANUAL_COLORS + genRandomDistancedColors(size - len(MANUAL_COLORS), seed)
def genRandomColors(size, seed=21):
"""
Generate colors by randomly selecting rgb values.
Parameters
----------
size : int
The number of colors to generate.
seed : int
The seed to use for random generation.
Returns
-------
colors : list[size]
A list of hex codes for colors.
"""
np.random.seed(seed)
randomColors = [f"#{rgbToHex(tuple(np.random.choice(range(256), size=3).flatten()))}" for i in range(size)]
return randomColors
def genRandomDistancedColors(size, seed=21):
"""
Generate colors by sampling from the list of CSS named colors,
where the first color will be randomly selected, and each subsequent
one will be chosen to maximize its distance from previously selected
colors in LAB color space (using a kd-tree).
If the number of requested colors is greater than the number of named
colors, the remaining ones will be randomly generated.
Maybe a bit overkill...
Parameters
----------
size : int
The number of colors to generate.
seed : int
The seed to use for random generation.
Returns
-------
colors : list[size]
A list of hex codes for colors.
"""
allCSSColors = list(mcolors.CSS4_COLORS.values())
if size > len(allCSSColors):
allCSSColors += genRandomColors(size - len(allCSSColors), seed)
return allCSSColors
np.random.seed(seed)
allCSSColorsLAB = np.array([rgbToLab(hexToRgb(c[1:])) for c in allCSSColors])
# Randomly generate the starting one
selection = np.random.randint(len(allCSSColors))
addedIndices = [selection]
colors = [allCSSColors[selection]]
colorsLAB = np.zeros((size, 3))
colorsLAB[0] = allCSSColorsLAB[selection]
kdTree = KDTree(allCSSColorsLAB, leaf_size=10)
# For each other color, we want to select the furthest possible color from our current set
for i in range(1, size):
dist, ind = kdTree.query(colorsLAB[:i], k=len(allCSSColorsLAB))
dist = np.array([dist[j,:][np.argsort(ind[j])] for j in range(len(dist))])
fullCost = np.sum(dist, axis=0)
selection = np.argsort(fullCost)[::-1]
selection = [s for s in selection if not s in addedIndices]
colors.append(allCSSColors[selection[0]])
addedIndices.append(selection[0])
colorsLAB[i] = allCSSColorsLAB[selection[0]]
return colors
def maxDifferenceOrderColors(colors):
"""
Sort a set of colors such that the difference between consecutive
colors is maximized.
A fun little application of the (inverse of the) travelling salesman problem :)
"""
pass
# This method was taken from:
# https://gist.github.com/manojpandey/f5ece715132c572c80421febebaf66ae
# TODO: Clean it up
def rgbToLab(inputColor):
num = 0
RGB = [0, 0, 0]
for value in inputColor:
value = float(value) / 255
if value > 0.04045:
value = ((value + 0.055) / 1.055) ** 2.4
else:
value = value / 12.92
RGB[num] = value * 100
num = num + 1
XYZ = [0, 0, 0, ]
X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
XYZ[0] = round(X, 4)
XYZ[1] = round(Y, 4)
XYZ[2] = round(Z, 4)
# Observer= 2°, Illuminant= D65
XYZ[0] = float(XYZ[0]) / 95.047 # ref_X = 95.047
XYZ[1] = float(XYZ[1]) / 100.0 # ref_Y = 100.000
XYZ[2] = float(XYZ[2]) / 108.883 # ref_Z = 108.883
num = 0
for value in XYZ:
if value > 0.008856:
value = value ** (0.3333333333333333)
else:
value = (7.787 * value) + (16 / 116)
XYZ[num] = value
num = num + 1
Lab = [0, 0, 0]
L = (116 * XYZ[1]) - 16
a = 500 * (XYZ[0] - XYZ[1])
b = 200 * (XYZ[1] - XYZ[2])
Lab[0] = round(L, 4)
Lab[1] = round(a, 4)
Lab[2] = round(b, 4)
return Lab
```
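A quick sketch of the color helpers in use; the number of particles here is arbitrary.
```python
# Minimal sketch: assign a distinct color to each of N (arbitrary) particles.
import matplotlib.pyplot as plt
from pepe.visualize import genColors

nParticles = 20  # arbitrary
colors = genColors(nParticles)

fig, ax = plt.subplots()
for i, c in enumerate(colors):
    ax.scatter([i], [0], color=c, s=200)
ax.set_yticks([])
ax.set_xlabel('Particle index')
plt.show()
```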
#### File: pepe/visualize/Forces.py
```python
import time
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import clear_output
from pepe.visualize import visCircles, genRandomDistancedColors, genRandomColors, genColors
def visForces(forceArr, alphaArr, betaArr, centerArr=None, angleArr=None, fps=None):
"""
Visualize all of the forces acting on a single particle, by plotting
the magnitudes, alphas, and betas (and optionally center position).
Requires that `forceArr`, `alphaArr`, `betaArr`, and `centerArr` have
indices of `forceArr[forceIndex][timeIndex]`, as would be given by
rectangularizing via `pepe.utils.rectangularizeForceArrays()` and
indexing a single value in the first dimension.
Examples
--------
```
# Solve for some forces
forceArr, alphaArr, betaArr, centerArr, radiusArr = forceOptimize(...)
# Particle index
pI = 0
fig, ax = visForces(forceArr[pI], alphaArr[pI], betaArr[pI], centerArr[pI])
plt.show()
```
Parameters
----------
forceArr : np.ndarray[F,T]
Array containing magnitudes of F forces for T timesteps.
alphaArr : np.ndarray[F,T]
Array containing alpha angles of F forces for T timesteps.
betaArr : np.ndarray[F,T]
Array containing beta angles of F forces for T timesteps.
centerArr : np.ndarray[T,2] or None
Array containing center position in form [y,x] of the particle for T timesteps.
angleArr : np.ndarray[T] or None
Array containing angles in radians of the particle for T timesteps.
fps : float or None
The number of frames per second of the capture video, used to convert the x-axis
units from frame number to proper seconds.
Returns
-------
fig : plt.figure()
The figure object the quantities are plotted on.
ax : plt.axis()
The list of 3 (or 4, if centers are provided) axes that the quantities are plotted on.
"""
fig, ax = plt.subplots(1, 3 + int(centerArr is not None) + int(angleArr is not None), figsize=(3.6*(3+int(centerArr is not None)+int(angleArr is not None)),4))
if len(forceArr) == 0:
return fig, ax
tArr = np.arange(len(forceArr[0]))
if fps is None:
fps = 1
if centerArr is not None:
ax[-1 - int(angleArr is not None)].plot(tArr/fps, centerArr[:,1], label='X')
ax[-1 - int(angleArr is not None)].plot(tArr/fps, centerArr[:,0], label='Y')
ax[-1 - int(angleArr is not None)].set_ylabel('Position [px]')
ax[-1 - int(angleArr is not None)].legend()
if angleArr is not None:
# The weird ax indexing is to make sure it works regardless of
# whether centerArr is passed or not
ax[-1].plot(tArr/fps, angleArr)
ax[-1].set_ylabel('Angle [rad]')
for i in range(len(forceArr)):
ax[0].plot(tArr/fps, forceArr[i])
ax[1].plot(tArr/fps, alphaArr[i], 'o')
ax[2].plot(tArr/fps, betaArr[i], 'o')
ax[0].set_ylabel('Force [N]')
ax[1].set_ylabel('Alpha [rad]')
ax[2].set_ylabel('Beta [rad]')
for i in range(3 + int(centerArr is not None) + int(angleArr is not None)):
if fps == 1:
ax[i].set_xlabel('Time [frame]')
else:
ax[i].set_xlabel('Time [s]')
fig.tight_layout()
return fig, ax
def visContacts(center, radius, betaArr, alphaArr=None, forceArr=None, ax=None, setBounds=False, circleColor=None, forceColors=None, drawCircle=False):
"""
Visualize the contacts for a system of particles, indicating either positions
of contacts, or positions and contact angles (if `alphaArr` is provided).
Returns
-------
ax : plt.axis()
"""
npCenter = np.array(center)
if ax is None:
fig, ax = plt.subplots()
if forceColors is None:
colors = genColors(len(betaArr), 1000)
elif type(forceColors) is str:
colors = [forceColors for i in range(len(betaArr))]
else:
colors = forceColors
if drawCircle:
visCircles([npCenter], [radius], ax, setBounds=setBounds, colors=colors[0])
if len(betaArr) == 0:
return ax
if forceArr is None:
weighting = np.zeros(len(betaArr)) + radius/4
else:
weighting = np.array(forceArr) * radius/4
if alphaArr is None:
for i in range(len(betaArr)):
contactPoint = npCenter + .95*radius * np.array([np.cos(betaArr[i]), np.sin(betaArr[i])])
ax.scatter([contactPoint[1]], [contactPoint[0]], c=colors[i])
#c = plt.Circle(contactPoint[::-1], weighting[i], color='red', fill=False, linewidth=1)
#ax.add_artist(c)
else:
for i in range(len(betaArr)):
if not np.isnan(alphaArr[i]):
contactPoint = npCenter + radius * np.array([np.cos(betaArr[i]), np.sin(betaArr[i])])
point1 = contactPoint + weighting[i] * np.array([np.cos(betaArr[i] + alphaArr[i]), np.sin(betaArr[i] + alphaArr[i])])
point2 = contactPoint - weighting[i] * np.array([np.cos(betaArr[i] + alphaArr[i]), np.sin(betaArr[i] + alphaArr[i])])
ax.plot([point1[1], point2[1]], [point1[0], point2[0]], linewidth=5, c=colors[i])
return ax
def fullVisContacts(outputDir, centerArr, radiusArr, betaArr, alphaArr=None, forceArr=None, forceColors=None, circleColors=None, startIndex=0, imageSize=(1024, 1280), fps=25):
"""
Visualize the contacts of every particle at every timestep and save the
result as a gif ('contact_tracking.gif') in `outputDir`.
"""
if forceColors is None:
forceColorArr = [genRandomColors(len(b), int(time.perf_counter()*1e6) % 1024) for b in betaArr]
else:
forceColorArr = forceColors
if len(betaArr) == 0:
return False
# The list comprehension is to make sure that we index a particle that actually has forces acting
# on it.
tSteps = len([b for b in betaArr if len(b) > 0][0])
# To save the image of each plot so we can create a gif at the end
images = [None for i in range(tSteps)]
for i in range(tSteps):
clear_output(wait=True)
fig, ax = plt.subplots()
visCircles([centerArr[p][i] for p in range(len(centerArr))], [radiusArr[p][i] for p in range(len(radiusArr))], ax=ax)
for particleIndex in range(len(betaArr)):
visContacts(centerArr[particleIndex][i], radiusArr[particleIndex][i],
betaArr[particleIndex][:,i], ax=ax, forceColors=forceColorArr[particleIndex])#, alphaArr=alphaArr[particleIndex][:,i])
ax.set_xlim([0, imageSize[1]])
ax.set_ylim([0, imageSize[0]])
ax.set_aspect('equal')
ax.set_title(f'Frame {startIndex + i}')
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
images[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
images[0].save(outputDir + 'contact_tracking.gif', save_all=True, append_images=images[1:], duration=fps, optimize=True, loop=True)
```
|
{
"source": "JFeaux/cython_demo",
"score": 3
}
|
#### File: cython_demo/passing_numpy_array/run.py
```python
import array_tools
import time
import numpy as np
class Timer(object):
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.end = time.clock()
self.interval = self.end - self.start
if __name__ == '__main__':
array = np.linspace(0, 10, 10000)
with Timer() as t:
result = np.sum(array)
print('{:7.5f} (s) {:7.5f}'.format(t.interval, result))
with Timer() as t:
result = array_tools.sum(array)
print('{:7.5f} (s) {:7.5f}'.format(t.interval, result))
```
|
{
"source": "JFeaux/flash_cards",
"score": 3
}
|
#### File: JFeaux/flash_cards/flash_cards.py
```python
import argparse
import Tkinter as tk
import tkFont
import numpy as np
import operator
color = "#%02x%02x%02x" % (255, 51, 102)
operations = {
'multiply' : u'\u00D7',
'divide' : u'\u00F7',
'add' : '+',
'subtract' : u'\u2212'
}
operators = {
'multiply': operator.mul,
'divide': operator.mul,
'add': operator.add,
'subtract': operator.add
}
def get_problems(args):
problems = []
if args.focus:
a, b = args.focus - 1, args.focus
else:
a, b = 0, args.max_int
for i in range(a, b):
for j in range(args.max_int):
x, y = i + 1, j + 1
op = operations[args.fact_type]
result = operators[args.fact_type](x, y)
if args.fact_type == 'add' or args.fact_type == 'multiply':
Q = u'{} {} {} = '.format(y, op, x)
A = u'{} {} {} = {}'.format(y, op, x, result)
problems.append((Q, A))
else:
if j >= i:
Q = u'{} {} {} = '.format(result, op, x)
A = u'{} {} {} = {}'.format(result, op, x, y)
problems.append((Q, A))
return problems
class FlashCard(tk.Frame):
def __init__(self, parent, text):
tk.Frame.__init__(self, master=parent)
font = tkFont.Font(family='Arial', size=120)
self.button = tk.Button(self, text=text, font=font,
fg=color,
bg='white',
bd=0,
activeforeground=color,
activebackground='white',
highlightthickness=0)
self.button.bind('<Return>',self.send)
self.button.pack()
self.place(anchor='c', relx=.5, rely=.5)
self.button.focus()
parent.wait_window(self.button)
def send(self,event=None):
self.destroy()
class Options:
def __init__(self, parent):
#tk.Frame.__init__(self, master=parent)
self.frame = tk.Frame(parent)
self.shuffle = 1
self.fact_type = 'multiply'
self.focus = 0
self.max_int = tk.IntVar()
self.max_int.set(12)
pady = 30
padx = 30
shuffle_frame = tk.Frame(self.frame)
shuffle_label = tk.Label(shuffle_frame,
text='Shuffle Facts?')
self.shuffle_b1 = tk.Button(shuffle_frame,
text='Yes',
state=tk.DISABLED,
disabledforeground='blue',
command=lambda: self.set_shuffle(0))
self.shuffle_b2 = tk.Button(shuffle_frame,
text='No',
disabledforeground='blue',
command=lambda: self.set_shuffle(1))
shuffle_label.grid(row=0, column=0, columnspan=2)
self.shuffle_b1.grid(row=1, column=0)
self.shuffle_b2.grid(row=1, column=1)
shuffle_frame.grid(row=0, column=1, padx=padx)
type_frame = tk.Frame(self.frame)
type_label = tk.Label(type_frame,
text='Fact Type')
self.type_buttons = [0 for i in range(4)]
self.type_buttons[0] = tk.Button(type_frame,
text='+',
disabledforeground='blue',
command=lambda: self.set_type(0))
self.type_buttons[1] = tk.Button(type_frame,
text=u'\u2212',
disabledforeground='blue',
command=lambda: self.set_type(1))
self.type_buttons[2] = tk.Button(type_frame,
text=u'\u00D7',
disabledforeground='blue',
command=lambda: self.set_type(2),
state=tk.DISABLED
)
self.type_buttons[3] = tk.Button(type_frame,
text=u'\u00F7',
disabledforeground='blue',
command=lambda: self.set_type(3))
type_label.grid(row=0, column=0, columnspan=2)
self.type_buttons[0].grid(row=1, column=0)
self.type_buttons[1].grid(row=1, column=1)
self.type_buttons[2].grid(row=2, column=0)
self.type_buttons[3].grid(row=2, column=1)
type_frame.grid(row=0, column=0)
max_frame = tk.Frame(self.frame)
max_text = tk.Label(max_frame,
text='Max Number')
max_entry = tk.Entry(max_frame,
justify='left',
width=3,
textvariable=self.max_int)
max_text.grid(row=0, column=0)
max_entry.grid(row=1, column=0)
max_frame.grid(row=1, column=0, pady=pady)
focus_frame = tk.Frame(self.frame)
focus_text = tk.Label(focus_frame,
text='Focus')
self.focus_entry = tk.Entry(focus_frame,
justify='left',
width=3)
focus_text.grid(row=0, column=0)
self.focus_entry.grid(row=1, column=0)
focus_frame.grid(row=1, column=1, padx=padx, pady=pady)
self.advance = tk.Button(self.frame, text='Continue', command=self.start)
self.advance.grid(row=2, column=0, columnspan=2)
self.frame.place(anchor='c', relx=0.5, rely=0.5)
def set_shuffle(self, val):
if val == 0:
self.shuffle_b1['state'] = tk.DISABLED
self.shuffle_b2['state'] = tk.NORMAL
self.shuffle = 1
else:
self.shuffle_b1['state'] = tk.NORMAL
self.shuffle_b2['state'] = tk.DISABLED
self.shuffle = 0
def set_type(self, val):
convert = ['add',
'subtract',
'multiply',
'divide']
self.fact_type = convert[val]
for i in range(4):
if i != val:
self.type_buttons[i]['state'] = tk.NORMAL
else:
self.type_buttons[i]['state'] = tk.DISABLED
def start(self):
try:
max_int = int(self.max_int.get())
focus = self.focus_entry.get()
if focus != '':
self.focus = int(focus)
if self.focus <= max_int:
self.max_int = max_int
self.frame.destroy()
except (AttributeError, ValueError):
pass
def main():
root = tk.Tk()
root.title('')
root.option_add('*Background', 'white')
root.option_add('*Foreground', 'black')
root.option_add('*Font', 'Arial 40')
## Full screen and set focus
root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
root.focus_set() # <-- move focus to this widget
## background color
root.configure(background='white')
while True:
options = Options(root)
root.wait_window(options.frame)
problems = get_problems(options)
if options.shuffle:
np.random.shuffle(problems)
for i in range(len(problems)):
Q = FlashCard(root, problems[i][0])
A = FlashCard(root, problems[i][1])
root.mainloop()
if __name__ == '__main__':
main()
```
|
{
"source": "JFeaux/Tools",
"score": 3
}
|
#### File: Tools/mpi/mpi_test.py
```python
import sys
from master_worker_mpi import MasterWorkerMPI
""" Script to test function MasterWorkerMPI"""
def test_func(x, m, b):
return m * x[0] + b
number_of_jobs = int(sys.argv[1])
m, b = -2., 10.
x0, xf = 0., 10.
dx = (xf - x0) / (number_of_jobs - 1)
jobs = [(x0 + i * dx,) for i in range(number_of_jobs)]
# Run jobs in parallel
results = MasterWorkerMPI(jobs, test_func, m, b)
keys = sorted(results.keys())
for key in keys:
print '',key, results[key]
```
|
{
"source": "jfecroft/DOS",
"score": 2
}
|
#### File: jfecroft/DOS/DOSModule.py
```python
import numpy as np
import scipy.constants
from math import pi
from collections import OrderedDict
import yaml
# pylint: disable=E1103
# pylint: disable=R0902
# pylint: disable=R0903
# pylint: disable=R0913,R0914
# define physical constants
EH2K = scipy.constants.physical_constants["hartree-kelvin relationship"][0]
K2EH = scipy.constants.physical_constants["kelvin-hartree relationship"][0]
ATOMICUNITOFTIME = scipy.constants.physical_constants["atomic unit of time"][0]
def get_data(filen):
"""reads a file of the form 1st line heading columns
2nd lines on values for those headings
returns a nested dictionary
converts values to float if possible"""
with open(filen) as input:
try:
data_dict = yaml.load(input)
except yaml.YAMLError as exc:
print(exc)
return data_dict
def _read_data(dir_loc, inputfile, qnmax):
"""read in the data from precalculated rovibrational energy levels of the
system files are og the form 'txtjXXXtxt' where XXX is the L quantum number
starting at 0
"""
    bound_states = []  # bound states for different L quantum numbers
for i in xrange(qnmax+1):
filen = dir_loc + inputfile.replace('j', 'j'+str(i))
try:
data = np.sort(np.loadtxt(filen, usecols=(1,)))
except IOError: # maximum quantum number reached set qnmax and return
qnmax = i-1
return bound_states, qnmax
if len(np.atleast_1d(data)) == 1:
data = np.reshape(data, 1) # accounts for behavior of 0d arrays
bound_states.append(data)
return bound_states, qnmax
def compute_dos(jqn, mqn, bound_states_c, bound_states_d, vmax,
energy_range=10.0, energy_offset=0.0):
"""
computes the dos for an atom moleucle collsiions as described in
Statistical aspects of ultracold resonant scattering Mayle, Ruzic and
Bohn
Variables
jqn - total jqn quantum number.
mqn - mj_qn projection quantum number.
bound_states_c - boundstates of the complex obtained from _read_data
bound_states_d - boundstates of the molecule obtained from _read_data
vmax - maximum vibrational quantum number.
energy_range - dos = number of states counted/energy range
energy_offset - calculate dos around energy = energy_offset
"""
if abs(mqn) > abs(jqn):
raise Exception('physically impossible (abs(mj_qn) > abs(jqn))')
limit = (energy_range/2.0)*K2EH # convert energy_range K to hartrees
energy_offset *= K2EH
    outside = np.ma.masked_outside  # mask entries in list outside range
count = np.ma.count # counts the number of unmasked entries in a list
abs_gs = bound_states_d[0][0]
num = 0
# looping over rotational state of dimer a
for nqn in xrange(0,len(bound_states_d)):
# looping over all l constant with jqn
for lqn in xrange(len(bound_states_c)):
# only include pairs which couple to form jqn
if abs(nqn-lqn) <= jqn and nqn+lqn >= jqn:
# looping over all vibrational levels of a
for vqn in xrange(min(len(bound_states_d[nqn]), vmax+1)):
# degeneracy
deg = len(xrange(max(-lqn, mqn-nqn),
min(lqn, mqn+nqn)))+1
# deg = 1
threshold_energy = bound_states_d[nqn][vqn]-abs_gs
num += count(
outside(bound_states_c[lqn],
-limit-threshold_energy+energy_offset,
limit-threshold_energy+energy_offset))*deg
dos = (float(num)/energy_range)*1.0E-3
lifetime = dos*1.0E3*EH2K*2.0*pi*ATOMICUNITOFTIME*1.0e9
# return dos in mK-1 and lifetime in ns
return dos, lifetime
def get_dos(cmplx_dirn, cmplx_filen, dimer_dirn, dimer_filen,
jqn=0, mqn=0, nmax=100, vmax=9999,
energy_offset=0.0, **kwargs):
"""
    simple wrapper around _read_data and compute_dos
Variables
jqn - total J quantum number.
mqn - mj_qn projection quantum number.
    nmax - maximum allowed rotational quantum number of the molecule.
vmax - maximum allowed vibrational quantum number of the molecule.
energy_offset - calculate dos around energy = energy_offset
"""
lmax = nmax + jqn
bound_states_d, nmax = _read_data(dimer_dirn, dimer_filen, nmax)
bound_states_c, lmax = _read_data(cmplx_dirn, cmplx_filen, lmax)
dos, lifetime = compute_dos(jqn, mqn, bound_states_c, bound_states_d,
vmax, energy_offset=energy_offset)
return dos, lifetime
def compute_mm_dos(jqn, mj_qn, nmax, vmax, lmax,
bound_states_d, bound_states_c,
energy_range=10.0):
"""
    computes the DOS for molecule-molecule collisions as described in
Scattering of Ultracold Molecules in the Highly Resonant Regime --
Mayle, Ruzic, Quememer and Bohn
Variables
jqn - total J quantum number.
mqn - mj_qn projection quantum number.
bound_states_c - boundstates of the complex obtained from _read_data
bound_states_d - boundstates of the molecule obtained from _read_data
vmax - maximum vibrational quantum number.
lmax - maximum end-over-end rotational quantum number of the two
molecules
nmax - maximum rotational quantum number of a single molecule.
energy_range - dos = number of states counted/energy range
"""
if abs(mj_qn) > abs(jqn):
raise Exception('physically impossible (abs(mj_qn) > abs(jqn))')
num = 0 # variable to hold the number of states between limits
limit = (energy_range/2.0)*K2EH # convert energy_range K to hartrees
abs_gs = bound_states_d[0][0] # energy of the absolute ground state
for nqn in xrange(max(0, jqn-lmax), min(2*nmax, lmax-jqn)+1):
for lqn in xrange(abs(jqn-nqn), min(lmax, jqn+nqn)+1):
for n1qn in xrange(max(0, nqn-nmax), nmax+1):
for n2qn in xrange(abs(nqn-n1qn), min(nqn+n1qn, nmax)+1):
# looping over all vibrational levels of dimer1
for v1qn in xrange(min(len(bound_states_d[n1qn]),
vmax+1)):
# looping over all vibrational levels of dimer2
for v2qn in xrange(min(len(bound_states_d[n2qn]),
vmax+1)):
threshold_energy = (
bound_states_d[n1qn][v1qn] - abs_gs +
bound_states_d[n2qn][v2qn] - abs_gs)
if (bound_states_c[lqn][0] >
limit-threshold_energy):
# the lowest state is higher than highest
# threshold
break
else:
start_end = np.searchsorted(
bound_states_c[lqn],
[-limit-threshold_energy,
limit-threshold_energy],
'left')
deg = (abs(max(-lqn, mj_qn-nqn)) +
min(lqn, mj_qn+nqn) + 1)
num += deg*(start_end[1] - start_end[0])
# return dos in per uK i.e.times 10**-6K
dos = (float(num)/energy_range)*1.0E-6
# return lifetime in ms
lifetime = dos*1.0E6*EH2K*2.0*pi*ATOMICUNITOFTIME * 1.0e3
return dos, lifetime
def get_mm_dos(cmplx_dirn, cmplx_filen, dimer_dirn, dimer_filen,
jqn=0, mqn=0, nmax=5, vmax=5, **kwargs):
"""
    simple wrapper around _read_data and compute_mm_dos
Variables
jqn - total J quantum number.
mqn - mj_qn projection quantum number.
    nmax - maximum allowed rotational quantum number of the molecule.
vmax - maximum allowed vibrational quantum number of the molecule.
energy_offset - calculate dos around energy = energy_offset
"""
lmax = 2*nmax+jqn
bound_states_d, nmax = _read_data(dimer_dirn, dimer_filen, nmax)
bound_states_c, lmax = _read_data(cmplx_dirn, cmplx_filen, lmax)
dos, lifetime = compute_mm_dos(jqn, mqn, nmax, vmax, lmax, bound_states_d, bound_states_c)
return dos, lifetime
```
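The two wrappers above only need the directories and filename templates of the precalculated bound-state files. Below is a minimal usage sketch; the paths, file names and the importable module name `DOSModule` are assumptions, not part of the repository.

```python
# Hypothetical usage of get_dos; paths and file templates are placeholders and
# must point at bound-state files following the 'j' naming used by _read_data.
from DOSModule import get_dos

dos, lifetime = get_dos(cmplx_dirn='data/complex/', cmplx_filen='levels_j.dat',
                        dimer_dirn='data/dimer/', dimer_filen='levels_j.dat',
                        jqn=0, mqn=0, nmax=100, vmax=9999)
print('DOS (mK^-1): {}, lifetime (ns): {}'.format(dos, lifetime))
```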
|
{
"source": "jfecroft/Hamilton",
"score": 3
}
|
#### File: Hamilton/src/generic.py
```python
from yaml import load
def reduce_output(func, item, *args, **kwargs):
"""
simple function to reduce output from existing functions
if func returns an iterable - just return item
"""
def inner_func(*args, **kwargs):
return func(*args, **kwargs)[item]
return inner_func
def load_yaml(filen):
"""
    load a YAML file and return the parsed dictionary
"""
with open('{}.yml'.format(filen), 'r') as open_file:
return_dict = load(open_file)
return return_dict
```
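A quick sketch of how these two helpers might be used; `divmod` and the `'config'` filename are illustrative choices, not taken from the repository.

```python
# reduce_output wraps a function and keeps only one item of its return value.
from generic import reduce_output, load_yaml

quotient_only = reduce_output(divmod, 0)
print(quotient_only(17, 5))  # 3 -- the first element of divmod's (quotient, remainder)

# load_yaml('config') would read 'config.yml' and return the parsed content.
```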
|
{
"source": "jfecroft/parse-text",
"score": 3
}
|
#### File: jfecroft/parse-text/repetition_count.py
```python
from itertools import tee, izip, islice, groupby
import yaml
from collections import defaultdict
from cluster import HierarchicalClustering # , KMeansClustering
from fuzzywuzzy import fuzz
from functools import partial
# pylint: disable=R0913
FUZZY_METRICS = {
'ratio': fuzz.ratio,
'partial_ratio': fuzz.partial_ratio,
'partial_token_sort_ratio': fuzz.partial_token_sort_ratio,
'partial_token_set_ratio': fuzz.partial_token_set_ratio,
'token_set_ratio': fuzz.token_set_ratio,
'token_sort_ratio': fuzz.token_sort_ratio,
}
def nwise(iterable, npairs=2):
"""
    return an iterator which returns consecutive npairs
"""
iters = tee(iterable, npairs)
for i, iterator in enumerate(iters):
next(islice(iterator, i, i), None)
return izip(*iters)
def load_yaml(filen):
"""
    load a YAML file and return the parsed dictionary
"""
with open('{}.yml'.format(filen), 'r') as open_file:
return_dict = yaml.load(open_file)
return return_dict
class GroupWords(object):
"""
methods used to group words
"""
@classmethod
def group_phrases(cls, items, num_max, iter_num=0,
return_groups=None, item=-1):
"""
recursively group until groups small enough
"""
if return_groups is None:
return_groups = []
if len(items) <= num_max:
return_groups.append(items)
else:
for group in GroupWords.group_by(items, iter_num+1, item=item):
GroupWords.group_phrases(group,
num_max,
iter_num+1,
return_groups=return_groups)
return return_groups
@staticmethod
def group_by(items, num=0, item=-1):
"""
return phrases grouped by their initial *num* letters
"""
if item >= 0:
func = lambda x: x[item][:num]
else:
func = lambda x: x[:num]
items.sort(key=func)
return [list(group) for _, group in groupby(items, func)]
class CountRepetitions(object):
"""
count repetitions in text
"""
def __init__(self, books):
self.books = books
self.repeated_phrases = set() # store matched phrases
@staticmethod
def fuzzy_distance(word1, word2, metric):
"""
return the fuzzy distance between two phrases
"""
return 100 - metric(word1[0], word2[0])
def count_exact_repetitions(self, npairs=7):
"""
group identical words
"""
words = self.get_words(npairs=npairs)
exact_repetitions = defaultdict(list)
for word, line in words:
exact_repetitions[word].append(line)
return exact_repetitions
def update_repeated_phrases(self, items):
"""
        create a set of already matched phrases,
        needed to avoid counting 3-word repetitions inside a 4-word phrase, for instance
"""
reps = sum(len(item[1]) for item in items)
if reps > 1:
self.repeated_phrases.update(
{(' '.join(words), line_num)
for item in items
for line_num in item[1]
for n in range(1, len(item[0].split())+1)
for words in nwise(item[0].split(),
npairs=n)})
def count_fuzzy_repetitions(
self, dist=10, max_group_size=50, npairs=7,
dist_func='token_sort_ratio'):
"""
return a fuzzy matching of phrases
"""
fuzzy_repetitions = list()
unique_words = self.count_exact_repetitions(npairs=npairs).items()
groups = GroupWords.group_phrases(unique_words, max_group_size, item=0)
dist_func = partial(CountRepetitions.fuzzy_distance,
metric=FUZZY_METRICS[dist_func])
for group in groups:
if len(group) == 1:
fuzzy_repetitions.append(group)
else:
clusters = HierarchicalClustering(
group,
dist_func).getlevel(dist)
fuzzy_repetitions.extend(clusters)
# update format
for i, repeated_phrase in enumerate(fuzzy_repetitions):
self.update_repeated_phrases(repeated_phrase)
phrase = {item[0] for item in repeated_phrase}
lines = {line for item in repeated_phrase for line in item[1]}
fuzzy_repetitions[i] = (phrase, lines)
return fuzzy_repetitions
def get_words(self, npairs):
"""
return a list of tuples of the form (word, line)
"""
words = []
for i, book in enumerate(self.books):
with open(book+'.txt') as open_file:
content = open_file.readlines()
for line_num, line in enumerate(content):
for word in nwise(line.split(), npairs=npairs):
word = ' '.join(word)
word_line = (word, '{}.{}'.format(i+1, line_num+1))
if word_line not in self.repeated_phrases:
words.append(word_line)
words.sort()
return words
```
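A rough usage sketch of the classes above (the module targets Python 2, as the `izip` import shows); the book names are placeholders for `.txt` files that would have to exist on disk.

```python
# Hypothetical usage: count repeated 3-word phrases across book1.txt and book2.txt.
from repetition_count import CountRepetitions

counter = CountRepetitions(['book1', 'book2'])
exact = counter.count_exact_repetitions(npairs=3)
for phrase, lines in exact.items():
    if len(lines) > 1:
        print phrase, lines  # Python 2 print statement, matching the module
fuzzy = counter.count_fuzzy_repetitions(dist=10, npairs=3)
```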
|
{
"source": "jfecunha/NLPiper",
"score": 2
}
|
#### File: nlpiper/core/document.py
```python
from copy import deepcopy
from typing import Any, List, Optional
from pydantic import BaseModel, validator, Extra
from nlpiper.logger import log
def _check_if_embedded_in_numpy_array(v):
try:
import numpy as np
if not isinstance(v, np.ndarray):
raise TypeError('Embedding value is not a numpy array.')
except ImportError:
log.error("To use embeddings please install numpy. "
"See the docs at https://numpy.org/ for more information.")
raise
return v
class Token(BaseModel):
original: str
cleaned: Optional[str] = None
lemma: Optional[str] = None
stem: Optional[str] = None
ner: Optional[str] = None
embedded: Optional[Any] = None
def __init__(self, original: str, **data) -> None:
super().__init__(original=original, cleaned=original, **data)
@validator('embedded', pre=True)
def check_if_embedded_in_numpy_array(cls, v):
return _check_if_embedded_in_numpy_array(v)
class Config:
validate_assignment = True
extra = Extra.allow
class Document(BaseModel):
original: str
cleaned: str
tokens: Optional[List[Token]] = None
embedded: Optional[Any] = None
steps: List[str] = []
def __init__(self, original: str, **data) -> None:
super().__init__(original=original, cleaned=original, **data)
def _deepcopy(self):
return deepcopy(self)
@validator('embedded', pre=True)
def check_if_embedded_in_numpy_array(cls, v):
return _check_if_embedded_in_numpy_array(v)
class Config:
validate_assignment = True
extra = Extra.allow
```
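A short sketch of the data model above: constructing a `Document` or `Token` mirrors `original` into `cleaned`, and the optional fields can be filled in later.

```python
# Minimal sketch of the Document/Token model defined above.
from nlpiper.core.document import Document, Token

doc = Document('Hello World')
assert doc.original == 'Hello World'
assert doc.cleaned == 'Hello World'   # cleaned starts as a copy of original
assert doc.tokens is None and doc.steps == []

token = Token('Hello')
token.lemma = 'hello'                 # optional fields are validated on assignment
doc.tokens = [token]
```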
#### File: nlpiper/transformers/tokenizers.py
```python
from typing import Optional
from nlpiper.core.document import (
Document,
Token
)
from nlpiper.logger import log
from nlpiper.transformers.base import (
BaseTransformer,
TransformersType,
add_step,
validate
)
__all__ = [
"BasicTokenizer",
"MosesTokenizer",
"StanzaTokenizer"
]
class BasicTokenizer(BaseTransformer):
"""Basic tokenizer which tokenizes a document by splitting tokens by its blank spaces."""
@validate(TransformersType.TOKENIZERS)
@add_step
def __call__(self, doc: Document, inplace: bool = False) -> Optional[Document]:
"""Tokenize the document in a list of tokens.
Args:
doc (Document): Text to be tokenized.
inplace (bool): if False will return a new doc object,
otherwise will change the object passed as parameter.
Returns: Document
"""
d = doc if inplace else doc._deepcopy()
d.tokens = [Token(token) for token in d.cleaned.split()]
return None if inplace else d
class MosesTokenizer(BaseTransformer):
"""SacreMoses tokenizer.
Transformer to tokenize a Document using Sacremoses, https://github.com/alvations/sacremoses
"""
def __init__(self, *args, **kwargs):
"""SacreMoses tokenizer.
Args:
*args: See the docs at https://github.com/alvations/sacremoses for more information.
**kwargs: See the docs at https://github.com/alvations/sacremoses for more information.
"""
super().__init__(*args, **kwargs)
try:
from sacremoses import MosesTokenizer
self.t = MosesTokenizer(*args, **kwargs)
except ImportError:
log.error("Please install SacreMoses. "
"See the docs at https://github.com/alvations/sacremoses for more information.")
raise
@validate(TransformersType.TOKENIZERS)
@add_step
def __call__(self, doc: Document, inplace: bool = False) -> Optional[Document]:
"""Tokenize the document in a list of tokens.
Args:
doc (Document): Document to be tokenized.
inplace (bool): if False will return a new doc object,
otherwise will change the object passed as parameter.
Returns: Document
"""
d = doc if inplace else doc._deepcopy()
d.tokens = [Token(token) for token in self.t.tokenize(d.cleaned)]
return None if inplace else d
class StanzaTokenizer(BaseTransformer):
"""Stanza tokenizer.
Transformer to tokenize a Document using stanza, https://github.com/stanfordnlp/stanza
"""
def __init__(self, language: str = 'en', processors='tokenize', *args, **kwargs):
"""Stanza tokenizer.
Args:
language (str): document main language.
            *args: See the docs at https://stanfordnlp.github.io/stanza/tokenize.html for more information.
            **kwargs: See the docs at https://stanfordnlp.github.io/stanza/tokenize.html for more information.
"""
super().__init__(language=language, processors=processors, *args, **kwargs)
try:
import stanza
from stanza import Pipeline
stanza.download(language)
assert 'tokenize' in processors.lower(), 'StanzaTokenizer needs `"tokenize"` on processors'
self.p = Pipeline(lang=language, processors=processors, tokenize_pretokenized=False, *args,
**kwargs)
self.processors = processors
except ImportError:
log.error("Please install Stanza. "
"See the docs at https://github.com/stanfordnlp/stanza for more information.")
raise
@validate(TransformersType.TOKENIZERS)
@add_step
def __call__(self, doc: Document, inplace: bool = False) -> Optional[Document]:
"""Tokenize the document in a list of tokens.
Args:
doc (Document): Document to be tokenized.
inplace (bool): if False will return a new doc object,
otherwise will change the object passed as parameter.
Returns: Document
"""
d = doc if inplace else doc._deepcopy()
tokens = []
for sentence in self.p(doc.cleaned).sentences:
for word in sentence.words:
token = Token(word.parent.text)
if 'lemma' in self.processors.lower():
token.lemma = word.lemma
if 'ner' in self.processors.lower():
token.ner = word.parent.ner
tokens.append(token)
d.tokens = tokens
return None if inplace else d
```
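A minimal sketch of the `BasicTokenizer` defined above, assuming `nlpiper` is installed; the other tokenizers follow the same call convention but need their optional dependencies.

```python
from nlpiper.core.document import Document
from nlpiper.transformers.tokenizers import BasicTokenizer

doc = Document('the quick brown fox')
tokenizer = BasicTokenizer()

new_doc = tokenizer(doc)                       # inplace=False: returns a new Document
print([t.original for t in new_doc.tokens])    # ['the', 'quick', 'brown', 'fox']

tokenizer(doc, inplace=True)                   # inplace=True: mutates doc, returns None
```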
#### File: tests/transformers/test_cleaners.py
```python
import pytest
from nlpiper.transformers.cleaners import (
CleanAccents,
CleanEmail,
CleanEOF,
CleanMarkup,
CleanNumber,
CleanPunctuation,
CleanURL,
)
from nlpiper.core.document import (
Document,
Token
)
class TestCleanersValidations:
@pytest.mark.parametrize('inputs', ["string", 2])
def test_with_invalid_input(self, inputs):
with pytest.raises(TypeError):
c = CleanEOF()
c(inputs)
@pytest.mark.parametrize('inputs', ["test"])
def test_with_doc_tokens(self, inputs):
doc = Document("test")
doc.tokens = list()
doc.tokens.append(Token("test"))
c = CleanEOF()
with pytest.raises(RuntimeError):
c(doc)
@pytest.mark.parametrize('hide_available_pkg', ['bs4'], indirect=['hide_available_pkg'])
def test_if_no_package(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
CleanMarkup()
class TestCleanURL:
@pytest.mark.parametrize('inputs,results', [
('TEST', 'TEST'),
('test www.web.com', 'test '),
('testwww.web.com', 'test'),
('test http:\\www.web.com', 'test '),
('test https:\\www.web.com', 'test '),
('testhttps:\\www.web.com', 'test'),
])
def test_clean_url(self, inputs, results):
doc = Document(inputs)
# Inplace False
c = CleanURL()
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
class TestCleanEmail:
@pytest.mark.parametrize('inputs,results', [
('TEST', 'TEST'),
('test <EMAIL>', 'test '),
('<EMAIL>', ''),
('<EMAIL>', ''),
])
def test_clean_email(self, inputs, results):
doc = Document(inputs)
c = CleanEmail()
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
class TestCleanNumber:
@pytest.mark.parametrize('inputs,results', [
('TEST', 'TEST'),
('test 12 test', 'test test'),
('test123test', 'testtest'),
])
def test_clean_number(self, inputs, results):
doc = Document(inputs)
c = CleanNumber()
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
class TestCleanPunctuation:
@pytest.mark.parametrize('inputs,results', [
('TEST.%$#"#', 'TEST'),
(r'!"te""!"#$%&()*+,-.s/:;<=>?@[\]^_`{|}~""t', 'test'),
])
def test_clean_punctuation(self, inputs, results):
doc = Document(inputs)
c = CleanPunctuation()
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
class TestCleanMarkup:
@pytest.mark.parametrize('inputs,results', [
('<html><title>TEST</title>', 'TEST'),
('<p class="title"><b>test 12 test</b></p>', 'test 12 test'),
('<?xml version="1.0" encoding="UTF-8"?><note><body>test123test</body></note>', 'test123test'),
('<html><title>TEST<br><br>TEST</title>', 'TEST TEST'),
])
def test_clean_markup(self, inputs, results):
pytest.importorskip('bs4')
doc = Document(inputs)
c = CleanMarkup()
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
class TestCleanAccents:
@pytest.mark.parametrize('mode,inputs,results', [
('unicode', 'àáâãäåçèéêë', 'aaaaaaceeee'),
('ascii', 'àáâãäåçèéêë', 'aaaaaaceeee'),
('unicode', 'ìíîïñòóôõöùúûüý', 'iiiinooooouuuuy'),
('ascii', 'ìíîïñòóôõöùúûüý', 'iiiinooooouuuuy'),
('unicode', 'this is à test', 'this is a test'),
('unicode', 'this is a test', 'this is a test'),
('ascii', 'this is à test', 'this is a test'),
('unicode', '\u0625', '\u0627'),
('ascii', '\u0625', ''),
('unicode', 'o\u0308', 'o'),
('ascii', 'o\u0308', 'o'),
('unicode', '\u0300\u0301\u0302\u0303', ''),
('ascii', '\u0300\u0301\u0302\u0303', ''),
('unicode', 'o\u0308\u0304', 'o'),
('ascii', 'o\u0308\u0304', 'o'),
])
def test_clean_accents(self, mode, inputs, results):
doc = Document(inputs)
c = CleanAccents(mode=mode)
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
@pytest.mark.parametrize('mode', ["random", 2])
def test_with_invalid_mode(self, mode):
with pytest.raises(ValueError):
CleanAccents(mode=mode)
class TestCleanEOF:
@pytest.mark.parametrize('inputs,results', [
('', ''),
('a basic phrase', 'a basic phrase'),
('line\nline', 'line line'),
('line.\nline', 'line. line')
])
def test_clean_eof(self, inputs, results):
doc = Document(inputs)
c = CleanEOF()
# Inplace False
out = c(doc)
assert out.cleaned == results
assert out.steps == [repr(c)]
assert doc.cleaned == inputs
assert doc.steps == []
# Inplace True
out = c(doc, True)
assert doc.cleaned == results
assert doc.steps == [repr(c)]
assert out is None
```
#### File: tests/transformers/test_embeddings.py
```python
import pytest
from nlpiper.core.document import Document
from nlpiper.transformers.embeddings import GensimEmbeddings
from nlpiper.transformers.tokenizers import BasicTokenizer
from nlpiper.transformers.normalizers import CaseTokens
class TestGensimEmbeddings:
pytest.importorskip('gensim')
pytest.importorskip('numpy')
import gensim.downloader
import numpy as np
glove_vectors = gensim.downloader.load('glove-twitter-25')
@pytest.mark.parametrize('apply_doc', ['sum', 'mean'])
@pytest.mark.parametrize('document', ['Test random stuff.', ''])
def test_embedding_token(self, apply_doc, document):
doc = Document(document)
        # To apply embeddings it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
e = GensimEmbeddings(self.glove_vectors, apply_doc)
# Inplace False
out = e(doc)
assert out.steps == [repr(t), repr(e)]
assert out.embedded.shape == (25,)
assert all([isinstance(token.embedded, self.np.ndarray) for token in out.tokens])
assert all([token.embedded is None for token in doc.tokens])
assert doc.steps == [repr(t)]
assert doc.embedded is None
# Inplace True
out = e(doc, True)
assert all([isinstance(token.embedded, self.np.ndarray) for token in doc.tokens])
assert doc.steps == [repr(t), repr(e)]
assert doc.embedded.shape == (25,)
assert out is None
def test_random_apply_doc(self):
with pytest.raises(AssertionError):
GensimEmbeddings(self.glove_vectors, 'random')
class TestEmbeddings:
pytest.importorskip('hunspell')
pytest.importorskip('numpy')
import gensim
@pytest.mark.parametrize('inputs', ["string", 2])
def test_with_invalid_input(self, inputs):
with pytest.raises(TypeError):
t = GensimEmbeddings(self.gensim.models.KeyedVectors(1))
t(inputs)
@pytest.mark.parametrize('inputs', ["test"])
def test_without_doc_tokens(self, inputs):
doc = Document("test")
t = GensimEmbeddings(self.gensim.models.KeyedVectors(1))
with pytest.raises(RuntimeError):
t(doc)
@pytest.mark.parametrize('hide_available_pkg', ['numpy', 'gensim'], indirect=['hide_available_pkg'])
def test_if_no_package(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
GensimEmbeddings(1)
@pytest.mark.parametrize('document', ['Test random stuff.', ''])
def test_embedding_applied_twice(self, document):
doc = Document(document)
        # To apply embeddings it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
e = GensimEmbeddings(self.gensim.models.KeyedVectors(1))
with pytest.raises(RuntimeError):
e(e(doc))
@pytest.mark.parametrize('document', ['Test random stuff.', ''])
def test_embedding_applied_then_normalizer(self, document):
doc = Document(document)
        # To apply embeddings it is necessary to have tokens
t = BasicTokenizer()
n = CaseTokens()
t(doc, inplace=True)
e = GensimEmbeddings(self.gensim.models.KeyedVectors(1))
with pytest.raises(RuntimeError):
n(e(doc))
```
#### File: tests/transformers/test_normalizers.py
```python
import pytest
from nlpiper.transformers.normalizers import (
CaseTokens,
RemovePunctuation,
RemoveStopWords,
VocabularyFilter,
Stemmer,
SpellCheck
)
from nlpiper.transformers.tokenizers import BasicTokenizer
from nlpiper.core.document import (
Document,
Token
)
class TestNormalizersValidations:
@pytest.mark.parametrize('inputs', ["string", 2])
def test_with_invalid_input(self, inputs):
with pytest.raises(TypeError):
t = CaseTokens()
t(inputs)
@pytest.mark.parametrize('inputs', ["test"])
def test_without_doc_tokens(self, inputs):
doc = Document("test")
t = CaseTokens()
with pytest.raises(RuntimeError):
t(doc)
@pytest.mark.parametrize('hide_available_pkg', ['nltk'], indirect=['hide_available_pkg'])
def test_if_no_package_nltk(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
RemoveStopWords()
with pytest.raises(ModuleNotFoundError):
SpellCheck(max_distance=1)
with pytest.raises(ModuleNotFoundError):
Stemmer(version='nltk')
@pytest.mark.parametrize('hide_available_pkg', ['hunspell'], indirect=['hide_available_pkg'])
def test_if_no_package_hunspell(self, hide_available_pkg): # noqa: F811
with pytest.raises(ModuleNotFoundError):
SpellCheck(max_distance=None)
with pytest.raises(ModuleNotFoundError):
Stemmer(version='hunspell')
class TestCaseTokens:
@pytest.mark.parametrize('mode,inputs,results', [
('lower', ['TEST'], ['test']),
('lower', ['test'], ['test']),
('upper', ['test'], ['TEST']),
('upper', ['TEST'], ['TEST']),
])
def test_modes(self, mode, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = CaseTokens(mode)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out2 = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out2 is None
@pytest.mark.parametrize('mode', [1, 'other'])
def test_non_existent_mode(self, mode):
with pytest.raises(ValueError):
CaseTokens(mode)
class TestRemovePunctuation:
@pytest.mark.parametrize('inputs,results', [
(['TEST.%$#"#'], ['TEST']),
([r'!"te""!"#$%&()*+,-.s/:;<=>?@[\]^_`{|}~""t'], ['test']),
])
def test_remove_punctuation(self, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = RemovePunctuation()
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestRemoveStopWords:
@pytest.mark.parametrize('sensitive,inputs,results', [
(True, ['This', 'is', 'a', 'stop', 'Word'], ['This', '', '', 'stop', 'Word']),
(False, ['This', 'is', 'a', 'stop', 'Word'], ['', '', '', 'stop', 'Word']),
])
def test_remove_stop_words_w_case_sensitive(self, sensitive, inputs, results):
pytest.importorskip('nltk')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = RemoveStopWords(case_sensitive=sensitive)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestVocabularyFilter:
vocabulary = ['this', 'is', 'a', 'token']
@pytest.mark.parametrize('sensitive,inputs,results', [
(True, ['This', 'is', 'a', 'Token'], ['', 'is', 'a', '']),
(False, ['This', 'is', 'a', 'Token'], ['This', 'is', 'a', 'Token']),
])
def test_vocabulary_filter_w_case_sensitive(self, sensitive, inputs, results):
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = VocabularyFilter(vocabulary=self.vocabulary, case_sensitive=sensitive)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestSpellCheck:
@pytest.mark.parametrize('max_distance,inputs,results', [
(None, ['This', 'isx', 'a', 'stop', 'Word'], ['This', '', 'a', 'stop', 'Word']),
(1, ['Thisx', 'iszk', 'a', 'stop', 'Word'], ['This', 'iszk', 'a', 'stop', 'Word']),
])
def test_spell_checking(self, max_distance, inputs, results):
pytest.importorskip('hunspell')
pytest.importorskip('nltk')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = SpellCheck(max_distance=max_distance)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
class TestStemmer:
@pytest.mark.parametrize('version,language,inputs,results', [
('nltk', 'english', ['This', 'computer', 'is', 'fastest', 'because'],
['this', 'comput', 'is', 'fastest', 'becaus']),
('hunspell', 'en_GB', ['This', 'computer', 'is', 'fastest', 'because'],
['this', 'computer', 'is', 'fast', 'because'])])
def test_stemmer(self, version, language, inputs, results):
pytest.importorskip('nltk')
pytest.importorskip('hunspell')
results_expected = [Token(tk) for tk in inputs]
for tk, out in zip(results_expected, results):
tk.cleaned = out
tk.stem = out
doc = Document(" ".join(inputs))
        # To apply a normalizer it is necessary to have tokens
t = BasicTokenizer()
t(doc, inplace=True)
n = Stemmer(version=version, language=language)
# Inplace False
out = n(doc)
assert out.tokens == results_expected
assert out.steps == [repr(t), repr(n)]
assert doc.tokens == [Token(token) for token in inputs]
assert doc.steps == [repr(t)]
# Inplace True
out = n(doc, True)
assert doc.tokens == results_expected
assert doc.steps == [repr(t), repr(n)]
assert out is None
def test_unavailable_version(self):
with pytest.raises(ValueError):
Stemmer(version='random')
```
|
{
"source": "jfehre/meine-stadt-transparent",
"score": 2
}
|
#### File: mainapp/models/user_profile.py
```python
from io import BytesIO
from typing import Optional
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
from mainapp.functions.minio import minio_client, minio_pgp_keys_bucket
class UserProfile(models.Model):
user = models.OneToOneField(
User,
null=True,
related_name="profile",
verbose_name=_(u"User"),
on_delete=models.CASCADE,
)
# Normally pgp keys are 40 chars long (for sha-1), but we're going to use some padding in case a different
# hash is used
pgp_key_fingerprint = models.CharField(max_length=64, null=True, blank=True)
def add_pgp_key(self, pgp_key_fingerprint: str, pgp_key: str):
"""This should eventually be abstracted away into a file manager class"""
key_bytes = pgp_key.encode()
minio_client().put_object(
minio_pgp_keys_bucket,
pgp_key_fingerprint,
BytesIO(key_bytes),
len(key_bytes),
)
self.pgp_key_fingerprint = pgp_key_fingerprint
self.save()
def remove_pgp_key(self):
# If the user clicks "remove" when the key is already removed, we can ignore that
if not self.pgp_key_fingerprint:
return
minio_client().remove_object(minio_pgp_keys_bucket, self.pgp_key_fingerprint)
self.pgp_key_fingerprint = None
self.save()
def get_pgp_key(self) -> Optional[bytes]:
"""Returns fingerprint and key"""
if not self.pgp_key_fingerprint:
return None
return (
minio_client()
.get_object(minio_pgp_keys_bucket, self.pgp_key_fingerprint)
.read()
)
class Meta:
verbose_name = _("User profile")
verbose_name_plural = _("User profiles")
def __unicode__(self):
return "User profile: %s" % self.user.username
# noinspection PyUnresolvedReferences
def has_unverified_email_adresses(self):
for email in self.user.emailaddress_set.all():
if not email.verified:
return True
return False
```
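A hedged sketch of how this model might be exercised; it assumes a fully configured Django project with a reachable MinIO backend, and the user, import path, fingerprint and key below are placeholders.

```python
# Illustrative only: requires Django and MinIO configured as in the project.
from django.contrib.auth.models import User
from mainapp.models import UserProfile  # import path assumed from the file location

user = User.objects.create_user("alice")               # placeholder account
profile = UserProfile.objects.create(user=user)

profile.add_pgp_key("0123456789ABCDEF0123456789ABCDEF01234567",  # placeholder data
                    "-----BEGIN PGP PUBLIC KEY BLOCK----- ...")
key_bytes = profile.get_pgp_key()                      # bytes, or None if nothing stored
profile.remove_pgp_key()
```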
|
{
"source": "jfeinstein10/generator-angular-flask-sqlalchemy",
"score": 2
}
|
#### File: app/templates/app.py
```python
import os
from flask import Flask, render_template, send_file
from flask.ext.assets import Environment
from config import debug, secret
app = Flask(__name__)
app.secret_key = secret
app.debug = debug
assets = Environment(app)
assets.debug = debug
assets.auto_build = True
assets.from_yaml(os.path.join(app.static_folder, 'bundles.yaml'))
assets.config['STATIC_FOLDER'] = app.static_folder
assets.config['STATIC_URL_PATH'] = app.static_url_path
@app.errorhandler(404)
def not_found(error):
return 'Not found', 404
@app.errorhandler(500)
def internal_error(error):
return 'Internal error', 500
@app.route('/static/<path:path>', methods=['GET'])
def static_files(path):
return send_file('static/{0}'.format(path))
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
```
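The generated template has no run guard of its own; for local development one might append a minimal entry point like the sketch below (an assumption, not part of the generator output).

```python
# Hypothetical development entry point appended to the generated app.py.
if __name__ == '__main__':
    app.run(debug=debug)
```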
#### File: endpoints/templates/_controller.py
```python
from app import app
from models import *
class <%= controllerName %>(object):
<% endpoints.forEach(function(endpoint){ %>
@classmethod
@app.route('<%= endpoint.url %>', methods=<%= JSON.stringify(endpoint.methods) %>)
def <%= endpoint.functionName %>():
# TODO
pass
<% }); %>
```
|
{
"source": "jfeist/jftools",
"score": 3
}
|
#### File: jftools/jftools/interpolate.py
```python
from scipy.interpolate import InterpolatedUnivariateSpline
from numpy import exp, angle
from . import unroll_phase
arg = lambda x: unroll_phase(angle(x))
def interp_cmplx(x,y,*args,absarg=True,interpolator=InterpolatedUnivariateSpline,**kwargs):
if absarg:
return interp_cmplx_absarg(interpolator,x,y,*args,**kwargs)
else:
return interp_cmplx_reim (interpolator,x,y,*args,**kwargs)
class interp_cmplx_absarg:
def __init__(self,interpolator,x,y,*args,**kwargs):
self.abs = interpolator(x,abs(y),*args,**kwargs)
self.arg = interpolator(x,arg(y),*args,**kwargs)
def __call__(self,*args,**kwargs):
return self.abs(*args,**kwargs)*exp(1j*self.arg(*args,**kwargs))
class interp_cmplx_reim:
def __init__(self,interpolator,x,y,*args,**kwargs):
self.real = interpolator(x,y.real,*args,**kwargs)
self.imag = interpolator(x,y.imag,*args,**kwargs)
def __call__(self,*args,**kwargs):
return self.real(*args,**kwargs) + 1j*self.imag(*args,**kwargs)
```
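A small sketch of `interp_cmplx` on synthetic data, assuming `jftools` (and SciPy) are installed; it compares the default abs/arg interpolation with the real/imaginary variant.

```python
import numpy as np
from jftools.interpolate import interp_cmplx

x = np.linspace(0.0, 1.0, 20)
y = (1.0 + x) * np.exp(2j * np.pi * x)       # smooth amplitude and phase

f_absarg = interp_cmplx(x, y)                # interpolates |y| and the unrolled phase
f_reim = interp_cmplx(x, y, absarg=False)    # interpolates Re(y) and Im(y)

x_fine = np.linspace(0.0, 1.0, 200)
print(np.max(np.abs(f_absarg(x_fine) - f_reim(x_fine))))  # both stay close to y
```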
|
{
"source": "jfelipesouza/Invasores-do-python",
"score": 3
}
|
#### File: Invasores-do-python/PyInvaders/pyinvaders.py
```python
from random import choice, randint
from math import ceil
import pygame
import os
# Global variables that are needed at several points
#### Colors ####
#### (R, G, B) ####
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
GOLD = (255, 215, 0)
SPRINGREEN = (0, 250, 154)
#### SCREEN ####
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
#### DIRECTORY ####
# Command to get the path of the folder where the game is located
DIRECTORY = os.getcwd()
#### FONT ####
FONT = DIRECTORY + "/fonts/space_invaders.ttf"
class Edge(pygame.sprite.Sprite):
"""
Estrutura criada para facilitar a análise de colisões com as bordas.
"""
def __init__(self, width, height, x, y):
"""
Cria as bordas do jogo.
width: largura da borda.
height: altura da borda.
x: posição no eixo x.
y: posição no eixo y.
"""
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((width, height))
self.image.fill(WHITE)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Block(pygame.sprite.Sprite):
"""
Responsável por criar barreiras que protegem a nave.
"""
def __init__(self, color = WHITE, size = 10):
"""
Cria cada bloco com uma determinada cor e tamanho.
color: cor que o bloco terá.
size: tamanho de cada quadrado.
"""
pygame.sprite.Sprite.__init__(self)
self.color = color
self.size = size
self.image = pygame.Surface([self.size, self.size])
self.image.fill(color)
self.rect = self.image.get_rect()
class Ship(pygame.sprite.Sprite):
"""
Classe que representa a nave do jogador.
"""
def __init__(self, path, pos_x, pos_y, speed = 5):
"""
Cria uma nave.
path: caminho onde está a imagem que representa a nave.
pos_x: posição inicial da nave no eixo x.
pos_y: posição inicial da nave no eixo y.
speed: velocidade que a nave se deslocará. Por padrão ela é 5px.
"""
pygame.sprite.Sprite.__init__(self)
self.__initial_position = (pos_x, pos_y)
        # Loading the ship image
self.__image = pygame.image.load(path)
self.image = pygame.transform.scale(self.__image, (60, 60))
self.rect = self.image.get_rect()
self.rect.x = pos_x
self.rect.y = pos_y
self.speed = speed
        # The ship starts with 3 lives
self.lifes = 3
        # Sounds played when the ship shoots or is hit
self.__sound_shot = pygame.mixer.Sound(DIRECTORY + "/sounds/shoot.wav")
self.__ship_explosion = pygame.mixer.Sound(DIRECTORY + "/sounds/shipexplosion.wav")
def initial_position(self):
""" logo
Define as posições iniciais da nave.
"""
self.rect.x = self.__initial_position[0]
self.rect.y = self.__initial_position[1]
def die(self):
"""
Assim que a nave é atingida esse método é chamado para realizar as devidas
ações que são necessárias.
As ações são: reproduzir o som de explosão definido na construção do objeto,
retornar a nave à posição inicial e reduzir a sua quantidade de vidas.
"""
self.__ship_explosion.play()
self.initial_position()
self.lifes -= 1
def shoot(self):
"""
Método que permite a nave atirar.
Ao ser chamado é liberado o som de tiro e criado um projétil.
"""
self.__sound_shot.play()
bullet = Bullet(self.rect.midtop, 1)
return bullet
def update(self, *args):
"""
Método que realiza a atualização da posição da nave à medida que o
jogador a movimenta.
"""
# Se estiver pressionando o botão da seta da direita
if pygame.key.get_pressed()[pygame.K_RIGHT]:
if self.rect.right < (SCREEN_WIDTH - self.speed):
self.rect.x += self.speed
        # If the left arrow key is being pressed
elif pygame.key.get_pressed()[pygame.K_LEFT]:
if self.rect.left > self.speed:
self.rect.x -= self.speed
def __str__(self):
"""
Representação textual do objeto, indicando a sua posição atual.
"""
return "Ship in (%s, %s)" % (self.rect.x, self.rect.y)
class Invader(pygame.sprite.Sprite):
"""
Classe que representa os invasores.
"""
def __init__(self, sprite, pos_x, pos_y, speed = 1):
"""
Método responsável por criar cada invasor.
sprite: imagem que representa cada invasor.
pos_x: posição inicial do invasor no eixo x.
pos_y: posição inicial do invasor no eixo y.
speed: velocidade em que cada objeto se movimenta.
"""
pygame.sprite.Sprite.__init__(self)
self.image = sprite
self.rect = sprite.get_rect()
self.rect.x = pos_x
self.rect.y = pos_y
self.speed = speed
def shoot(self):
"""
Metódo que permite cada invasor disparar.
"""
bullet = Bullet(self.rect.midtop, -1, speed = 4, color = RED)
return bullet
def up_speed(self):
"""
Aumenta a velocidade com que cada invasor se movimenta.
"""
self.speed += 0.5
def down_invader(self):
"""
Método que movimenta as sprites ao longo do eixo y.
"""
self.rect.y += 20
def update(self, direction):
"""
Método que movimentas as sprites ao longo do eixo x.
"""
self.rect = self.rect.move(self.speed * direction, 0)
def __str__(self):
"""
Representação textual do objeto, indicando a sua posição atual.
"""
return "Invader in (%s, %s)" % (self.rect.x, self.rect.y)
class Mystery(pygame.sprite.Sprite):
"""
Classe que representa a Mystery do jogo tradicional.
"""
def __init__(self, sprite, pos_x, pos_y, speed = 2.7):
"""
Método responsável por criar a nave-mãe.
sprite: imagem que representa a nave.
pos_x: posição inicial da nave no eixo x.
pos_y: posição inicial da nave no eixo y.
speed: velocidade em que a nave se movimenta.
"""
pygame.sprite.Sprite.__init__(self)
self.image = sprite
self.rect = self.image.get_rect(topleft = (pos_x, pos_y))
self.speed = speed
def update(self, *args):
if (self.rect.x >= (SCREEN_WIDTH + 200)):
self.kill()
else:
self.rect = self.rect.move(self.speed, 0)
class Bullet(pygame.sprite.Sprite):
"""
Classe que representa as balas de todos os objetos que realizam disparos.
"""
def __init__(self, pos_xy, direction, color = SPRINGREEN, speed = 8):
"""
Constrói um objeto do tipo Bullet.
pos_xy: é o ponto (x, y) que é o ponto médio da borda superior
da sprite.
direction: direção que o objeto se deslocará.
speed: velocidade que o objeto se deslocará.
Por padrão é 8px.
color: cor do objeto.
Por padrão é azul-marinho.
"""
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((5,10))
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.x = pos_xy[0]
self.rect.y = pos_xy[1]
self.direction = direction
self.speed = speed * direction
def update(self, *args):
"""
Atualiza o objeto durante o seu deslocamento.
"""
self.rect.y -= self.speed
if self.rect.bottom <= 0:
self.kill()
class SpaceInvaders():
"""
Classe que comanda os principais comandos do jogo.
"""
def __init__(self):
"""
Permite criar uma instância do jogo.
"""
# Definindo os caminhos dos arquivos necessários para o jogo
self.ship_shot = pygame.sprite.GroupSingle()
self.invader_shot = pygame.sprite.Group()
"""
Para colocar em tela cheia usa o comando:
pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
"""
self.window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        # Sets the window caption
pygame.display.set_caption("Space Invaders")
        # Sets the window icon
logo = pygame.image.load(DIRECTORY + "/images/logo.png")
pygame.display.set_icon(logo)
self.score = 0
self.level = 0
self.speed = 0
        '''
        Creating a pygame.font.Font object, which takes the font and the size;
        if the font is passed as None the system default is used.
        '''
self.font = self.create_font(60)
self.score_font = self.create_font(15)
self.explosion_sound = pygame.mixer.Sound(DIRECTORY + "/sounds/invaderkilled.wav")
        # Loading the required images
self.path_image_ship = DIRECTORY + "/images/ship.png"
background_image = pygame.image.load(DIRECTORY + "/images/back.png")
explosion_image = pygame.image.load(DIRECTORY + "/images/explosion.png")
lifes_image = pygame.image.load(DIRECTORY + "/images/heart.png")
self.ship = pygame.sprite.GroupSingle(
Ship(self.path_image_ship, (SCREEN_WIDTH - 50) // 2,
(SCREEN_HEIGHT - 110)))
self.ship_sprite = self.ship.sprites()[0]
mystery_image = pygame.image.load(DIRECTORY + "/images/boss3.png")
self.mystery_image = pygame.transform.scale(mystery_image, [71, 39])
self.mystery = pygame.sprite.GroupSingle(
Mystery(self.mystery_image, self.random_position(), 15))
self.background = pygame.transform.scale(background_image, (SCREEN_WIDTH, SCREEN_HEIGHT))
self.lifes_image = pygame.transform.scale(lifes_image, (25, 25))
self.explosion_image = pygame.transform.scale(explosion_image, (
(SCREEN_WIDTH // 20), (SCREEN_WIDTH // 20)))
self.clock = pygame.time.Clock()
self.invaders = pygame.sprite.OrderedUpdates()
self.invaders_direction = 1
self.increment_speed = False
self.left_edge = pygame.sprite.GroupSingle(Edge(5, SCREEN_HEIGHT, 0, 0))
self.right_edge = pygame.sprite.GroupSingle(Edge(5, SCREEN_HEIGHT, 795, 0))
self.bottom_edge = pygame.sprite.GroupSingle(Edge(SCREEN_WIDTH, 5, 0, 560))
self.groups = pygame.sprite.Group(self.ship_shot, self.invader_shot,
self.invaders, self.mystery)
def random_position(self):
"""
Escolhe aleatoriamente uma posição inicial no eixo x
para a nave-mãe.
"""
return choice([-1700, -1900, -2200, -2500, -1500])
def home_screen(self):
"""
Cria a tela inicial do jogo.
"""
menu = True
music_menu = pygame.mixer.Sound(DIRECTORY + "/sounds/menu.wav")
music_menu.play(-1)
text = self.font.render("SPACE INVADERS", True, GREEN)
self.font = self.create_font(20)
command1 = self.font.render(" ENTER : START ", True, WHITE, None)
command2 = self.font.render(" ESC : OUT ", True, WHITE, None)
command1_rect = command1.get_rect(center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT - 100))
command2_rect = command2.get_rect(center = ((SCREEN_WIDTH + 15) // 2, SCREEN_HEIGHT - 50))
mystery = pygame.image.load(DIRECTORY + "/images/boss1.png")
mystery = pygame.transform.scale(mystery, [110, 60])
speed = [-5, 5]
rect_mystery = mystery.get_rect()
self.window.fill(BLACK)
self.window.blit(text, [(SCREEN_WIDTH - 570) // 2, 50])
self.window.blit(command1, command1_rect)
self.window.blit(command2, command2_rect)
pygame.display.update()
while menu:
            # Changing the direction in which the ship moves.
if rect_mystery.left < 0 or rect_mystery.right > SCREEN_WIDTH:
speed[0] = -speed[0]
if rect_mystery.top < 0 or rect_mystery.bottom > SCREEN_HEIGHT:
speed[1] = -speed[1]
rect_mystery.x += speed[0]
rect_mystery.y += speed[1]
for event in pygame.event.get():
if event.type == pygame.QUIT:
music_menu.stop()
return False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN or event.key == pygame.K_SPACE:
self.start_game()
music_menu.stop()
return True
if event.key == pygame.K_ESCAPE:
music_menu.stop()
return False
self.window.fill(BLACK)
self.window.blit(mystery, rect_mystery)
self.window.blit(text, [(SCREEN_WIDTH - 570) // 2, 50])
self.window.blit(command1, command1_rect)
self.window.blit(command2, command2_rect)
self.clock.tick(60)
pygame.display.update()
def final_screen(self):
"""
Cria a tela final do jogo.
"""
music_menu = pygame.mixer.Sound(DIRECTORY + "/sounds/menu.wav")
music_menu.play(-1)
self.game_over_screen()
self.font50 = self.create_font(50)
self.font20 = self.create_font(20)
text1 = self.font50.render("FINAL SCORE: %d" % self.score, True, GOLD)
text2 = self.font20.render("PRESS ENTER TO TRY AGAIN", True, WHITE)
text3 = self.font20.render("PRESS ESC TO OUT", True, WHITE)
text1_rect = text1.get_rect(center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT - 350))
text2_rect = text2.get_rect(center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT - 100))
text3_rect = text3.get_rect(center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT - 40))
self.window.fill(BLACK)
self.window.blit(text1, text1_rect)
self.window.blit(text2, text2_rect)
self.window.blit(text3, text3_rect)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
music_menu.stop()
pygame.quit()
exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
music_menu.stop()
self.level = 0
self.speed = 0
self.ship = pygame.sprite.GroupSingle(
Ship(self.path_image_ship, (SCREEN_WIDTH) // 2, (SCREEN_HEIGHT - 110)))
self.ship_sprite = self.ship.sprites()[0]
self.start_game()
return
if event.key == pygame.K_ESCAPE:
music_menu.stop()
pygame.quit()
exit()
def level_screen(self):
"""
Cria a tela que indica o nível que irá iniciar.
Ela dura 1 segundo (1000 milisegundos).
"""
self.level += 1
if (self.level > 1 and self.level < 6):
self.speed += 0.3
elif (self.level == 1):
self.speed += 1
font = self.create_font(100)
text = font.render('LEVEL: ' + str(self.level), True, GOLD)
self.time = pygame.time.get_ticks()
while ((pygame.time.get_ticks() - self.time) < 1000):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
self.window.blit(self.background, [0, 0])
self.window.blit(text, [(SCREEN_WIDTH - 450) // 2, 220])
pygame.display.update()
def game_over_screen(self):
"""
Cria a tela de game over.
Ela dura 1.5 segundo (1500 milisegundos).
"""
font = self.create_font(100)
text = font.render('GAME OVER', True, RED)
time = pygame.time.get_ticks()
while ((pygame.time.get_ticks() - time) < 1500):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
self.window.blit(self.background, [0, 0])
self.window.blit(text, [(SCREEN_WIDTH - 600) // 2, 220])
pygame.display.update()
def start_game(self):
"""
Limpa todas as variáveis para que se possa iniciar o jogo.
"""
self.groups.add(self.ship)
self.invaders_direction = 1
self.blocks = pygame.sprite.Group(self.build_blocks(0),
self.build_blocks(1),
self.build_blocks(2))
self.invaders.empty()
self.mystery.empty()
self.invader_shot.empty()
self.ship_shot.empty()
self.level_screen()
self.create_invaders()
self.ship_sprite.initial_position()
self.update()
def build_blocks(self, number):
"""
Constrói cada bloco que fica entre os invasores e a nave.
Cada bloco é constituído de diversos pequenos blocos, o que facilita
analisar as colisões.
number: é o número do bloco, isso ajuda a indicar a posição
que ele será construído.
"""
aux = pygame.sprite.Group()
for row in range(4):
for column in range(10):
blocker = Block()
blocker.rect.x = 45 + (300 * number) + (column * blocker.size)
blocker.rect.y = 400 + (row * blocker.size)
aux.add(blocker)
return aux
def create_invaders(self):
"""
Cria os invasores e coloca todos em um grupo.
"""
enemy_types = []
for i in range(1, 8):
enemy_types.append(pygame.image.load(DIRECTORY + ("/images/invader%d.png" % (i % 2))))
x = 25
        # Filling in the invaders column by column.
for j in range(7):
y = 60
for i in range(5):
sprite = pygame.transform.scale(enemy_types[i],
((SCREEN_WIDTH // 20), (SCREEN_WIDTH // 20)))
self.invaders.add(Invader(sprite, x, y, self.speed))
y += 45
x += 62
def showShipLives(self):
"""
Mostra na tela as vidas que a nave possui.
"""
y = 10
for i in range(self.ship_sprite.lifes):
self.window.blit(self.lifes_image, (y, 570))
y += 40
def enemy_shot(self):
"""
Escolhe aleatoriamente um invasor para que ele realize algum disparo.
"""
enemy = [i for i in self.invaders]
for i in range(2):
invader = choice(enemy)
self.invader_shot.add(invader.shoot())
def update(self):
"""
Realiza todas as atualizações necessárias para o jogo.
"""
score = self.score_font.render("SCORE: %d" % self.score, True, WHITE)
current_time = pygame.time.get_ticks()
if (current_time % 2000.0 < 20):
self.enemy_shot()
if ((len(self.mystery) == 0)):
self.mystery.add(Mystery(self.mystery_image, self.random_position(), 15))
self.window.blit(self.background, [0, 0])
self.window.blit(score, (SCREEN_WIDTH - 150, SCREEN_HEIGHT - 30))
self.groups.draw(self.window)
self.blocks.draw(self.window)
self.groups.update(self.invaders_direction)
self.update_direction()
self.check_collisions()
self.showShipLives()
self.groups = pygame.sprite.Group(self.ship, self.ship_shot, self.invader_shot, self.invaders,
self.left_edge, self.bottom_edge, self.right_edge, self.mystery)
self.clock.tick(60)
pygame.display.update()
def check_collisions(self):
"""
Realiza todas as checagens de colisões do jogo e as suas
consequências.
"""
"""
Se ocorrer uma colisão entre os projéteis da nave e de algum
invasor ambas são mortas e ocorre um acréscimo de um valor entre
5 e 20 na pontuação do jogador.
"""
if pygame.sprite.groupcollide(self.ship_shot, self.invader_shot, True, True):
self.score += randint(5, 20)
        # Collision between the bullets and the bottom edge, only the shots are killed.
pygame.sprite.groupcollide(self.invader_shot, self.bottom_edge, True, False)
        # Collision between the invaders' shots and the blocks, both are killed.
pygame.sprite.groupcollide(self.invader_shot, self.blocks, True, True)
        # Collision between the ship's shot and the blocks, both are killed.
pygame.sprite.groupcollide(self.ship_shot, self.blocks, True, True)
        # Collision between the blocks and the invaders, only the blocks are killed.
pygame.sprite.groupcollide(self.blocks, self.invaders, True, False)
        # Collision between the ship and the invaders
if pygame.sprite.groupcollide(self.ship, self.invaders, False, False):
self.ship_sprite.die()
        # Collision between the mystery ship and the ship's shot. The score increases by a value between 25 and 55.
if pygame.sprite.groupcollide(self.mystery, self.ship_shot, True, True):
self.score += choice([25, 35, 45, 55])
self.explosion_sound.play()
        '''
        Collision between the ship's shots and the invaders, both are killed.
        In addition, the explosion image and sound are triggered and the score
        increases by one of [10, 15, 20, 25, 30, 35, 40].
        '''
for atingidos in pygame.sprite.groupcollide(self.ship_shot, self.invaders, True, True).values():
for invasor in atingidos:
self.explosion_sound.play()
self.window.blit(self.explosion_image, (invasor.rect.x, invasor.rect.y))
self.score += choice([10, 15, 20, 25, 30, 35, 40])
        # Checks collisions between the invaders' shots and the ship. Only the shot is killed.
if (pygame.sprite.groupcollide(self.ship, self.invader_shot, False, True)):
self.explosion_sound.play()
self.window.blit(self.explosion_image, (self.ship_sprite.rect.x, self.ship_sprite.rect.y))
self.ship_sprite.die()
def update_direction(self):
"""
Atualiza a direção dos invasores.
Já que os invasores são colocados na tela coluna a coluna é possível
saber quem são os primeiros e os últimos que foram inseridos para poder
controlar os limites da tela.
"""
arr = self.invaders.sprites()
first = arr[0]
last = arr[-1]
if ((last.rect.x > (SCREEN_WIDTH - last.rect.width - 10)) or (first.rect.x < 10)):
self.invaders_direction *= -1
current_time = pygame.time.get_ticks()
if (current_time - self.time > (8000 // self.speed)):
self.down_invader(arr)
def down_invader(self, arr):
"""
Movimenta cada invasor pelo eixo y.
"""
up_speed = (len(self.invaders) <= 8)
for enemy in arr:
if up_speed:
enemy.up_speed()
enemy.down_invader()
def create_font(self, size):
return pygame.font.Font(FONT, size)
def main(self):
"""
Método principal do jogo.
"""
# Variável necessária para que o loop onde o jogo ocorre dure o tempo necessário.
run = True
menu = True
while menu:
command = self.home_screen()
if not command:
menu = False
pygame.quit()
exit()
while run:
if self.ship_sprite.lifes <= 0:
self.final_screen()
self.score = 0
elif len(self.invaders) == 0:
self.start_game()
else:
for event in pygame.event.get():
                    # Checking whether the user clicked the window close option.
if event.type == pygame.QUIT:
run, menu = False, False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
run, menu = False, False
if ((event.key == pygame.K_UP or event.key == pygame.K_SPACE) and not self.ship_shot):
self.ship_shot.add(self.ship_sprite.shoot())
self.update()
if __name__ == "__main__":
    # Command needed to initialize the Pygame modules
pygame.init()
pygame.mixer.pre_init(22050, -16, 2, 1024)
pygame.mixer.init(22050, -16, 2, 1024)
game = SpaceInvaders()
game.main()
os.environ['SDL_VIDEO_CENTERED'] = '1'
    # Command that shuts down the Pygame modules
pygame.quit()
```
|
{
"source": "jfelip/font_tracer",
"score": 3
}
|
#### File: jfelip/font_tracer/font2vertices.py
```python
import freetype
import numpy as np
from bezier_sampling import bezier_curve
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def trace_cubic_off_points(c_points, c_tags, idx, n_segments=5):
"""
Traces the bezier cubic arc represented by the contour off point at idx into n_segments line segments. Details of
font outline in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html
Two successive conic off points force the creation of a virtual on point at the exact middle of the two off
points to sample the conic curve. Cubic points must be in pairs and have to be surrounded by on points. To avoid
line segment duplication this function only traces line segments if the c_points[idx] is the second of a pair of
off cubic points.
:param c_points: Contour points arranged in a (N,2) array.
:param c_tags: N-sized array with a FreeType tag for each point in c_points.
:param idx: Index of the contour point used to create the traced segments.
:param n_segments: Number of segments used to trace the bezier arc. Defaults to 5.
:return: Set of line segments (M, 2)
"""
sampled_pts = np.array([])
idx_prev = idx - 1
idx_next = idx + 1
# If it is the last point of the contour, use the first as the pivot.
if idx == len(c_points) - 1:
idx_next = 0
# Only trace if the point is the second of a pair of off cubic points.
if c_tags[idx_prev] == freetype.FT_Curve_Tag_Cubic and c_tags[idx] == freetype.FT_Curve_Tag_Cubic:
sampled_pts = bezier_curve([c_points[idx_prev - 1],
c_points[idx_prev],
c_points[idx],
c_points[idx_next]], n_segments)
return sampled_pts.reshape(-1)
def trace_conic_off_points(c_points, c_tags, idx, n_segments=5):
"""
Traces the bezier arc represented by the contour off point at idx into n_segments line segments. Details of font
outline in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html
Two successive conic off points force the creation of a virtual on point at the exact middle of the two off
points to sample the conic curve. Cubic points must be in pairs and have to be surrounded by on points.
:param c_points: Contour points arranged in a (N,2) array.
:param c_tags: N-sized array with a FreeType tag for each point in c_points.
:param idx: Index of the contour point used to create the traced segments.
:param n_segments: Number of segments used to trace the bezier arc. Defaults to 5.
:return: Set of line segments (M, 2)
"""
# If it is the last point of the contour, use the first as the pivot.
idx_prev = idx - 1
idx_next = idx + 1
if idx == len(c_points) - 1:
idx_next = 0
if c_tags[idx_prev] == freetype.FT_Curve_Tag_On:
point_start = c_points[idx_prev]
elif c_tags[idx_prev] == freetype.FT_Curve_Tag_Conic:
point_start = (c_points[idx] + c_points[idx_prev]) / 2
else:
raise ValueError("While tracing point index %d. Previous point with unsupported type: " % idx
+ str(c_tags[idx_prev]))
if c_tags[idx_next] == freetype.FT_Curve_Tag_On:
point_end = c_points[idx_next]
elif c_tags[idx_next] == freetype.FT_Curve_Tag_Conic:
point_end = (c_points[idx] + c_points[idx_next]) / 2
else:
raise ValueError("While tracing point index %d. Previous point with unsupported type: " % idx
+ str(c_tags[idx_next]))
sampled_pts = bezier_curve([point_start, c_points[idx], point_end], n_segments)
return sampled_pts.reshape(-1)
def glyph2vertices(glyph, n_segments=5):
"""
Sample glyph outline vertices checking points on the curve and control points for conic and cubic bezier arcs as
described in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html
Vertex coordinates are scaled such that the vertical advance is 1 unit and the return is a list of arrays,
each list element represents a closed contour. Each closed contour is a numpy array of size Nx2 representing the
set of 2d vertices that form the contour outline.
:param glyph: FreeFont glyph to be traced
:param n_segments: Number of segments used to sample each bezier arc. Defaults to 5.
:return: A list of sampled contours. Each contour is a numpy array of Nx2 vertices representing a glyph contour.
"""
# Get the points describing the outline
points = np.array(glyph.outline.points, dtype=np.float32)
# Get contour start indices
contours = glyph.outline.contours
# Obtain the point tags from the glyph outline description.
tags = []
for i, t in enumerate(glyph.outline.tags):
tags.append(freetype.FT_CURVE_TAG(t))
# Process each contour separately
prev_c = -1
c_draw_contours = []
for c in contours:
# Extract the points and tags for the current contour
c_points = points[prev_c + 1:c + 1]
c_draw_points = np.array([])
c_tags = tags[prev_c + 1:c + 1]
# Generate points depending on their tag
for i in range(len(c_points)):
# If the point is on, just add it
if c_tags[i] == freetype.FT_Curve_Tag_On:
c_draw_points = np.concatenate((c_draw_points, c_points[i]))
# If the point is off conic
elif c_tags[i] == freetype.FT_Curve_Tag_Conic:
sampled_pts = trace_conic_off_points(c_points, c_tags, i, n_segments)
c_draw_points = np.concatenate((c_draw_points, sampled_pts))
# If the point is off cubic
elif c_tags[i] == freetype.FT_Curve_Tag_Cubic:
sampled_pts = trace_cubic_off_points(c_points, c_tags, i, n_segments)
c_draw_points = np.concatenate((c_draw_points, sampled_pts))
# Normalize vertices (scale to fit 1 unit height bbox)
c_draw_points = c_draw_points / glyph.metrics.vertAdvance
c_draw_contours.append(c_draw_points)
prev_c = c
# Return a list of contour vertices
return c_draw_contours
def vertices2lines(contours):
"""
Generate a set of 2d lines from a set of closed contours.
:param contours: List of 2d closed contours. Each contour is an array of flattened 2d points.
:return: Array of vertices representing lines.
"""
lines = np.array([])
for contour in contours:
# Draw a line between each contour sampled sequential point
for i in range(0, len(contour), 2):
line = contour[i:i+4]
lines = np.concatenate((lines, line))
# Close the contour
if len(contour) % 2 != 0:
lines = np.concatenate((lines, contour[-2:2]))
else:
lines = np.concatenate((lines, contour[0:2]))
return lines.flatten()
```
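A minimal usage sketch for the functions above (not part of the original file). It assumes `freetype-py` is installed and that a local TrueType font such as `DejaVuSans.ttf` is available; the font path and the character are placeholders.
```python
# Hedged usage sketch: the font path and character below are assumptions, not repository assets.
import freetype
from font2vertices import glyph2vertices, vertices2lines

face = freetype.Face("DejaVuSans.ttf")  # any locally available .ttf should work
face.set_char_size(48 * 64)             # character size in 1/64th of a point
face.load_char("g")                     # fills face.glyph with the outline data

contours = glyph2vertices(face.glyph, n_segments=8)
lines = vertices2lines(contours)
print(len(contours), "contours,", lines.size // 2, "line-segment endpoints")
```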
|
{
"source": "jfellien/TemperatureMonitor",
"score": 3
}
|
#### File: jfellien/TemperatureMonitor/main.py
```python
from PIL import ImageFont
from datetime import datetime
from w1thermsensor import W1ThermSensor
import os, sys, time
import EPD.epdAPI as EPD
FONT_PATH = './EPD/fonts/DroidSans.ttf'
FONT_BIG = ImageFont.truetype(FONT_PATH, 72)
FONT_SMALL = ImageFont.truetype(FONT_PATH, 12)
BLACK = 0
epd = EPD.EPScreen('landscape')
def get_time():
full_time = datetime.now().time()
hour = str(full_time.hour)
minute = "{:02d}".format(full_time.minute)
short_time = hour + ':' + minute
return(short_time)
def get_current_temperature():
sensor = W1ThermSensor()
temperature = sensor.get_temperature()
temperature_formated = "{:3.1f}".format(temperature)
return(temperature_formated)
def show_temperature():
temperature = get_current_temperature()
title = 'Temperatur um ' + get_time()
temperature_value = str(temperature) + "°C"
epd.set_title(title)
epd.add_text_middle(60, temperature_value, FONT_BIG, BLACK)
epd.update_screen()
print(title)
print(temperature)
while True:
show_temperature()
time.sleep(10)
```
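A quick sensor-only check can be handy when the e-paper display is not attached. This is a sketch, not part of the original script; it assumes the same `w1thermsensor` setup used above.
```python
# Hedged sketch: read the 1-Wire temperature sensor once, without the EPD display.
from w1thermsensor import W1ThermSensor

sensor = W1ThermSensor()
print("{:3.1f} °C".format(sensor.get_temperature()))
```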
|
{
"source": "jfemiani/srp-boxes",
"score": 3
}
|
#### File: srp/data/generate_csv.py
```python
from __future__ import division, print_function
import os
import fiona
import numpy as np
import pandas as pd
import rasterio
import rasterio.errors
import rasterio.features
import rasterio.transform
import shapely.geometry
from srp.config import C
from srp.data.orientedboundingbox import OrientedBoundingBox
from srp.util import tqdm
from logging import info, warning
class SampleGenerator:
"""SampleGenerator
Generates patches with positive and negative examples for training.
"""
def __init__(self, **kwargs):
"""
This class primarily reads its parameters from the 'config.py' file. Please specify your parameters there.
Given a very high resolution ortho-image, a pre-processed LiDAR density file and human labeled '.geojson'
(all in the same coordinate system), this class prepares two CSV files. The positive csv specifies the (x,y)
coordinates, rotation angle (we constrain the angle within the first quadrant), length (x-axis vector length),
and width (y-axis vector length) IN THAT ORDER. The negative CSV only provides the center coordinates (x,y)
of each sample.
Arguments
---------
rgb_path (str):
path to the ortho-image
volume_path (str):
path to the volumetric data
annotation_path (str):
a .geojson file that labels the four corners of the box clock-wise
outdir (str):
where the .csv fies should be stored. This class can generate both positive and negative .csv files
min_seperation (float):
the minimum diameter around a positive center that we DO NOT draw negative samples from
threshold (float):
the minimum density count that the joined first and second tile above "fake ground" have to pass to count as
a "useful" negative spot. We need to make sure the negative samples are drawn from meaningful sites
(excluding streets and such).
num_samples (int):
The number of positive samples + negative samples. (default config.TRAIN.SAMPLES.GENERATOR.NUM_SAMPLES)
density_layers (list of int):
The layers of the volume densities that we use to decide if we should use that location as a sample.
"""
self.rgb_path = kwargs.pop('rgb_path', C.COLOR.FILE)
self.volume_path = kwargs.pop('volume_path', C.VOLUME.FILE)
self.annotation_path = kwargs.pop('annotation_path', C.ANNOTATIONS.FILE)
self.num_of_samples = kwargs.pop('num_samples', C.TRAIN.SAMPLES.GENERATOR.NUM_SAMPLES)
self.min_seperation = kwargs.pop('min_seperation', C.TRAIN.SAMPLES.GENERATOR.MIN_SEPARATION)
self.threshold = kwargs.pop('threshold', C.TRAIN.SAMPLES.GENERATOR.MIN_DENSITY)
self.patch_size = kwargs.pop('patch_size', C.TRAIN.PATCH_SIZE)
self.csv_dir = kwargs.pop('outdir', C.TRAIN.SAMPLES.DIR)
self.density_layers = kwargs.pop('density_layers', C.TRAIN.SAMPLES.GENERATOR.DENSITY_LAYERS)
self.sample_name_pattern = kwargs.pop('sample_pattern', C.TRAIN.SAMPLES.GENERATOR.NAME_PATTERN)
self.positive_csv_file = os.path.join(self.csv_dir, 'positives.csv')
self.negative_csv_file = os.path.join(self.csv_dir, 'negatives.csv')
self.bounds = None
with fiona.open(self.annotation_path) as vector_file:
self.hotspots = np.array([f['geometry']['coordinates'] for f in vector_file if f['geometry'] is not None])
def _get_tight_rectangle_info(self, box):
"""
This is an internal helper function. Given 4 human-labeled corner points, this function returns the estimated
rotation, length and width of the minimum_rotated_rectangle.
:param box:
4 by 2 array (each row is a point).
:return:
returns center-x, center-y, rotation, length, width of the box
Example:
>>> sc = SampleGenerator()
>>> sc._get_tight_rectangle_info(box=np.array([[0,0],
... [0,1],
... [2,1],
... [2,0]])) # doctest: +NORMALIZE_WHITESPACE
array([1. , 0.5, 0. , 2. , 1. ])
"""
poly = shapely.geometry.Polygon(box)
points = np.array(poly.minimum_rotated_rectangle.exterior)[:-1]
return OrientedBoundingBox.rot_length_width_from_points(points)
def make_pos_csv(self):
"""
This file generates a ".csv" file for the positive samples. It specifies the center(x,y) coordinates,
rotated angle, length, width IN THAT ORDER. It currently supports squares and rectangular inputs.
"""
with tqdm(self.hotspots, desc='Generating positive samples') as progress:
pos_samples = []
for i, b in enumerate(progress):
x, y, deg, length, width = self._get_tight_rectangle_info(b)
rel_path = self.sample_name_pattern.format(label='pos', index=i + 1)
pos_samples.append([rel_path, x, y, deg, length, width])
colnames = ['name', 'orig-x', 'orig-y', 'box-ori-deg', 'box-ori-length', 'box-ori-width']
posdf = pd.DataFrame(data=pos_samples, columns=colnames)
os.makedirs(os.path.dirname(self.positive_csv_file), exist_ok=True)
posdf.to_csv(path_or_buf=self.positive_csv_file, index=False)
info("Positive data .csv file saved as {}".format(self.positive_csv_file))
def make_neg_csv(self):
"""Generate a CSV file with the locations of some negative examples.
We look through the volume, and choose samples that are "interesting"
in the sense that they include points at the height layers where
we expect to see boxes.
"""
pos_xy = pd.read_csv(self.positive_csv_file).iloc[:, 1:1 + 2].values
num_of_negs = self.num_of_samples - len(pos_xy)
assert num_of_negs > 0
# Generate region that should not be negative
positive_region = shapely.geometry.MultiPoint(points=pos_xy)
positive_region = positive_region.buffer(self.min_seperation)
densities = rasterio.open(self.volume_path)
colors = rasterio.open(self.rgb_path)
self.bounds = tuple((max(densities.bounds.left, colors.bounds.left),
max(densities.bounds.bottom, colors.bounds.bottom),
min(densities.bounds.right, colors.bounds.right),
min(densities.bounds.top, colors.bounds.top)))
combined_window = densities.window(*self.bounds)
block_size = C.TRAIN.SAMPLES.GENERATOR.BLOCK_SIZE
rowcols = np.mgrid[0:densities.shape[0]:block_size, 0:densities.shape[1]:block_size].reshape(2, -1)
block_windows = [rasterio.windows.Window(row, col, block_size, block_size) for row, col in rowcols.T]
neg_xy = []
with tqdm(block_windows, desc="Processing volume data blocks") as progress:
for window in progress:
try:
overlapping_window = combined_window.intersection(window)
stack = densities.read([3, 4], window=overlapping_window, boundless=True)
tfm = densities.window_transform(overlapping_window)
except rasterio.errors.WindowError as e:
# Non-overlapping window: windows do not intersect.
continue
density_mask = stack.sum(0) > self.threshold
positive_mask = rasterio.features.geometry_mask(positive_region, stack.shape[1:], tfm)
sample_mask = density_mask & positive_mask
ij = np.argwhere(sample_mask.T)
if len(ij):
xy = np.c_[tfm * ij.T]
neg_xy.append(xy)
neg_xy = np.concatenate(neg_xy)
# Choose which samples to keep
indices = np.random.choice(len(neg_xy), num_of_negs)
neg_xy = neg_xy[indices]
samples = [[self.sample_name_pattern.format(label='neg', index=i + 1), x, y]
for i, (x, y) in enumerate(tqdm(neg_xy))]
colnames = ['name', 'orig-x', 'orig-y']
negdf = pd.DataFrame(data=samples, columns=colnames)
negdf.to_csv(path_or_buf=self.negative_csv_file, index=False)
info("Negative data .csv file saved as {}".format(self.negative_csv_file))
def generate_samples():
generator = SampleGenerator()
if os.path.isfile(generator.positive_csv_file):
warning("Skipping positive CSV generation:"
" output file {} already exists.".format(generator.positive_csv_file))
else:
generator.make_pos_csv()
if os.path.isfile(generator.negative_csv_file):
warning("Skipping negative CSV generation:"
" output file {} already exists".format(generator.negative_csv_file))
else:
generator.make_neg_csv()
if __name__ == '__main__':
generate_samples()
```
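A short sketch of how the generated CSVs could be inspected (not part of the original module). The column names are taken from `make_pos_csv` and `make_neg_csv` above, and the output directory comes from the project configuration.
```python
# Hedged sketch: peek at the generated positive/negative sample lists.
import os
import pandas as pd
from srp.config import C

pos = pd.read_csv(os.path.join(C.TRAIN.SAMPLES.DIR, "positives.csv"))
neg = pd.read_csv(os.path.join(C.TRAIN.SAMPLES.DIR, "negatives.csv"))
print(pos[["orig-x", "orig-y", "box-ori-deg", "box-ori-length", "box-ori-width"]].head())
print(neg[["orig-x", "orig-y"]].head())
```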
#### File: srp/data/generate_variations.py
```python
import os
import pickle
import skimage.filters
import affine
import glob
import scipy.ndimage
import numpy as np
from skimage.transform import rotate
from logging import debug
from srp.data.generate_patches import Patch
from srp.config import C
from srp.data.orientedboundingbox import OrientedBoundingBox
from srp.util import tqdm
class VariationMaker(object):
"""DataAugment
Generates variations(including synthetic) from patches generated by module 'generate_patches.py' randomly.
Attributes:
variations:
The number of variations to generate (default C.TRAIN.AUGMENTATION.VARIATIONS)
max_offset:
The maximum offset for augmentation, both dx and dy (default C.TRAIN.AUGMENTATION.MAX_OFFSET)
radius:
The size of the output patches (default is C.TRAIN.PATCH_SIZE/2)
synthetic_prop:
The probability of making synthetic data that lines up perfectly with the expected result
(default C.TRAIN.AUGMENTATION.SYNTHETIC_PROBABILITY)
current_fold:
An integer value specifying the fold that is currently in use (default C.TRAIN.SAMPLES.CURRENT_FOLD)
cache_root: the root where all subfolders will reside (default is C.TRAIN.SAMPLES.DIR)
"""
def __init__(self, **kwargs):
super().__init__()
self.variations = kwargs.pop('variations', C.TRAIN.AUGMENTATION.VARIATIONS)
self.max_offset = kwargs.pop('max_offset', C.TRAIN.AUGMENTATION.MAX_OFFSET)
self.radius = kwargs.pop('radius', C.TRAIN.PATCH_SIZE / 2)
self.synthetic_prop = kwargs.pop('synthetic_prop', C.TRAIN.AUGMENTATION.SYNTHETIC_PROBABILITY)
self.cache_root = kwargs.pop('cache_root', C.TRAIN.SAMPLES.DIR)
def _cropped_rotate_patch(self, source_patch, rotate_angle, p_center, radius, dr, dc):
rotated_patch = np.zeros((source_patch.shape))
for i in range(len(source_patch)):
rotated_patch[i] = rotate(source_patch[i], rotate_angle, preserve_range=True)
cropped_patch = rotated_patch[:, p_center - radius + dc:p_center + radius + dc, p_center - radius -
dr:p_center + radius - dr]
return cropped_patch
def _fake_positive_layer(self, obb, radius, edge_factor=1, sigma=12, fg_noise=0.1, bg_noise=0.1):
diameter = int(radius * 2)
square = np.zeros((diameter, diameter))
cd = obb.u_length
rd = obb.v_length
square[int(radius - rd):int(radius + rd), int(radius - cd):int(radius + cd)] = 1
outline = scipy.ndimage.morphology.morphological_gradient(square, 3)
outline[int(radius - rd):, int(radius - cd):int(radius + cd)] = 0
square = (1 - edge_factor) * square + edge_factor * outline
gradient = np.zeros_like(square)
gradient[:64] = 1
gradient = skimage.filters.gaussian(gradient, sigma=sigma)
square *= gradient
square /= np.percentile(square.flat, 99.9)
background = square == 0
noisy = square
noisy += background * np.random.randn(diameter, diameter) * bg_noise
noisy += ~background * np.random.randn(diameter, diameter) * fg_noise
noisy = noisy.clip(0, 1)
return noisy
def _fake_data(self, obb, radius=C.TRAIN.PATCH_SIZE / 2):
radius = int(C.TRAIN.SAMPLES.GENERATOR.PADDED_PATCH_SIZE / 2)
data = np.zeros((6, 2 * radius, 2 * radius))
data[2] = self._fake_positive_layer(obb, radius, edge_factor=1)
data[3] = self._fake_positive_layer(obb, radius, edge_factor=0.7)
data[3] *= 0.3
data *= 40
return data
def _augment(self, p, radius):
"""Generate an augmented version of patch `p`.
:param p: The original patch.
:param radius: The radius for the augmented patch (typically smaller
to accommodate rotation and cropping)
"""
radius = int(radius)
dr = int(np.random.uniform(-1, 1) * C.TRAIN.AUGMENTATION.MAX_OFFSET)
dc = int(np.random.uniform(-1, 1) * C.TRAIN.AUGMENTATION.MAX_OFFSET)
rotate_angle = np.random.rand() * 360
p_center = int(p.volumetric.shape[1] / 2)
vol = p.volumetric
if p.label and np.random.random() <= self.synthetic_prop:
vol = self._fake_data(p.obb, C.TRAIN.SAMPLES.GENERATOR.PADDED_PATCH_SIZE)
assert vol.shape[1:] == p.rgb.shape[1:]
source_patch = np.concatenate((p.rgb, vol))
rotated_patch = np.zeros((source_patch.shape))
obb = p.obb
for i in range(len(source_patch)):
rotated_patch[i] = rotate(source_patch[i], rotate_angle, preserve_range=True)
cropped_patch = rotated_patch[:, p_center - radius + dc:p_center + radius + dc, p_center - radius -
dr:p_center + radius - dr]
if p.label:
R = affine.Affine.rotation(rotate_angle)
T = affine.Affine.translation(dr, dc)
A = T * R
after = np.vstack(A * p.obb.points().T).T
obb = OrientedBoundingBox.from_points(after)
return Patch(
name=p.name,
obb=obb,
ori_xy=p.ori_xy,
rgb=cropped_patch[:3],
label=p.label,
volumetric=cropped_patch[3:],
dr_dc_angle=(dr, dc, rotate_angle))
def make_variations(self, patch):
"""Precompute the variations using original patches in designated cache_dir.
This is used to pre-compute data augmentation for deep learning.
The number and types of variation are controlled by the configuration file.
:param patch: an original 'Patch' class object
"""
label, name = patch.name.split('/')[-2:]
name = name.split('.')[0]
for i in range(self.variations):
var = self._augment(patch, radius=C.TRAIN.PATCH_SIZE / 2)
var_name = os.path.join(self.cache_root,
C.TRAIN.AUGMENTATION.NAME_PATTERN.format(label=label, name=name, var_idx=i + 1))
os.makedirs(os.path.dirname(var_name), exist_ok=True)
with open(var_name, 'wb') as handle:
pickle.dump(var, handle, protocol=pickle.HIGHEST_PROTOCOL)
def generate_variations(names=None, synthetic_prop=C.TRAIN.AUGMENTATION.SYNTHETIC_PROBABILITY, cache_root=None):
"""
:param names: a list of names relative to the C.TRAIN.SAMPLES.DIR (default is all patches pos + neg)
:param cache_root: the root where all subfolders will reside (default is C.TRAIN.SAMPLES.DIR)
"""
cache_root = cache_root or C.TRAIN.SAMPLES.DIR
if names:
samples = [os.path.join(C.TRAIN.SAMPLES.DIR, n) for n in list(names)]
else:
samples = glob.glob(os.path.join(C.TRAIN.SAMPLES.DIR, '*/*.pkl'))
maker = VariationMaker(synthetic_prop=synthetic_prop, cache_root=cache_root)
progress = tqdm(samples, desc='Generating variation patches, syn_prop={}'.format(synthetic_prop))
for i, name_dir in enumerate(progress):
with open(os.path.join(C.TRAIN.SAMPLES.DIR, name_dir), 'rb') as handle:
p = pickle.load(handle)
maker.make_variations(p)
if __name__ == '__main__':
generate_variations()
```
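A sketch of how a cached variation could be loaded back for inspection (not part of the original module). The exact file names depend on `C.TRAIN.AUGMENTATION.NAME_PATTERN`, which is configuration-specific, so the glob below simply takes whatever was written.
```python
# Hedged sketch: load one pickled Patch back from the sample/variation cache.
import glob
import os
import pickle

from srp.config import C

candidates = sorted(glob.glob(os.path.join(C.TRAIN.SAMPLES.DIR, "**", "*.pkl"), recursive=True))
with open(candidates[0], "rb") as handle:
    patch = pickle.load(handle)
print(patch.label, patch.rgb.shape, patch.volumetric.shape)
```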
#### File: srp/data/train_val_split.py
```python
import numpy as np
from srp.config import C
import os
import pandas as pd
from sklearn.model_selection import KFold
def train_val_split():
kf = KFold(n_splits=C.TRAIN.SAMPLES.FOLDS, shuffle=True, random_state=C.TRAIN.SRAND)
posinfo = pd.read_csv(os.path.join(C.TRAIN.SAMPLES.DIR, "positives.csv")).values
neginfo = pd.read_csv(os.path.join(C.TRAIN.SAMPLES.DIR, "negatives.csv")).values
for i, (train_index, test_index) in enumerate(kf.split(posinfo)):
fold_dir = os.path.join(C.TRAIN.SAMPLES.DIR, "fold{}".format(i + 1))
os.makedirs(fold_dir, exist_ok=True)
with open(os.path.join(fold_dir, "train.txt"), mode='w') as file:
for idx in train_index:
file.write(posinfo[idx, 0] + '\n')
# file.write("1, {}\n".format(idx+1))
with open(os.path.join(fold_dir, "test.txt"), mode='w') as file:
for idx in test_index:
file.write(posinfo[idx, 0] + '\n')
# file.write("1, {}\n".format(idx+1))
for i, (train_index, test_index) in enumerate(kf.split(neginfo)):
fold_dir = os.path.join(C.TRAIN.SAMPLES.DIR, "fold{}".format(i + 1))
with open(os.path.join(fold_dir, "train.txt"), mode='a') as file:
for idx in train_index:
file.write(neginfo[idx, 0] + '\n')
# file.write("0, {}\n".format(idx+1))
with open(os.path.join(fold_dir, "test.txt"), mode='a') as file:
for idx in test_index:
file.write(neginfo[idx, 0] + '\n')
# file.write("0, {}\n".format(idx+1))
if __name__ == '__main__':
train_val_split()
```
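A sketch that reads one fold back into train/test name lists (not part of the original module); the `foldN/train.txt` and `foldN/test.txt` layout matches what `train_val_split` writes above.
```python
# Hedged sketch: rebuild the train/test name lists for fold 1.
import os
from srp.config import C

fold_dir = os.path.join(C.TRAIN.SAMPLES.DIR, "fold1")
with open(os.path.join(fold_dir, "train.txt")) as f:
    train_names = [line.strip() for line in f if line.strip()]
with open(os.path.join(fold_dir, "test.txt")) as f:
    test_names = [line.strip() for line in f if line.strip()]
print(len(train_names), "train /", len(test_names), "test samples")
```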
#### File: srp/experiments/make_experiments.py
```python
from builtins import str
import os
from collections import OrderedDict
import itertools
import pandas as pd
import oyaml as yaml
from tqdm import tqdm
from srp.config import C
def make_experiments(options, dirname=C.INT_DATA, progress=True):
"""
Generate grid-search parameters to explore a set of options.
The idea is that you can write a single script that reads in a config file
to do an experiments. The config files will be generated by this function.
This script will not actually _do_ the experiments, it just generates the
configuration files.
:param options: An ordered dictionary; the first keys will vary fastest
in a grid search.
:param dirname: A folder that will hold configuration settings for
each combination of options we explore.
:param progress: whether to show a progress bar
Example
-------
>>> options = yaml.load('''
... A:
... - 1
... - 2
... - 3
... B:
... - alpha
... - beta
... C:
... - orange
... - yellow
... ''')
>>> os.makedirs('data/test', exist_ok=True)
>>> make_experiments(options, dirname='data/test', progress=False)
>>> os.path.isfile('data/test/experiments.csv')
True
>>> os.path.isfile('data/test/options.yml')
True
The `experiments.csv` file has a header and a row with the settings for
each trial. The first option changes fastest.
>>> f = open('data/test/experiments.csv').readlines()
>>> print(f[0].strip())
,A,B,C
>>> print(f[1].strip())
0,1,alpha,orange
>>> print(f[2].strip())
1,2,alpha,orange
Files are created for each combination of options
>>> os.path.isfile('data/test/experiments/00000/config.yml')
True
The experiment config files have all of the options as YAML data
>>> c = yaml.load(open('data/test/experiments/00000/config.yml'))
>>> c['A']
1
>>> c['B']
'alpha'
>>> c['C']
'orange'
"""
# Save the options
with open(os.path.join(dirname, 'options.yml'), 'w') as f:
yaml.dump(options, f, default_flow_style=False)
# Save the master list of experiments
combos = [reversed(combo) for combo in itertools.product(*reversed(list(options.values())))]
experiments = pd.DataFrame(combos, columns=list(options.keys()))
experiments.to_csv(os.path.join(dirname, 'experiments.csv'))
# Make a folder and config file for each experiment
if progress:
combos = tqdm(experiments.iterrows(), "generating configs")
else:
combos = experiments.iterrows()
for _, combo in combos:
subdirname = '{:05}'.format(combo.name)
rec = OrderedDict()
rec['NUMBER'] = combo.name
rec['NAME'] = '-'.join((subdirname, ) + tuple((str(x) for x in combo)))
rec.update(combo)
exp_dir = os.path.join(dirname, 'experiments', subdirname)
os.makedirs(exp_dir, exist_ok=True)
with open(os.path.join(exp_dir, 'config.yml'), 'w') as f:
yaml.dump(rec, f, default_flow_style=False)
```
|
{
"source": "jfemiani/synwin",
"score": 3
}
|
#### File: jfemiani/synwin/fetch_wall_textures.py
```python
import os
import shutil
import time
from glob import glob
import tqdm as tq
from bs4 import BeautifulSoup
# This site uses javascript for pagination -- access needs to be done through
# an actual browser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
BASE_URL = "https://www.cgbookcase.com"
def download(url, filename, tqparams=None):
import functools
import pathlib
import shutil
import requests
from tqdm.auto import tqdm
if tqparams is None:
tqparams = dict(leave=False)
r = requests.get(url, stream=True, allow_redirects=True)
if r.status_code != 200:
r.raise_for_status() # Raises for 4xx/5xx status codes; other non-200 codes fall through, so...
raise RuntimeError(f"Request to {url} returned status code {r.status_code}")
file_size = int(r.headers.get('Content-Length', 0))
path = pathlib.Path(filename).expanduser().resolve()
path.parent.mkdir(parents=True, exist_ok=True)
desc = "(Unknown total file size)" if file_size == 0 else ""
r.raw.read = functools.partial(r.raw.read, decode_content=True) # Decompress if needed
with tqdm.wrapattr(r.raw, "read", total=file_size, desc=desc, **tqparams) as r_raw:
with path.open("wb") as f:
shutil.copyfileobj(r_raw, f)
return path
def get_driver():
# Not using requests -- we need to use an actual browser
# to load the page and run javascript
options = Options()
options.headless = True
driver = webdriver.Chrome('/usr/lib/chromium-browser/chromedriver', options=options)
return driver
def get_content(url, driver=None, timeout=2):
if driver is None:
driver = get_driver()
driver.get(url)
time.sleep(5) # Give any scripts time to execute as the page loads
content = driver.page_source
return content
def get_texture_pages(query):
max_pages = 10
driver = get_driver()
for page in range(1, max_pages+1):
url = f'{BASE_URL}/textures/?search={query}&page={page}'
content = get_content(url, driver)
soup = BeautifulSoup(content, 'html.parser')
links = soup.find_all('a', {'class': 'results-itemWrapper'})
if len(links) == 0:
break
for link in links:
image_url = f"{BASE_URL}{link['href']}"
yield image_url
def get_zip_links(texture_page):
drv = get_driver()
drv.get(texture_page)
download_button = drv.find_element_by_class_name('btn-red')
zip_url = download_button.get_property('href')
return zip_url
def iter_download_walls(outdir):
for wall in get_texture_pages('wall'):
zip_url = get_zip_links(wall)
zip_path = os.path.join(outdir, os.path.basename(zip_url))
tq.tqdm.write(f'Downloading {wall} to {zip_path}')
download(zip_url, zip_path)
yield zip_path
def download_walls(outdir='data/PBRS/walls'):
return list(iter_download_walls(outdir))
def extract_materials(outdir='data/PBRS/walls', tqargs=None):
if tqargs is None:
tqargs = dict(leave=False)
zips = glob(f'{outdir}/*.zip')
for zip in tq.tqdm(zips, desc='Extracting archives', **tqargs):
shutil.unpack_archive(zip, extract_dir=outdir)
```
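The module has no driver of its own; a hypothetical one could look like the sketch below (the output directory simply mirrors the defaults used above).
```python
# Hedged sketch: download every wall texture archive, then unpack them.
from fetch_wall_textures import download_walls, extract_materials

if __name__ == "__main__":
    archives = download_walls("data/PBRS/walls")
    print("Downloaded {} archives".format(len(archives)))
    extract_materials("data/PBRS/walls")
```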
|
{
"source": "jfeng08/CoolProp",
"score": 3
}
|
#### File: incompressible_liquids/CPIncomp/DataObjects.py
```python
from __future__ import division, print_function
import numpy as np
import os, math
from .BaseObjects import IncompressibleData,IncompressibleFitter
from abc import ABCMeta
class SolutionData(object):
"""
A base class that defines all the variables needed
in order to make a proper fit. You can copy this code,
put in your data, and add some documentation for where the
information came from.
"""
ifrac_mass = "mass"
ifrac_mole = "mole"
ifrac_volume = "volume"
ifrac_undefined = "not defined"
ifrac_pure = "pure"
def __init__(self):
self.significantDigits = 7
self.name = None # Name of the current fluid
self.description = None # Description of the current fluid
self.reference = None # Reference data for the current fluid
self.Tmax = None # Maximum temperature in K
self.Tmin = None # Minimum temperature in K
self.xmax = 1.0 # Maximum concentration
self.xmin = 0.0 # Minimum concentration
self.xid = self.ifrac_undefined # Concentration is mole, mass or volume-based
self.TminPsat = None # Minimum saturation temperature in K
self.Tbase = None # Base value for temperature fits
self.xbase = None # Base value for concentration fits
self.temperature = IncompressibleData() # Temperature for data points in K
self.concentration = IncompressibleData() # Concentration data points in weight fraction
self.density = IncompressibleData() # Density in kg/m3
self.specific_heat = IncompressibleData() # Heat capacity in J/(kg.K)
self.viscosity = IncompressibleData() # Dynamic viscosity in Pa.s
self.conductivity = IncompressibleData() # Thermal conductivity in W/(m.K)
self.saturation_pressure = IncompressibleData() # Saturation pressure in Pa
self.T_freeze = IncompressibleData() # Freezing temperature in K
self.mass2input = IncompressibleData() # mass fraction to input fraction conversion
self.volume2input = IncompressibleData() # volume fraction to input fraction conversion
self.mole2input = IncompressibleData() # mole fraction to input fraction conversion
## Some of the functions might need a guess array
#self.viscosity.type = self.viscosity.INCOMPRESSIBLE_EXPONENTIAL
#self.viscosity.coeffs = np.array([+7e+2, -6e+1, +1e+1])
#self.saturation_pressure.type = self.saturation_pressure.INCOMPRESSIBLE_EXPONENTIAL
#self.saturation_pressure.coeffs = np.array([-5e+3, +3e+1, -1e+1])
self.xref = 0.0
self.Tref = 0.0
self.pref = 0.0
self.href = 0.0
self.sref = 0.0
self.uref = 0.0
self.rhoref = 0.0
# def getDataObjects(self):
# objList = {}
# objList["temperature"] = self.temperature
# objList["concentration"] = self.concentration
# objList["density"] = self.density
# objList["specific_heat"] = self.specific_heat
# objList["viscosity"] = self.viscosity
# objList["conductivity"] = self.conductivity
# objList["saturation_pressure"] = self.saturation_pressure
# objList["T_freeze"] = self.T_freeze
# objList["volume2mass"] = self.volume2mass
# objList["mass2mole"] = self.mass2mole
# return objList
def roundSingle(self,x):
if x==0.0: return 0.0
return round(x, self.significantDigits-int(math.floor(math.log10(abs(x))))-1)
def round(self, x):
r,c,res = IncompressibleFitter.shapeArray(x)
#digits = -1*np.floor(np.log10(res))+self.significantDigits-1
for i in range(r):
for j in range(c):
if np.isfinite(res[i,j]):
res[i,j] = self.roundSingle(res[i,j])
return res
# def getPolyObjects(self):
# objList = {}
# objList["density"] = self.density
# objList["specific heat"] = self.specific_heat
## objList["viscosity"] = self.viscosity
# objList["conductivity"] = self.conductivity
## objList["saturation_pressure"] = self.saturation_pressure
## objList["T_freeze"] = self.T_freeze
## objList["volume2mass"] = self.volume2mass
## objList["mass2mole"] = self.mass2mole
# return objList
#
# def getExpPolyObjects(self):
# objList = {}
# objList["viscosity"] = self.viscosity
# objList["saturation pressure"] = self.saturation_pressure
# return objList
def checkT(self, T, p, x):
if self.Tmin <= 0.: raise ValueError("Please specify the minimum temperature.")
if self.Tmax <= 0.: raise ValueError("Please specify the maximum temperature.");
if ((self.Tmin > T) or (T > self.Tmax)): raise ValueError("Your temperature {0} is not between {1} and {2}.".format(T, self.Tmin, self.Tmax))
TF = 0.0
if (self.T_freeze.type!=IncompressibleData.INCOMPRESSIBLE_NOT_SET): TF = self.Tfreeze(T, p, x)
if ( T<TF ): raise ValueError("Your temperature {0} is below the freezing point of {1}.".format(T, TF))
else: return True
return False
def checkP(self, T, p, x):
ps = 0.0
if (self.saturation_pressure.type!=IncompressibleData.INCOMPRESSIBLE_NOT_SET): ps = self.psat(T, p, x)
if (p < 0.0): raise ValueError("You cannot use negative pressures: {0} < {1}. ".format(p, 0.0))
if (p < ps) : raise ValueError("Equations are valid for liquid phase only: {0} < {1}. ".format(p, ps))
else : return True
return False
def checkX(self, x):
if (self.xmin < 0.0 or self.xmin > 1.0): raise ValueError("Please specify the minimum concentration between 0 and 1.");
if (self.xmax < 0.0 or self.xmax > 1.0): raise ValueError("Please specify the maximum concentration between 0 and 1.");
if ((self.xmin > x) or (x > self.xmax)): raise ValueError("Your composition {0} is not between {1} and {2}.".format(x, self.xmin, self.xmax))
else: return True
return False
def checkTPX(self, T, p, x):
try:
return (self.checkT(T,p,x) and self.checkP(T,p,x) and self.checkX(x))
except ValueError as ve:
#print("Check failed: {0}".format(ve))
pass
return False
def rho (self, T, p=0.0, x=0.0, c=None):
if not self.checkTPX(T, p, x): return np.NAN
if c is None:
c=self.density.coeffs
if self.density.type==self.density.INCOMPRESSIBLE_POLYNOMIAL:
return np.polynomial.polynomial.polyval2d(T-self.Tbase, x-self.xbase, c)
else: raise ValueError("Unknown function.")
def c (self, T, p=0.0, x=0.0, c=None):
if not self.checkTPX(T, p, x): return np.NAN
if c is None:
c = self.specific_heat.coeffs
if self.specific_heat.type==self.specific_heat.INCOMPRESSIBLE_POLYNOMIAL:
return np.polynomial.polynomial.polyval2d(T-self.Tbase, x-self.xbase, c)
else: raise ValueError("Unknown function.")
def cp (self, T, p=0.0, x=0.0, c=None):
return self.c(T,p,x,c)
def cv (self, T, p=0.0, x=0.0, c=None):
return self.c(T,p,x,c)
def u (self, T, p=0.0, x=0.0, c=None):
if not self.checkTPX(T, p, x): return np.NAN
if c is None:
c = self.specific_heat.coeffs
if self.specific_heat.type==self.specific_heat.INCOMPRESSIBLE_POLYNOMIAL:
c_tmp = np.polynomial.polynomial.polyint(c)
return np.polynomial.polynomial.polyval2d(T-self.Tbase, x-self.xbase, c_tmp)
else: raise ValueError("Unknown function.")
#def u (T, p, x);
def h (self, T, p=0.0, x=0.0):
return self.h_u(T,p,x)
def visc(self, T, p=0.0, x=0.0, c=None):
if not self.checkTPX(T, p, x): return np.NAN
return self.viscosity.baseFunction(T, x, self.Tbase, self.xbase, c=c)
def cond(self, T, p=0.0, x=0.0, c=None):
if not self.checkTPX(T, p, x): return np.NAN
return self.conductivity.baseFunction(T, x, self.Tbase, self.xbase, c=c)
def psat(self, T, p=0.0, x=0.0, c=None):
if (T<=self.TminPsat): return 0.0
return self.saturation_pressure.baseFunction(T, x, self.Tbase, self.xbase, c=c)
def Tfreeze(self, T, p=0.0, x=0.0, c=None):
if c is None:
c = self.T_freeze.coeffs
if self.T_freeze.type==self.T_freeze.INCOMPRESSIBLE_POLYNOMIAL:
return np.polynomial.polynomial.polyval2d(p-0.0, x-self.xbase, c)
elif self.T_freeze.type==self.T_freeze.INCOMPRESSIBLE_POLYOFFSET:
#if y!=0.0: raise ValueError("This is 1D only, use x not y.")
return self.T_freeze.basePolyOffset(c, x) # offset included in coeffs
elif self.T_freeze.type==self.T_freeze.INCOMPRESSIBLE_EXPONENTIAL:
#if y!=0.0: raise ValueError("This is 1D only, use x not y.")
return self.T_freeze.baseExponential(c, x)
elif self.T_freeze.type==IncompressibleData.INCOMPRESSIBLE_LOGEXPONENTIAL:
#if y!=0.0: raise ValueError("This is 1D only, use x not y.")
return self.T_freeze.baseLogexponential(c, x)
elif self.T_freeze.type==self.T_freeze.INCOMPRESSIBLE_EXPPOLYNOMIAL:
return np.exp(np.polynomial.polynomial.polyval2d(p-0.0, x-self.xbase, c))
else:
raise ValueError("Unknown function: {0}.".format(self.T_freeze.type))
#def V2M (T, y);
#def M2M (T, x);
def h_u(self, T, p, x):
return self.u(T,p,x)+p/self.rho(T,p,x)-self.href
def u_h(self, T, p, x):
return self.h(T,p,x)-p/self.rho(T,p,x)+self.href
def set_reference_state(self, T0, p0, x0=0.0, h0=0.0, s0=0.0):
self.rhoref = self.rho(T0,p0,x0)
self.pref = p0
self.uref = h0 - p0/self.rhoref
self.uref = self.u(T0,p0,x0)
self.href = h0
self.sref = s0
self.href = self.h(T0,p0,x0)
self.sref = self.s(T0,p0,x0)
class PureData(SolutionData):
"""
An extension of the solution data that makes it
easier to gather data for pure fluids.
"""
__metaclass__ = ABCMeta
def __init__(self):
SolutionData.__init__(self)
self.xbase = 0.0
self.xid = self.ifrac_pure
self.concentration.data = np.array([ 0 ]) # mass fraction
def reshapeData(self, dataArray, length):
"""
Reshapes an array into a single column of the given
length, implicitly checking the length.
"""
if not dataArray is None:
return np.reshape(dataArray, (length,1))
else:
return None
def reshapeAll(self):
len_T = len(self.temperature.data)
#len_x = len(self.concentration.data)
self.density.data = self.reshapeData(self.density.data, len_T)
self.specific_heat.data = self.reshapeData(self.specific_heat.data, len_T)
self.viscosity.data = self.reshapeData(self.viscosity.data, len_T)
self.conductivity.data = self.reshapeData(self.conductivity.data, len_T)
self.saturation_pressure.data = self.reshapeData(self.saturation_pressure.data, len_T)
#self.T_freeze.data = self.reshapeData(self.T_freeze.data, len_T)
#self.volume2mass.data = self.reshapeData(self.volume2mass.data, len_T)
#self.mass2mole.data = self.reshapeData(self.mass2mole.data, len_T)
class DigitalData(SolutionData):
"""
An extension of the solution data that makes it
easier to generate fitting data from fluids available
as Python packages.
"""
__metaclass__ = ABCMeta
def __init__(self):
SolutionData.__init__(self)
def getFile(self, data):
return os.path.join(os.path.dirname(__file__), 'data', self.name+"_"+data+".txt")
def getFromFile(self, data):
fullPath = self.getFile(data)
_,_,res = IncompressibleFitter.shapeArray(np.loadtxt(fullPath))
return res
def writeToFile(self, data, array):
fullPath = self.getFile(data)
if not os.path.exists(os.path.dirname(fullPath)):
os.makedirs(os.path.dirname(fullPath))
stdFmt = "%1.{0}e".format(int(self.significantDigits-1))
return np.savetxt(fullPath, array, fmt=stdFmt)
def getTrange(self):
if self.Tmin<self.Tmax:
return np.linspace(self.Tmin, self.Tmax, 20)
else:
return np.array([0.0])
def getxrange(self):
if self.xmin<self.xmax and self.xid!=self.ifrac_pure:
return np.linspace(self.xmin, self.xmax, 20)
else:
return np.array([0.0])
def getArray(self, dataID=None, func=None, x_in=None, y_in=None, DEBUG=False):
""" Tries to read a data file, overwrites it if x or y do not match
:param dataID : ID to construct the path to the data file
:param func : Callable object that can take x_in and y_in
:param x_in : a 1D array in x direction or 2D with one column, most likely temperature
:param y_in : a 1D array in y direction or 2D with one row, most likely concentration
:param DEBUG : a boolean that controls verbosity
:returns : Returns a tuple with three entries: x(1D),y(1D),data(2D)
"""
x = None
y = None
z = None
# First we try to read the file
if (not dataID is None and os.path.isfile(self.getFile(dataID))): # File found
fileArray = self.getFromFile(dataID)
x = np.copy(fileArray[1:,0 ])
y = np.copy(fileArray[0 ,1:])
z = np.copy(fileArray[1:,1:])
else:
if DEBUG: print("No readable file found for {0}: {1}".format(dataID,self.getFile(dataID)))
updateFile = DEBUG
if not x_in is None: # Might need update
if not x is None: # Both given, check if different
mask = np.isfinite(x)
if IncompressibleFitter.allClose(x[mask], x_in[mask]):
if DEBUG: print("Both x-arrays are the same, no action required.")
updateFile = (updateFile or False) # Do not change a True value to False
else:
updateFile = True
if DEBUG: print("x-arrays do not match. {0} contains \n {1} \n and will be updated with \n {2}".format(self.getFile(dataID),x,x_in))
else: updateFile = True
elif x is None:
if DEBUG: print("Could not load x from file {0} and no x_in provided, aborting.".format(self.getFile(dataID)))
return None,None,None
else: updateFile = (updateFile or False) # Do not change a True value to False
if not y_in is None: # Might need update
if not y is None: # Both given, check if different
mask = np.isfinite(y)
if IncompressibleFitter.allClose(y[mask], y_in[mask]):
if DEBUG: print("Both y-arrays are the same, no action required.")
updateFile = (updateFile or False) # Do not change a True value to False
else:
updateFile = True
if DEBUG: print("y-arrays do not match. {0} contains \n {1} \n and will be updated with \n {2}".format(self.getFile(dataID),y,y_in))
else: updateFile = True
elif y is None:
if DEBUG: print("Could not load y from file {0} and no y_in provided, aborting.".format(self.getFile(dataID)))
return None,None,None
else: updateFile = (updateFile or False) # Do not change a True value to False
if DEBUG: print("Updating data file {0}".format(updateFile))
if not updateFile: return x,y,z # Done, data read from file
# Overwrite inputs
x = x_in
y = y_in
z = np.zeros( (len(x)+1,len(y)+1) )
r,c = z.shape
if func is None: raise ValueError("Need a function to update the data file.")
for i in range(r-1):
for j in range(c-1):
z[i+1,j+1] = func(x[i],y[j])
z[0,0 ] = np.NaN
z[1:,0] = x
z[0,1:] = y
if not dataID is None:
self.writeToFile(dataID, z)
else:
if DEBUG: print("Not updating data file, dataID is missing.")
return x,y,z[1:,1:]
class CoefficientData(SolutionData):
"""
A class to convert parameter arrays from different other sources
"""
__metaclass__ = ABCMeta
def __init__(self):
SolutionData.__init__(self)
self.reference = "Some other software"
def convertSecCoolArray(self, array):
if len(array)!=18:
raise ValueError("The lenght is not equal to 18!")
self.reference = "SecCool software"
array = np.array(array)
tmp = np.zeros((4,6))
tmp[0][0] = array[0]
tmp[0][1] = array[1]
tmp[0][2] = array[2]
tmp[0][3] = array[3]
tmp[0][4] = array[4]
tmp[0][5] = array[5]
tmp[1][0] = array[6]
tmp[1][1] = array[7]
tmp[1][2] = array[8]
tmp[1][3] = array[9]
tmp[1][4] = array[10]
#tmp[1][5] = array[11]
tmp[2][0] = array[11]
tmp[2][1] = array[12]
tmp[2][2] = array[13]
tmp[2][3] = array[14]
#tmp[2][4] = array[4]
#tmp[2][5] = array[5]
tmp[3][0] = array[15]
tmp[3][1] = array[16]
tmp[3][2] = array[17]
#tmp[3][3] = array[3]
#tmp[3][4] = array[4]
#tmp[3][5] = array[5]
# Concentration is no longer handled in per cent!
for i in range(6):
tmp.T[i] *= 100.0**i
return tmp
def convertSecCoolTfreeze(self, array):
expo = -1.0
for i in range(len(array)):
array[i] = array[i]*np.power(100.0,expo+i)
array[1] = array[1] + 273.15
#self.T_freeze.type = self.T_freeze.INCOMPRESSIBLE_POLYOFFSET
return array
def convertMelinderArray(self, array):
"""The same function as the SecCool converter,
the original source code is slightly different though.
That is why the implementation is in a transposed form..."""
if len(array)!=18:
raise ValueError("The lenght is not equal to 18!")
#self.reference = "Melinder Book"
array = np.array(array)
tmp = np.zeros((6,4))
tmp[0][0] = array[0]
tmp[0][1] = array[6]
tmp[0][2] = array[11]
tmp[0][3] = array[15]
tmp[1][0] = array[1]
tmp[1][1] = array[7]
tmp[1][2] = array[12]
tmp[1][3] = array[16]
tmp[2][0] = array[2]
tmp[2][1] = array[8]
tmp[2][2] = array[13]
tmp[2][3] = array[17]
tmp[3][0] = array[3]
tmp[3][1] = array[9]
tmp[3][2] = array[14]
tmp[4][0] = array[4]
tmp[4][1] = array[10]
tmp[5][0] = array[5]
# Concentration is no longer handled in per cent!
for i in range(6):
tmp[i] *= 100.0**i
return tmp.T
def convertMelinderMatrix(self, array):
"""Function to convert the full coefficient array
from the very first CoolProp implementation
based on the book by Melinder"""
if len(array)!=18:
raise ValueError("The lenght is not equal to 18!")
if len(array[0])!=5:
raise ValueError("The lenght is not equal to 5!")
array = np.array(array)
tmp = np.zeros((18,5))
for j in range(5):
tmp[ 0][j] = array[ 0][j]
tmp[ 1][j] = array[ 4][j]
tmp[ 2][j] = array[ 8][j]
tmp[ 3][j] = array[12][j]
tmp[ 4][j] = array[15][j]
tmp[ 5][j] = array[17][j]
tmp[ 6][j] = array[ 1][j]
tmp[ 7][j] = array[ 5][j]
tmp[ 8][j] = array[ 9][j]
tmp[ 9][j] = array[13][j]
tmp[10][j] = array[16][j]
tmp[11][j] = array[ 2][j]
tmp[12][j] = array[ 6][j]
tmp[13][j] = array[10][j]
tmp[14][j] = array[14][j]
tmp[15][j] = array[ 3][j]
tmp[16][j] = array[ 7][j]
tmp[17][j] = array[11][j]
return tmp
def setMelinderMatrix(self, matrix):
# matrix = np.array([
# [-26.29 , 958.1 ,3887 , 0.4175 , 1.153 ],
# [ -0.000002575 , -0.4151 , 7.201 , 0.0007271 , -0.03866 ],
# [ -0.000006732 , -0.002261 , -0.08979 , 0.0000002823 , 0.0002779 ],
# [ 0.000000163 , 0.0000002998 , -0.000439 , 0.000000009718 , -0.000001543 ],
# [ -1.187 , -1.391 , -18.5 , -0.004421 , 0.005448 ],
# [ -0.00001609 , -0.0151 , 0.2984 , -0.00002952 , 0.0001008 ],
# [ 0.000000342 , 0.0001113 , -0.001865 , 0.00000007336 , -0.000002809 ],
# [ 0.0000000005687, -0.0000003264 , -0.00001718 , 0.0000000004328 , 0.000000009811 ],
# [ -0.01218 , -0.01105 , -0.03769 , 0.00002044 , -0.0005552 ],
# [ 0.0000003865 , 0.0001828 , -0.01196 , 0.0000003413 , 0.000008384 ],
# [ 0.000000008768 , -0.000001641 , 0.00009801 , -0.000000003665 , -0.00000003997 ],
# [ -0.0000000002095, 0.0000000151 , 0.000000666 , -0.00000000002791 , -0.0000000003466 ],
# [ -0.00006823 , -0.0001208 , -0.003776 , 0.0000002943 , 0.000003038 ],
# [ 0.00000002137 , 0.000002992 , -0.00005611 , -0.0000000009646 , -0.00000007435 ],
# [ -0.0000000004271, 0.000000001455, -0.0000007811, 0.00000000003174 , 0.0000000007442 ],
# [ 0.0000001297 , 0.000004927 , -0.0001504 , -0.0000000008666 , 0.00000006669 ],
# [ -0.0000000005407, -0.0000001325 , 0.000007373 , -0.0000000000004573, -0.0000000009105 ],
# [ 0.00000002363 , -0.00000007727 , 0.000006433 , -0.0000000002033 , -0.0000000008472 ]
# ])
coeffs = self.convertMelinderMatrix(matrix).T
self.T_freeze.source = self.T_freeze.SOURCE_COEFFS
self.T_freeze.type = self.T_freeze.INCOMPRESSIBLE_POLYNOMIAL
self.T_freeze.coeffs = self.convertMelinderArray(coeffs[0])
self.T_freeze.coeffs[0,0] += 273.15
self.T_freeze.coeffs = np.array([self.T_freeze.coeffs[0]])
#print(self.T_freeze.coeffs)
self.density.source = self.density.SOURCE_COEFFS
self.density.type = self.density.INCOMPRESSIBLE_POLYNOMIAL
self.density.coeffs = self.convertMelinderArray(coeffs[1])
self.specific_heat.source = self.specific_heat.SOURCE_COEFFS
self.specific_heat.type = self.specific_heat.INCOMPRESSIBLE_POLYNOMIAL
self.specific_heat.coeffs = self.convertMelinderArray(coeffs[2])
self.conductivity.source = self.conductivity.SOURCE_COEFFS
self.conductivity.type = self.conductivity.INCOMPRESSIBLE_POLYNOMIAL
self.conductivity.coeffs = self.convertMelinderArray(coeffs[3])
self.viscosity.source = self.viscosity.SOURCE_COEFFS
self.viscosity.type = self.viscosity.INCOMPRESSIBLE_EXPPOLYNOMIAL
self.viscosity.coeffs = self.convertMelinderArray(coeffs[4])
self.viscosity.coeffs[0,0] -= math.log(1000) # Fixes the units mPa s -> Pa s
```
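A minimal, purely illustrative subclass following the pattern `PureData` expects (not part of the original module). The fluid name and all numbers below are invented placeholders; real entries in `CPIncomp` fill these arrays from measured data.
```python
# Hedged sketch: a made-up pure fluid entry; every value is a placeholder.
import numpy as np
from CPIncomp.DataObjects import PureData

class ExampleOil(PureData):
    def __init__(self):
        PureData.__init__(self)
        self.name = "ExampleOil"
        self.description = "Hypothetical heat transfer oil"
        self.reference = "Illustrative placeholder data"
        self.Tmin = 273.15
        self.Tmax = 373.15
        self.TminPsat = self.Tmax
        self.Tbase = 323.15
        self.temperature.data = np.array([280.0, 300.0, 320.0, 340.0, 360.0])          # K
        self.density.data = np.array([912.0, 899.0, 886.0, 873.0, 860.0])              # kg/m3
        self.specific_heat.data = np.array([1830.0, 1900.0, 1970.0, 2040.0, 2110.0])   # J/(kg.K)
        self.reshapeAll()

fluid = ExampleOil()
print(fluid.name, fluid.density.data.shape)  # -> ExampleOil (5, 1)
```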
#### File: dev/incompressible_liquids/DEPRECATED_testSolutions.py
```python
import sys
import CoolProp.CoolProp as cp
import numpy as np
def props(in1=None, in2=None, in3=None, in4=None, in5=None, in6=None):
try:
return cp.PropsU(in1=in1,in2=in2,in3=in3,in4=in4,in5=in5,in6=in6,in7="SI")
except ValueError as ve:
#print "Error in CoolProp, try adjusting the fluid or T and p:"
print ve
return -1.0*np.NAN
#print "{0:14.8f}".format(CP.Props('V','D',13,'P',500,'n-Pentane'))
#print "{0:14.8f}".format(CP.Props('V','H',158,'P',1000,'TX22'))
#T = 300
T = float(sys.argv[1])+273.15
P = float(sys.argv[2])*1e5
print "Temperature: "+str(T-273.15)+" C"
print "Pressure: "+str(P/1e5)+" bar"
print
Melinder = ["MEG", "MPG", "MEA", "MMA", "MGL", "MAM", "MKC", "MCA", "MMG", "MNA", "MKA", "MKF", "MLI"]
SecCool = ["ZiAC", "IceEA", "IcePG", "IceNA", "PK2000"]
Other = ["LiBr"]
fluids = []
#fluids.extend(Melinder)
#fluids.extend(SecCool)
fluids.extend(Other)
for fluid in fluids:
print "Fluid: "+str(fluid)
try:
print "Density: "+"{0:14.8f} kg/m3 ".format(props('D','T',T,'P',P,fluid+'-20%'))
print "Heat cap.: "+"{0:14.8f} kJ/kg/K".format(props('C','T',T,'P',P,fluid+'-20%')/1e3)
print "Th. cond.: "+"{0:14.8f} W/m/K ".format(props('L','T',T,'P',P,fluid+'-20%'))
print "Dyn. visc.: "+"{0:14.8f} mPas ".format(props('V','T',T,'P',P,fluid+'-20%')*1e3)
print "Enthalpy: "+"{0:14.8f} kJ/kg ".format(props('H','T',T,'P',P,fluid+'-20%')/1e3)
print "In. energy: "+"{0:14.8f} kJ/kg ".format(props('U','T',T,'P',P,fluid+'-20%')/1e3)
print "Entropy: "+"{0:14.8f} kJ/kg/K".format(props('S','T',T,'P',P,fluid+'-20%')/1e3)
print "Saturation: "+"{0:14.8f} bar ".format(props('Psat','T',T,'P',P,fluid+'-20%')/1e5)
print "Freezing: "+"{0:14.8f} C ".format(props('Tfreeze','T',T,'P',P,fluid+'-20%')-273.15)
except ValueError as ve:
print "Error in CoolProp, try adjusting T and p:"
print ve
print
```
#### File: dev/TTSE/check_TTSE_v4.py
```python
import CoolProp.CoolProp as CP
import matplotlib
matplotlib.rc('font', family='serif', serif='Times New Roman')
#from matplotlib2tikz import save as tikz_save
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.ticker
from matplotlib.patches import Ellipse
from matplotlib.transforms import ScaledTranslation
import numpy as np
import random
from numpy import linspace, meshgrid
from matplotlib.mlab import griddata
from matplotlib.gridspec import GridSpec
# Create the colourmap
#import numpy as np
#import matplotlib.pyplot as plt
import matplotlib._cm, matplotlib.cm
specs = matplotlib._cm.cubehelix(gamma=1.4,s=0.4,r=-0.8,h=2.0)
specs_r = matplotlib.cm._reverse_cmap_spec(specs)
matplotlib.cm.register_cmap(name="jorrithelix" , data=specs)
matplotlib.cm.register_cmap(name="jorrithelix"+"_r", data=specs_r)
def makeGrid(x, y, z, resX=200, resY=200):
"Convert 3 column data to matplotlib grid"
xi = linspace(min(x), max(x), resX)
yi = linspace(min(y), max(y), resY)
Z = griddata(x, y, z, xi, yi)
X, Y = meshgrid(xi, yi)
return X, Y, Z
def getErrors(p, h, out='D', Ref=''):
"Get the relative errors from table-based interpolation"
errorTTSE = 1e3
errorBICUBIC = 1e3
try:
# Using the EOS
CP.disable_TTSE_LUT(Ref)
EOS = CP.PropsSI(out,'P',p,'H',h,Ref)
# Using the TTSE method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"TTSE")
TTSE = CP.PropsSI(out,'P',p,'H',h,Ref)
# Using the Bicubic method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"BICUBIC")
BICUBIC = CP.PropsSI(out,'P',p,'H',h,Ref)
errorTTSE = abs(TTSE /EOS-1.0)*100.0
errorBICUBIC = abs(BICUBIC/EOS-1.0)*100.0
except ValueError as VE:
print VE
pass
return errorTTSE,errorBICUBIC
#['YlOrRd', 'PuBuGn', 'hot', 'cubehelix', 'gnuplot', 'gnuplot2']:
for colourmap in ['jorrithelix']:
for out in ['D']:
## landscape figure
#fig = plt.figure(figsize=(10,5))
#ax1 = fig.add_axes((0.08,0.1,0.32,0.83))
#ax2 = fig.add_axes((0.50,0.1,0.32,0.83))
#cbar_ax = fig.add_axes([0.80, 0.075, 0.05, 0.875])
# portrait figure
#fig = plt.figure(figsize=(5,8))
#ax1 = plt.subplot2grid((2,8), (0,0), colspan=7)
#ax2 = plt.subplot2grid((2,8), (1,0), colspan=7)
#cbar_ax = plt.subplot2grid((2,8), (0,7), colspan=1, rowspan=2)
#fig = plt.figure(figsize=(8,4))
#ax1 = plt.subplot2grid((1,7), (0,0), colspan=3)
#ax2 = plt.subplot2grid((1,7), (0,3), colspan=3)
#cbar_ax = plt.subplot2grid((1,7), (0,6), colspan=1, rowspan=1)
#plt.tight_layout()
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
#cbar_ax = plt.subplot2grid((1,7), (0,6), colspan=1, rowspan=1)
#plt.tight_layout()
#Ref = 'R245fa'
#Ref = 'Isopentane'
Ref = 'Air'
T = np.linspace(CP.PropsSI(Ref,'Tmin')+0.1,CP.PropsSI(Ref,'Tcrit')-0.01,300)
pV = CP.PropsSI('P','T',T,'Q',1,Ref)
hL = CP.PropsSI('H','T',T,'Q',0,Ref)
hV = CP.PropsSI('H','T',T,'Q',1,Ref)
hTP= np.append(hL,[hV[::-1]])
pTP= np.append(pV,[pV[::-1]])
HHH1, PPP1, EEE1 = [], [], []
HHH2, PPP2, EEE2 = [], [], []
cNorm = colors.LogNorm(vmin=1e-10, vmax=1e-1)
scalarMap = cmx.ScalarMappable(norm = cNorm, cmap = plt.get_cmap(colourmap))
# Setting the limits for enthalpy and pressure
p_min = CP.PropsSI(Ref,'ptriple')
p_max = 60e5
h_min = CP.PropsSI('H','T',CP.PropsSI(Ref,'Ttriple')+0.5,'Q',0,Ref)
h_max = CP.PropsSI('H','T',500+273.15,'P',p_max,Ref)
# Creating some isotherms for better illustration of the cycle
isoT = np.array([0,100,200,300,400])+273.15
isoP = np.logspace(np.log10(p_min),np.log10(p_max),base=10)
ones = np.ones(isoP.shape)
isoH = [ CP.PropsSI('H','T',T*ones,'P',isoP,Ref) for T in isoT ]
print "Lower left and upper right coordinates: ({0},{1}), ({2},{3})".format(h_min,p_min,h_max,p_max)
CP.set_TTSESinglePhase_LUT_range(Ref,h_min,h_max*1.05,p_min,p_max*1.05)
for a_useless_counter in range(40000):
h = random.uniform(h_min,h_max)
p = 10**random.uniform(np.log10(p_min),np.log10(p_max))
try:
# Using the EOS
CP.disable_TTSE_LUT(Ref)
rhoEOS = CP.PropsSI('D','P',p,'H',h,Ref)
TEOS = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpEOS = CP.PropsSI('C','P',p,'H',h,Ref)
# Using the TTSE method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"TTSE")
rhoTTSE = CP.PropsSI('D','P',p,'H',h,Ref)
TTTSE = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpTTSE = CP.PropsSI('C','P',p,'H',h,Ref)
# Using the Bicubic method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"BICUBIC")
rhoBICUBIC = CP.PropsSI('D','P',p,'H',h,Ref)
TBICUBIC = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpBICUBIC = CP.PropsSI('C','P',p,'H',h,Ref)
if out == 'D':
errorTTSE = abs(rhoTTSE/rhoEOS-1)*100
errorBICUBIC = abs(rhoBICUBIC/rhoEOS-1)*100
elif out == 'T':
errorTTSE = abs(TTTSE/TEOS-1)*100
errorBICUBIC = abs(TBICUBIC/TEOS-1)*100
elif out == 'C':
errorTTSE = abs(cpTTSE/cpEOS-1)*100
errorBICUBIC = abs(cpBICUBIC/cpEOS-1)*100
HHH1.append(h)
PPP1.append(p)
EEE1.append(errorTTSE)
HHH2.append(h)
PPP2.append(p)
EEE2.append(errorBICUBIC)
except ValueError as VE:
#print VE
pass
HHH1 = np.array(HHH1)
PPP1 = np.array(PPP1)
SC1 = ax1.scatter(HHH1/1e3, PPP1/1e5, s=8, c=EEE1, edgecolors = 'none', cmap = plt.get_cmap(colourmap), norm = cNorm, rasterized=True)
#X, Y, Z = makeGrid(HHH1, np.log10(PPP1), EEE1)
#SC1 = matplotlib.pyplot.contourf(X, Y, Z,
# alpha=0.75,
# norm=cNorm,
# cmap=matplotlib.pyplot.get_cmap(colourmap))#,
# #rasterized=True)
HHH2 = np.array(HHH2)
PPP2 = np.array(PPP2)
SC2 = ax2.scatter(HHH2/1e3, PPP2/1e5, s=8, c=EEE2, edgecolors = 'none', cmap = plt.get_cmap(colourmap), norm = cNorm, rasterized=True)
if out == 'D':
ax1.set_title('rel. density error, TTSE')
ax2.set_title('rel. density error, bicubic')
elif out == 'T':
ax1.set_title('rel. temperature error, TTSE')
ax2.set_title('rel. temperature error, bicubic')
elif out == 'C':
ax1.set_title('rel. heat capacity error, TTSE')
ax2.set_title('rel. heat capacity error, bicubic')
for ax in [ax1, ax2]:
#h_min = np.ceil(h_min)
delta = 0.1
delta_min = 1.0+delta
delta_max = 1.0-delta
#ax.set_xlim(delta_min*h_min/1e3, delta_max*h_max/1e3)
#ax.set_ylim(delta_min*p_min/1e5, delta_max*p_max/1e5)
ax.set_xlim(-155, 800)
ax.set_ylim(0.025, 58)
ax.set_yscale('log')
#ticks = np.array([0.02,0.05,0.1,0.2,0.5,1,2,5,10,20,50])
ticks = np.array([0.05,0.1,0.2,0.5,1,2,5,10,20,50])
labels = [str(tick) for tick in ticks]
ax.set_yticks(ticks)
ax.set_yticklabels(labels)
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#ticks = [150,250,350,450,550]
#labels = [str(tick) for tick in ticks]
#ax.set_xticks(ticks)
#ax.set_xticklabels(labels)
#ax.tick_params(axis='y',which='minor',left='off')
#ax.set_xlabel('Enthalpy [kJ \cdot kg^{-1}]')
ax.set_xlabel('Specific Enthalpy [kJ$\cdot$kg$\mathdefault{^{-1}\!}$]')
ax.set_ylabel('Pressure [bar]')
#ax.plot(hL/1e3,pV/1e5,'k',lw = 4)
#ax.plot(hV/1e3,pV/1e5,'k',lw = 4)
ax.plot(hTP/1e3,pTP/1e5,'k',lw = 3)
for i,T in enumerate(isoT):
ax.plot(isoH[i]/1e3,isoP/1e5,'k',lw = 1)
#CB = fig.colorbar(SC1)
#cbar_ax = fig.add_axes([0.80, 0.075, 0.05, 0.875])
#CB = fig.colorbar(SC1, cax=cbar_ax)
#CB = matplotlib.pyplot.colorbar(SC2)
#CB.solids.set_rasterized(True)
#ax2.yaxis.set_visible(False)
#[x0,y0,width,height]
#cbar_ax = fig.add_axes([0.95, 0.00, 0.05, 1.00])
#CB = fig.colorbar(SC2, ax=[ax1,ax2], cax=cbar_ax)
#CB.solids.set_rasterized(True)
#from mpl_toolkits.axes_grid1 import make_axes_locatable
#divider = make_axes_locatable(ax2)
#cbar_ax = divider.append_axes("right", "5%", pad="0%")
#CB = plt.colorbar(SC2, cax=cbar_ax)
#CB.solids.set_rasterized(True)
#CB = fig.colorbar(SC2)
#CB.solids.set_rasterized(True)
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax2)
ax_cb = divider.new_horizontal(size="5%", pad=0.05)
#fig1 = ax.get_figure()
fig.add_axes(ax_cb)
CB = fig.colorbar(SC2, cax=ax_cb)
#aspect = 5./2.
#ax1.set_aspect(aspect)
#ax2.set_aspect(aspect)
CB.solids.set_rasterized(True)
if out == 'D':
CB.set_label(r'$\|\rho/\rho\mathdefault{_{EOS}-1\|\times 100}$ [%]')
elif out == 'T':
CB.set_label(r'$\|T/T\mathdefault{_{EOS}-1\|\times 100}$ [%]')
elif out == 'C':
CB.set_label(r'$\|c\mathdefault{_p}/c\mathdefault{_{p,EOS}-1\|\times 100}$ [%]')
# The plot is finished, now we add an ellipse
#circle=plt.Circle((5,5),.5,color='b',fill=False)
#A scale-free ellipse.
#xy - center of ellipse
#width - total length (diameter) of horizontal axis
#height - total length (diameter) of vertical axis
#angle - rotation in degrees (anti-clockwise)
p_op_min = 1e5
p_op_max = 3e5
h_op_min = CP.PropsSI('H','T',400+273.15,'P',p_op_max,Ref)
h_op_max = CP.PropsSI('H','T', 25+273.15,'P',p_op_max,Ref)
p_op_cen = (p_op_min + p_op_max) / 2.0
h_op_cen = (h_op_min + h_op_max) / 2.0
p_op_hei = p_op_max - p_op_min
h_op_wid = h_op_max - h_op_min
#for ax in [ax1, ax2]:
##x,y = 10,0
### use the axis scale tform to figure out how far to translate
##circ_offset = ScaledTranslation(x,y,ax.transScale)
### construct the composite tform
##circ_tform = circ_offset + ax.transLimits + ax.transAxes
#ellipse = Ellipse(xy=(h_op_cen,p_op_cen), width=h_op_wid, height=p_op_hei, angle=15, color='black')#, transform=circ_tform)
#ax.add_artist(ellipse)
# font_def = font_manager.FontProperties(family='Helvetica', style='normal',
# size=sizeOfFont, weight='normal', stretch='normal')
#
# for a in fig.axes:
# for label in [a.get_xticklabels(), a.get_yticklabels()]:
# label.set_fontproperties(ticks_font
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.png', dpi = 300, transparent = True)
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.eps')
# plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.pdf')
plt.tight_layout()
plt.savefig('check_TTSE_'+colourmap+'.pdf' )
#tikz_save( 'check_TTSE.tikz')
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.jpg', dpi = 1200)
plt.close()
```
#### File: Web/scripts/__init__.py
```python
from __future__ import print_function
import os.path, glob, subprocess, sys, time, datetime, pytz
#
if len(sys.argv) < 2:
full_rebuild = False
if len(sys.argv)== 2:
if sys.argv[1]=="True": full_rebuild = True
elif sys.argv[1]=="1" : full_rebuild = True
else: full_rebuild = False
if len(sys.argv) > 2:
full_rebuild = False
print("Cannot process more than one parameter: {0}".format(str(sys.argv)))
#
def touch(fname):
if os.path.exists(fname): os.utime(fname, None)
else: open(fname, 'a').close()
#
def get_ftime(fname):
if os.path.isfile(fname): return os.path.getctime(fname)
else: return 0
#
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
script_dir = os.path.abspath(os.path.join(web_dir,'scripts'))
touch_file = os.path.abspath(os.path.join(script_dir,'last_run'))
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..'))
#
cur_time = time.time()
fil_time = get_ftime(touch_file)
#
# Static execution time
#reg_hour = time.strftime("%H")
#reg_minute = time.strftime("%M")
#sch_hour = 12 #scheduled hour = 3am Boulder = 12pm CPH
#sch_minute = 7 #scheduled minute = 7 past
#
# Dynamically calculated execution (includes daylight saving time, etc.)
masterTime = pytz.timezone('US/Pacific')
#slaveTime = pytz.timezone('Europe/Copenhagen')
now_master = datetime.datetime.now(masterTime)
run_master = datetime.datetime.strptime("03:00:00", '%H:%M:%S')
#
now_master = datetime.time(now_master.hour, now_master.minute , now_master.second)
run_master = datetime.time(run_master.hour, run_master.minute , run_master.second)
run_master_end = datetime.time(run_master.hour, run_master.minute+5, run_master.second)
#
lim_days = 0.90
lim_time = cur_time - 60*60*24*lim_days # seconds
#
if now_master >= run_master and \
now_master <= run_master_end and \
not full_rebuild:
print("This is a scheduled rebuild at {0}.".format(run_master))
if fil_time < lim_time: full_rebuild = True
else: print("It looks like the files have been rebuilt during the last day.")
#
lim_days = 3
lim_time = cur_time - 60*60*24*lim_days # seconds
if fil_time < lim_time and not full_rebuild:
print("The static files have not been updated in {0} days, forcing an update now.".format(lim_days))
full_rebuild = True
#req_dir = [os.path.abspath(os.path.join(web_dir,'_static','fluid_properties','Incompressibles_reports'))]
#req_fil = [os.path.abspath(os.path.join(web_dir,'fluid_properties','Mixtures.csv')),
# os.path.abspath(os.path.join(web_dir,'fluid_properties','PurePseudoPure.csv')),
# os.path.abspath(os.path.join(web_dir,'fluid_properties','Incompressibles_pure-fluids.csv'))]
#
#for d in req_dir:
# if not os.path.exists(d) and not full_rebuild:
# print "The required directory {0} is missing, trying to rebuild it.".format(d)
# full_rebuild = True
#for f in req_fil:
# if not os.path.exists(f):
# print "The required file {0} is missing, trying to rebuild it.".format(f)
# full_rebuild = True
# print "Executing the normal scripts for generating the static files."
# script_files = glob.glob(os.path.join(script_dir,'*.py')) # Avoid recursion
# script_files = [os.path.abspath(f) for f in script_files if not os.path.abspath(f)==os.path.abspath(__file__)]
# for script in script_files:
# print "Executing {0}".format(script)
# subprocess.call('python {0}'.format(os.path.basename(script)), cwd=script_dir, shell=True)
#
def run_script(path):
if os.path.exists(path):
file_path = os.path.dirname(path)
file_name = os.path.basename(path)
file_extension = path.split(".")[-1]
#file_name, file_extension = os.path.splitext(path)
if file_extension.lower()=="py":
subprocess.check_call('python {0}'.format(file_name), cwd=file_path, shell=True)
elif file_extension.lower()=="sh" or file_extension.lower()=="bsh":
subprocess.check_call('chmod +x {0}'.format(file_name), cwd=file_path, shell=True)
subprocess.check_call('./{0}'.format(file_name), cwd=file_path, shell=True)
else:
print("Unknown file extension in {0}".format(path))
else:
print("Could not find the file {0}".format(path))
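# Illustrative use of run_script (hypothetical path, not from the repository):
#   run_script('/path/to/scripts/coolprop.configuration.py') changes into
#   /path/to/scripts and runs "python coolprop.configuration.py"; a *.sh/*.bsh
#   file would instead be chmod'ed +x and executed directly.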
# Inject the version of CoolProp into the doxygen configuration files
# Put it at the end, overwrites prior value
import CoolProp
with open(os.path.join(root_dir,'Doxyfile'),'a+') as fp:
fp.write('\n\n PROJECT_NUMBER = ' + CoolProp.__version__ + '\n')
# The normal tasks that are carried out each time the script runs
normal_tasks = ["../../dev/scripts/examples/OSXRun.py","coolprop.tabular.speed.py", "fluid_properties.phase_envelope.py", "fluid_properties.PurePseudoPure.py", "fluid_properties.Mixtures.py","coolprop.parametric_table.py","coolprop.configuration.py"]
# The expensive tasks that are fired when full_rebuild is True
expensive_tasks = ["fluid_properties.Consistency.py", "fluid_properties.Incompressibles.sh", "logo_2014.py", "fluid_properties.REFPROPcomparison.py"]
print("Executing the normal scripts for generating static files.")
for script in normal_tasks:
print("Executing {0}".format(script))
run_script(os.path.normpath(os.path.join(script_dir,script)))
#
if full_rebuild:
print("Executing the computationally expensive scripts for generating the static files.")
for script in expensive_tasks:
print("Executing {0}".format(script))
run_script(os.path.join(script_dir,script))
touch(touch_file)
else:
print("Skipping the computationally expensive scripts for generating the static files.")
```
#### File: CoolProp/Plots/Tests.py
```python
from __future__ import print_function, division, absolute_import
from CoolProp.Plots import PropertyPlot #TODO: Change to absolute import
def main():
fluid_ref = 'n-Pentane'
for plot_type in ['Ts']: #['pt', 'ph', 'ps', 'ts', 'pt', 'prho', 'trho']:
plt = PropertyPlot(fluid_ref, plot_type)
plt.set_axis_limits([-0.5*1e3, 1.5*1e3, 300, 530])
plt.draw_isolines('Q', [0.1, 0.9])
plt.draw_isolines('P', [100*1e3, 2000*1e3])
plt.draw_isolines('D', [2, 600])
plt.show()
if __name__ == "__main__":
main()
```
#### File: CoolProp/tests/test_CoolPropState.py
```python
from __future__ import division, print_function
import CoolProp
import CoolProp.CoolProp as CP
from CoolProp.State import State
def first_derivative(S, func, iVal, Val, iConstant, Constant, epsilon = 1e-3):
S.update({iVal:Val,iConstant:Constant})
val1 = func()
S.update({iVal:Val+epsilon,iConstant:Constant})
val2 = func()
S.update({iVal:Val,iConstant:Constant})
return (val2-val1)/epsilon
def second_derivative(S, func, iVal, Val, iConstant, Constant, epsilon = 2):
S.update({iVal:Val-epsilon,iConstant:Constant})
val1 = func()
S.update({iVal:Val,iConstant:Constant})
val2 = func()
S.update({iVal:Val+epsilon,iConstant:Constant})
val3 = func()
S.update({iVal:Val,iConstant:Constant})
print(val1, val2, val3, S.T, S.p, S.rho, (val1-2*val2+val3))
return (val1-2*val2+val3)/(epsilon*epsilon)
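# Note: the helpers above are plain finite-difference approximations built from
# repeated State.update() calls:
#   first_derivative(f, x)  ~ (f(x + eps) - f(x)) / eps                    (forward difference)
#   second_derivative(f, x) ~ (f(x - eps) - 2*f(x) + f(x + eps)) / eps**2  (central difference)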
def teest_1phase_first_derivatives():
for US in [CoolProp.UNIT_SYSTEM_SI, CoolProp.UNIT_SYSTEM_KSI]:
CP.set_standard_unit_system(US)
S = State('R134a',dict(T=300,D=1))
l = [(S.get_rho,'T',S.T,'P',S.p,S.PFC.drhodT_constp),
(S.get_rho,'P',S.p,'T',S.T,S.PFC.drhodp_constT),
(S.get_p,'D',S.rho,'T',S.T,S.PFC.dpdrho_constT),
#(S.get_p,'D',S.rho,'H',S.h,S.PFC.dpdrho_consth), #(these inputs not supported)
(S.get_p,'T',S.T,'D',S.rho,S.PFC.dpdT_constrho),
#(S.get_p,'T',S.T,'H',S.h,S.PFC.dpdT_consth), #(these inputs not supported)
(S.get_h,'D',S.rho,'T',S.T,S.PFC.dhdrho_constT),
(S.get_h,'D',S.rho,'P',S.p,S.PFC.dhdrho_constp),
(S.get_h,'T',S.T,'D',S.rho,S.PFC.dhdT_constrho),
(S.get_h,'T',S.T,'P',S.p,S.PFC.dhdT_constp),
(S.get_h,'P',S.p,'T',S.T,S.PFC.dhdp_constT),
(S.get_s,'D',S.rho,'T',S.T,S.PFC.dsdrho_constT),
(S.get_s,'T',S.T,'D',S.rho,S.PFC.dsdT_constrho),
(S.get_s,'D',S.rho,'P',S.p,S.PFC.dsdrho_constp),
(S.get_s,'T',S.T,'P',S.p,S.PFC.dsdT_constp),
(S.get_s,'P',S.p,'T',S.T,S.PFC.dsdp_constT),
]
for args in l:
yield (check_1phase_first_derivatives,)+(S,)+args
def check_1phase_first_derivatives(S, func, iVal, Val, iConstant, Constant, deriv_func):
Deriv_val = first_derivative(S, func, iVal, Val, iConstant, Constant)
EOS_val = deriv_func()
if abs(EOS_val/Deriv_val-1) > 1e-2:
raise ValueError('Finite Diff: ' + str(Deriv_val) + ' EOS: ' +str(EOS_val))
def teest_sat_first_derivatives():
for US in [CoolProp.UNIT_SYSTEM_SI, CoolProp.UNIT_SYSTEM_KSI]:
CP.set_standard_unit_system(US)
S = State('R134a',dict(T=300,Q=1))
l = [(S.get_T,'P',S.p,'Q',0,S.PFC.dTdp_along_sat),
(S.get_rho,'P',S.p,'Q',0,S.PFC.drhodp_along_sat_liquid),
(S.get_rho,'P',S.p,'Q',1,S.PFC.drhodp_along_sat_vapor),
(S.get_rho,'T',S.T,'Q',0,S.PFC.drhodT_along_sat_liquid),
(S.get_rho,'T',S.T,'Q',1,S.PFC.drhodT_along_sat_vapor),
(S.get_h,'P',S.p,'Q',0,S.PFC.dhdp_along_sat_liquid),
(S.get_h,'P',S.p,'Q',1,S.PFC.dhdp_along_sat_vapor),
(S.get_s,'P',S.p,'Q',0,S.PFC.dsdp_along_sat_liquid),
(S.get_s,'P',S.p,'Q',1,S.PFC.dsdp_along_sat_vapor),
]
for args in l:
yield (check_sat_first_derivatives,)+(S,)+args
def check_sat_first_derivatives(S, func, iVal, Val, iConstant, Constant, deriv_func):
Deriv_val = first_derivative(S, func, iVal, Val, iConstant, Constant)
EOS_val = deriv_func()
if abs(EOS_val/Deriv_val-1) > 1e-2:
raise ValueError('Finite Diff: ' + str(Deriv_val) + ' EOS: ' +str(EOS_val))
def teest_sat_second_derivatives():
for US in [CoolProp.UNIT_SYSTEM_SI, CoolProp.UNIT_SYSTEM_KSI]:
CP.set_standard_unit_system(US)
S = State('R134a',dict(T=290,Q=1))
l = [(S.get_T,'P',S.p,'Q',0,S.PFC.d2Tdp2_along_sat),
(S.get_rho,'P',S.p,'Q',0,S.PFC.d2rhodp2_along_sat_liquid),
(S.get_rho,'P',S.p,'Q',1,S.PFC.d2rhodp2_along_sat_vapor),
(S.get_h,'P',S.p,'Q',0,S.PFC.d2hdp2_along_sat_liquid),
(S.get_h,'P',S.p,'Q',1,S.PFC.d2hdp2_along_sat_vapor),
(S.get_s,'P',S.p,'Q',0,S.PFC.d2sdp2_along_sat_liquid),
(S.get_s,'P',S.p,'Q',1,S.PFC.d2sdp2_along_sat_vapor),
]
for args in l:
yield (check_sat_second_derivatives,)+(S,)+args
def check_sat_second_derivatives(S, func, iVal, Val, iConstant, Constant, deriv_func):
Deriv_val = second_derivative(S, func, iVal, Val, iConstant, Constant)
EOS_val = deriv_func()
if abs(EOS_val/Deriv_val-1) > 1e-2:
raise ValueError('Finite Diff: ' + str(Deriv_val) + ' EOS: ' +str(EOS_val))
if __name__=='__main__':
import nose
nose.runmodule()
```
#### File: wrappers/Python/generate_meta_info.py
```python
from jinja2 import Environment
import os,sys
import requests
import json
from distutils.version import LooseVersion #, StrictVersion
import codecs
""" A simple script to create a conda recipe and the infrastructure files for PyPI"""
first_line = "# CAUTION: This file is generated automatically, any customisation will be lost.\n"
python_dir = os.path.abspath(os.path.dirname(__file__))
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..'))
pypi = False
local = not pypi
run_pkgs = ["numpy", "scipy", "matplotlib", "pandas"]
dev_pkgs = run_pkgs + ["cython"]
tst_pkgs = dev_pkgs + ["nose"]
if pypi:
# Get the additional information from PyPI
r = requests.get('https://pypi.python.org/pypi/CoolProp/json')
if(r.ok):
item = json.loads(r.text or r.content)
version = item['info']['version']
#version = sorted(item['releases'].keys())[-1]
home = item['info']['home_page']
license = 'MIT'
summary = item['info']['summary']
for u in item['urls']:
if u['python_version'] != 'source': continue
fil = u['filename']
url = u['url']
md5 = u['md5_digest']
continue
if local:
coolprop_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..'))
sys.path.append(os.path.join(coolprop_dir, 'dev'))
import generate_headers
# Generate the headers - does nothing if up to date - but only if not pypi
generate_headers.generate()
del generate_headers
version = open(os.path.join(coolprop_dir,'.version'),'r').read().strip()
home = "http://www.coolprop.org"
license = "MIT"
summary = "Open-source thermodynamic and transport properties database"
fil = None
url = None
md5 = None
local_info = dict(
pypi=pypi,
local=local,
version=version,
fil=fil,
url=url,
md5=md5,
run_pkgs=run_pkgs,
dev_pkgs=dev_pkgs,
tst_pkgs=tst_pkgs,
home = home,
license = license,
summary = summary
)
#######################
template = """{% for pkg in run_pkgs %}{{ pkg }}
{% endfor %}
"""
target = "requirements.txt"
template = Environment().from_string(template)
f = codecs.open(os.path.join(python_dir,target),mode='wb',encoding='utf-8')
f.write(first_line)
f.write(template.render(**local_info))
f.close()
template = """
package:
name: coolprop
version: {{ version }}
{% if pypi %}source:
fn: {{ fil }}
url: {{ url }}
md5: {{ md5 }}
{% endif %}
{% if local %}source:
path: .
{% endif %}
#build:
# script: python setup.py install [not win]
# script: "%PYTHON%" setup.py install & if errorlevel 1 exit 1 [win]
# If this is a new build for the same version, increment the build
# number. If you do not include this key, it defaults to 0.
# number: 1
requirements:
build:
- python
- setuptools{% for pkg in dev_pkgs %}
- {{ pkg -}}
{% endfor %}
run:
- python{% for pkg in run_pkgs %}
- {{ pkg -}}
{% endfor %}
test:
# Python imports
imports:
- CoolProp
# #- CoolProp.GUI
# #- CoolProp.Plots
# - CoolProp.tests
# commands:
# You can put test commands to be run here. Use this to test that the
# entry points work.
# You can also put a file called run_test.py in the recipe that will be run
# at test time.
requires:
# Put any additional test requirements here. For example
# - nose{% for pkg in tst_pkgs %}
- {{ pkg -}}
{% endfor %}
about:
home: {{ home }}
license: {{ license }}
summary: {{ summary }}
"""
target = 'meta.yaml'
template = Environment().from_string(template)
f = codecs.open(os.path.join(target_dir,target),mode='wb',encoding='utf-8')
f.write(first_line)
f.write(template.render(**local_info))
f.close()
template = """
pushd wrappers\Python
"%PYTHON%" setup.py install
if errorlevel 1 exit 1
popd
:: Add more build steps here, if they are necessary.
:: See
:: http://docs.continuum.io/conda/build.html
:: for a list of environment variables that are set during the build process.
"""
target = "bld.bat"
f = codecs.open(os.path.join(target_dir,target),mode='wb',encoding='utf-8')
f.write(":: "+first_line)
f.write(template)
f.close()
template = """
pushd wrappers/Python
$PYTHON setup.py install
popd
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""
target = "build.sh"
f = codecs.open(os.path.join(target_dir,target),mode='wb',encoding='utf-8')
f.write("#!/bin/bash\n"+first_line)
f.write(template)
f.close()
template = """
from __future__ import print_function
import sys, shutil, subprocess, os, stat
#
def run_command(cmd):
'''given shell command, returns communication tuple of stdout and stderr'''
print(str(__file__)+": "+' '.join(cmd))
return subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE).communicate()
#
tar = os.path.abspath(os.path.join(os.path.dirname(__file__),'install_root')).strip()
ver = sys.version_info
cmd = ['conda','build','--python',str(ver[0])+'.'+str(ver[1])]
print(run_command(['conda', 'clean', '-y', '-lts'])[0].decode("utf-8").strip())
filename = os.path.abspath(run_command(cmd+['--output','.'])[0].decode("utf-8").strip())
tar = os.path.join(tar,'Python_conda',os.path.basename(os.path.dirname(filename))).strip()
try:
subprocess.check_call(cmd+['.'], stdout=sys.stdout, stderr=sys.stderr)
except Exception as e:
print("conda build failed: "+str(e))
pass
try:
os.makedirs(tar)
except Exception as e:
if os.path.isdir(tar): pass
else: raise
try:
print("Copying: "+str(filename)+" to "+str(tar))
shutil.copy(filename,tar)
except Exception as e:
print("Copy operation failed: "+str(e))
pass
sys.exit(0)
"""
target = "runner.py"
f = codecs.open(os.path.join(target_dir,target),mode='wb',encoding='utf-8')
f.write(template)
f.close()
sys.exit(0)
```
|
{
"source": "jfengan/barrier_option",
"score": 2
}
|
#### File: jfengan/barrier_option/simulator.py
```python
import numpy as np
import time
import torch
from scipy.stats import norm
class Simulator:
@staticmethod
def simulate_pseudo(spot, r, q, sigma, dt, num_paths, time_steps):
np.random.seed(1234)
half_path = int(num_paths / 2) + 1
sqrt_var = sigma * np.sqrt(dt)
st = spot * np.ones((num_paths, time_steps+1))
# start = timeit.default_timer()
simu = np.random.normal(0, 1, (half_path, time_steps))
anti_simu = -simu
simulation = np.concatenate((simu, anti_simu))[:num_paths,:]
growth = (r - q - 0.5*sigma*sigma) * dt + sqrt_var * simulation
factor = np.exp(growth)
for i in range(1, time_steps+1):
st[:, i] = st[:, i-1] * factor[:, i-1]
return st
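# Illustrative call (hypothetical parameter values, not from the original repo):
#   Simulator.simulate_pseudo(spot=100.0, r=0.02, q=0.0, sigma=0.2,
#                             dt=1.0/252, num_paths=10000, time_steps=252)
# returns an array of shape (10000, 253): column 0 holds the spot and each later
# column applies one exp((r - q - 0.5*sigma**2)*dt + sigma*sqrt(dt)*Z) GBM step,
# with roughly half of the rows being antithetic (negated) draws of the others.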
@staticmethod
def simulate_sobol(spot, r, q, sigma, dt, num_paths, time_steps):
sqrt_var = sigma * np.sqrt(dt)
st = spot * np.ones((num_paths, time_steps+1))
sobol_engine = torch.quasirandom.SobolEngine(dimension=time_steps, scramble=True, seed=int(time.time()))
sobol_rng = np.array(sobol_engine.draw(num_paths, dtype=torch.float64))
simulation = norm.ppf(sobol_rng)
growth = (r - q - 0.5*sigma*sigma) * dt + sqrt_var * simulation
factor = np.exp(growth)
for i in range(1, time_steps+1):
st[:, i] = st[:, i-1] * factor[:, i-1]
return st
```
|
{
"source": "jfengan/quanto_project",
"score": 2
}
|
#### File: jfengan/quanto_project/BlackScholes.py
```python
import bisect
import numpy as np
import math
from scipy.optimize import brentq, newton
from scipy.special import ndtr
class BlackScholes:
TAG_CALL = 'call'
TAG_PUT = 'put'
TAG_MID_VOL = 'mid_vol'
TAG_ASK_VOL = 'ask_vol'
TAG_BID_VOL = 'bid_vol'
TAG_BS_DELTA = 'bs_delta'
TAG_EXP_IN_DAYS = 'exp_in_days'
TAG_STRIKE = 'strike'
TAG_FORWARD = 'forward'
CONST_NORM = math.sqrt(2.0 * np.pi)
def __init__(self, s, k, r, q, vol_pnt, t, payoff):
self.s = s
self.k = k
self.r = r
self.q = q
self.vol = vol_pnt
self.t = t
self.payoff = payoff.lower()
self.is_call = BlackScholes.is_call(self.payoff)
self.sqrt_t = math.sqrt(self.t) if self.t >= 0 else np.nan
self.exp_neg_q_t = np.e ** (-self.q * self.t)
self.exp_neg_r_t = np.e ** (-self.r * self.t)
self.d1 = self.__d1() if self.sqrt_t > 0 else np.nan
self.d2 = self.d1 - self.sqrt_t * self.vol if self.sqrt_t > 0 else np.nan
self.n_d1 = ndtr(self.d1) if self.sqrt_t > 0 else np.nan
self.n_d2 = ndtr(self.d2) if self.sqrt_t > 0 else np.nan
self.raw_price = self.__price()
self.raw_delta = self.__delta()
self.phi_d1 = BlackScholes.phi(self.d1) if self.sqrt_t > 0 else np.nan
self.phi_d2 = BlackScholes.phi(self.d2) if self.sqrt_t > 0 else np.nan
@classmethod
def phi(cls, x):
return np.e ** (-x * x / 2.0) / BlackScholes.CONST_NORM
def __delta(self):
if self.t == 0:
if self.is_call:
return 1.0 if self.s > self.k else 0.0
else:
return -1.0 if self.s < self.k else 0.0
if self.is_call:
return self.exp_neg_q_t * self.n_d1
else:
return -self.exp_neg_q_t * (1. - self.n_d1)
@classmethod
def cnorm(cls, x):
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
@classmethod
def is_call(cls, option_type):
return option_type == cls.TAG_CALL
def __d1(self):
return (math.log(self.s / self.k) +
(self.r - self.q + self.vol * self.vol / 2.0) * self.t) / (self.vol * self.sqrt_t)
def __d2(self):
return (math.log(self.s / self.k) +
(self.r - self.q - self.vol * self.vol / 2.0) * self.t) / (self.vol * self.sqrt_t)
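# For reference, the standard Black-Scholes terms computed above:
#   d1 = (ln(S/K) + (r - q + sigma^2/2) * T) / (sigma * sqrt(T))
#   d2 = d1 - sigma * sqrt(T)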
def __price(self):
return self.call_price() if self.is_call else self.put_price()
def price(self):
return self.raw_price
def dollar_delta(self):
return self.raw_delta * self.s
@classmethod
def intrinsic(cls, is_call, s, k, exp_neg_q_t, exp_neg_r_t):
sign = 1.0 if is_call else -1.0
payout = sign * (exp_neg_q_t * s - exp_neg_r_t * k)
return max(0.0, payout)
def call_price(self):
if self.t == 0:
return BlackScholes.intrinsic(True, self.s, self.k, 1.0, 1.0)
return self.s * self.exp_neg_q_t * self.n_d1 - self.exp_neg_r_t * self.k * self.n_d2
def put_price(self):
if self.t == 0:
return BlackScholes.intrinsic(False, self.s, self.k, 1.0, 1.0)
return self.exp_neg_r_t * self.k * (1.0 - self.n_d2) - self.s * self.exp_neg_q_t * (1.0 - self.n_d1)
def delta(self):
return self.raw_delta
def __bump_spot(self, move):
return BlackScholes(self.s * (1 + move), self.k, self.r, self.q, self.vol, self.t, self.payoff)
def __bump_strike(self, move):
return BlackScholes(self.s, self.k * (1 + move), self.r, self.q, self.vol, self.t, self.payoff)
def gamma(self):
if self.t == 0:
return 0
return self.exp_neg_q_t * self.phi_d1 / (self.s * self.vol * self.sqrt_t)
def dollar_gamma(self):
return self.gamma() * self.s ** 2
def get_iv(mkt_price, payoff, s, k, r, q, t, precision=1e-10, max_iter=100) -> float:
lower_bdd = 1e-6
upper_bdd = 1000
def function(vol):
return BlackScholes(s, k, r, q, vol, t, payoff).price() - mkt_price
if function(lower_bdd) * function(upper_bdd) > 0:
return np.nan
else:
solved_vol = brentq(function, lower_bdd, upper_bdd, xtol=precision, maxiter=max_iter)
return solved_vol
def get_iv_newton_r(mkt_price, payoff, s, k, r, q, t, precision=1e-10, max_iter=100) -> float:
def function(vol):
return BlackScholes(s, k, r, q, vol, t, payoff).price() - mkt_price
def bs_vega(vol):
# Black-Scholes vega: S * exp(-q*t) * sqrt(t) * phi(d1), where phi is the standard normal pdf
d1 = (np.log(s / k) + (r - q + 0.5 * vol ** 2) * t) / (vol * np.sqrt(t))
return s * np.exp(-q * t) * np.sqrt(t) * BlackScholes.phi(d1)
solved_vol = newton(function, 1, fprime=bs_vega, maxiter=500, rtol=1.e-10)
return solved_vol
def get_forwad_price(call_price, put_price, strike, r, t):
return call_price - put_price + strike * math.exp(-1 * r * t)
def get_iq(call_price, put_price, strike, r, t, spot):
fwd_curr = (call_price - put_price) + np.exp(-r * t) * strike
# print(f"forward price at {t}: {fwd}")
return -1 / t * math.log(fwd_curr / spot)
def get_iv_newton(mkt_price, payoff, s, k, r, q, t, precision=1e-10, max_iter=100):
def function(vol):
return BlackScholes(s, k, r, q, vol, t, payoff).price() - mkt_price
i = 0
dvol = 0.00001
init_vol = 0.9
while i < max_iter:
price1 = function(init_vol)
price2 = function(init_vol - dvol)
if abs(price1 - price2) < precision or i >= max_iter:
break
else:
init_vol = init_vol - dvol * price2 / (price1 - price2)
i += 1
return init_vol
if __name__ == "__main__":
iv = get_iv_newton(1.8, BlackScholes.TAG_PUT, 5128.22, 4600, 0.026463, 0.174961582, 0.021917808)
print(iv)
```
|
{
"source": "jfennell/wordle",
"score": 3
}
|
#### File: jfennell/wordle/solve_analysis.py
```python
import collections
import json
from typing import Optional, List, Dict
def _display_solve_histogram(hist: Dict[int, int]) -> None:
total = sum(hist.values())
print(f"Total words: {total}")
print("# guesses: # words that took that many guesses | % of all words | cumulative %")
cumulation = 0.0
for solve_length, freq in sorted(hist.items()):
probability_mass = freq / total
cumulation += probability_mass
print(f"{solve_length:02d}: {freq:6d} | {probability_mass:.02f} | {cumulation:.02f}")
def _display_solve(seed: str, guesses: List[str]) -> None:
path = ' -> '.join(guesses)
print(f"{seed} ({len(guesses):02d}): {path}")
def _display_heavy_paths(seed_to_guesses: Dict[str, List[str]]) -> None:
print("The 'hardest to guess' words")
for seed, guesses in seed_to_guesses.items():
if len(guesses) <= 10:
continue
_display_solve(seed, guesses)
def main(args: Optional[List[str]] = None) -> None:
with open('solve_path_out.json', 'r') as f:
X = json.load(f)
C: Dict[int, int] = collections.Counter()
for k, vs in X.items():
C[len(vs)] += 1
_display_solve_histogram(C)
print()
_display_heavy_paths(X)
if __name__ == '__main__':
main()
```
|
{
"source": "jfennick/biobb_adapters",
"score": 2
}
|
#### File: biobb_chemistry/acpype/acpype_params_ac.py
```python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_chemistry.acpype.acpype_params_ac import AcpypeParamsAC # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
@task(input_path=FILE_IN, output_path_frcmod=FILE_OUT, output_path_inpcrd=FILE_OUT, output_path_lib=FILE_OUT, output_path_prmtop=FILE_OUT,
on_failure="IGNORE", time_out=task_time_out)
def _acpypeparamsac(input_path, output_path_frcmod, output_path_inpcrd, output_path_lib, output_path_prmtop, properties, **kwargs):
task_config.pop_pmi(os.environ)
try:
AcpypeParamsAC(input_path=input_path, output_path_frcmod=output_path_frcmod, output_path_inpcrd=output_path_inpcrd, output_path_lib=output_path_lib, output_path_prmtop=output_path_prmtop, properties=properties, **kwargs).launch()
except Exception as e:
traceback.print_exc()
raise e
finally:
sys.stdout.flush()
sys.stderr.flush()
def acpype_params_ac(input_path, output_path_frcmod, output_path_inpcrd, output_path_lib, output_path_prmtop, properties=None, **kwargs):
if (output_path_frcmod is None or (os.path.exists(output_path_frcmod) and os.stat(output_path_frcmod).st_size > 0)) and \
(output_path_inpcrd is None or (os.path.exists(output_path_inpcrd) and os.stat(output_path_inpcrd).st_size > 0)) and \
(output_path_lib is None or (os.path.exists(output_path_lib) and os.stat(output_path_lib).st_size > 0)) and \
(output_path_prmtop is None or (os.path.exists(output_path_prmtop) and os.stat(output_path_prmtop).st_size > 0)) and \
True:
print("WARN: Task AcpypeParamsAC already executed.")
else:
_acpypeparamsac( input_path, output_path_frcmod, output_path_inpcrd, output_path_lib, output_path_prmtop, properties, **kwargs)
```
#### File: biobb_md/gromacs_extra/append_ligand_pc.py
```python
import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_md.gromacs_extra import append_ligand
import os
import sys
@constraint(computingUnits="1")
@task(input_top_zip_path=FILE_IN, input_itp_path=FILE_IN,
output_top_zip_path=FILE_OUT, input_posres_itp_path=FILE_IN,
on_failure="IGNORE")
def append_ligand_pc(input_top_zip_path, input_itp_path,
output_top_zip_path, input_posres_itp_path,
properties, **kwargs):
try:
os.environ.pop('PMI_FD', None)
os.environ.pop('PMI_JOBID', None)
os.environ.pop('PMI_RANK', None)
os.environ.pop('PMI_SIZE', None)
append_ligand.AppendLigand(input_top_zip_path=input_top_zip_path, input_itp_path=input_itp_path,
output_top_zip_path=output_top_zip_path, input_posres_itp_path=input_posres_itp_path,
properties=properties, **kwargs).launch()
except Exception:
traceback.print_exc()
fu.write_failed_output(output_top_zip_path)
finally:
sys.stdout.flush()
sys.stderr.flush()
```
#### File: biobb_md/gromacs/mdrun.py
```python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.multinode import multinode
from pycompss.api.constraint import constraint
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_md.gromacs.mdrun import Mdrun # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
computing_nodes = str(os.environ.get('TASK_COMPUTING_NODES', "1"))
computing_units = str(os.environ.get('TASK_COMPUTING_UNITS', "1"))
gpu_units = str(os.environ.get('TASK_GPU_UNITS', "0"))
@constraint(processors=[{'processorType':'CPU', 'computingUnits':computing_units}, {'processorType':'GPU', 'computingUnits':gpu_units}])
@multinode(computing_nodes=computing_nodes)
@task(input_tpr_path=FILE_IN, output_trr_path=FILE_OUT, output_gro_path=FILE_OUT, output_edr_path=FILE_OUT, output_log_path=FILE_OUT, input_cpt_path=FILE_IN, output_xtc_path=FILE_OUT, output_cpt_path=FILE_OUT, output_dhdl_path=FILE_OUT,
on_failure="IGNORE", time_out=task_time_out)
def _mdrun(input_tpr_path, output_trr_path, output_gro_path, output_edr_path, output_log_path, input_cpt_path, output_xtc_path, output_cpt_path, output_dhdl_path, properties, **kwargs):
task_config.config_multinode(properties)
try:
Mdrun(input_tpr_path=input_tpr_path, output_trr_path=output_trr_path, output_gro_path=output_gro_path, output_edr_path=output_edr_path, output_log_path=output_log_path, input_cpt_path=input_cpt_path, output_xtc_path=output_xtc_path, output_cpt_path=output_cpt_path, output_dhdl_path=output_dhdl_path, properties=properties, **kwargs).launch()
except Exception as e:
traceback.print_exc()
raise e
finally:
sys.stdout.flush()
sys.stderr.flush()
def mdrun(input_tpr_path, output_trr_path, output_gro_path, output_edr_path, output_log_path, input_cpt_path=None, output_xtc_path=None, output_cpt_path=None, output_dhdl_path=None, properties=None, **kwargs):
if (output_trr_path is None or (os.path.exists(output_trr_path) and os.stat(output_trr_path).st_size > 0)) and \
(output_gro_path is None or (os.path.exists(output_gro_path) and os.stat(output_gro_path).st_size > 0)) and \
(output_edr_path is None or (os.path.exists(output_edr_path) and os.stat(output_edr_path).st_size > 0)) and \
(output_log_path is None or (os.path.exists(output_log_path) and os.stat(output_log_path).st_size > 0)) and \
(output_xtc_path is None or (os.path.exists(output_xtc_path) and os.stat(output_xtc_path).st_size > 0)) and \
(output_cpt_path is None or (os.path.exists(output_cpt_path) and os.stat(output_cpt_path).st_size > 0)) and \
(output_dhdl_path is None or (os.path.exists(output_dhdl_path) and os.stat(output_dhdl_path).st_size > 0)) and \
True:
print("WARN: Task Mdrun already executed.")
else:
_mdrun( input_tpr_path, output_trr_path, output_gro_path, output_edr_path, output_log_path, input_cpt_path, output_xtc_path, output_cpt_path, output_dhdl_path, properties, **kwargs)
```
#### File: biobb_pmx/pmx/mutate_pc.py
```python
import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.multinode import multinode
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_pmx.pmx import mutate
import os
import sys
@constraint(computingUnits="1")
@task(input_structure_path=FILE_IN, output_structure_path=FILE_OUT,
on_failure="IGNORE")
def mutate_pc(input_structure_path, output_structure_path,
properties, **kwargs):
try:
os.environ.pop('PMI_FD', None)
os.environ.pop('PMI_JOBID', None)
os.environ.pop('PMI_RANK', None)
os.environ.pop('PMI_SIZE', None)
mutate.Mutate(input_structure_path=input_structure_path, output_structure_path=output_structure_path,
properties=properties, **kwargs).launch()
if not os.path.exists(output_structure_path):
fu.write_failed_output(output_structure_path)
except Exception:
traceback.print_exc()
fu.write_failed_output(output_structure_path)
finally:
sys.stdout.flush()
sys.stderr.flush()
```
#### File: biobb_md/gromacs/mdrun_rmt.py
```python
import os
import argparse
import json
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
#from biobb_common.command_wrapper import cmd_wrapper
#from biobb_md.gromacs.common import get_gromacs_version
#from biobb_md.gromacs.common import GromacsVersionError
from biobb_remote.slurm import Slurm
from biobb_remote.task import FINISHED
from biobb_remote.ssh_credentials import SSHCredentials
class MdrunRmt:
"""Adapter for remote execution of the biobb_md/gromacs/mdrun module.
Args:
File paths passed to mdrun.
input_tpr_path (str): Path to the portable binary run input file TPR. File type: input. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/data/gromacs/mdrun.tpr>`_. Accepted formats: tpr. Passed to mdrun.
output_trr_path (str): Path to the GROMACS uncompressed raw trajectory file TRR. File type: output. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.trr>`_. Accepted formats: trr. Passed to mdrun
output_gro_path (str): Path to the output GROMACS structure GRO file. File type: output. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.gro>`_. Accepted formats: gro. Passed to mdrun.
output_edr_path (str): Path to the output GROMACS portable energy file EDR. File type: output. `Sample file <https://github.com/bioexcel/biobb_md/raw/master/biobb_md/test/reference/gromacs/ref_mdrun.edr>`_. Accepted formats: edr. Passed to mdrun.
output_log_path (str): Path to the output GROMACS trajectory log file LOG. File type: output. Accepted formats: log. Passed to mdrun.
output_xtc_path (str) (Optional): Path to the GROMACS compressed trajectory file XTC. File type: output. Accepted formats: xtc. Passed to mdrun.
output_cpt_path (str) (Optional): Path to the output GROMACS checkpoint file CPT. File type: output. Accepted formats: cpt. Passed to mdrun.
output_dhdl_path (str) (Optional): Path to the output dhdl.xvg file only used when free energy calculation is turned on. File type: output. Accepted formats: xvg. Passed to mdrun.
Adapter files.
keys_file (*str*) - Credentials (biobb_remote.ssh_credentials) file (optional; if missing, the user's own ssh keys are used).
local_path (*str*) - Path to the local files.
remote_path (*str*) - Path to the remote base folder. A unique working dir will be created when necessary.
task_data_path (*str*) - Path to task metadata file (json format). Used to keep live information of the remote task.
properties (dic):
* **host** (*str*) - Remote host name (optional if keys_file is provided)
* **userid** (*str*) - Remote user id (optional if keys_file is provided)
* **queue_settings** (*str*) - One of the predefined queue settings sets (default: whole HPC node, OpenMP)
* **queue_settings_patch** (*json*) - Patch to modify the chosen queue settings
* **modules** (*str*) - One of the predefined HPC module sets (default: Biobb module)
* **wait** (*bool*) - Wait for job completion
* **poll_time** (*int*) - Time between job status checks (seconds)
* **re_use_task** (*bool*) - Reuse remote working dir if available (requires task_data_path)
* **remove_remote** (*bool*) - Remove remote working dir
properties passed to mdrun
* **num_threads** (*int*) - (0) Let GROMACS guess. The number of threads that are going to be used.
* **gmx_lib** (*str*) - (None) Path set GROMACS GMXLIB environment variable.
* **gmx_path** (*str*) - ("gmx") Path to the GROMACS executable binary.
* **mpi_bin** (*str*) - (None) Path to the MPI runner. Usually "mpirun" or "srun".
* **mpi_np** (*str*) - (None) Number of MPI processes. Usually an integer bigger than 1.
* **mpi_hostlist** (*str*) - (None) Path to the MPI hostlist file.
"""
def __init__(self, input_tpr_path: str, output_trr_path: str, output_gro_path: str,
output_edr_path: str, output_log_path: str, output_xtc_path: str = None,
output_cpt_path: str = None, output_dhdl_path: str = None,
host_config_path: str=None, keys_file: str=None, local_path: str=None,
remote_path: str=None, task_data_path: str= None,
properties: dict =None, **kwargs) -> None:
self.properties = properties or {}
self.host = properties.get('host', '')
self.userid = properties.get('userid', '')
self.queue_settings = properties.get('queue_settings', 'default')
self.queue_settings_patch = json.loads(properties.get('queue_settings_patch', '{}'))
self.modules = properties.get('modules', 'biobb')
self.wait = properties.get('wait', True)
if self.wait:
self.poll_time = int(properties.get('poll_time', '10'))
else:
self.poll_time = 0
self.set_debug = properties.get('set_debug',False)
self.re_use_task = properties.get('re_use_task', True)
self.io_dict = {
"in": {
'keys_file': keys_file,
'local_path': local_path,
'remote_path': remote_path,
'host_config_path': host_config_path,
},
"inout": {
'task_data_path': task_data_path
}
}
# Properties common in all BB
self.can_write_console_log = properties.get('can_write_console_log', True)
self.global_log = properties.get('global_log', None)
self.remove_remote = properties.get('remove_remote', False)
self.restart = properties.get('restart', False)
self.prefix = properties.get('prefix', None)
self.step = properties.get('step', None)
self.path = properties.get('path', '')
self.remove_tmp = properties.get('remove_tmp', False)
self.files = {
'input_tpr_path' : input_tpr_path,
'output_trr_path' : output_trr_path,
'output_gro_path' : output_gro_path,
'output_edr_path' : output_edr_path,
'output_log_path' : output_log_path,
'output_xtc_path' : output_xtc_path,
'output_cpt_path' : output_cpt_path,
'output_dhdl_path' : output_dhdl_path
}
#clean local properties
for p in ('host', 'userid', 'queue_settings', 'modules', 'poll_time', 'wait', 'working_dir_path','path','step','remove_tmp'):
if p in self.properties:
del self.properties[p]
# Check the properties
fu.check_properties(self, properties)
@launchlogger
def launch(self) -> int:
"""Launches the execution of the remote GROMACS mdrun module."""
# Get local loggers from launchlogger decorator
out_log = getattr(self, 'out_log', None)
err_log = getattr(self, 'err_log', None)
if self.io_dict['in']['keys_file']:
self.credentials = SSHCredentials()
self.credentials.load_from_file(self.io_dict['in']['keys_file'])
slurm = Slurm()
slurm.set_credentials(self.credentials)
else:
slurm = Slurm(host=self.host, userid=self.userid, look_for_keys=True)
if self.re_use_task:
try:
slurm.load_data_from_file(self.io_dict['inout']['task_data_path'])
if slurm.task_data['remote_path']:
self.io_dict['in']['remote_path'] = slurm.task_data['remote_path']
except:
print("Warning: Task data not found")
pass
slurm.load_host_config(self.io_dict['in']['host_config_path'])
slurm.save(self.io_dict['inout']['task_data_path'])
slurm.set_local_data_bundle(self.io_dict['in']['local_path'], add_files=False)
slurm.task_data['local_data_bundle'].add_file(
self.io_dict['in']['local_path'] + "/" + self.files['input_tpr_path']
)
if not self.io_dict['in']['remote_path']:
print('Remote path not available')
return 1
slurm.send_input_data(self.io_dict['in']['remote_path'], overwrite=False)
slurm.save(self.io_dict['inout']['task_data_path'])
#queue settings
if self.queue_settings_patch:
slurm.set_custom_settings(ref_setting=self.queue_settings, patch=self.queue_settings_patch)
self.queue_settings = 'custom'
slurm.submit(
queue_settings=self.queue_settings,
modules=self.modules,
poll_time=self.poll_time,
set_debug=self.set_debug,
local_run_script=slurm.get_remote_py_script(
'from biobb_md.gromacs.mdrun import Mdrun',
self.files,
'Mdrun',
properties=self.properties
)
)
slurm.save(self.io_dict['inout']['task_data_path'])
if self.wait:
slurm.check_job(poll_time=int(self.poll_time))
if slurm.task_data['status'] == FINISHED:
slurm.get_output_data(overwrite=False)
out_log, err_log = slurm.get_logs()
slurm.save(self.io_dict['inout']['task_data_path'])
if self.remove_remote:
slurm.clean_remote()
returncode = 0 #TODO
return returncode
def main():
parser = argparse.ArgumentParser(description="Wrapper for the GROMACS mdrun module.",
formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('-c', '--config', required=False, help="This file can be a YAML file, JSON file or JSON string")
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_tpr_path', required=True)
required_args.add_argument('--output_trr_path', required=True)
required_args.add_argument('--output_gro_path', required=True)
required_args.add_argument('--output_edr_path', required=True)
required_args.add_argument('--output_log_path', required=True)
required_args.add_argument('--local_path', required=True)
required_args.add_argument('--task_data_path', required=True)
required_args.add_argument('--host_config_path', required=True)
required_args.add_argument('--queue_settings', required=True)
parser.add_argument('--output_xtc_path', required=False)
parser.add_argument('--output_cpt_path', required=False)
parser.add_argument('--output_dhdl_path', required=False)
parser.add_argument('--keys_file_path', required=False)
parser.add_argument('--queue_settings_patch', required=False)
parser.add_argument('--remote_path', required=False)
parser.add_argument('--set_debug', required=False)
args = parser.parse_args()
config = args.config if args.config else None
properties = settings.ConfReader(config=config).get_prop_dic()
# Specific call of each building block
MdrunRmt(input_tpr_path=args.input_tpr_path, output_trr_path=args.output_trr_path,
output_gro_path=args.output_gro_path, output_edr_path=args.output_edr_path,
output_log_path=args.output_log_path, output_xtc_path=args.output_xtc_path,
output_cpt_path=args.output_cpt_path, output_dhdl_path=args.output_dhdl_path,
host_config_path=args.host_config_path, keys_file=args.keys_file_path, local_path=args.local_path,
remote_path=args.remote_path, task_data_path=args.task_data_path,queue_settings=args.queue_settings,
queue_settings_patch=args.queue_settings_patch, set_debug=args.set_debug,
properties=properties).launch()
if __name__ == '__main__':
main()
```
|
{
"source": "jfennick/workflow_inference_compiler",
"score": 3
}
|
#### File: src/wic/utils.py
```python
import argparse
import copy
from pathlib import Path
from typing import Any, Dict, List, Tuple
import graphviz
import yaml
from . import auto_gen_header
from .wic_types import (GraphData, GraphReps, Json, Namespaces, NodeData,
RoseTree, Tool, Tools, Yaml, YamlForest, YamlTree)
def read_lines_pairs(filename: Path) -> List[Tuple[str, str]]:
"""Reads a whitespace-delimited file containing two paired entries per line (i.e. a serialized Dict).
Args:
filename (Path): The full path of the file to be read.
Raises:
Exception: If any non-blank, non-comment lines do not contain exactly two entries.
Returns:
List[Tuple[str, str]]: The file contents, with blank lines and comments removed.
"""
with open(filename, mode='r', encoding='utf-8') as f:
lines = []
for line in f.readlines():
if line.strip() == '': # Skip blank lines
continue
if line.startswith('#'): # Skip comment lines
continue
l_s = line.split()
if not len(l_s) == 2:
print(line)
raise Exception("Error! Line must contain exactly two entries!")
lines.append((l_s[0], l_s[1]))
return lines
def step_name_str(yaml_stem: str, i: int, step_key: str) -> str:
"""Returns a string which uniquely and hierarchically identifies a step in a workflow
Args:
yaml_stem (str): The name of the workflow (filepath stem)
i (int): The (zero-based) step number
step_key (str): The name of the step (used as a dict key)
Returns:
str: The parameters (and the word 'step') joined together with double underscores
"""
# Use double underscore so we can '__'.split() below.
# (This should work as long as yaml_stem and step_key do not contain __)
return f'{yaml_stem}__step__{i+1}__{step_key}'
def parse_step_name_str(step_name: str) -> Tuple[str, int, str]:
"""The inverse function to step_name_str()
Args:
step_name (str): A string of the same form as returned by step_name_str()
Raises:
Exception: If the argument is not of the same form as returned by step_name_str()
Returns:
Tuple[str, int, str]: The parameters used to create step_name
"""
vals = step_name.split('__') # double underscore
if not len(vals) == 4:
raise Exception(f"Error! {step_name} is not of the format \n"
+ '{yaml_stem}__step__{i+1}__{step_key}\n'
+ 'yaml_stem and step_key should not contain any double underscores.')
try:
i = int(vals[2])
except Exception as ex:
raise Exception(f"Error! {step_name} is not of the format \n"
+ '{yaml_stem}__step__{i+1}__{step_key}') from ex
return (vals[0], i-1, vals[3])
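# Illustrative round trip (not part of the original module):
#   step_name_str('align', 0, 'minimize')            == 'align__step__1__minimize'
#   parse_step_name_str('align__step__1__minimize')  == ('align', 0, 'minimize')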
def add_graph_edge(args: argparse.Namespace, graph: GraphReps,
nss1: Namespaces, nss2: Namespaces,
label: str, color: str = '') -> None:
"""Adds edges to (all of) our graph representations, with the ability to
collapse all nodes below a given depth to a single node.
This function utilizes the fact that nodes have been carefully designed to
have unique, hierarchical names. If we want to hide all of the details
below a given depth, we can simply truncate each of the namespaces!
(and do the same when creating the nodes)
Args:
args (argparse.Namespace): The command line arguments
graph (GraphReps): A tuple of a GraphViz DiGraph and a networkx DiGraph
nss1 (Namespaces): The namespaces associated with the first node
nss2 (Namespaces): The namespaces associated with the second node
label (str): The edge label
color (str, optional): The edge color
"""
if color == '':
color = 'black' if args.graph_dark_theme else 'white'
nss1 = nss1[:(1 + args.graph_inline_depth)]
edge_node1 = '___'.join(nss1)
nss2 = nss2[:(1 + args.graph_inline_depth)]
edge_node2 = '___'.join(nss2)
graph_gv = graph.graphviz
graph_nx = graph.networkx
graphdata = graph.graphdata
# Hide internal self-edges
if edge_node1 != edge_node2:
attrs = {'color': color}
if args.graph_label_edges:
attrs['label'] = label
graph_gv.edge(edge_node1, edge_node2, **attrs)
graph_nx.add_edge(edge_node1, edge_node2)
graphdata.edges.append((edge_node1, edge_node2, attrs))
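# Illustrative effect of the truncation above (hypothetical namespaces): with
# args.graph_inline_depth == 1, nss1 == ['root', 'sub', 'step'] is cut to
# ['root', 'sub'] and rendered as the single node 'root___sub', so everything
# nested deeper than the chosen depth collapses into its ancestor node.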
def flatten_graphdata(graphdata: GraphData, parent: str = '') -> GraphData:
"""Flattens graphdata by recursively inlineing all subgraphs.
Args:
graphdata (GraphData): A data structure which contains recursive subgraphs and other metadata.
parent (str, optional): The name of the parent graph is encoded into the node attributes so that\n
the subgraph information can be preserved after flattening. (Used for cytoscape) Defaults to ''.
Returns:
GraphData: A GraphData instance with all of the recursive subgraphs inlined
"""
subgraphs = [flatten_graphdata(subgraph, str(graphdata.name)) for subgraph in graphdata.subgraphs]
# NOTE: Even though all of the following default list arguments are [],
# you MUST explicitly supply the empty lists!!! Otherwise, after
# instantiation, the lists will contain values from previous instances!!!
# This shallow copy causes an infinite loop because as we copy nodes,
# they end up getting appended to the original lists!
# This makes absolutely no sense. Since the lists are defined at the
# instance level (NOT the class level), there should be zero sharing!
g_d = GraphData(str(graphdata.name), [], [], [], []) # This is fine
#g_d = GraphData(str(graphdata.name)) # This is NOT fine!
# i.e. The following statement will NOT print zeros (in the second case)!
#print(g_d.name, len(g_d.nodes), len(g_d.edges), len(g_d.subgraphs), len(g_d.ranksame))
for subgraph in subgraphs:
# We need to add a placeholder node for each subgraph first
attrs = {} if parent == '' else {'parent': parent}
# NOTE: This does not yet work with args.graph_inline_depth
g_d.nodes.append((subgraph.name, attrs))
for subgraph in subgraphs:
# Then we can add the nodes and edges from the subgraphs.
# (Otherwise, cytoscape won't render the subgraphs correctly.)
for (subnode, subattrs) in subgraph.nodes:
g_d.nodes.append((subnode, subattrs))
for (subnode1, subnode2, subattrs) in subgraph.edges:
g_d.edges.append((subnode1, subnode2, subattrs))
# Finally, add the nodes and edges from the current graph
for (node, attrs) in graphdata.nodes:
attrs['parent'] = graphdata.name
g_d.nodes.append((node, attrs))
for (node1, node2, attrs) in graphdata.edges:
g_d.edges.append((node1, node2, attrs))
return g_d
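# Minimal illustration of the shared-default pitfall described in the comments
# above (a sketch only, assuming GraphData declares mutable default arguments):
#   def __init__(self, name, nodes=[]):   # the single [] is evaluated once...
#       self.nodes = nodes                # ...and then shared by every instance
# Passing fresh lists explicitly (as done above) or using
# dataclasses.field(default_factory=list) avoids the shared state.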
def graphdata_to_cytoscape(graphdata: GraphData) -> Json:
"""Converts a flattened graph into cytoscape json format.
Args:
graphdata (GraphData): A flattened GraphData instance
Returns:
Json: A Json object compatible with cytoscape.
"""
nodes = []
for (node, attrs) in list(graphdata.nodes):
nodes.append({'data': {'id': node, **attrs}})
edges = []
for (node1, node2, attrs) in list(graphdata.edges):
edges.append({'data': {'source': node1, 'target': node2, **attrs}})
return {'nodes': nodes, 'edges': edges}
def partition_by_lowest_common_ancestor(nss1: Namespaces, nss2: Namespaces) -> Tuple[Namespaces, Namespaces]:
"""See https://en.wikipedia.org/wiki/Lowest_common_ancestor
Args:
nss1 (Namespaces): The namespaces associated with the first node
nss2 (Namespaces): The namespaces associated with the second node
Returns:
Tuple[Namespaces, Namespaces]: nss1, partitioned by lowest common ancestor
"""
# Only partition nss1; if you want to partition nss2 instead,
# just switch the arguments at the call site.
if nss1 == [] or nss2 == []:
return ([], nss1) # Base case
if nss1[0] == nss2[0]: # Keep going
(nss1_heads, nss1_tails) = partition_by_lowest_common_ancestor(nss1[1:], nss2[1:])
return ([nss1[0]] + nss1_heads, nss1_tails)
return ([], nss1)
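# Illustrative example (not part of the original module):
#   partition_by_lowest_common_ancestor(['a', 'b', 'c'], ['a', 'b', 'd'])
#   == (['a', 'b'], ['c'])    # shared prefix, then the remainder of nss1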
def get_steps_keys(steps: List[Yaml]) -> List[str]:
"""Returns the name (dict key) of each step in the given CWL workflow
Args:
steps (List[Yaml]): The steps: tag of a CWL workflow
Returns:
List[str]: The name of each step in the given CWL workflow
"""
# Get the dictionary key (i.e. the name) of each step.
steps_keys = []
for step in steps:
steps_keys += list(step)
#print(steps_keys)
return steps_keys
def extract_backend(yaml_tree: Yaml, wic: Yaml, yaml_path: Path) -> Tuple[str, Yaml]:
"""Chooses a specific backend for a given CWL workflow step.
The backends should be thought of as either 'exactly' identical, or at
least the same high-level protocol but implemented with a different algorithm.
Args:
yaml_tree (Yaml): A Yaml AST dict with sub-dicts for each backend.
wic (Yaml): The contents of the wic: metadata tag (i.e. the backend / default_backend / backends information).
yaml_path (Path): The filepath of yaml_tree, only used for error reporting.
Raises:
Exception: If the steps: and/or backend: tags are not present.
Returns:
Tuple[str, Yaml]: The name of the chosen backend and the Yaml AST dict containing its steps.
"""
yaml_tree_copy = copy.deepcopy(yaml_tree)
backend = ''
if 'backends' in wic:
if 'default_backend' in wic:
backend = wic['default_backend']
if 'backend' in wic:
backend = wic['backend']
if backend == '':
raise Exception(f'Error! No backend in {yaml_path}!')
if backend not in wic['backends']:
raise Exception(f'Error! No steps for backend {backend} in {yaml_path}!')
steps = wic['backends'][backend]['steps']
yaml_tree_copy.update({'steps': steps})
elif 'steps' in yaml_tree_copy:
pass # steps = yaml_tree_copy['steps']
else:
raise Exception(f'Error! No backends and/or steps in {yaml_path}!')
return (backend, yaml_tree_copy)
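# Illustrative shape of the wic: tag handled above (keys inferred from this
# function; the backend names are hypothetical):
#   wic:
#     default_backend: backend_a
#     backends:
#       backend_a:
#         steps: [...]
#       backend_b:
#         steps: [...]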
def inline_sub_steps(yaml_path: Path, tools: Tools, yml_paths: Dict[str, Path]) -> List[Yaml]:
"""Recursively inlines the contents of ALL of the yml sub-workflows. (deprecated)
This function is deprecated and will soon be replaced with a better implementation.
Args:
yaml_path (Path): The filepath of the yml workflow.
tools (Tools): The CWL CommandLineTool definitions found using get_tools_cwl()
yml_paths (Dict[str, Path]): The yml workflow definitions found using get_yml_paths()
Returns:
List[Yaml]: The recursively inlined contents of the given yml workflow.
"""
# Load the high-level yaml workflow file.
with open(Path(yaml_path), mode='r', encoding='utf-8') as y:
yaml_tree: Yaml = yaml.safe_load(y.read())
wic = yaml_tree.get('wic', {})
(back_name_, yaml_tree) = extract_backend(yaml_tree, wic, yaml_path)
steps = yaml_tree['steps']
# Get the dictionary key (i.e. the name) of each step.
steps_keys = []
for step in steps:
steps_keys += list(step)
subkeys = [key for key in steps_keys if key not in tools]
steps_all = []
for i, step_key in enumerate(steps_keys):
if step_key in subkeys:
path = yml_paths[Path(step_key).stem]
steps_i = inline_sub_steps(path, tools, yml_paths)
else:
steps_i = [steps[i]]
steps_all.append(steps_i)
steps_all_flattened = [step for steps in steps_all for step in steps]
return steps_all_flattened
def flatten(lists: List[List[Any]]) -> List[Any]:
"""Concatenates a list of lists into a single list.
Args:
lists (List[List[Any]]): A list of lists
Returns:
List[Any]: A single list
"""
return [x for lst in lists for x in lst]
def flatten_rose_tree(rose_tree: RoseTree) -> List[Any]:
"""Flattens the data contained in the Rose Tree into a List
Args:
rose_tree (RoseTree): A Rose Tree
Returns:
List[Any]: The list of data associated with each node in the RoseTree
"""
sub_rose_trees = [flatten_rose_tree(r) for r in rose_tree.sub_trees]
return [rose_tree.data] + flatten(sub_rose_trees)
def pretty_print_forest(forest: YamlForest) -> None:
"""pretty prints a YamlForest
Args:
forest (YamlForest): The forest to be printed
"""
print(forest.yaml_tree.name)
print(yaml.dump(forest.yaml_tree.yml))
print(yaml.dump(forest.sub_forests))
def flatten_forest(forest: YamlForest) -> List[YamlForest]:
"""Flattens the sub-trees encountered while traversing an AST
Args:
forest (YamlForest): The yaml AST forest to be flattened
Raises:
Exception: If backend: tags are missing.
Returns:
List[YamlForest]: The flattened forest
"""
#pretty_print_forest(forest)
if forest == {}:
return []
yaml_tree = forest.yaml_tree.yml
wic = {'wic': yaml_tree.get('wic', {})}
if 'backends' in wic['wic']:
#pretty_print_forest(forest)
back_name = ''
if 'default_backend' in wic['wic']:
back_name = wic['wic']['default_backend']
if 'backend' in wic['wic']:
back_name = wic['wic']['backend']
if back_name == '':
pretty_print_forest(forest)
raise Exception('Error! No backend in yaml forest!\n')
yaml_tree_back: YamlTree = forest.sub_forests[back_name].yaml_tree
step_1 = yaml_tree_back.yml['steps'][0]
step_name_1 = list(step_1.keys())[0]
if Path(step_name_1).suffix == '.yml':
# Choose a specific backend
return flatten_forest(forest.sub_forests[back_name])
return [forest]
forests = list(forest.sub_forests.values())
sub_forests = [flatten_forest(f) for f in forests]
# Use depth first search flattening to match flatten_rose_tree()
#bfs = forests + flatten(sub_forests)
dfs_lists = [[f] + fs for f, fs in zip(forests, sub_forests)]
dfs = flatten(dfs_lists)
return dfs
def write_to_disk(rose_tree: RoseTree, path: Path, relative_run_path: bool) -> None:
"""Writes the compiled CWL files and their associated yml inputs files to disk.
NOTE: Only the yml input file associated with the root workflow is
guaranteed to have all inputs. In other words, subworkflows will all have
valid CWL files, but may not be executable due to 'missing' inputs.
Args:
rose_tree (RoseTree): The data associated with compiled subworkflows
path (Path): The directory in which to write the files
relative_run_path (bool): Controls whether to use subdirectories or just one directory.
"""
node_data: NodeData = rose_tree.data
namespaces = node_data.namespaces
yaml_stem = node_data.name
cwl_tree = node_data.compiled_cwl
yaml_inputs = node_data.workflow_inputs_file
path.mkdir(parents=True, exist_ok=True)
if relative_run_path:
filename_cwl = f'{yaml_stem}.cwl'
filename_yml = f'{yaml_stem}_inputs.yml'
else:
filename_cwl = '___'.join(namespaces + [f'{yaml_stem}.cwl'])
filename_yml = '___'.join(namespaces + [f'{yaml_stem}_inputs.yml'])
# Dump the compiled CWL file contents to disk.
# Use sort_keys=False to preserve the order of the steps.
yaml_content = yaml.dump(cwl_tree, sort_keys=False, line_break='\n', indent=2)
with open(path / filename_cwl, mode='w', encoding='utf-8') as w:
w.write('#!/usr/bin/env cwl-runner\n')
w.write(auto_gen_header)
w.write(''.join(yaml_content))
yaml_content = yaml.dump(yaml_inputs, sort_keys=False, line_break='\n', indent=2)
with open(path / filename_yml, mode='w', encoding='utf-8') as inp:
inp.write(auto_gen_header)
inp.write(yaml_content)
for sub_rose_tree in rose_tree.sub_trees:
subpath = path
if relative_run_path:
sub_node_data: NodeData = sub_rose_tree.data
sub_step_name = sub_node_data.namespaces[-1]
subpath = path / sub_step_name
write_to_disk(sub_rose_tree, subpath, relative_run_path)
def recursively_delete_dict_key(key: str, obj: Any) -> Any:
"""Recursively deletes any dict entries with the given key.
Args:
key (str): The key to be deleted
obj (Any): The object from which to delete key.
Returns:
Any: The original dict with the given key recursively deleted.
"""
if isinstance(obj, List):
return [recursively_delete_dict_key(key, x) for x in obj]
if isinstance(obj, Dict):
new_dict = {}
for key_ in obj.keys():
if not key_ == key: # i.e. effectively delete key
new_dict[key_] = recursively_delete_dict_key(key, obj[key_])
return new_dict
return obj
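# Illustrative example:
#   recursively_delete_dict_key('id', {'id': 1, 'steps': [{'id': 2, 'run': 'x.cwl'}]})
#   returns {'steps': [{'run': 'x.cwl'}]}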
def make_tool_dag(tool_stem: str, tool: Tool, graph_dark_theme: bool) -> None:
"""Uses the `dot` executable from the graphviz package to make a Directed
Acyclic Graph corresponding to the given CWL CommandLineTool
Args:
tool_stem (str): The name of the Tool
tool (Tool): The CWL CommandLineTool
graph_dark_theme (bool): See args.graph_dark_theme
"""
(tool_path, tool_cwl) = tool
yaml_path = f'autogenerated/DAG/{tool_path}'
Path(yaml_path).parent.mkdir(parents=True, exist_ok=True)
graph = graphviz.Digraph(name=yaml_path)
graph.attr(bgcolor="transparent") # Useful for making slides
font_edge_color = 'black' if graph_dark_theme else 'white'
graph.attr(fontcolor=font_edge_color)
graph.attr(rankdir='LR')
attrs = {'shape':'box', 'style':'rounded, filled'}
graph.node(tool_stem, fillcolor='lightblue', **attrs)
for input_cwl in tool_cwl['inputs']:
input_initial_ns = input_cwl.split('___')[0].split('__')[0]
input_no_initial_ns = input_cwl.replace(f'{input_initial_ns}__', '')
# Hide optional inputs that could be confusing.
if 'output' not in input_no_initial_ns:
graph.node(f'input_{input_cwl}', label=input_no_initial_ns, fillcolor='lightgreen', **attrs)
graph.edge(f'input_{input_cwl}', tool_stem, color=font_edge_color)
for output_cwl in tool_cwl['outputs']:
output_initial_ns = output_cwl.split('___')[0].split('__')[0]
output_no_initial_ns = output_cwl.replace(f'{output_initial_ns}__', '')
graph.node(f'output_{output_cwl}', label=output_no_initial_ns, fillcolor='lightyellow', **attrs)
graph.edge(tool_stem, f'output_{output_cwl}', color=font_edge_color)
graph.render(format='png')
def make_plugins_dag(tools: Tools, graph_dark_theme: bool) -> None:
"""Uses the `neato` executable from the graphviz package to make a Directed
Acyclic Graph consisting of a node for each CWL CommandLineTool and no edges.
Args:
tools (Tools): The CWL CommandLineTool definitions found using get_tools_cwl()
graph_dark_theme (bool): See args.graph_dark_theme
"""
# NOTE: Do not use the default 'dot' engine. Use neato / fdp / sfdp
# and set pack=0 to remove the massive blank space around each node.
# Also note that despite my best efforts, I cannot force graphviz to
# change the aspect ratio (to double the width for making slides)
# without simply stretching it and distorting the nodes, so we can just
# partition the tools into two squares and display them side by side.
num_tools_half = int(len(list(tools)) / 2)
for i in [0,1]:
yaml_path = f'autogenerated/DAG/plugins{i}'
Path(yaml_path).mkdir(parents=True, exist_ok=True)
graph = graphviz.Digraph(name=yaml_path)
graph.engine = 'neato'
graph.attr(pack='0')
graph.attr(bgcolor="transparent") # Useful for making slides
font_edge_color = 'black' if graph_dark_theme else 'white'
graph.attr(fontcolor=font_edge_color)
for tool in list(tools)[i*num_tools_half:(i+1)*num_tools_half]:
(tool_path, tool_cwl) = tools[tool]
attrs = {'shape':'box', 'style':'rounded, filled'}
graph.node(Path(tool_path).stem, fillcolor='lightblue', fontsize="24", width='0.75', **attrs)
graph.render(format='png')
def parse_int_string_tuple(string: str) -> Tuple[int, str]:
"""Parses a string of the form '(int, string)'
Args:
string (str): A string with the above encoding
Returns:
Tuple[int, str]: The parsed result
"""
string_no_parens = string.strip()[1:-1]
(str1, str2) = string_no_parens.split(',')
return (int(str1.strip()), str2.strip())
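# Illustrative example: parse_int_string_tuple('(1, align)') returns (1, 'align')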
def reindex_wic_steps(wic_steps: Yaml, index: int) -> Yaml:
"""After inserting a step into a workflow, we need to increment the steps in\n
the wic: metadata annotations tag whose original index is >= the given index.
Args:
wic_steps (Yaml): The steps: subtag of the wic: metadata annotations tag.
index (int): The (zero-based) index of the inserted workflow step.
Returns:
Yaml: The updated wic: steps: tag, with the appropriate indices incremented.
"""
wic_steps_reindexed = {}
for keystr, val in wic_steps.items():
(i, s) = parse_int_string_tuple(keystr)
newstr = f'({i+1}, {s})' if i >= index else keystr
wic_steps_reindexed[newstr] = val
return wic_steps_reindexed
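# Illustrative example: inserting a new step at (zero-based) index 1 shifts the
# annotations of steps at indices >= 1, so
#   reindex_wic_steps({'(1, foo.yml)': {}, '(2, bar.yml)': {}}, 1)
#   returns {'(2, foo.yml)': {}, '(3, bar.yml)': {}}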
def add_subgraphs(args: argparse.Namespace,
graph: GraphReps,
sibling_subgraphs: List[GraphReps],
namespaces: Namespaces,
step_1_names: List[str],
steps_ranksame: List[str]) -> None:
"""Add all subgraphs to the current graph, except for GraphViz subgraphs
below a given depth, which allows us to hide irrelevant details.
Args:
args (argparse.Namespace): The command line arguments
graph (GraphReps): A tuple of a GraphViz DiGraph and a networkx DiGraph
sibling_subgraphs (List[GraphReps]): The subgraphs of the immediate children of the current workflow
namespaces (Namespaces): Specifies the path in the AST of the current subworkflow
step_1_names (List[str]): The names of the first step
steps_ranksame (List[str]): Additional node names to be aligned using ranksame
"""
graph_gv = graph.graphviz
graph_nx = graph.networkx
# Add the cluster subgraphs to the main graph, but we need to add them in
# reverse order to trick the graphviz layout algorithm.
for sibling in sibling_subgraphs[::-1]: # Reverse!
(sib_graph_gv, sib_graph_nx, sib_graphdata) = sibling
if len(namespaces) < args.graph_inline_depth:
graph_gv.subgraph(sib_graph_gv)
graph_nx.add_nodes_from(sib_graph_nx.nodes)
graph_nx.add_edges_from(sib_graph_nx.edges)
for sibling in sibling_subgraphs:
graph.graphdata.subgraphs.append(sibling.graphdata)
# Align the cluster subgraphs using the same rank as the first node of each subgraph.
# See https://stackoverflow.com/questions/6824431/placing-clusters-on-the-same-rank-in-graphviz
if len(namespaces) < args.graph_inline_depth:
step_1_names_display = [name for name in step_1_names if len(name.split('___')) < 2 + args.graph_inline_depth]
if len(step_1_names_display) > 1:
nodes_same_rank = '\t{rank=same; ' + '; '.join(step_1_names_display) + '}\n'
graph_gv.body.append(nodes_same_rank)
graph.graphdata.ranksame = step_1_names_display
if len(steps_ranksame) > 1:
nodes_same_rank = '\t{rank=same; ' + '; '.join(steps_ranksame) + '}\n'
graph_gv.body.append(nodes_same_rank)
graph.graphdata.ranksame = steps_ranksame
def get_step_name_1(step_1_names: List[str],
yaml_stem: str,
namespaces: Namespaces,
steps_keys: List[str],
subkeys: List[str]) -> str:
"""Finds the name of the first step in the current subworkflow. If the first
step is itself a subworkflow, the call site recurses until it finds a node.
This is necessary because ranksame in GraphViz can only be applied to
individual nodes, not cluster_subgraphs.
Args:
step_1_names (List[str]): The list of potential first node names
yaml_stem (str): The name of the current subworkflow (stem of the yaml filepath)
namespaces (Namespaces): Specifies the path in the AST of the current subworkflow
steps_keys (List[str]): The name of each step in the current CWL workflow
subkeys (List[str]): The keys associated with subworkflows
Returns:
str: The name of the first step
"""
if steps_keys[0] in subkeys:
step_name_1 = step_1_names[0]
else:
step_name_1 = step_name_str(yaml_stem, 0, steps_keys[0])
step_name_1 = '___'.join(namespaces + [step_name_1])
# NOTE: Since the names of subgraphs '*.yml' contain a period, we need to
# escape them by enclosing the whole name in double quotes. Otherwise:
# "Error: *.yml.gv: syntax error in line n near '.'"
step_name_1 = f'"{step_name_1}"'
return step_name_1
```
|
{
"source": "jferard/script_to_monitor_Covid19",
"score": 3
}
|
#### File: jferard/script_to_monitor_Covid19/scatterCountries.py
```python
from pylab import *
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import datetime
import datetime as dt
from scipy.signal import savgol_filter
from pathlib import Path
############### Basic use #############################
# example: plot_country("France",dataParam,displayParam,fitParam,'3/17/20',ax)
# Argument 1: string, as it appears in the CSV file
# Argument 2: data parameters, AUTOMATICALLY generated
# Argument 3: display parameters, AUTOMATICALLY generated
# Argument 4: fitting parameters, AUTOMATICALLY generated
# Argument 5: date of the confinement start. If no confinement, enter a date in the future
# Argument 6: axis (matplotlib object) where to plot the curves
################ Parameters to define manually ######################
# Path to the folder containing the time series:
path="https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
figures_path = "../FIGURES"
daysInterval = 7 # To set Major x-axis
startDate = datetime.date(2020, 2,22) # Start date of the plot:
extrapolPeriod = 14 # How many days to extrapolate?
fittingPeriod = 8 # On how long do we fit the data?
yscale = 'linear'
#yscale = 'log'
#field = "Confirmed"
#field = "Deaths"
field = "Active"
#field = "DeathRate"
#evolutionType = "cumulative"
evolutionType = "daily"
#evolutionType = "curvature"
#evolutionType = "smoothedCurvature"
#evolutionType = "R0"
iExtrapol = 0
vSmoothing = [5,3] # [window size,order of fitting polynomial]
################ Parameters to define manually ######################
######################## Definition of Functions ############################
def evolution_single(strCountry,data):
size=len(data.iloc[0].values[4:])
evolution = zeros(size,dtype=int)
# NOTE: 'lstCountry' is not defined in this script as extracted; assuming it is
# the list of country names to aggregate (here, just the requested country).
lstCountry = [strCountry]
for ic,cntry in enumerate(data['Country/Region']):
if (cntry in lstCountry) or (strCountry=="World"):
locRegion = data.iloc[ic].values[4:]
locRegion[isnan(locRegion.tolist())] = 0
evolution[:] += locRegion.astype(int)
return evolution
def evolution_country(strCountry,dataParam):
if field=="Confirmed":
evolution = evolution_single(strCountry,dataParam['Confirmed'])
elif field=="Deaths":
evolution = evolution_single(strCountry,dataParam['Deaths'])
elif field=="Active":
evolC = evolution_single(strCountry,dataParam['Confirmed'])
evolD = evolution_single(strCountry,dataParam['Deaths'])
evolR = evolution_single(strCountry,dataParam['Recovered'])
evolution = evolC - evolR - evolD
elif field=="DeathRate":
evolC = evolution_single(strCountry,dataParam['Confirmed'])
evolD = evolution_single(strCountry,dataParam['Deaths'])
evolution = evolD/evolC*100
if dataParam['EvolutionType'] == "cumulative":
evol = evolution[dataParam['FilterDate']]
elif dataParam['EvolutionType'] == "daily":
dedt = np.zeros(len(evolution))
dedt[1:] = np.diff(evolution)
evol = dedt[dataParam['FilterDate']]
elif dataParam['EvolutionType'] == "curvature":
d2edt2 = np.zeros(len(evolution))
d2edt2[2:] = np.diff(evolution,2)
evol = d2edt2[dataParam['FilterDate']]
elif dataParam['EvolutionType'] == "smoothedCurvature":
#dedt = np.zeros(len(evolution))
#dedt[1:] = np.diff(evolution)
dedt = np.diff(evolution)
evol = savgol_filter(dedt, dataParam['Smoothing'][0], dataParam['Smoothing'][1]) # arg2: window size; arg3: polynomial order
d2edt2 = np.zeros(len(evolution))
d2edt2[2:] = np.diff(evol)/evol[-1]
evol = d2edt2[dataParam['FilterDate']]
elif dataParam['EvolutionType'] == "R0":
R0 = np.zeros(len(evolution))
delta0 = np.diff(evolution)
delta = savgol_filter(delta0, dataParam['Smoothing'][0], dataParam['Smoothing'][1]) # arg2: window size; arg3: polynomial order
R0[1:] = delta/np.roll(delta,5)
## TEST:
#print((delta/np.roll(delta,5))[-10:])
#print((delta*5/np.roll(delta,1))[-10:])
evol = R0[dataParam['FilterDate']]
return evol
def get_trend(dates,evol1,fitParam,extParam):
dtFitBeg = fitParam[0]
dtFitEnd = fitParam[1]
dtExtBeg = extParam[0]
dtExtEnd = extParam[1]
print("Time windows for fitting: ", dateOut(dtFitBeg), " - ", dateOut(dtFitEnd))
print("Time windows for extrapo: ", dateOut(dtExtBeg), " - ", dateOut(dtExtEnd))
bfitDate = (dates>=dtFitBeg) * (dates<=dtFitEnd)
fitDate = dates[bfitDate]
Ndfit = (dtFitEnd - dtFitBeg).days + 1
Ndext = (dtExtEnd - dtExtBeg).days + 1
Ndtot = (dtExtEnd - dtFitBeg).days + 1
xfit = np.arange(Ndfit)
xext = np.arange(Ndtot-Ndext,Ndtot)
yfit = evol1[bfitDate]
nz = (yfit>0)
if sum(nz)>0:
p1=polyfit(xfit[nz],log(yfit[nz]),1)
yext = exp(polyval(p1,xext))
else:
p1=polyfit(xfit,log(-yfit),1)
yext = exp(-polyval(p1,xext))
print(p1)
correl1 = yext
xcorrel1 = []
for i in range(Ndext):
xcorrel1.append(dateOut(dtExtBeg + dt.timedelta(days=i)))
rate=correl1[-1]/correl1[-2]-1
if rate>0: strRate='+%.1f%%' %(rate*100)
else:strRate='%.1f%%' %(rate*100)
return xcorrel1, correl1, strRate
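# Note on the fit above: polyfit on log(yfit) gives log(y) ~ p1[0]*x + p1[1],
# so yext is an exponential extrapolation; for the positive-data branch the
# day-over-day rate reported in strRate is exp(p1[0]) - 1.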
def dateOut(date):
return date.strftime('%m/%d/%y').lstrip("0").replace("/0", "/")
def dateIn(strDate):
spl = strDate.split('/')
month = int(spl[0])
day = int(spl[1])
year = int("20%s" %spl[2])
return datetime.date(year, month,day)
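# Illustrative round trip: dateIn('3/17/20') gives datetime.date(2020, 3, 17),
# and dateOut(datetime.date(2020, 3, 17)) gives back '3/17/20'.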
def plot_country(strCountry,dataParam,displayParam,fitParam,quarParam,ax):
print("########## Treating country: ", strCountry, " #############")
quarDate = quarParam
fittingPeriod = fitParam[0]
extrapolPeriod = fitParam[1]
iExtrapol = fitParam[2]
# Extract evolution for this country
evol1 = evolution_country(strCountry,dataParam)
# find the quarantine date
iQuar = np.where(dataParam['Dates']>=dateIn(quarDate))
iQuar = dataParam['Dates']>=dateIn(quarDate)
fitParam1 = []
extParam1 = []
# Define the period for the trend
if sum(iQuar) > 3: # Quarantine found
dtFitEnd = dateIn(quarDate)
fitParam2 = []
extParam2 = []
fitParam2.append(dateIn(quarDate))
fitParam2.append(dt.date.today() - dt.timedelta(days=1))
extParam2.append(dtFitEnd)
extParam2.append(dtFitEnd + dt.timedelta(days=extrapolPeriod+1))
else:
dtFitEnd = dt.date.today() - dt.timedelta(days=1)
dtFitBeg = dtFitEnd - dt.timedelta(days=fittingPeriod+1)
dtExtBeg = dtFitEnd
dtExtEnd = dtExtBeg + dt.timedelta(days=extrapolPeriod+1)
fitParam1.append(dtFitBeg)
fitParam1.append(dtFitEnd)
extParam1.append(dtExtBeg)
extParam1.append(dtExtEnd)
if dataParam['Smoothing'][0] != 0:
evol1 = savgol_filter(evol1, dataParam['Smoothing'][0], dataParam['Smoothing'][1]) # arg2: window size; arg3: polynomial order
if displayParam['YScale'] == 'log':
evol1 = np.ma.masked_where(evol1<=0,evol1)
p = ax.semilogy(dataParam['DateAxis'],evol1,ls='-',lw=4.0,label=strCountry)
col = p[0].get_color()
if sum(iQuar) > 0: # Quarantine found
# Plot the quarantine date
ax.scatter(dataParam['DateAxis'][iQuar][0],evol1[iQuar][0],c=col,s=300,marker="X")
if (iExtrapol==0): return
# Get the trend
xextrapol, yextrapol, strRate = get_trend(dataParam['Dates'],evol1,fitParam1,extParam1)
ax.semilogy(xextrapol,yextrapol,ls='--',lw=2.0,c=col)
ax.annotate(strRate, xy=(xextrapol[-1],yextrapol[-1]), xytext=(3, 3), textcoords="offset points", ha='center', va='bottom',color=col,weight='bold')
if sum(iQuar) > 3: # Quarantine found
xextrapol, yextrapol, strRate = get_trend(dataParam['Dates'],evol1,fitParam2,extParam2)
ax.semilogy(xextrapol,yextrapol,ls='-',lw=2.0,c=col)
ax.annotate(strRate, xy=(xextrapol[-1],yextrapol[-1]), xytext=(3, 3), textcoords="offset points", ha='center', va='bottom',color=col,weight='bold')
def setDisplayParam(field,evolutionType,yscale):
displayParam = {}
strUnit = "[-]"
if field=="Confirmed":
txtField = "confirmed cases"
elif field=="Deaths":
txtField = "deaths"
elif field=="Active":
txtField = "active cases"
elif field=="DeathRate":
txtField = "death rate"
strUnit = "[%]"
if evolutionType == 'cumulative':
txtEvol = "Cumulative"
elif evolutionType == 'daily':
txtEvol = 'Daily'
elif evolutionType == 'curvature':
txtEvol = 'Derivative of daily'
elif evolutionType == 'smoothedCurvature':
txtEvol = 'Derivative of smoothed daily'
elif evolutionType == 'R0':
txtEvol = 'R0 from'
txtTitle = "%s %s in some Western countries\n (Source: Johns Hopkins University)" %(txtEvol,txtField)
txtYaxis = "%s %s %s" %(txtEvol,txtField,strUnit)
displayParam['title'] = txtTitle
displayParam['YaxisLabel'] = txtYaxis
strDateToday = dt.date.today().strftime("%Y%m%d")
Path(figures_path).mkdir(parents=True, exist_ok=True)
fname = figures_path + "/%s_evolCovid19_%s_%s.png" %(strDateToday,txtEvol,txtField)
displayParam['FileName'] = fname.replace(" ","_")
displayParam['YScale'] = yscale
return displayParam
def loadData(path,field,evolutionType,vSmoothing,startDate=datetime.date(2020, 1,1)):
dataParam = {}
dataParam['Confirmed'] = pd.read_csv(path+"time_series_covid19_confirmed_global.csv")
dataParam['Deaths'] = pd.read_csv(path+"time_series_covid19_deaths_global.csv")
dataParam['Recovered'] = pd.read_csv(path+"time_series_covid19_recovered_global.csv")
dataParam['Field'] = field
dataParam['EvolutionType'] = evolutionType
dataParam['Smoothing'] = vSmoothing
#dateax = dataParam['Confirmed'].columns[4:].values.astype(str)
dateax = dataParam['Deaths'].columns[4:].values.astype(str)
# Convert date axis to date vector
dates = np.array([dt.datetime.strptime(plof,'%m/%d/%y').date() for plof in dateax])
# Filter axe of dates
filterDate = (dates>=startDate)
dateax = dateax[filterDate]
dates = dates[filterDate]
dataParam['FilterDate'] = filterDate
dataParam['DateAxis'] = dateax
dataParam['Dates'] = dates
return dataParam
def setFitExtraParam(fittingPeriod, extrapolPeriod,dataParam,iExtrapol):
if field=="Confirmed":
return [fittingPeriod, 14, iExtrapol]
elif field=="Deaths":
return [fittingPeriod, 21, iExtrapol]
elif field=="Active":
return [fittingPeriod, 21, iExtrapol]
elif field=="DeathRate":
return [fittingPeriod, 21, iExtrapol]
######################## Definition of Functions ############################
dataParam = loadData(path,field,evolutionType,vSmoothing,startDate=startDate)
displayParam = setDisplayParam(field,evolutionType,yscale)
fitParam = setFitExtraParam(fittingPeriod, extrapolPeriod,dataParam,iExtrapol)
close(1)
fig = figure(num=1,figsize=(10,6))
ax = fig.add_subplot(111)
ax.set_title(displayParam['title'])
ax.set_yscale(displayParam['YScale'])
ax.set_xlabel("Date")
ax.xaxis.set_major_locator(ticker.MultipleLocator(daysInterval))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(1))
ax.set_ylabel(displayParam['YaxisLabel'])
ax.legend(loc=2)
ax.grid(which='major',color='grey', linestyle='-', linewidth=1)
ax.grid(which='minor',color='grey', linestyle='-', linewidth=0.5)
fig.tight_layout()
savefig(displayParam['FileName'],dpi=600,bbox_inches='tight')
show()
```
|
{
"source": "jferg368/sqlalchemy-redshift",
"score": 3
}
|
#### File: tests/rs_sqla_test_utils/utils.py
```python
__author__ = 'haleemur'
import re
from sqlalchemy_redshift import dialect
def clean(query):
    query = re.sub(r'\s+ENCODE\s+\w+', '', query)
    query = re.sub(r'\s+CONSTRAINT\s+[a-zA-Z0-9_".]+', '', query)
    return re.sub(r'\s+', ' ', query).strip()
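# Illustrative example of clean(): ENCODE/CONSTRAINT clauses are stripped and
# whitespace is collapsed, e.g.
#   clean('CREATE TABLE t (\n  id INTEGER ENCODE lzo\n)')
#   returns 'CREATE TABLE t ( id INTEGER )'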
def compile_query(q):
    return str(q.compile(
        dialect=dialect.RedshiftDialect(),
        compile_kwargs={'literal_binds': True})
    )
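# Hypothetical usage (SQLAlchemy 1.x style select()), rendering a statement as
# Redshift SQL with bound parameters inlined as literals:
#   from sqlalchemy import table, column, select
#   users = table('users', column('id'))
#   compile_query(select([users.c.id]).where(users.c.id == 5))
#   returns roughly 'SELECT users.id FROM users WHERE users.id = 5'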
```
#### File: sqlalchemy-redshift/tests/test_alembic_dialect.py
```python
from alembic.ddl.base import RenameTable
from alembic import migration
from sqlalchemy_redshift import dialect
def test_configure_migration_context():
    context = migration.MigrationContext.configure(
        url='redshift+psycopg2://mydb'
    )
    assert isinstance(context.impl, dialect.RedshiftImpl)
def test_rename_table():
    compiler = dialect.RedshiftDDLCompiler(dialect.RedshiftDialect(), None)
    sql = compiler.process(RenameTable("old", "new", "scheme"))
    assert sql == 'ALTER TABLE scheme."old" RENAME TO "new"'
```
#### File: sqlalchemy-redshift/tests/test_compiler.py
```python
from sqlalchemy import func, select
from sqlalchemy_redshift.dialect import RedshiftDialect
def test_func_now():
    dialect = RedshiftDialect()
    s = select([func.NOW().label("time")])
    compiled = s.compile(dialect=dialect)
    assert str(compiled) == "SELECT SYSDATE AS time"
```
|
{
"source": "jferic/erda",
"score": 2
}
|
#### File: migrations/cmdb/20220519-cluster-scheduler.py
```python
import json
import django.db.models


class CoClusters(django.db.models.Model):
    """
    generated by erda-cli
    """
    id = django.db.models.BigIntegerField()
    org_id = django.db.models.BigIntegerField()
    name = django.db.models.CharField()
    display_name = django.db.models.CharField()
    type = django.db.models.CharField()
    cloud_vendor = django.db.models.CharField()
    logo = django.db.models.TextField()
    description = django.db.models.TextField()
    wildcard_domain = django.db.models.CharField()
    config = django.db.models.TextField()
    urls = django.db.models.TextField()
    settings = django.db.models.TextField()
    scheduler = django.db.models.TextField()
    opsconfig = django.db.models.TextField()
    resource = django.db.models.TextField()
    sys = django.db.models.TextField()
    created_at = django.db.models.DateTimeField(auto_now=True)
    updated_at = django.db.models.DateTimeField(auto_now=True, auto_now_add=True)
    manage_config = django.db.models.TextField()
    cluster_info = django.db.models.TextField()

    class Meta:
        db_table = "co_clusters"


def entry():
    clusters = CoClusters.objects.all()
    for cluster in clusters:
        try:
            schedConfig = json.loads(cluster.scheduler)
            schedConfig['enableWorkspace'] = True
            s = json.dumps(schedConfig)
        except Exception as e:
            print("cluster %s json parse error, exception: %s, skip" % (cluster.name, e))
            continue
        cluster.scheduler = s
        cluster.save()


entries: [callable] = [
    entry,
]
```
#### File: migrations/pipeline/20220526-fill-cluster-name-for-cron.py
```python
import django.db.models
import json


class PipelineCrons(django.db.models.Model):
    """
    generated by erda-cli
    """
    id = django.db.models.BigIntegerField()
    application_id = django.db.models.BigIntegerField()
    branch = django.db.models.CharField()
    cron_expr = django.db.models.CharField()
    enable = django.db.models.BooleanField()
    pipeline_source = django.db.models.CharField()
    pipeline_yml_name = django.db.models.CharField()
    base_pipeline_id = django.db.models.BigIntegerField()
    extra = django.db.models.TextField()
    time_created = django.db.models.DateTimeField()
    time_updated = django.db.models.DateTimeField()
    pipeline_definition_id = django.db.models.CharField()
    is_edge = django.db.models.BooleanField()
    cluster_name = django.db.models.CharField()

    class Meta:
        db_table = "pipeline_crons"


def entry():
    """
    please implement this and add it to the list entries
    """
    crons = PipelineCrons.objects.all()
    for cron in crons:
        if cron.extra is not None:
            clusterName = ""
            try:
                cronExtra = json.loads(cron.extra)
                if "clusterName" in cronExtra:
                    clusterName = cronExtra["clusterName"]
            except Exception as e:
                print("cron extra %s json parse error, exception: %s, skip" % (cron.id, e))
                continue
            cron.cluster_name = clusterName
            cron.save()


entries: [callable] = [
    entry,
]
```
|
{
"source": "jfernan2/PRInspector",
"score": 2
}
|
#### File: jfernan2/PRInspector/config.py
```python
IS_TEST = True
REPOSITORY = 'cms-sw/cmssw'
def get_repo_url():
return 'https://github.com/' + REPOSITORY + '/'
CERN_SSO_CERT_FILE = 'private/cert.pem'
CERN_SSO_KEY_FILE = 'private/cert.key'
CERN_SSO_COOKIES_LOCATION = 'private/'
TWIKI_CONTACTS_URL = 'https://ppdcontacts.web.cern.ch/PPDContacts/ppd_contacts'
TWIKI_TAG_COLLECTOR_URL = 'https://twiki.cern.ch/twiki/bin/edit/CMS/DQMP5TagCollector?nowysiwyg=1'
TWIKI_TAG_COLLECTOR_CANCEL_EDIT_URL = 'https://twiki.cern.ch/twiki/bin/save/CMS/DQMP5TagCollector'
CATEGORIES_MAP_URL = 'https://raw.githubusercontent.com/cms-sw/cms-bot/master/categories_map.py'
TWIKI_TIMEOUT_SECONDS = 10
__github_client_id = None
__github_client_secret = None
def get_github_client_id():
global __github_client_id
if __github_client_id == None:
__github_client_id = open('private/github_oauth_data.txt', 'r').readlines()[1].strip()
return __github_client_id
def get_github_client_secret():
global __github_client_secret
if __github_client_secret == None:
__github_client_secret = open('private/github_oauth_data.txt', 'r').readlines()[2].strip()
return __github_client_secret
def get_subsystems():
return ['l1t',
'hlt',
'tracker',
'sistrip',
'pixel',
'ecal',
'hcal',
'dt',
'rpc',
'csc',
'ct-pps',
'ctpps',
'bril',
'gem',
'hgcal',
'tracking',
'btag',
'vertexing',
'e-gamma',
'jetmet',
'lumi',
'muon',
'tau',
'generators',
'hfnose',
'beamspot',
'jme',
'jet',
'eventdisplay',
'castor',
'validation',
]
```
#### File: PRInspector/services/cern_sso_service.py
```python
import requests
from urllib.parse import urlparse, urljoin
import xml.etree.ElementTree as ET
DEFAULT_TIMEOUT_SECONDS = 10
def _init_session(s, url, cookiejar, auth_url_fragment):
"""
Internal helper function: initialise the sesion by trying to access
a given URL, setting up cookies etc.
:param: auth_url_fragment: a URL fragment which will be joined to
the base URL after the redirect, before the parameters. Examples are
auth/integrated/ (kerberos) and auth/sslclient/ (SSL)
"""
if cookiejar is not None:
# log.debug("Using provided cookiejar")
s.cookies = cookiejar
# Try getting the URL we really want, and get redirected to SSO
# log.info("Fetching URL: %s" % url)
r1 = s.get(url, timeout=DEFAULT_TIMEOUT_SECONDS)
# Parse out the session keys from the GET arguments:
redirect_url = urlparse(r1.url)
# log.debug("Was redirected to SSO URL: %s" % str(redirect_url))
# ...and inject them into the Kerberos authentication URL
final_auth_url = "{auth_url}?{parameters}".format(
auth_url=urljoin(r1.url, auth_url_fragment),
parameters=redirect_url.query)
return final_auth_url
def _finalise_login(s, auth_results):
"""
Perform the final POST authentication steps to fully authenticate
the session, saving any cookies in s' cookie jar.
"""
r2 = auth_results
# Did it work? Raise Exception otherwise.
r2.raise_for_status()
# Get the contents
try:
tree = ET.fromstring(r2.content)
except ET.ParseError as e:
# log.error("Could not parse response from server!")
# log.error("The contents returned was:\n{}".format(r2.content))
raise e
action = tree.findall("body/form")[0].get('action')
# Unpack the hidden form data fields
form_data = dict((
(elm.get('name'), elm.get('value'))
for elm in tree.findall("body/form/input")))
# ...and submit the form (WHY IS THIS STEP EVEN HERE!?)
# log.debug("Performing final authentication POST to %s" % action)
r3 = s.post(url=action, data=form_data, timeout=DEFAULT_TIMEOUT_SECONDS)
# Did _that_ work?
r3.raise_for_status()
# The session cookie jar should now contain the necessary cookies.
# log.debug("Cookie jar now contains: %s" % str(s.cookies))
return s.cookies
def cert_sign_on(url, cert_file, key_file, cookiejar={}):
"""
Perform Single-Sign On with a robot/user certificate specified by
cert_file and key_file agains the target url. Note that the key
needs to be passwordless. cookiejar, if provided, will be used to
store cookies, and can be a Requests CookieJar, or a
MozillaCookieJar. Or even a dict.
Cookies will be returned on completion, but cookiejar will also be
modified in-place.
If you have a PKCS12 (.p12) file, you need to convert it. These
steps will not work for passwordless keys.
`openssl pkcs12 -clcerts -nokeys -in myCert.p12 -out ~/private/myCert.pem`
`openssl pkcs12 -nocerts -in myCert.p12 -out ~/private/myCert.tmp.key`
`openssl rsa -in ~/private/myCert.tmp.key -out ~/private/myCert.key`
Note that the resulting key file is *unencrypted*!
"""
with requests.Session() as s:
# Set up the certificates (this needs to be done _before_ any
# connection is opened!)
s.cert = (cert_file, key_file)
cert_auth_url = _init_session(s=s, url=url, cookiejar=cookiejar,
auth_url_fragment=u"auth/sslclient/")
# log.info("Performing SSL Cert authentication against %s"
# % cert_auth_url)
r2 = s.get(cert_auth_url, cookies=cookiejar, verify=False,
timeout=DEFAULT_TIMEOUT_SECONDS)
return _finalise_login(s, auth_results=r2)
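# Hypothetical usage (the URL is a placeholder; cert/key paths follow the
# config.py convention in this repo):
#   cookies = cert_sign_on("https://twiki.cern.ch/twiki/bin/view/Some/Page",
#                          cert_file="private/cert.pem",
#                          key_file="private/cert.key")
#   requests.get("https://twiki.cern.ch/twiki/bin/view/Some/Page", cookies=cookies)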
```
#### File: PRInspector/services/twiki_service.py
```python
from services.cern_sso_service import cert_sign_on
import hashlib
import requests
import pickle
import config
import re
def get_contacts_list_html(errors=[]):
return __get_with_timeout(config.TWIKI_CONTACTS_URL, errors)
def get_tag_collector_html(errors=[]):
cookies = __get_and_save_cookies(config.TWIKI_TAG_COLLECTOR_URL)
text = __get_with_timeout(config.TWIKI_TAG_COLLECTOR_URL, errors=errors, cookies=cookies)
try:
# Cancel editing so other people can access the edit page
requests.post(config.TWIKI_TAG_COLLECTOR_CANCEL_EDIT_URL, cookies=cookies, data = {'action_cancel': 'Cancel'})
except requests.exceptions.RequestException:
errors.append('Error canceling tag collector edit by posting <a href="%s" target="_blank" class="alert-link">here</a>'%(config.TWIKI_TAG_COLLECTOR_CANCEL_EDIT_URL))
if ('action="https://twiki.cern.ch/Shibboleth.sso/ADFS"' in text or
'document.forms[0].submit()' in text or
'Sign in with your CERN account' in text):
cookies = __get_and_save_cookies(config.TWIKI_TAG_COLLECTOR_URL, True)
text = __get_with_timeout(config.TWIKI_TAG_COLLECTOR_URL, errors=errors, cookies=cookies)
# NOTE: as extracted, the three replacements below are no-ops; they were most
# likely intended to unescape HTML/Twiki-encoded characters (e.g. '&#42;' -> '*').
text = text.replace('*', '*')
text = text.replace('%', '%')
text = text.replace('[', '[')
return text
def get_author_mentioned_info(author, html):
git_user_str = 'Git: [[https://github.com/%s][%s]]'%(author, author)
if git_user_str in html:
return { 'text': 'Author is known', 'class': 'text-success', 'description': "Author's Github username is mentioned in DQM Contacts Twiki page" }
elif author in html:
return { 'text': 'Author is mentioned', 'class': 'text-warning', 'description': "Author's Github username appears in DQM Contacts Twiki page, but it wasn't necessarily entered deliberately. This might be a coincidence" }
else:
return { 'text': 'Author is unknown', 'class': 'text-danger', 'description': "Author's Github username doesn't appear in DQM Contacts Twiki page" }
def get_tag_collector_info(pr_number, html):
regex_ok = r'%OK%([^%\n]*?)\]\[PR ' + str(pr_number) + r'\]\]'
m_ok = re.compile(regex_ok)
regex_prod = r'%PROD%([^%\n]*?)\]\[PR ' + str(pr_number) + r'\]\]'
m_prod = re.compile(regex_prod)
regex_notok = r'%NOTOK%([^%\n]*?)\]\[PR ' + str(pr_number) + r'\]\]'
m_notok = re.compile(regex_notok)
regex_mentioned = r'\*([^%\n]*?)\]\[PR ' + str(pr_number) + r'\]\]'
m_mentioned = re.compile(regex_mentioned)
if m_ok.search(html) or m_prod.search(html):
return { 'tested': True, 'text': 'Tested in Playback', 'class': 'text-success', 'description': "This PR was tested in playback system and tests passed" }
elif m_notok.search(html):
return { 'tested': False, 'text': 'Rejected in Playback', 'class': 'text-danger', 'description': "This PR was tested and rejected in playback system" }
elif m_mentioned.search(html):
return { 'tested': False, 'text': 'Mentioned in Playback', 'class': 'text-warning', 'description': "This PR is mentioned in the playback system but it is not yet tested" }
else:
return { 'tested': False, 'text': 'Not mentioned in Playback', 'class': 'text-secondary', 'description': "This PR does not appear in the Tag Collector page" }
def __get_and_save_cookies(url, force_reload=False):
hash = hashlib.md5(url.encode()).hexdigest()
file = config.CERN_SSO_COOKIES_LOCATION + hash + '_cookies.p'
cookies = None
if not force_reload:
try:
cookies = pickle.load(open(file, 'rb'))
except:
pass
if cookies == None:
cookies = cert_sign_on(url, cert_file=config.CERN_SSO_CERT_FILE, key_file=config.CERN_SSO_KEY_FILE, cookiejar=None)
pickle.dump(cookies, open(file, 'wb'))
return cookies
def __get_with_timeout(url, errors=[], cookies=None):
try:
return requests.get(url, cookies=cookies, timeout=config.TWIKI_TIMEOUT_SECONDS).text
except requests.exceptions.Timeout:
errors.append('<a href="%s" target="_blank" class="alert-link">%s</a> load timeouted after %s seconds. Content related to data from mentioned page is presented incorrectly.'%(url, url, config.TWIKI_TIMEOUT_SECONDS))
return ''
except requests.exceptions.RequestException as e:
errors.append('Error loading <a href="%s" target="_blank" class="alert-link">%s</a>. Content related to data from mentioned page is presented incorrectly.'%(url, url))
print(e)
return ''
```
#### File: PRInspector/views/prs.py
```python
from flask import Flask, make_response, render_template, request, redirect
from services.github_service import get_prs, get_merged_prs, get_last_comment, exchange_code_to_token, get_not_merged_prs_count, get_dqm_categories
from services.pr_info_service import get_subsystem_in_title_info
from services.twiki_service import get_contacts_list_html, get_tag_collector_html, get_author_mentioned_info, get_tag_collector_info
import config
import urllib.request
import json
def get_prs_view(code):
if code != None and code != '':
access_token = exchange_code_to_token(code)
if access_token != None and access_token != '':
# Save cookie and redirect user to main page (without code parameter)
resp = make_response(redirect('/'))
resp.set_cookie('access_token', access_token)
return resp
else:
access_token = request.cookies.get('access_token')
prs = get_prs(access_token)
merged_prs = get_merged_prs(access_token)
# Init key for additional properties
for pr in prs:
pr['additional'] = {}
pr['additional']['merged'] = False
# Init key for additional properties
for pr in merged_prs:
pr['additional'] = {}
pr['additional']['merged'] = True
# Add merged and not merged PRs to one list
prs += merged_prs
# Define errors that will be displayed in frontend
errors = []
# Check if subsystem was mentioned in the title
for pr in prs:
pr['additional']['subsystem'] = get_subsystem_in_title_info(pr['title'])
# Check if author is in contacts list
contacts_html = get_contacts_list_html(errors)
for pr in prs:
pr['additional']['author'] = get_author_mentioned_info(pr['user']['login'], contacts_html)
# Check if pr is tested in tag collector
# tag_collector_html = get_tag_collector_html(errors)
tag_collector_html = ''
for pr in prs:
pr['additional']['tag_collector'] = get_tag_collector_info(pr['number'], tag_collector_html)
# Choose correct background color based on test state
for pr in prs:
pr['additional']['background'] = 'bg-white'
if pr['additional']['merged'] == True:
pr['additional']['background'] = 'bg-action-needed'
elif any(x for x in pr['labels'] if x['name'] == 'tests-pending'):
pr['additional']['background'] = 'bg-action-needed'
elif any(x for x in pr['labels'] if x['name'] == 'tests-approved') and pr['additional']['tag_collector']['tested']:
pr['additional']['background'] = 'bg-ready'
elif any(x for x in pr['labels'] if x['name'] == 'hold'):
pr['additional']['background'] = 'bg-hold'
return make_response(render_template('index.html',
prs=prs,
access_token=access_token,
oauth_client_id=config.get_github_client_id(),
not_merged_prs_count=get_not_merged_prs_count(access_token),
dqm_categories=get_dqm_categories(),
errors = errors))
```
|
{
"source": "JFernandez1995/hiring-engineers",
"score": 2
}
|
#### File: JFernandez1995/hiring-engineers/checkvalue.py
```python
from checks import AgentCheck
import random
class HelloCheck(AgentCheck):
    def check(self, instance):
        self.gauge('my_metric2', random.randint(0,1000))
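# Assumed deployment convention for the legacy (Agent 5 style) checks API
# imported above: save this file as checks.d/checkvalue.py with a matching
# conf.d/checkvalue.yaml containing at least:
#   init_config:
#   instances:
#     - {}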
```
|