ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a4c52141bc68d9cb390a033eda90eddc2f235f7 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
AutoAnchor utils
"""
import random
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import LOGGER, colorstr, emojis
PREFIX = colorstr('AutoAnchor: ')
def check_anchor_order(m):
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer
da = a[-1] - a[0] # delta a
ds = m.stride[-1] - m.stride[0] # delta s
if da and (da.sign() != ds.sign()): # anchor and stride order differ
LOGGER.info(f'{PREFIX}Reversing anchor order')
m.anchors[:] = m.anchors.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
def metric(k): # compute metric
r = wh[:, None] / k[None]
x = torch.min(r, 1 / r).min(2)[0] # ratio metric
best = x.max(1)[0] # best_x
aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
bpr = (best > 1 / thr).float().mean() # best possible recall
return bpr, aat
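# A worked example of the ratio metric above (illustrative numbers, not from the
# code): with thr=4, a label wh=(20, 30) and an anchor k=(10, 10) give
# r=(2.0, 3.0), min(r, 1/r)=(0.5, 0.33), so the metric is 0.33 > 1/4 and that
# anchor counts towards aat (and towards bpr if it is that label's best anchor).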
stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides
anchors = m.anchors.clone() * stride # current anchors
bpr, aat = metric(anchors.cpu().view(-1, 2))
s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
if bpr > 0.98: # threshold to recompute
LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))
else:
LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))
na = m.anchors.numel() // 2 # number of anchors
try:
anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
except Exception as e:
LOGGER.info(f'{PREFIX}ERROR: {e}')
new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
m.anchors[:] = anchors.clone().view_as(m.anchors)
check_anchor_order(m) # must be in pixel-space (not grid-space)
m.anchors /= stride
s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
else:
s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
LOGGER.info(emojis(s))
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
""" Creates kmeans-evolved anchors from training dataset
Arguments:
dataset: path to data.yaml, or a loaded dataset
n: number of anchors
img_size: image size used for training
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
gen: generations to evolve anchors using genetic algorithm
verbose: print all results
Return:
k: kmeans evolved anchors
Usage:
from utils.autoanchor import *; _ = kmean_anchors()
"""
from scipy.cluster.vq import kmeans
npr = np.random
thr = 1 / thr
def metric(k, wh): # compute metrics
r = wh[:, None] / k[None]
x = torch.min(r, 1 / r).min(2)[0] # ratio metric
# x = wh_iou(wh, torch.tensor(k)) # iou metric
return x, x.max(1)[0] # x, best_x
def anchor_fitness(k): # mutation fitness
_, best = metric(torch.tensor(k, dtype=torch.float32), wh)
return (best * (best > thr).float()).mean() # fitness
def print_results(k, verbose=True):
k = k[np.argsort(k.prod(1))] # sort small to large
x, best = metric(k, wh0)
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
f'past_thr={x[x > thr].mean():.3f}-mean: '
for x in k:
s += '%i,%i, ' % (round(x[0]), round(x[1]))
if verbose:
LOGGER.info(s[:-2])
return k
if isinstance(dataset, str): # *.yaml file
with open(dataset, errors='ignore') as f:
data_dict = yaml.safe_load(f) # model dict
from utils.dataloaders import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
# Get label wh
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
# Filter
i = (wh0 < 3.0).any(1).sum()
if i:
LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size')
wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
# wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans init
try:
LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')
assert n <= len(wh) # apply overdetermined constraint
s = wh.std(0) # sigmas for whitening
k = kmeans(wh / s, n, iter=30)[0] * s # points
assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar
except Exception:
LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init')
k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init
wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
k = print_results(k, verbose=False)
# Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.savefig('wh.png', dpi=200)
# Evolve
f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, anchor shape, mutation probability, sigma
pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
for _ in pbar:
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
kg = (k.copy() * v).clip(min=2.0)
fg = anchor_fitness(kg)
if fg > f:
f, k = fg, kg.copy()
pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
if verbose:
print_results(k, verbose)
return print_results(k)
|
py | 1a4c52438f7ea2ba7f5ffd51058c32ddd0e47429 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dnf
from artifact_registry._vendor.google.auth import compute_engine, default
from artifact_registry._vendor.google.auth.exceptions import DefaultCredentialsError, RefreshError
from artifact_registry._vendor.google.auth.transport import requests
from artifact_registry._vendor.google.oauth2 import service_account
class ArtifactRegistry(dnf.Plugin):
"""DNF Plugin for authenticated access to Google Artifact Registry."""
name = 'artifact-registry'
cloud_platform_scope = 'https://www.googleapis.com/auth/cloud-platform'
def __init__(self, base, cli):
super(ArtifactRegistry, self).__init__(base, cli)
self.base = base
self.credentials = self._get_creds()
def config(self):
for repo in self.base.repos.iter_enabled():
opts = dict(repo.cfg.items(repo.id))
if 'pkg.dev' in opts.get('baseurl', ''):
self._add_headers(repo)
def _get_creds(self):
config = self.read_config(self.base.conf)
if config.has_section('main'):
if config.has_option('main', 'service_account_json'):
service_account_json = config.get('main', 'service_account_json')
return service_account.Credentials.from_service_account_file(
service_account_json, scopes=[self.cloud_platform_scope])
if config.has_option('main', 'service_account_email'):
service_account_email = config.get('main', 'service_account_email')
return compute_engine.Credentials(service_account_email)
try:
creds, _ = default()
except DefaultCredentialsError:
return None
return creds
def _add_headers(self, repo):
token = self._get_token()
if token:
headers = repo.get_http_headers()
new_headers = ('Authorization: Bearer %s' % token,) + headers
repo.set_http_headers(new_headers)
def _get_token(self):
if not self.credentials:
return None
if not self.credentials.valid:
try:
self.credentials.refresh(requests.Request())
except RefreshError:
return None
return self.credentials.token
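# A minimal, hypothetical plugin config sketch (e.g. /etc/dnf/plugins/artifact-registry.conf)
# showing the two options read in _get_creds above; the path and email are placeholders:
#
#   [main]
#   service_account_json = /path/to/key.json
#   # or, on GCE, use the attached service account instead:
#   # service_account_email = my-sa@my-project.iam.gserviceaccount.com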
|
py | 1a4c5286cdd5131b4261ffb3679872a07f2c328f | import hashlib
import json
import pickle
import uuid
from imp import find_module
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import OperationalError
from django.db.models import Manager, Model
from larvik.logging import get_module_logger
CONSUMERS = {}
ISDISCOVER = False
def setDiscover(mode):
global ISDISCOVER
ISDISCOVER = mode
NODES = {}
logger = get_module_logger(__file__)
def createUniqeNodeName(channel=None):
"""This function generate 10 character long hash"""
hash = hashlib.sha1()
salt = channel if channel is not None else str(uuid.uuid4())
hash.update(salt.encode('utf-8'))
return hash.hexdigest()
class NodeType(object):
inputs = []
outputs = []
name = None
path = None
settings = {}
type = None
def saveConsumers(CONSUMERLIST):
pickle.dump(CONSUMERLIST, "consumers")
class register_consumer(object):
def __init__(self, channel, model: Model= None):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.channel = channel
self.model = model
def getModelForPuts(self, puts):
return json.dumps([input.lower() if isinstance(input,str) else input.__name__.lower() for input in puts]) if puts is not None else json.dumps([])
def __call__(self, cls: NodeType):
self.name = cls.name if cls.name is not None else cls.channel
self.path = cls.path if cls.path is not None else cls.name
self.type = cls.type if cls.type is not None else "consumer"
self.inputmodel = self.getModelForPuts(cls.inputs)
self.outputmodel = self.getModelForPuts(cls.outputs)
self.settings = json.dumps(cls.settings) if cls.settings is not None else json.dumps({})
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
if self.channel in NODES: raise Exception(f"The node {self.channel} already exists. Check for duplicates")
if self.channel in CONSUMERS: raise Exception(f"The consumer {self.channel} already exists. Check for duplicates")
if self.model is not None and ISDISCOVER:
from flow.models import Node
logger.info(f"{self.name} checking {self.model.__name__} - Checking")
manager: Manager = self.model.objects
try:
try:
object = manager.get(channel=self.channel)
object.name = self.name
object.channel = self.channel
object.save()
except ObjectDoesNotExist as e:
logger.info(f"{self.name} did not yet exist on {self.model.__name__} - Creating")
object = manager.create(name=self.name, channel=self.channel, settings=self.settings)
try:
node = Node.objects.get(hash=createUniqeNodeName(self.channel))
node.name = self.name
node.path = self.path
node.variety = self.type
node.inputmodel = self.inputmodel
node.outputmodel = self.outputmodel
node.defaultsettings = self.settings
node.channel = self.channel
node.entityid = object.id
node.save()
except ObjectDoesNotExist as e:
node = Node.objects.create(hash=createUniqeNodeName(self.channel),
entityid=object.id,
name=self.name,
path=self.path,
variety=self.type,
channel=self.channel,
inputmodel=self.inputmodel,
outputmodel=self.outputmodel,
defaultsettings=self.settings)
logger.info(f"{self.name} did not yet exist on {self.channel} - Creating")
# TODO: once everything is migrated, consumers should be registered here: CONSUMERS[self.name] = cls
except OperationalError as e:
logger.error(f'Consumer cannot be installed, migrate first: {e}')
CONSUMERS[self.channel] = cls
NODES[self.channel] = cls
return cls
class register_node(object):
def __init__(self, node):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.node = node
def getModelForPuts(self, puts):
return json.dumps([input.lower() if isinstance(input,str) else input.__name__.lower() for input in puts]) if puts is not None else json.dumps([])
def __call__(self, cls: NodeType):
from flow.models import Node
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
if self.node in NODES: raise Exception(f"The node {self.node} already exists. Check for duplicates")
if ISDISCOVER is False:
NODES[self.node] = cls
return cls
try:
try:
node = Node.objects.get(hash=createUniqeNodeName(self.node))
node.name = cls.name
node.path = cls.path
node.variety = cls.type
node.inputmodel = self.getModelForPuts(cls.inputs)
node.outputmodel = self.getModelForPuts(cls.outputs)
node.defaultsettings = json.dumps(cls.settings)
node.channel = "None"
node.entityid = None
node.save()
logger.info(f"Updating {cls.__name__} as {self.node} on {self.node}")
except ObjectDoesNotExist as e:
node = Node.objects.create(hash=createUniqeNodeName(self.node),
entityid=None,
name=cls.name,
path=cls.path,
variety=cls.type,
channel="None",
inputmodel=self.getModelForPuts(cls.inputs),
outputmodel=self.getModelForPuts(cls.outputs),
defaultsettings=json.dumps(cls.settings))
logger.info(f"Installing {cls.__name__} as {self.node} on {self.node}")
# TODO: once everything is migrated, consumers should be registered here: CONSUMERS[self.name] = cls
except OperationalError as e:
logger.error(f'Consumer cannot be installed, migrate first: {e}')
NODES[self.node] = cls
return cls
def autodiscover():
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an consumers.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for admin.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except AttributeError:
continue
# Step 2: use imp.find_module to find the app's consumers.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its admin.py doesn't exist
try:
find_module('consumers', app_path)
except ImportError:
continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
import_module("%s.consumers" % app)
# autodiscover was successful, reset loading flag.
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an consumers.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for admin.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except AttributeError:
continue
# Step 2: use imp.find_module to find the app's consumers.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its admin.py doesn't exist
try:
find_module('nodes', app_path)
except ImportError:
continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
import_module("%s.nodes" % app)
# autodiscover was successful, reset loading flag.
return CONSUMERS |
py | 1a4c52eb012340aae601074fe0bfe0bb5f8538e4 | #!/usr/bin/env python3.6
"""MISP feed worker pulling down feeds in misp_feeds.txt
and adding data to the platform"""
import argparse
import hashlib
import json
import os
import sys
import traceback
from logging import error, info
from typing import Dict, Generator, Optional, Text
import caep
import requests
import act
import act.api.helpers
from act.workers.libs import misp, worker
try:
import urlparse
except ModuleNotFoundError: # Python3
import urllib.parse as urlparse # type: ignore
def parseargs() -> argparse.ArgumentParser:
""" Parse arguments """
parser = worker.parseargs('Get MISP feeds from MISP sharing directories')
parser.add_argument('--manifest-dir', default=caep.get_cache_dir('misp_manifest'),
help='The directory to store latest manifests')
return parser
def verify_manifest_dir(manifest_dir: Text) -> None:
"""Verify that the directory structure exists and that there is
always a feed file present (even if empty)"""
# Manifest is at default location - create directory if it does not exist
if manifest_dir == caep.get_cache_dir('misp_manifest'):
caep.get_cache_dir('misp_manifest', create=True)
# If there is specified a manifest directory in the .ini file we
# verify that it exists (or fail hard). If no such directory
# is defined, we default to using $XDG_CACHE_DIR and create a new
# 'misp_manifest' directory there.
if not os.path.isdir(manifest_dir):
print("Could not open manifest directory:", manifest_dir)
sys.exit(1)
# Check that the misp_feeds.txt file actually exists. If not 'touch'
# the file to make sure there is at least some default config present.
feed_file = os.path.join(manifest_dir, 'misp_feeds.txt')
if not os.path.isfile(feed_file):
with open(feed_file, 'w') as feed_h:
feed_h.write("https://www.circl.lu/doc/misp/feed-osint/")
def handle_event_file(feed_url: Text, uuid: Text, proxy_string: Optional[Text] = None, cert_file: Optional[Text] = None) -> misp.Event:
"""Download, parse and store single event file"""
info("Handling {0} from {1}".format(uuid, feed_url))
proxies: Optional[Dict[Text, Text]] = None
if proxy_string:
proxies = {
'http': proxy_string,
'https': proxy_string
}
url = urlparse.urljoin(feed_url, "{0}.json".format(uuid))
req = requests.get(url, proxies=proxies, verify=cert_file)
return misp.Event(loads=req.text)
def handle_feed(manifest_dir: Text,
feed_url: Text,
proxy_string: Optional[Text] = None,
cert_file: Optional[Text] = None) -> Generator[misp.Event, None, None]:
"""Get the manifest file, check if an event file is downloaded
before (cache) and dispatch event handling of separate files"""
proxies: Optional[Dict[Text, Text]] = None
if proxy_string:
proxies = {
'http': proxy_string,
'https': proxy_string
}
manifest_url = urlparse.urljoin(feed_url, "manifest.json")
req = requests.get(manifest_url, proxies=proxies, verify=cert_file)
manifest = json.loads(req.text)
feed_sha1 = hashlib.sha1(feed_url.encode("utf-8")).hexdigest()
old_manifest = {}
if manifest_dir != "NODIR":
try:
with open(os.path.join(manifest_dir, feed_sha1)) as infile:
old_manifest = json.load(infile)
except IOError:
pass
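# The manifest is cached on disk under the SHA-1 of the feed URL, so event
# files whose UUID already appears in the cached manifest are skipped below.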
for uuid in manifest:
if uuid not in old_manifest:
yield handle_event_file(feed_url, uuid, proxy_string, cert_file)
if manifest_dir != "NODIR":
with open(os.path.join(manifest_dir, feed_sha1), "wb") as outfile:
outfile.write(json.dumps(manifest).encode("utf-8"))
def main() -> None:
"""program entry point"""
# Look for default ini file in "/etc/actworkers.ini" and ~/.config/actworkers/actworkers.ini
# (or replace .config with $XDG_CONFIG_DIR if set)
args = worker.handle_args(parseargs())
manifest_dir = args.manifest_dir
actapi = worker.init_act(args)
verify_manifest_dir(manifest_dir)
misp_feeds_file = os.path.join(manifest_dir, "misp_feeds.txt")
with open(misp_feeds_file) as f:
for line in f:
feed_data = handle_feed(manifest_dir, line.strip(), args.proxy_string, args.cert_file)
for event in feed_data:
n = 0
e = 0
act.api.helpers.handle_fact(
actapi.fact("name", event.info)
.source("report", str(event.uuid)),
output_format=args.output_format)
n += 1
try:
act.api.helpers.handle_fact(
actapi.fact("externalLink")
.source("uri", "{0}/{1}.json".format(line.strip(), event.uuid))
.destination("report", str(event.uuid)),
output_format=args.output_format)
n += 1
except act.api.base.ResponseError as err:
e += 1
error("misp_feeds, main unable to add fact to platform, error calling actapi: %s" % err, exc_info=True)
for attribute in event.attributes:
if not attribute.act_type:
continue
try:
act.api.helpers.handle_fact(
actapi.fact("mentions")
.source("report", str(event.uuid))
.destination(attribute.act_type, attribute.value),
output_format=args.output_format)
n += 1
except act.api.base.ResponseError as err:
e += 1
error("misp_feeds: main unable to add attribute fact to platform, error calling actapi: %s" % err, exc_info=True)
info("{0} facts. {1} errors.".format(n, e))
def main_log_error() -> None:
"Call main() and log all exceptions as errors"
try:
main()
except Exception:
error("Unhandled exception: {}".format(traceback.format_exc()))
raise
if __name__ == '__main__':
main_log_error()
|
py | 1a4c53fe227210ca8ef7bd3538add49b0d249b79 | import argparse
from collections import Counter
import numpy as np
from mpd import load
def main():
parser = argparse.ArgumentParser()
parser.add_argument('jsonfile')
args = parser.parse_args()
playlists = load(args.jsonfile)
print("N =", len(playlists))
lens = [len(p['tracks']) for p in playlists]
print("Playlist track count:", Counter(lens))
has_playlist = ['name' in p for p in playlists]
print("Has playlist name:", Counter(has_playlist))
nameless_lens = [len(p['tracks']) for p in playlists if 'name' not in p]
print("Playlist track count among nameless:", Counter(nameless_lens))
named_lens = [len(p['tracks']) for p in playlists if 'name' in p]
print("Playlist track count among nameless:", Counter(named_lens))
try:
holdouts = np.array([p['num_holdouts'] for p in playlists])
print("Holdouts: {:.2f} {:.2f}".format(holdouts.mean(), holdouts.std()))
except KeyError:
print("[warn] Num holdouts property missing")
if __name__ == '__main__':
main()
|
py | 1a4c545bb05e6ce0b722880022a1d8668da3b328 | from typing import Tuple
import numpy as np
import torch
from .bandits import DataBasedBandit
class WheelBandit(DataBasedBandit):
"""The wheel contextual bandit from the Riquelme et al 2018 paper.
Source:
https://github.com/tensorflow/models/tree/archive/research/deep_contextual_bandits
Citation:
Riquelme, Tucker, Snoek. Deep Bayesian bandits showdown: An empirical comparison of Bayesian deep networks for Thompson sampling. In Proceedings of the 6th International Conference on Learning Representations, 2018.
Args:
device (str): Device to use for tensor operations.
"cpu" for cpu or "cuda" for cuda. Defaults to "cpu".
Attributes:
n_actions (int): Number of actions available.
context_dim (int): The length of context vector.
len (int): The number of examples (context, reward pairs) in the dataset.
device (torch.device): Device to use for tensor operations.
"""
def __init__(self, delta=0.5, n_samples=2000, **kwargs):
super(WheelBandit, self).__init__(kwargs.get("device", "cpu"))
self.delta = delta
self.n_actions = 5
self.context_dim = 2
self.len = n_samples
self.mu = [1.2, 1.0, 50.0]
self.sigma = 0.01
self._sign_opt_action = {
(1.0, 1.0): 1,
(1.0, -1.0): 2,
(-1.0, 1.0): 3,
(-1.0, -1.0): 4,
}
self._generate_contexts()
self._generate_rewards()
def _generate_rewards(self):
r_all = np.random.normal(self.mu[1], self.sigma, size=(self.len, self.n_actions))
r_all[:,0] += self.mu[0] - self.mu[1]
for t in range(self.len):
if np.linalg.norm(self._context[t]) > self.delta:
signs = np.sign(self._context[t])
opt_act = self._sign_opt_action[(signs[0], signs[1])]
r_all[t, opt_act] += self.mu[2] - self.mu[1]
self.rewards = r_all
self.max_rewards = np.max(self.rewards, axis=1)
def reset(self) -> torch.Tensor:
"""Reset bandit by shuffling indices and get new context.
Returns:
torch.Tensor: Current context selected by bandit.
"""
self._reset()
self._generate_contexts()
self._generate_rewards()
return self._get_context()
def _compute_reward(self, action: int) -> Tuple[int, int]:
"""Compute the reward for a given action.
Args:
action (int): The action to compute reward for.
Returns:
Tuple[int, int]: Computed reward.
"""
r = self.rewards[self.idx, action]
max_r = self.max_rewards[self.idx]
return r, max_r
def _generate_contexts(self) -> None:
"""Returns 2-dim samples falling in the unit circle.
"""
theta = np.random.uniform(0.0, 2.0 * np.pi, (self.len))
r = np.sqrt(np.random.uniform(size=self.len)) # sqrt is in the original code of Riquelme et al
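# Taking the square root of a uniform sample makes P(R <= r) = r**2, which is
# exactly what sampling uniformly over the *area* of the unit disc requires;
# a plain uniform radius would cluster points near the centre.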
self._context = np.stack([r * np.cos(theta), r * np.sin(theta)], axis=1)
def _get_context(self) -> torch.Tensor:
"""Get the vector for current selected context.
Returns:
torch.Tensor: Current context vector.
"""
return torch.tensor(
self._context[self.idx],
device=self.device,
dtype=torch.float,
)
|
py | 1a4c5479125366541d28d6b670f86aec1d93a535 | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2021 Arthur
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The socket url for the dogehouse API
apiUrl ="wss://api.dogehouse.tv/socket"
# The websocket heartbeat interval (ping's on this interval)
heartbeatInterval = 8
# The interval for the public rooms requests
topPublicRoomsInterval = 15
# The time after which the connection is considered dead.
connectionTimeout = 15
|
py | 1a4c549c8c96204ac81cf639f812ff824f2c4a8d | import math
import operator as op
from functools import reduce
def memoize(f):
"""memoization decorator for a function taking one or more arguments"""
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__
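# A small usage sketch (the function below is hypothetical, not part of this module):
#
#   @memoize
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)   # each distinct n is computed once; repeated calls hit the dict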
@memoize
def catalan_recursive(n):
if n == 0:
return 1
return (2 * (2 * n - 1) * catalan_recursive(n - 1)) // (n + 1)
@memoize
def euler_recursive(n, k):
if (k == 0) or (n - 1 == k):
return 1
return (n - k) * euler_recursive(n - 1, k - 1) + (k + 1) * euler_recursive(n - 1, k)
@memoize
def stirling_1_recursive(n, k):
if (n == k == 0):
return 1
if (n == 0) or (k == 0):
return 0
return stirling_1_recursive(n - 1, k - 1) + (n - 1) * stirling_1_recursive(n - 1, k)
@memoize
def stirling_2_recursive(n, k):
if (k == 1) or (n == k):
return 1
return stirling_2_recursive(n - 1, k - 1) + k * stirling_2_recursive(n - 1, k)
nCr = lambda n, r: reduce(op.mul, range(n - r + 1, n + 1), 1) // math.factorial(r)
multinomial = lambda k: math.factorial(sum(k)) // reduce(op.mul, (math.factorial(i) for i in k))
derangements = lambda n: int(math.factorial(n) / math.e + 0.5)
bell = lambda n: sum(stirling_2_recursive(n, k) for k in range(1, n + 1)) if n else 1
catalan = lambda n: nCr(2 * n, n) // (n + 1)
euler = lambda n, k: sum((1 - 2 * (j & 1)) * nCr(n + 1, j) * ((k + 1 - j)**n) for j in range(k + 1))
stirling_2 = lambda n, k: sum(((-1)**(k - j)) * nCr(k, j) * (j**n) for j in range(k + 1)) // math.factorial(k)
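# A few spot checks for the closed forms above (small values verifiable by hand):
#   nCr(5, 2)        == 10
#   catalan(3)       == 5     # 1, 1, 2, 5, 14, ...
#   derangements(4)  == 9
#   bell(3)          == 5     # partitions of a 3-element set
#   euler(3, 1)      == 4     # Eulerian number <3, 1>
#   stirling_2(4, 2) == 7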
|
py | 1a4c551df577eb8cdb92e0ef88a532f0f649e42c | """Simple version of MBIE-EB
Paper:An analysis of model-based Interval Estimation for Markov
Decision Processes (Strehl and Littman, 2008)
Link: https://doi.org/10.1016/j.jcss.2007.08.009
"""
import numpy as np
from rlpy.representations import Enumerable
from .agent import Agent
from ._vi_impl import compute_q_values
__author__ = "Yuji Kanagawa"
class MBIE_EB(Agent):
"""
Simplified version of MBIE-EB algorithm,
which executes VI only when the episode ends.
"""
def __init__(
self,
*args,
beta=0.1,
seed=1,
spread_prior=False,
show_reward=False,
vi_threshold=1e-6,
):
"""
:param beta: β parameter in MBIE-EB (scale of the exploration bonus)
:param seed: Random seed.
:param spread_prior: Use alpha0/n_states as alpha0
:param show_reward: Render the bonus-adjusted mean rewards if the domain supports it
:param vi_threshold: Convergence threshold for value iteration
"""
super().__init__(*args, seed=seed)
if not isinstance(self.representation, Enumerable):
raise ValueError("PSRL works only with a tabular representation.")
n_states = self.representation.features_num
n_actions = self.representation.domain.num_actions
self.beta = beta
self.sa_count = np.zeros((n_states, n_actions))
self.r_sum = np.zeros((n_states, n_actions))
self.sas_count = np.zeros((n_states, n_actions, n_states))
self.n_states = n_states
self.n_actions = n_actions
self.ep_cap = self.representation.domain.episode_cap
self.update_steps = 0
self.show_reward = show_reward
self.vi_threshold = vi_threshold
def _update_prior(self, s, a, reward, terminal, ns):
s_id = self.representation.state_id(s)
self.sa_count[s_id, a] += 1
self.r_sum[s_id, a] += reward
if not terminal:
ns_id = self.representation.state_id(ns)
self.sas_count[s_id, a, ns_id] += 1
def _sample_mdp(self, show_reward=False):
r_sample = np.zeros_like(self.sa_count)
p_sample = np.zeros_like(self.sas_count)
for s in range(self.n_states):
for a in range(self.n_actions):
n = self.sa_count[s, a]
if n == 0:
continue
r = self.r_sum[s, a] / n
r_sample[s, a] = r + self.beta / np.sqrt(n)
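# MBIE-EB exploration bonus: the empirical mean reward is inflated by
# beta / sqrt(n(s, a)), so rarely visited state-action pairs look optimistic.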
p_sample[s, a] = self.sas_count[s, a] / n
if show_reward and hasattr(self.representation.domain, "show_reward"):
self.representation.domain.show_reward(r_sample.mean(axis=-1))
return r_sample, p_sample
def _solve_sampled_mdp(self):
r, p = self._sample_mdp(show_reward=self.show_reward)
q_value, _ = compute_q_values(
r, p, self.ep_cap, self.discount_factor, self.vi_threshold
)
self.representation.weight_vec = q_value.T.flatten()
self.update_steps += 1
def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
self._update_prior(s, a, r, terminal, ns)
if terminal is False:
return
self._solve_sampled_mdp()
|
py | 1a4c56a1689591ef87655a077e4505c54204f4b7 | # Generated by Django 3.0.3 on 2020-03-04 17:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orderManager', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Articulos',
new_name='Articles',
),
migrations.RenameModel(
old_name='Pedidos',
new_name='Orders',
),
]
|
py | 1a4c5798462211e8907f3886b3b0de87de6172e8 | """Module for building the autocompletion indices."""
from __future__ import print_function
import os
import json
from six import BytesIO
from docutils.core import publish_string
from botocore.docs.bcdoc import textwriter
import awscli.clidriver
from awscli.argprocess import ParamShorthandDocGen
from awsshell import determine_doc_index_filename
from awsshell.utils import remove_html
from awsshell import docs
SHORTHAND_DOC = ParamShorthandDocGen()
def new_index():
return {'arguments': [], 'argument_metadata': {},
'commands': [], 'children': {}}
def index_command(index_dict, help_command):
arg_table = help_command.arg_table
for arg in arg_table:
arg_obj = arg_table[arg]
metadata = {
'required': arg_obj.required,
'type_name': arg_obj.cli_type_name,
'minidoc': '',
'example': '',
# The name used in the API call/botocore,
# typically CamelCased.
'api_name': getattr(arg_obj, '_serialized_name', '')
}
if arg_obj.documentation:
metadata['minidoc'] = remove_html(
arg_obj.documentation.split('\n')[0])
if SHORTHAND_DOC.supports_shorthand(arg_obj.argument_model):
example = SHORTHAND_DOC.generate_shorthand_example(
arg, arg_obj.argument_model)
metadata['example'] = example
index_dict['arguments'].append('--%s' % arg)
index_dict['argument_metadata']['--%s' % arg] = metadata
for cmd in help_command.command_table:
index_dict['commands'].append(cmd)
# Each sub command will trigger a recurse.
child = new_index()
index_dict['children'][cmd] = child
sub_command = help_command.command_table[cmd]
sub_help_command = sub_command.create_help_command()
index_command(child, sub_help_command)
def write_index(output_filename=None):
driver = awscli.clidriver.create_clidriver()
help_command = driver.create_help_command()
index = {'aws': new_index()}
current = index['aws']
index_command(current, help_command)
result = json.dumps(index)
if not os.path.isdir(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
with open(output_filename, 'w') as f:
f.write(result)
def write_doc_index(output_filename=None, db=None, help_command=None):
if output_filename is None:
output_filename = determine_doc_index_filename()
user_provided_db = True
if db is None:
user_provided_db = False
db = docs.load_doc_db(output_filename)
if help_command is None:
driver = awscli.clidriver.create_clidriver()
help_command = driver.create_help_command()
should_close = not user_provided_db
do_write_doc_index(db, help_command, close_db_on_finish=should_close)
def do_write_doc_index(db, help_command, close_db_on_finish):
try:
_index_docs(db, help_command)
db['__complete__'] = 'true'
finally:
if close_db_on_finish:
# If the user provided their own db object,
# they are responsible for closing it.
# If we created our own db object, we own
# closing the db.
db.close()
def _index_docs(db, help_command):
for command_name in help_command.command_table:
command = help_command.command_table[command_name]
sub_help_command = command.create_help_command()
text_docs = render_docs_for_cmd(sub_help_command)
dotted_name = '.'.join(['aws'] + command.lineage_names)
db[dotted_name] = text_docs
_index_docs(db, sub_help_command)
def render_docs_for_cmd(help_command):
renderer = FileRenderer()
help_command.renderer = renderer
help_command(None, None)
# The report_level override is so that we don't print anything
# to stdout/stderr on rendering issues.
original_cli_help = renderer.contents.decode('utf-8')
text_content = convert_rst_to_basic_text(original_cli_help)
index = text_content.find('DESCRIPTION')
if index > 0:
text_content = text_content[index + len('DESCRIPTION'):]
return text_content
def convert_rst_to_basic_text(contents):
"""Converts restructured text to basic text output.
This function removes most of the decorations added
in restructured text.
This function is used to generate documentation we
can show to users in a cross platform manner.
Basic indentation and list formatting are kept,
but many RST features are removed (such as
section underlines).
"""
# The report_level override is so that we don't print anything
# to stdout/stderr on rendering issues.
converted = publish_string(
contents, writer=BasicTextWriter(),
settings_overrides={'report_level': 5})
return converted.decode('utf-8')
class FileRenderer(object):
def __init__(self):
self._io = BytesIO()
def render(self, contents):
self._io.write(contents)
@property
def contents(self):
return self._io.getvalue()
class BasicTextWriter(textwriter.TextWriter):
def translate(self):
visitor = BasicTextTranslator(self.document)
self.document.walkabout(visitor)
self.output = visitor.body
class BasicTextTranslator(textwriter.TextTranslator):
def depart_title(self, node):
# Make the section titles upper cased, similar to
# the man page output.
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
self.stateindent.pop()
self.states[-1].append((0, ['', text.upper(), '']))
# The botocore TextWriter has additional formatting
# for literals, for the aws-shell docs we don't want any
# special processing so these nodes are noops.
def visit_literal(self, node):
pass
def depart_literal(self, node):
pass
|
py | 1a4c57a30dac8849fa4a5beb01f4c981bff29214 | import json
import redis
from collections import defaultdict
class RedisDB:
"""Backend using Redis.
Parameters to open the database can be passed with the url format::
redis://[:password]@localhost:6379/0
"""
def __init__(self, name):
self.name = name
self._dbm = redis.from_url(name)
self._db = defaultdict(dict)
self.dirty = set()
def dump(self):
"""save/close DBM file"""
for task_id in self.dirty:
self._dbm[task_id] = json.dumps(self._db[task_id])
self.dirty = set()
sync = dump
def set(self, task_id, dependency, value):
"""Store value in the DB."""
self._db[task_id][dependency] = value
self.dirty.add(task_id)
def get(self, task_id, dependency):
"""Get value stored in the DB."""
# optimization, just try to get it without checking it exists
if task_id in self._db:
return self._db[task_id].get(dependency, None)
else:
try:
task_data = self._dbm[task_id]
except KeyError:
return
self._db[task_id] = json.loads(task_data.decode('utf-8'))
return self._db[task_id].get(dependency, None)
def in_(self, task_id):
"""@return bool if task_id is in DB"""
return task_id in self._dbm or task_id in self.dirty
def remove(self, task_id):
"""remove saved dependecies from DB for taskId"""
if task_id in self._db:
del self._db[task_id]
if task_id in self._dbm:
del self._dbm[task_id]
if task_id in self.dirty:
self.dirty.remove(task_id)
def remove_all(self):
"""remove saved dependecies from DB for all tasks"""
self._db = defaultdict(dict)
self._dbm.flushdb()
self.dirty = set()
|
py | 1a4c5854add010bf5939c15e9207ff7d3c7c9fdb | ur"""
.. _sec-binseg:
Binary segmentation
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
Binary change point detection is used to perform fast signal segmentation and is implemented in
:class:`ruptures.detection.BinSeg`.
It is a sequential approach: first, one change point is detected in the complete input signal, then
series is split around this change point, then the operation is repeated on the two resulting
sub-signals. See for instance :cite:`bs-Bai1997` and :cite:`bs-fryzlewicz2014` for a theoretical and
algorithmic analysis of :class:`ruptures.detection.BinSeg`.
The benefits of binary segmentation includes low complexity (of the order of
:math:`\mathcal{O}(n\log n)`, where :math:`n` is the number of samples), the fact that it can extend
any single change point detection method to detect multiple changes points and that it can work
whether the number of regimes is known beforehand or not.
.. figure:: /images/schema_binseg.png
:scale: 50 %
:alt: Schematic view of the binary segmentation algorithm
Schematic view of the binary segmentation algorithm.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create a signal.
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 3 # number of samples, dimension
n_bkps, sigma = 3, 5 # number of change points, noise standard deviation
signal, bkps = rpt.pw_constant(n, dim, n_bkps, noise_std=sigma)
To perform a binary segmentation of a signal, initialize a :class:`ruptures.detection.BinSeg`
instance.
.. code-block:: python
# change point detection
model = "l2" # "l1", "rbf", "linear", "normal", "ar"
algo = rpt.Binseg(model=model).fit(signal)
my_bkps = algo.predict(n_bkps=3)
# show results
rpt.show.display(signal, bkps, my_bkps, figsize=(10, 6))
plt.show()
In the situation in which the number of change points is unknown, one can specify a penalty using
the ``'pen'`` parameter or a threshold on the residual norm using ``'epsilon'``.
.. code-block:: python
my_bkps = algo.predict(pen=np.log(n)*dim*sigma**2)
# or
my_bkps = algo.predict(epsilon=3*n*sigma**2)
.. seealso:: :ref:`sec-general-formulation` for more information about stopping rules of sequential algorithms.
For faster predictions, one can modify the ``'jump'`` parameter during initialization.
The higher it is, the faster the prediction is achieved (at the expense of precision).
.. code-block:: python
algo = rpt.Binseg(model=model, jump=10).fit(signal)
Code explanation
----------------------------------------------------------------------------------------------------
.. autoclass:: ruptures.detection.Binseg
:members:
:special-members: __init__
.. rubric:: References
.. bibliography:: ../biblio.bib
:style: alpha
:cited:
:labelprefix: BS
:keyprefix: bs-
"""
from __future__ import absolute_import
from ruptures.base import BaseCost, BaseEstimator
from ruptures.costs import cost_factory
from ruptures.utils import pairwise
# 3 to 2 compatibility
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
class Binseg(BaseEstimator):
u"""Binary segmentation."""
def __init__(self, model=u"l2", custom_cost=None, min_size=2, jump=5, params=None):
u"""Initialize a Binseg instance.
Args:
model (str, optional): segment model, ["l1", "l2", "rbf",...]. Not used if ``'custom_cost'`` is not None.
custom_cost (BaseCost, optional): custom cost function. Defaults to None.
min_size (int, optional): minimum segment length. Defaults to 2 samples.
jump (int, optional): subsample (one every *jump* points). Defaults to 5 samples.
params (dict, optional): a dictionary of parameters for the cost instance.
Returns:
self
"""
if custom_cost is not None and isinstance(custom_cost, BaseCost):
self.cost = custom_cost
else:
if params is None:
self.cost = cost_factory(model=model)
else:
self.cost = cost_factory(model=model, **params)
self.min_size = max(min_size, self.cost.min_size)
self.jump = jump
self.n_samples = None
self.signal = None
# cache for intermediate results
self.single_bkp = lru_cache(maxsize=None)(self._single_bkp)
def _seg(self, n_bkps=None, pen=None, epsilon=None):
u"""Computes the binary segmentation.
The stopping rule depends on the parameter passed to the function.
Args:
n_bkps (int): number of breakpoints to find before stopping.
penalty (float): penalty value (>0)
epsilon (float): reconstruction budget (>0)
Returns:
dict: partition dict {(start, end): cost value,...}
"""
# initialization
bkps = [self.n_samples]
stop = False
while not stop:
stop = True
new_bkps = [self.single_bkp(start, end)
for start, end in pairwise([0] + bkps)]
bkp, gain = max(new_bkps, key=lambda x: x[1])
if bkp is None: # all possible configuration have been explored.
break
if n_bkps is not None:
if len(bkps) - 1 < n_bkps:
stop = False
elif pen is not None:
if gain > pen:
stop = False
elif epsilon is not None:
error = self.cost.sum_of_costs(bkps)
if error > epsilon:
stop = False
if not stop:
bkps.append(bkp)
bkps.sort()
partition = dict(((start, end), self.cost.error(start, end))
for start, end in pairwise([0] + bkps))
return partition
def _single_bkp(self, start, end):
u"""Return the optimal breakpoint of [start:end] (if it exists)."""
segment_cost = self.cost.error(start, end)
gain_list = list()
for bkp in xrange(start, end, self.jump):
if bkp - start > self.min_size and end - bkp > self.min_size:
gain = segment_cost - \
self.cost.error(start, bkp) - self.cost.error(bkp, end)
gain_list.append((gain, bkp))
try:
gain, bkp = max(gain_list)
except ValueError: # if empty sub_sampling
return None, 0
return bkp, gain
def fit(self, signal):
u"""Compute params to segment signal.
Args:
signal (array): signal to segment. Shape (n_samples, n_features) or (n_samples,).
Returns:
self
"""
# update some params
if signal.ndim == 1:
self.signal = signal.reshape(-1, 1)
else:
self.signal = signal
self.n_samples, _ = self.signal.shape
self.cost.fit(signal)
self.single_bkp.cache_clear()
return self
def predict(self, n_bkps=None, pen=None, epsilon=None):
u"""Return the optimal breakpoints.
Must be called after the fit method. The breakpoints are associated with the signal passed
to fit().
The stopping rule depends on the parameter passed to the function.
Args:
n_bkps (int): number of breakpoints to find before stopping.
penalty (float): penalty value (>0)
epsilon (float): reconstruction budget (>0)
Returns:
list: sorted list of breakpoints
"""
msg = u"Give a parameter."
assert any(param is not None for param in (n_bkps, pen, epsilon)), msg
partition = self._seg(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
bkps = sorted(e for s, e in partition.keys())
return bkps
def fit_predict(self, signal, n_bkps=None, pen=None, epsilon=None):
u"""Fit to the signal and return the optimal breakpoints.
Helper method to call fit and predict once
Args:
signal (array): signal. Shape (n_samples, n_features) or (n_samples,).
n_bkps (int): number of breakpoints.
penalty (float): penalty value (>0)
epsilon (float): reconstruction budget (>0)
Returns:
list: sorted list of breakpoints
"""
self.fit(signal)
return self.predict(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
|
bzl | 1a4c58be6567024495a65fc9e1fbb2d7d1bb74f5 | """Play Routes rules
Bazel rules for running the
[Play routes file compiler](https://github.com/playframework/playframework/tree/master/framework/src/routes-compiler/src/main/scala/play/routes/compiler)
on Play routes files
"""
gendir_base_path = "play/routes"
play_imports = [
"controllers.Assets.Asset",
]
# TODO: update this
canonical_external_repo_name = "XXX_name_goes_here"
def _sanitize_string_for_usage(s):
res_array = []
for i in range(len(s)):
c = s[i]
if c.isalnum() or c == ".":
res_array.append(c)
else:
res_array.append("_")
return "".join(res_array)
def _format_import_args(imports):
return ["--routesImport={}".format(i) for i in imports]
def _impl(ctx):
gendir = ctx.actions.declare_directory(
gendir_base_path + "/" + _sanitize_string_for_usage(ctx.attr.name)
)
paths = [f.path for f in ctx.files.srcs]
args = [gendir.path] + [",".join(paths)]
if ctx.attr.include_play_imports:
args = args + _format_import_args(play_imports)
args = args + _format_import_args(ctx.attr.routes_imports)
if ctx.attr.generate_reverse_router:
args = args + ["--generateReverseRouter"]
if ctx.attr.namespace_reverse_router:
args = args + ["--namespaceReverserRouter"]
if ctx.attr.routes_generator:
args = args + ["--routesGenerator={}".format(ctx.attr.routes_generator)]
ctx.actions.run(
inputs = ctx.files.srcs,
outputs = [gendir],
arguments = args,
progress_message = "Compiling play routes",
executable = ctx.executable._play_routes_compiler,
)
# TODO: something more portable
ctx.actions.run_shell(
inputs = [gendir],
outputs = [ctx.outputs.srcjar],
arguments = [ctx.executable._zipper.path, gendir.path, gendir.short_path, ctx.outputs.srcjar.path],
command = """$1 c $4 META-INF/= $(find -L $2 -type f | while read v; do echo ${v#"${2%$3}"}=$v; done)""",
progress_message = "Bundling compiled play routes into srcjar",
tools = [ctx.executable._zipper],
)
play_routes = rule(
implementation = _impl,
doc = "Compiles Play routes files templates to Scala sources files.",
attrs = {
"srcs": attr.label_list(
doc = "Play routes files",
allow_files = True,
mandatory = True
),
"routes_imports": attr.string_list(
doc = "Additional imports to import to the Play routes",
),
"routes_generator": attr.string(
doc = "The full class of the routes generator, e.g., `play.routes.compiler.InjectedRoutesGenerator`",
default = ""
),
"generate_reverse_router": attr.bool(
doc = "Whether the reverse router should be generated. Setting to false may reduce compile times if it's not needed.",
default = False
),
"namespace_reverse_router": attr.bool(
doc = "Whether the reverse router should be namespaced. Useful if you have many routers that use the same actions.",
default = False
),
"include_play_imports": attr.bool(
doc = "If true, include the imports the Play project includes by default.",
default = False
),
"_play_routes_compiler": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("@"+canonical_external_repo_name+"//:compiler"),
),
"_zipper": attr.label(cfg = "host", default = "@bazel_tools//tools/zip:zipper", executable = True),
},
outputs = {
"srcjar": "play_routes_%{name}.srcjar",
}
)
# This is the implementation of the repository rule. It downloads the
# play-routes compiler as a deploy JAR from the releases page.
#
# See https://docs.bazel.build/versions/master/skylark/lib/globals.html#repository_rule
def _play_app_repository_rule_implementation(repository_ctx):
"""Implementation for play_app_repository_rule"""
base_url = "https://github.com/lucidsoftware/rules_play_routes/releases/download"
compiler_url = "{}/{}/play-routes-compiler_deploy.jar".format(
base_url,
repository_ctx.attr.version,
)
repository_ctx.report_progress("Downloading compiler from {}".format(compiler_url))
download_info = repository_ctx.download(
compiler_url,
output = "play-routes-compiler_deploy.jar",
sha256 = repository_ctx.attr.sha256,
)
repository_ctx.report_progress("Successfully downloaded compiler from {}, sha256={}".format(
compiler_url,
download_info.sha256,
))
# Write a build file that turns the deployment JAR into a Java binary that
# we can run.
build_file_content = """java_import(
name = "deployjar",
jars = [":play-routes-compiler_deploy.jar"],
)
java_binary(
name = "compiler",
main_class = "rulesplayroutes.routes.CommandLinePlayRoutesCompiler",
visibility = ["//visibility:public"],
runtime_deps = [":deployjar"],
)
"""
repository_ctx.file("BUILD", content = build_file_content, executable = False)
# Declares the repository rule.
_play_app_repository_rule = repository_rule(
implementation = _play_app_repository_rule_implementation,
local = True,
attrs = {
"version": attr.string(mandatory = True),
"sha256": attr.string(mandatory = True),
},
doc = "play_repositories loads the Play Framework rules into a WORKSPACE"
)
# Default release versions specified to play_repositories.
_default_compiler_version = "GITHUB RELEASE NAME HERE"
_default_compiler_jar_sha = "JAR SHA HERE"
# play_repositories is a repository rule that introduces a new external
# repository into the WORKSPACE that invokes this rule. This activates the
# Play rules and is the main entrypoint for consumers of these rules. This is
# required in the WORKSPACE that will depend on the rules.
#
# The rules depend on a small number of compiled binaries which are available
# on the Github releases page for this repository. The argument to this
# function, tools_version_and_shas, is a tuple specifying the
#
# 1. Name of a release (e.g. "v0.0.2")
# 2. SHA256 of the play-routes-compiler_deploy.jar from that release.
#
# A default is provided.
def play_repositories(tools_version_and_shas = (_default_compiler_version, _default_compiler_jar_sha)):
(compiler_vers, jar_shas) = tools_version_and_shas
_play_app_repository_rule(
name = canonical_external_repo_name,
version = compiler_vers,
sha256 = jar_shas,
)
|
py | 1a4c59ee9810ac7323d7f6cda19f599fce219f33 | class InputBinding(Freezable,ISealable,ICommandSource):
"""
Represents a binding between a System.Windows.Input.InputGesture and a command. The command is potentially a System.Windows.Input.RoutedCommand.
InputBinding(command: ICommand,gesture: InputGesture)
"""
def CloneCore(self,*args):
"""
CloneCore(self: InputBinding,sourceFreezable: Freezable)
Copies the base (non-animated) values of the properties of the specified object.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: InputBinding,sourceFreezable: Freezable)
Copies the current values of the properties of the specified object.
sourceFreezable: The object to clone.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
"""
CreateInstanceCore(self: InputBinding) -> Freezable
Creates an instance of an System.Windows.Input.InputBinding.
Returns: The new object.
"""
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can
be made unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without
actually freezing it); false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable
can be made unmodifiable,or false if it cannot be made unmodifiable. If
isChecking is false,this method returns true if the if the specified
System.Windows.Freezable is now unmodifiable,or false if it cannot be made
unmodifiable.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: InputBinding,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable by
using base (non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: InputBinding,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified
System.Windows.Freezable. If the object has animated dependency properties,
their current animated values are copied.
sourceFreezable: The object to clone.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure
and is not intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPrope
rtyChangedEventArgs) to also invoke any System.Windows.Freezable.Changed
handlers in response to a changing dependency property of type
System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old
and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid
thread. Inheritors of System.Windows.Freezable must call this method at the
beginning of any API that reads data members that are not dependency
properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize
the value for the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized;
otherwise,false.
ShouldSerializeProperty(self: Window_16$17,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Label_17$18,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: TextBox_18$19,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Button_19$20,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: CheckBox_20$21,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the
System.Windows.Freezable and invokes its System.Windows.Freezable.OnChanged
method. Classes that derive from System.Windows.Freezable should call this
method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being
accessed from a valid threading context. System.Windows.Freezable inheritors
should call this method at the beginning of any API that writes to data members
that are not dependency properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,command,gesture):
"""
__new__(cls: type)
__new__(cls: type,command: ICommand,gesture: InputGesture)
"""
pass
Command=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Input.ICommand associated with this input binding.
Get: Command(self: InputBinding) -> ICommand
Set: Command(self: InputBinding)=value
"""
CommandParameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the command-specific data for a particular command.
Get: CommandParameter(self: InputBinding) -> object
Set: CommandParameter(self: InputBinding)=value
"""
CommandTarget=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the target element of the command.
Get: CommandTarget(self: InputBinding) -> IInputElement
Set: CommandTarget(self: InputBinding)=value
"""
Gesture=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the System.Windows.Input.InputGesture associated with this input binding.
Get: Gesture(self: InputBinding) -> InputGesture
Set: Gesture(self: InputBinding)=value
"""
CommandParameterProperty=None
CommandProperty=None
CommandTargetProperty=None
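# A hedged usage sketch (comment-only; not part of the generated stub): with the
# WPF assemblies referenced from IronPython, an InputBinding is normally created
# through a derived type such as KeyBinding, e.g.
#
#     binding = KeyBinding(ApplicationCommands.Open,
#                          KeyGesture(Key.O, ModifierKeys.Control))
#     window.InputBindings.Add(binding)
#
# KeyBinding, KeyGesture, Key and ModifierKeys come from System.Windows.Input and
# are assumed to be imported; `window` stands for any UIElement instance.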
|
py | 1a4c5a3b005d87a65df32a38e1c5ccecc7d3bce0 | class User:
def __init__(self, user: dict):
self.id = user.get("id")
self.type = user.get("type")
self.roles = user.get("roles")
self.email = user.get("email")
|
py | 1a4c5a66595d855648e401b765348c65a7e268b1 | import typing as t
from typing import TYPE_CHECKING
from starlette.requests import Request
from multipart.multipart import parse_options_header
from starlette.responses import Response
from .base import IOType
from .base import IODescriptor
from ...exceptions import InvalidArgument
from ...exceptions import BentoMLException
from ..utils.formparser import populate_multipart_requests
from ..utils.formparser import concat_to_multipart_responses
if TYPE_CHECKING:
from .file import File
from .json import JSON
from .text import Text
from .image import Image
from .numpy import NumpyNdarray
from .pandas import PandasSeries
from .pandas import PandasDataFrame
MultipartIO = t.Dict[str, IOType]
class Multipart(IODescriptor[MultipartIO]):
"""
:code:`Multipart` defines API specification for the inputs/outputs of a Service, where inputs/outputs
    of a Service can receive/send a *multipart* request/response as specified in your API function signature.
Sample implementation of a sklearn service:
.. code-block:: python
# sklearn_svc.py
import bentoml
from bentoml.io import NumpyNdarray, Multipart, JSON
import bentoml.sklearn
runner = bentoml.sklearn.load_runner("sklearn_model_clf")
svc = bentoml.Service("iris-classifier", runners=[runner])
input_spec = Multipart(arr=NumpyNdarray(), annotations=JSON())
output_spec = Multipart(output=NumpyNdarray(), result=JSON())
@svc.api(input=input_spec, output=output_spec)
def predict(arr, annotations):
res = runner.run(arr)
return {"output":res, "result":annotations}
    Users can then serve this service with :code:`bentoml serve`:
.. code-block:: bash
% bentoml serve ./sklearn_svc.py:svc --reload
(Press CTRL+C to quit)
[INFO] Starting BentoML API server in development mode with auto-reload enabled
[INFO] Serving BentoML Service "iris-classifier" defined in "sklearn_svc.py"
[INFO] API Server running on http://0.0.0.0:3000
Users can then send requests to the newly started services with any client:
.. tabs::
.. code-tab:: python
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
m = MultipartEncoder(
fields={'field0': 'value', 'field1': 'value',
'field2': ('filename', open('test.json', 'rb'), 'application/json')}
)
requests.post('http://0.0.0.0:3000/predict', data=m, headers={'Content-Type': m.content_type})
.. code-tab:: bash
% curl -X POST -H "Content-Type: multipart/form-data" -F [email protected] -F arr='[5,4,3,2]' http://0.0.0.0:3000/predict
--b1d72c201a064ecd92a17a412eb9208e
Content-Disposition: form-data; name="output"
content-length: 1
content-type: application/json
1
--b1d72c201a064ecd92a17a412eb9208e
Content-Disposition: form-data; name="result"
content-length: 13
content-type: application/json
{"foo":"bar"}
--b1d72c201a064ecd92a17a412eb9208e--
Args:
inputs (:code:`Dict[str, IODescriptor]`):
            Dictionary whose keys are the input names for a Multipart
            request/response and whose values are IODescriptors supported by BentoML.
            Currently, Multipart supports Image, NumpyNdarray, PandasDataFrame,
            PandasSeries, Text, and File.
Make sure to match the input params in an API function to the keys defined
under :code:`Multipart`:
.. code-block:: bash
+----------------------------------------------------------------+
| |
| +--------------------------------------------------------+ |
| | | |
| | Multipart(arr=NumpyNdarray(), annotations=JSON() | |
| | | |
| +----------------+-----------------------+---------------+ |
| | | |
| | | |
| | | |
| +----+ +---------+ |
| | | |
| +---------------v--------v---------+ |
| | def predict(arr, annotations): | |
| +----------------------------------+ |
| |
+----------------------------------------------------------------+
Returns:
        :obj:`~bentoml._internal.io_descriptors.IODescriptor`: IO Descriptor that represents a Multipart request/response.
"""
def __init__(
self,
**inputs: t.Union[
"Image",
"JSON",
"Text",
"NumpyNdarray",
"PandasDataFrame",
"PandasSeries",
"File",
],
):
for descriptor in inputs.values():
if isinstance(descriptor, Multipart): # pragma: no cover
raise InvalidArgument(
"Multipart IO can not contain nested Multipart item"
)
self._inputs: t.Dict[
str,
t.Union[
"Image",
"JSON",
"Text",
"NumpyNdarray",
"PandasDataFrame",
"PandasSeries",
"File",
],
] = inputs
def openapi_schema_type(self) -> t.Dict[str, t.Any]:
return {
"type": "object",
"properties": {
k: io.openapi_schema_type() for k, io in self._inputs.items()
},
}
def openapi_request_schema(self) -> t.Dict[str, t.Any]:
"""Returns OpenAPI schema for incoming requests"""
return {"multipart/form-data": {"schema": self.openapi_schema_type()}}
def openapi_responses_schema(self) -> t.Dict[str, t.Any]:
"""Returns OpenAPI schema for outcoming responses"""
return {"multipart/form-data": {"schema": self.openapi_schema_type()}}
async def from_http_request(self, request: Request) -> MultipartIO:
ctype, _ = parse_options_header(request.headers["content-type"])
if ctype != b"multipart/form-data":
raise BentoMLException(
f"{self.__class__.__name__} only accepts `multipart/form-data` as Content-Type header, got {ctype} instead."
)
res: MultipartIO = dict()
reqs = await populate_multipart_requests(request)
for k, i in self._inputs.items():
req = reqs[k]
v = await i.from_http_request(req)
res[k] = v
return res
async def to_http_response(self, obj: MultipartIO) -> Response:
res_mapping: t.Dict[str, Response] = {}
for k, io_ in self._inputs.items():
data = obj[k]
# TODO(aarnphm): fix with stubs
res_mapping[k] = await io_.to_http_response(data) # type: ignore[reportGeneralTypeIssue]
return await concat_to_multipart_responses(res_mapping)
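# A minimal usage sketch (comment-only, since this module relies on package-relative
# imports): each keyword passed to Multipart contributes one entry to the composed
# multipart schema, e.g.
#
#     from bentoml.io import JSON, Multipart, NumpyNdarray
#     spec = Multipart(arr=NumpyNdarray(), annotations=JSON())
#     spec.openapi_request_schema()
#     # -> {"multipart/form-data": {"schema": {"type": "object", "properties": {"arr": ..., "annotations": ...}}}}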
|
py | 1a4c5a78982bae2a81a8570905c77c32f7a34369 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.protocol}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.internet.interfaces import (
IProtocol, ILoggingContext, IProtocolFactory, IConsumer)
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import (
Protocol, ClientCreator, Factory, ProtocolToConsumerAdapter,
ConsumerToProtocolAdapter)
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted.logger import LogLevel, globalLogPublisher
class ClientCreatorTests(TestCase):
"""
Tests for L{twisted.internet.protocol.ClientCreator}.
"""
def _basicConnectTest(self, check):
"""
Helper for implementing a test to verify that one of the I{connect}
methods of L{ClientCreator} passes the right arguments to the right
reactor method.
@param check: A function which will be invoked with a reactor and a
L{ClientCreator} instance and which should call one of the
L{ClientCreator}'s I{connect} methods and assert that all of its
arguments except for the factory are passed on as expected to the
reactor. The factory should be returned.
"""
class SomeProtocol(Protocol):
pass
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, SomeProtocol)
factory = check(reactor, cc)
protocol = factory.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
def test_connectTCP(self):
"""
L{ClientCreator.connectTCP} calls C{reactor.connectTCP} with the host
and port information passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectTCP('example.com', 1234, 4321, ('1.2.3.4', 9876))
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('1.2.3.4', 9876))
return factory
self._basicConnectTest(check)
def test_connectUNIX(self):
"""
L{ClientCreator.connectUNIX} calls C{reactor.connectUNIX} with the
filename passed to it, and with a factory which will construct the
protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectUNIX('/foo/bar', 123, True)
address, factory, timeout, checkPID = reactor.unixClients.pop()
self.assertEqual(address, '/foo/bar')
self.assertEqual(timeout, 123)
self.assertEqual(checkPID, True)
return factory
self._basicConnectTest(check)
def test_connectSSL(self):
"""
L{ClientCreator.connectSSL} calls C{reactor.connectSSL} with the host,
port, and context factory passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
expectedContextFactory = object()
cc.connectSSL('example.com', 1234, expectedContextFactory, 4321, ('4.3.2.1', 5678))
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertIs(contextFactory, expectedContextFactory)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('4.3.2.1', 5678))
return factory
self._basicConnectTest(check)
def _cancelConnectTest(self, connect):
"""
Helper for implementing a test to verify that cancellation of the
L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
implemented to cancel the underlying connector.
@param connect: A function which will be invoked with a L{ClientCreator}
instance as an argument and which should call one its I{connect}
methods and return the result.
@return: A L{Deferred} which fires when the test is complete or fails if
there is a problem.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(cc)
connector = reactor.connectors.pop()
self.assertFalse(connector._disconnected)
d.cancel()
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCP(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectTCP('example.com', 1234)
return self._cancelConnectTest(connect)
def test_cancelConnectUNIX(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectUNIX} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectUNIX('/foo/bar')
return self._cancelConnectTest(connect)
def test_cancelConnectSSL(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectSSL} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectSSL('example.com', 1234, object())
return self._cancelConnectTest(connect)
def _cancelConnectTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection is set up but before it is fired with the
resulting protocol instance.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(reactor, cc)
connector = reactor.connectors.pop()
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, disconnecting the transport just set up and
# cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
# A real connector implementation is responsible for disconnecting the
# transport as well. For our purposes, just check that someone told the
# connector to disconnect.
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPTimeout(self):
"""
L{ClientCreator.connectTCP} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectUNIXTimeout(self):
"""
L{ClientCreator.connectUNIX} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectSSLTimeout(self):
"""
L{ClientCreator.connectSSL} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def _cancelConnectFailedTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection attempt has failed but before it is fired
with the resulting failure.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d, factory = connect(reactor, cc)
connector = reactor.connectors.pop()
factory.clientConnectionFailed(
connector, Failure(Exception("Simulated failure")))
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPFailedTimeout(self):
"""
Similar to L{test_cancelConnectTCPTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectUNIXFailedTimeout(self):
"""
Similar to L{test_cancelConnectUNIXTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectSSLFailedTimeout(self):
"""
Similar to L{test_cancelConnectSSLTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
class ProtocolTests(TestCase):
"""
Tests for L{twisted.internet.protocol.Protocol}.
"""
def test_interfaces(self):
"""
L{Protocol} instances provide L{IProtocol} and L{ILoggingContext}.
"""
proto = Protocol()
self.assertTrue(verifyObject(IProtocol, proto))
self.assertTrue(verifyObject(ILoggingContext, proto))
def test_logPrefix(self):
"""
L{Protocol.logPrefix} returns the protocol class's name.
"""
class SomeThing(Protocol):
pass
self.assertEqual("SomeThing", SomeThing().logPrefix())
def test_makeConnection(self):
"""
L{Protocol.makeConnection} sets the given transport on itself, and
then calls C{connectionMade}.
"""
result = []
class SomeProtocol(Protocol):
def connectionMade(self):
result.append(self.transport)
transport = object()
protocol = SomeProtocol()
protocol.makeConnection(transport)
self.assertEqual(result, [transport])
class FactoryTests(TestCase):
"""
Tests for L{protocol.Factory}.
"""
def test_interfaces(self):
"""
L{Factory} instances provide both L{IProtocolFactory} and
L{ILoggingContext}.
"""
factory = Factory()
self.assertTrue(verifyObject(IProtocolFactory, factory))
self.assertTrue(verifyObject(ILoggingContext, factory))
def test_logPrefix(self):
"""
L{Factory.logPrefix} returns the name of the factory class.
"""
class SomeKindOfFactory(Factory):
pass
self.assertEqual("SomeKindOfFactory", SomeKindOfFactory().logPrefix())
def test_defaultBuildProtocol(self):
"""
L{Factory.buildProtocol} by default constructs a protocol by calling
its C{protocol} attribute, and attaches the factory to the result.
"""
class SomeProtocol(Protocol):
pass
f = Factory()
f.protocol = SomeProtocol
protocol = f.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
self.assertIs(protocol.factory, f)
def test_forProtocol(self):
"""
L{Factory.forProtocol} constructs a Factory, passing along any
additional arguments, and sets its C{protocol} attribute to the given
Protocol subclass.
"""
class ArgTakingFactory(Factory):
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
factory = ArgTakingFactory.forProtocol(Protocol, 1, 2, foo=12)
self.assertEqual(factory.protocol, Protocol)
self.assertEqual(factory.args, (1, 2))
self.assertEqual(factory.kwargs, {"foo": 12})
def test_doStartLoggingStatement(self):
"""
L{Factory.doStart} logs that it is starting a factory, followed by
the L{repr} of the L{Factory} instance that is being started.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
f = Factory()
f.doStart()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Starting factory {factory!r}')
def test_doStopLoggingStatement(self):
"""
L{Factory.doStop} logs that it is stopping a factory, followed by
the L{repr} of the L{Factory} instance that is being stopped.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
class MyFactory(Factory):
numPorts = 1
f = MyFactory()
f.doStop()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Stopping factory {factory!r}')
class AdapterTests(TestCase):
"""
Tests for L{ProtocolToConsumerAdapter} and L{ConsumerToProtocolAdapter}.
"""
def test_protocolToConsumer(self):
"""
L{IProtocol} providers can be adapted to L{IConsumer} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
p = Protocol()
p.dataReceived = result.append
consumer = IConsumer(p)
consumer.write(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(consumer, ProtocolToConsumerAdapter)
def test_consumerToProtocol(self):
"""
L{IConsumer} providers can be adapted to L{IProtocol} providers using
        L{ConsumerToProtocolAdapter}.
"""
result = []
@implementer(IConsumer)
class Consumer(object):
def write(self, d):
result.append(d)
c = Consumer()
protocol = IProtocol(c)
protocol.dataReceived(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(protocol, ConsumerToProtocolAdapter)
|
py | 1a4c5a8b1b813fffafff60b8e54107697f3ce64f | import asyncio
import re
import requests
import spotipy
from aiohttp import ClientSession
from nextcord import User
from emoji import demojize
from googleapiclient.discovery import build
from spotipy.oauth2 import SpotifyClientCredentials
from src.bot.__tokens__ import __tokens__
from src.music.song import Song
youtube = build('youtube', 'v3', developerKey=__tokens__['google'])
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(
__tokens__['spotify_client'], __tokens__['spotify']))
async def get_songs(requester: User, query: str) -> list[Song]:
# Youtube
if query.find('youtube') != -1:
# Playlist
if query.find('list=') != -1:
playlist_id = query[query.find('list=')+5:]
if playlist_id.find('&') != -1:
playlist_id = playlist_id[:playlist_id.find('&')]
return await get_youtube_playlist(requester, playlist_id)
        # Video
if query.find('watch?v=') != -1:
video_id = query[query.find('watch?v=')+8:]
if video_id.find('&') != -1:
video_id = video_id[:video_id.find('&')]
return await get_youtube_video(requester, [video_id])
# Spotify
if query.find('spotify') != -1:
# Playlist
if query.find('playlist/') != -1:
return await get_spotify_playlist(requester, query[query.find('playlist/') + 9:])
        # Track
return await getSpotifyTrack(requester, query[query.find('track/')+6:])
# Youtube Search
return await search_youtube_video(requester, query)
async def get_youtube_playlist(requester: User, playlist_id: str) -> list[Song]:
playlist = []
response = {'nextPageToken': None}
# Go through each playlist page and extract all videos in it
while True:
video_ids = []
if 'nextPageToken' not in response.keys():
break
request = youtube.playlistItems().list(
part='contentDetails, snippet',
maxResults=50,
pageToken=response['nextPageToken'],
playlistId=playlist_id
)
response = request.execute()
if response['items']:
for video in response['items'][:-1]:
if video['snippet']['thumbnails']:
video_ids.append(video['contentDetails']['videoId'])
playlist += await get_youtube_video(requester, video_ids)
return playlist
async def get_youtube_video(requester: User, video_ids: list) -> list[Song]:
videos = []
if video_ids:
id_string = ''.join(video_id + ',' for video_id in video_ids[:-1])
id_string += video_ids[-1]
request = youtube.videos().list(
part='snippet,contentDetails',
id=id_string
)
response = request.execute()
for video in response['items']:
videos.append(Song(requester, video))
return videos
async def search_youtube_video(requester: User, query: str, max_results: int = 1) -> list[Song]:
url = demojize(f'https://www.youtube.com/results?search_query={query.replace(" ", "+")}&sp=EgIQAQ%253D%253D')
response = requests.get(url)
return await get_youtube_video(requester, re.findall(r'watch\?v=(\S{11})', response.text)[:max_results])
async def fetch(url: str, session) -> str:
async with session.get(demojize(url)) as response:
html_body = await response.read()
ids = re.findall(r'watch\?v=(\S{11})', html_body.decode())
if ids and len(ids):
return ids[0]
else:
return ""
async def youtube_multi_search(queries: list[str]) -> list[str]:
async with ClientSession() as session:
tasks = []
for query in queries:
url = f'https://www.youtube.com/results?search_query={query.replace(" ", "+")}&sp=EgIQAQ%253D%253D'
tasks.append(
asyncio.create_task(
fetch(url, session)
)
)
pages = await asyncio.gather(*tasks)
return list(filter(None, pages))
async def get_spotify_playlist(requester: User, playlist_id: str) -> list[Song]:
songs = []
results = sp.playlist(playlist_id)
tracks = results['tracks']
items = [await get_track_query(track_meta) for track_meta in tracks['items']]
while tracks['next']:
tracks = sp.next(tracks)
items.extend([await get_track_query(track_meta) for track_meta in tracks['items']])
ids = await youtube_multi_search(items)
for x in range(0, len(ids), 50):
songs.extend(await get_youtube_video(requester, ids[x:x+50]))
return songs
async def get_track_query(track_meta):
return f'{track_meta["track"]["name"]} {track_meta["track"]["album"]["artists"][0]["name"]}'
async def getSpotifyTrack(requester: User, track_id: str) -> list[Song]:
meta = sp.track(track_id)
track_name = f'{meta["name"]} {meta["album"]["artists"][0]["name"]}'
return await search_youtube_video(requester, track_name)
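# A minimal usage sketch (comment-only: it needs live Google/Spotify tokens in
# __tokens__ and a real nextcord User, which are assumptions outside this module):
#
#     songs = await get_songs(requester, "https://open.spotify.com/track/<id>")
#     songs += await get_songs(requester, "lo-fi beats")  # plain text falls back to a YouTube search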
|
py | 1a4c5b131f8d8071b7c2e664749b336c96cb1d32 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
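# ``hiddenimports`` is the standard PyInstaller hook variable: it lists modules the
# hooked package imports dynamically (invisible to static analysis) so PyInstaller
# bundles them; here that is sip plus the PyQt4 core/gui bindings.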
hiddenimports = ['sip', 'PyQt4.QtCore', 'PyQt4.QtGui']
|
py | 1a4c5b2ea37e8bbd4ed615a6655b8a636a2eb45d | #Façaumalgoritmoquerecebaovalordosaláriomínimoeovalordosaláriodeumfuncionário,
#calculeemostreaquantidadedesaláriosmínimosqueganhaessefuncionário.
salarioMin=float(input("Informe o valor do salário mín:"))
salarioFun=float(input("Informe o valor do salário do funcionário:"))
qtdSalMin= salarioFun/salarioMin
if(qtdSalMin < 1):
print("O funcionário ganha menos que um salário mínimo!")
else:
print("O funcionário recebe {0:.2f}".format(round(qtdSalMin,2))," salários mínimos." ) |
py | 1a4c5c55929a3cfc420e87a64cd29f73e5baf364 | """Classes that define the inference engines"""
from typing import Optional
import numpy as np
from ase import Atoms
from matminer.featurizers.structure import CoulombMatrix
from proxima.data import BaseDataSource
from pymatgen.io.ase import AseAtomsAdaptor
from sklearn.linear_model import BayesianRidge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import Pipeline
from proxima.inference import ScikitLearnInferenceEngine, BaseInferenceEngine
from mcdemo.lfa.gap.skl import SOAPConverter, ScalableKernel
class CoulombMatrixKNNSurrogate(ScikitLearnInferenceEngine):
"""A very fast implementation for a model: Coulomb Matrix via Matminer plus a KNN surrogate model"""
def __init__(self, n_neighbors: int = 5):
"""
Args:
n_neighbors (int): Number of neighboring points to use for the NN model
"""
cm = CoulombMatrix(flatten=True)
cm.set_n_jobs(1)
model = Pipeline([
('featurizer', cm),
('scaler', RobustScaler()),
('model', KNeighborsRegressor(n_neighbors))
])
super().__init__(model)
def infer(self, X: Atoms) -> float:
# Convert to pymatgen format needed by matminer
strc = AseAtomsAdaptor.get_molecule(X[0])
return self.model.predict([strc])[0]
class GAPSurrogate(BaseInferenceEngine):
"""Inference engine using the Gaussian Approximation Potentials
Uses SOAP to compute molecular representation, a scalable kernel
to compute molecular symmetries, and BayesianRidge regression to
fit model parameters
"""
def __init__(self, max_kernel_size: int = 256, soap_settings: Optional[dict] = None,
gamma: float = 1.0):
"""
Args:
max_kernel_size (int): Maximum number of training entries to use in the GAP KRR model
Larger values lead to higher accuracies at a greater computational cost
soap_settings (dict): Settings that define the SOAP representation for a molecule
gamma (float): Width parameter for the kernels
"""
super().__init__()
self.max_kernel_size = max_kernel_size
if soap_settings is None:
soap_settings = dict()
# Build the model
self.model = Pipeline([
('soap', SOAPConverter(**soap_settings)),
('kernel', ScalableKernel(max_points=max_kernel_size, gamma=gamma)),
('model', BayesianRidge(fit_intercept=True))
])
def infer(self, X: Atoms) -> float:
return self.model.predict([X])[0]
def retrain(self, data: BaseDataSource):
X, y = data.get_all_data()
# Fit the model
# TODO (wardlt): Consider adding some hyperparameter optimization
self.model.fit(X, y)
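# A minimal usage sketch (comment-only; `source` is assumed to be a proxima
# BaseDataSource whose get_all_data() returns matched lists of ase.Atoms and targets):
#
#     engine = GAPSurrogate(max_kernel_size=128)
#     engine.retrain(source)        # refits the SOAP -> scalable-kernel -> BayesianRidge pipeline
#     energy = engine.infer(atoms)  # prediction for a single ase.Atoms object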
|
py | 1a4c5cc3b26b20726f97f2c5b2ef715a96b2f1a1 | # Generated by Django 2.0.5 on 2018-05-22 18:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ActorMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ActorName', models.CharField(max_length=255)),
('Coin', models.IntegerField(default=0)),
('Mint', models.IntegerField(default=0)),
('Exp', models.IntegerField(default=0)),
('Level', models.IntegerField(default=0)),
('BeginDay', models.IntegerField(default=0)),
('GameLogicDrierAddExp', models.IntegerField(default=0)),
('GameLogicLaserAddExp', models.IntegerField(default=0)),
('GameLogicDrierAddHappy', models.IntegerField(default=0)),
('GameLogicLaserAddHappy', models.IntegerField(default=0)),
('DailyTaskGetTime', models.CharField(max_length=255)),
('BuyMedicineNum', models.IntegerField(default=0)),
('ActorID', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='AnimationStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('AnimationName', models.CharField(max_length=255)),
('AnimationStateName', models.CharField(max_length=255)),
('AnimationParamName', models.CharField(blank=True, max_length=255, null=True)),
('AnimationParamVariable', models.FloatField(blank=True, default=0, null=True)),
('AnimationParam2Name', models.CharField(blank=True, max_length=255, null=True)),
('AnimationParam2Variable', models.FloatField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='AudioStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Describe', models.CharField(blank=True, max_length=255, null=True)),
('AudioType', models.IntegerField(default=0)),
('AudioName', models.CharField(max_length=255)),
('AudioVolumn', models.FloatField(default=0)),
('AudioLoopParamName', models.BooleanField(default=False)),
('Audio3DMinDistance', models.FloatField(blank=True, default=0, null=True)),
('Audio3DMaxDistance', models.FloatField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='CardBagStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CardBagType', models.IntegerField(default=0)),
('CardBagProbability', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='CardStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CardProbability', models.IntegerField(default=0)),
('CardRes', models.CharField(max_length=255)),
('CardStar', models.IntegerField(default=0)),
('RecycleCoin', models.IntegerField(default=0)),
('CardBagID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='CardBagID_Related', to='app.CardBagStatic')),
],
),
migrations.CreateModel(
name='CatLevelStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('LevelNum', models.IntegerField(default=0)),
('ExpNum', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='CatMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CatStaticID', models.IntegerField(default=0)),
('CatName', models.CharField(max_length=255)),
('CatAge', models.IntegerField(default=0)),
('LevelPoint', models.IntegerField(default=0)),
('HappyPoint', models.IntegerField(default=0)),
('HungryPoint', models.IntegerField(default=0)),
('HealthPoint', models.IntegerField(default=0)),
('ExpPoint', models.IntegerField(default=0)),
('WeightPoint', models.IntegerField(default=0)),
('CatBirthday', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='CatOwnMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('OwnItemType', models.IntegerField(default=0)),
('OwnItemID', models.IntegerField(default=0)),
('EuipOrNot', models.BooleanField(default=False)),
('OwnNum', models.IntegerField(default=0)),
('CatID', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='CatPathStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('TriggerHealthPointMin', models.IntegerField(default=0)),
('TriggerHealthPointMax', models.IntegerField(default=0)),
('TriggerCatLevelMin', models.IntegerField(default=0)),
('TriggerCatLevelMax', models.IntegerField(default=0)),
('TriggerToyCardConditionID1', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability1', models.IntegerField(blank=True, default=0, null=True)),
('TriggerToyCardConditionID2', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability2', models.IntegerField(blank=True, default=0, null=True)),
('TriggerToyCardConditionID3', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability3', models.IntegerField(blank=True, default=0, null=True)),
('TriggerToyCardConditionID4', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability4', models.IntegerField(blank=True, default=0, null=True)),
('TriggerToyCardConditionID5', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability5', models.IntegerField(blank=True, default=0, null=True)),
('TriggerToyCardConditionID6', models.IntegerField(blank=True, default=0, null=True)),
('TriggerCardProbability6', models.IntegerField(blank=True, default=0, null=True)),
('TriggerBaseCardBagID', models.ManyToManyField(related_name='TriggerBaseCardBagID_Related', to='app.CardBagStatic')),
],
),
migrations.CreateModel(
name='CatSoundStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CatID', models.TextField()),
('AnimationName', models.CharField(max_length=255)),
('SoundNameArray', models.TextField()),
],
),
migrations.CreateModel(
name='CatStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CatType', models.IntegerField(default=0)),
('CatBaseResource', models.CharField(max_length=255)),
('CatWholeBodyMeshName', models.CharField(max_length=255)),
('CatBodyNotEarMeshName', models.CharField(max_length=255)),
('CatPartResource', models.TextField()),
('IdleBehaviorParam', models.CharField(blank=True, max_length=255, null=True)),
('FeedBehaviorParam', models.CharField(blank=True, max_length=255, null=True)),
('LazerBehaviorParam', models.CharField(blank=True, max_length=255, null=True)),
('DryerBehaviorParam', models.CharField(blank=True, max_length=255, null=True)),
('HealthPoint', models.IntegerField(default=0)),
('HealthMaxPoint', models.IntegerField(default=0)),
('WeightPoint', models.IntegerField(default=0)),
('WeightMaxPoint', models.IntegerField(default=0)),
('ExpPoint', models.IntegerField(default=0)),
('ExpAddTimeSpan', models.IntegerField(default=0)),
('ExpAddNum', models.IntegerField(default=0)),
('HappyPointAddHealthPoint', models.IntegerField(default=0)),
('HungryPointAddHealthPoint', models.IntegerField(default=0)),
('HappyInitialPoint', models.IntegerField(default=0)),
('HappyMaxPoint', models.IntegerField(default=0)),
('HungryInitialPoint', models.IntegerField(default=0)),
('HungryMaxPoint', models.IntegerField(default=0)),
('LevelPoint', models.IntegerField(default=0)),
('MaxLevelPoint', models.IntegerField(blank=True, default=0, null=True)),
('NexLevelID', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='PrevLevelID', to='app.CatStatic')),
],
),
migrations.CreateModel(
name='ConstStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CatTriggerTime', models.IntegerField(default=0)),
('MaxRestoreCardNum', models.IntegerField(default=0)),
('BgSoundName_ARRoom', models.CharField(max_length=255)),
('BgSoundName_3DRoom', models.CharField(max_length=255)),
('BgSoundName_FittingRoom', models.CharField(max_length=255)),
('CommonSound_GetCoin', models.CharField(max_length=255)),
('CommonSound_GetCard', models.CharField(max_length=255)),
('CommonSound_OpenView', models.CharField(max_length=255)),
('CommonSound_GetSunperCard', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='CostumeStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CostumeType', models.IntegerField(default=0)),
('BodyWithEar', models.BooleanField(default=False)),
('CostumeResource', models.CharField(max_length=255)),
('CostumeHangPoint', models.CharField(max_length=255)),
('CostumeCharm', models.IntegerField(default=0)),
('CostumePrice', models.IntegerField(default=0)),
('MintPrice', models.IntegerField(default=0)),
('CostumeIconName', models.CharField(max_length=255)),
('CostumeMatchedCat', models.ManyToManyField(related_name='CostumeMatchedCat_Related', to='app.CatStatic')),
],
),
migrations.CreateModel(
name='DailyTaskRewardStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DayNum', models.IntegerField(blank=True, default=0, null=True)),
('CreditExp', models.IntegerField(blank=True, default=0, null=True)),
('CreditCoin', models.IntegerField(blank=True, default=0, null=True)),
('CreditMint', models.IntegerField(blank=True, default=0, null=True)),
('CreditPropID', models.IntegerField(blank=True, default=0, null=True)),
('CreditCostum', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='DailyTaskStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DailyTaskProbability', models.IntegerField(default=0)),
('EventType', models.IntegerField(default=0)),
('TaskEventNum', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='DialogStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DialogType', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='ExpAddStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('LevelPoint', models.IntegerField(default=0)),
('ExpPoint', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='FoodStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('FoodType', models.IntegerField(default=0)),
('UnlockCatAge', models.IntegerField(blank=True, default=0, null=True)),
('UnlockCoin', models.IntegerField(blank=True, default=0, null=True)),
('UnlockEnjoy', models.BooleanField(default=False)),
('Coin', models.IntegerField(default=0)),
('Mint', models.IntegerField(default=0)),
('HungryPoint', models.IntegerField(default=0)),
('HappyPoint', models.IntegerField(default=0)),
('LevelPoint', models.IntegerField(default=0)),
('HealthPoint', models.IntegerField(default=0)),
('FoodIconName', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='GameStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('GameType', models.IntegerField(default=0)),
('GameIcon', models.CharField(max_length=255)),
('GameResource', models.CharField(blank=True, max_length=255, null=True)),
('GameDescription', models.CharField(blank=True, max_length=255, null=True)),
('MinHungryNum', models.IntegerField(default=0)),
('HappyPerTime', models.IntegerField(default=0)),
('AddHappyNum', models.IntegerField(default=0)),
('MaxHappyNum', models.IntegerField(default=0)),
('ExpPerTime', models.IntegerField(default=0)),
('AddExpNum', models.IntegerField(default=0)),
('MaxExpNum', models.IntegerField(default=0)),
('GoldPerTime', models.IntegerField(default=0)),
('AddGoldNum', models.IntegerField(default=0)),
('MaxGoldNum', models.IntegerField(default=0)),
('LogicGameParam', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='GiftMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CoinNum', models.IntegerField(default=0)),
('CardStaticID', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='GoldMintShopStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ShopItemType', models.IntegerField(default=0)),
('ShopItemTagType', models.IntegerField(blank=True, default=0, null=True)),
('ShopItemPrice', models.IntegerField(blank=True, default=0, null=True)),
('ProductID', models.CharField(max_length=255)),
('ShopItemNum', models.IntegerField(blank=True, default=0, null=True)),
('ShopItemSequence', models.IntegerField(blank=True, default=0, null=True)),
('ShopItemCNPrice', models.CharField(blank=True, max_length=255, null=True)),
('ShopItemENPrice', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='LanguageStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('LanguageEngInfo', models.CharField(blank=True, max_length=255, null=True)),
('LanguageChnInfo', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='PropShopStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ShopItemType', models.IntegerField(default=0)),
('ShopItemID', models.IntegerField(default=0)),
('ShopItemPrice', models.IntegerField(default=0)),
('ShopItemNum', models.IntegerField(default=0)),
('ShopItemSequence', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='PropStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PropType', models.IntegerField(default=0)),
('PropIcon', models.CharField(max_length=255)),
('PropMaxNum', models.IntegerField(default=0)),
('Coin', models.IntegerField(default=0)),
('Mint', models.IntegerField(default=0)),
('TimeMin', models.IntegerField(default=0)),
('TimeMax', models.IntegerField(default=0)),
('GoldMin', models.IntegerField(default=0)),
('GoldMax', models.IntegerField(default=0)),
('PropDescirption', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='PropDescirption_Related', to='app.LanguageStatic')),
('PropName', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='PropName_Related', to='app.LanguageStatic')),
],
),
migrations.CreateModel(
name='SevenDaySignStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DayNum', models.IntegerField(default=0)),
('AddCoin', models.IntegerField(blank=True, default=0, null=True)),
('AddPropID', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='TaskEventMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('EventType', models.IntegerField(default=0)),
('EventParam', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='TaskOwnMobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('TaskType', models.IntegerField(default=0)),
('TaskID', models.IntegerField(default=0)),
('TaskFinish', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='UserGuideConditionStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('UserGuideConditionType', models.IntegerField(default=0)),
('ConditionParam', models.TextField()),
],
),
migrations.CreateModel(
name='UserGuideEventStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('UserGuideEventType', models.IntegerField(default=0)),
('EventParam', models.TextField()),
('NextGuideEventIDArray', models.ManyToManyField(related_name='PrevGuideEventIDArray', to='app.UserGuideEventStatic')),
],
),
migrations.CreateModel(
name='UserGuideStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('NextGuideID', models.ManyToManyField(related_name='PrevGuideID', to='app.UserGuideStatic')),
('UserGuideCondition', models.ManyToManyField(related_name='UserGuideCondition_Related', to='app.UserGuideConditionStatic')),
('UserGuideEvent', models.ManyToManyField(related_name='UserGuideEvent_Related', to='app.UserGuideEventStatic')),
],
),
migrations.CreateModel(
name='WeightAddStatic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('WeightNumHealthZeroPoint', models.IntegerField(default=0)),
('WeightNumHealthOnePoint', models.IntegerField(default=0)),
('WeightNumHealthTwoPoint', models.IntegerField(default=0)),
('WeightNumHealthThreePoint', models.IntegerField(default=0)),
('WeightNumHealthFourPoint', models.IntegerField(default=0)),
('WeightNumHealthFivePoint', models.IntegerField(default=0)),
],
),
migrations.AddField(
model_name='dialogstatic',
name='DialogInfo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='DialogInfo_Related', to='app.LanguageStatic'),
),
migrations.AddField(
model_name='dialogstatic',
name='DialogTitle',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='DialogTitle_Related', to='app.LanguageStatic'),
),
migrations.AddField(
model_name='dailytaskstatic',
name='DailyTaskName',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='DailyTaskName_Related', to='app.LanguageStatic'),
),
migrations.AddField(
model_name='costumestatic',
name='CostumeName',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='CostumeName_Related', to='app.LanguageStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerGrassID',
field=models.ManyToManyField(blank=True, null=True, related_name='TriggerGrassID_Related', to='app.PropStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerRareCardBagID',
field=models.ManyToManyField(blank=True, null=True, related_name='TriggerRareCardBagID_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID1_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID2',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID2_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID3',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID3_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID4',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID4_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID5',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID5_Related', to='app.CardBagStatic'),
),
migrations.AddField(
model_name='catpathstatic',
name='TriggerToyCardBagID6',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TriggerToyCardBagID6_Related', to='app.CardBagStatic'),
),
]
|
py | 1a4c5d58a5be9d0961562ea809534653da56bdfc | #coding=utf-8
#!/usr/bin/env python
"""
convert image to pdf file
"""
#Author: mrbeann <https://github.com/mrbeann/jpg2pdf>
import os
import sys
import glob
import platform
from reportlab.lib.pagesizes import letter, A4, landscape
from reportlab.platypus import SimpleDocTemplate, Image
from reportlab.lib.units import inch
from reportlab.pdfgen import canvas
from reportlab import rl_settings
import Image
reload(sys)
sys.setdefaultencoding("utf-8")
def topdf(path,recursion=None,pictureType=None,sizeMode=None,width=None,height=None,fit=None,save=None):
"""
Parameters
----------
path : string
path of the pictures
recursion : boolean
None or False for no recursion
        True to recurse into child folders
        whether to recurse into subfolders or not
pictureType : list
type of pictures,for example :jpg,png...
sizeMode : int
        None or 0: the pdf page size is the largest of all the pictures
        1: the pdf page size is the smallest of all the pictures
        2: the pdf page size is the given width and height
        chooses how the pdf page size is determined
width : int
width of the pdf page
height : int
height of the pdf page
fit : boolean
        None or False to scale each picture to the page size
        True to keep the original size of the pictures
        whether to keep the picture size or not
save : string
path to save the pdf
"""
print path
if platform.system() == 'Windows':
path = path.replace('\\','/')
if path[-1] != '/':
path = (path + '/')
print path
if recursion == True:
for i in os.listdir(path):
if os.path.isdir(os.path.abspath(os.path.join(path, i))):
topdf(path+i,recursion,pictureType,sizeMode,width,height,fit,save)
filelist = []
if pictureType == None:
filelist = glob.glob(os.path.join(path, '*.jpg'))
else:
for i in pictureType:
filelist.extend(glob.glob(os.path.join(path, '*.'+i)))
maxw = 0
maxh = 0
if sizeMode == None or sizeMode == 0:
for i in filelist:
print '----',i
im = Image.open(i)
if maxw < im.size[0]:
maxw = im.size[0]
if maxh < im.size[1]:
maxh = im.size[1]
elif sizeMode == 1:
maxw = 999999
maxh = 999999
for i in filelist:
im = Image.open(i)
if maxw > im.size[0]:
maxw = im.size[0]
if maxh > im.size[1]:
maxh = im.size[1]
else:
if width == None or height == None:
raise Exception("no width or height provid")
maxw = width
maxh = height
maxsize = (maxw,maxh)
if save == None:
filename_pdf = path + path.split('/')[-2]
else:
filename_pdf = save + path.split('/')[-2]
filename_pdf = filename_pdf.decode('utf8','ignore')
filename_pdf = filename_pdf + '.pdf'
print filename_pdf
c = canvas.Canvas(filename_pdf, pagesize=maxsize )
l = len(filelist)
for i in range(l):
print filelist[i]
(w, h) =maxsize
width, height = letter
if fit == True:
c.drawImage(filelist[i] , 0,0)
else:
c.drawImage(filelist[i] , 0,0,maxw,maxh)
c.showPage()
c.save()
print "end."
def main():
topdf(u'G:/R/jpg2pdf/test/新建文件夹',pictureType=['png','jpg'],save='G:/R/jpg2pdf/')
if __name__ == '__main__':
main() |
py | 1a4c5d94fdd37452dc50e5aaa0d5cef3a1bb4fae | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import time
import urllib
from tempest_lib import exceptions as lib_exc
from tempest.common import service_client
from tempest import exceptions
class OrchestrationClient(service_client.ServiceClient):
def list_stacks(self, params=None):
"""Lists all stacks for a user."""
uri = 'stacks'
if params:
uri += '?%s' % urllib.urlencode(params)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['stacks'])
def create_stack(self, name, disable_rollback=True, parameters=None,
timeout_mins=60, template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
environment,
files)
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_stack(self, stack_identifier, name, disable_rollback=True,
parameters=None, timeout_mins=60, template=None,
template_url=None, environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
environment)
uri = "stacks/%s" % stack_identifier
resp, body = self.put(uri, headers=headers, body=body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
def _prepare_update_create(self, name, disable_rollback=True,
parameters=None, timeout_mins=60,
template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
post_body = {
"stack_name": name,
"disable_rollback": disable_rollback,
"parameters": parameters,
"timeout_mins": timeout_mins,
"template": "HeatTemplateFormatVersion: '2012-12-12'\n",
"environment": environment,
"files": files
}
if template:
post_body['template'] = template
if template_url:
post_body['template_url'] = template_url
body = json.dumps(post_body)
# Password must be provided on stack create so that heat
# can perform future operations on behalf of the user
headers = self.get_headers()
headers['X-Auth-Key'] = self.password
headers['X-Auth-User'] = self.user
return headers, body
def get_stack(self, stack_identifier):
"""Returns the details of a single stack."""
url = "stacks/%s" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['stack'])
def suspend_stack(self, stack_identifier):
"""Suspend a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'suspend': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def resume_stack(self, stack_identifier):
"""Resume a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'resume': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def list_resources(self, stack_identifier):
"""Returns the details of a single resource."""
url = "stacks/%s/resources" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resources'])
def get_resource(self, stack_identifier, resource_name):
"""Returns the details of a single resource."""
url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['resource'])
def delete_stack(self, stack_identifier):
"""Deletes the specified Stack."""
resp, _ = self.delete("stacks/%s" % str(stack_identifier))
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$'):
"""Waits for a Resource to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.get_resource(
stack_identifier, resource_name)
except lib_exc.NotFound:
# ignore this, as the resource may not have
# been created yet
pass
else:
resource_name = body['resource_name']
resource_status = body['resource_status']
if resource_status == status:
return
if fail_regexp.search(resource_status):
raise exceptions.StackResourceBuildErrorException(
resource_name=resource_name,
stack_identifier=stack_identifier,
resource_status=resource_status,
resource_status_reason=body['resource_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Resource %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(resource_name,
status,
resource_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def wait_for_stack_status(self, stack_identifier, status,
failure_pattern='^.*_FAILED$'):
"""Waits for a Stack to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.get_stack(stack_identifier)
except lib_exc.NotFound:
if status == 'DELETE_COMPLETE':
return
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
return body
if fail_regexp.search(stack_status):
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack_status,
stack_status_reason=body['stack_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Stack %s failed to reach %s status (current: %s) '
'within the required time (%s s).' %
(stack_name, status, stack_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def show_resource_metadata(self, stack_identifier, resource_name):
"""Returns the resource's metadata."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/metadata'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['metadata'])
def list_events(self, stack_identifier):
"""Returns list of all events for a stack."""
url = 'stacks/{stack_identifier}/events'.format(**locals())
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def list_resource_events(self, stack_identifier, resource_name):
"""Returns list of all events for a resource from stack."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/events'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def show_event(self, stack_identifier, resource_name, event_id):
"""Returns the details of a single stack's event."""
url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
'/{event_id}'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['event'])
def show_template(self, stack_identifier):
"""Returns the template for the stack."""
url = ('stacks/{stack_identifier}/template'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def _validate_template(self, post_body):
"""Returns the validation request result."""
post_body = json.dumps(post_body)
resp, body = self.post('validate', post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def validate_template(self, template, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template': template,
'parameters': parameters,
}
return self._validate_template(post_body)
def validate_template_url(self, template_url, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template_url': template_url,
'parameters': parameters,
}
return self._validate_template(post_body)
def list_resource_types(self):
"""List resource types."""
resp, body = self.get('resource_types')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resource_types'])
def get_resource_type(self, resource_type_name):
"""Return the schema of a resource type."""
url = 'resource_types/%s' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def get_resource_type_template(self, resource_type_name):
"""Return the template of a resource type."""
url = 'resource_types/%s/template' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def create_software_config(self, name=None, config=None, group=None,
inputs=None, outputs=None, options=None):
headers, body = self._prep_software_config_create(
name, config, group, inputs, outputs, options)
url = 'software_configs'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_config(self, conf_id):
"""Returns a software configuration resource."""
url = 'software_configs/%s' % str(conf_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_config(self, conf_id):
"""Deletes a specific software configuration."""
url = 'software_configs/%s' % str(conf_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def create_software_deploy(self, server_id=None, config_id=None,
action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
None, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_software_deploy(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
deploy_id, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.put(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy_list(self):
"""Returns a list of all deployments."""
url = 'software_deployments'
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy(self, deploy_id):
"""Returns a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy_meta(self, server_id):
"""Return a config metadata for a specific server."""
url = 'software_deployments/metadata/%s' % server_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_deploy(self, deploy_id):
"""Deletes a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def _prep_software_config_create(self, name=None, conf=None, group=None,
inputs=None, outputs=None, options=None):
"""Prepares a software configuration body."""
post_body = {}
if name is not None:
post_body["name"] = name
if conf is not None:
post_body["config"] = conf
if group is not None:
post_body["group"] = group
if inputs is not None:
post_body["inputs"] = inputs
if outputs is not None:
post_body["outputs"] = outputs
if options is not None:
post_body["options"] = options
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None,
signal_transport=None):
"""Prepares a deployment create or update (if an id was given)."""
post_body = {}
if deploy_id is not None:
post_body["id"] = deploy_id
if server_id is not None:
post_body["server_id"] = server_id
if config_id is not None:
post_body["config_id"] = config_id
if action is not None:
post_body["action"] = action
if status is not None:
post_body["status"] = status
if input_values is not None:
post_body["input_values"] = input_values
if output_values is not None:
post_body["output_values"] = output_values
if status_reason is not None:
post_body["status_reason"] = status_reason
if signal_transport is not None:
post_body["signal_transport"] = signal_transport
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
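# A minimal usage sketch of the calls above (hypothetical: the 'client' instance,
# its base URL and credentials are assumed to be wired up by the surrounding test
# framework, and 'my_template' stands in for a real HOT/CFN template string):
#
#   stack = client.create_stack('demo-stack', template=my_template)
#   stack_id = stack['stack']['id']
#   client.wait_for_stack_status(stack_id, 'CREATE_COMPLETE')
#   client.list_resources(stack_id)
#   client.delete_stack(stack_id)
#   client.wait_for_stack_status(stack_id, 'DELETE_COMPLETE')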
|
py | 1a4c5e11f29cdf4c1c41bad1fd0fadcb9e9c9a30 | import plistlib
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, is_platform_windows
def get_wifi(files_found, report_folder, seeker):
data_list = []
file_found = str(files_found[0])
with open(file_found, "rb") as fp:
pl = plistlib.load(fp)
if 'List of known networks' in pl.keys():
for dic in pl['List of known networks']:
ssid = dic['SSID_STR']
bssid = ""
if 'BSSID' in dic.keys():
bssid = dic['BSSID']
netusage = ""
if 'networkUsage' in dic.keys():
netusage = str(dic['networkUsage'])
countrycode = ""
if '80211D_IE' in dic.keys():
for key2, val2 in dic['80211D_IE'].items():
if key2 == 'IE_KEY_80211D_COUNTRY_CODE':
countrycode = val2
devname = ""
mfr = ""
serialnum = ""
modelname = ""
if 'WPS_PROB_RESP_IE' in dic.keys():
for key3, val3 in dic['WPS_PROB_RESP_IE'].items():
if key3 == 'IE_KEY_WPS_DEV_NAME':
devname = val3
if key3 == 'IE_KEY_WPS_MANUFACTURER':
mfr = val3
if key3 == 'IE_KEY_WPS_SERIAL_NUM':
serialnum = val3
if key3 == 'IE_KEY_WPS_MODEL_NAME':
modelname = val3
lastjoined = ""
if 'lastJoined' in dic.keys():
lastjoined = str(dic['lastJoined'])
lastautojoined = ""
if 'lastAutoJoined' in dic.keys():
lastautojoined = str(dic['lastAutoJoined'])
enabled = ""
if 'enabled' in dic.keys():
enabled = str(dic['enabled'])
data_list.append((ssid, bssid, netusage, countrycode, devname, mfr, serialnum, modelname, lastjoined, lastautojoined, enabled))
if len(data_list) > 0:
report = ArtifactHtmlReport('Wifi')
report.start_artifact_report(report_folder, 'Wifi')
report.add_script()
data_headers = ('SSID','BSSID', 'Network usage', 'Country code', 'Device name', 'Manufacturer', 'Serial number', 'Model name', 'Last joined', 'Last autojoined', 'Enabled')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'Wifi'
tsv(report_folder, data_headers, data_list, tsvname)
else:
logfunc('No Networks data')
|
py | 1a4c5edda956e765f65fb6b716111fee22582622 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains a variety of miscellaneous functions
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey (USGS).
Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import sys
import os
import traceback
import pkg_resources
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from pathlib import Path
from datetime import datetime, timedelta
import pandas as pd
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSettings
def set_text(widget, text):
"""
set the text of a widget regardless of its base type
Parameters
----------
widget : QtGui:QWidget
This widget is a QLineEdit, QPlainTextEdit, QTextBrowser, or QComboBox
text : str
The text that will be inserted
Returns
-------
None
"""
if isinstance(widget, QLineEdit):
widget.setText(text)
widget.setCursorPosition(0)
if isinstance(widget, QPlainTextEdit):
widget.setPlainText(text)
if isinstance(widget, QTextBrowser):
widget.setText(text)
if isinstance(widget, QComboBox):
index = widget.findText(text, Qt.MatchFixedString)
if index >= 0:
widget.setCurrentIndex(index)
else:
widget.setEditText(text)
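# Example usage (hypothetical widgets; each supported type above follows the
# same call pattern):
#   line_edit = QLineEdit()
#   set_text(line_edit, "Rio Grande")   # text is set and the cursor reset to position 0
#   combo = QComboBox()
#   set_text(combo, "feet")             # selects the matching entry, otherwise sets it as edit text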
def launch_widget(Widget, title="", **kwargs):
"""
run a widget within its own application
Parameters
----------
Widget : QWidget subclass
The widget class to instantiate and show
title : str
The title to use for the application
Returns
-------
None
"""
try:
app = QApplication([])
app.title = title
widget = Widget(**kwargs)
widget.setWindowTitle(title)
widget.show()
sys.exit(app.exec_())
# return widget
except:
e = sys.exc_info()[0]
print('problem encountered', e)
print(traceback.format_exc())
# def get_resource_path(fname):
# """
#
# Parameters
# ----------
# fname : str
# filename that you would like to find
#
# Returns
# -------
# the full file path to the resource specified
# """
#
# if getattr(sys, 'frozen') and hasattr(sys, '_MEIPASS'):
#
# return pkg_resources.resource_filename('guanoeditor',
# 'DATA/{}'.format(fname))
# else:
# return pkg_resources.resource_filename('guanoeditor',
# 'resources/{}'.format(fname))
def set_window_icon(widget, remove_help=True):
"""
Add our default ducky icon to a widget
Parameters
----------
widget : PyQt widget
remove_help : Bool
Whether to remove the context-help (question mark) button from the title bar.
Returns
-------
None
"""
icon = QIcon(resource_path('icons/Ducky.ico'))  # get_resource_path is commented out above; use the module's resource_path helper
widget.setWindowIcon(icon)
if remove_help:
widget.setWindowFlags(Qt.Window |
Qt.CustomizeWindowHint |
Qt.WindowTitleHint |
Qt.WindowCloseButtonHint |
Qt.WindowStaysOnTopHint)
def get_setting(which, default=None):
"""
return a pymdwizard application setting
Parameters
----------
which: str
name of setting to return
Returns
-------
setting in native format, string, integer, etc
"""
settings = QSettings('USGS', 'guanoeditor')
if default is None:
return settings.value(which)
else:
return settings.value(which, default)
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
relative_path = relative_path.split('/')[-1]
return os.path.join(sys._MEIPASS, f"DATA/{relative_path}")
else:
return os.path.join(os.path.abspath('.'), relative_path)
def read_namespace(fname):
namespace_df = pd.read_csv(fname)
namespace_df = namespace_df[['tag', 'description', 'required', 'data_type', 'picklist']]
# namespace_df = namespace_df[namespace_df.tag.str.startswith('NABat|')]
namespace_df.picklist = namespace_df.picklist.fillna('')
namespace_dict = namespace_df.to_dict('records')
for thing in namespace_dict:
if thing['picklist']:
thing['picklist'] = thing['picklist'].split('|')
return namespace_dict
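# Sketch of what read_namespace() produces (hypothetical CSV row; only the five
# columns selected above are kept, and a '|'-delimited picklist becomes a list):
#   a row whose picklist cell is "ANPA|EPFU|LANO" comes back as
#   {'tag': ..., 'description': ..., 'required': ..., 'data_type': ...,
#    'picklist': ['ANPA', 'EPFU', 'LANO']}
#   while an empty picklist cell stays an empty string.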
def clean_name(fname):
if isinstance(fname, Path):
fname = str(fname)
f = Path(fname)
name = f.stem
extension = f.suffix
# Step 1: remove anything in square brackets
name = re.sub(r"\[.*\]", '', name)
# Step 2: replace any non word characters with underscores
name = re.sub(r"\W", '_', name)
# Step 3: replace multiple underscores with a single
name = re.sub(r"_+", '_', name)
# Step 4: replace underscore separated single digits
name = re.sub(r"_[0-9]_", '_', name)
# Step 5: remove non digit characters at the beginning of the file
name = re.sub(r"^\D+", '', name)
# Step 6: remove trailing _000, _001, _005, _0001 etc.
name = re.sub(r"_[0-9]{3,4}$", '', name)
return name + extension
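# Worked example (hypothetical detector file name) tracing the steps above:
#   clean_name('SWIFT01_20190622_211345_000.wav')
#     steps 1-4 leave 'SWIFT01_20190622_211345_000' unchanged
#     step 5 strips the leading non-digits -> '01_20190622_211345_000'
#     step 6 drops the trailing '_000'     -> '01_20190622_211345'
#   so the function returns '01_20190622_211345.wav'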
|
py | 1a4c5ef62880319aeb25984f9586aced728ee945 | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class MoneroAccountPublicAddress(p.MessageType):
def __init__(
self,
spend_public_key: bytes = None,
view_public_key: bytes = None,
) -> None:
self.spend_public_key = spend_public_key
self.view_public_key = view_public_key
@classmethod
def get_fields(cls) -> Dict:
return {
1: ('spend_public_key', p.BytesType, 0),
2: ('view_public_key', p.BytesType, 0),
}
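# Example construction (hypothetical key bytes; real values are the 32-byte
# public spend/view keys supplied by the wallet or device layer):
#   addr = MoneroAccountPublicAddress(
#       spend_public_key=bytes(32),
#       view_public_key=bytes(32),
#   )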
|
py | 1a4c5f646d066bd8c17775bf314b5705e577e3ea | """Requirements specific to SQLAlchemy's own unit tests.
"""
import sys
from sqlalchemy import exc
from sqlalchemy.sql import text
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import against
from sqlalchemy.testing.exclusions import fails_if
from sqlalchemy.testing.exclusions import fails_on
from sqlalchemy.testing.exclusions import fails_on_everything_except
from sqlalchemy.testing.exclusions import LambdaPredicate
from sqlalchemy.testing.exclusions import NotPredicate
from sqlalchemy.testing.exclusions import only_if
from sqlalchemy.testing.exclusions import only_on
from sqlalchemy.testing.exclusions import skip_if
from sqlalchemy.testing.exclusions import SpecPredicate
from sqlalchemy.testing.exclusions import succeeds_if
from sqlalchemy.testing.requirements import SuiteRequirements
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("mysql", "not supported by database"),
no_support("mariadb", "not supported by database"),
no_support("mssql", "not supported by database"),
]
)
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def enforces_check_constraints(self):
"""Target database must also enforce check constraints."""
return self.check_constraints + fails_on(
self._mysql_check_constraints_dont_exist,
"check constraints don't enforce on MySQL, MariaDB<10.2",
)
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return skip_if([no_support("sqlite", "not supported by database")])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(no_support("sqlite", "not supported by database"))
@property
def table_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for tables."""
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def index_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for indexes."""
# mariadb but not mysql, tested up to mysql 8
return only_on(["postgresql", "mariadb", "sqlite"])
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
["sqlite", "oracle"],
"target backend %(doesnt_support)s ON UPDATE CASCADE",
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except("sqlite", "oracle") + skip_if(
"mssql"
)
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key"""
return skip_if(["mssql"])
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(["oracle", "postgresql"])
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite", "oracle"])
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def comment_reflection(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle"])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if(
["firebird", "oracle", "mysql", "mariadb"],
"not supported by database",
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("oracle", "not supported by database"),
no_support("mssql", "not supported by database"),
no_support("sybase", "not supported by database"),
]
)
@property
def non_native_boolean_unconstrained(self):
"""target database is not native boolean and allows arbitrary integers
in its "bool" column"""
return skip_if(
[
LambdaPredicate(
lambda config: against(config, "mssql"),
"SQL Server drivers / odbc seem to change "
"their mind on this",
),
LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean,
"native boolean dialect",
),
]
)
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver")
@property
def qmark_paramstyle(self):
return only_on(
[
"firebird",
"sqlite",
"+pyodbc",
"+mxodbc",
"mysql+oursql",
"mariadb+oursql",
]
)
@property
def named_paramstyle(self):
return only_on(["sqlite", "oracle+cx_oracle"])
@property
def format_paramstyle(self):
return only_on(
[
"mysql+mysqldb",
"mysql+pymysql",
"mysql+cymysql",
"mysql+mysqlconnector",
"mariadb+mysqldb",
"mariadb+pymysql",
"mariadb+cymysql",
"mariadb+mysqlconnector",
"postgresql+pg8000",
]
)
@property
def pyformat_paramstyle(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pypostgresql",
"postgresql+pygresql",
"mysql+mysqlconnector",
"mysql+pymysql",
"mysql+cymysql",
"mariadb+mysqlconnector",
"mariadb+pymysql",
"mariadb+cymysql",
"mssql+pymssql",
]
)
@property
def no_quoting_special_bind_names(self):
"""Target database will quote bound parameter names, doesn't support
EXPANDING"""
return skip_if(["oracle"])
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(["firebird", self._sqlite_file_db], "not supported (?)")
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def temp_table_reflect_indexes(self):
return skip_if(
["mssql", "firebird", self._sqlite_file_db], "not supported (?)"
)
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus PostgreSQL, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence
to be explicitly added, including if the table was reflected.
"""
return skip_if(
["firebird", "oracle", "sybase"], "not supported by database"
)
@property
def insert_from_select(self):
return skip_if(["firebird"], "crashes for unknown reason")
@property
def fetch_rows_post_commit(self):
return skip_if(["firebird"], "not supported")
@property
def non_broken_binary(self):
"""target DBAPI must work fully with binary values"""
# see https://github.com/pymssql/pymssql/issues/504
return skip_if(["mssql+pymssql"])
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def tuple_in(self):
def _sqlite_tuple_in(config):
return against(
config, "sqlite"
) and config.db.dialect.dbapi.sqlite_version_info >= (3, 15, 0)
return only_on(
["mysql", "mariadb", "postgresql", _sqlite_tuple_in, "oracle"]
)
@property
def tuple_in_w_empty(self):
return self.tuple_in + skip_if(["oracle"])
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql", "mysql", "mariadb"], "no driver support")
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
# This is also true of some configurations of UnixODBC and probably
# win32 ODBC as well.
return skip_if(
[
no_support(
"sqlite",
"independent connections disabled "
"when :memory: connections are used",
),
exclude(
"mssql",
"<",
(9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections",
),
]
)
@property
def memory_process_intensive(self):
"""Driver is able to handle the memory tests which run in a subprocess
and iterate through hundreds of connections
"""
return skip_if(
[
no_support("oracle", "Oracle XE usually can't handle these"),
no_support("mssql+pyodbc", "MS ODBC drivers struggle"),
self._running_on_windows(),
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(
["mssql", "sybase"], "IDENTITY columns can't be updated"
)
@property
def isolation_level(self):
return only_on(
("postgresql", "sqlite", "mysql", "mariadb", "mssql", "oracle"),
"DBAPI has no isolation level support",
) + fails_on(
"postgresql+pypostgresql",
"pypostgresql bombs on multiple isolation level calls",
)
def get_isolation_levels(self, config):
levels = set(config.db.dialect._isolation_lookup)
if against(config, "sqlite"):
default = "SERIALIZABLE"
levels.add("AUTOCOMMIT")
elif against(config, "postgresql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "mysql"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mariadb"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mssql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "oracle"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
else:
raise NotImplementedError()
return {"default": default, "supported": levels}
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return self.isolation_level + only_if(
lambda config: "AUTOCOMMIT"
in self.get_isolation_levels(config)["supported"]
)
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if(
[
# no access to same table
no_support("mysql", "requires SUPER priv"),
no_support("mariadb", "requires SUPER priv"),
exclude("mysql", "<", (5, 0, 10), "not supported by database"),
]
)
@property
def sequences_as_server_defaults(self):
"""Target database must support SEQUENCE as a server side default."""
return only_on(
"postgresql", "doesn't support sequences as a server side default."
)
@property
def sql_expressions_inserted_as_primary_key(self):
return only_if([self.returning, self.sqlite])
@property
def computed_columns_on_update_returning(self):
return self.computed_columns + skip_if("oracle")
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if(
"oracle",
'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"',
)
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb"],
"Backend does not support UPDATE..FROM",
)
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb", "sybase"],
"Backend does not support DELETE..FROM",
)
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if(
self._mysql_not_mariadb_103,
'MySQL error 1093 "Cant specify target table '
'for update in FROM clause", resolved by MariaDB 10.3',
)
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if(
["sqlite", "sybase", ("mysql", "<", (5, 0, 3))],
"savepoints not supported",
)
@property
def savepoints_w_release(self):
return self.savepoints + skip_if(
["oracle", "mssql"],
"database doesn't support release of savepoint",
)
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if(["firebird"], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign
keys"""
return only_on(["postgresql", "mysql", "mariadb", "mssql"])
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return only_on(["postgresql"])
@property
def default_schema_name_switch(self):
return only_on(["postgresql", "oracle"])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql", "mysql", "mariadb", "sqlite", "oracle"
)
@property
def unique_constraint_reflection_no_index_overlap(self):
return (
self.unique_constraint_reflection
+ skip_if("mysql")
+ skip_if("mariadb")
+ skip_if("oracle")
)
@property
def check_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
@property
def indexes_with_expressions(self):
return only_on(["postgresql", "sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"]) + skip_if(self._sqlite_file_db)
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"]) + skip_if(
self._sqlite_file_db
)
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "mariadb", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
(
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
)
or (
not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (8,)
)
),
"mariadb>10.2",
"postgresql",
"mssql",
"oracle",
"sqlite>=3.8.3",
]
)
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(
["mysql", "mariadb", "sqlite", "postgresql+psycopg2", "mssql"]
)
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql", "mariadb"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
def pg_prepared_transaction(config):
if not against(config, "postgresql"):
return True
with config.db.connect() as conn:
try:
num = conn.scalar(
text(
"select cast(setting AS integer) from pg_settings "
"where name = 'max_prepared_transactions'"
)
)
except exc.OperationalError:
return False
else:
return num > 0
return skip_if(
[
no_support("firebird", "no SA implementation"),
no_support("mssql", "two-phase xact not supported by drivers"),
no_support(
"oracle", "two-phase xact not implemented in SQLA/oracle"
),
no_support(
"sqlite", "two-phase xact not supported by database"
),
no_support(
"sybase", "two-phase xact not supported by drivers/SQLA"
),
# in Ia3cbbf56d4882fcc7980f90519412f1711fae74d
# we are evaluating which modern MySQL / MariaDB versions
# can handle two-phase testing without too many problems
# no_support(
# "mysql",
# "recent MySQL communiity editions have too many issues "
# "(late 2016), disabling for now",
# ),
NotPredicate(
LambdaPredicate(
pg_prepared_transaction,
"max_prepared_transactions not available or zero",
)
),
]
)
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if(
["mysql", "mariadb"],
"still can't get recover to work w/ MariaDB / MySQL",
)
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
]
)
@property
def symbol_names_w_double_quote(self):
"""Target driver can create tables with a name like 'some " table'"""
return skip_if(
[no_support("oracle", "ORA-03001: unimplemented feature")]
)
@property
def emulated_lastrowid(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
"mssql",
)
@property
def emulated_lastrowid_even_with_sequences(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes, even if the table has a
Sequence on it.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""" "target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"mssql",
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
]
)
@property
def json_type(self):
return only_on(
[
lambda config: against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"mariadb>=10.2.7",
"postgresql >= 9.3",
self._sqlite_json,
"mssql",
]
)
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"mariadb",
"sqlite",
]
)
@property
def legacy_unconditional_json_extract(self):
"""Backend has a JSON_EXTRACT or similar function that returns a
valid JSON string in all cases.
Used to test a legacy feature and is not needed.
"""
return self.json_type + only_on(
["postgresql", "mysql", "mariadb", "sqlite"]
)
def _sqlite_file_db(self, config):
return against(config, "sqlite") and config.db.dialect._is_url_file_db(
config.db.url
)
def _sqlite_memory_db(self, config):
return against(
config, "sqlite"
) and not config.db.dialect._is_url_file_db(config.db.url)
def _sqlite_json(self, config):
if not against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.exec_driver_sql(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
).scalar()
== "bar"
)
except exc.DBAPIError:
return False
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
)
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
def broken_cx_oracle(config):
return (
against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver <= (6, 0, 2)
and config.db.dialect.cx_oracle_ver > (6,)
)
return fails_if(
[
("sqlite", None, None, "TODO"),
("firebird", None, None, "Precision must be from 1 to 18"),
("sybase+pysybase", None, None, "TODO"),
]
)
@property
def cast_precision_numerics_many_significant_digits(self):
"""same as precision_numerics_many_significant_digits but within the
context of a CAST statement (hello MySQL)
"""
return self.precision_numerics_many_significant_digits + fails_if(
"mysql"
)
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
("oracle", None, None, "driver doesn't do this automatically"),
(
"firebird",
None,
None,
"database and/or driver truncates decimal places.",
),
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if(
[
(
"mysql",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"mariadb",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"firebird",
None,
None,
"firebird FLOAT type isn't high precision",
),
]
)
@property
def floats_to_four_decimals(self):
return fails_if(
[
("mysql+oursql", None, None, "Floating point error"),
("mariadb+oursql", None, None, "Floating point error"),
(
"firebird",
None,
None,
"Firebird still has FP inaccuracy even "
"with only four decimal places",
),
]
)
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select(literal(expr))
)
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
return skip_if(("mssql+pyodbc", None, None, "crashes due to bug #351"))
@property
def duplicate_key_raises_integrity_error(self):
return exclusions.open()
def _has_pg_extension(self, name):
def check(config):
if not against(config, "postgresql"):
return False
count = (
config.db.connect(close_with_result=True)
.exec_driver_sql(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
.scalar()
)
return bool(count)
return only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def range_types(self):
def check_range_types(config):
if not against(
config, ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
):
return False
try:
config.db.connect(close_with_result=True).exec_driver_sql(
"select '[1,2)'::int4range;"
).scalar()
return True
except Exception:
return False
return only_if(check_range_types)
@property
def async_dialect(self):
"""dialect makes use of await_() to invoke operations on the DBAPI."""
return only_on(
LambdaPredicate(
lambda config: config.db.dialect.is_async,
"Async dialect required",
)
)
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "oracle_db_link"
),
"oracle_db_link option not specified in config",
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "postgres_test_db_link"
),
"postgres_test_db_link option not specified in config",
)
@property
def postgresql_jsonb(self):
return only_on("postgresql >= 9.4") + skip_if(
lambda config: config.db.dialect.driver == "pg8000"
and config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(["postgresql+psycopg2", "postgresql+psycopg2cffi"])
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pg8000",
]
)
@property
def percent_schema_names(self):
return skip_if(
["mysql+aiomysql", "mariadb+aiomysql"],
"see pr https://github.com/aio-libs/aiomysql/pull/545",
)
@property
def order_by_label_with_expression(self):
return fails_if(
[
(
"firebird",
None,
None,
"kinterbasdb doesn't send full type information",
),
("postgresql", None, None, "only simple labels allowed"),
("sybase", None, None, "only simple labels allowed"),
("mssql", None, None, "only simple labels allowed"),
]
)
def get_order_by_collation(self, config):
lookup = {
# will raise without quoting
"postgresql": "POSIX",
# note MySQL databases need to be created w/ utf8mb4 charset
# for the test suite
"mysql": "utf8mb4_bin",
"mariadb": "utf8mb4_bin",
"sqlite": "NOCASE",
# will raise *with* quoting
"mssql": "Latin1_General_CI_AS",
}
try:
return lookup[config.db.name]
except KeyError:
raise NotImplementedError()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(
self._has_mysql_on_windows, "Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(["mssql+pymssql"])
@property
def legacy_engine(self):
return exclusions.skip_if(lambda config: config.db._is_future)
@property
def ad_hoc_engines(self):
return (
exclusions.skip_if(
["oracle"],
"works, but Oracle just gets tired with "
"this much connection activity",
)
+ skip_if(self._sqlite_file_db)
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def pyodbc_fast_executemany(self):
def has_fastexecutemany(config):
if not against(config, "mssql+pyodbc"):
return False
if config.db.dialect._dbapi_version() < (4, 0, 19):
return False
with config.db.connect() as conn:
drivername = conn.connection.connection.getinfo(
config.db.dialect.dbapi.SQL_DRIVER_NAME
)
# on linux this is something like 'libmsodbcsql-13.1.so.9.2'.
# on Windows this is something like 'msodbcsql17.dll'.
return "msodbc" in drivername
return only_if(
has_fastexecutemany, "only on pyodbc > 4.0.19 w/ msodbc driver"
)
@property
def python_fixed_issue_8743(self):
return exclusions.skip_if(
lambda: sys.version_info < (2, 7, 8),
"Python issue 8743 fixed in Python 2.7.8",
)
@property
def granular_timezone(self):
"""the datetime.timezone class, or SQLAlchemy's port, supports
seconds and microseconds.
SQLAlchemy ported the Python 3.7 version for Python 2, so
it passes on that. For Python 3.6 and earlier, it is not supported.
"""
return exclusions.skip_if(
lambda: sys.version_info >= (3,) and sys.version_info < (3, 7)
)
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(
["oracle", "firebird"], "non-standard SELECT scalar syntax"
)
@property
def mysql_for_update(self):
return skip_if(
"mysql+mysqlconnector",
"lock-sensitive operations crash on mysqlconnector",
)
@property
def mysql_fsp(self):
return only_if(["mysql >= 5.6.4", "mariadb"])
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
@property
def mysql_zero_date(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "NO_ZERO_DATE" not in row[1]
return only_if(check)
@property
def mysql_non_strict(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "STRICT_TRANS_TABLES" not in row[1]
return only_if(check)
@property
def mysql_ngram_fulltext(self):
def check(config):
return (
against(config, "mysql")
and not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (5, 7)
)
return only_if(check)
def _mysql_80(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mysql
and config.db.dialect.server_version_info >= (8,)
)
def _mariadb_102(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info > (10, 2)
)
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, ["mysql", "mariadb"]):
if config.db.dialect._is_mariadb:
norm_version_info = (
config.db.dialect._mariadb_normalized_version_info
)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
def _mysql_check_constraints_exist(self, config):
# 1. we don't have mysql / mariadb or
# 2. we have mysql / mariadb that enforces check constraints
return not exclusions.against(
config, ["mysql", "mariadb"]
) or self._mysql_and_check_constraints_exist(config)
def _mysql_check_constraints_dont_exist(self, config):
# 1. we have mysql / mariadb and
# 2. they don't enforce check constraints
return not self._mysql_check_constraints_exist(config)
def _mysql_not_mariadb_102(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 2)
)
def _mysql_not_mariadb_103(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 3)
)
def _mysql_not_mariadb_104(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 4)
)
def _has_mysql_on_windows(self, config):
return (
against(config, ["mysql", "mariadb"])
) and config.db.dialect._detect_casing(config.db) == 1
def _has_mysql_fully_case_sensitive(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 0
)
@property
def postgresql_utf8_server_encoding(self):
def go(config):
if not against(config, "postgresql"):
return False
with config.db.connect() as conn:
enc = conn.exec_driver_sql("show server_encoding").scalar()
return enc.lower() == "utf8"
return only_if(go)
@property
def cxoracle6_or_greater(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver >= (6,)
)
@property
def oracle5x(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver < (6,)
)
@property
def computed_columns(self):
return skip_if(["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"])
@property
def python_profiling_backend(self):
return only_on([self._sqlite_memory_db])
@property
def computed_columns_stored(self):
return self.computed_columns + skip_if(["oracle", "firebird"])
@property
def computed_columns_virtual(self):
return self.computed_columns + skip_if(["postgresql", "firebird"])
@property
def computed_columns_default_persisted(self):
return self.computed_columns + only_if("postgresql")
@property
def computed_columns_reflect_persisted(self):
return self.computed_columns + skip_if("oracle")
@property
def regexp_match(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle", "sqlite"])
@property
def regexp_replace(self):
return only_on(["postgresql", "mysql>=8", "mariadb", "oracle"])
@property
def supports_distinct_on(self):
"""If a backend supports the DISTINCT ON in a select"""
return only_if(["postgresql"])
@property
def supports_for_update_of(self):
return only_if(lambda config: config.db.dialect.supports_for_update_of)
@property
def sequences_in_other_clauses(self):
"""sequences allowed in WHERE, GROUP BY, HAVING, etc."""
return skip_if(["mssql", "oracle"])
@property
def supports_lastrowid_for_expressions(self):
"""cursor.lastrowid works if an explicit SQL expression was used."""
return only_on(["sqlite", "mysql", "mariadb"])
@property
def supports_sequence_for_autoincrement_column(self):
"""for mssql, autoincrement means IDENTITY, not sequence"""
return skip_if("mssql")
@property
def identity_columns(self):
return only_if(["postgresql >= 10", "oracle >= 12", "mssql"])
@property
def identity_columns_standard(self):
return self.identity_columns + skip_if("mssql")
@property
def index_reflects_included_columns(self):
return only_on(["postgresql >= 11", "mssql"])
# mssql >= 11 -> >= MS_2012_VERSION
@property
def fetch_first(self):
return only_on(["postgresql", "mssql >= 11", "oracle >= 12"])
@property
def fetch_percent(self):
return only_on(["mssql >= 11", "oracle >= 12"])
@property
def fetch_ties(self):
return only_on(["postgresql >= 13", "mssql >= 11", "oracle >= 12"])
@property
def fetch_no_order_by(self):
return only_on(["postgresql", "oracle >= 12"])
@property
def fetch_offset_with_options(self):
return skip_if("mssql")
|
py | 1a4c6021d432fa802f40396ebc750bf945605dcb | #!/usr/bin/env python3
from PIL import Image
from struct import pack
def pre(p):
p = list(p)
p[0] = p[0]*p[3]//255
p[1] = p[1]*p[3]//255
p[2] = p[2]*p[3]//255
return p
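# Illustrative example (not in the original script): pre() premultiplies the RGB
# channels by the alpha channel, e.g. a half-transparent pure red pixel
#     pre((255, 0, 0, 128))  ->  [128, 0, 0, 128]    # 255 * 128 // 255 == 128
# so the BGRA bytes written by write() below are premultiplied-alpha pixels.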
def write(i, o, X, Y):
for y in range(Y):
for x in range(X):
p = pre(i.getpixel((x, y)))
o.write(pack('4B', p[2], p[1], p[0], p[3]))
i = Image.open('images/fish.png')
with open('fish.bin', 'wb') as o:
write(i, o, 100, 59)
i = Image.open('images/window.png')
with open('window.bin', 'wb') as o:
write(i, o, 320, 200)
|
py | 1a4c606bb09e868361ae67e94dae7e80599f5861 | # -*- coding: utf-8 -*-
# Module: default
# Author: joen, bodems
# Created on: 24.08.2017
# License: MIT
from __future__ import print_function
import json
import operator
import routing
import sys
import urllib
from urllib import urlencode
import urllib2
import urlparse
from urlparse import parse_qsl
import cookielib
import xbmcgui
import xbmcplugin
import xbmcaddon
addon = xbmcaddon.Addon('plugin.video.streama')
streamaurl = addon.getSetting('url')
username = addon.getSetting('username')
password = addon.getSetting('password')
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
login_data = urllib.urlencode({'username' : username, 'password' : password, 'remember_me' : 'on'})
opener.open(streamaurl + '/login/authenticate', login_data)
movies = opener.open(streamaurl + '/dash/listMovies.json')
movies_json = json.loads(movies.read())
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Free sample videos are provided by www.vidsplay.com
# Here we use a fixed set of properties simply for demonstration purposes
# In a "real life" plugin you will need to get info and links to video files/streams
# from some web-site or online service.
VIDEOS = {'New': [{'name': 'Crab',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/crab-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/crab.mp4',
'genre': 'Animals'},
{'name': 'Alligator',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/alligator-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/alligator.mp4',
'genre': 'Animals'},
{'name': 'Turtle',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/04/turtle-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/04/turtle.mp4',
'genre': 'Animals'}
],
'Movies': [{'name': 'Postal Truck',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/us_postal-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/us_postal.mp4',
'genre': 'Cars'},
{'name': 'Traffic',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic1-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic1.mp4',
'genre': 'Cars'},
{'name': 'Traffic Arrows',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic_arrows-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/traffic_arrows.mp4',
'genre': 'Cars'}
],
'Shows': [{'name': 'Chicken',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbq_chicken-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbqchicken.mp4',
'genre': 'Food'},
{'name': 'Hamburger',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger.mp4',
'genre': 'Food'},
{'name': 'Pizza',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza.mp4',
'genre': 'Food'}
],
'Genres': [{'name': 'Chicken',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbq_chicken-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/bbqchicken.mp4',
'genre': 'Food'},
{'name': 'Hamburger',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/hamburger.mp4',
'genre': 'Food'},
{'name': 'Pizza',
'thumb': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza-screenshot.jpg',
'video': 'http://www.vidsplay.com/wp-content/uploads/2017/05/pizza.mp4',
'genre': 'Food'}
]}
# STRVIDEOS = opener.open('https://streama.example.net/dash/listGenres.json')
# streama output genres
#[{"id":1,"apiId":28,"name":"Action"},{"id":2,"apiId":12,"name":"Adventure"},{"id":3,"apiId":16,"name":"Animation"},{"id":4,"apiId":35,"name":"Comedy"},{"id":5,"apiId":80,"name":"Crime"},{"id":6,"apiId":99,"name":"Documentary"},{"id":7,"apiId":18,"name":"Drama"},{"id":8,"apiId":10751,"name":"Family"},{"id":9,"apiId":14,"name":"Fantasy"},{"id":10,"apiId":36,"name":"History"},{"id":11,"apiId":27,"name":"Horror"},{"id":12,"apiId":10402,"name":"Music"},{"id":13,"apiId":9648,"name":"Mystery"},{"id":14,"apiId":10749,"name":"Romance"},{"id":15,"apiId":878,"name":"Science Fiction"},{"id":16,"apiId":10770,"name":"TV Movie"},{"id":17,"apiId":53,"name":"Thriller"},{"id":18,"apiId":10752,"name":"War"},{"id":19,"apiId":37,"name":"Western"},{"id":20,"apiId":10759,"name":"Action & Adventure"},{"id":21,"apiId":10762,"name":"Kids"},{"id":22,"apiId":10763,"name":"News"},{"id":23,"apiId":10764,"name":"Reality"},{"id":24,"apiId":10765,"name":"Sci-Fi & Fantasy"},{"id":25,"apiId":10766,"name":"Soap"},{"id":26,"apiId":10767,"name":"Talk"},{"id":27,"apiId":10768,"name":"War & Politics"}]
def get_url(**kwargs):
"""
Create a URL for calling the plugin recursively from the given set of keyword arguments.
:param kwargs: "argument=value" pairs
:type kwargs: dict
:return: plugin call URL
:rtype: str
"""
return '{0}?{1}'.format(_url, urlencode(kwargs))
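# Illustrative usage (not part of the original add-on); assuming the add-on is
# invoked as plugin://plugin.video.streama/, a call such as
#     get_url(action='listing', category='Movies')
# returns something like
#     'plugin://plugin.video.streama/?action=listing&category=Movies'
# (the exact parameter order follows the kwargs passed to urlencode).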
def get_categories():
"""
Get the list of video categories.
Here you can insert some parsing code that retrieves
the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:return: The list of video categories
:rtype: list
"""
# NOTE: 'movies' is the raw urllib2 response object, not a dict, so it has no
# iterkeys(); list_categories() below indexes the demo VIDEOS dict, so return its keys.
return VIDEOS.iterkeys()
# return movies.iterkeys()
# return STRVIDEOS.iterkeys()
def get_videos(category):
"""
Get the list of videofiles/streams.
Here you can insert some parsing code that retrieves
the list of video streams in the given category from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:param category: Category name
:type category: str
:return: the list of videos in the category
:rtype: list
"""
return VIDEOS[category]
def list_categories():
"""
Create the list of video categories in the Kodi interface.
"""
# Get video categories
categories = get_categories()
# Iterate through categories
for category in categories:
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category)
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': VIDEOS[category][0]['thumb'],
'icon': VIDEOS[category][0]['thumb'],
'fanart': VIDEOS[category][0]['thumb']})
# Set additional info for the list item.
# Here we use a category name for both properties for simplicity's sake.
# setInfo allows you to set various pieces of information for an item.
# For available properties see the following link:
# http://mirrors.xbmc.org/docs/python-docs/15.x-isengard/xbmcgui.html#ListItem-setInfo
list_item.setInfo('video', {'title': category, 'genre': category})
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=listing&category=Animals
url = get_url(action='listing', category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
"""
Create the list of playable videos in the Kodi interface.
:param category: Category name
:type category: str
"""
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through videos.
for video in videos:
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video['name'])
# Set additional info for the list item.
list_item.setInfo('video', {'title': video['name'], 'genre': video['genre']})
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': video['thumb'], 'icon': video['thumb'], 'fanart': video['thumb']})
# Set 'IsPlayable' property to 'true'.
# This is mandatory for playable items!
list_item.setProperty('IsPlayable', 'true')
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=play&video=http://www.vidsplay.com/wp-content/uploads/2017/04/crab.mp4
url = get_url(action='play', video=video['video'])
# Add the list item to a virtual Kodi folder.
# is_folder = False means that this item won't open any sub-list.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
"""
Play a video by the provided path.
:param path: Fully-qualified video URL
:type path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring: URL encoded plugin paramstring
:type paramstring: str
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the provided paramstring does not contain a supported action
# we raise an exception. This helps to catch coding errors,
# e.g. typos in action names.
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
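# Illustrative example (an assumption, not from the original source): when Kodi opens
# a folder of this plugin it calls the script roughly as
#     sys.argv = ['plugin://plugin.video.streama/', '5', '?action=listing&category=Movies']
# so sys.argv[2][1:] == 'action=listing&category=Movies', which parse_qsl() turns
# into {'action': 'listing', 'category': 'Movies'} inside router().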
# cj = cookielib.CookieJar()
# opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# login_data = urllib.urlencode({'username' : username, 'password' : password, 'remember_me' : 'on'})
# opener.open('https://streama.example.net/login/authenticate', login_data)
# shows = opener.open('https://streama.example.net/dash/listShows.json')
# movies = opener.open('https://streama.example.net/dash/listMovies.json')
# genericmovies = opener.open('https://streama.example.net/dash/listGenericVideos.json')
# genres = opener.open('https://streama.example.net/dash/listGenres.json')
# https://streama.example.net/tvShow/episodesForTvShow.json?id=35
# https://streama.example.net/video/show.json?id=130
# https://streama.example.net/dash/searchMedia.json?query=crowd
# print shows.read()
# print movies.read()
# print genericmovies.read()
# print genres.read()
|
py | 1a4c60bf31e128d880fa1e03bb7cab2ee38947d2 | #!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.newsunpack
"""
import os
import sys
import re
import subprocess
import logging
import time
import binascii
import shutil
import functools
from subprocess import Popen
import sabnzbd
from sabnzbd.encoding import platform_btou
import sabnzbd.utils.rarfile as rarfile
from sabnzbd.misc import format_time_string, find_on_path, int_conv, \
get_all_passwords, calc_age, cmp, caller_name
from sabnzbd.filesystem import make_script_path, real_path, globber, globber_full, \
renamer, clip_path, long_path, remove_file, recursive_listdir, setname_from_path
from sabnzbd.sorting import SeriesSorter
import sabnzbd.cfg as cfg
from sabnzbd.constants import Status
if sabnzbd.WIN32:
try:
import win32api
import win32con
import win32process
# Define scheduling priorities
WIN_SCHED_PRIOS = {1: win32process.IDLE_PRIORITY_CLASS, 2: win32process.BELOW_NORMAL_PRIORITY_CLASS,
3: win32process.NORMAL_PRIORITY_CLASS, 4: win32process.ABOVE_NORMAL_PRIORITY_CLASS,}
except ImportError:
pass
else:
# Define dummy WindowsError for non-Windows
class WindowsError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
# Regex globals
RAR_RE = re.compile(r'\.(?P<ext>part\d*\.rar|rar|r\d\d|s\d\d|t\d\d|u\d\d|v\d\d|\d\d\d?\d)$', re.I)
RAR_RE_V3 = re.compile(r'\.(?P<ext>part\d*)$', re.I)
LOADING_RE = re.compile(r'^Loading "(.+)"')
TARGET_RE = re.compile(r'^(?:File|Target): "(.+)" -')
EXTRACTFROM_RE = re.compile(r'^Extracting\sfrom\s(.+)')
EXTRACTED_RE = re.compile(r'^(Extracting|Creating|...)\s+(.*?)\s+OK\s*$')
SPLITFILE_RE = re.compile(r'\.(\d\d\d?\d$)', re.I)
ZIP_RE = re.compile(r'\.(zip$)', re.I)
SEVENZIP_RE = re.compile(r'\.7z$', re.I)
SEVENMULTI_RE = re.compile(r'\.7z\.\d+$', re.I)
TS_RE = re.compile(r'\.(\d+)\.(ts$)', re.I)
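# Illustrative examples of what these patterns match (not part of the original module):
#   RAR_RE:       'movie.part01.rar', 'movie.rar', 'movie.r00', 'movie.001'
#   RAR_RE_V3:    a set name that still ends in '.part01' (old-style volume naming)
#   SPLITFILE_RE: 'movie.001', 'movie.0001'   (classic split/joinable segments)
#   TS_RE:        'show.1.ts', 'show.2.ts'    (numbered MPEG-TS segments)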
PAR2_COMMAND = None
MULTIPAR_COMMAND = None
RAR_COMMAND = None
NICE_COMMAND = None
ZIP_COMMAND = None
SEVEN_COMMAND = None
IONICE_COMMAND = None
RAR_PROBLEM = False
PAR2_MT = True
RAR_VERSION = 0
def find_programs(curdir):
""" Find external programs """
def check(path, program):
p = os.path.abspath(os.path.join(path, program))
if os.access(p, os.X_OK):
return p
else:
return None
if sabnzbd.DARWIN:
sabnzbd.newsunpack.PAR2_COMMAND = check(curdir, 'osx/par2/par2-sl64')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'osx/unrar/unrar')
sabnzbd.newsunpack.SEVEN_COMMAND = check(curdir, 'osx/7zip/7za')
if sabnzbd.WIN32:
if sabnzbd.WIN64:
# 64 bit versions
sabnzbd.newsunpack.MULTIPAR_COMMAND = check(curdir, 'win/par2/multipar/par2j64.exe')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'win/unrar/x64/UnRAR.exe')
else:
# 32 bit versions
sabnzbd.newsunpack.MULTIPAR_COMMAND = check(curdir, 'win/par2/multipar/par2j.exe')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'win/unrar/UnRAR.exe')
sabnzbd.newsunpack.PAR2_COMMAND = check(curdir, 'win/par2/par2.exe')
sabnzbd.newsunpack.SEVEN_COMMAND = check(curdir, 'win/7zip/7za.exe')
else:
if not sabnzbd.newsunpack.PAR2_COMMAND:
sabnzbd.newsunpack.PAR2_COMMAND = find_on_path('par2')
if not sabnzbd.newsunpack.RAR_COMMAND:
sabnzbd.newsunpack.RAR_COMMAND = find_on_path(('unrar', 'rar', 'unrar3', 'rar3',))
sabnzbd.newsunpack.NICE_COMMAND = find_on_path('nice')
sabnzbd.newsunpack.IONICE_COMMAND = find_on_path('ionice')
if not sabnzbd.newsunpack.ZIP_COMMAND:
sabnzbd.newsunpack.ZIP_COMMAND = find_on_path('unzip')
if not sabnzbd.newsunpack.SEVEN_COMMAND:
sabnzbd.newsunpack.SEVEN_COMMAND = find_on_path('7za')
if not sabnzbd.newsunpack.SEVEN_COMMAND:
sabnzbd.newsunpack.SEVEN_COMMAND = find_on_path('7z')
if not (sabnzbd.WIN32 or sabnzbd.DARWIN):
# Run check on rar version
version, original = unrar_check(sabnzbd.newsunpack.RAR_COMMAND)
sabnzbd.newsunpack.RAR_PROBLEM = not original or version < sabnzbd.constants.REC_RAR_VERSION
sabnzbd.newsunpack.RAR_VERSION = version
# Run check on par2-multicore
sabnzbd.newsunpack.PAR2_MT = par2_mt_check(sabnzbd.newsunpack.PAR2_COMMAND)
ENV_NZO_FIELDS = ['bytes', 'bytes_downloaded', 'bytes_tried', 'cat', 'duplicate', 'encrypted',
'fail_msg', 'filename', 'final_name', 'group', 'nzo_id', 'oversized', 'password', 'pp',
'priority', 'repair', 'script', 'status', 'unpack', 'unwanted_ext', 'url']
def external_processing(extern_proc, nzo, complete_dir, nicename, status):
""" Run a user postproc script, return console output and exit value """
failure_url = nzo.nzo_info.get('failure', '')
# Items can be bool or null, causing POpen to fail
command = [str(extern_proc), str(complete_dir), str(nzo.filename), str(nicename), '',
str(nzo.cat), str(nzo.group), str(status), str(failure_url)]
# Add path to original NZB
nzb_paths = globber_full(nzo.workpath, '*.gz')
# Fields not in the NZO directly
extra_env_fields = {'failure_url': failure_url,
'complete_dir': complete_dir,
'pp_status': status,
'download_time': nzo.nzo_info.get('download_time', ''),
'avg_bps': int(nzo.avg_bps_total / nzo.avg_bps_freq) if nzo.avg_bps_freq else 0,
'age': calc_age(nzo.avg_date),
'orig_nzb_gz': clip_path(nzb_paths[0]) if nzb_paths else ''}
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env(nzo, extra_env_fields)
logging.info('Running external script %s(%s, %s, %s, %s, %s, %s, %s, %s)',
extern_proc, complete_dir, nzo.filename, nicename, '', nzo.cat, nzo.group, status, failure_url)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, env=env, creationflags=creationflags)
# Follow the output, so we can abort it
proc = p.stdout
if p.stdin:
p.stdin.close()
lines = []
while 1:
line = platform_btou(proc.readline())
if not line:
break
line = line.strip()
lines.append(line)
# Show current line in history
nzo.set_action_line(T('Running script'), line)
# Check if we should still continue
if not nzo.pp_active:
p.kill()
lines.append(T('PostProcessing was aborted (%s)') % T('Script'))
# Print at least what we got
output = '\n'.join(lines)
return output, 1
except:
logging.debug("Failed script %s, Traceback: ", extern_proc, exc_info=True)
return "Cannot run script %s\r\n" % extern_proc, -1
output = '\n'.join(lines)
ret = p.wait()
return output, ret
def external_script(script, p1, p2, p3=None, p4=None):
""" Run a user script with two parameters, return console output and exit value """
command = [script, p1, p2, p3, p4]
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env()
logging.info('Running user script %s(%s, %s)', script, p1, p2)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, env=env, creationflags=creationflags)
except:
logging.debug("Failed script %s, Traceback: ", script, exc_info=True)
return "Cannot run script %s\r\n" % script, -1
output = platform_btou(p.stdout.read())
ret = p.wait()
return output, ret
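# Illustrative usage (an assumption, not from the original source):
#     output, exit_code = external_script('/path/to/notify.sh', 'param1', 'param2')
# 'output' is the combined stdout/stderr text and 'exit_code' the script's return
# code (-1 if the script could not be started at all).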
def unpack_magic(nzo, workdir, workdir_complete, dele, one_folder, joinables, zips, rars, sevens, ts, depth=0):
""" Do a recursive unpack from all archives in 'workdir' to 'workdir_complete' """
if depth > 5:
logging.warning(T('Unpack nesting too deep [%s]'), nzo.final_name)
return False, []
depth += 1
if depth == 1:
# First time, ignore anything in workdir_complete
xjoinables, xzips, xrars, xsevens, xts = build_filelists(workdir)
else:
xjoinables, xzips, xrars, xsevens, xts = build_filelists(workdir, workdir_complete, check_both=dele)
force_rerun = False
newfiles = []
error = None
new_joins = new_ts = None
if cfg.enable_filejoin():
new_joins = [jn for jn in xjoinables if jn not in joinables]
if new_joins:
logging.info('Filejoin starting on %s', workdir)
error, newf = file_join(nzo, workdir, workdir_complete, dele, new_joins)
if newf:
newfiles.extend(newf)
logging.info('Filejoin finished on %s', workdir)
if cfg.enable_unrar():
new_rars = [rar for rar in xrars if rar not in rars]
if new_rars:
logging.info('Unrar starting on %s', workdir)
error, newf = rar_unpack(nzo, workdir, workdir_complete, dele, one_folder, new_rars)
if newf:
newfiles.extend(newf)
logging.info('Unrar finished on %s', workdir)
if cfg.enable_7zip():
new_sevens = [seven for seven in xsevens if seven not in sevens]
if new_sevens:
logging.info('7za starting on %s', workdir)
error, newf = unseven(nzo, workdir, workdir_complete, dele, one_folder, new_sevens)
if newf:
newfiles.extend(newf)
logging.info('7za finished on %s', workdir)
if cfg.enable_unzip():
new_zips = [zip for zip in xzips if zip not in zips]
if new_zips:
logging.info('Unzip starting on %s', workdir)
if SEVEN_COMMAND:
error, newf = unseven(nzo, workdir, workdir_complete, dele, one_folder, new_zips)
else:
error, newf = unzip(nzo, workdir, workdir_complete, dele, one_folder, new_zips)
if newf:
newfiles.extend(newf)
logging.info('Unzip finished on %s', workdir)
if cfg.enable_tsjoin():
new_ts = [_ts for _ts in xts if _ts not in ts]
if new_ts:
logging.info('TS Joining starting on %s', workdir)
error, newf = file_join(nzo, workdir, workdir_complete, dele, new_ts)
if newf:
newfiles.extend(newf)
logging.info('TS Joining finished on %s', workdir)
# Refresh history and set output
nzo.set_action_line()
# Only re-run if something was unpacked and it was success
rerun = error in (False, 0)
# During a Retry we might miss files that failed during recursive unpack
if nzo.reuse and depth == 1 and any(build_filelists(workdir, workdir_complete)):
rerun = True
# We can't recursive unpack on long paths on Windows
# See: https://github.com/sabnzbd/sabnzbd/pull/771
if sabnzbd.WIN32 and len(workdir_complete) > 256:
rerun = False
# Double-check that we didn't miss any files in workdir
# But only if dele=True, otherwise of course there will be files left
if rerun and dele and depth == 1 and any(build_filelists(workdir)):
force_rerun = True
# Clear lists to force re-scan of files
xjoinables, xzips, xrars, xsevens, xts = ([], [], [], [], [])
if rerun and (cfg.enable_recursive() or new_ts or new_joins or force_rerun):
z, y = unpack_magic(nzo, workdir, workdir_complete, dele, one_folder,
xjoinables, xzips, xrars, xsevens, xts, depth)
if z:
error = z
if y:
newfiles.extend(y)
return error, newfiles
##############################################################################
# Filejoin Functions
##############################################################################
def match_ts(file):
""" Return True if file is a joinable TS file """
match = TS_RE.search(file)
if not match:
return False, '', 0
num = int(match.group(1))
try:
set = file[:match.start()]
set += '.ts'
except:
set = ''
return match, set, num
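# Illustrative example (not from the original source):
#     match_ts('episode.3.ts')  ->  (<re.Match object>, 'episode.ts', 3)
#     match_ts('episode.rar')   ->  (False, '', 0)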
def clean_up_joinables(names):
""" Remove joinable files and their .1 backups """
for name in names:
if os.path.exists(name):
try:
remove_file(name)
except:
pass
name1 = name + ".1"
if os.path.exists(name1):
try:
remove_file(name1)
except:
pass
def get_seq_number(name):
""" Return sequence number if name as an int """
head, tail = os.path.splitext(name)
if tail == '.ts':
match, set, num = match_ts(name)
else:
num = tail[1:]
if num.isdigit():
return int(num)
else:
return 0
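# Illustrative example (not from the original source):
#     get_seq_number('movie.avi.001')  ->  1    (tail '.001' is numeric)
#     get_seq_number('show.2.ts')      ->  2    (delegated to match_ts)
#     get_seq_number('movie.rar')      ->  0    (no numeric sequence suffix)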
def file_join(nzo, workdir, workdir_complete, delete, joinables):
""" Join and joinable files in 'workdir' to 'workdir_complete' and
when successful, delete originals
"""
newfiles = []
bufsize = 24 * 1024 * 1024
# Create matching sets from the list of files
joinable_sets = {}
joinable_set = None
for joinable in joinables:
head, tail = os.path.splitext(joinable)
if tail == '.ts':
head = match_ts(joinable)[1]
if head not in joinable_sets:
joinable_sets[head] = []
joinable_sets[head].append(joinable)
logging.debug("joinable_sets: %s", joinable_sets)
try:
# Handle each set
for joinable_set in joinable_sets:
current = joinable_sets[joinable_set]
joinable_sets[joinable_set].sort()
# If par2 already did the work, just remove the files
if os.path.exists(joinable_set):
logging.debug("file_join(): Skipping %s, (probably) joined by par2", joinable_set)
if delete:
clean_up_joinables(current)
# done, go to next set
continue
# Only join when there is more than one file
size = len(current)
if size < 2:
continue
# Prepare joined file
filename = joinable_set
if workdir_complete:
filename = filename.replace(workdir, workdir_complete)
logging.debug("file_join(): Assembling %s", filename)
joined_file = open(filename, 'ab')
# Join the segments
n = get_seq_number(current[0])
seq_error = n > 1
for joinable in current:
if get_seq_number(joinable) != n:
seq_error = True
perc = (100.0 / size) * n
logging.debug("Processing %s", joinable)
nzo.set_action_line(T('Joining'), '%.0f%%' % perc)
f = open(joinable, 'rb')
shutil.copyfileobj(f, joined_file, bufsize)
f.close()
if delete:
remove_file(joinable)
n += 1
# Remove any remaining .1 files
clean_up_joinables(current)
# Finish up
joined_file.flush()
joined_file.close()
newfiles.append(filename)
setname = setname_from_path(joinable_set)
if seq_error:
msg = T('Incomplete sequence of joinable files')
nzo.fail_msg = T('File join of %s failed') % setname
nzo.set_unpack_info('Filejoin', T('[%s] Error "%s" while joining files') % (setname, msg))
logging.error(T('Error "%s" while running file_join on %s'), msg, nzo.final_name)
return True, []
else:
msg = T('[%s] Joined %s files') % (joinable_set, size)
nzo.set_unpack_info('Filejoin', msg, setname)
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('File join of %s failed') % msg
nzo.set_unpack_info('Filejoin', T('[%s] Error "%s" while joining files') % (setname_from_path(joinable_set), msg))
logging.error(T('Error "%s" while running file_join on %s'), msg, nzo.final_name)
return True, []
return False, newfiles
##############################################################################
# (Un)Rar Functions
##############################################################################
def rar_unpack(nzo, workdir, workdir_complete, delete, one_folder, rars):
""" Unpack multiple sets 'rars' of RAR files from 'workdir' to 'workdir_complete.
When 'delete' is set, originals will be deleted.
When 'one_folder' is set, all files will be in a single folder
"""
newfiles = extracted_files = []
rar_sets = {}
for rar in rars:
rar_set = setname_from_path(rar)
if RAR_RE_V3.search(rar_set):
# Remove the ".partXX" part
rar_set = os.path.splitext(rar_set)[0]
if rar_set not in rar_sets:
rar_sets[rar_set] = []
rar_sets[rar_set].append(rar)
logging.debug('Rar_sets: %s', rar_sets)
for rar_set in rar_sets:
# Run the RAR extractor
rar_sets[rar_set].sort(key=functools.cmp_to_key(rar_sort))
rarpath = rar_sets[rar_set][0]
if workdir_complete and rarpath.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(rarpath)[0]
# Is the direct-unpacker still running? We wait for it
if nzo.direct_unpacker:
wait_count = 0
last_stats = nzo.direct_unpacker.get_formatted_stats()
while nzo.direct_unpacker.is_alive():
logging.debug('DirectUnpacker still alive for %s: %s', nzo.final_name, last_stats)
# Bump the file-lock in case it's stuck
with nzo.direct_unpacker.next_file_lock:
nzo.direct_unpacker.next_file_lock.notify()
time.sleep(2)
# Did something change? Might be stuck
if last_stats == nzo.direct_unpacker.get_formatted_stats():
wait_count += 1
if wait_count > 60:
# We abort after 2 minutes of no changes
nzo.direct_unpacker.abort()
else:
wait_count = 0
last_stats = nzo.direct_unpacker.get_formatted_stats()
# Did we already direct-unpack it? Not when recursive-unpacking
if nzo.direct_unpacker and rar_set in nzo.direct_unpacker.success_sets:
logging.info("Set %s completed by DirectUnpack", rar_set)
fail = False
success = True
rars, newfiles = nzo.direct_unpacker.success_sets.pop(rar_set)
else:
logging.info("Extracting rarfile %s (belonging to %s) to %s",
rarpath, rar_set, extraction_path)
try:
fail, newfiles, rars = rar_extract(rarpath, len(rar_sets[rar_set]),
one_folder, nzo, rar_set, extraction_path)
# Was it aborted?
if not nzo.pp_active:
fail = True
break
success = not fail
except:
success = False
fail = True
msg = sys.exc_info()[1]
nzo.fail_msg = T('Unpacking failed, %s') % msg
setname = nzo.final_name
nzo.set_unpack_info('Unpack', T('[%s] Error "%s" while unpacking RAR files') % (setname, msg))
logging.error(T('Error "%s" while running rar_unpack on %s'), msg, setname)
logging.debug("Traceback: ", exc_info=True)
if success:
logging.debug('rar_unpack(): Rars: %s', rars)
logging.debug('rar_unpack(): Newfiles: %s', newfiles)
extracted_files.extend(newfiles)
# Do not fail if this was a recursive unpack
if fail and rarpath.startswith(workdir_complete):
# Do not delete the files, leave it to user!
logging.info('Ignoring failure to do recursive unpack of %s', rarpath)
fail = 0
success = True
newfiles = []
# Do not fail if this was maybe just some duplicate fileset
# Multipar and par2tbb will detect and log them, par2cmdline will not
if fail and rar_set.endswith(('.1', '.2')):
# Just in case, we leave the raw files
logging.info('Ignoring failure of unpack for possible duplicate file %s', rarpath)
fail = 0
success = True
newfiles = []
# Delete the old files if we have to
if success and delete and newfiles:
for rar in rars:
try:
remove_file(rar)
except OSError:
if os.path.exists(rar):
logging.warning(T('Deleting %s failed!'), rar)
brokenrar = '%s.1' % rar
if os.path.exists(brokenrar):
logging.info("Deleting %s", brokenrar)
try:
remove_file(brokenrar)
except OSError:
if os.path.exists(brokenrar):
logging.warning(T('Deleting %s failed!'), brokenrar)
return fail, extracted_files
def rar_extract(rarfile_path, numrars, one_folder, nzo, setname, extraction_path):
""" Unpack single rar set 'rarfile' to 'extraction_path',
with password tries
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, rars
"""
fail = 0
new_files = None
rars = []
passwords = get_all_passwords(nzo)
for password in passwords:
if password:
logging.debug('Trying unrar with password "%s"', password)
msg = T('Trying unrar with password "%s"') % password
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail, new_files, rars = rar_extract_core(rarfile_path, numrars, one_folder, nzo, setname, extraction_path, password)
if fail != 2:
break
if fail == 2:
logging.error('%s (%s)', T('Unpacking failed, archive requires a password'), os.path.split(rarfile_path)[1])
return fail, new_files, rars
def rar_extract_core(rarfile_path, numrars, one_folder, nzo, setname, extraction_path, password):
""" Unpack single rar set 'rarfile_path' to 'extraction_path'
Return fail==0(ok)/fail==1(error)/fail==2(wrong password)/fail==3(crc-error), new_files, rars
"""
start = time.time()
logging.debug("rar_extract(): Extractionpath: %s", extraction_path)
if password:
password_command = '-p%s' % password
else:
password_command = '-p-'
############################################################################
if one_folder or cfg.flat_unpack():
action = 'e'
else:
action = 'x'
if cfg.overwrite_files():
overwrite = '-o+' # Enable overwrite
rename = '-o+' # Dummy
else:
overwrite = '-o-' # Disable overwrite
rename = '-or' # Auto renaming
if sabnzbd.WIN32:
# For Unrar to support long-path, we need to circumvent Python's list2cmdline
# See: https://github.com/sabnzbd/sabnzbd/issues/1043
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, rename, '-ai', password_command,
'%s' % clip_path(rarfile_path), '%s\\' % long_path(extraction_path)]
elif RAR_PROBLEM:
# Use only oldest options (specifically no "-or")
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, password_command,
'%s' % rarfile_path, '%s/' % extraction_path]
else:
# Don't use "-ai" (not needed for non-Windows)
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, rename, password_command,
'%s' % rarfile_path, '%s/' % extraction_path]
if cfg.ignore_unrar_dates():
command.insert(3, '-tsm-')
stup, need_shell, command, creationflags = build_command(command, flatten_command=True)
# Get list of all the volumes part of this set
logging.debug("Analyzing rar file ... %s found", rarfile.is_rarfile(rarfile_path))
logging.debug("Running unrar %s", command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
nzo.set_action_line(T('Unpacking'), '00/%02d' % numrars)
# Loop over the output from rar!
curr = 0
extracted = []
rarfiles = []
fail = 0
inrecovery = False
lines = []
while 1:
line = platform_btou(proc.readline())
if not line:
break
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Unpack')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
nzo.status = Status.FAILED
return fail, (), ()
line = line.strip()
lines.append(line)
if line.startswith('Extracting from'):
filename = (re.search(EXTRACTFROM_RE, line).group(1))
if filename not in rarfiles:
rarfiles.append(filename)
curr += 1
nzo.set_action_line(T('Unpacking'), '%02d/%02d' % (curr, numrars))
elif line.find('recovery volumes found') > -1:
inrecovery = True # and thus start ignoring "Cannot find volume" for a while
logging.debug("unrar recovery start: %s" % line)
elif line.startswith('Reconstruct'):
# end of reconstruction: 'Reconstructing... 100%' or 'Reconstructing... ' (both success), or 'Reconstruction impossible'
inrecovery = False
logging.debug("unrar recovery result: %s" % line)
elif line.startswith('Cannot find volume') and not inrecovery:
filename = os.path.basename(line[19:])
msg = T('Unpacking failed, unable to find %s') % filename
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.warning(T('ERROR: unable to find "%s"'), filename)
fail = 1
elif line.endswith('- CRC failed'):
msg = T('Unpacking failed, CRC error')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.warning(T('ERROR: CRC failed in "%s"'), setname)
fail = 2 # Older unrar versions report a wrong password as a CRC error
elif line.startswith('File too large'):
msg = T('Unpacking failed, file too large for filesystem (FAT?)')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
# ERROR: File too large for file system (bigfile-5000MB)
logging.error(T('ERROR: File too large for filesystem (%s)'), setname)
fail = 1
elif line.startswith('Write error'):
msg = T('Unpacking failed, write error or disk is full?')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.error(T('ERROR: write error (%s)'), line[11:])
fail = 1
elif line.startswith('Cannot create'):
line2 = platform_btou(proc.readline())
if 'must not exceed 260' in line2:
msg = '%s: %s' % (T('Unpacking failed, path is too long'), line[13:])
nzo.fail_msg = msg
logging.error(T('ERROR: path too long (%s)'), line[13:])
else:
msg = '%s: %s' % (T('Unpacking failed, write error or disk is full?'), line[13:])
nzo.fail_msg = msg
logging.error(T('ERROR: write error (%s)'), line[13:])
nzo.set_unpack_info('Unpack', msg, setname)
fail = 1
# Kill the process (can stay in endless loop on Windows Server)
p.kill()
elif line.startswith('ERROR: '):
msg = T('ERROR: %s') % line[7:]
nzo.fail_msg = msg
logging.warning(msg)
nzo.set_unpack_info('Unpack', msg, setname)
fail = 1
elif 'The specified password is incorrect' in line or 'Incorrect password' in line or \
('ncrypted file' in line and (('CRC failed' in line) or ('Checksum error' in line))):
# unrar 3.x: "Encrypted file: CRC failed in oLKQfrcNVivzdzSG22a2xo7t001.part1.rar (password incorrect ?)"
# unrar 4.x: "CRC failed in the encrypted file oLKQfrcNVivzdzSG22a2xo7t001.part1.rar. Corrupt file or wrong password."
# unrar 5.x: "Checksum error in the encrypted file oLKQfrcNVivzdzSG22a2xo7t001.part1.rar. Corrupt file or wrong password."
# unrar 5.01: "The specified password is incorrect."
# unrar 5.80: "Incorrect password for oLKQfrcNVivzdzSG22a2xo7t001.part1.rar"
msg = T('Unpacking failed, archive requires a password')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 2
elif 'is not RAR archive' in line:
# Unrecognizable RAR file
msg = T('Unusable RAR file')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 3
elif 'checksum error' in line or 'Unexpected end of archive' in line:
# Corrupt archive or passworded, we can't know
# packed data checksum error in volume FILE
msg = T('Corrupt RAR file')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 3
else:
m = re.search(EXTRACTED_RE, line)
if m:
# In case of flat-unpack, UnRar still prints the whole path (?!)
unpacked_file = m.group(2)
if cfg.flat_unpack():
unpacked_file = os.path.basename(unpacked_file)
extracted.append(real_path(extraction_path, unpacked_file))
if fail:
if proc:
proc.close()
p.wait()
logging.debug('UNRAR output %s', '\n'.join(lines))
return fail, (), ()
if proc:
proc.close()
p.wait()
# Which files did we use to extract this?
rarfiles = rar_volumelist(rarfile_path, password, rarfiles)
logging.debug('UNRAR output %s', '\n'.join(lines))
nzo.fail_msg = ''
msg = T('Unpacked %s files/folders in %s') % (str(len(extracted)), format_time_string(time.time() - start))
nzo.set_unpack_info('Unpack', msg, setname)
logging.info('%s', msg)
return 0, extracted, rarfiles
##############################################################################
# (Un)Zip Functions
##############################################################################
def unzip(nzo, workdir, workdir_complete, delete, one_folder, zips):
""" Unpack multiple sets 'zips' of ZIP files from 'workdir' to 'workdir_complete.
When 'delete' is ste, originals will be deleted.
"""
try:
i = 0
unzip_failed = False
tms = time.time()
# For file-bookkeeping
orig_dir_content = recursive_listdir(workdir_complete)
for _zip in zips:
logging.info("Starting extract on zipfile: %s ", _zip)
nzo.set_action_line(T('Unpacking'), '%s' % setname_from_path(_zip))
if workdir_complete and _zip.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(_zip)[0]
if ZIP_Extract(_zip, extraction_path, one_folder):
unzip_failed = True
else:
i += 1
msg = T('%s files in %s') % (str(i), format_time_string(time.time() - tms))
nzo.set_unpack_info('Unpack', msg)
# What's new?
new_files = list(set(orig_dir_content + recursive_listdir(workdir_complete)))
# Delete the old files if we have to
if delete and not unzip_failed:
i = 0
for _zip in zips:
try:
remove_file(_zip)
i += 1
except OSError:
logging.warning(T('Deleting %s failed!'), _zip)
brokenzip = '%s.1' % _zip
if os.path.exists(brokenzip):
try:
remove_file(brokenzip)
i += 1
except OSError:
logging.warning(T('Deleting %s failed!'), brokenzip)
return unzip_failed, new_files
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Unpacking failed, %s') % msg
logging.error(T('Error "%s" while running unzip() on %s'), msg, nzo.final_name)
return True, []
def ZIP_Extract(zipfile, extraction_path, one_folder):
""" Unzip single zip set 'zipfile' to 'extraction_path' """
command = ['%s' % ZIP_COMMAND, '-o', '-Pnone', '%s' % clip_path(zipfile),
'-d%s' % extraction_path]
if one_folder or cfg.flat_unpack():
command.insert(3, '-j') # Unpack without folders
stup, need_shell, command, creationflags = build_command(command)
logging.debug('Starting unzip: %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
logging.debug('unzip output: \n%s', output)
ret = p.wait()
return ret
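# Illustrative example (an assumption, not from the original source): for a normal
# unpack the command list built above looks roughly like
#     ['unzip', '-o', '-Pnone', '/path/job/file.zip', '-d/path/complete']
# and with one_folder / flat_unpack enabled, '-j' is inserted so folders are dropped:
#     ['unzip', '-o', '-Pnone', '-j', '/path/job/file.zip', '-d/path/complete']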
##############################################################################
# 7Zip Functions
##############################################################################
def unseven(nzo, workdir, workdir_complete, delete, one_folder, sevens):
""" Unpack multiple sets '7z' of 7Zip files from 'workdir' to 'workdir_complete.
When 'delete' is set, originals will be deleted.
"""
i = 0
unseven_failed = False
new_files = []
tms = time.time()
# Find multi-volume sets, because 7zip will not provide actual set members
sets = {}
for seven in sevens:
name, ext = os.path.splitext(seven)
ext = ext.strip('.')
if not ext.isdigit():
name = seven
ext = None
if name not in sets:
sets[name] = []
if ext:
sets[name].append(ext)
# Unpack each set
for seven in sets:
extensions = sets[seven]
logging.info("Starting extract on 7zip set/file: %s ", seven)
nzo.set_action_line(T('Unpacking'), '%s' % setname_from_path(seven))
if workdir_complete and seven.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(seven)[0]
res, new_files_set, msg = seven_extract(nzo, seven, extensions, extraction_path, one_folder, delete)
if res:
unseven_failed = True
nzo.set_unpack_info('Unpack', msg, setname_from_path(seven))
else:
i += 1
new_files.extend(new_files_set)
if not unseven_failed:
msg = T('%s files in %s') % (str(i), format_time_string(time.time() - tms))
nzo.set_unpack_info('Unpack', msg)
return unseven_failed, new_files
def seven_extract(nzo, sevenset, extensions, extraction_path, one_folder, delete):
""" Unpack single set 'sevenset' to 'extraction_path', with password tries
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, sevens
"""
# Before we start, make sure the 7z binary SEVEN_COMMAND is defined
if not SEVEN_COMMAND:
msg = T('No 7za binary found, cannot unpack "%s"') % os.path.basename(sevenset)
logging.error(msg)
return 1, [], msg
fail = 0
passwords = get_all_passwords(nzo)
for password in passwords:
if password:
msg = T('Trying 7zip with password "%s"') % password
logging.debug(msg)
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname_from_path(sevenset))
fail, new_files, msg = seven_extract_core(sevenset, extensions, extraction_path, one_folder, delete, password)
if fail != 2:
break
nzo.fail_msg = ''
if fail == 2:
msg = '%s (%s)' % (T('Unpacking failed, archive requires a password'), os.path.basename(sevenset))
if fail > 0:
nzo.fail_msg = msg
nzo.status = Status.FAILED
logging.error(msg)
return fail, new_files, msg
def seven_extract_core(sevenset, extensions, extraction_path, one_folder, delete, password):
""" Unpack single 7Z set 'sevenset' to 'extraction_path'
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, message
"""
if one_folder:
method = 'e' # Unpack without folders
else:
method = 'x' # Unpack with folders
if sabnzbd.WIN32 or sabnzbd.DARWIN:
case = '-ssc-' # Case insensitive
else:
case = '-ssc' # Case sensitive
if cfg.overwrite_files():
overwrite = '-aoa'
else:
overwrite = '-aou'
if password:
password = '-p%s' % password
else:
password = '-p'
if len(extensions) > 0:
name = '%s.001' % sevenset
parm = '-tsplit'
else:
name = sevenset
parm = '-tzip' if sevenset.lower().endswith('.zip') else '-t7z'
if not os.path.exists(name):
return 1, [], T('7ZIP set "%s" is incomplete, cannot unpack') % setname_from_path(sevenset)
# For file-bookkeeping
orig_dir_content = recursive_listdir(extraction_path)
command = [SEVEN_COMMAND, method, '-y', overwrite, parm, case, password,
'-o%s' % extraction_path, name]
stup, need_shell, command, creationflags = build_command(command)
logging.debug('Starting 7za: %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
logging.debug('7za output: %s', output)
ret = p.wait()
# Return-code for CRC and Password is the same
if ret == 2 and 'ERROR: CRC Failed' in output:
# We can output a more general error
ret = 1
msg = T('ERROR: CRC failed in "%s"') % setname_from_path(sevenset)
else:
# Default message
msg = T('Could not unpack %s') % setname_from_path(sevenset)
# What's new?
new_files = list(set(orig_dir_content + recursive_listdir(extraction_path)))
if ret == 0 and delete:
if extensions:
for ext in extensions:
path = '%s.%s' % (sevenset, ext)
try:
remove_file(path)
except:
logging.warning(T('Deleting %s failed!'), path)
else:
try:
remove_file(sevenset)
except:
logging.warning(T('Deleting %s failed!'), sevenset)
# Always return an error message, even when return code is 0
return ret, new_files, msg
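# Illustrative example (an assumption, not from the original source): for a plain
# single-volume 7z archive the command assembled above looks roughly like
#     ['7za', 'x', '-y', '-aou', '-t7z', '-ssc', '-p', '-o/path/complete', '/path/job/file.7z']
# while a multi-volume set ('file.7z.001', 'file.7z.002', ...) is opened via the
# '.001' volume together with '-tsplit'.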
##############################################################################
# PAR2 Functions
##############################################################################
def par2_repair(parfile_nzf, nzo, workdir, setname, single):
""" Try to repair a set, return readd or correctness """
# Check if file exists, otherwise see if another is done
parfile_path = os.path.join(workdir, parfile_nzf.filename)
if not os.path.exists(parfile_path) and nzo.extrapars[setname]:
for new_par in nzo.extrapars[setname]:
test_parfile = os.path.join(workdir, new_par.filename)
if os.path.exists(test_parfile):
parfile_nzf = new_par
break
else:
# No file was found, we assume this set already finished
return False, True
parfile = os.path.join(workdir, parfile_nzf.filename)
old_dir_content = os.listdir(workdir)
used_joinables = ()
joinables = ()
used_for_repair = ()
result = readd = False
# Need to copy now, gets pop-ed during repair
setpars = nzo.extrapars[setname][:]
# Start QuickCheck
nzo.status = Status.QUICK_CHECK
nzo.set_action_line(T('Repair'), T('Quick Checking'))
qc_result = QuickCheck(setname, nzo)
if qc_result:
logging.info("Quick-check for %s is OK, skipping repair", setname)
nzo.set_unpack_info('Repair', T('[%s] Quick Check OK') % setname)
result = True
if not result and cfg.enable_all_par():
# Download all par2 files that haven't been downloaded yet
readd = False
for extrapar in nzo.extrapars[setname][:]:
# Make sure we only get new par2 files
if extrapar not in nzo.finished_files and extrapar not in nzo.files:
nzo.add_parfile(extrapar)
readd = True
if readd:
return readd, result
if not result:
nzo.status = Status.REPAIRING
result = False
readd = False
try:
nzo.set_action_line(T('Repair'), T('Starting Repair'))
logging.info('Scanning "%s"', parfile)
joinables, zips, rars, sevens, ts = build_filelists(workdir, check_rar=False)
# Multipar or not?
if sabnzbd.WIN32 and cfg.multipar():
finished, readd, datafiles, used_joinables, used_for_repair = MultiPar_Verify(parfile, nzo, setname, joinables, single=single)
else:
finished, readd, datafiles, used_joinables, used_for_repair = PAR_Verify(parfile, nzo, setname, joinables, single=single)
if finished:
result = True
logging.info('Par verify finished ok on %s!', parfile)
# Remove this set so we don't try to check it again
nzo.remove_parset(parfile_nzf.setname)
else:
logging.info('Par verify failed on %s!', parfile)
if not readd:
# Failed to repair -> remove this set
nzo.remove_parset(parfile_nzf.setname)
return readd, False
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Repairing failed, %s') % msg
logging.error(T('Error %s while running par2_repair on set %s'), msg, setname)
logging.info("Traceback: ", exc_info=True)
return readd, result
try:
if cfg.enable_par_cleanup():
deletables = []
new_dir_content = os.listdir(workdir)
# Remove extra files created during repair and par2 base files
for path in new_dir_content:
if os.path.splitext(path)[1] == '.1' and path not in old_dir_content:
deletables.append(os.path.join(workdir, path))
deletables.append(os.path.join(workdir, setname + '.par2'))
deletables.append(os.path.join(workdir, setname + '.PAR2'))
deletables.append(parfile)
# Add output of par2-repair to remove
deletables.extend(used_joinables)
deletables.extend([os.path.join(workdir, f) for f in used_for_repair])
# Delete pars of the set
deletables.extend([os.path.join(workdir, nzf.filename) for nzf in setpars])
for filepath in deletables:
if filepath in joinables:
joinables.remove(filepath)
if os.path.exists(filepath):
try:
remove_file(filepath)
except OSError:
logging.warning(T('Deleting %s failed!'), filepath)
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Repairing failed, %s') % msg
logging.error(T('Error "%s" while running par2_repair on set %s'), msg, setname, exc_info=True)
return readd, result
_RE_BLOCK_FOUND = re.compile(r'File: "([^"]+)" - found \d+ of \d+ data blocks from "([^"]+)"')
_RE_IS_MATCH_FOR = re.compile(r'File: "([^"]+)" - is a match for "([^"]+)"')
_RE_LOADING_PAR2 = re.compile(r'Loading "([^"]+)"\.')
_RE_LOADED_PAR2 = re.compile(r'Loaded (\d+) new packets')
def PAR_Verify(parfile, nzo, setname, joinables, single=False):
""" Run par2 on par-set """
used_joinables = []
used_for_repair = []
# set the current nzo status to "Verifying...". Used in History
nzo.status = Status.VERIFYING
start = time.time()
options = cfg.par_option().strip()
command = [str(PAR2_COMMAND), 'r', options, parfile]
# Append the wildcard for this set
parfolder = os.path.split(parfile)[0]
if single or len(globber(parfolder, setname + '*')) < 2:
# Support bizarre naming conventions
wildcard = '*'
else:
# Normal case, everything is named after set
wildcard = setname + '*'
if sabnzbd.WIN32 or sabnzbd.DARWIN:
command.append(os.path.join(parfolder, wildcard))
else:
# For Unix systems, remove folders, due to a bug in some par2cmdline versions
flist = [item for item in globber_full(parfolder, wildcard) if os.path.isfile(item)]
command.extend(flist)
# We need to check for the bad par2cmdline that skips blocks
# Or the one that complains about basepath
# Only if we're not doing multicore
if not sabnzbd.WIN32 and not sabnzbd.DARWIN:
par2text = run_simple([command[0], '-h'])
if 'No data skipping' in par2text:
logging.info('Detected par2cmdline version that skips blocks, adding -N parameter')
command.insert(2, '-N')
if 'Set the basepath' in par2text:
logging.info('Detected par2cmdline version that needs basepath, adding -B<path> parameter')
command.insert(2, '-B')
command.insert(3, parfolder)
stup, need_shell, command, creationflags = build_command(command)
# par2multicore wants to see \\.\ paths on Windows
# See: https://github.com/sabnzbd/sabnzbd/pull/771
if sabnzbd.WIN32:
command = [clip_path(x) if x.startswith('\\\\?\\') else x for x in command]
# Run the external command
logging.info('Starting par2: %s', command)
lines = []
try:
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
# Set up our variables
datafiles = []
renames = {}
reconstructed = []
linebuf = ''
finished = 0
readd = False
verifynum = 1
verifytotal = 0
verified = 0
in_verify_repaired = False
# Loop over the output, whee
while 1:
char = platform_btou(proc.read(1))
if not char:
break
# Line not complete yet
if char not in ('\n', '\r'):
linebuf += char
continue
line = linebuf.strip()
linebuf = ''
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Repair')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
readd = False
break
# Skip empty lines
if line == '':
continue
if 'Repairing:' not in line:
lines.append(line)
if line.startswith(('Invalid option specified', 'Invalid thread option', 'Cannot specify recovery file count')):
msg = T('[%s] PAR2 received incorrect options, check your Config->Switches settings') % setname
nzo.set_unpack_info('Repair', msg)
nzo.status = Status.FAILED
logging.error(msg)
elif line.startswith('All files are correct'):
msg = T('[%s] Verified in %s, all files correct') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, all files correct',
format_time_string(time.time() - start))
finished = 1
elif line.startswith('Repair is required'):
msg = T('[%s] Verified in %s, repair is required') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, repair is required',
format_time_string(time.time() - start))
start = time.time()
verified = 1
# Reset to use them again for verification of repair
verifytotal = 0
verifynum = 0
elif line.startswith('Main packet not found') or 'The recovery file does not exist' in line:
# Initial par2 file probably didn't decode properly, or bad user parameters
# We will try to get another par2 file, but 99% of time it's user parameters
msg = T('Invalid par2 files or invalid PAR2 parameters, cannot verify or repair')
logging.info(msg)
logging.info("Extra pars = %s", nzo.extrapars[setname])
# Look for the smallest par2file
block_table = {}
for nzf in nzo.extrapars[setname]:
if not nzf.completed:
block_table[nzf.blocks] = nzf
if block_table:
nzf = block_table[min(block_table.keys())]
logging.info("Found new par2file %s", nzf.filename)
# Move from extrapar list to files to be downloaded
# and remove it from the extrapars list
nzo.add_parfile(nzf)
readd = True
else:
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('You need'):
# We need more blocks, but are they available?
chunks = line.split()
needed_blocks = int(chunks[2])
# Check if we have enough blocks
added_blocks = nzo.get_extra_blocks(setname, needed_blocks)
if added_blocks:
msg = T('Fetching %s blocks...') % str(added_blocks)
nzo.set_action_line(T('Fetching'), msg)
readd = True
else:
# Failed
msg = T('Repair failed, not enough repair blocks (%s short)') % str(needed_blocks)
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('Repair is possible'):
start = time.time()
nzo.set_action_line(T('Repairing'), '%2d%%' % 0)
elif line.startswith('Repairing:'):
chunks = line.split()
per = float(chunks[-1][:-1])
nzo.set_action_line(T('Repairing'), '%2d%%' % per)
nzo.status = Status.REPAIRING
elif line.startswith('Repair complete'):
msg = T('[%s] Repaired in %s') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Repaired in %s', format_time_string(time.time() - start))
finished = 1
elif verified and line.endswith(('are missing.', 'exist but are damaged.')):
# Files that will later be verified after repair
chunks = line.split()
verifytotal += int(chunks[0])
elif line.startswith('Verifying repaired files'):
in_verify_repaired = True
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif in_verify_repaired and line.startswith('Target'):
verifynum += 1
if verifynum <= verifytotal:
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif line.startswith('File:') and line.find('data blocks from') > 0:
m = _RE_BLOCK_FOUND.search(line)
if m:
workdir = os.path.split(parfile)[0]
old_name = m.group(1)
new_name = m.group(2)
if joinables:
# Find out if a joinable file has been used for joining
for jn in joinables:
if line.find(os.path.split(jn)[1]) > 0:
used_joinables.append(jn)
break
# Special case of joined RAR files, the "of" and "from" must both be RAR files
                        # This prevents the joined rar files from being seen as an extra rar-set
if '.rar' in old_name.lower() and '.rar' in new_name.lower():
used_joinables.append(os.path.join(workdir, old_name))
else:
logging.debug('PAR2 will reconstruct "%s" from "%s"', new_name, old_name)
reconstructed.append(os.path.join(workdir, old_name))
elif 'Could not write' in line and 'at offset 0:' in line:
# If there are joinables, this error will only happen in case of 100% complete files
# We can just skip the retry, because par2cmdline will fail in those cases
                # because it refuses to scan the ".001" file
if joinables:
finished = 1
used_joinables = []
elif ' cannot be renamed to ' in line:
msg = line.strip()
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif 'There is not enough space on the disk' in line:
# Oops, disk is full!
msg = T('Repairing failed, %s') % T('Disk full')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# File: "oldname.rar" - is a match for "newname.rar".
elif 'is a match for' in line:
m = _RE_IS_MATCH_FOR.search(line)
if m:
old_name = m.group(1)
new_name = m.group(2)
logging.debug('PAR2 will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
# Show progress
if verifytotal == 0 or verifynum < verifytotal:
verifynum += 1
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
elif 'Scanning extra files' in line:
# Obfuscated post most likely, so reset counter to show progress
verifynum = 1
elif 'No details available for recoverable file' in line:
msg = line.strip()
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('Repair Failed.'):
# Unknown repair problem
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
finished = 0
elif not verified:
if line.startswith('Verifying source files'):
nzo.set_action_line(T('Verifying'), '01/%02d' % verifytotal)
nzo.status = Status.VERIFYING
elif line.startswith('Scanning:'):
pass
# Target files
m = TARGET_RE.match(line)
if m:
nzo.status = Status.VERIFYING
verifynum += 1
if verifytotal == 0 or verifynum < verifytotal:
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
else:
nzo.set_action_line(T('Checking extra files'), '%02d' % verifynum)
# Remove redundant extra files that are just duplicates of original ones
if 'duplicate data blocks' in line:
used_for_repair.append(m.group(1))
else:
datafiles.append(m.group(1))
continue
# Verify done
m = re.match(r'There are (\d+) recoverable files', line)
if m:
verifytotal = int(m.group(1))
p.wait()
except WindowsError as err:
raise WindowsError(err)
# Also log what is shown to user in history
if nzo.fail_msg:
logging.info(nzo.fail_msg)
logging.debug('PAR2 output was\n%s', '\n'.join(lines))
# If successful, add renamed files to the collection
if finished and renames:
nzo.renamed_file(renames)
# If successful and files were reconstructed, remove incomplete original files
if finished and reconstructed:
# Use 'used_joinables' as a vehicle to get rid of the files
used_joinables.extend(reconstructed)
return finished, readd, datafiles, used_joinables, used_for_repair
_RE_FILENAME = re.compile(r'"([^"]+)"')
def MultiPar_Verify(parfile, nzo, setname, joinables, single=False):
""" Run par2 on par-set """
parfolder = os.path.split(parfile)[0]
used_joinables = []
used_for_repair = []
# set the current nzo status to "Verifying...". Used in History
nzo.status = Status.VERIFYING
start = time.time()
# Caching of verification implemented by adding:
# But not really required due to prospective-par2
command = [str(MULTIPAR_COMMAND), 'r', '-vs2', '-vd%s' % parfolder, parfile]
# Check if there are maybe par2cmdline/par2tbb commands supplied
if '-t' in cfg.par_option() or '-p' in cfg.par_option():
logging.info('Removing old par2cmdline/par2tbb options for MultiPar')
cfg.par_option.set('')
# Only add user-options if supplied
options = cfg.par_option().strip()
if options:
# We wrongly instructed users to use /x parameter style instead of -x
options = options.replace('/', '-', 1)
command.insert(2, options)
# Append the wildcard for this set
if single or len(globber(parfolder, setname + '*')) < 2:
# Support bizarre naming conventions
wildcard = '*'
else:
# Normal case, everything is named after set
wildcard = setname + '*'
command.append(os.path.join(parfolder, wildcard))
stup, need_shell, command, creationflags = build_command(command)
logging.info('Starting MultiPar: %s', command)
lines = []
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
# Set up our variables
datafiles = []
renames = {}
reconstructed = []
linebuf = ''
finished = 0
readd = False
verifynum = 0
verifytotal = 0
in_check = False
in_verify = False
in_repair = False
in_verify_repaired = False
misnamed_files = False
old_name = None
# Loop over the output, whee
while 1:
char = platform_btou(proc.read(1))
if not char:
break
# Line not complete yet
if char not in ('\n', '\r'):
linebuf += char
continue
line = linebuf.strip()
linebuf = ''
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Repair')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
readd = False
break
# Skip empty lines
if line == '':
continue
# Save it all
lines.append(line)
# ----------------- Startup
if line.startswith('invalid option'):
# Option error
msg = T('[%s] PAR2 received incorrect options, check your Config->Switches settings') % setname
nzo.set_unpack_info('Repair', msg)
nzo.status = Status.FAILED
logging.error(msg)
elif line.startswith('valid file is not found'):
            # Initial par2 file probably didn't decode properly, or bad user parameters
            # We will try to get another par2 file, but 99% of the time it's user parameters
msg = T('Invalid par2 files or invalid PAR2 parameters, cannot verify or repair')
logging.info(msg)
logging.info("Extra pars = %s", nzo.extrapars[setname])
# Look for the smallest par2file
block_table = {}
for nzf in nzo.extrapars[setname]:
if not nzf.completed:
block_table[nzf.blocks] = nzf
if block_table:
nzf = block_table[min(block_table.keys())]
logging.info("Found new par2file %s", nzf.filename)
# Move from extrapar list to files to be downloaded
# and remove it from the extrapars list
nzo.add_parfile(nzf)
readd = True
else:
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('There is not enough space on the disk'):
msg = T('Repairing failed, %s') % T('Disk full')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# ----------------- Start check/verify stage
elif line.startswith('Recovery Set ID'):
            # Remove the files where MultiPar stores its verification result when repair is successful
recovery_id = line.split()[-1]
used_for_repair.append('2_%s.bin' % recovery_id)
used_for_repair.append('2_%s.ini' % recovery_id)
elif line.startswith('Input File total count'):
# How many files will it try to find?
verifytotal = int(line.split()[-1])
# ----------------- Misnamed-detection stage
# Misnamed files
elif line.startswith('Searching misnamed file'):
# We are in the misnamed files block
misnamed_files = True
verifynum = 0
elif misnamed_files and 'Found' in line:
# First it reports the current filename
m = _RE_FILENAME.search(line)
if m:
verifynum += 1
nzo.set_action_line(T('Checking'), '%02d/%02d' % (verifynum, verifytotal))
old_name = m.group(1)
elif misnamed_files and 'Misnamed' in line:
            # Then it reports the actual (new) name
m = _RE_FILENAME.search(line)
if m and old_name:
new_name = m.group(1)
logging.debug('MultiPar will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
# New name is also part of data!
datafiles.append(new_name)
reconstructed.append(old_name)
# ----------------- Checking stage
# Checking input files
elif line.startswith('Complete file count'):
in_check = False
verifynum = 0
old_name = None
elif line.startswith('Verifying Input File'):
in_check = True
nzo.status = Status.VERIFYING
elif in_check:
m = _RE_FILENAME.search(line)
if m:
# Only increase counter if it was really the detection line
if line.startswith('= ') or '%' not in line:
verifynum += 1
nzo.set_action_line(T('Checking'), '%02d/%02d' % (verifynum, verifytotal))
old_name = m.group(1)
# ----------------- Verify stage
# Which files need extra verification?
elif line.startswith('Damaged file count'):
verifytotal = int(line.split()[-1])
elif line.startswith('Missing file count'):
verifytotal += int(line.split()[-1])
# Actual verification
elif line.startswith('Input File Slice found'):
# End of verification AND end of misnamed file search
in_verify = False
misnamed_files = False
old_name = None
elif line.startswith('Finding available slice'):
# The actual scanning of the files
in_verify = True
nzo.set_action_line(T('Verifying'), T('Checking'))
elif in_verify:
m = _RE_FILENAME.search(line)
if m:
                # It prints the filename a couple of times, so we save it to check.
                # 'datafiles' will not contain all data-files in the par-set, only the
                # ones that got scanned, but its output is never used!
nzo.status = Status.VERIFYING
if line.split()[1] in ('Damaged', 'Found'):
verifynum += 1
datafiles.append(m.group(1))
# Set old_name in case it was misnamed and found (not when we are joining)
old_name = None
if line.split()[1] == 'Found' and not joinables:
old_name = m.group(1)
# Sometimes we don't know the total (filejoin)
if verifytotal <= 1:
nzo.set_action_line(T('Verifying'), '%02d' % verifynum)
else:
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
elif old_name and old_name != m.group(1):
# Hey we found another misnamed one!
new_name = m.group(1)
logging.debug('MultiPar will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
                    # Put it back with its new name!
datafiles.pop()
datafiles.append(new_name)
# Need to remove the old file after repair (Multipar keeps it)
used_for_repair.append(old_name)
# Need to reset it to avoid collision
old_name = None
else:
# It's scanning extra files that don't belong to the set
                    # For damaged files the filename is reported twice, so only start the extra-files display then
verifynum += 1
if verifynum / 2 > verifytotal:
nzo.set_action_line(T('Checking extra files'), '%02d' % verifynum)
if joinables:
# Find out if a joinable file has been used for joining
for jn in joinables:
if line.find(os.path.split(jn)[1]) > 0:
used_joinables.append(jn)
datafiles.append(m.group(1))
break
elif line.startswith('Need'):
# We need more blocks, but are they available?
chunks = line.split()
needed_blocks = int(chunks[1])
# Check if we have enough blocks
added_blocks = nzo.get_extra_blocks(setname, needed_blocks)
if added_blocks:
msg = T('Fetching %s blocks...') % str(added_blocks)
nzo.set_action_line(T('Fetching'), msg)
readd = True
else:
# Failed
msg = T('Repair failed, not enough repair blocks (%s short)') % str(needed_blocks)
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# MultiPar can say 'PAR File(s) Incomplete' also when it needs more blocks
# But the Need-more-blocks message is always last, so force failure
finished = 0
# Result of verification
elif line.startswith('All Files Complete') or line.endswith('PAR File(s) Incomplete'):
# Completed without damage!
            # 'PAR File(s) Incomplete' is also reported on success
            # when there are very similar filenames in the folder
msg = T('[%s] Verified in %s, all files correct') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, all files correct',
format_time_string(time.time() - start))
finished = 1
elif line.startswith(('Ready to repair', 'Ready to rejoin')):
# Ready to repair!
# Or we are re-joining a split file when there's no damage but takes time
msg = T('[%s] Verified in %s, repair is required') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, repair is required',
format_time_string(time.time() - start))
start = time.time()
# Set message for user in case of joining
if line.startswith('Ready to rejoin'):
nzo.set_action_line(T('Joining'), '%2d' % len(used_joinables))
else:
# If we are repairing a joinable set, it won't actually
# do the joining. So we can't remove those files!
used_joinables = []
# ----------------- Repair stage
elif 'Recovering slice' in line:
# Before this it will calculate matrix, here is where it starts
start = time.time()
in_repair = True
nzo.set_action_line(T('Repairing'), '%2d%%' % 0)
elif in_repair and line.startswith('Verifying repair'):
in_repair = False
in_verify_repaired = True
# How many will be checked?
verifytotal = int(line.split()[-1])
verifynum = 0
elif in_repair:
try:
# Line with percentage of repair (nothing else)
per = float(line[:-1])
nzo.set_action_line(T('Repairing'), '%2d%%' % per)
nzo.status = Status.REPAIRING
except:
# Checksum error
if 'checksum' in line:
# Failed due to checksum error of multipar
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
else:
# Not sure, log error
logging.info("Traceback: ", exc_info=True)
elif line.startswith('Repaired successfully'):
msg = T('[%s] Repaired in %s') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Repaired in %s', format_time_string(time.time() - start))
finished = 1
elif in_verify_repaired and line.startswith('Repaired :'):
# Track verification of repaired files (can sometimes take a while)
verifynum += 1
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif line.startswith('Failed to repair'):
# Unknown repair problem
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
finished = 0
p.wait()
# Also log what is shown to user in history
if nzo.fail_msg:
logging.info(nzo.fail_msg)
logging.debug('MultiPar output was\n%s', '\n'.join(lines))
# Add renamed files to the collection
# MultiPar always(!!) renames automatically whatever it can in the 'Searching misnamed file:'-section
# Even if the repair did not complete fully it will rename those!
    # But the ones in the 'Finding available slices'-section will only be renamed after successful repair
if renames:
        # If successful, we also remove the possibly previously renamed ones
if finished:
reconstructed.extend(list(renames.values()))
# Adding to the collection
nzo.renamed_file(renames)
# Remove renamed original files
workdir = os.path.split(parfile)[0]
used_joinables.extend([os.path.join(workdir, name) for name in reconstructed])
return finished, readd, datafiles, used_joinables, used_for_repair
def create_env(nzo=None, extra_env_fields={}):
""" Modify the environment for pp-scripts with extra information
OSX: Return copy of environment without PYTHONPATH and PYTHONHOME
other: return None
"""
env = os.environ.copy()
# Are we adding things?
if nzo:
# Add basic info
for field in ENV_NZO_FIELDS:
try:
field_value = getattr(nzo, field)
# Special filters for Python types
if field_value is None:
env['SAB_' + field.upper()] = ''
elif isinstance(field_value, bool):
env['SAB_' + field.upper()] = str(field_value*1)
else:
env['SAB_' + field.upper()] = str(field_value)
except:
# Catch key errors
pass
# Always supply basic info
extra_env_fields.update({'program_dir': sabnzbd.DIR_PROG,
'par2_command': sabnzbd.newsunpack.PAR2_COMMAND,
'multipar_command': sabnzbd.newsunpack.MULTIPAR_COMMAND,
'rar_command': sabnzbd.newsunpack.RAR_COMMAND,
'zip_command': sabnzbd.newsunpack.ZIP_COMMAND,
'7zip_command': sabnzbd.newsunpack.SEVEN_COMMAND,
'version': sabnzbd.__version__})
# Add extra fields
for field in extra_env_fields:
try:
if extra_env_fields[field] is not None:
env['SAB_' + field.upper()] = str(extra_env_fields[field])
else:
env['SAB_' + field.upper()] = ''
except:
# Catch key errors
pass
if sabnzbd.DARWIN:
if 'PYTHONPATH' in env:
del env['PYTHONPATH']
if 'PYTHONHOME' in env:
del env['PYTHONHOME']
elif not nzo:
# No modification
return None
return env
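# Illustrative sketch, not part of the original code: assuming "final_name" is
# one of the ENV_NZO_FIELDS, a post-processing script started with this
# environment would see something like
#
#     env = create_env(nzo)
#     env['SAB_FINAL_NAME']      # e.g. "My.Download"
#     env['SAB_VERSION']         # sabnzbd.__version__
#
# None values become "", booleans become "0"/"1", and every key is upper-cased
# and prefixed with "SAB_".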
def userxbit(filename):
# Returns boolean if the x-bit for user is set on the given file
# This is a workaround: os.access(filename, os.X_OK) does not work on certain mounted file systems
# Does not work on Windows, but it is not called on Windows
# rwx rwx rwx
# 876 543 210 # we want bit 6 from the right, counting from 0
userxbit = 1<<6 # bit 6
rwxbits = os.stat(filename)[0] # the first element of os.stat() is "mode"
    # bitwise AND, check if it is not 0:
xbitset = (rwxbits & userxbit) > 0
return xbitset
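# Worked example of the bit arithmetic above (illustrative, not part of the
# original code): a file with mode 0o744 gives os.stat(filename)[0] & 0o100
# == 0o100 (bit 6 set), so userxbit() returns True; mode 0o644 gives 0 and
# returns False. The same test can be written with the stat module:
#
#     import stat
#     bool(os.stat(filename).st_mode & stat.S_IXUSR)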
def build_command(command, flatten_command=False):
""" Prepare list from running an external program
On Windows we need to run our own list2cmdline for Unrar
"""
# command[0] should be set, and thus not None
if not command[0]:
logging.error(T('[%s] The command in build_command is undefined.'), caller_name())
raise IOError
if not sabnzbd.WIN32:
if command[0].endswith('.py'):
with open(command[0], 'r') as script_file:
if not userxbit(command[0]):
# Inform user that Python scripts need x-bit and then stop
logging.error(T('Python script "%s" does not have execute (+x) permission set'), command[0])
raise IOError
elif script_file.read(2) != '#!':
# No shebang (#!) defined, add default python
command.insert(0, 'python')
if IONICE_COMMAND and cfg.ionice().strip():
lst = cfg.ionice().split()
lst.reverse()
for arg in lst:
command.insert(0, arg)
command.insert(0, IONICE_COMMAND)
if NICE_COMMAND and cfg.nice().strip():
lst = cfg.nice().split()
lst.reverse()
for arg in lst:
command.insert(0, arg)
command.insert(0, NICE_COMMAND)
need_shell = False
stup = None
creationflags = 0
else:
# For Windows we always need to add python interpreter
if command[0].endswith('.py'):
command.insert(0, 'python')
need_shell = os.path.splitext(command[0])[1].lower() not in ('.exe', '.com')
stup = subprocess.STARTUPINFO()
stup.dwFlags = win32process.STARTF_USESHOWWINDOW
stup.wShowWindow = win32con.SW_HIDE
creationflags = WIN_SCHED_PRIOS[cfg.win_process_prio()]
if need_shell or flatten_command:
command = list2cmdline(command)
return stup, need_shell, command, creationflags
def rar_volumelist(rarfile_path, password, known_volumes):
""" Extract volumes that are part of this rarset
and merge them with existing list, removing duplicates
"""
# UnRar is required to read some RAR files
# RarFile can fail in special cases
try:
rarfile.UNRAR_TOOL = RAR_COMMAND
zf = rarfile.RarFile(rarfile_path)
# setpassword can fail due to bugs in RarFile
if password:
try:
zf.setpassword(password)
except:
pass
zf_volumes = zf.volumelist()
except:
zf_volumes = []
# Remove duplicates
known_volumes_base = [os.path.basename(vol) for vol in known_volumes]
for zf_volume in zf_volumes:
if os.path.basename(zf_volume) not in known_volumes_base:
# Long-path notation just to be sure
known_volumes.append(long_path(zf_volume))
return known_volumes
# Sort the various RAR filename formats properly :\
def rar_sort(a, b):
""" Define sort method for rar file names """
aext = a.split('.')[-1]
bext = b.split('.')[-1]
if aext == 'rar' and bext == 'rar':
return cmp(a, b)
elif aext == 'rar':
return -1
elif bext == 'rar':
return 1
else:
return cmp(a, b)
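# Illustrative ordering, not taken from the original code: wrapped with
# functools.cmp_to_key, rar_sort puts the main .rar ahead of its numbered
# volumes while keeping those volumes in name order, e.g.
#
#     sorted(['x.r01', 'x.rar', 'x.r00'], key=functools.cmp_to_key(rar_sort))
#     # -> ['x.rar', 'x.r00', 'x.r01']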
def build_filelists(workdir, workdir_complete=None, check_both=False, check_rar=True):
""" Build filelists, if workdir_complete has files, ignore workdir.
Optionally scan both directories.
Optionally test content to establish RAR-ness
"""
sevens, joinables, zips, rars, ts, filelist = ([], [], [], [], [], [])
if workdir_complete:
filelist.extend(recursive_listdir(workdir_complete))
if workdir and (not filelist or check_both):
filelist.extend(recursive_listdir(workdir))
for file in filelist:
# Extra check for rar (takes CPU/disk)
file_is_rar = False
if check_rar:
file_is_rar = rarfile.is_rarfile(file)
# Run through all the checks
if SEVENZIP_RE.search(file) or SEVENMULTI_RE.search(file):
# 7zip
sevens.append(file)
elif SPLITFILE_RE.search(file) and not file_is_rar:
# Joinables, optional with RAR check
joinables.append(file)
elif ZIP_RE.search(file):
# ZIP files
zips.append(file)
elif RAR_RE.search(file):
# RAR files
rars.append(file)
elif TS_RE.search(file):
# TS split files
ts.append(file)
logging.debug("build_filelists(): joinables: %s", joinables)
logging.debug("build_filelists(): zips: %s", zips)
logging.debug("build_filelists(): rars: %s", rars)
logging.debug("build_filelists(): 7zips: %s", sevens)
logging.debug("build_filelists(): ts: %s", ts)
return joinables, zips, rars, sevens, ts
def QuickCheck(set, nzo):
""" Check all on-the-fly md5sums of a set """
md5pack = nzo.md5packs.get(set)
if md5pack is None:
return False
    # We use bitwise assignment (&=) so False always wins in case of failure
# This way the renames always get saved!
result = True
nzf_list = nzo.finished_files
renames = {}
# Files to ignore
ignore_ext = cfg.quick_check_ext_ignore()
for file in md5pack:
found = False
file_to_ignore = os.path.splitext(file)[1].lower().replace('.', '') in ignore_ext
for nzf in nzf_list:
# Do a simple filename based check
if file == nzf.filename:
found = True
if (nzf.md5sum is not None) and nzf.md5sum == md5pack[file]:
logging.debug('Quick-check of file %s OK', file)
result &= True
elif file_to_ignore:
# We don't care about these files
logging.debug('Quick-check ignoring file %s', file)
result &= True
else:
logging.info('Quick-check of file %s failed!', file)
result = False
break
            # Now let's do the obfuscation check
if nzf.md5sum == md5pack[file]:
try:
logging.debug('Quick-check will rename %s to %s', nzf.filename, file)
renamer(os.path.join(nzo.downpath, nzf.filename), os.path.join(nzo.downpath, file))
renames[file] = nzf.filename
nzf.filename = file
result &= True
found = True
break
except IOError:
                    # Rename failed for some reason, probably already done
break
if not found:
if file_to_ignore:
# We don't care about these files
logging.debug('Quick-check ignoring missing file %s', file)
continue
logging.info('Cannot Quick-check missing file %s!', file)
result = False
# Save renames
if renames:
nzo.renamed_file(renames)
return result
def unrar_check(rar):
""" Return version number of unrar, where "5.01" returns 501
Also return whether an original version is found
(version, original)
"""
version = 0
original = ''
if rar:
try:
version = run_simple(rar)
except:
return version, original
original = "Alexander Roshal" in version
m = re.search(r"RAR\s(\d+)\.(\d+)", version)
if m:
version = int(m.group(1)) * 100 + int(m.group(2))
else:
version = 0
return version, original
def par2_mt_check(par2_path):
""" Detect if we have multicore par2 variants """
try:
par2_version = run_simple([par2_path, '-h'])
# Look for a threads option
if '-t<' in par2_version:
return True
except:
pass
return False
def sfv_check(sfv_path):
""" Verify files using SFV file,
        input: full path of sfv, files are assumed to be relative to sfv
returns: List of failing files or [] when all is OK
"""
failed = []
try:
fp = open(sfv_path, 'r')
except:
logging.info('Cannot open SFV file %s', sfv_path)
failed.append(sfv_path)
return failed
root = os.path.split(sfv_path)[0]
for line in fp:
line = line.strip('\n\r ')
if line and line[0] != ';':
x = line.rfind(' ')
if x > 0:
filename = line[:x].strip()
checksum = line[x:].strip()
path = os.path.join(root, filename)
if os.path.exists(path):
if crc_check(path, checksum):
logging.debug('File %s passed SFV check', path)
else:
logging.info('File %s did not pass SFV check', path)
failed.append(filename)
else:
logging.info('File %s missing in SFV check', path)
failed.append(filename)
fp.close()
return failed
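# Example of the SFV layout this parser accepts (illustrative file content,
# not from the original source): ';' starts a comment line, data lines hold a
# filename and a hex CRC32 separated by the last space on the line:
#
#     ; Generated by WIN-SFV32
#     show.part01.rar 6A8F13C2
#     show.part02.rar DEADBEEF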
def crc_check(path, target_crc):
""" Return True if file matches CRC """
try:
fp = open(path, 'rb')
except:
return False
crc = 0
while 1:
data = fp.read(4096)
if not data:
break
crc = binascii.crc32(data, crc)
fp.close()
crc = '%08x' % (crc & 0xffffffff,)
return crc.lower() == target_crc.lower()
def analyse_show(name):
""" Do a quick SeasonSort check and return basic facts """
job = SeriesSorter(None, name, None, None)
job.match(force=True)
if job.is_match():
job.get_values()
info = job.show_info
show_name = info.get('show_name', '').replace('.', ' ').replace('_', ' ')
    show_name = show_name.replace('  ', ' ')
return show_name, \
info.get('season_num', ''), \
info.get('episode_num', ''), \
info.get('ep_name', '')
def pre_queue(nzo, pp, cat):
""" Run pre-queue script (if any) and process results.
        pp and cat are supplied separately since they can change.
"""
def fix(p):
# If added via API, some items can still be "None" (as a string)
if not p or str(p).lower() == 'none':
return ''
return str(p)
values = [1, nzo.final_name_pw_clean, pp, cat, nzo.script, nzo.priority, None]
script_path = make_script_path(cfg.pre_script())
if script_path:
# Basic command-line parameters
command = [script_path, nzo.final_name_pw_clean, pp, cat, nzo.script, nzo.priority, str(nzo.bytes), ' '.join(nzo.groups)]
command.extend(analyse_show(nzo.final_name_pw_clean))
command = [fix(arg) for arg in command]
# Fields not in the NZO directly
extra_env_fields = {'groups': ' '.join(nzo.groups),
'show_name': command[8],
'show_season': command[9],
'show_episode': command[10],
'show_episode_name': command[11]}
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env(nzo, extra_env_fields)
logging.info('Running pre-queue script %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, startupinfo=stup, env=env,
creationflags=creationflags)
except:
logging.debug("Failed script %s, Traceback: ", script_path, exc_info=True)
return values
output = platform_btou(p.stdout.read())
ret = p.wait()
logging.info('Pre-queue script returns %s and output=\n%s', ret, output)
if ret == 0:
n = 0
for line in output.split('\n'):
line = line.strip('\r\n \'"')
if n < len(values) and line:
values[n] = line
n += 1
accept = int_conv(values[0])
if accept < 1:
logging.info('Pre-Q refuses %s', nzo.final_name_pw_clean)
elif accept == 2:
logging.info('Pre-Q accepts&fails %s', nzo.final_name_pw_clean)
else:
logging.info('Pre-Q accepts %s', nzo.final_name_pw_clean)
return values
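# Illustrative sketch of how the script output is consumed (example output is
# invented here, not from the original source): a pre-queue script printing
#
#     1
#     My.New.Jobname
#
# keeps the job accepted (values[0]) and overrides only the job name
# (values[1]); further non-empty lines would override pp, cat, script and
# priority in that order.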
def list2cmdline(lst):
""" convert list to a cmd.exe-compatible command string """
nlst = []
for arg in lst:
if not arg:
nlst.append('""')
else:
nlst.append('"%s"' % arg)
return ' '.join(nlst)
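# Illustrative behaviour, not part of the original code:
#
#     list2cmdline(['C:\\Tools\\unrar.exe', 'x', '-p-', 'my archive.rar'])
#     # -> '"C:\\Tools\\unrar.exe" "x" "-p-" "my archive.rar"'
#
# Empty arguments are kept as "" so positional order is preserved.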
def is_sevenfile(path):
""" Return True if path has proper extension and 7Zip is installed """
return SEVEN_COMMAND and os.path.splitext(path)[1].lower() == '.7z'
class SevenZip:
""" Minimal emulation of ZipFile class for 7Zip """
def __init__(self, path):
self.path = path
def namelist(self):
""" Return list of names in 7Zip """
names = []
# Future extension: use '-sccUTF-8' to get names in UTF8 encoding
command = [SEVEN_COMMAND, 'l', '-p', '-y', '-slt', self.path]
stup, need_shell, command, creationflags = build_command(command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
_ = p.wait()
re_path = re.compile('^Path = (.+)')
for line in output.split('\n'):
m = re_path.search(line)
if m:
names.append(m.group(1).strip('\r'))
if names:
# Remove name of archive itself
del names[0]
return names
def read(self, name):
""" Read named file from 7Zip and return data """
command = [SEVEN_COMMAND, 'e', '-p', '-y', '-so', self.path, name]
stup, need_shell, command, creationflags = build_command(command)
# Ignore diagnostic output, otherwise it will be appended to content
if sabnzbd.WIN32:
stderr = open('nul', 'w')
else:
stderr = open('/dev/null', 'w')
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
_ = p.wait()
stderr.close()
return output
def close(self):
""" Close file """
pass
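# Illustrative usage sketch, not part of the original code, mirroring the
# zipfile.ZipFile pattern this class emulates:
#
#     if is_sevenfile(path):
#         archive = SevenZip(path)
#         for name in archive.namelist():
#             data = archive.read(name)
#         archive.close()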
def run_simple(cmd):
""" Run simple external command and return output """
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
txt = platform_btou(p.stdout.read())
p.wait()
return txt
|
py | 1a4c61159ce623773ea543c9d0ea0aca43b20f9d | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 02:51:53 2016
@author: utkarsh
"""
# FREQEST - Estimate fingerprint ridge frequency within image block
#
# Function to estimate the fingerprint ridge frequency within a small block
# of a fingerprint image. This function is used by RIDGEFREQ
#
# Usage:
# freqim = freqest(im, orientim, windsze, minWaveLength, maxWaveLength)
#
# Arguments:
# im - Image block to be processed.
# orientim - Ridge orientation image of image block.
# windsze - Window length used to identify peaks. This should be
# an odd integer, say 3 or 5.
# minWaveLength, maxWaveLength - Minimum and maximum ridge
# wavelengths, in pixels, considered acceptable.
#
# Returns:
# freqim - An image block the same size as im with all values
# set to the estimated ridge spatial frequency. If a
# ridge frequency cannot be found, or cannot be found
# within the limits set by min and max Wavelength
# freqim is set to zeros.
#
# Suggested parameters for a 500dpi fingerprint image
# freqim = frequest(im, orientim, 5, 5, 15)
#
# See also: RIDGEFREQ, RIDGEORIENT, RIDGESEGMENT
# REFERENCES
# Peter Kovesi
# School of Computer Science & Software Engineering
# The University of Western Australia
# pk at csse uwa edu au
# http://www.csse.uwa.edu.au/~pk
import numpy as np
import math
import scipy.ndimage
# import cv2
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
rows, cols = np.shape(im)
# Find mean orientation within the block. This is done by averaging the
# sines and cosines of the doubled angles before reconstructing the
# angle again. This avoids wraparound problems at the origin.
cosorient = np.mean(np.cos(2 * orientim))
sinorient = np.mean(np.sin(2 * orientim))
orient = math.atan2(sinorient, cosorient) / 2
# Rotate the image block so that the ridges are vertical
# ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
# rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')
# Now crop the image so that the rotated image does not contain any
# invalid regions. This prevents the projection down the columns
# from being mucked up.
cropsze = int(np.fix(rows / np.sqrt(2)))
offset = int(np.fix((rows - cropsze) / 2))
rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]
# Sum down the columns to get a projection of the grey values down
# the ridges.
proj = np.sum(rotim, axis=0)
dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))
temp = np.abs(dilation - proj)
peak_thresh = 2
maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
maxind = np.where(maxpts)
rows_maxind, cols_maxind = np.shape(maxind)
    # Determine the spatial frequency of the ridges by dividing the
# distance between the 1st and last peaks by the (No of peaks-1). If no
# peaks are detected, or the wavelength is outside the allowed bounds,
# the frequency image is set to 0
if cols_maxind < 2:
freqim = np.zeros(im.shape)
else:
NoOfPeaks = cols_maxind
waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
if minWaveLength <= waveLength <= maxWaveLength:
freqim = 1 / np.double(waveLength) * np.ones(im.shape)
else:
freqim = np.zeros(im.shape)
return freqim
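# Illustrative usage sketch (not part of the original module): for a 500dpi
# fingerprint, a normalised image block `im` and its ridge orientation block
# `orientim` would typically be processed as
#
#     freqim = frequest(im, orientim, windsze=5, minWaveLength=5, maxWaveLength=15)
#
# giving either an all-zero block or a constant block holding the estimated
# ridge frequency (1 / wavelength in pixels).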
|
py | 1a4c615c61b741c93b24c5e4479ffc49aeb8ec3d | from automata_tools.Automata import Automata
from typing import Dict, List, Union, Callable
import numpy as np
class WFA:
dfa: Automata
def __init__(self, dfa: Automata, word2index: Dict[str, int],
dfa_to_tensor: Callable) -> None:
self.dfa = dfa
self.dfaDict = self.dfa.to_dict()
wfaTensor, wfaState2idx, wildcardMatrix, language = dfa_to_tensor(
self.dfaDict, word2index)
self.word2index = word2index
self.wfaTensor = wfaTensor + wildcardMatrix # word sparse transition matrix and wildcard all 1 transition matrix
self.wfaState2idx = wfaState2idx
self.language = language
self.tokenizer = lambda inputText: self.dfa.tokenizer(inputText)
def setTokenizer(self, tokenizerFunction: Callable[[str], List[str]]):
self.tokenizer = tokenizerFunction
def getStateLength(self) -> int:
return len(self.dfaDict['states'])
def getFinalStateIndex(self) -> List[int]:
return [self.wfaState2idx[i] for i in self.dfaDict['finalStates']]
def getStartStateIndex(self) -> int:
return self.wfaState2idx[self.dfaDict['startstate']]
def execute(self, inputWords: Union[str, np.array]) -> bool:
if isinstance(inputWords, str):
inputWordTensor = np.array(
list(
map(lambda word: self.word2index[word],
self.tokenizer(inputWords))))
else:
inputWordTensor = inputWords
stateTensor = np.zeros((self.getStateLength(), 1))
stateTensor[self.getStartStateIndex(
)] = 1 # set initial state's probability to 1
        # every word has an SxS transition matrix, where S = self.getStateLength()
for inputIndex in range(len(inputWordTensor)):
inputWordIndex = inputWordTensor[inputIndex]
transitionMatrixOfCurrentInputWord = self.wfaTensor[int(
inputWordIndex)].transpose()
stateTensor = np.dot(transitionMatrixOfCurrentInputWord,
stateTensor)
for index in self.getFinalStateIndex():
if int(stateTensor[index]) >= 1:
return True
return False |
py | 1a4c61c6148763f62204d15e4582ae02f14c3f60 | #!/usr/bin/env python3
"""
Generate MUX.
MUXes come in two types,
1) Configurable via logic signals,
2) Statically configured by PnR (called "routing") muxes.
"""
import argparse
import io
import itertools
import lxml.etree as ET
import math
import os
import sys
from lib import mux as mux_lib
from lib.argparse_extra import ActionStoreBool
from lib.asserts import assert_eq
parser = argparse.ArgumentParser(
description='Generate a MUX wrapper.',
fromfile_prefix_chars='@',
prefix_chars='-~')
parser.add_argument(
'--verbose',
'--no-verbose',
action=ActionStoreBool,
default=os.environ.get('V', '') == '1',
help="Print lots of information about the generation.")
parser.add_argument('--width', type=int, default=8, help="Width of the MUX.")
parser.add_argument(
'--data-width', type=int, default=1, help="data width of the MUX.")
parser.add_argument(
'--type',
choices=['logic', 'routing'],
default='logic',
help="Type of MUX.")
parser.add_argument(
'--split-inputs',
action=ActionStoreBool,
default=False,
help="Split the inputs into separate signals")
parser.add_argument(
'--split-selects',
action=ActionStoreBool,
default=False,
help="Split the selects into separate signals")
parser.add_argument(
'--name-mux', type=str, default='MUX', help="Name of the mux.")
parser.add_argument(
'--name-input',
type=str,
default='I',
help="Name of the input values for the mux.")
parser.name_inputs = parser.add_argument(
'--name-inputs',
type=str,
default=None,
help=
"Comma deliminator list for the name of each input to the mux (implies --split-inputs)."
)
parser.add_argument(
'--name-output',
type=str,
default='O',
help="Name of the output value for the mux.")
parser.add_argument(
'--name-select',
type=str,
default='S',
help="Name of the select parameter for the mux.")
parser.name_selects = parser.add_argument(
'--name-selects',
type=str,
default=None,
help=
"Comma deliminator list for the name of each select to the mux (implies --split-selects)."
)
parser.add_argument(
'--order',
choices=[''.join(x) for x in itertools.permutations('ios')] +
[''.join(x) for x in itertools.permutations('io')],
default='iso',
help=
"""Order of the arguments for the MUX. (i - Inputs, o - Output, s - Select)"""
)
parser.add_argument(
'--outdir',
default=None,
help="""Directory to output generated content too.""")
parser.add_argument(
'--outfilename',
default=None,
help="""Filename to output generated content too.""")
parser.add_argument(
'--comment', default=None, help="""Add some type of comment to the mux.""")
parser.add_argument(
'--num_pb', default=1, help="""Set the num_pb for the mux.""")
parser.add_argument(
'--subckt', default=None, help="""Override the subcircuit name.""")
def main(argv):
call_args = list(argv)
args = parser.parse_args()
def output_block(name, s):
if args.verbose:
print()
print(name, '-' * (75 - (len(name) + 1)))
print(s, end="")
if s[-1] != '\n':
print()
print('-' * 75)
args.width_bits = mux_lib.clog2(args.width)
def normpath(p, to=None):
p = os.path.realpath(os.path.abspath(p))
if to is None:
return p
return os.path.relpath(p, normpath(to))
mypath = normpath(__file__)
if not args.outdir:
outdir = os.path.join(".", args.name_mux.lower())
else:
outdir = args.outdir
outdir = normpath(outdir)
mydir = normpath(os.path.dirname(mypath), to=outdir)
mux_dir = normpath(os.path.join(mydir, '..', 'vpr', 'muxes'), to=outdir)
buf_dir = normpath(os.path.join(mydir, '..', 'vpr', 'buf'), to=outdir)
if args.data_width > 1 and not args.split_inputs:
assert False, "data_width(%d) > 1 requires using split_inputs" % (
args.data_width)
if args.name_inputs:
assert_eq(args.name_input, parser.get_default("name_input"))
args.name_input = None
args.split_inputs = True
names = args.name_inputs.split(',')
assert len(names) == args.width, "%s input names, but %s needed." % (
names, args.width)
args.name_inputs = names
elif args.split_inputs:
args.name_inputs = [
args.name_input + str(i) for i in range(args.width)
]
parser.name_inputs.default = args.name_inputs
assert_eq(parser.get_default("name_inputs"), args.name_inputs)
if args.name_selects:
assert_eq(args.name_select, parser.get_default("name_select"))
args.name_select = None
args.split_selects = True
names = args.name_selects.split(',')
assert len(
names) == args.width_bits, "%s select names, but %s needed." % (
names, args.width_bits)
args.name_selects = names
elif args.split_selects:
args.name_selects = [
args.name_select + str(i) for i in range(args.width_bits)
]
parser.name_selects.default = args.name_selects
assert_eq(parser.get_default("name_selects"), args.name_selects)
os.makedirs(outdir, exist_ok=True)
# Generated headers
generated_with = """
Generated with %s
""" % mypath
if args.comment:
generated_with = "\n".join([args.comment, generated_with])
# XML Files can't have "--" in them, so instead we use ~~
xml_comment = generated_with.replace("--", "~~")
if not args.outfilename:
args.outfilename = args.name_mux.lower()
model_xml_filename = '%s.model.xml' % args.outfilename
pbtype_xml_filename = '%s.pb_type.xml' % args.outfilename
sim_filename = '%s.sim.v' % args.outfilename
output_files = [
model_xml_filename, pbtype_xml_filename, sim_filename,
]
# ------------------------------------------------------------------------
# Work out the port and their names
# ------------------------------------------------------------------------
port_names = []
for i in args.order:
if i == 'i':
if args.split_inputs:
port_names.extend(
mux_lib.ModulePort(mux_lib.MuxPinType.INPUT,
args.name_inputs[
j], 1, '[%i]' % j, args.data_width)
for j in range(args.width))
else:
# verilog range bounds are inclusive and convention is [<width-1>:0]
port_names.append(
mux_lib.ModulePort(mux_lib.MuxPinType.INPUT,
args.name_input, args.width,
'[%i:0]' % (args.width - 1)))
elif i == 's':
if args.split_selects:
port_names.extend(
mux_lib.ModulePort(mux_lib.MuxPinType.SELECT,
args.name_selects[j], 1, '[%i]' % j)
for j in range(args.width_bits))
else:
# verilog range bounds are inclusive and convention is [<width-1>:0]
assert args.name_select is not None
port_names.append(
mux_lib.ModulePort(mux_lib.MuxPinType.SELECT,
args.name_select, args.width_bits,
'[%i:0]' % (args.width_bits - 1)))
elif i == 'o':
port_names.append(
mux_lib.ModulePort(mux_lib.MuxPinType.OUTPUT, args.name_output,
1, '', args.data_width))
# ------------------------------------------------------------------------
# Generate the sim.v Verilog module
# ------------------------------------------------------------------------
defs = {'i': 'input wire', 's': 'input wire', 'o': 'output wire'}
sim_pathname = os.path.join(outdir, sim_filename)
with open(sim_pathname, "w") as f:
module_args = []
for port in port_names:
if args.type == 'routing' and port.pin_type == mux_lib.MuxPinType.SELECT:
continue
module_args.append(port.name)
mux_prefix = {'logic': '', 'routing': 'r'}[args.type]
mux_class = {'logic': 'mux', 'routing': 'routing'}[args.type]
f.write("/* ")
f.write("\n * ".join(generated_with.splitlines()))
f.write("\n */\n\n")
f.write('`include "%s/%s/%smux%i/%smux%i.sim.v"\n' % (
mux_dir,
'logic',
'',
args.width,
'',
args.width,
))
f.write("\n")
f.write('(* blackbox *) (* CLASS="%s" *)\n' % mux_class)
f.write("module %s(%s);\n" % (args.name_mux, ", ".join(module_args)))
previous_type = None
for port in port_names:
if previous_type != port.pin_type:
f.write("\n")
previous_type = port.pin_type
if args.type == 'routing' and port.pin_type == mux_lib.MuxPinType.SELECT:
f.write(port.getParameterString())
continue
else:
f.write(port.getDefinition())
f.write("\n")
if args.data_width > 1:
f.write('\tgenvar\tii;\n')
f.write('\tfor(ii=0; ii<%d; ii++) begin: bitmux\n' %
(args.data_width))
f.write('\tMUX%s mux (\n' % args.width)
for i in range(0, args.width):
j = 0
for port in port_names:
if port.pin_type != mux_lib.MuxPinType.INPUT:
continue
if j + port.width <= i:
j += port.width
continue
break
if port.width == 1:
if args.data_width > 1:
f.write('\t\t.I%i(%s[ii]),\n' % (i, port.name))
else:
f.write('\t\t.I%i(%s),\n' % (i, port.name))
else:
f.write('\t\t.I%i(%s[%i]),\n' % (i, port.name, i - j))
for i in range(0, args.width_bits):
j = 0
for port in port_names:
if port.pin_type != mux_lib.MuxPinType.SELECT:
continue
if j + port.width < i:
j += port.width
continue
break
if port.width == 1:
f.write('\t\t.S%i(%s),\n' % (i, port.name))
else:
f.write('\t\t.S%i(%s[%i]),\n' % (i, port.name, i - j))
for port in port_names:
if port.pin_type != mux_lib.MuxPinType.OUTPUT:
continue
break
assert_eq(port.width, 1)
if args.data_width > 1:
f.write('\t\t.O(%s[ii])\n\t);\n' % port.name)
else:
f.write('\t\t.O(%s)\n\t);\n' % port.name)
if args.data_width > 1:
f.write('end\n')
f.write('endmodule\n')
output_block(sim_filename, open(sim_pathname).read())
if args.type == 'logic':
subckt = args.subckt or args.name_mux
assert subckt
elif args.type == 'routing':
assert args.subckt is None
subckt = None
# ------------------------------------------------------------------------
# Generate the Model XML form.
# ------------------------------------------------------------------------
def xml_comment_indent(n, s):
return ("\n" + " " * n).join(s.splitlines() + [""])
if args.type == 'logic':
models_xml = ET.Element('models')
models_xml.append(ET.Comment(xml_comment_indent(4, xml_comment)))
model_xml = ET.SubElement(models_xml, 'model', {'name': subckt})
input_ports = ET.SubElement(model_xml, 'input_ports')
output_ports = ET.SubElement(model_xml, 'output_ports')
for port in port_names:
if port.pin_type in (mux_lib.MuxPinType.INPUT,
mux_lib.MuxPinType.SELECT):
ET.SubElement(
input_ports, 'port', {
'name':
port.name,
'combinational_sink_ports':
','.join(
port.name for port in port_names
if port.pin_type in (mux_lib.MuxPinType.OUTPUT, )),
})
elif port.pin_type in (mux_lib.MuxPinType.OUTPUT, ):
ET.SubElement(output_ports, 'port', {'name': args.name_output})
models_str = ET.tostring(models_xml, pretty_print=True).decode('utf-8')
else:
models_str = "<models><!-- No models for routing elements.--></models>"
output_block(model_xml_filename, models_str)
with open(os.path.join(outdir, model_xml_filename), "w") as f:
f.write(models_str)
# ------------------------------------------------------------------------
# Generate the pb_type XML form.
# ------------------------------------------------------------------------
pb_type_xml = mux_lib.pb_type_xml(
mux_lib.MuxType[args.type.upper()],
args.name_mux,
port_names,
subckt=subckt,
num_pb=args.num_pb,
comment=xml_comment_indent(4, xml_comment),
)
pb_type_str = ET.tostring(pb_type_xml, pretty_print=True).decode('utf-8')
output_block(pbtype_xml_filename, pb_type_str)
with open(os.path.join(outdir, pbtype_xml_filename), "w") as f:
f.write(pb_type_str)
print("Generated mux {} in {}".format(args.name_mux, outdir))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py | 1a4c620f501e5ee507f89d8ed1227534f80c2e3f | from .pdfview import PdfView
|
py | 1a4c6236065c91b7ec85f2a4a6cbac5cfecba562 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.providers_krb5_extended import ProvidersKrb5Extended # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestProvidersKrb5Extended(unittest.TestCase):
"""ProvidersKrb5Extended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProvidersKrb5Extended(self):
"""Test ProvidersKrb5Extended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.providers_krb5_extended.ProvidersKrb5Extended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4c64ba198eafbc0f62c636a473a7624990d111 | from setuptools import setup
with open('README.md', 'r') as fp:
long_desc = fp.read()
setup(
name='HTTPserver-mock',
version='2',
author='Tom YU Choe',
author_email='[email protected]',
description='a simple http-server mockup to test web crawler.',
long_description=long_desc,
url='https://github.com/YUChoe/HTTPserver-mock',
long_description_content_type="text/markdown",
py_modules=['HTTPserver_mock'],
package_dir={'': 'src'},
license='MIT',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
],
install_requires=[],
)
|
py | 1a4c64d44810c1ed4d1e7329214309ed8c851391 | # -*- coding: utf-8 -*-
import os, sys, json
path = os.path.join(os.path.dirname(__file__), '../lib/')
sys.path.insert(0, path)
import requests
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from curve import LineService
from curve.ttypes import *
import tempfile
class Channel:
client = None
host = "gd2.line.naver.jp"
http_query_path = "/S4"
channel_query_path = "/CH4"
UA = "Line/6.0.0 iPad4,1 9.0.2"
LA = "DESKTOPMAC 10.10.2-YOSEMITE-x64 MAC 4.5.0"
authToken = None
mid = None
channel_access_token = None
token = None
obs_token = None
refresh_token = None
def __init__(self, authToken):
self.authToken = authToken
self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+self.http_query_path)
self.transport.setCustomHeaders({ "User-Agent" : self.UA,
"X-Line-Application" : self.LA,
"X-Line-Access": self.authToken
})
self.transport.open()
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.client = LineService.Client(self.protocol)
self.mid = self.client.getProfile().mid
self.transport.path = self.channel_query_path
def login(self):
result = self.client.issueChannelToken("1341209950")
self.channel_access_token = result.channelAccessToken
self.token = result.token
self.obs_token = result.obsToken
self.refresh_token = result.refreshToken
print "channelAccessToken:" + result.channelAccessToken
print "token:" + result.token
print "obs_token:" + result.obsToken
print "refreshToken:" + result.refreshToken
def new_post(self, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text }
}
r = requests.post(
"http://" + self.host + "/mh/api/v24/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def postPhoto(self,text,path):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text ,"media" : [{u'objectId': u'F57144CF9ECC4AD2E162E68554D1A8BD1a1ab0t04ff07f6'}]}
}
r = requests.post(
"http://" + self.host + "/mh/api/v24/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def like(self, mid, postid, likeType=1001):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"likeType" : likeType,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(
"http://" + self.host + "/mh/api/v23/like/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def comment(self, mid, postid, text):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"commentText" : text,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(
"http://" + self.host + "/mh/api/v23/comment/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def activity(self, limit=20):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/tl/mapi/v21/activities?postLimit=" + str(limit),
headers = header
)
return r.json()
def getAlbum(self, gid):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/album/v3/albums?type=g&sourceType=TALKROOM&homeId=" + gid,
headers = header
)
return r.json()
def changeAlbumName(self,gid,name,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
payload = {
"title": name
}
r = requests.put(
"http://" + self.host + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
data = json.dumps(payload),
)
return r.json()
def deleteAlbum(self,gid,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.delete(
"http://" + self.host + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
)
return r.json()
def getNote(self,gid, commentLimit, likeLimit):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/api/v27/post/list.json?homeId=" + gid + "&commentLimit=" + commentLimit + "&sourceType=TALKROOM&likeLimit=" + likeLimit,
headers = header
)
return r.json()
def postNote(self, gid, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {"postInfo":{"readPermission":{"homeId":gid}},
"sourceType":"GROUPHOME",
"contents":{"text":text}
}
r = requests.post(
"http://" + self.host + "/mh/api/v27/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def getDetail(self, mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/ma/api/v1/userpopup/getDetail.json?userMid=" + mid,
headers = header
)
return r.json()
def getHome(self,mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/api/v27/post/list.json?homeId=" + mid + "&commentLimit=2&sourceType=LINE_PROFILE_COVER&likeLimit=6",
headers = header
)
return r.json()
def getCover(self,mid):
h = self.getHome(mid)
objId = h["result"]["homeInfo"]["objectId"]
return "http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + mid + "&oid=" + objId
def createAlbum(self,gid,name):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(
"http://" + self.host + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def createAlbum2(self,gid,name,path,oid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(
"http://" + self.host + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
#albumId = r.json()["result"]["items"][0]["id"]
#h = {
# "Content-Type": "application/x-www-form-urlencoded",
# "User-Agent" : self.UA,
# "X-Line-Mid" : gid,
# "X-Line-Album" : albumId,
# "x-lct" : self.channel_access_token,
#"x-obs-host" : "obs-jp.line-apps.com:443",
#}
#print r.json()
#files = {
# 'file': open(path, 'rb'),
#}
#p = {
# "userid" : gid,
# "type" : "image",
# "oid" : oid,
# "ver" : "1.0"
#}
#data = {
# 'params': json.dumps(p)
#}
#r = requests.post(
#"http://obs-jp.line-apps.com/oa/album/a/object_info.nhn:443",
#headers = h,
#data = data,
#files = files
#)
return r.json()
#cl.createAlbum("cea9d61ba824e937aaf91637991ac934b","ss3ai","kawamuki.png") |
py | 1a4c65a0e36d78c761ee23bdf2bd6e5c4574248d | from __future__ import annotations
import ipaddress
import json
import logging
import struct
import sys
import time
import tkinter
import zlib
from dataclasses import astuple
from pathlib import Path
from tkinter import messagebox, ttk
from typing import Optional, Tuple
import dns
import dns.resolver
from idlelib.tooltip import Hovertip
from twisted.internet import reactor, task, tksupport
from modules.Client import ClientInstance
from modules.Common import (CarInfo, Credidentials, DataQueue, NetData,
NetworkQueue, PitStop)
from modules.DriverInputs import DriverInputs
from modules.Server import ServerInstance
from modules.Strategy import StrategyUI
from modules.Telemetry import Telemetry, TelemetryRT, TelemetryUI
from modules.TyreGraph import PrevLapsGraph, TyreGraph
from modules.TyreSets import TyreSets, TyresSetData
from modules.Users import UserUI
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format="%(asctime)s.%(msecs)03d | %(name)s | %(message)s",
datefmt="%H:%M:%S")
_VERSION_ = "1.5.9"
class ConnectionPage(ttk.Frame):
def __init__(self, app: App, root):
ttk.Frame.__init__(self, master=root)
self.main_app = app
self.connection_path = "./Config/connection.json"
self.is_connected = None
self.connection_msg = ""
self.credis = None
self.is_connected_loop = task.LoopingCall(self.check_connection)
self.credidentials = None
key_check = ("saved_ip", "tcp_port", "udp_port", "username",
"driverID")
logging.info(f"Loading {self.connection_path}")
if Path(self.connection_path).is_file():
fp = open(self.connection_path, "r")
try:
self.credidentials = json.load(fp)
if (type(self.credidentials) is not dict or
tuple(self.credidentials.keys()) != key_check):
logging.info(f"Invalid connection.json file")
self.credidentials = None
except json.JSONDecodeError as msg:
self.credidentials = None
logging.info(f"JSON Error: {msg}")
fp.close()
else:
logging.info(f"{self.connection_path} not found")
self.credidentials = None
self.as_server = False
self.f_connection_info = tkinter.Frame(
self, bd=2, relief=tkinter.RIDGE)
self.f_connection_info.grid()
self.l_ip = tkinter.Label(self.f_connection_info, text="Address",
anchor=tkinter.E, width=10)
self.l_ip.grid(row=0, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "Address of the server host ip or domain", 10)
self.l_tcp_port = tkinter.Label(self.f_connection_info,
text="TCP port", anchor=tkinter.E,
width=10)
self.l_tcp_port.grid(row=1, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "TCP port of the host server (1024 - 10 000),"
" can be the same UDP", 10)
self.l_udp_port = tkinter.Label(self.f_connection_info,
text="UDP port", anchor=tkinter.E,
width=10)
self.l_udp_port.grid(row=2, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "UDP port of the host server (1024 - 10 000),"
" can be the same as TCP", 10)
self.l_username = tkinter.Label(self.f_connection_info,
text="Username",
anchor=tkinter.E, width=10)
self.l_username.grid(row=3, column=0, padx=5, pady=2)
Hovertip(self.l_username, "Your name in ACC", 10)
self.l_driverID = tkinter.Label(self.f_connection_info,
text="Driver ID",
anchor=tkinter.E, width=10)
self.l_driverID.grid(row=4, column=0, padx=5, pady=2)
Hovertip(self.l_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
if self.credidentials is None:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=[])
else:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=self.credidentials["saved_ip"])
self.cb_ip.grid(row=0, column=1, padx=5, pady=2)
self.e_tcp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_tcp_port.grid(row=1, column=1, padx=5, pady=2)
self.e_udp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_udp_port.grid(row=2, column=1, padx=5, pady=2)
self.e_username = tkinter.Entry(self.f_connection_info, width=30)
self.e_username.grid(row=3, column=1, padx=5, pady=2)
Hovertip(self.e_username, "Your name in ACC", 10)
self.e_driverID = tkinter.Entry(self.f_connection_info, width=30)
self.e_driverID.grid(row=4, column=1, padx=5, pady=2)
Hovertip(self.e_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
self.b_connect = tkinter.Button(self, text="Connect",
command=self.connect)
self.b_connect.grid(row=1, padx=10, pady=5)
if self.credidentials is not None:
self.e_tcp_port.insert(tkinter.END, self.credidentials["tcp_port"])
self.e_udp_port.insert(tkinter.END, self.credidentials["udp_port"])
self.e_username.insert(tkinter.END, self.credidentials["username"])
self.e_driverID.insert(tkinter.END, self.credidentials["driverID"])
else:
self.e_tcp_port.insert(tkinter.END, "4269")
self.e_udp_port.insert(tkinter.END, "4270")
logging.info("Displaying connection window")
def set_as_server(self) -> None:
self.cb_ip.set("127.0.0.1")
self.cb_ip["state"] = "disabled"
self.as_server = True
def set_as_client(self) -> None:
self.cb_ip.set("")
self.cb_ip["state"] = "normal"
self.as_server = False
def connect(self) -> None:
logging.info("Connect button pressed")
self.b_connect.config(state="disabled")
error_message = ""
ip = None
try:
ip = ipaddress.ip_address(self.cb_ip.get()).compressed
except ValueError:
logging.info("Querrying dns server...")
try:
results = dns.resolver.resolve(self.cb_ip.get())
for result in results:
logging.info(f"Found ip: {result.address}")
logging.info(f"Picking first dns answer: {results[0].address}")
ip = results[0].address
except dns.resolver.NXDOMAIN:
error_message += "Invalide IP address or Domain name\n"
except dns.resolver.NoAnswer:
error_message += ("DNS didn't replied to the request"
f" for {self.cb_ip.get()}")
except dns.resolver.NoNameservers:
error_message += "No DNS server available"
except dns.resolver.YXDOMAIN:
error_message += ("The query name is too long after "
"DNAME substitution")
if self.e_tcp_port.get().isnumeric():
self.e_tcp_port.config(background="White")
else:
self.e_tcp_port.config(background="Red")
error_message += "Invalide TCP port\n"
if self.e_udp_port.get().isnumeric():
self.e_udp_port.config(background="White")
else:
self.e_udp_port.config(background="Red")
error_message += "Invalide UDP port\n"
if self.e_username.get() != "":
self.e_username.config(background="White")
else:
self.e_username.config(background="Red")
error_message += "Invalide username\n"
driverID = self.e_driverID.get()
if driverID != "" and driverID.isnumeric() and 0 < int(driverID) <= 5:
self.e_driverID.config(background="White")
else:
self.e_driverID.config(background="Red")
            if driverID.isnumeric() and not (0 < int(driverID) <= 5):
error_message += ("Are you sure you are the driver N° "
f"{driverID} in your team ?")
else:
error_message += "Invalide driver ID\n"
if error_message == "":
logging.info("No error in the credidentials")
self.credits = Credidentials(
ip=ip,
tcp_port=int(self.e_tcp_port.get()),
udp_port=int(self.e_udp_port.get()),
username=self.e_username.get(),
driverID=int(self.e_driverID.get())
)
if self.as_server:
self.main_app.as_server(self.credits)
else:
self.main_app.connect_to_server(self.credits)
self.is_connected_loop.start(0.1)
logging.info("Waiting for connection confirmation")
else:
logging.info(f"Error: {error_message}")
messagebox.showerror("Error", error_message)
self.b_connect.config(state="normal")
def check_connection(self) -> None:
if self.is_connected is None:
return
if self.is_connected:
logging.info("Connected")
self.save_credidentials(self.credits)
else:
logging.info("Connection failed")
messagebox.showerror("Error", self.connection_msg)
self.b_connect.config(state="normal")
self.is_connected = None
self.is_connected_loop.stop()
def connected(self, succes: bool, error: str) -> None:
self.is_connected = succes
self.connection_msg = error
def save_credidentials(self, credits: Credidentials) -> None:
logging.info("Saving credidentials")
if self.credidentials is None:
saved_ip = [self.cb_ip.get()]
elif credits.ip not in self.credidentials["saved_ip"]:
saved_ip = [self.cb_ip.get(), *self.credidentials["saved_ip"]]
if len(saved_ip) > 5:
self.credidentials["saved_ip"].pop()
else:
saved_ip = self.credidentials["saved_ip"]
with open(self.connection_path, "w") as fp:
connection = {
"saved_ip": saved_ip,
"tcp_port": credits.tcp_port,
"udp_port": credits.udp_port,
"username": credits.username,
"driverID": credits.driverID,
}
json.dump(connection, fp, indent=4)
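        # Illustrative sketch of the file written above (example values only,
        # "SomeDriver" is a placeholder); the key order mirrors `key_check`
        # used when the file is re-loaded in __init__:
        # {
        #     "saved_ip": ["127.0.0.1"],
        #     "tcp_port": 4269,
        #     "udp_port": 4270,
        #     "username": "SomeDriver",
        #     "driverID": 1
        # }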
class App(tkinter.Tk):
def __init__(self) -> None:
tkinter.Tk.__init__(self)
tksupport.install(self)
self.geometry("830x580+0+0")
try:
with open("./Config/gui.json", "r") as fp:
self.gui_config = json.load(fp)
except FileNotFoundError:
print("APP: './Config/gui.json' not found.")
return
self.font = (self.gui_config["font"], self.gui_config["font_size"])
app_style = ttk.Style(self)
app_style.configure('.',
font=self.font,
background=self.gui_config["background_colour"],
foreground=self.gui_config["foreground_colour"])
app_style.configure('TNotebook.Tab', foreground="#000000")
app_style.configure('TButton', foreground="#000000")
app_style.configure('TCombobox', foreground="#000000")
app_style.configure("ActiveDriver.TLabel",
background=self.gui_config["active_driver_colour"])
app_style.configure("Users.TFrame", background="#000000")
app_style.configure("TelemetryGrid.TFrame", background="#000000")
app_style.configure("PressureInfo.TFrame", background="#000000")
app_style.configure("TEntry", foreground="#000000")
self.title(f"PyAccEngineer {_VERSION_}")
self.config(bg="Grey")
self.protocol("WM_DELETE_WINDOW", self.on_close)
# Networking
self.is_connected = False
self.client: Optional[ClientInstance] = None
self.server: Optional[ServerInstance] = None
self.net_queue = DataQueue([], [])
self.menu_bar = tkinter.Menu(self)
self.menu_bar.add_command(label="Connect",
command=self.show_connection_page,
font=self.font)
self.menu_bar.add_command(label="As Server",
command=lambda: self.show_connection_page(
True), font=self.font)
self.menu_bar.add_command(label="Disconnect",
command=self.disconnect, state="disabled",
font=self.font)
self.config(menu=self.menu_bar)
self.main_canvas = tkinter.Canvas(self)
self.main_frame = ttk.Frame(self)
self.hsb = ttk.Scrollbar(self)
self.vsb = ttk.Scrollbar(self)
self.main_canvas.config(xscrollcommand=self.hsb.set,
yscrollcommand=self.vsb.set,
highlightthickness=0)
self.hsb.config(orient=tkinter.HORIZONTAL,
command=self.main_canvas.xview)
self.vsb.config(orient=tkinter.VERTICAL,
command=self.main_canvas.yview)
self.hsb.pack(fill=tkinter.X, side=tkinter.BOTTOM,
expand=tkinter.FALSE)
self.vsb.pack(fill=tkinter.Y, side=tkinter.RIGHT,
expand=tkinter.FALSE)
self.main_canvas.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.main_canvas.create_window(0, 0, window=self.main_frame,
anchor=tkinter.NW)
self.user_ui = UserUI(self.main_frame)
self.user_ui.grid(row=1, column=0)
self.tab_control = ttk.Notebook(self.main_frame)
self.tab_control.grid(row=0, column=0, pady=3)
self.f_connection_ui = ttk.Frame(self.tab_control)
self.f_connection_ui.pack(fill=tkinter.BOTH, expand=1)
self.connection_page = ConnectionPage(self, self.f_connection_ui)
self.connection_page.place(anchor=tkinter.CENTER,
in_=self.f_connection_ui,
relx=.5, rely=.5)
# Center StrategyUI in the notebook frame
f_strategy_ui = ttk.Frame(self.tab_control)
f_strategy_ui.pack(fill=tkinter.BOTH, expand=1)
self.strategy_ui = StrategyUI(f_strategy_ui, self.gui_config)
self.strategy_ui.place(anchor=tkinter.CENTER, in_=f_strategy_ui,
relx=.5, rely=.5)
self.telemetry_ui = TelemetryUI(self.tab_control)
self.telemetry_ui.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.driver_inputs = DriverInputs(self.tab_control)
self.driver_inputs.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.tyre_graph = TyreGraph(self.tab_control, self.gui_config)
self.tyre_graph.pack(fill=tkinter.BOTH, expand=1)
self.prev_lap_graph = PrevLapsGraph(self.tab_control, self.gui_config)
self.prev_lap_graph.pack(fill=tkinter.BOTH, expand=1)
self.tyre_sets = TyreSets(self.tab_control, self.gui_config)
self.tyre_sets.pack(fill=tkinter.BOTH, expand=1)
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.add(f_strategy_ui, text="Strategy")
self.tab_control.add(self.telemetry_ui, text="Telemetry")
self.tab_control.add(self.driver_inputs, text="Driver Inputs")
self.tab_control.add(self.tyre_graph, text="Pressures")
self.tab_control.add(self.prev_lap_graph, text="Previous Laps")
self.tab_control.add(self.tyre_sets, text="Tyre sets")
self.tab_control.hide(0)
self.last_time = time.time()
self.rt_last_time = time.time()
self.rt_min_delta = self.gui_config["driver_input_speed"]
self.min_delta = 0.5
self.last_telemetry = time.time()
self.telemetry_timeout = 2
logging.info("Main UI created.")
self.client_loopCall = task.LoopingCall(self.client_loop)
self.client_loopCall.start(0.01)
self.eval('tk::PlaceWindow . center')
self.updateScrollRegion()
def updateScrollRegion(self):
self.main_canvas.update_idletasks()
self.main_canvas.config(scrollregion=self.main_frame.bbox())
def client_loop(self) -> None:
selected_tab_name = self.tab_control.tab(self.tab_control.select(),
"text")
if selected_tab_name == "Driver Inputs":
if not self.driver_inputs.is_animating:
self.driver_inputs.start_animation()
else:
if self.driver_inputs.is_animating:
self.driver_inputs.stop_animation()
if selected_tab_name == "Pressures":
if not self.tyre_graph.is_animating:
self.tyre_graph.start_animation()
else:
if self.tyre_graph.is_animating:
self.tyre_graph.stop_animation()
for element in self.net_queue.q_out:
if element.data_type == NetworkQueue.ConnectionReply:
logging.info("Received Connection reply for server")
succes = bool(element.data[0])
msg_lenght = element.data[1]
msg = element.data[2:2 + msg_lenght]
self.connection_page.connected(succes, msg)
self.mb_connected(succes)
self.is_connected = succes
if not succes:
self.client.close()
elif element.data_type == NetworkQueue.ServerData:
server_data = CarInfo.from_bytes(element.data)
is_first_update = self.strategy_ui.server_data is None
self.strategy_ui.server_data = server_data
if is_first_update:
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Strategy:
logging.info("Received: Strategy")
self.strategy_ui.b_set_strat.config(state="disabled")
asm_data = self.strategy_ui.asm.read_shared_memory()
pit_stop = PitStop.from_bytes(element.data)
self.strategy_ui.save_strategy(pit_stop)
if asm_data is not None:
self.strategy_ui.apply_strategy(pit_stop)
elif element.data_type == NetworkQueue.StategyHistory:
self.strategy_ui.clear_strategy_history()
strategy_count = element.data[0]
byte_index = 1
for _ in range(strategy_count):
strat = PitStop.from_bytes(element.data[byte_index:])
self.strategy_ui.save_strategy(strat)
byte_index += PitStop.byte_size
elif element.data_type == NetworkQueue.StrategyDone:
logging.info("Received: Strategy Done")
self.strategy_ui.b_set_strat.config(state="normal")
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Telemetry:
telemetry, err = Telemetry.from_bytes(element.data)
if (telemetry is None):
messagebox.showerror("Unexpected error", err)
self.on_close()
return
self.telemetry_ui.update_values(telemetry)
self.tyre_graph.update_data(telemetry)
self.strategy_ui.updade_telemetry_data(telemetry)
self.driver_inputs.update_lap(telemetry.lap)
if not self.strategy_ui.is_driver_active:
self.strategy_ui.is_driver_active = True
self.user_ui.set_active(telemetry.driver)
self.last_telemetry = time.time()
elif element.data_type == NetworkQueue.TelemetryRT:
telemetry = TelemetryRT.from_bytes(element.data)
self.driver_inputs.update_values(telemetry)
elif element.data_type == NetworkQueue.UpdateUsers:
logging.info("Received user update")
user_update = element.data
nb_users = user_update[0]
self.user_ui.reset()
self.strategy_ui.reset_drivers()
index = 1
for _ in range(nb_users):
lenght = user_update[index]
index += 1
name = user_update[index:index+lenght].decode("utf-8")
index += lenght
driverID = user_update[index]
index += 1
self.user_ui.add_user(name, driverID)
self.strategy_ui.add_driver(name, driverID)
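                # Payload layout inferred from the parsing above (sketch):
                #   byte 0: number of users
                #   then, per user: [name length][utf-8 name bytes][driverID]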
elif element.data_type == NetworkQueue.TyreSets:
data = zlib.decompress(element.data)
tyres_data = []
nb_of_set = data[0]
byte_index = 1
for _ in range(nb_of_set):
tyre_info = TyresSetData.from_bytes(
data[byte_index:byte_index+TyresSetData.byte_size])
tyres_data.append(tyre_info)
byte_index += TyresSetData.byte_size
self.tyre_sets.update_tyre_set_data(tyres_data)
self.net_queue.q_out.clear()
if not self.is_connected:
return
if not self.strategy_ui.is_connected:
self.strategy_ui.is_connected = True
if self.telemetry_ui.driver_swap or self.user_ui.active_user is None:
if self.telemetry_ui.current_driver is not None:
self.user_ui.set_active(self.telemetry_ui.current_driver)
self.telemetry_ui.driver_swap = False
self.strategy_ui.set_driver(self.telemetry_ui.current_driver)
rt_delta_time = time.time() - self.rt_last_time
delta_time = time.time() - self.last_time
if (self.strategy_ui.is_driver_active and
time.time() > self.last_telemetry + self.telemetry_timeout):
logging.info("Telemetry timeout, not received "
f"telemetry for {self.telemetry_timeout}s")
self.strategy_ui.is_driver_active = False
self.user_ui.remove_active()
self.telemetry_ui.current_driver = None
asm_data = self.strategy_ui.asm.read_shared_memory()
if asm_data is not None:
if self.rt_min_delta < rt_delta_time:
self.rt_last_time = time.time()
telemetry_rt = TelemetryRT(
asm_data.Physics.gas,
asm_data.Physics.brake,
asm_data.Physics.steer_angle,
asm_data.Physics.gear,
asm_data.Physics.speed_kmh
)
self.net_queue.q_in.append(NetData(NetworkQueue.TelemetryRT,
telemetry_rt.to_bytes()))
if self.min_delta < delta_time:
self.last_time = time.time()
infos = CarInfo(
*astuple(asm_data.Graphics.mfd_tyre_pressure),
asm_data.Graphics.mfd_fuel_to_add,
asm_data.Static.max_fuel,
asm_data.Graphics.mfd_tyre_set)
self.net_queue.q_in.append(NetData(NetworkQueue.CarInfoData,
infos.to_bytes()))
# Telemetry
name = asm_data.Static.player_name.split("\x00")[0]
surname = asm_data.Static.player_surname.split("\x00")[0]
driver = f"{name} {surname}"
telemetry_data = Telemetry(
driver,
asm_data.Graphics.completed_lap,
asm_data.Physics.fuel,
asm_data.Graphics.fuel_per_lap,
asm_data.Graphics.fuel_estimated_laps,
asm_data.Physics.pad_life,
asm_data.Physics.disc_life,
asm_data.Graphics.current_time,
asm_data.Graphics.best_time,
asm_data.Graphics.last_time,
asm_data.Graphics.is_in_pit,
asm_data.Graphics.is_in_pit_lane,
asm_data.Graphics.session_type,
asm_data.Graphics.driver_stint_time_left,
asm_data.Physics.wheel_pressure,
asm_data.Physics.tyre_core_temp,
asm_data.Physics.brake_temp,
asm_data.Graphics.rain_tyres,
asm_data.Graphics.session_time_left,
asm_data.Graphics.track_grip_status,
asm_data.Physics.front_brake_compound,
asm_data.Physics.rear_brake_compound,
asm_data.Physics.car_damage,
asm_data.Graphics.rain_intensity,
asm_data.Physics.suspension_damage,
asm_data.Graphics.current_sector_index,
asm_data.Graphics.last_sector_time,
asm_data.Graphics.is_valid_lap,
asm_data.Physics.air_temp,
asm_data.Physics.road_temp,
asm_data.Graphics.wind_speed,
asm_data.Graphics.driver_stint_total_time_left,
asm_data.Graphics.current_tyre_set,
)
self.net_queue.q_in.append(NetData(NetworkQueue.Telemetry,
telemetry_data.to_bytes()))
if self.strategy_ui.strategy is not None:
logging.info("Sending strategy")
strategy = self.strategy_ui.strategy
self.strategy_ui.strategy = None
self.net_queue.q_in.append(NetData(NetworkQueue.StrategySet,
strategy.to_bytes()))
if self.strategy_ui.strategy_ok:
logging.info("Send strategy Done")
self.net_queue.q_in.append(NetData(NetworkQueue.StrategyDone))
self.strategy_ui.strategy_ok = False
if self.tyre_sets.updated:
data = b""
data += struct.pack("!B", len(self.tyre_sets.tyres_data))
for tyre_set in self.tyre_sets.tyres_data:
data += tyre_set.to_bytes()
data_compressed = zlib.compress(data)
print(f"{len(data)} vs {len(data_compressed)}")
self.net_queue.q_in.append(NetData(NetworkQueue.TyreSets,
data_compressed))
self.tyre_sets.updated = False
logging.info("Sending tyre set data")
def show_connection_page(self, as_server: bool = False) -> None:
logging.info("Show connection page")
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.select(0)
if as_server:
self.connection_page.set_as_server()
else:
self.connection_page.set_as_client()
def connect_to_server(self, credits: Credidentials) -> None:
logging.info("Creating a ClientInstance connecting"
f" to {credits.ip}:{credits.tcp_port}")
self.client = ClientInstance(credits, self.net_queue)
def as_server(self, credis: Credidentials) -> Tuple[bool, str]:
logging.info("Creating a ServerInstance")
self.server = ServerInstance(credis.tcp_port, credis.udp_port)
self.connect_to_server(credis)
def mb_connected(self, state: bool) -> None:
if state:
self.menu_bar.entryconfig("Disconnect", state="active")
self.menu_bar.entryconfig("Connect", state="disabled")
self.menu_bar.entryconfig("As Server", state="disabled")
self.tab_control.hide(0)
else:
self.menu_bar.entryconfig("Disconnect", state="disabled")
self.menu_bar.entryconfig("Connect", state="active")
self.menu_bar.entryconfig("As Server", state="active")
def disconnect(self) -> None:
logging.info("Disconnecting")
self.stop_networking()
self.mb_connected(False)
self.strategy_ui.reset()
self.user_ui.reset()
self.tyre_graph.reset()
def stop_networking(self) -> None:
if self.is_connected:
self.client.close()
self.is_connected = False
logging.info("Client stopped.")
if self.server is not None:
self.server.close()
self.server = None
logging.info("Server stopped.")
def on_close(self) -> None:
logging.info("Closing the app")
self.strategy_ui.close()
self.tyre_graph.close()
self.prev_lap_graph.close()
self.tyre_sets.close()
self.disconnect()
self.client_loopCall.stop()
tksupport.uninstall()
reactor.stop()
self.destroy()
logging.info("App closed")
def create_gui() -> None:
App()
def main():
reactor.callLater(0, create_gui)
reactor.run()
if __name__ == "__main__":
main()
|
py | 1a4c662ce7f216b17f3cd602e050b56a62af025b | from gremlin_python.driver import client, serializer
import sys, traceback
_gremlin_cleanup_graph = "g.V().drop()"
_gremlin_insert_vertices = [
"g.addV('person').property('id', 'thomas').property('firstName', 'Thomas').property('age', 44)",
"g.addV('person').property('id', 'mary').property('firstName', 'Mary').property('lastName', 'Andersen').property('age', 39)",
"g.addV('person').property('id', 'ben').property('firstName', 'Ben').property('lastName', 'Miller')",
"g.addV('person').property('id', 'robin').property('firstName', 'Robin').property('lastName', 'Wakefield')"
]
_gremlin_insert_edges = [
"g.V('thomas').addE('knows').to(g.V('mary'))",
"g.V('thomas').addE('knows').to(g.V('ben'))",
"g.V('ben').addE('knows').to(g.V('robin'))"
]
_gremlin_update_vertices = [
"g.V('thomas').property('age', 44)"
]
_gremlin_count_vertices = "g.V().count()"
_gremlin_traversals = {
"Get all persons older than 40" : "g.V().hasLabel('person').has('age', gt(40)).values('firstName', 'age')",
"Get all persons and their first name" : "g.V().hasLabel('person').values('firstName')",
"Get all persons sorted by first name" : "g.V().hasLabel('person').order().by('firstName', incr).values('firstName')",
"Get all persons that Thomas knows" : "g.V('thomas').out('knows').hasLabel('person').values('firstName')",
"People known by those who Thomas knows" : "g.V('thomas').out('knows').hasLabel('person').out('knows').hasLabel('person').values('firstName')",
"Get the path from Thomas to Robin" : "g.V('thomas').repeat(out()).until(has('id', 'robin')).path().by('firstName')"
}
_gremlin_drop_operations = {
"Drop Edge - Thomas no longer knows Mary" : "g.V('thomas').outE('knows').where(inV().has('id', 'mary')).drop()",
"Drop Vertex - Drop Thomas" : "g.V('thomas').drop()"
}
def cleanup_graph(client):
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_cleanup_graph))
callback = client.submitAsync(_gremlin_cleanup_graph)
if callback.result() is not None:
print("\tCleaned up the graph!")
print("\n")
def insert_vertices(client):
for query in _gremlin_insert_vertices:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tInserted this vertex:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query: {0}".format(query))
print("\n")
def insert_edges(client):
for query in _gremlin_insert_edges:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tInserted this edge:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query:\n\t{0}".format(query))
print("\n")
def update_vertices(client):
for query in _gremlin_update_vertices:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tUpdated this vertex:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query:\n\t{0}".format(query))
print("\n")
def count_vertices(client):
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_count_vertices))
callback = client.submitAsync(_gremlin_count_vertices)
if callback.result() is not None:
print("\tCount of vertices: {0}".format(callback.result().one()))
else:
print("Something went wrong with this query: {0}".format(_gremlin_count_vertices))
print("\n")
def execute_traversals(client):
for key in _gremlin_traversals:
print("\t{0}:".format(key))
print("\tRunning this Gremlin query:\n\t{0}\n".format(_gremlin_traversals[key]))
callback = client.submitAsync(_gremlin_traversals[key])
for result in callback.result():
print("\t{0}".format(str(result)))
print("\n")
def execute_drop_operations(client):
for key in _gremlin_drop_operations:
print("\t{0}:".format(key))
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_drop_operations[key]))
callback = client.submitAsync(_gremlin_drop_operations[key])
for result in callback.result():
print(result)
print("\n")
try:
client = client.Client('https://localhost:8901','g',
username="/dbs/Employee/colls/Employee",
password="C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==",
message_serializer=serializer.GraphSONSerializersV2d0()
)
print("Welcome to Azure Cosmos DB + Gremlin on Python!")
# Drop the entire Graph
input("We're about to drop whatever graph is on the server. Press any key to continue...")
cleanup_graph(client)
# Insert all vertices
input("Let's insert some vertices into the graph. Press any key to continue...")
insert_vertices(client)
# Create edges between vertices
input("Now, let's add some edges between the vertices. Press any key to continue...")
insert_edges(client)
# Update a couple of vertices
input("Ah, sorry. I made a mistake. Let's change the ages of these two vertices. Press any key to continue...")
update_vertices(client)
# Count all vertices
input("Okay. Let's count how many vertices we have. Press any key to continue...")
count_vertices(client)
# Execute traversals and get results
input("Cool! Let's run some traversals on our graph. Press any key to continue...")
execute_traversals(client)
# Drop a few vertices and edges
input("So, life happens and now we will make some changes to the graph. Press any key to continue...")
execute_drop_operations(client)
# Count all vertices again
input("How many vertices do we have left? Press any key to continue...")
count_vertices(client)
except Exception as e:
print('There was an exception: {0}'.format(e))
traceback.print_exc(file=sys.stdout)
sys.exit(1)
print("\nAnd that's all! Sample complete")
input("Press Enter to continue...")
|
py | 1a4c66df8cd193e5c393c019ee8603944a4fbb08 | import pandas as pd
from finvizfinance.util import webScrap, numberCovert, NUMBER_COL, util_dict
BASE_URL = 'https://finviz.com/screener.ashx?v={screener}{filter}&ft=4&o={order}&r={row}'
FILTER_DICT = util_dict['filter']
def set_filters(filters_dict):
"""Set filters.
Args:
filters_dict(dict): dictionary of filters
Returns:
url_filter(str): filter string for url
"""
filters = []
for key, value in filters_dict.items():
if key not in FILTER_DICT:
filter_keys = list(FILTER_DICT.keys())
raise ValueError("Invalid filter '{}'. Possible filter: {}".format(key, filter_keys))
if value not in FILTER_DICT[key]['option']:
filter_options = list(FILTER_DICT[key]['option'].keys())
raise ValueError("Invalid filter option '{}'. Possible filter options: {}".format(value,
filter_options))
prefix = FILTER_DICT[key]['prefix']
urlcode = FILTER_DICT[key]['option'][value]
if urlcode != '':
filters.append('{}_{}'.format(prefix, urlcode))
url_filter = ''
if len(filters) != 0:
url_filter = '&f=' + ','.join(filters)
return url_filter
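# Usage sketch (illustrative; the exact option codes come from util_dict['filter']):
#   set_filters({'Exchange': 'AMEX', 'Sector': 'Basic Materials'})
# returns something like '&f=exch_amex,sec_basicmaterials', i.e. one
# 'prefix_optioncode' token per filter, comma separated behind '&f='.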
def screener_helper(rows, num_col_index, table_header):
"""Get screener table helper function.
    Args:
        rows(list): html table rows scraped from the screener page
        num_col_index(list): indexes of the numeric columns
        table_header(list): column names of the screener table
    Returns:
df(pandas.DataFrame): screener information table
"""
rows = rows[1:]
df = pd.DataFrame([], columns=table_header)
for index, row in enumerate(rows):
cols = row.findAll('td')[1:]
info_dict = {}
for i, col in enumerate(cols):
# check if the col is number
if i not in num_col_index:
info_dict[table_header[i]] = col.text
else:
info_dict[table_header[i]] = numberCovert(col.text)
df = df.append(info_dict, ignore_index=True)
return df
def get_screener(screener, filters=None, order='ticker', page=1, ascend=True):
'''get_screener
Get screener from finviz website
Args:
screener(str): screener type
        filters(dict): dictionary of filters
        order(str): order of the dataframe.
        page(int): page number
        ascend(bool): ascending or descending order
    Returns:
        df(pandas.DataFrame): screener information table
        page(int): number of available pages
'''
    screener = screener.lower()
    if screener == 'overview':
screener_code = '111'
elif screener == 'financial':
screener_code = '161'
elif screener == 'ownership':
screener_code = '131'
elif screener == 'performance':
screener_code = '141'
elif screener == 'technical':
screener_code = '171'
elif screener == 'valuation':
        screener_code = '121'
    else:
        raise ValueError("Invalid screener '{}'.".format(screener))
# get url
url_filter = ''
if filters:
url_filter = set_filters(filters)
url_order = order
if not ascend:
url_order = '-' + order
url_row = (page - 1) * 20 + 1
url = BASE_URL.format(screener=screener_code, filter=url_filter, order=url_order, row=url_row)
# scrap website
soup = webScrap(url)
page = len(soup.findAll('table')[17].findAll('option'))
if page == 0:
print('No information found.')
return None, 0
table = soup.findAll('table')[18]
rows = table.findAll('tr')
table_header = [i.text for i in rows[0].findAll('td')][1:]
num_col_index = [table_header.index(i) for i in table_header if i in NUMBER_COL]
df = screener_helper(rows, num_col_index, table_header)
return df, page
if __name__ == '__main__':
filters_dict = {'Exchange':'AMEX','Sector':'Basic Materials'}
df, page = get_screener('Overview', filters=filters_dict, order='company', page=3, ascend=False)
print(df)
print(page)
|
py | 1a4c674bd3a0a46e25b3a60e70eca27ec551df0d | import requests
import json
payload = {
'username': 'guest',
'password': 'guest'
}
attack_payload = {
'field1': 'abc@',
'field2': '123@',
'field3': 'abc+123+ABC@',
'field4': 'seven@',
'field5': '90210@',
'field6': '90210-1111@',
'field7': '301-604-4882@'
}
login_url = 'http://127.0.0.1:8080/WebGoat/login.mvc'
post_url = 'http://127.0.0.1:8080/WebGoat/j_spring_security_check'
session = requests.Session()
login_page = session.get(login_url)
logging = session.post(post_url, data=payload)
if logging.status_code == 200 :
menu_url = 'http://127.0.0.1:8080/WebGoat/service/lessonmenu.mvc'
menu = session.get(menu_url)
parse_menu = menu.json()
attack = 'Bypass Client Side JavaScript Validation'
attack_start = menu.text.find(attack)
attack_after_screen = menu.text.find('Screen', attack_start)
screen_menu = menu.text[attack_after_screen: attack_after_screen + 21]
attack_url = 'http://127.0.0.1:8080/WebGoat/attack?' + screen_menu
attack_page = session.get(attack_url)
now_attack = session.post(attack_url, data=attack_payload)
attack_successful = 'Congratulations. You have successfully completed this lesson'
now_attack.text.find(attack_successful)
    if now_attack.text.find(attack_successful) == -1:
        print('vuln-30 not present')
    else:
        print('vuln-30 is present')
|
py | 1a4c68a4bc1f527409902060ff05b2684776eecc | #!/usr/bin/env python
#Must import rospy and msgs
import rospy
from GUI.Tic_Tac_GUI import Program
class GUI_GameNode():
# Callback for msgs
# Must have __init__(self) function for a class
def __init__(self):
p = Program()
p.master.title('Tic-Tac-Drone-GUI')
        p.master.mainloop()
# Create a publisher for commands
# Set the message to publish as command.
# Initialize message variables.
# Create a subscriber for color msg
# Main while loop.
rate = rospy.Rate(1)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == '__main__':
# Initialize the node and name it.
rospy.init_node('GUI')
# Go to class functions that do all the heavy lifting.
# Do error checking.
try:
gg = GUI_GameNode()
except rospy.ROSInterruptException:
pass |
py | 1a4c68fe918c50cb725397eaa22b1258358545aa | """Real-Time Unsupervised Anomaly Detection via Conditional Normalizing Flows.
[CFLOW-AD](https://arxiv.org/pdf/2107.12571v1.pdf)
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
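# --- Hedged illustration (added, not part of the original anomalib module) ---
# The paper above builds a conditional normalizing flow; its basic building
# block is an affine coupling step conditioned on an extra vector (for CFLOW-AD,
# a positional encoding of the feature location). The sketch below is a toy
# NumPy version only: `s_net` and `t_net` stand in for the learned sub-networks
# and are assumptions supplied by the caller, not names from this code base.
import numpy as np
def _conditional_affine_coupling(x, cond, s_net, t_net):
    """One coupling step: keep x1, affine-transform x2 conditioned on (x1, cond)."""
    d = x.shape[-1] // 2
    x1, x2 = x[..., :d], x[..., d:]
    h = np.concatenate([x1, cond], axis=-1)
    s, t = s_net(h), t_net(h)  # log-scale and translation from the conditioner
    y2 = x2 * np.exp(s) + t
    log_det = s.sum(axis=-1)  # this step's contribution to log|det J|
    return np.concatenate([x1, y2], axis=-1), log_det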
|
py | 1a4c6a117fdec79b14e7e5b0963e457a4c98e167 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import os
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from glue.config import colormaps
from glue.core.message import SubsetUpdateMessage
from glue.core import HubListener, Data
from glue.core.roi import XRangeROI, RectangularROI, CircularROI
from glue.core.subset import RoiSubsetState, AndState
from glue import core
from glue.core.component_id import ComponentID
from glue.utils.qt import combo_as_string, process_events
from glue.viewers.matplotlib.qt.tests.test_data_viewer import BaseTestMatplotlibDataViewer
from glue.core.state import GlueUnSerializer
from glue.app.qt.layer_tree_widget import LayerTreeWidget
from glue.app.qt import GlueApplication
from ..data_viewer import ScatterViewer
DATA = os.path.join(os.path.dirname(__file__), 'data')
class TestScatterCommon(BaseTestMatplotlibDataViewer):
def init_data(self):
return Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=['a', 'b', 'c', 'a'])
viewer_cls = ScatterViewer
class TestScatterViewer(object):
def setup_method(self, method):
self.data = Data(label='d1', x=[3.4, 2.3, -1.1, 0.3],
y=[3.2, 3.3, 3.4, 3.5], z=['a', 'b', 'c', 'a'])
self.data_2d = Data(label='d2', a=[[1, 2], [3, 4]], b=[[5, 6], [7, 8]],
x=[[3, 5], [5.4, 1]], y=[[1.2, 4], [7, 8]])
self.app = GlueApplication()
self.session = self.app.session
self.hub = self.session.hub
self.data_collection = self.session.data_collection
self.data_collection.append(self.data)
self.data_collection.append(self.data_2d)
self.viewer = self.app.new_data_viewer(ScatterViewer)
def teardown_method(self, method):
self.viewer.close()
self.viewer = None
self.app.close()
self.app = None
def test_basic(self):
viewer_state = self.viewer.state
# Check defaults when we add data
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert viewer_state.x_att is self.data.id['x']
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
assert viewer_state.y_att is self.data.id['y']
assert_allclose(viewer_state.y_min, 3.2 - 0.012)
assert_allclose(viewer_state.y_max, 3.5 + 0.012)
assert not viewer_state.x_log
assert not viewer_state.y_log
assert len(viewer_state.layers) == 1
# Change to categorical component and check new values
viewer_state.y_att = self.data.id['z']
assert viewer_state.x_att is self.data.id['x']
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
assert viewer_state.y_att is self.data.id['z']
assert_allclose(viewer_state.y_min, -0.5 - 0.12)
assert_allclose(viewer_state.y_max, 2.5 + 0.12)
assert not viewer_state.x_log
assert not viewer_state.y_log
def test_flip(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
self.viewer.options_widget().button_flip_x.click()
assert_allclose(viewer_state.x_max, -1.1 - 0.18)
assert_allclose(viewer_state.x_min, 3.4 + 0.18)
assert_allclose(viewer_state.y_min, 3.2 - 0.012)
assert_allclose(viewer_state.y_max, 3.5 + 0.012)
self.viewer.options_widget().button_flip_y.click()
assert_allclose(viewer_state.y_max, 3.2 - 0.012)
assert_allclose(viewer_state.y_min, 3.5 + 0.012)
def test_remove_data(self):
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
self.data_collection.remove(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == ''
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == ''
def test_update_component_updates_title(self):
self.viewer.add_data(self.data)
assert self.viewer.windowTitle() == '2D Scatter'
self.viewer.state.x_att = self.data.id['y']
assert self.viewer.windowTitle() == '2D Scatter'
def test_combo_updates_with_component_add(self):
self.viewer.add_data(self.data)
self.data.add_component([3, 4, 1, 2], 'a')
assert self.viewer.state.x_att is self.data.id['x']
assert self.viewer.state.y_att is self.data.id['y']
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:a:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:a:Coordinate components:Pixel Axis 0 [x]'
def test_nonnumeric_first_component(self):
# regression test for #208. Shouldn't complain if
# first component is non-numerical
data = core.Data()
data.add_component(['a', 'b', 'c'], label='c1')
data.add_component([1, 2, 3], label='c2')
self.data_collection.append(data)
self.viewer.add_data(data)
def test_apply_roi(self):
self.viewer.add_data(self.data)
roi = RectangularROI(0, 3, 3.25, 3.45)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert len(self.data.subsets) == 1
assert_allclose(self.data.subsets[0].to_mask(), [0, 1, 0, 0])
state = self.data.subsets[0].subset_state
assert isinstance(state, RoiSubsetState)
def test_apply_roi_categorical(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
viewer_state.y_att = self.data.id['z']
roi = RectangularROI(0, 3, -0.4, 0.3)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert len(self.data.subsets) == 1
assert_allclose(self.data.subsets[0].to_mask(), [0, 0, 0, 1])
state = self.data.subsets[0].subset_state
assert isinstance(state, AndState)
def test_apply_roi_empty(self):
# Make sure that doing an ROI selection on an empty viewer doesn't
        # produce error messages
roi = XRangeROI(-0.2, 0.1)
self.viewer.apply_roi(roi)
def test_axes_labels(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert self.viewer.axes.get_xlabel() == 'x'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.x_log = True
assert self.viewer.axes.get_xlabel() == 'Log x'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.x_att = self.data.id['y']
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.y_log = True
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'Log y'
def test_component_replaced(self):
# regression test for 508 - if a component ID is replaced, we should
# make sure that the component ID is selected if the old component ID
# was selected
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['x']
test = ComponentID('test')
self.data.update_id(self.viewer.state.x_att, test)
assert self.viewer.state.x_att is test
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:test:y:z:Coordinate components:Pixel Axis 0 [x]'
def test_nan_component(self):
# regression test for case when all values are NaN in a component
data = core.Data()
data.add_component([np.nan, np.nan, np.nan], label='c1')
self.data_collection.append(data)
self.viewer.add_data(data)
def test_density_map(self):
kwargs = dict(range=[(-5, 5), (-5, 5)], bins=(2, 2))
self.viewer.add_data(self.data)
self.viewer.state.layers[0].points_mode = 'auto'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 0
self.viewer.state.layers[0].points_mode = 'density'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 4
self.viewer.state.layers[0].points_mode = 'markers'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 0
def test_density_map_color(self):
# Regression test to make sure things don't crash when changing
# back to markers if the color mode is cmap
self.viewer.add_data(self.data)
self.viewer.state.layers[0].points_mode = 'density'
self.viewer.state.layers[0].cmap_mode = 'Linear'
self.viewer.state.layers[0].size_mode = 'Linear'
self.viewer.state.layers[0].points_mode = 'markers'
self.viewer.state.layers[0].points_mode = 'density'
@pytest.mark.parametrize('protocol', [0, 1])
def test_session_back_compat(self, protocol):
filename = os.path.join(DATA, 'scatter_v{0}.glu'.format(protocol))
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
assert len(dc) == 1
assert dc[0].label == 'basic'
viewer1 = ga.viewers[0][0]
assert len(viewer1.state.layers) == 3
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert_allclose(viewer1.state.x_min, -1.04)
assert_allclose(viewer1.state.x_max, 1.04)
assert_allclose(viewer1.state.y_min, 1.98)
assert_allclose(viewer1.state.y_max, 3.02)
assert not viewer1.state.x_log
assert not viewer1.state.y_log
assert viewer1.state.layers[0].visible
assert viewer1.state.layers[1].visible
assert viewer1.state.layers[2].visible
viewer2 = ga.viewers[0][1]
assert len(viewer2.state.layers) == 3
assert viewer2.state.x_att is dc[0].id['a']
assert viewer2.state.y_att is dc[0].id['c']
assert_allclose(viewer2.state.x_min, 9.5e-6)
assert_allclose(viewer2.state.x_max, 1.05)
assert_allclose(viewer2.state.y_min, 0.38)
assert_allclose(viewer2.state.y_max, 5.25)
assert viewer2.state.x_log
assert viewer2.state.y_log
assert viewer2.state.layers[0].visible
assert not viewer2.state.layers[1].visible
assert viewer2.state.layers[2].visible
viewer3 = ga.viewers[0][2]
assert len(viewer3.state.layers) == 3
assert viewer3.state.x_att is dc[0].id['b']
assert viewer3.state.y_att is dc[0].id['a']
assert_allclose(viewer3.state.x_min, 0)
assert_allclose(viewer3.state.x_max, 5)
assert_allclose(viewer3.state.y_min, -5)
assert_allclose(viewer3.state.y_max, 5)
assert not viewer3.state.x_log
assert not viewer3.state.y_log
assert viewer3.state.layers[0].visible
assert viewer3.state.layers[1].visible
assert not viewer3.state.layers[2].visible
ga.close()
def test_session_line_back_compat(self):
# Backward-compatibility for v0.11 files in which the line and scatter
# plots were defined as separate styles.
filename = os.path.join(DATA, 'scatter_and_line_v1.glu')
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
assert len(dc) == 1
assert dc[0].label == 'table'
viewer1 = ga.viewers[0][0]
assert len(viewer1.state.layers) == 1
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert viewer1.state.layers[0].markers_visible
assert not viewer1.state.layers[0].line_visible
viewer1 = ga.viewers[0][1]
assert len(viewer1.state.layers) == 1
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert not viewer1.state.layers[0].markers_visible
assert viewer1.state.layers[0].line_visible
ga.close()
def test_save_svg(self, tmpdir):
# Regression test for a bug in AxesCache that caused SVG saving to
# fail (because renderer.buffer_rgba did not exist)
self.viewer.add_data(self.data)
filename = tmpdir.join('test.svg').strpath
self.viewer.axes.figure.savefig(filename)
def test_2d(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data_2d)
assert viewer_state.x_att is self.data_2d.id['a']
assert_allclose(viewer_state.x_min, 1 - 0.12)
assert_allclose(viewer_state.x_max, 4 + 0.12)
assert viewer_state.y_att is self.data_2d.id['b']
assert_allclose(viewer_state.y_min, 5 - 0.12)
assert_allclose(viewer_state.y_max, 8 + 0.12)
assert self.viewer.layers[0].plot_artist.get_xdata().shape == (4,)
def test_apply_roi_single(self):
# Regression test for a bug that caused mode.update to be called
# multiple times and resulted in all other viewers receiving many
        # messages regarding subset updates (this occurred when multiple
        # datasets were present).
layer_tree = LayerTreeWidget(session=self.session)
layer_tree.set_checkable(False)
layer_tree.setup(self.data_collection)
layer_tree.bind_selection_to_edit_subset()
class Client(HubListener):
def __init__(self, *args, **kwargs):
super(Client, self).__init__(*args, **kwargs)
self.count = Counter()
def ping(self, message):
self.count[message.sender] += 1
def register_to_hub(self, hub):
hub.subscribe(self, SubsetUpdateMessage, handler=self.ping)
d1 = Data(a=[1, 2, 3], label='d3')
d2 = Data(b=[1, 2, 3], label='d4')
d3 = Data(c=[1, 2, 3], label='d5')
d4 = Data(d=[1, 2, 3], label='d6')
self.data_collection.append(d1)
self.data_collection.append(d2)
self.data_collection.append(d3)
self.data_collection.append(d4)
client = Client()
client.register_to_hub(self.hub)
self.viewer.add_data(d1)
self.viewer.add_data(d3)
roi = XRangeROI(2.5, 3.5)
self.viewer.apply_roi(roi)
for subset in client.count:
assert client.count[subset] == 1
@pytest.mark.parametrize('ndim', [1, 2])
def test_all_options(self, ndim):
# This test makes sure that all the code for the different scatter modes
# gets run, though does not check the result.
viewer_state = self.viewer.state
if ndim == 1:
data = self.data
elif ndim == 2:
data = self.data_2d
self.viewer.add_data(data)
layer_state = viewer_state.layers[0]
layer_state.style = 'Scatter'
layer_state.size_mode = 'Linear'
layer_state.size_att = data.id['y']
layer_state.size_vmin = 1.2
layer_state.size_vmax = 4.
layer_state.size_scaling = 2
layer_state.cmap_mode = 'Linear'
layer_state.cmap_att = data.id['x']
layer_state.cmap_vmin = -1
layer_state.cmap_vmax = 2.
layer_state.cmap = colormaps.members[3][1]
# Check inverting works
layer_state.cmap_vmin = 3.
layer_state.size_mode = 'Fixed'
layer_state.xerr_visible = True
layer_state.xerr_att = data.id['x']
layer_state.yerr_visible = True
layer_state.yerr_att = data.id['y']
layer_state.style = 'Line'
layer_state.linewidth = 3
layer_state.linestyle = 'dashed'
def test_session_categorical(self, tmpdir):
def visible_xaxis_labels(ax):
# Due to a bug in Matplotlib the labels returned outside the field
# of view may be incorrect: https://github.com/matplotlib/matplotlib/issues/9397
pos = ax.xaxis.get_ticklocs()
labels = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
xmin, xmax = ax.get_xlim()
return [labels[i] for i in range(len(pos)) if pos[i] >= xmin and pos[i] <= xmax]
# Regression test for a bug that caused a restored scatter viewer
# with a categorical component to not show the categorical labels
# as tick labels.
filename = tmpdir.join('test_session_categorical.glu').strpath
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['z']
process_events()
assert visible_xaxis_labels(self.viewer.axes) == ['a', 'b', 'c']
self.session.application.save_session(filename)
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
viewer = ga.viewers[0][0]
assert viewer.state.x_att is dc[0].id['z']
assert visible_xaxis_labels(self.viewer.axes) == ['a', 'b', 'c']
ga.close()
def test_enable_disable_components_combo(self):
# Regression test for a bug that caused an error when turning off pixel
# components from combo boxes.
self.viewer.add_data(self.data)
self.data['a'] = self.data.id['x'] + 5
self.viewer.state.x_att_helper.pixel_coord = True
self.viewer.state.x_att = self.data.pixel_component_ids[0]
self.viewer.state.x_att_helper.pixel_coord = False
def test_datetime64_support(self, tmpdir):
self.data.add_component(np.array([100, 200, 300, 400], dtype='M8[D]'), 't1')
self.data.add_component(np.array([200, 300, 400, 500], dtype='M8[D]'), 't2')
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['t1']
self.viewer.state.y_att = self.data.id['y']
# Matplotlib deals with dates by converting them to the number of days
# since 01-01-0001, so we can check that the limits are correctly
# converted (and not 100 to 400)
assert self.viewer.axes.get_xlim() == (719251.0, 719575.0)
assert self.viewer.axes.get_ylim() == (3.2 - 0.012, 3.5 + 0.012)
# Apply an ROI selection in plotting coordinates
roi = RectangularROI(xmin=719313, xmax=719513, ymin=3, ymax=4)
self.viewer.apply_roi(roi)
# Check that the two middle elements are selected
assert_equal(self.data.subsets[0].to_mask(), [0, 1, 1, 0])
# Now do the same with the y axis
self.viewer.state.y_att = self.data.id['t2']
assert self.viewer.axes.get_xlim() == (719251.0, 719575.0)
assert self.viewer.axes.get_ylim() == (719351.0, 719675.0)
# Apply an ROI selection in plotting coordinates
edit = self.session.edit_subset_mode
edit.edit_subset = []
roi = CircularROI(xc=719463, yc=719563, radius=200)
self.viewer.apply_roi(roi)
assert_equal(self.data.subsets[1].to_mask(), [0, 1, 1, 1])
# Make sure that the Qt labels look ok
self.viewer.state.y_att = self.data.id['y']
options = self.viewer.options_widget().ui
assert options.valuetext_x_min.text() == '1970-03-30'
assert options.valuetext_x_max.text() == '1971-02-17'
assert options.valuetext_y_min.text() == '3.188'
assert options.valuetext_y_max.text() == '3.512'
# Make sure that we can set the xmin/xmax to a string date
assert_equal(self.viewer.state.x_min, np.datetime64('1970-03-30', 'D'))
options.valuetext_x_min.setText('1970-04-14')
options.valuetext_x_min.editingFinished.emit()
assert self.viewer.axes.get_xlim() == (719266.0, 719575.0)
assert_equal(self.viewer.state.x_min, np.datetime64('1970-04-14', 'D'))
# Make sure that everything works fine after saving/reloading
filename = tmpdir.join('test_datetime64.glu').strpath
self.session.application.save_session(filename)
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
viewer = ga.viewers[0][0]
options = viewer.options_widget().ui
assert_equal(self.viewer.state.x_min, np.datetime64('1970-04-14', 'D'))
assert options.valuetext_x_min.text() == '1970-04-14'
assert options.valuetext_x_max.text() == '1971-02-17'
assert options.valuetext_y_min.text() == '3.188'
assert options.valuetext_y_max.text() == '3.512'
ga.close()
def test_datetime64_disabled(self, capsys):
# Make sure that datetime components aren't options for the vector and
# error markers.
data = Data(label='test')
data.add_component(np.array([100, 200, 300, 400], dtype='M8[D]'), 't1')
data.add_component(np.array([200, 300, 400, 500], dtype='M8[D]'), 't2')
data.add_component(np.array([200., 300., 400., 500.]), 'x')
data.add_component(np.array([200., 300., 400., 500.]), 'y')
self.data_collection.append(data)
self.viewer.add_data(data)
self.viewer.state.x_att = data.id['x']
self.viewer.state.y_att = data.id['y']
self.viewer.state.layers[0].cmap_mode = 'Linear'
self.viewer.state.layers[0].cmap_att = data.id['x']
self.viewer.state.layers[0].size_mode = 'Linear'
self.viewer.state.layers[0].size_att = data.id['y']
self.viewer.state.layers[0].vector_visible = True
self.viewer.state.layers[0].xerr_visible = True
self.viewer.state.layers[0].yerr_visible = True
process_events()
self.viewer.state.x_att = data.id['t1']
self.viewer.state.y_att = data.id['t2']
process_events()
        # We use capsys here because the error is otherwise only apparent in stderr.
out, err = capsys.readouterr()
assert out.strip() == ""
assert err.strip() == ""
def test_density_map_incompatible_subset(self, capsys):
# Regression test for a bug that caused the scatter viewer to crash
# if subset for density map was incompatible.
data2 = Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=[3.2, 3.3, 3.4, 3.5], z=['a', 'b', 'c', 'a'])
self.data_collection.append(data2)
self.viewer.add_data(self.data)
self.viewer.add_data(data2)
self.data_collection.new_subset_group('test', self.data.id['x'] > 1)
for layer in self.viewer.state.layers:
layer.density_map = True
self.viewer.figure.canvas.draw()
process_events()
assert self.viewer.layers[0].enabled
assert not self.viewer.layers[1].enabled
assert self.viewer.layers[2].enabled
assert not self.viewer.layers[3].enabled
def test_density_map_line_error_vector(self, capsys):
# Make sure that we don't allow/show lines/errors/vectors
# if in density map mode.
self.viewer.add_data(self.data)
self.viewer.state.layers[0].line_visible = True
self.viewer.state.layers[0].xerr_visible = True
self.viewer.state.layers[0].yerr_visible = True
self.viewer.state.layers[0].vector_visible = True
# Setting density_map to True resets the visibility of
# lines/errors/vectors.
self.viewer.state.layers[0].density_map = True
assert not self.viewer.state.layers[0].line_visible
assert not self.viewer.state.layers[0].xerr_visible
assert not self.viewer.state.layers[0].yerr_visible
assert not self.viewer.state.layers[0].vector_visible
|
py | 1a4c6a2d7952e349c59d1c0e196b1bf44aefe4f9 | import logging
import os
import shutil
import sqlite3
from src.hyperopt_trainer import HyperoptTrainer
from src.pirate import Pirate
import src.config as config
def _check_dna(func):
"""
Decorator makes sure dna is a string and not none
:raises ValueError: if dna is not string
"""
def wrapper(*args, **kwargs):
dna = kwargs.get('dna', None)
        # TODO: Check if it is actually a uuid4, not just if it's a string
if not isinstance(dna, str):
raise ValueError('dna must be a string UUID4')
return func(*args, **kwargs)
return wrapper
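# Usage note (sketch): the wrapper above only looks at **kwargs, so `dna` must
# be passed as a keyword argument, e.g. ship._add_pirate(dna='...'); a
# positional call like ship._add_pirate('...') would raise ValueError even
# though a string was supplied.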
class Ship(object):
"""
The Ship is where the Pirates live. It contains the interface to the sqlite database
which is where scores and meta-data for each pirate is kept.
"""
def __init__(self, ship_name='Boat'):
"""
:param ship_name: (string) name this vessel!
"""
self.log = logging.getLogger(__name__)
self.name = ship_name
# Database connection params
self._c = None
self._conn = None
self._database_path = os.path.abspath(os.path.join(config.SHIP_DIR, self.name + '.db'))
def __enter__(self):
"""
Set off to sea! Connects to local sqlite db. Creates table if database does not yet exist
"""
# Connect to the database, set up cursor
self.log.debug('Starting up database at %s' % self._database_path)
self._conn = sqlite3.connect(self._database_path)
self._conn.row_factory = sqlite3.Row
self._c = self._conn.cursor()
# Create the table
with self._conn:
try:
self._c.execute("""CREATE TABLE pirates (
dna TEXT,
name TEXT DEFAULT 'Unborn',
rank INTEGER DEFAULT 0,
win INTEGER DEFAULT 0,
loss INTEGER DEFAULT 0,
saltyness INTEGER DEFAULT 0
)""")
except sqlite3.OperationalError:
pass # Table was already created
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
@_check_dna
def _add_pirate(self, dna=None):
"""
Adds a pirate to the ship
:param dna: (string) identifier for the pirate
:raises ValueError: if dna is not string
"""
with self._conn:
try:
self._c.execute('INSERT INTO pirates(dna) VALUES (:dna)', {'dna': dna})
except sqlite3.Error as e:
self.log.warning('Could not add pirate to ship. Error: %s' % e)
@_check_dna
def _walk_the_plank(self, dna=None):
"""
Removes a pirate from the ship
:param dna: (string) identifier for the pirate
:raises ValueError: if dna is not string
"""
with self._conn:
try:
self._c.execute('DELETE FROM pirates WHERE dna=:dna', {'dna': dna})
except sqlite3.Error as e:
self.log.warning('Could not remove pirate from ship. Error: %s' % e)
@_check_dna
def _set_prop(self, dna=None, prop=None):
"""
Updates properties of pirate on ship
:param dna: (string) identifier for the pirate
:param prop: {string:val,} name:value of the properties
:return: (bool) error
"""
# TODO: take a list of attributes to update
        if not isinstance(prop, dict) or not all(isinstance(p, str) for p in prop.keys()):
raise ValueError('Must give a dictionary of properties with string keys to find')
with self._conn:
try:
prop_str = ''
for key, value in prop.items():
prop_str += key + ' = ' + str(value) + ' , '
# TODO: This is unsafe SQL practices
query = 'UPDATE pirates SET ' + prop_str[:-2] + 'WHERE dna = \'' + dna + '\''
self._c.execute(query)
return False
except sqlite3.Error as e:
self.log.warning('Could not set pirate properties. Error: %s' % e)
return True
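    # Hedged sketch of a safer variant (not a drop-in replacement): the values
    # handed to _set_prop are SQL expressions such as 'win + 1', so they cannot
    # simply be bound as parameters, but the dna can be, and the column names
    # can be checked against a whitelist before any string concatenation:
    #   allowed = {'name', 'rank', 'win', 'loss', 'saltyness'}
    #   sets = ' , '.join('{} = {}'.format(k, v)
    #                     for k, v in prop.items() if k in allowed)
    #   self._c.execute('UPDATE pirates SET ' + sets + ' WHERE dna = ?', (dna,))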
@_check_dna
def _get_prop(self, dna=None, prop=None):
"""
Returns properties of pirate on ship
:param dna: (string) identifier for the pirate
:param prop: [string,] name(s) of the property
:return: (bool), [val,] error, name:value of the properties
"""
        if not isinstance(prop, list) or not all(isinstance(p, str) for p in prop):
raise ValueError('Must give a list of string properties to find')
with self._conn:
try:
query = 'SELECT ' + ','.join(prop) + ' FROM pirates WHERE dna = \'' + dna + '\''
self._c.execute(query)
sql_row = [dict(a) for a in self._c.fetchall()] # TODO: clean up b2b list comprehension
                return False, [sql_row[0][p] for p in prop]
except (TypeError, sqlite3.Error) as e:
self.log.warning('Could not get pirate properties. Error: %s' % e)
return True, None
@_check_dna
def create_pirate(self, dna=None):
"""
Creates a pirate on the ship. Watch out: this loads pirate model to memory.
:param dna: (string) identifier for the pirate
:return: (bool), (Pirate) error, the pirate object
:raises ValueError: if dna is not string
"""
with self._conn:
try:
self._c.execute('SELECT * FROM pirates WHERE dna=:dna', {'dna': dna})
pirate_info = dict(self._c.fetchone())
except (TypeError, sqlite3.Error) as e:
self.log.warning('Could not find pirate in ship. Error: %s' % e)
return True, None
try:
pirate = Pirate(dna=pirate_info.get('dna', None),
name=pirate_info.get('name', None),
rank=pirate_info.get('rank', None),
win=pirate_info.get('win', None),
loss=pirate_info.get('loss', None),
saltyness=pirate_info.get('saltyness', None))
# Update the name for the pirate
self._set_prop(dna=dna, prop={'name': '\'' + pirate.name + '\''})
except FileNotFoundError:
self.log.warning('Could not create pirate. Could not find model associated with it')
return True, None
return False, pirate
def get_best_pirates(self, n=1):
"""
The (up-to) N saltiest pirates on board the ship.
:param n: (int) up to this number of pirates, less if not many pirates in db
:return: [pirates+] list of pirates
"""
with self._conn:
self._c.execute('SELECT dna FROM pirates ORDER BY saltyness DESC LIMIT 50')
sql_row = [dict(a) for a in self._c.fetchall()]
pirates = []
for i, d in enumerate(sql_row):
if i >= n:
break
err, pirate = self.create_pirate(dna=d['dna'])
if not err: # Don't add pirates that throw an error on creation
pirates.append(pirate)
return pirates
def marooning_update(self, winner, losers):
"""
Updates the ship with the results from a marooning
        :param winner: (string) dna of the winning pirate (empty string if no winner)
:param losers: [string,] list of string dnas for losing pirates
:return: (bool) error
"""
# Update wins, losses, and saltyness for the winner and the losers
if winner: # Empty string is False in python
            # We can use the +1 formulation because we use string concatenation
self._set_prop(dna=winner, prop={'win': 'win + 1'})
self._set_prop(dna=winner, prop={'saltyness': 'saltyness + ' + str(config.SALT_PER_WIN)})
for dna in losers:
self._set_prop(dna=dna, prop={'loss': 'loss + 1'})
self._set_prop(dna=dna, prop={'saltyness': 'saltyness - ' + str(config.SALT_PER_LOSS)})
return True # Not yet implemented
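    # Sketch (not part of the original code) of the SQL that _set_prop produces
    # here, for a hypothetical dna value; the increment works because the column
    # name is embedded inside the value string:
    #   UPDATE pirates SET win = win + 1 WHERE dna = 'winner-dna'
    #   UPDATE pirates SET saltyness = saltyness + <config.SALT_PER_WIN> WHERE dna = 'winner-dna'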
def headcount(self):
"""
How many pirates are on this ship?
:return: (int) number of pirates on this ship (or 0 if error)
"""
with self._conn:
try:
self._c.execute('SELECT count() FROM pirates')
sql_row = [dict(a) for a in self._c.fetchall()]
num_pirates = sql_row[0]['count()']
self.log.info('There are currently %s pirates on the ship' % num_pirates)
return num_pirates
except (TypeError, sqlite3.Error) as e:
                self.log.warning('Failed to perform headcount on ship. Error: %s' % e)
return 0
@_check_dna
def delete_local_pirate_files(self, dna=None):
"""
Deletes local files associated with a pirate (model, docs, logs).
:param dna: (string) identifier for the pirate
        Logs a warning if some of the local files cannot be found.
"""
removed = {'model': False, 'doc': False, 'log': False}
for dirpath, _, files in os.walk(config.MODEL_DIR):
if dna + '.h5' in files:
os.remove(os.path.join(dirpath, dna + '.h5'))
removed['model'] = True
if dna + '.pickle' in files:
os.remove(os.path.join(dirpath, dna + '.pickle'))
removed['doc'] = True
for dirpath, dirs, files in os.walk(config.LOGS_DIR):
if dna in dirs:
# Tensorboard logs are a folder
shutil.rmtree(os.path.join(dirpath, dna))
removed['log'] = True
if not all(removed.values()): # All of the files should be removed
self.log.warning('When removing local files for %s, could not find %s' % (dna, removed))
def less_pirates(self, n=config.NUM_PIRATES_PER_CULLING):
"""
Removes the N pirates with lowest saltyness from the ship (and associated local files)
:param n: (int) how many pirates to be removed
"""
with self._conn:
            self._c.execute('SELECT dna FROM pirates ORDER BY saltyness ASC LIMIT ?', (n,))
sql_row = [dict(a) for a in self._c.fetchall()]
for d in sql_row:
self.delete_local_pirate_files(dna=d['dna'])
self._walk_the_plank(dna=d['dna'])
def more_pirates(self, num_pirates=config.NUM_PIRATES_PER_TRAIN, max_tries=config.MAX_TRAIN_TRIES, space=config.SPACE):
"""
Create pirates using hyperopt, adds them to the ship
:param num_pirates: (int) number of pirates to generate
:param max_tries: (int) max number of hyperopt runs before choosing best pirates
"""
assert space, 'Please provide a hyperparameter space for creating pirate models'
with HyperoptTrainer() as trainer:
results = trainer.run_hyperopt(max_tries, space)
# Sort results by highest validation accuracy
top = sorted(results.items(), key=lambda e: e[1])
self.log.info('Making %s more pirates' % num_pirates)
for idx, (dna, _) in enumerate(top):
if idx < num_pirates: # Only add the best N pirates
self._add_pirate(dna=dna)
self._set_prop(dna=dna, prop={'rank': idx})
self._set_prop(dna=dna, prop={'saltyness': config.STARTING_SALT})
else:
                    self.delete_local_pirate_files(dna=dna)  # Delete unused pirate files from disk
py | 1a4c6b8e87dbd319d34a000cdbed1718f4b9dde4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Autologging documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 27 21:07:11 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Autologging'
copyright = '2013, 2015, 2016, 2018, 2019 Matthew Zipay'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.3"
# The full version, including alpha/beta/rc tags.
release = "1.3.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://ninthtest.info/python-autologging/'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Autologgingdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Autologging.tex', 'Autologging Documentation',
'Matthew Zipay', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'autologging', 'Autologging Documentation',
['Matthew Zipay'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Autologging', 'Autologging Documentation',
'Matthew Zipay', 'Autologging', 'Eliminate boilerplate logging and tracing code.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/3': None}
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'members']
|
py | 1a4c6bf5a7fea2726b47ce8c54f26017c829e87e | from .TypingData import TypingData
from .TypingNet import TypingNet, TypingTrain
import torch as th
import dgl
class TypingUtility:
def __init__(self):
self.fn = None
self.data = None
self.net = None
self.prob = th.nn.Softmax(dim=0)
def Predict(self, fn):
residue = self.DLText2Residue(fn)
return residue, self.PredictResidue(residue)
def PredictResidue(self, residue):
with th.no_grad():
y = self.net(residue)
types = [self.data.atomic_types[residue[0].ndata[self.data.features_str][i][0].item()][th.argmax(y0).item()] for i, y0 in enumerate(y)]
probs = [self.prob(y0) for y0 in y]
return types, probs
def DLText2Residue(self, fn):
with open(fn, "r") as fd:
lines = fd.readlines()
atom_numbers = [int(word) for word in lines[0].split()]
edge_lines = [int(word) for word in lines[1].split()]
edges1 = []
edges2 = []
for i in range(0, len(edge_lines), 2):
edges1.append(edge_lines[i])
edges2.append(edge_lines[i+1])
edges1.append(edge_lines[i+1])
edges2.append(edge_lines[i])
graph = dgl.graph((th.tensor(edges1), th.tensor(edges2)))
graph.ndata[self.data.features_str] = th.tensor(atom_numbers).unsqueeze(1)
return (graph,
{element:[idx for idx,value in enumerate(graph.ndata[self.data.features_str][:, 0].tolist()) if value == element]
for element in set(graph.ndata[self.data.features_str][:, 0].tolist())},
fn.upper())
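    # Expected layout of the DL text file read above (hypothetical example, not
    # taken from the original sources): the first line lists one atomic number
    # per atom/node, the second line lists flattened bonded-index pairs, which
    # are mirrored here to build an undirected DGL graph.
    #   6 6 8 1
    #   0 1 1 2 2 3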
def Calibrate(self):
type_stats = {key:[[0,0] for _ in range(len(value))] for key,value in self.data.atomic_types.items()}
for residue in self.data.residues:
y = self.net(residue)
for element, type, prob in zip(residue[0].ndata[self.data.features_str][:, 0].tolist(), residue[0].ndata[self.data.atomic_type_str].squeeze(1).tolist(), y):
type_stats[element][type][0] += 1
predict_type = th.argmax(prob).item()
if predict_type != type:
type_stats[element][type][1] += 1
if self.data.atomic_types[element][type] == "CG321":
print("In %8s: should be %8s, but is %s" % (residue[2], self.data.atomic_types[element][type], self.data.atomic_types[element][predict_type]))
return type_stats
def ExportParams(self, fn):
with open(fn, "w") as fd:
maxK = -1
num_layers = len(self.net.tclayers[1].gcnlayers)
layer_str = ""
for i in range(num_layers-1): layer_str += "layer%d, " % (i)
layer_str += "layer%d" % (num_layers-1)
fd.write("Layer %s;\n" % (layer_str))
for element, net in self.net.tclayers.items():
params = net.export()
param_str = ""
for i, param in enumerate(params):
b = param[0]
K = param[1]
maxK = max(K, maxK)
Ws = param[2]
dim0 = Ws[0].shape[0]
dim1 = Ws[0].shape[1]
param_str += "layer%d = {{" % (i)
param_str += ("MatrixXd::Zero(%d, %d), " % (dim0, dim1))*(K+1)
param_str += "}, RowVectorXd::Zero(%d)};\n" % (dim1)
# Write down parameters.
for k in range(K+1):
param_str += "layer%d.Ws[%d] << " % (i, k)
for x in range(dim0-1):
for y in range(dim1):
param_str += "%15.8f, " % (Ws[k][x][y])
param_str += "\n"
for y in range(dim1-1): param_str += "%15.8f, " % (Ws[k][dim0-1][y])
param_str += "%15.8f;\n" % (Ws[k][dim0-1][dim1-1])
param_str += "layer%d.b << " % (i)
for y in range(dim1-1): param_str += "%15.8f, " % (b[y])
param_str += "%15.8f;\n" % (b[dim1-1])
param_str += "Nets[%d] = {%s};" % (element, layer_str)
fd.write("%s\n" % (param_str))
fd.write("MaxK = %d;" % (maxK))
def Build(self, fn, params_fn = None, type = "CHARMM", training_ratio = 1., learning_rate = 1E-3, max_epochs = 1000000, output_freq = 100, device = th.device("cpu")):
print(" -- Build Atom Typing Predictor --")
# Build typing model.
self.fn = fn
self.data = TypingData()
typing_type_fun = {"CHARMM":self.data.ParseFromCHARMM}
typing_type_fun[type.upper()](self.fn, device)
print("Definitions from: %s" % (self.fn))
num_atomic_types = len(self.data.atomic_types)
print(" Atomic types: %d" % (sum(len(value) for value in self.data.atomic_types.values())))
for key, value in self.data.atomic_types.items():
print(" %-2s: %-3d" % ([k for k, v in self.data.periodic_table.items() if v == key][0], len(value)), value)
print(" Residue names: %d" % (len(self.data.residues)))
# Build net.
print("Net parameters: %s" % ("To be trained" if params_fn is None else params_fn))
num_features = self.data.num_features
features_str = self.data.features_str
atomic_type_str = self.data.atomic_type_str
save_fn_prefix = self.fn[:self.fn.rfind(".")]
self.net = TypingNet(num_features, self.data.atomic_types, features_str)
if params_fn is not None:
# Only load parameters, not architecture.
self.net.load(params_fn)
if max_epochs > 0:
TypingTrain(self.net, self.data.residues, training_ratio, learning_rate, max_epochs, output_freq, save_fn_prefix, atomic_type_str, device)
self.net.eval()
print(" -- Building Atom Typing Predictor Accomplished --")
|
py | 1a4c6d524ee0956a9c24c61414cdff476ccbb946 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import search_term_view
from google.ads.googleads.v6.services.types import search_term_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class SearchTermViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for SearchTermViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_search_term_view: gapic_v1.method.wrap_method(
self.get_search_term_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_search_term_view(
self,
) -> typing.Callable[
[search_term_view_service.GetSearchTermViewRequest],
search_term_view.SearchTermView,
]:
raise NotImplementedError
__all__ = ("SearchTermViewServiceTransport",)
|
py | 1a4c6ddeec491034f4c2f3b07ef3d82567ff9cd5 | import argparse
import codecs
import csv
import datetime
import errno
import importlib
import json
import logging
import os
import shutil
import subprocess
import sys
import traceback
from functools import singledispatch
from pathlib import Path
from typing import (
Any,
Iterable,
List,
Tuple,
Union,
cast,
)
from types import ModuleType
from urllib.error import URLError
import publicsuffix
import requests
import strict_rfc3339
MANDATORY_SCANNER_PROPERTIES = (
"headers",
"to_rows"
)
# global in-memory cache
suffix_list = None
# Time Conveniences #
# Now, in UTC, in seconds (with decimal microseconds).
def local_now() -> float:
return datetime.datetime.now().timestamp()
def format_datetime(obj) -> Union[str, None]:
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, str):
return obj
else:
return None
# Cut off floating point errors, always output duration down to
# microseconds.
def just_microseconds(duration: float) -> Union[str, None]:
if duration is None:
return None
return "%.6f" % duration
# RFC 3339 timestamp for a given UTC time.
# seconds can be a float, down to microseconds.
# A given time needs to be passed in *as* UTC already.
def utc_timestamp(seconds: Union[float, int]) -> Union[str, None]:
if not seconds:
return None
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(seconds)
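# Illustrative values for the time helpers above (not part of the original module):
#   just_microseconds(1.5) -> "1.500000"
#   utc_timestamp(1)       -> "1970-01-01T00:00:01Z"
#   utc_timestamp(0)       -> None  (0 is falsy, so it is treated as "no time")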
# /Time Conveniences #
# Filesystem Conveniences #
# mkdir -p in python, from:
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path: str) -> None:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
def read(source):
with open(source) as f:
contents = f.read()
return contents
def write(content: Union[bytes, str], destination: str,
binary: bool=False) -> None:
mkdir_p(os.path.dirname(destination))
if binary:
binary_content = cast(bytes, content) # mypy wrangling
with open(destination, "bw") as fb:
fb.write(binary_content)
else:
string_content = cast(str, content) # mypy wrangling
with open(destination, "w", encoding="utf-8") as fs:
fs.write(string_content)
# /Filesystem Conveniences #
# Error Conveniences #
def format_last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
# Error Conveniences #
# Command Line Conveniences #
def scan(command: List[str], env: dict=None,
allowed_return_codes: list=[]) -> Union[str, None]:
try:
response = subprocess.check_output(
command,
stderr=subprocess.STDOUT,
shell=False, env=env
)
return str(response, encoding='UTF-8')
except subprocess.CalledProcessError as exc:
if exc.returncode in allowed_return_codes:
return str(exc.stdout, encoding='UTF-8')
else:
logging.warning("Error running %s." % (str(command)))
logging.warning("Error running %s." % (str(exc.output)))
logging.warning(format_last_exception())
return None
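# Illustrative (hypothetical) use of scan(): run a CLI tool and capture stdout,
# treating exit code 1 as acceptable output rather than a failure:
#   output = scan(["echo", "hello"])                                  # -> "hello\n"
#   output = scan(["somecheck", "example.gov"], allowed_return_codes=[1])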
# test if a command exists, don't print output
def try_command(command):
try:
subprocess.check_call(["which", command], shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return True
except subprocess.CalledProcessError:
logging.warning(format_last_exception())
logging.warning("No command found: %s" % (str(command)))
return False
# /Command Line Conveniences #
# JSON Conveniences #
# Format datetimes, sort keys, pretty-print.
def json_for(object: object) -> str:
return json.dumps(object, sort_keys=True, indent=2, default=format_datetime)
# Mirror image of json_for.
def from_json(string):
return json.loads(string)
# /JSON Conveniences #
# Logging Conveniences #
def configure_logging(options: Union[dict, None]=None) -> None:
options = {} if not options else options
if options.get('debug', False):
log_level = "debug"
else:
log_level = options.get("log", "warn")
if log_level not in ["debug", "info", "warn", "error"]:
print("Invalid log level (specify: debug, info, warn, error).")
sys.exit(1)
logging.basicConfig(format='%(message)s', level=log_level.upper())
# /Logging Conveniences #
# CSV Handling #
# Sort a CSV by domain name, "in-place" (by making a temporary copy).
# This loads the whole thing into memory: it's not a great solution for
# super-large lists of domains.
def sort_csv(input_filename):
logging.warning("Sorting %s..." % input_filename)
input_file = open(input_filename, encoding='utf-8', newline='')
tmp_filename = "%s.tmp" % input_filename
tmp_file = open(tmp_filename, 'w', newline='')
tmp_writer = csv.writer(tmp_file)
# store list of domains, to sort at the end
domains = []
# index rows by domain
rows = {}
header = None
for row in csv.reader(input_file):
# keep the header around
if (row[0].lower() == "domain"):
header = row
continue
# index domain for later reference
domain = row[0]
domains.append(domain)
rows[domain] = row
# straight alphabet sort
domains.sort()
# write out to a new file
tmp_writer.writerow(header)
for domain in domains:
tmp_writer.writerow(rows[domain])
# close the file handles
input_file.close()
tmp_file.close()
# replace the original
shutil.move(tmp_filename, input_filename)
def write_rows(rows, domain, base_domain, scanner, csv_writer, meta={}):
# If we didn't get any info, we'll still output information about why the scan failed.
if rows is None:
empty_row = [None] * len(scanner.headers)
rows = [empty_row]
# Always output Domain and Base Domain.
standard_prefix = [
domain,
base_domain,
]
# If requested, add local and Lambda scan data.
meta_fields = []
if bool(meta):
meta_fields.append(" ".join(meta.get('errors', [])))
meta_fields.append(utc_timestamp(meta.get("start_time")))
meta_fields.append(utc_timestamp(meta.get("end_time")))
meta_fields.append(just_microseconds(meta.get("duration")))
if meta.get("lambda") is not None:
meta_fields.append(meta['lambda'].get('request_id'))
meta_fields.append(meta['lambda'].get('log_group_name'))
meta_fields.append(meta['lambda'].get('log_stream_name'))
meta_fields.append(utc_timestamp(meta['lambda'].get('start_time')))
meta_fields.append(utc_timestamp(meta['lambda'].get('end_time')))
meta_fields.append(meta['lambda'].get('memory_limit'))
meta_fields.append(just_microseconds(meta['lambda'].get('measured_duration')))
# Write out prefix, scan data, and meta scan data.
for row in rows:
csv_writer.writerow(standard_prefix + row + meta_fields)
# CSV Handling #
# Cache Handling #
def cache_single(filename, cache_dir="./cache"):
return os.path.join(cache_dir, filename)
# Predictable cache path for a domain and operation.
def cache_path(domain, operation, ext="json", cache_dir="./cache"):
return os.path.join(cache_dir, operation, ("%s.%s" % (domain, ext)))
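# Illustrative examples (not part of the original module), using the default cache_dir:
#   cache_single("public-suffix-list.txt") -> "./cache/public-suffix-list.txt"
#   cache_path("example.gov", "pshtt")     -> "./cache/pshtt/example.gov.json"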
# Used to quickly get cached data for a domain.
def data_for(domain, operation, cache_dir="./cache"):
path = cache_path(domain, operation, cache_dir=cache_dir)
if os.path.exists(path):
raw = read(path)
data = json.loads(raw)
if isinstance(data, dict) and (data.get('invalid', False)):
return None
else:
return data
else:
return {}
# marker for a cached invalid response
def invalid(data=None):
if data is None:
data = {}
data['invalid'] = True
return json_for(data)
# Return base domain for a subdomain, factoring in the Public Suffix List.
def base_domain_for(subdomain, cache_dir="./cache"):
    """
    For "x.y.domain.gov", return "domain.gov".
    If suffix_list is None, the caches have not been initialized, so do that.
    """
    global suffix_list
if suffix_list is None:
suffix_list, discard = load_suffix_list(cache_dir=cache_dir)
if suffix_list is None:
logging.warning("Error downloading the PSL.")
exit(1)
return suffix_list.get_public_suffix(subdomain)
# Returns an instantiated PublicSuffixList object, and the
# list of lines read from the file.
def load_suffix_list(cache_dir="./cache"):
cached_psl = cache_single("public-suffix-list.txt", cache_dir=cache_dir)
if os.path.exists(cached_psl):
logging.debug("Using cached Public Suffix List...")
with codecs.open(cached_psl, encoding='utf-8') as psl_file:
suffixes = publicsuffix.PublicSuffixList(psl_file)
content = psl_file.readlines()
else:
# File does not exist, download current list and cache it at given location.
logging.debug("Downloading the Public Suffix List...")
try:
cache_file = publicsuffix.fetch()
except URLError as err:
logging.warning("Unable to download the Public Suffix List...")
logging.debug("{}".format(err))
return None, None
content = cache_file.readlines()
suffixes = publicsuffix.PublicSuffixList(content)
# Cache for later.
write(''.join(content), cached_psl)
return suffixes, content
# /Cache Handling #
# Argument Parsing #
class ArgumentParser(argparse.ArgumentParser):
"""
This lets us test for errors from argparse by overriding the error method.
See https://stackoverflow.com/questions/5943249
"""
def _get_action_from_name(self, name):
"""Given a name, get the Action instance registered with this parser.
If only it were made available in the ArgumentError object. It is
passed as its first arg...
"""
container = self._actions
if name is None:
return None
for action in container:
if '/'.join(action.option_strings) == name:
return action
elif action.metavar == name:
return action
elif action.dest == name:
return action
def error(self, message):
exc = sys.exc_info()[1]
if exc:
exc.argument = self._get_action_from_name(exc.argument_name)
raise exc
super(ArgumentParser, self).error(message)
def build_scan_options_parser() -> ArgumentParser:
""" Builds the argparse parser object.
Remember that it changes '-' to '_' in the options name.
"""
parser = ArgumentParser(prefix_chars="--")
parser.add_argument("domains", help="".join([
"Either a comma-separated list of domains or the url of a CSV ",
"file/path to a local CSV file containing the domains to be ",
"domains to be scanned. The CSV's header row will be ignored ",
"if the first cell starts with \"Domain\" (case-insensitive).",
]))
parser.add_argument("--cache", action="store_true", help="".join([
"Use previously cached scan data to avoid scans hitting the network ",
"where possible.",
]))
parser.add_argument("--debug", action="store_true",
help="Print out more stuff. Useful with '--serial'")
parser.add_argument("--lambda", action="store_true", help="".join([
"Run certain scanners inside Amazon Lambda instead of locally.",
]))
parser.add_argument("--lambda-profile", nargs=1, help="".join([
"When running Lambda-related commands, use a specified AWS named ",
"profile. Credentials/config for this named profile should already ",
"be configured separately in the execution environment.",
]))
parser.add_argument("--lambda-retries", type=int, help="".join([
"The maximum number of times to retry a Lambda job that fails. ",
"If not specified then the value 0 is used."
]))
parser.add_argument("--meta", action="store_true", help="".join([
"Append some additional columns to each row with information about ",
"the scan itself. This includes start/end times and durations, as ",
"well as any encountered errors. When also using '--lambda', ",
"additional, Lambda-specific information will be appended.",
]))
parser.add_argument("--scan", nargs=1, required=True,
help="Comma-separated list of scanners (required).")
parser.add_argument("--sort", action="store_true", help="".join([
"Sort result CSVs by domain name, alphabetically. (Note: this causes ",
"the entire dataset to be read into memory.)",
]))
parser.add_argument("--serial", action="store_true", help="".join([
"Disable parallelization, force each task to be done simultaneously. ",
"Helpful for testing and debugging.",
]))
parser.add_argument("--suffix", nargs=1, help="".join([
"Add a suffix to all input domains. For example, a --suffix of ",
"'virginia.gov' will add '.virginia.gov' to the end of all ",
"input domains."
]))
parser.add_argument("--output", nargs=1, default=["./"], help="".join([
"Where to output the 'cache/' and 'results/' directories. ",
"Defaults to './'.",
]))
parser.add_argument("--workers", nargs=1,
help="Limit parallel threads per-scanner to a number.")
# TODO: Should workers have a default value?
parser.add_argument("--no-fast-cache", action="store_true", help="".join([
"Do not use fast caching even if a scanner supports it. This option ",
"will cause domain-scan to use less memory, but some (possibly ",
"expensive) network activity or other operations may be repeated."
]))
# TODO: Move the scanner-specific argument parsing to each scanner's code.
# a11y:
parser.add_argument("--a11y-config",
help="a11y: Location of pa11y config file (used with a11y scanner.")
parser.add_argument("--a11y-redirects",
help="a11y: Location of YAML file with redirects to inform the a11y scanner.")
# pshtt:
parser.add_argument("--ca_file",
help="ca_file: Location of PEM file of trust store to verify certs with.")
parser.add_argument("--pt_int_ca_file",
help="pt_int_ca_file: Location of PEM file of public trust store with any needed intermediate certificates to verify certs with.")
parser.add_argument("--cache-third-parties",
help="cache-third-parties: Location ot store third party cache files.")
parser.add_argument("--user_agent",
help="user_agent: User agent string to use in scan request header.")
parser.add_argument("--adfs-hsts", action="store_true",
help="adfs-hsts: Specifically scan /adfs/ls/ for an HSTS header even without a redirect to it.")
# sslyze:
parser.add_argument("--sslyze-serial",
help="sslyze: If set, will use a synchronous (single-threaded in-process) scanner. Defaults to true.")
parser.add_argument("--sslyze-certs",
help="sslyze: If set, will use the CertificateInfoScanner and return certificate info. Defaults to true.")
parser.add_argument("--sslyze-reneg",
help="sslyze: If set, will use the SessionRenegotiationScanner and return session renegotiation info. Defaults to true.")
# trustymail:
parser.add_argument("--starttls", action='store_true', help="".join([
"trustymail: Only check mx records and STARTTLS support. ",
"(Implies --mx.)"
]))
parser.add_argument("--timeout", help="".join([
"trustymail: The DNS lookup timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-timeout", help="".join([
"trustymail: The SMTP connection timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-localhost", help="".join([
"trustymail: The hostname to use when connecting to SMTP ",
"servers. (Default is the FQDN of the host from ",
"which trustymail is being run.)"
]))
parser.add_argument("--smtp-ports", help="".join([
"trustymail: A comma-delimited list of ports at which to look ",
"for SMTP servers. (Default is '25,465,587'.)"
]))
parser.add_argument("--dns", help="".join([
"trustymail: A comma-delimited list of DNS servers to query ",
"against. For example, if you want to use ",
"Google's DNS then you would use the ",
"value --dns-hostnames='8.8.8.8,8.8.4.4'. By ",
"default the DNS configuration of the host OS ",
"(/etc/resolv.conf) is used. Note that ",
"the host's DNS configuration is not used at all ",
"if this option is used."
]))
parser.add_argument("--no-smtp-cache", help="".join([
"trustymail: Do not cache SMTP results during the run. This",
"may results in slower scans due to testing the ",
"same mail servers multiple times."
]))
parser.add_argument("--mx", action='store_true', help="".join([
"trustymail: Only check MX records"
]))
parser.add_argument("--spf", action='store_true', help="".join([
"trustymail: Only check SPF records"
]))
parser.add_argument("--dmarc", action='store_true', help="".join([
"trustymail: Only check DMARC records"
]))
return parser
def options() -> Tuple[dict, list]:
"""
Parse options for the ``scan`` command.
Impure
Reads from sys.argv.
"""
parser = build_scan_options_parser()
parsed, unknown = parser.parse_known_args()
opts = {k: v for k, v in vars(parsed).items() if v is not None}
if opts.get("lambda_profile") and not opts.get("lambda"):
raise argparse.ArgumentTypeError(
"Can't set lambda profile unless lambda flag is set.")
# We know we want one value, but the ``nargs`` flag means we get a list.
should_be_singles = (
"lambda_profile",
"output",
"scan",
"suffix",
"workers",
)
opts = make_values_single(opts, should_be_singles)
# Derive some options not set directly at CLI:
opts["_"] = {
"cache_dir": os.path.join(opts.get("output", "./"), "cache"),
"report_dir": opts.get("output", "./"),
"results_dir": os.path.join(opts.get("output", "./"), "results"),
}
return (opts, unknown)
def make_values_single(dct: dict, should_be_singles: Iterable[str]) -> dict:
for key in (k for k in should_be_singles if k in dct):
dct[key] = dct[key][0]
return dct
def handle_scanner_arguments(scans: List[ModuleType], opts: dict, unknown: List[str]):
for scan in scans:
if hasattr(scan, "handle_scanner_args"):
scan_opts, unknown = scan.handle_scanner_args(unknown, opts) # type: ignore
opts.update(scan_opts)
return (opts, unknown)
# /Argument Parsing #
def build_scanner_list(names: List[str],
mod: str="scanners") -> List[ModuleType]:
"""
Given a list of names, load modules corresponding to those names from the
scanners directory. Also verify that they have the required properties.
"""
scans = []
for name in names:
try:
scan = importlib.import_module(
"%s.%s" % (mod, name))
verify_scanner_properties(scan)
except ImportError:
exc_type, exc_value, exc_traceback = sys.exc_info()
errmsg = "\n".join([
"[%s] Scanner not found, or had an error during loading." % name,
"\tERROR: %s" % exc_type,
"\t%s" % exc_value,
])
logging.error(errmsg)
raise ImportError(errmsg)
scans.append(scan)
return scans
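# Illustrative (hypothetical) call, assuming scanner modules named "pshtt" and
# "sslyze" exist under the scanners package:
#   scans = build_scanner_list(["pshtt", "sslyze"])
#   # imports scanners.pshtt and scanners.sslyze and verifies their properties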
def verify_scanner_properties(scanner: ModuleType) -> None:
name = scanner.__name__
for prop in MANDATORY_SCANNER_PROPERTIES:
if not hasattr(scanner, prop):
raise ImportError("%s lacks required %s property" % (name, prop))
# If the scan has a canonical command, make sure it exists.
# mypy doesn't handle optional properties well, it seems.
if hasattr(scan, "command") and scan.command and (not try_command(scan.command)): # type: ignore
errmsg = "[%s] Command not found: %s" % (name, scan.command) # type: ignore
logging.error(errmsg)
raise ImportError(errmsg)
def begin_csv_writing(scanner: ModuleType, options: dict,
base_hdrs: Tuple[List[str], List[str], List[str]]) -> dict:
"""
Determine the CSV output file path for the scanner, open the file at that
path, instantiate a CSV writer for it, determine whether or not to use
lambda, determine what the headers are, write the headers to the CSV.
Return a dict containing the above.
"""
PREFIX_HEADERS, LOCAL_HEADERS, LAMBDA_HEADERS = base_hdrs
name = scanner.__name__.split(".")[-1] # e.g. 'pshtt'
results_dir = options["_"]["results_dir"]
meta = options.get("meta")
lambda_mode = options.get("lambda")
use_lambda = lambda_mode and \
hasattr(scanner, "lambda_support") and \
scanner.lambda_support # type: ignore # it's an optional variable.
# Write the header row, factoring in Lambda detail if needed.
headers = PREFIX_HEADERS + scanner.headers # type: ignore # optional again
# Local scan timing/errors.
if meta:
headers += LOCAL_HEADERS
# Lambda scan timing/errors. (At this step, only partial fields.)
if meta and use_lambda:
headers += LAMBDA_HEADERS
scanner_csv_path = Path(results_dir, "%s.csv" % name).resolve()
scanner_file = scanner_csv_path.open('w', newline='')
scanner_writer = csv.writer(scanner_file)
print("Opening csv file for scanner {}: {}".format(name, scanner_csv_path))
scanner_writer.writerow(headers)
return {
'name': name,
'file': scanner_file,
'filename': str(scanner_csv_path),
'writer': scanner_writer,
'headers': headers,
'use_lambda': use_lambda,
}
def determine_scan_workers(scanner: ModuleType, options: dict, w_default: int,
w_max: int) -> int:
"""
Given a number of inputs, determines the right number of workers to set
when running scans.
"""
if options.get("serial"):
workers = 1
elif hasattr(scanner, "workers"):
workers = scanner.workers # type: ignore # The subclass objects set this sometimes.
else:
# mypy has trouble with this, presumably because we're using a dict
workers = int(options.get("workers", w_default)) # type: ignore
# Enforce a local worker maximum as a safety valve.
return min(workers, w_max)
# Yield domain names from a single string, or a CSV of them.
@singledispatch
def domains_from(arg: Any, domain_suffix=None) -> Iterable[str]:
raise TypeError("'%s' is not a recognized source for domains." % arg)
@domains_from.register(str)
def _df_str(arg: str, domain_suffix: Union[str, None]=None) -> Iterable[str]:
# TODO: how do we handle domain_suffix here?
if domain_suffix is not None:
errmsg = "Passing in domains at CLI not compatible with --suffix."
        raise argparse.ArgumentError(None, errmsg)
for x in arg.split(","):
yield x
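# Illustrative (hypothetical) use of the dispatch above:
#   list(domains_from("example.gov,test.gov")) -> ["example.gov", "test.gov"]
#   With a .csv Path, it yields the first column of each row (lowercased),
#   appending the suffix when one is given.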
@domains_from.register(Path)
def _df_path(arg: Path, domain_suffix: Union[str, None]=None) -> Iterable[str]:
if arg.suffix == ".csv":
with arg.open(encoding='utf-8', newline='') as csvfile:
for row in csv.reader(csvfile):
if (not row) or (not row[0]) or (row[0].lower() == "domain") or (row[0].lower() == "domain name"):
continue
domain = row[0].lower()
if domain_suffix:
sep = "."
if domain_suffix.startswith("."):
sep = ""
yield "%s%s%s" % (domain, sep, domain_suffix)
else:
yield domain
else:
# Note: the path referred to below will be the path to the local cached
# download and not to the original URL. It shouldn't be possible to get
# here with that being a problem, but noting it anyway.
msg = "\n".join([
"Domains should be specified as a comma-separated list ",
"or as the URL or path to a .csv file. ",
"%s does not appear to be any of those." % arg
])
raise TypeError(msg)
def handle_domains_argument(domains: str, cache_dir: Path) -> Union[Path, str]:
# `domains` can be either a path or a domain name.
# It can also be a URL, and if it is we want to download it now,
# and then adjust the value to be the path of the cached download.
# Note that the cache_dir is basically guaranteed to exist by the time
# we reach this point in the execution path.
if domains.startswith("http:") or domains.startswith("https:"):
domains_path = Path(cache_dir, "domains.csv")
try:
response = requests.get(domains)
write(response.text, str(domains_path))
except requests.exceptions.RequestException as err:
msg = "\n".join([
"Domains URL not downloaded successfully; RequestException",
str(err),
])
logging.error(msg)
raise IOError(msg)
return domains_path
elif domains.endswith(".csv"):
# Assume file is either absolute or relative from current dir.
try:
domains_path = Path(os.path.curdir, domains).resolve()
if not domains_path.exists():
raise FileNotFoundError
return domains_path
except FileNotFoundError as err:
msg = "\n".join([
"Domains CSV file not found.",
"(Curdir: %s CSV file: %s)" % (os.path.curdir, domains),
str(err),
])
logging.error(msg)
raise FileNotFoundError(msg)
return domains
|
py | 1a4c6e86867a2669b024610eb5fb7c79bf685ffe | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=unused-variable
# pylint: disable=missing-docstring
import argparse
import sys
from unittest.mock import patch
import pytest
from bkyml.skeleton import Block, \
Comment, \
Steps, \
Env, \
Command, \
Plugin, \
Wait, \
Trigger, \
parse_main, \
run, \
check_positive, \
bool_or_string, \
plugin_or_key_value_pair
__author__ = "Joscha Feth"
__copyright__ = "Joscha Feth"
__license__ = "mit"
def describe_bkyaml():
@pytest.fixture
def run_run(capsys, snapshot, argv):
with patch.object(sys, 'argv', [''] + argv):
run()
captured = capsys.readouterr()
snapshot.assert_match(captured.out)
@pytest.fixture
def args():
return argparse.Namespace()
def describe_plugin_or_key_value_pair():
def test_plugin_or_key_value_pair_plugin():
assert plugin_or_key_value_pair('org/repo#1.0.0') == 'org/repo#1.0.0'
def test_plugin_or_key_value_pair_pair():
assert plugin_or_key_value_pair('a=b=c') == ['a', 'b=c']
def describe_check_positive():
def test_check_positive_1():
assert check_positive(1)
def test_check_positive_minus_1():
with pytest.raises(argparse.ArgumentTypeError) as err:
check_positive(-1)
assert '-1 is an invalid positive int value' in str(err.value)
def describe_bool_or_string():
def test_bool_or_string_true():
assert bool_or_string('TRUE')
def test_bool_or_string_false():
assert bool_or_string('FALSE') is False
def test_bool_or_string():
assert bool_or_string('bla') == 'bla'
def describe_comment():
def test_comment_missing_str(args, snapshot):
with pytest.raises(AssertionError):
Comment.comment(args)
def test_comment(args, snapshot):
args.str = ['a', 'b']
snapshot.assert_match(Comment.comment(args))
def test_comment_multiline(args, snapshot):
args.str = ['multiline\ncomments', 'are fun']
snapshot.assert_match(Comment.comment(args))
def describe_steps():
def test_steps(args, snapshot):
snapshot.assert_match(Steps.steps(args))
def describe_wait():
def test_wait(args, snapshot):
snapshot.assert_match(Wait.wait(args))
def test_wait_continue_on_failure(args, snapshot):
args.continue_on_failure = True
snapshot.assert_match(Wait.wait(args))
def describe_env():
def test_env_all(args, snapshot):
args.var = [['a', 'b'], ['c', 'd']]
snapshot.assert_match(Env.env(args))
def describe_block():
@pytest.fixture
def generic_block_call(args, snapshot):
args.label = ':rocket: Release'
snapshot.assert_match(Block.block(args))
def test_block_missing(args, snapshot):
with pytest.raises(AssertionError):
Block.block(args)
def test_block_simple(args, snapshot):
generic_block_call(args, snapshot)
def test_block_prompt(args, snapshot):
args.prompt = 'Really release?'
generic_block_call(args, snapshot)
def test_block_branches(args, snapshot):
args.branches = ['master', 'release-*']
generic_block_call(args, snapshot)
def test_block_field_text(args, snapshot):
args.field_text = [['key', 'label', 'hint', 'false', 'default']]
generic_block_call(args, snapshot)
def test_block_field_text_no_key(args, snapshot):
args.field_text = [['', 'label', 'hint', 'false', 'default']]
with pytest.raises(argparse.ArgumentTypeError):
generic_block_call(args, snapshot)
def test_block_field_text_no_label(args, snapshot):
args.field_text = [['key', '', 'hint', 'false', 'default']]
with pytest.raises(argparse.ArgumentTypeError):
generic_block_call(args, snapshot)
def test_block_field_text_no_hint(args, snapshot):
args.field_text = [['key', 'label', '', 'false', 'default']]
generic_block_call(args, snapshot)
def test_block_field_text_no_default(args, snapshot):
args.field_text = [['key', 'label', 'hint', 'false', '']]
generic_block_call(args, snapshot)
def test_block_field_text_required(args, snapshot):
args.field_text = [['key', 'label', 'hint', 'true', 'default']]
generic_block_call(args, snapshot)
def test_block_field_select(args, snapshot):
args.field_select = [[
'key',
'label',
'hint',
'false',
'default',
'opt1=Label1',
'opt2=Label2'
]]
generic_block_call(args, snapshot)
def test_block_field_select_no_key(args, snapshot):
args.field_select = [[
'',
'label',
'hint',
'false',
'default',
'opt1=Label1',
'opt2=Label2'
]]
with pytest.raises(argparse.ArgumentTypeError):
generic_block_call(args, snapshot)
def test_block_field_select_no_label(args, snapshot):
args.field_select = [[
'key',
'',
'hint',
'false',
'default',
'opt1=Label1',
'opt2=Label2'
]]
with pytest.raises(argparse.ArgumentTypeError):
generic_block_call(args, snapshot)
def test_block_field_select_no_key_value(args, snapshot):
args.field_select = [['key', 'label', 'hint', 'false', 'default']]
with pytest.raises(argparse.ArgumentTypeError):
generic_block_call(args, snapshot)
def test_block_field_select_key_value_no_pair(args, snapshot):
args.field_select = [['key', 'label', 'hint', 'false', 'default', 'opt']]
with pytest.raises(ValueError):
generic_block_call(args, snapshot)
def test_block_field_multi_fields(args, snapshot):
args.field_text = [['key', 'label', 'hint', 'false', 'default']]
args.field_select = [[
'key',
'label',
'hint',
'false',
'default',
'opt1=Label1',
'opt2=Label2'
]]
generic_block_call(args, snapshot)
def describe_trigger():
@pytest.fixture
def generic_trigger_call(args, snapshot):
args.pipeline = 'my-pipeline'
snapshot.assert_match(Trigger.trigger(args))
def test_trigger_missing(args, snapshot):
with pytest.raises(AssertionError):
Trigger.trigger(args)
def test_trigger_simple(args, snapshot):
generic_trigger_call(args, snapshot)
def test_trigger_label(args, snapshot):
args.label = ':rocket: Deploy'
generic_trigger_call(args, snapshot)
def test_trigger_async(args, snapshot):
args.is_async = True
generic_trigger_call(args, snapshot)
def test_trigger_branches(args, snapshot):
args.branches = ['master', 'release-*']
generic_trigger_call(args, snapshot)
def test_trigger_build_branch(args, snapshot):
args.build_branch = 'master'
generic_trigger_call(args, snapshot)
def test_trigger_build_commit(args, snapshot):
args.build_commit = 'c0ffee'
generic_trigger_call(args, snapshot)
def test_trigger_build_message(args, snapshot):
args.build_message = 'Put the lime in the coconut'
generic_trigger_call(args, snapshot)
def test_trigger_build_env(args, snapshot):
args.build_env = [['a', 'b']]
generic_trigger_call(args, snapshot)
def test_trigger_build_meta_data(args, snapshot):
args.build_meta_data = [['a', 'b']]
generic_trigger_call(args, snapshot)
def describe_command():
@pytest.fixture
def generic_command_call(args, snapshot):
args.command = [['cmd']]
snapshot.assert_match(Command.command(args))
@pytest.fixture
def assert_command_call_error(capsys, command_args, message):
with pytest.raises(SystemExit) as sys_exit:
parse_main(['command', '--command', 'cmd'] + command_args)
assert '2' in str(sys_exit.value)
captured = capsys.readouterr()
assert message in captured.err
def test_command_1(args, snapshot):
args.command = [["my-command arg1 'arg 2'"]]
snapshot.assert_match(Command.command(args))
def test_command_n(args, snapshot):
args.command = [['a', 'b'], ['c', 'd']]
snapshot.assert_match(Command.command(args))
def test_label(args, snapshot):
args.label = 'My label'
generic_command_call(args, snapshot)
def test_branches(args, snapshot):
args.branches = ['master', 'release-*']
generic_command_call(args, snapshot)
def test_env(args, snapshot):
args.env = [['a', 'b'], ['c', 'd']]
generic_command_call(args, snapshot)
def test_agents(args, snapshot):
args.agents = [['npm', 'true'], ['mvn', 'true']]
generic_command_call(args, snapshot)
def test_artifact_paths_0(args, snapshot):
args.artifact_paths = []
generic_command_call(args, snapshot)
def test_artifact_paths_1(args, snapshot):
args.artifact_paths = [["logs/**/*;coverage/**/*"]]
generic_command_call(args, snapshot)
def test_artifact_paths_n(args, snapshot):
args.artifact_paths = [["logs/**/*", "coverage/**/*"]]
generic_command_call(args, snapshot)
def test_parallelism(args, snapshot):
args.parallelism = 4
generic_command_call(args, snapshot)
def test_parallelism_1(args, snapshot):
args.parallelism = 1
generic_command_call(args, snapshot)
def test_concurrency(args, snapshot):
args.concurrency = 2
args.concurrency_group = 'my/group'
generic_command_call(args, snapshot)
def test_concurrency_cli(capsys):
assert_command_call_error(
capsys,
['--concurrency', '1'],
'--concurrency requires --concurrency-group'
)
def test_timeout_in_minutes_minus(args, snapshot):
args.timeout_in_minutes = -1
generic_command_call(args, snapshot)
def test_timeout_in_minutes_0(args, snapshot):
args.timeout_in_minutes = 0
generic_command_call(args, snapshot)
def test_timeout_in_minutes_1(args, snapshot):
args.timeout_in_minutes = 1
generic_command_call(args, snapshot)
def test_skip_bool_true(args, snapshot):
args.skip = True
generic_command_call(args, snapshot)
def test_skip_bool_false(args, snapshot):
args.skip = False
generic_command_call(args, snapshot)
def test_skip_string(args, snapshot):
args.skip = 'Some reason'
generic_command_call(args, snapshot)
def describe_retry():
def test_retry_unknown(args, snapshot):
args.retry = 'unknown'
with pytest.raises(argparse.ArgumentTypeError) as err:
generic_command_call(args, snapshot)
assert 'unknown is an invalid retry value' in str(err.value)
def describe_manual():
def test_retry_manual(args, snapshot):
args.retry = 'manual'
generic_command_call(args, snapshot)
def test_retry_manual_allowed(args, snapshot):
args.retry = 'manual'
args.retry_manual_allowed = True
generic_command_call(args, snapshot)
args.retry_manual_allowed = False
generic_command_call(args, snapshot)
def test_retry_reason(args, snapshot):
args.retry = 'manual'
args.retry_manual_reason = 'Some reason why'
generic_command_call(args, snapshot)
# pylint: disable=invalid-name
def test_retry_manual_permit_on_passed(args, snapshot):
args.retry = 'manual'
args.retry_manual_permit_on_passed = True
generic_command_call(args, snapshot)
args.retry_manual_permit_on_passed = False
generic_command_call(args, snapshot)
def test_retry_manual_missing_allowed_missing_retry(capsys):
assert_command_call_error(
capsys,
['--no-retry-manual-allowed'],
'--[no-]retry-manual-allowed requires --retry manual'
)
def test_retry_manual_reason_missing_retry(capsys):
assert_command_call_error(
capsys,
['--retry-manual-reason', 'My reason'],
'--retry-manual-reason requires --retry manual'
)
# pylint: disable=invalid-name
def test_retry_manual_pop_missing_retry(capsys):
assert_command_call_error(
capsys,
['--retry-manual-permit-on-passed'],
'--[no-]retry-manual-permit-on-passed'
+ ' requires --retry manual'
)
# pylint: disable=invalid-name
def test_retry_manual_pop_manual_retry(capsys, snapshot):
run_run(
capsys,
snapshot,
['command',
'--command', 'cmd',
'--retry', 'manual',
'--retry-manual-permit-on-passed']
)
def describe_automatic():
def test_retry_automatic(args, snapshot):
args.retry = 'automatic'
generic_command_call(args, snapshot)
def test_retry_automatic_limit(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_limit = 2
generic_command_call(args, snapshot)
def test_retry_automatic_limit_11(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_limit = 11
generic_command_call(args, snapshot)
def test_retry_automatic_exit_status_star(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_exit_status = '*'
generic_command_call(args, snapshot)
def test_retry_automatic_exit_status_number(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_exit_status = 1
generic_command_call(args, snapshot)
def test_retry_automatic_exit_status_and_limit(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_limit = 2
args.retry_automatic_exit_status = 1
generic_command_call(args, snapshot)
def test_retry_automatic_tuple_0(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_tuple = []
generic_command_call(args, snapshot)
def test_retry_automatic_tuple_n(args, snapshot):
args.retry = 'automatic'
args.retry_automatic_tuple = [['*', 2], [1, 3]]
generic_command_call(args, snapshot)
def test_exit_status_string(capsys):
assert_command_call_error(
capsys,
['--retry', 'automatic',
'--retry-automatic-exit-status', 'xxx'],
'xxx is an invalid value'
)
def test_retry_exit_status_missing_retry(capsys):
assert_command_call_error(
capsys,
['--retry-automatic-exit-status', '*'],
'--retry-automatic-exit-status'
+ ' requires --retry automatic'
)
def test_retry_limit_missing_retry(capsys):
assert_command_call_error(
capsys,
['--retry-automatic-limit', '2'],
'--retry-automatic-limit requires --retry automatic'
)
def test_retry_tuple_missing_retry(capsys):
assert_command_call_error(
capsys,
['--retry-automatic-tuple', '*', '2'],
'--retry-automatic-tuple requires --retry automatic'
)
def test_retry_tuple_missing_combine_exit_status(capsys):
assert_command_call_error(
capsys,
['--retry', 'automatic',
'--retry-automatic-tuple', '*', '2',
'--retry-automatic-exit-status', '*'],
'--retry-automatic-tuple can not be combined with'
+ ' --retry-automatic-exit-status'
)
def test_retry_tuple_missing_combine_limit(capsys):
assert_command_call_error(
capsys,
['--retry', 'automatic',
'--retry-automatic-tuple', '*', '2',
'--retry-automatic-limit', '2'],
'--retry-automatic-tuple'
+ ' can not be combined with '
+ '--retry-automatic-limit'
)
def describe_command_plugin_attr():
def test_command_plugin_none(args, snapshot):
args.plugin = []
generic_command_call(args, snapshot)
def test_command_plugin_no_args(args, snapshot):
args.plugin = [['org/repo#1.0.0']]
generic_command_call(args, snapshot)
def test_command_plugin_1(args, snapshot):
args.plugin = [
['org/repo#1.0.0', ['a', 'b'], ['c', 'd']]
]
generic_command_call(args, snapshot)
def test_command_plugin_n(args, snapshot):
args.plugin = [
['org/repo#1.0.0', ['a', 'b'], ['c', 'd']],
['other_org/other_repo', ['e', 'f'], ['g', 'h=i']],
]
generic_command_call(args, snapshot)
def describe_plugin():
@pytest.fixture
def generic_plugin_call(args, snapshot):
snapshot.assert_match(Plugin.plugin(args))
def describe_plugin_name_attr():
def test_plugin_name_attr(args, snapshot):
args.plugin = [['org/repo#1.0.0']]
args.name = 'My plugin run'
generic_plugin_call(args, snapshot)
def describe_plugin_plugin_attr():
def test_plugin_plugin_none(args, snapshot):
args.plugin = []
generic_plugin_call(args, snapshot)
def test_plugin_plugin_no_args(args, snapshot):
args.plugin = [['org/repo#1.0.0']]
generic_plugin_call(args, snapshot)
def test_plugin_plugin_1(args, snapshot):
args.plugin = [
['org/repo#1.0.0', ['a', 'b'], ['c', 'd']]
]
generic_plugin_call(args, snapshot)
def test_plugin_plugin_n(args, snapshot):
args.plugin = [
['org/repo#1.0.0', ['a', 'b'], ['c', 'd']],
['other_org/other_repo', ['e', 'f'], ['g', 'h=i']],
]
generic_plugin_call(args, snapshot)
def describe_parse_main():
def test_main(snapshot):
snapshot.assert_match(parse_main(['command', '--command', 'x']))
def describe_cli():
def test_cli_command(snapshot, capsys):
run_run(capsys, snapshot, ['command', '--command', 'x'])
def test_empty_command(snapshot, capsys):
with pytest.raises(SystemExit):
with patch.object(sys, 'argv', ['']):
run()
captured = capsys.readouterr()
snapshot.assert_match(captured.err)
def test_help(snapshot, capsys):
for subcommand in [
'comment',
'steps',
'env',
'command',
'plugin',
'wait',
'trigger',
'block'
]:
with pytest.raises(SystemExit):
with patch.object(sys, 'argv', ['', subcommand, '--help']):
run()
captured = capsys.readouterr()
snapshot.assert_match(captured.out)
|
py | 1a4c7218a40992480f4fd75f7d221951501a6b59 | """
Tests for the bootstrap.py (formerly bootstrap_controller.py) file.
"""
import unittest
from collections import OrderedDict
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.bootstrap as bc
import pylogit.asym_logit as asym
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
from pylogit.conditional_logit import MNL
try:
# Python 3.x does not natively support xrange
from past.builtins import xrange
except ImportError:
pass
class BootstrapTests(unittest.TestCase):
def setUp(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
self.fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
self.fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
self.fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
self.fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
self.fake_shapes = np.array([-1, 1])
        # Create names for the shape parameters
self.fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
self.fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
self.natural_shapes = asym._convert_eta_to_c(self.fake_shapes,
self.fake_shape_ref_pos)
# Create an array of all model parameters
self.fake_all_params = np.concatenate((self.fake_shapes,
self.fake_intercepts,
self.fake_betas))
# The mapping between rows and alternatives is given below.
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
        # Get the mapping between rows and observations
self.fake_rows_to_obs = csr_matrix(np.array([[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
self.fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5],
[0.78],
[0.23],
[1.04],
[2.52],
[1.49],
[0.85],
[1.37],
[1.17],
[2.03],
[1.62],
[1.94]])
# Create the index array for this set of choice situations
self.fake_index = self.fake_design.dot(self.fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2, 3, 3, 3,
4, 4, 5, 5, 5, 6, 6, 6],
"alt_id": [1, 2, 3, 1, 3, 1, 2, 3,
2, 3, 1, 2, 3, 1, 2, 3],
"choice": [0, 1, 0, 0, 1, 1, 0, 0,
1, 0, 1, 0, 0, 0, 0, 1],
"x": self.fake_design[:, 0],
"intercept":
np.ones(self.fake_design.shape[0])})
# Record the various column names
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
        # Create the index specification and name dictionary for the model
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
self.constructor_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
self.constructor_kwargs = {"intercept_ref_pos":
self.fake_intercept_ref_pos,
"shape_ref_pos": self.fake_shape_ref_pos,
"names": self.fake_names,
"intercept_names":
self.fake_intercept_names,
"shape_names": self.fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
self.asym_model_obj = asym.MNAL(*self.constructor_args,
**self.constructor_kwargs)
self.asym_model_obj.coefs = pd.Series(self.fake_betas)
self.asym_model_obj.intercepts =\
pd.Series(self.fake_intercepts, index=self.fake_intercept_names)
self.asym_model_obj.shapes =\
pd.Series(self.fake_shapes, index=self.fake_shape_names)
self.asym_model_obj.params =\
pd.Series(np.concatenate([self.fake_shapes,
self.fake_intercepts,
self.fake_betas]),
index=self.fake_shape_names +
self.fake_intercept_names +
self.fake_names["x"])
self.asym_model_obj.nests = None
#####
# Initialize a basic MNL model
#####
# Create the MNL specification and name dictionaries.
self.mnl_spec, self.mnl_names = OrderedDict(), OrderedDict()
self.mnl_spec["intercept"] = [1, 2]
self.mnl_names["intercept"] = self.fake_intercept_names
self.mnl_spec.update(self.fake_specification)
self.mnl_names.update(self.fake_names)
mnl_construct_args = self.constructor_args[:-1] + [self.mnl_spec]
mnl_kwargs = {"names": self.mnl_names}
self.mnl_model_obj = MNL(*mnl_construct_args, **mnl_kwargs)
return None
def test_get_param_names(self):
# Alias the function being tested.
func = bc.get_param_names
# Get the function results
func_results = func(self.asym_model_obj)
# Get the expected results
expected_results = self.asym_model_obj.params.index.tolist()
# Test the function results
self.assertIsInstance(func_results, list)
self.assertEqual(func_results, expected_results)
# Set the nest names and re-test the function.
self.asym_model_obj.nest_names = ["No Nest"]
expected_results_2 = self.asym_model_obj.nest_names + expected_results
func_results_2 = func(self.asym_model_obj)
self.assertIsInstance(func_results_2, list)
self.assertEqual(func_results_2, expected_results_2)
return None
def test_get_param_list_for_prediction(self):
# Determine the number of replicates
num_replicates = 10
# Create a fake model object with the needed attributes
class FakeModel(object):
def __init__(self):
self.nest_names = ['one', 'oneA']
self.shape_names = ['two', 'twoA', 'twoB']
self.intercept_names = ['three']
self.ind_var_names =\
['four', 'fourA', 'fourB', 'fourC', 'fourD']
fake_model_obj = FakeModel()
# Create a fake set of bootstrap replicates
fake_replicates =\
(np.array([1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 4])[None, :] *
np.ones(num_replicates)[:, None])
# Create the expected result
expected_param_list = [4 * np.ones((5, num_replicates)),
3 * np.ones((1, num_replicates)),
2 * np.ones((3, num_replicates)),
np.ones((2, num_replicates))]
# Alias the function being tested
func = bc.get_param_list_for_prediction
# Calculate the function result
func_result = func(fake_model_obj, fake_replicates)
# Perform the desired tests with a full set of parameters
self.assertIsInstance(func_result, list)
self.assertEqual(len(func_result), 4)
for pos, func_array in enumerate(func_result):
expected_array = expected_param_list[pos]
self.assertIsInstance(func_array, np.ndarray)
self.assertEqual(func_array.shape, expected_array.shape)
npt.assert_allclose(func_array, expected_array)
# Perform the desired tests with just index coefficients
for attr in ['intercept_names', 'shape_names', 'nest_names']:
setattr(fake_model_obj, attr, None)
func_result_2 = func(fake_model_obj, fake_replicates[:, -5:])
expected_result_2 =\
[4 * np.ones((5, num_replicates)), None, None, None]
self.assertIsInstance(func_result_2, list)
for pos in xrange(1, 4):
self.assertIsNone(func_result_2[pos])
self.assertIsInstance(func_result_2[0], np.ndarray)
self.assertEqual(func_result_2[0].shape, expected_result_2[0].shape)
npt.assert_allclose(func_result_2[0], expected_result_2[0])
return None
def test_ensure_replicates_kwarg_validity(self):
# Create the 'good' and 'bad' arguments for testing
good_args = ['bootstrap', 'jackknife']
bad_args = ['bad', 2, None]
# Alias the function being tested
func = bc.ensure_replicates_kwarg_validity
        # Note the expected error message
expected_error_msg =\
"`replicates` MUST be either 'bootstrap' or 'jackknife'."
# Perform the desired tests
for good_arg in good_args:
self.assertIsNone(func(good_arg))
for bad_arg in bad_args:
self.assertRaisesRegexp(ValueError,
expected_error_msg,
func,
bad_arg)
return None
def test_boot_initialization(self):
# Create the bootstrap object
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Test the bootstrap object.
self.assertIsInstance(boot_obj, bc.Boot)
self.assertEqual(id(boot_obj.model_obj), id(self.asym_model_obj))
self.assertEqual(self.asym_model_obj.params.index.tolist(),
boot_obj.mle_params.index.tolist())
expected_attrs =\
["bootstrap_replicates", "jackknife_replicates",
"percentile_interval", "bca_interval",
"abc_interval", "all_intervals",
"jackknife_log_likehoods",
"bootstrap_log_likelihoods"]
for current_attr in expected_attrs:
self.assertTrue(hasattr(boot_obj, current_attr))
self.assertIsNone(getattr(boot_obj, current_attr))
return None
def test_generate_bootstrap_replicates(self):
# Create the bootstrap object.
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Determine the number of bootstrap samples that we wish to take
num_samples = 3
# Create the necessary keyword arguments.
mnl_init_vals =\
np.zeros(len(self.fake_intercept_names) +
sum([len(x) for x in self.fake_names.values()]))
mnl_kwargs = {"ridge": 0.01,
"maxiter": 1200,
"method": "bfgs"}
bootstrap_kwargs = {"mnl_obj": self.mnl_model_obj,
"mnl_init_vals": mnl_init_vals,
"mnl_fit_kwargs": mnl_kwargs,
"constrained_pos": [0],
"boot_seed": 1988}
# Alias the needed function
func = boot_obj.generate_bootstrap_replicates
# Get the function results
func_results =\
func(num_samples,
mnl_obj=self.mnl_model_obj,
mnl_init_vals=mnl_init_vals,
mnl_fit_kwargs=mnl_kwargs,
constrained_pos=[0],
boot_seed=1988)
# Perform the requisite tests
self.assertIsNone(func_results)
self.assertIsInstance(boot_obj.bootstrap_replicates, pd.DataFrame)
self.assertEqual(boot_obj.bootstrap_replicates.ndim, 2)
expected_shape = (num_samples, self.asym_model_obj.params.size)
self.assertEqual(boot_obj.bootstrap_replicates.shape, expected_shape)
self.assertEqual(boot_obj.bootstrap_replicates
.iloc[:, 0].unique().size, 1)
self.assertEqual(boot_obj.bootstrap_replicates
.iloc[:, 0].unique()[0], 0)
self.assertTrue(boot_obj.bootstrap_replicates
.iloc[:, 1].unique().size > 1)
return None
def test_generate_jackknife_replicates(self):
# Create the bootstrap object.
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Create the necessary keyword arguments.
mnl_init_vals =\
np.zeros(len(self.fake_intercept_names) +
sum([len(x) for x in self.fake_names.values()]))
mnl_kwargs = {"ridge": 0.01,
"maxiter": 1200,
"method": "bfgs"}
bootstrap_kwargs = {"mnl_obj": self.mnl_model_obj,
"mnl_init_vals": mnl_init_vals,
"mnl_fit_kwargs": mnl_kwargs,
"constrained_pos": [0],
"boot_seed": 1988}
# Alias the needed function
func = boot_obj.generate_jackknife_replicates
# Get the function results
func_results =\
func(mnl_obj=self.mnl_model_obj,
mnl_init_vals=mnl_init_vals,
mnl_fit_kwargs=mnl_kwargs,
constrained_pos=[0])
# Perform the requisite tests
self.assertIsNone(func_results)
self.assertIsInstance(boot_obj.jackknife_replicates, pd.DataFrame)
self.assertEqual(boot_obj.jackknife_replicates.ndim, 2)
expected_shape =\
(self.fake_rows_to_obs.shape[1], self.asym_model_obj.params.size)
self.assertEqual(boot_obj.jackknife_replicates.shape, expected_shape)
self.assertEqual(boot_obj.jackknife_replicates
.iloc[:, 0].unique().size, 1)
self.assertEqual(boot_obj.jackknife_replicates
.iloc[:, 0].unique()[0], 0)
self.assertTrue(boot_obj.jackknife_replicates
.iloc[:, 1].unique().size > 1)
return None
class IntervalTests(unittest.TestCase):
"""
References
----------
Efron, Bradley, and Robert J. Tibshirani. An Introduction to the
Bootstrap. CRC press, 1994. Chapter 14.
Notes
-----
The data and tests used in the `IntervalTests` test suite come from the
Efron & Tibshirani reference cited above.
"""
def setUp(self):
# Store the spatial test data from Efron and Tibshirani (1994)
self.test_data =\
np.array([48, 36, 20, 29, 42, 42, 20, 42, 22, 41, 45, 14, 6,
0, 33, 28, 34, 4, 32, 24, 47, 41, 24, 26, 30, 41])
# Note how many test data observations there are.
self.num_test_obs = self.test_data.size
# Store the MLE estimate
self.test_theta_hat = self.calc_theta(self.test_data)
# Create a pandas series of the data. Allows for easy case deletion.
self.raw_series = pd.Series(self.test_data)
# Create the array of jackknife replicates
self.jackknife_replicates =\
np.empty((self.num_test_obs, 1), dtype=float)
for obs in xrange(self.num_test_obs):
current_data = self.raw_series[self.raw_series.index != obs].values
self.jackknife_replicates[obs] = self.calc_theta(current_data)[0]
# Create the bootstrap replicates
num_test_reps = 5000
test_indices = np.arange(self.num_test_obs)
boot_indx_shape = (num_test_reps, self.num_test_obs)
np.random.seed(8292017)
boot_indices =\
np.random.choice(test_indices,
replace=True,
size=self.num_test_obs*num_test_reps)
self.bootstrap_replicates =\
np.fromiter((self.calc_theta(self.test_data[x])[0] for x in
boot_indices.reshape(boot_indx_shape)),
dtype=float)[:, None]
self.rows_to_obs = eye(self.test_data.size, format='csr', dtype=int)
# Create a fake model object and a fake model class that will implement the
        # T(P) function through its fit_mle method.
test_data = self.test_data
fake_rows_to_obs = self.rows_to_obs
calc_theta = self.calc_theta
class FakeModel(object):
def __init__(self):
# Create needed attributes to successfully mock an MNDC_Model
                # instance in this test
self.data = pd.Series([pos for pos, x in enumerate(test_data)])
self.obs_id_col = np.arange(self.data.size, dtype=int)
needed_names = ['ind_var_names', 'intercept_names',
'shape_names', 'nest_names']
for name in needed_names:
setattr(self, name, None)
self.ind_var_names = ['variance']
# Create a get_mappings_for_fit function that will allow for
# successful mocking in this test
def get_mappings_for_fit(self):
return {"rows_to_obs": fake_rows_to_obs}
# Use the T(P) function from the spatial test data example.
def fit_mle(self,
init_vals,
weights=None,
**kwargs):
return {'x': calc_theta(test_data, weights=weights)}
self.fake_model_obj = FakeModel()
# Create the bootstrap object
self.boot =\
bc.Boot(self.fake_model_obj,
pd.Series(self.test_theta_hat, index=["variance"]))
self.boot.bootstrap_replicates =\
pd.DataFrame(self.bootstrap_replicates, columns=['variance'])
self.boot.jackknife_replicates =\
pd.DataFrame(self.jackknife_replicates, columns=['variance'])
# Store the confidence percentage that will be used for the test
self.conf_percentage = 90
return None
# Create the function to calculate the objective function.
def calc_theta(self, array, weights=None):
if weights is None:
result = ((array - array.mean())**2).sum() / float(array.size)
else:
a_mean = weights.dot(array)
differences = (array - a_mean)
squared_diffs = differences**2
result = weights.dot(squared_diffs)
return np.array([result])
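    # Worked check (illustrative only, not part of the test suite): with uniform
    # weights the weighted branch reproduces the unweighted plug-in variance.
    # For array = np.array([1., 2., 3.]) the mean is 2.0, so
    #   unweighted: ((array - 2.0) ** 2).sum() / 3.0            -> 0.666...
    #   weighted:   np.full(3, 1. / 3).dot((array - 2.0) ** 2)  -> 0.666...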
def test_calc_percentile_interval(self):
# Alias the function being tested
func = self.boot.calc_percentile_interval
# Perform the first test
self.assertIsNone(self.boot.percentile_interval)
# Calculate the function result
func(self.conf_percentage)
# Note the expected result is from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([100.8, 233.9])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.percentile_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.percentile_interval.columns.tolist())
self.assertIn("variance", self.boot.percentile_interval.index)
self.assertEqual(self.boot.percentile_interval.shape, (1, 2))
npt.assert_allclose(self.boot.percentile_interval.iloc[0, :],
expected_result, rtol=0.02)
# Set the percentile interval back to none.
self.boot.percentile_interval = None
self.assertIsNone(self.boot.percentile_interval)
return None
def test_calc_bca_interval(self):
# Alias the function being tested
func = self.boot.calc_bca_interval
# Perform the first test
self.assertIsNone(self.boot.bca_interval)
# Calculate the function result
func(self.conf_percentage)
# Note the expected result is from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([115.8, 259.6])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.bca_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.bca_interval.columns.tolist())
self.assertIn("variance", self.boot.bca_interval.index)
self.assertEqual(self.boot.bca_interval.shape, (1, 2))
npt.assert_allclose(self.boot.bca_interval.iloc[0, :],
expected_result, rtol=0.01)
# Set the percentile interval back to none.
self.boot.bca_interval = None
self.assertIsNone(self.boot.bca_interval)
return None
def test_calc_abc_interval(self):
# Alias the function being tested
func = self.boot.calc_abc_interval
# Perform the first test
self.assertIsNone(self.boot.abc_interval)
# Calculate the function result
func(self.conf_percentage, self.test_theta_hat, epsilon=0.001)
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([116.7, 260.9])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.abc_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.abc_interval.columns.tolist())
self.assertIn("variance", self.boot.abc_interval.index)
self.assertEqual(self.boot.abc_interval.shape, (1, 2))
npt.assert_allclose(self.boot.abc_interval.iloc[0, :],
expected_result, rtol=0.01)
# Set the percentile interval back to none.
self.boot.abc_interval = None
self.assertIsNone(self.boot.abc_interval)
return None
def test_calc_conf_intervals_except_all(self):
kwargs = {"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create the list of attributes to be tested
tested_attrs = ['percentile_interval', 'bca_interval', 'abc_interval']
interval_types = ['pi', 'bca', 'abc']
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result =\
np.array([[100.8, 233.9], [115.8, 259.6], [116.7, 260.9]])
expected_columns = ['5%', '95%']
# Perform the desired tests
for pos, i_type in enumerate(interval_types):
desired_attr = getattr(self.boot, tested_attrs[pos])
self.assertIsNone(desired_attr)
# Calculate the function result
kwargs['interval_type'] = i_type
func(self.conf_percentage, **kwargs)
# Perform the remaining tests
desired_attr = getattr(self.boot, tested_attrs[pos])
self.assertIsInstance(desired_attr, pd.DataFrame)
self.assertEqual(expected_columns,
desired_attr.columns.tolist())
self.assertIn("variance", desired_attr.index)
self.assertEqual(desired_attr.shape, (1, 2))
npt.assert_allclose(desired_attr.iloc[0, :],
expected_result[pos], rtol=0.02)
# Perform clean-up activities after the test
setattr(self.boot, tested_attrs[pos], None)
return None
def test_calc_conf_intervals_all(self):
kwargs = {"interval_type": 'all',
"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create the list of attributes to be tested
tested_attrs = ['percentile_interval', 'bca_interval', 'abc_interval']
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result =\
np.array([[100.8, 233.9], [115.8, 259.6], [116.7, 260.9]])
# Note the expected MultiIndex columns
expected_columns_all = [("percentile_interval", "5%"),
("percentile_interval", "95%"),
("BCa_interval", "5%"),
("BCa_interval", "95%"),
("ABC_interval", "5%"),
("ABC_interval", "95%")]
expected_columns_single = ["5%", "95%"]
# Perform the expected tests before running the function
for attr in tested_attrs:
self.assertIsNone(getattr(self.boot, attr))
# Calculate the function results
func(self.conf_percentage, **kwargs)
# Perform the remaining tests
for pos, attr in enumerate(tested_attrs):
desired_attr = getattr(self.boot, attr)
self.assertEqual(expected_columns_single,
desired_attr.columns.tolist())
self.assertIn("variance", desired_attr.index)
self.assertEqual(desired_attr.shape, (1, 2))
npt.assert_allclose(desired_attr.iloc[0, :],
expected_result[pos], rtol=0.02)
# Test the 'all_intervals' attribute.
self.assertIsInstance(self.boot.all_intervals, pd.DataFrame)
self.assertEqual(expected_columns_all,
self.boot.all_intervals.columns.tolist())
self.assertIn("variance", self.boot.all_intervals.index)
self.assertEqual(self.boot.all_intervals.shape, (1, 6))
npt.assert_allclose(self.boot.all_intervals.values,
expected_result.reshape((1, 6)), rtol=0.02)
# Set the various intervals back to None.
for attr in tested_attrs + ['all_intervals']:
setattr(self.boot, attr, None)
self.assertIsNone(getattr(self.boot, attr))
return None
def test_interval_type_error_in_calc_conf_intervals(self):
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create kwargs for the function to be tested
kwargs = {"interval_type": 'bad_type',
"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Note the expected error message.
expected_error_msg =\
"interval_type MUST be in `\['pi', 'bca', 'abc', 'all'\]`"
# Ensure that the appropriate errors are raised.
self.assertRaisesRegexp(ValueError,
expected_error_msg,
func,
self.conf_percentage,
**kwargs)
return None
class AnalysisTests(unittest.TestCase):
def make_mnl_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
        # Create the index specification and name dictionary for the model
mnl_spec = OrderedDict()
mnl_names = OrderedDict()
mnl_spec["x"] = [[1, 2, 3]]
mnl_names["x"] = ["x (generic coefficient)"]
        # Bundle args and kwargs used to construct the MNL model.
mnl_args = [fake_df, alt_id_col, obs_id_col, choice_col, mnl_spec]
# Create a variable for the kwargs being passed to the constructor
mnl_kwargs = {"names": mnl_names}
# Initialize a basic choice model.
mnl_obj = MNL(*mnl_args, **mnl_kwargs)
        # Create the desired model attributes for the MNL model
mnl_obj.coefs = pd.Series(fake_betas, index=mnl_names["x"])
mnl_obj.intercepts = None
mnl_obj.shapes = None
mnl_obj.nests = None
mnl_obj.params = mnl_obj.coefs.copy()
return mnl_obj
def make_asym_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1])
        # Create names for the shape parameters
fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
natural_shapes = asym._convert_eta_to_c(fake_shapes,
fake_shape_ref_pos)
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# The mapping between rows and alternatives is given below.
fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1]]))
        # Get the mapping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
        # Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"shape_ref_pos": fake_shape_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)
# Get the fitted probabilities for this model and dataset
# Note this relies on the calc_probabilities function being functional.
# args = [fake_betas,
# fake_design,
# fake_df[alt_id_col].values,
# fake_rows_to_obs,
# fake_rows_to_alts,
# model_obj.utility_transform]
# kwargs = {"intercept_params": fake_intercepts,
# "shape_params": fake_shapes,
# "return_long_probs": True}
# model_obj.prob_array =\
# choice_calcs.calc_probabilities(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def make_mixed_model(self):
# Fake random draws where Row 1 is for observation 1 and row 2 is
# for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
fake_std = 1
fake_betas_ext = np.concatenate((fake_betas,
np.array([fake_std])),
axis=0)
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 1, 2.5],
[0, 0, 3.5],
[1, 0, 0.5],
[0, 1, 1.0],
[0, 0, 1.5]])
# Record what positions in the design matrix are being mixed over
mixing_pos = [2]
# Create the arrays that specify the choice situation, individual id
# and alternative ids
situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
# Create a fake array of choices
choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])
# Create the 'rows_to_mixers' sparse array for this dataset
# Denote the rows that correspond to observation 1 and observation 2
obs_1_rows = np.ones(fake_design.shape[0])
# Make sure the rows for observation 2 are given a zero in obs_1_rows
obs_1_rows[-3:] = 0
obs_2_rows = 1 - obs_1_rows
# Create the row_to_mixers scipy.sparse matrix
fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] ==
np.array([1, 0])[None, :])
# Create the rows_to_obs scipy.sparse matrix
fake_rows_to_obs = csr_matrix(situation_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the rows_to_alts scipy.sparse matrix
fake_rows_to_alts = csr_matrix(alternative_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the design matrix that we should see for draw 1 and draw 2
arrays_to_join = (fake_design.copy(),
fake_design.copy()[:, -1][:, None])
fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
fake_design_draw_2 = fake_design_draw_1.copy()
# Multiply the 'random' coefficient draws by the corresponding variable
fake_design_draw_1[:, -1] *= (obs_1_rows *
fake_draws[0, 0] +
obs_2_rows *
fake_draws[1, 0])
fake_design_draw_2[:, -1] *= (obs_1_rows *
fake_draws[0, 1] +
obs_2_rows *
fake_draws[1, 1])
extended_design_draw_1 = fake_design_draw_1[:, None, :]
extended_design_draw_2 = fake_design_draw_2[:, None, :]
fake_design_3d = np.concatenate((extended_design_draw_1,
extended_design_draw_2),
axis=1)
# Create the fake systematic utility values
sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext)
sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext)
#####
        # Calculate the probabilities of each alternative in each choice
# situation
#####
long_exp_draw_1 = np.exp(sys_utilities_draw_1)
long_exp_draw_2 = np.exp(sys_utilities_draw_2)
ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1)
ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2)
long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1)
long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2)
long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2
prob_array = np.concatenate((long_probs_draw_1[:, None],
long_probs_draw_2[:, None]),
axis=1)
###########
# Create a mixed logit object for later use.
##########
# Create a fake old long format dataframe for mixed logit model object
alt_id_column = "alt_id"
situation_id_column = "situation_id"
obs_id_column = "observation_id"
choice_column = "choice"
data = {"x": fake_design[:, 2],
alt_id_column: alternative_ids,
situation_id_column: situation_ids,
obs_id_column: individual_ids,
choice_column: choice_array}
fake_old_df = pd.DataFrame(data)
fake_old_df["intercept"] = 1
# Create a fake specification
fake_spec = OrderedDict()
fake_names = OrderedDict()
fake_spec["intercept"] = [1, 2]
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_spec["x"] = [[1, 2, 3]]
fake_names["x"] = ["beta_x"]
# Specify the mixing variable
fake_mixing_vars = ["beta_x"]
# Create a fake version of a mixed logit model object
args = [fake_old_df,
alt_id_column,
situation_id_column,
choice_column,
fake_spec]
kwargs = {"names": fake_names,
"mixing_id_col": obs_id_column,
"mixing_vars": fake_mixing_vars}
mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)
# Set all the necessary attributes for prediction:
# design_3d, coefs, intercepts, shapes, nests, mixing_pos
mixl_obj.design_3d = fake_design_3d
mixl_obj.ind_var_names += ["Sigma X"]
mixl_obj.coefs =\
pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names)
mixl_obj.intercepts = None
mixl_obj.shapes = None
mixl_obj.nests = None
mixl_obj.params = mixl_obj.coefs.copy()
return mixl_obj
def make_nested_model(self):
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
# Create the fake nest coefficients to be used during the tests
# Note that these are the 'natural' nest coefficients, i.e. the
        # inverse of the scale parameters for each nest. They should be less
        # than or equal to 1.
natural_nest_coefs = np.array([1 - 1e-16, 0.5])
# Create an array of all model parameters
fake_all_params = np.concatenate((natural_nest_coefs,
fake_betas))
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two.
# The nest memberships of these alternatives are given below.
fake_rows_to_nests = csr_matrix(np.array([[1, 0],
[1, 0],
[0, 1],
[1, 0],
[0, 1]]))
# Create a sparse matrix that maps the rows of the design matrix to the
        # observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 0, 3.5]])
# Create fake versions of the needed arguments for the MNL constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": range(5),
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Store the choice array
choice_array = fake_df[choice_col].values
# Create a sparse matrix that maps the chosen rows of the design
        # matrix to the observations
fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0],
[0, 1]]))
        # Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_specification["intercept"] = [1, 2]
fake_specification["x"] = [[1, 2, 3]]
fake_names = OrderedDict()
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_names["x"] = ["x (generic coefficient)"]
# Create the nesting specification
fake_nest_spec = OrderedDict()
fake_nest_spec["Nest 1"] = [1, 2]
fake_nest_spec["Nest 2"] = [3]
# Create a nested logit object
args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
kwargs = {"names": fake_names,
"nest_spec": fake_nest_spec}
model_obj = nested_logit.NestedLogit(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names)
model_obj.intercepts = None
model_obj.shapes = None
def logit(x):
return np.log(x / (1 - x))
model_obj.nests =\
pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys())
model_obj.params =\
pd.concat([model_obj.nests, model_obj.coefs],
axis=0, ignore_index=False)
# Store a ridge parameter
# ridge = 0.5
# Gather the arguments needed for the calc_nested_probs function
# args = [natural_nest_coefs,
# fake_betas,
# model_obj.design,
# fake_rows_to_obs,
# fake_rows_to_nests]
# kwargs = {"return_type": "long_probs"}
# model_obj.prob_array = nlc.calc_nested_probs(*args, **kwargs)
return model_obj
def setUp(self):
"""
Create the real model objects.
"""
self.mnl_model = self.make_mnl_model()
self.asym_model = self.make_asym_model()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
def test_calc_log_likes_for_replicates(self):
# Create the keyword arguments needed for the test.
kwargs = {'num_draws': 10, 'seed': 932017}
# Note the objects that are to be tested
model_objects = [self.mnl_model,
self.asym_model,
self.mixed_model,
self.nested_model]
# Iterate over the Asym, MNL, Mixed, and Nested models.
for model_obj in model_objects:
# Create the bootstrap object based on the model object.
boot = bc.Boot(model_obj, model_obj.params.values)
# Create the bootstrap and jackknife replicate attributes.
replicates =\
pd.DataFrame(np.concatenate([model_obj.params.values[None, :],
model_obj.params.values[None, :]],
axis=0))
boot.bootstrap_replicates = replicates
boot.jackknife_replicates = replicates
# Alias the function being tested.
func = boot.calc_log_likes_for_replicates
for replicate_type in ['bootstrap', 'jackknife']:
# Calculate function results using each bootstrap object
kwargs["replicates"] = replicate_type
func_result = func(**kwargs)
# Ensure function results have the expected properties
self.assertIsInstance(func_result, np.ndarray)
self.assertEqual(func_result.ndim, 1)
self.assertEqual(func_result.shape, (replicates.shape[0],))
return None
def test_calc_gradient_norm_for_replicates(self):
# Create the bootstrap object based on the MNL model.
base_array = self.mnl_model.params.values
base_array_2d = base_array[None, :]
boot = bc.Boot(self.mnl_model, base_array)
# Create the bootstrap and jackknife replicate attributes.
replicates = pd.DataFrame(np.concatenate((base_array_2d,
base_array_2d + 1,
base_array_2d - 1),
axis=0))
boot.bootstrap_replicates = replicates
boot.jackknife_replicates = replicates
# Alias the function being tested.
func = boot.calc_gradient_norm_for_replicates
# Perform the desired tests.
for replicate_type in ['bootstrap', 'jackknife']:
func_result = func(replicates=replicate_type)
self.assertIsInstance(func_result, np.ndarray)
self.assertEqual(func_result.shape, (replicates.shape[0],))
self.assertTrue(np.unique(func_result).size == func_result.size)
return None
|
py | 1a4c73889ca2e61ddd56a54813e16052baae97aa | __author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
from jnpr.junos.jxml import NAME, INSERT, remove_namespaces
@attr('unit')
class Test_JXML(unittest.TestCase):
def test_name(self):
op = NAME('test')
self.assertEqual(op['name'], 'test')
def test_insert(self):
op = INSERT('test')
self.assertEqual(op['insert'], 'test')
def test_remove_namespaces(self):
xmldata = \
"""<xsl:stylesheet xmlns:xsl="http://xml.juniper.net/junos">
<xsl:template>
<xsl:attribute name="{myname}">
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>"""
import xml.etree.ElementTree as ET
root = ET.fromstring(xmldata)
test = remove_namespaces(root)
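        # remove_namespaces should strip every '{namespace}' prefix from the tags,
        # e.g. '{http://xml.juniper.net/junos}template' becomes plain 'template';
        # the loop below asserts that no '}' survives in any element tag.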
for elem in test.getiterator():
i = elem.tag.find('}')
if i > 0:
i = i + 1
self.assertTrue(i <= 0)
|
py | 1a4c742765c7d0d5d33062911c35dc023598ed74 | # -*- coding: utf-8 -*-
import asyncio
import collections
import functools
import json
import time
from typing import List, Optional
from threading import Thread
from vk_api import VkApi
from vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll
from vk_api.execute import VkFunction
from vk_api.upload import VkUpload
from vk_api.utils import get_random_id
API_VERSION = '5.130'
vk_execute = VkFunction(
args=('methods',),
clean_args=('methods',),
code='''
%(methods)s;
return 1;
''')
def threaded(fn):
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
return wrapper
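# `threaded` turns any function into a fire-and-forget call running in its own
# daemon thread (see the @threaded use on VK.event_handler below); note that the
# wrapper returns None, so the decorated function cannot return a value.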
class VKMessage:
__slots__ = ('id', 'peer_id', 'user_id', 'text', 'payload', 'reply')
def __init__(self, raw: dict, vk: 'VK') -> None:
self.id = raw['id']
self.peer_id = raw['peer_id']
self.user_id = raw['from_id']
self.text = raw['text']
self.payload = json.loads(raw['payload']) if 'payload' in raw else None
self.reply = functools.partial(vk.send, self.peer_id)
class VK:
__slots__ = ('vk', 'logger', 'event_queue', 'msg_queue', 'user_cache', 'group_id')
def __init__(self, token: str, logger) -> None:
self.vk = VkApi(token=token, api_version=API_VERSION)
self.logger = logger
self.event_queue = collections.deque()
self.msg_queue = []
self.user_cache = {}
self.group_id = self.method('groups.getById')[0]['id']
self.init_group_settings()
def method(self, method: str, args: dict = None) -> dict:
return self.vk.method(method, args)
def send(self, peer_id: int, message: str, keyboard=None, attach=None, sticker=None, disable_mentions=True) -> None:
if 4000 < len(message) < 100000 and (not attach) and (not sticker):
for message_part in [message[j:j + 4000] for j in range(0, len(message), 4000)]:
self.msg_queue.append({'peer_id': peer_id, 'message': message_part, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard})
else:
self.msg_queue.append({'peer_id': peer_id, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard, 'attachment': attach, 'sticker_id': sticker})
def send_multiple(self, peer_ids: List[int], message: str, keyboard=None, disable_mentions=True) -> None:
self.msg_queue.append({'peer_ids': peer_ids, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard})
def get_user_link(self, target_id: int, name_case: str = 'nom') -> str:
if target_id not in self.user_cache and target_id != 0:
if target_id < 0:
self.user_cache[target_id] = self.method('groups.getById', {'group_id': -target_id})[0]
else:
self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
if target_id < 0:
return ''.join(['[id', str(target_id), '|', self.user_cache[target_id]['first_name'], ']'])
elif target_id == 0:
return '@id0'
else:
self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
return f"[id{target_id}|{self.user_cache[target_id]['first_name']}]"
def get_user_links(self, target_ids: List[int]) -> dict:
cached = True
for i in target_ids:
if i not in self.user_cache:
cached = False
break
if not cached:
for i in self.method('users.get', {'user_ids': ','.join(list(map(str, target_ids)))}):
self.user_cache[i['id']] = i
return {i: f"[id{i}|{self.user_cache[i]['first_name']}]" for i in target_ids}
def get_target_id(self, s: str) -> Optional[int]:
r = s.replace('https://', '').replace('vk.com/', '').replace('@id', '').replace('@', '').replace('[', '').replace(']', '')
if '|' in r:
r = r.split('|')[0]
if not r.isdecimal():
r = self.method('utils.resolveScreenName', {'screen_name': r.replace('-', 'club')})
if not r:
return
if r['type'] == 'user':
r = r['object_id']
elif r['type'] == 'group':
r = -r['object_id']
return int(r)
def is_chat_member(self, peer_id: int, user_id: int) -> bool:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if i['member_id'] == user_id:
return True
def is_chat_admin(self, peer_id: int, user_id: int, check_if_owner: bool = False) -> bool:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if i['member_id'] == user_id and 'is_admin' in i and i['is_admin'] and ((not check_if_owner) or ('is_owner' in i and i['is_owner'])):
return True
def get_chat_owner(self, peer_id: int) -> Optional[int]:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if 'is_owner' in i and i['is_owner']:
return i['member_id']
def get_upload(self) -> VkUpload:
return VkUpload(self.vk)
def init_group_settings(self) -> None:
self.method('groups.setSettings', {
'group_id': self.group_id,
'messages': 1,
'bots_capabilities': 1,
'bots_start_button': 1,
'bots_add_to_chat': 1,
})
self.method('groups.setLongPollSettings', {
'group_id': self.group_id,
'enabled': 1,
'api_version': API_VERSION,
'message_new': 1,
})
async def messages_sender(self) -> None:
while True:
queue = self.msg_queue[:25]
if queue:
self.msg_queue = self.msg_queue[25:]
try:
vk_execute(self.vk, ''.join(('API.messages.send(' + json.dumps(i, ensure_ascii=False, separators=(',', ':')) + ');') for i in queue))
except Exception as ex:
                    self.logger.warning('An error occurred while sending messages', exc_info=ex)
await asyncio.sleep(0.05)
@threaded
def event_handler(self) -> None:
convs = self.method('messages.getConversations', {'count': 200, 'filter': 'unanswered'})['items']
for i in convs:
self.event_queue.append(VKMessage(i['last_message'], self))
lp = VkBotLongPoll(self.vk, self.group_id)
while True:
try:
for event in lp.check():
if event.type == VkBotEventType.MESSAGE_NEW:
self.event_queue.append(VKMessage(event.raw['object']['message'], self))
else:
self.event_queue.append(event)
except Exception as ex:
                self.logger.warning('An error occurred in LongPoll', exc_info=ex)
time.sleep(3)
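# Minimal usage sketch (illustrative names; assumes a valid group access token
# and a configured logging.Logger):
#   vk = VK(token=GROUP_TOKEN, logger=logging.getLogger('bot'))
#   vk.event_handler()  # long-poll listener runs in a daemon thread
#   asyncio.get_event_loop().run_until_complete(vk.messages_sender())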
|
py | 1a4c76728e9fe4d13e4df62ffeaa9b7b5fc82d51 | from .chatgetter import ChatGetter
from .sendergetter import SenderGetter
from ..._misc import utils, helpers
class Forward(ChatGetter, SenderGetter):
"""
Custom class that encapsulates a :tl:`MessageFwdHeader` providing an
abstraction to easily access information like the original sender.
Remember that this class implements `ChatGetter
<telethon.tl.custom.chatgetter.ChatGetter>` and `SenderGetter
<telethon.tl.custom.sendergetter.SenderGetter>` which means you
have access to all their sender and chat properties and methods.
Attributes:
original_fwd (:tl:`MessageFwdHeader`):
The original :tl:`MessageFwdHeader` instance.
Any other attribute:
Attributes not described here are the same as those available
in the original :tl:`MessageFwdHeader`.
"""
def __init__(self, client, original, entities):
# Copy all the fields, not reference! It would cause memory cycles:
# self.original_fwd.original_fwd.original_fwd.original_fwd
# ...would be valid if we referenced.
for slot in original.__slots__:
setattr(self, slot, getattr(original, slot))
self.original_fwd = original
sender_id = sender = input_sender = peer = chat = input_chat = None
if original.from_id:
ty = helpers._entity_type(original.from_id)
if ty == helpers._EntityType.USER:
sender_id = utils.get_peer_id(original.from_id)
sender, input_sender = utils._get_entity_pair(sender_id, entities)
elif ty in (helpers._EntityType.CHAT, helpers._EntityType.CHANNEL):
peer = original.from_id
chat, input_chat = utils._get_entity_pair(utils.get_peer_id(peer), entities)
# This call resets the client
ChatGetter.__init__(self, peer, chat=chat, input_chat=input_chat)
SenderGetter.__init__(self, sender_id, sender=sender, input_sender=input_sender)
self._client = client
# TODO We could reload the message
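# Usage sketch (hypothetical handler code, relying only on the ChatGetter /
# SenderGetter behaviour described in the class docstring):
#   if message.forward:
#       fwd = message.forward
#       original_sender = await fwd.get_sender()  # may be None if hidden
#       print(fwd.date, getattr(original_sender, 'username', None))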
|
py | 1a4c78a357eb552887be40c1581659c424555c18 | from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
import tensorflow as tf
from tensorflow.keras import layers, regularizers
def rpn(feature_map, anchors_per_location=9):
shared = layers.Conv2D(512, (3, 3), padding='same', activation='relu', name='rpn_conv_shared')(feature_map)
# Anchor class (foreground, background)
# [batch, height, width, anchors_per_location * 2]
x = layers.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
x = layers.Reshape((-1, 2))(x)
rpn_class = layers.Activation('softmax', name='rpn_class_probs')(x)
# Bounding box refinement
# [batch, height, width, anchors_per_location * (x, y, log(w), log(h))]
x = layers.Conv2D(4 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = layers.Reshape((-1, 4))(x)
return rpn_class, rpn_bbox
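# Shape illustration (assuming a 224x224 input): VGG16 downsamples by a factor
# of 32, giving a 7x7x512 feature map, so with 9 anchors per location rpn_class
# has shape (batch, 7*7*9, 2) and rpn_bbox has shape (batch, 7*7*9, 4).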
def get_anchors():
    # Assumed completion of the original stub ('# for'): one (width, height)
    # pair per scale/ratio combination, in feature-map units.
    anchors = []
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    for scale in scales:
        for ratio in ratios:
            anchors.append((scale * ratio ** 0.5, scale / ratio ** 0.5))
    return anchors
def build_model():
inputs = layers.Input(shape=(None, None, 3)) # default shape is 224*224*3
x = preprocess_input(inputs)
backbone = VGG16(weights='imagenet', include_top=False)
feature_map = backbone(x)
rpn_class, rpn_bbox = rpn(feature_map)
    anchors = get_anchors()
    # Assumed completion: the original file stops here, so wrap the RPN outputs
    # in a Keras model and hand the anchors back alongside it.
    return tf.keras.Model(inputs=inputs, outputs=[rpn_class, rpn_bbox]), anchors
|
py | 1a4c79d3bbbb99481b29978c8d4303866d646488 | '''
-----------------------------------
### Acknowledgements
Copied directly from the [dask tutorial](https://github.com/dask/dask-tutorial) that has the following copyright:
Copyright (c) 2017-2018, Anaconda, Inc. and contributors
All rights reserved.
'''
flights_url = "https://storage.googleapis.com/dask-tutorial-data/nycflights.tar.gz"
lazy_url = "http://www.google.com"
bag_url = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"
|
py | 1a4c7aa56c2368b09550486b474516297cb17453 | import galsim
from scipy.stats import truncnorm
def generate_galaxy(psf = moffat_psf(0.01,-0.02),**kwargs):
"random galaxy generator"
defaults = {'re_mean' : 3.0,
're_scatter' : 0.1,
'g_range' : [-.6,-6],
'g_rms' : 0.3,
'flux' : 1,
'pixel_scale' : 0.2,
'stamp_size' : 50,
'method' : "no_pixel",
'interpolator' : "linear"}
defaults.update(kwargs)
g1 = truncnorm.rvs(-.6, .6, loc=0, scale=0.2)
g2 = truncnorm.rvs(-.6, .6, loc=0, scale=0.2)
re = truncnorm.rvs(.5, 5, loc=3, scale=0.2)
gal = galsim.Exponential(flux=defaults['flux'] ,
half_light_radius=re)
gal = gal.shear(g1=g1,g2=g2)
gal_image = gal.drawImage(nx=defaults['stamp_size'],
ny=defaults['stamp_size'],
scale=defaults['pixel_scale'],
method=defaults['method'])
return g1, g2, gal_image.array
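# Usage sketch -- note that the `psf` default calls a `moffat_psf` helper that is
# not defined in this file (assumed to come from elsewhere in the package) and
# that the argument is currently unused by the body:
#   g1, g2, image = generate_galaxy(stamp_size=64)
#   image.shape  # -> (64, 64) at the default 0.2 arcsec/pixel scale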
|
py | 1a4c7c1c0a940f8056f75f68b37aebbe0725b81f | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018, 2019, 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA Workflow Controller errors."""
class REANAWorkflowNameError(Exception):
"""."""
class REANAWorkflowControllerError(Exception):
"""Error when trying to manage workflows."""
class REANAUploadPathError(Exception):
"""Provided paths contain '../'."""
class REANAWorkflowDeletionError(Exception):
"""Error when trying to delete a workflow."""
class REANAInteractiveSessionError(Exception):
"""Error when trying to create an interactive session."""
class REANAExternalCallError(Exception):
"""Error when connecting to an external service."""
class REANAWorkflowStatusError(Exception):
"""Error when trying to change workflow status."""
class REANAWorkflowStopError(Exception):
"""Error when trying to stop a workflow."""
|
py | 1a4c7c52eeb1655f0693daddda23b80fa8eae8aa | import unittest
from bbscript.stdlib import cmd_var, cmd_doc
from bbscript.errors import InvalidOperation
class TestVariables(unittest.TestCase):
def test_var_get(self):
doc = {"docname": "testdoc"}
ctx = {"$test_var": "test value", "$doc": doc}
self.assertEqual(cmd_var(ctx, "test_var"), "test value")
self.assertEqual(cmd_var(ctx, "doc"), doc)
self.assertEqual(cmd_var(ctx, "doc", "docname"), doc.get("docname"))
def test_doc(self):
doc = {"field1": "value1", "field2": True, "field3": 123}
meta = {
"fields": {
"field1": {
"type": "string"
},
"field2": {
"type": "boolean"
},
"field3": {
"type": "int"
}
}
}
ctx = {
"$doc": doc
}
self.assertEqual(cmd_doc(ctx, "doc", meta), doc)
|
py | 1a4c7c55876530add2f52bb02861b11cbe3cde0b | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different algorithms of reduction and broadcasting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import six
from tensorflow.python.client import device_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def check_destinations(destinations):
"""Checks whether `destinations` is not empty.
Args:
destinations: a `DistributedValues`, variable, or string object.
Returns:
Boolean which is True if `destinations` is not empty.
"""
# Calling bool() on a ResourceVariable is not allowed.
if isinstance(destinations, resource_variable_ops.ResourceVariable):
return bool(destinations.device)
return bool(destinations)
def validate_destinations(destinations):
if not isinstance(destinations,
(value_lib.DistributedValues,
resource_variable_ops.ResourceVariable,
value_lib.AggregatingVariable,
six.string_types,
value_lib.TPUMirroredVariable,
# LogicalDeviceSpec is only used internally, e.g. as a
# broadcast destination, never supplied by a user.
value_lib.LogicalDeviceSpec)):
raise ValueError("destinations must be one of a `DistributedValues` object,"
" a tf.Variable object, or a device string.")
if not check_destinations(destinations):
raise ValueError("destinations can not be empty")
def reduce_non_distributed_value(reduce_op, device_map, value, destinations):
"""Reduce a non-DistributedValue `value` to `destinations`."""
if isinstance(value, value_lib.DistributedValues):
raise ValueError("You are passing a `DistributedValue` to "
"`reduce_non_distributed_value`, which is not allowed.")
# If the same value is present on all replicas then the PerReplica value will
# be a single value. We also handle the case when `value` is a single value
# and equal to 0.
if value == 0:
return 0
# If there is only a single value and the reduce op is MEAN,
# that value should be on all destinations.
if reduce_op == reduce_util.ReduceOp.MEAN:
return value
validate_destinations(destinations)
# We do not support a reduce op of SUM if the value is the same across
# all replicas. We call this as part of assign functions for MirroredVariables
# and summing up identical values across replicas is not clearly defined.
if device_map.num_replicas_in_graph != 1:
raise ValueError("A non-DistributedValues value %s cannot be reduced with "
"the given reduce op %s." % (value, reduce_op))
return simple_broadcast(value, destinations)
def _make_tensor_into_per_replica(input_tensor):
"""Converts a single tensor into a PerReplica object."""
if isinstance(input_tensor, (tuple, list)):
raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object, "
"got %r but expected a object that is not a tuple or list."
% (input_tensor,))
if isinstance(input_tensor, value_lib.PerReplica):
return input_tensor
try:
device = input_tensor.device
except AttributeError:
raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object "
"because it doesn't have device set.")
device_map = value_lib.SingleDeviceMap(device)
return value_lib.PerReplica(device_map, (input_tensor,))
def _normalize_value_destination_pairs(value_destination_pairs):
"""Converts each tensor into a PerReplica object in the input list."""
result = []
value_destination_pairs = list(value_destination_pairs)
if not isinstance(value_destination_pairs, (list, tuple)):
raise ValueError("`value_destination_pairs` should be a list or tuple")
for pair in value_destination_pairs:
if not isinstance(pair, tuple):
raise ValueError(
"Each element of `value_destination_pairs` should be a tuple.")
if len(pair) != 2:
raise ValueError("Each element of `value_destination_pairs` should be a "
"tuple of size 2.")
per_replica = _make_tensor_into_per_replica(pair[0])
result.append((per_replica, pair[1]))
return result
def _validate_value_destination_pairs(value_destination_pairs):
# TODO(yuefengz): raise exceptions instead of returning False.
# pylint: disable=g-missing-docstring
if not value_destination_pairs: return False
if not isinstance(value_destination_pairs, (list, tuple)): return False
if not all(isinstance(pair, tuple) for pair in value_destination_pairs):
return False
if not all(isinstance(v[0], value_lib.PerReplica)
for v in value_destination_pairs):
return False
return True
# TODO(yuefengz): consider calling this function in the caller of
# CrossDeviceOps.
def get_devices_from(destinations):
if isinstance(destinations, value_lib.DistributedValues):
return destinations.devices
elif isinstance(destinations, value_lib.LogicalDeviceSpec):
return destinations.device_map.logical_to_actual_devices(
destinations.logical_device)
elif isinstance(destinations, six.string_types):
return (device_util.resolve(destinations),)
return (destinations.device,)
def get_device_map_from(destinations):
if isinstance(destinations, (value_lib.DistributedValues,
value_lib.LogicalDeviceSpec)):
return destinations.device_map, destinations.logical_device
if isinstance(destinations, six.string_types):
device = device_util.resolve(destinations)
else:
device = destinations.device
return value_lib.SingleDeviceMap(device), 0
def _devices_match(left, right):
return set(get_devices_from(left)) == set(get_devices_from(right))
def _all_devices_match(value_destination_pairs):
if not all(_devices_match(v, d) for v, d in value_destination_pairs):
return False
if not all(_devices_match(v, value_destination_pairs[0][0])
for v, _ in value_destination_pairs[1:]):
return False
return True
def simple_broadcast(value, destinations, always_mirrored=False):
"""Broadcast `value` to `destinations` using simple copies."""
device_map, logical_device = get_device_map_from(destinations)
devices = device_map.logical_to_actual_devices(logical_device)
if len(devices) == 1 and not always_mirrored:
return cross_device_utils.copy_tensor_or_indexed_slices_to_device(
value, devices[0])
else:
value_updates = []
for d in devices:
value_updates.append(
cross_device_utils.copy_tensor_or_indexed_slices_to_device(
value, d))
return value_lib.Mirrored(device_map, value_updates, logical_device)
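# Illustrative sketch (not part of the library API): broadcasting a tensor to
# a single device string simply copies it, while `always_mirrored=True` wraps
# the copies in a Mirrored value. Assuming `array_ops` from this module's
# imports:
#
#   value = array_ops.ones([2, 2])
#   mirrored = simple_broadcast(value, "/device:CPU:0", always_mirrored=True)
#   # `mirrored` now holds one copy of `value` per destination device.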
def _simple_reduce(per_replica_value, reduce_to_device, accumulation_fn,
reduce_op):
# pylint: disable=g-missing-docstring
all_values = per_replica_value.values
if not all_values:
raise ValueError("`per_replica_value` must be non-empty")
count = len(all_values)
with ops.device(reduce_to_device):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
reduced = cross_device_utils.aggregate_tensors_or_indexed_slices(
all_values, accumulation_fn)
if reduce_op == reduce_util.ReduceOp.MEAN:
reduced = cross_device_utils.divide_by_n_tensors_or_indexed_slices(
reduced, count)
elif reduce_op != reduce_util.ReduceOp.SUM:
        raise ValueError("`reduce_op` must be ReduceOp.SUM or ReduceOp.MEAN.")
return reduced
@tf_export("distribute.CrossDeviceOps")
class CrossDeviceOps(object):
"""Base class for cross-device reduction and broadcasting algorithms."""
def __init__(self):
pass
def reduce(self, reduce_op, per_replica_value, destinations):
"""Reduce `per_replica_value` to `destinations`.
    It runs the reduction operation defined by `reduce_op` and puts the
result on `destinations`.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
per_replica_value: a PerReplica object or a tensor with device set.
destinations: the reduction destinations.
Returns:
a Mirrored object.
Raises:
ValueError: if per_replica_value can't be converted to a PerReplica
object.
"""
if not isinstance(per_replica_value, value_lib.PerReplica):
per_replica_value = _make_tensor_into_per_replica(per_replica_value)
validate_destinations(destinations)
return self.reduce_implementation(reduce_op, per_replica_value,
destinations)
def batch_reduce(self, reduce_op, value_destination_pairs):
"""Reduce PerReplica objects in a batch.
    Reduce the first element of each pair in `value_destination_pairs` to the
    corresponding second element, which indicates the destinations.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerReplica objects
(or tensors with device set if there is one device) and destinations.
Returns:
a list of Mirrored objects.
Raises:
ValueError: if `value_destination_pairs` is not a list or a tuple of
tuples of PerReplica objects and destinations
"""
# TODO(yuefengz): if destinations are different, split into several
# `_batch_reduce` invocations.
if not _validate_value_destination_pairs(value_destination_pairs):
# If the first element of each pair is a tensor, we try to turn it into a
# PerReplica object.
value_destination_pairs = _normalize_value_destination_pairs(
value_destination_pairs)
for _, d in value_destination_pairs:
validate_destinations(d)
return self.batch_reduce_implementation(reduce_op, value_destination_pairs)
def broadcast(self, tensor, destinations):
"""Broadcast the `tensor` to destinations.
Args:
tensor: the tensor to broadcast.
destinations: the broadcast destinations.
Returns:
a Mirrored object.
"""
validate_destinations(destinations)
return self.broadcast_implementation(tensor, destinations)
@doc_controls.for_subclass_implementers
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
"""The implementation of reduce of `per_replica_value` to `destinations`.
    It runs the reduction operation defined by `reduce_op` and puts the
result on `destinations`.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
per_replica_value: a PerReplica object or a tensor with device set.
destinations: the reduction destinations.
Returns:
a Mirrored object.
Raises:
ValueError: if per_replica_value can't be converted to a PerReplica
object.
"""
raise NotImplementedError(
"_reduce method must be implemented in descendants.")
@doc_controls.for_subclass_implementers
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
"""Implementation of reduce PerReplica objects in a batch.
    Reduce the first element of each pair in `value_destination_pairs` to the
    corresponding second element, which indicates the destinations.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerReplica objects
(or tensors with device set if there is one device) and destinations.
Returns:
a list of Mirrored objects.
Raises:
ValueError: if `value_destination_pairs` is not a list or a tuple of
tuples of PerReplica objects and destinations
"""
raise NotImplementedError(
"_batch_reduce method must be implemented in descendants.")
@doc_controls.for_subclass_implementers
def broadcast_implementation(self, tensor, destinations):
"""Implementation of broadcast the `tensor` to destinations.
Args:
tensor: the tensor to broadcast.
destinations: the broadcast destinations.
Returns:
a Mirrored object.
"""
return simple_broadcast(tensor, destinations, always_mirrored=True)
@tf_export("distribute.ReductionToOneDevice")
class ReductionToOneDevice(CrossDeviceOps):
"""Always do reduction to one device first and then do broadcasting.
  Batch reduction is done by reducing each element one by one.
"""
def __init__(self, reduce_to_device=None, accumulation_fn=None):
"""Constructor.
Args:
reduce_to_device: the intermediate device to reduce to. If None, reduce
to the first device in `destinations` of the reduce() method.
accumulation_fn: a function that does accumulation. If None, then
`tf.math.add_n` is used.
"""
self.reduce_to_device = reduce_to_device
self.accumulation_fn = accumulation_fn or math_ops.add_n
super(ReductionToOneDevice, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
if check_destinations(destinations):
devices = get_devices_from(destinations)
else:
devices = get_devices_from(per_replica_value)
reduce_to_device = self.reduce_to_device or devices[0]
logging.log_first_n(
logging.INFO,
"Reduce to %s then broadcast to %r." % (reduce_to_device, devices), 10)
reduced = _simple_reduce(per_replica_value, reduce_to_device,
self.accumulation_fn, reduce_op)
return self.broadcast(reduced, destinations)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
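# Usage sketch (assumption: the public `tf.distribute` API of the same
# TensorFlow version; the strategy constructor is not defined in this module):
# a MirroredStrategy can be told to use this reduction explicitly through its
# `cross_device_ops` argument, e.g.
#
#   strategy = tf.distribute.MirroredStrategy(
#       cross_device_ops=tf.distribute.ReductionToOneDevice(
#           reduce_to_device="/device:CPU:0"))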
def _group_value_by_device(per_replica_values):
"""Group values into sublists by their devices.
This grouping is needed to call the all-reduce library because it expects a
list of the following form:
[[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...],
[(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...],
   [(grad0_gpu2, v0_gpu2), (grad1_gpu2, v1_gpu2), (grad2_gpu2, v2_gpu2) ...],
...
]
Args:
    per_replica_values: a list of PerReplica objects.
Returns:
    a list of lists, where each sublist contains the per-device components of
    the PerReplica objects, each paired with a None.
"""
destinations = per_replica_values[0].devices
grouped = [[] for _ in range(len(destinations))]
for per_replica_value in per_replica_values:
# pylint: disable=protected-access
for i, v in enumerate(per_replica_value.values):
assert per_replica_value.devices == destinations
grouped[i].append((v, None))
return grouped
def _ungroup_and_make_mirrored(grouped_reduced,
destinations,
reduce_op,
num_between_graph_workers=1):
"""Ungroup results from all-reduce and make Mirrored objects.
  Each all-reduce result will be divided by the number of replicas before
Mirrored objects are created if reduce_op is "mean".
Args:
grouped_reduced: a list of lists, each sublist has components for each
device, paired with a None. It is the result from
cross_device_utils.aggregate_gradients_using*.
destinations: a value to colocate the result with.
reduce_op: Indicates how values will be aggregated. Accepted values
are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
num_between_graph_workers: number of workers in the between-graph
replication.
Returns:
a list of Mirrored objects.
"""
device_map, logical_device = get_device_map_from(destinations)
num_replicas = device_map.num_replicas_in_graph * num_between_graph_workers
index = [[] for _ in range(len(grouped_reduced[0]))]
for per_replica_reduced in grouped_reduced:
for i, (v, _) in enumerate(per_replica_reduced):
if reduce_op == reduce_util.ReduceOp.MEAN:
index[i].append(v / num_replicas)
else:
index[i].append(v)
return [value_lib.Mirrored(device_map, v, logical_device) for v in index]
class _ConcatAndSplitPacker(object):
"""Concatenate and split tensors for reduction."""
def __init__(self, num_packs=1):
"""Initialize the _ConcatAndSplitPacker object.
Args:
num_packs: specifies the number of split packs that will be
formed.
Raises:
ValueError: if num_packs is not greater than 0.
"""
if num_packs <= 0:
raise ValueError("num_packs must be greater than zero.")
self.num_packs = num_packs
def pack(self, grouped_grads_and_vars):
"""Pack tensors."""
self.grouped_grads_and_vars = grouped_grads_and_vars
self.all_device_shapes = []
self.all_device_sizes = []
device_grad_packs = []
for device_grads_and_vars in grouped_grads_and_vars:
with ops.colocate_with(device_grads_and_vars[0][0]):
# Flatten all the grads.
flat_grads = [
array_ops.reshape(g, [-1]) for g, _ in device_grads_and_vars
]
# Remember the original shape of all the grads.
device_shapes = [array_ops.shape(g) for g, _ in device_grads_and_vars]
# Remember the original sizes of all the grads.
device_sizes = [array_ops.size(g) for g, _ in device_grads_and_vars]
# Concat all the flat grads into a big flat tensor.
concat_grads = array_ops.concat(flat_grads, 0)
# Split the big tensor into num_splits packs. In cases where the
        # total size is not divisible by num_splits, the last pack gets
# more elements.
# TODO(zhengxq): it is also possible to optimize away all the concat
# as well.
num_splits = self.num_packs
# The array_ops.size function will sometimes remove static shapes. So if
# all gradient shapes are defined, we use another method to get the
# total size.
# TODO(yuefengz): move this logic to array_ops.size.
if all(g.shape.is_fully_defined() for g, _ in device_grads_and_vars):
total_grad_size = sum(
[g.shape.num_elements() for g, _ in device_grads_and_vars])
else:
total_grad_size = array_ops.size(concat_grads)
split_size = total_grad_size // num_splits
split_size_last = total_grad_size - split_size * (num_splits - 1)
split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
grad_packs = array_ops.split(concat_grads, split_sizes)
# Ready to aggregate the repacked gradients, with fake variables.
# TODO(zhengxq): It is hacky to have to use fake variables.
# We should remove the need for variables in
# aggregate_gradients_using*.
device_grad_packs.append(zip(grad_packs, [None] * num_splits))
self.all_device_shapes.append(device_shapes)
self.all_device_sizes.append(device_sizes)
return device_grad_packs
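  # Worked example of the split-size arithmetic above (illustration only):
  # with a concatenated gradient of 10 elements and num_packs = 3,
  # split_size = 10 // 3 = 3 and split_size_last = 10 - 3 * 2 = 4, so the
  # resulting packs have sizes [3, 3, 4] and the last pack absorbs the
  # remainder.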
def unpack(self, summed_device_grad_packs):
"""Reverse the pack."""
aggregated_device_grads = []
for (summed_device_grad_packs,
device_grads_and_vars, device_shapes, device_sizes) in zip(
summed_device_grad_packs, self.grouped_grads_and_vars,
self.all_device_shapes, self.all_device_sizes):
# pylint: enable=line-too-long
# Reverse the packing operations in the previous steps. Form the
# summed gradients back into their original shapes.
with ops.colocate_with(summed_device_grad_packs[0][0]):
# Form a list of the summed grad packs.
device_grad_packs = [g for g, _ in summed_device_grad_packs]
# Concat them back into a big flat tensor.
device_grads_concat = array_ops.concat(device_grad_packs, 0)
# Split the tensors back into their original sizes.
grads_with_sizes = array_ops.split(device_grads_concat, device_sizes)
# Reshape the tensors back into their original shapes.
grads_with_shapes = [
array_ops.reshape(grad, shape)
for shape, grad in zip(device_shapes, grads_with_sizes)
]
# Form the list with the original list of variables.
summed_device_grads = [
(g, v) for g, (_, v) in zip(grads_with_shapes,
device_grads_and_vars)
]
aggregated_device_grads.append(summed_device_grads)
return aggregated_device_grads
class _AggregateSmallTensorPacker(object):
"""Concatenate small gradient tensors together for reduction."""
def __init__(self,
agg_small_grads_max_bytes=1048576,
agg_small_grads_max_group=16):
"""Initialize the _AggregateSmallTensorPacker object.
Args:
agg_small_grads_max_bytes: largest tensor eligible for aggregation,
in number of bytes.
agg_small_grads_max_group: largest permitted aggregation of small
tensors.
Raises:
ValueError: if `agg_small_grads_max_bytes` or `agg_small_grads_max_group`
is not greater than 0.
"""
if agg_small_grads_max_bytes <= 0 or agg_small_grads_max_group <= 0:
raise ValueError("agg_small_grads_max_bytes and agg_small_grads_max_group"
" should both be greater than zero.")
self.agg_small_grads_max_bytes = agg_small_grads_max_bytes
self.agg_small_grads_max_group = agg_small_grads_max_group
def pack(self, grouped_grads_and_vars):
"""Aggregate small tensors."""
if (self.agg_small_grads_max_bytes > 0 and
self.agg_small_grads_max_group > 0):
device_grads, self.packing = cross_device_utils.pack_small_tensors(
grouped_grads_and_vars,
max_bytes=self.agg_small_grads_max_bytes,
max_group=self.agg_small_grads_max_group)
return device_grads
def unpack(self, summed_device_grad_packs):
"""Reverse the aggregation process."""
return cross_device_utils.unpack_small_tensors(summed_device_grad_packs,
self.packing)
def _pack_tensors(device_grads,
num_packs=0,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=0):
"""Pack tensors if specified."""
if num_packs > 0:
tensor_packer = _ConcatAndSplitPacker(num_packs)
device_grad_packs = tensor_packer.pack(device_grads)
elif agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0:
tensor_packer = _AggregateSmallTensorPacker(agg_small_grads_max_bytes,
agg_small_grads_max_group)
device_grad_packs = tensor_packer.pack(device_grads)
else:
tensor_packer = None
device_grad_packs = device_grads
return device_grad_packs, tensor_packer
def _unpack_tensors(reduced, tensor_packer=None):
"""Unpack tensors if they are packed before all-reduce."""
if tensor_packer:
return tensor_packer.unpack(reduced)
return reduced
class AllReduceCrossDeviceOps(CrossDeviceOps):
"""Reduction using all-reduce."""
def __init__(self,
all_reduce_alg="nccl",
num_packs=1,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=10):
"""All-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation:
1) If `num_packs` is non-zero, pack values into
`num_packs` splits.
2) Otherwise, if `agg_small_grads_max_bytes` > 0 and
`agg_small_grads_max_group` > 0, aggregate values smaller than
`agg_small_grads_max_bytes` into groups with at most
`agg_small_grads_max_group` values.
3) Otherwise, no repacking or grouping will happen.
Args:
all_reduce_alg: the all-reduce algorithm to use, currently only "nccl" or
"hierarchical_copy" are supported.
num_packs: see above.
agg_small_grads_max_bytes: see above.
agg_small_grads_max_group: see above.
"""
self._all_reduce_alg = all_reduce_alg
self._num_packs = num_packs
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
self._simple_cross_replica_ops = ReductionToOneDevice()
super(AllReduceCrossDeviceOps, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
if _devices_match(per_replica_value, destinations):
return self._batch_all_reduce(reduce_op, [per_replica_value])[0]
else:
return self._simple_cross_replica_ops.reduce(reduce_op, per_replica_value,
destinations)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
all_devices_match = _all_devices_match(value_destination_pairs)
contains_indexed_slices = cross_device_utils.contains_indexed_slices(
value_destination_pairs)
if (all_devices_match and not context.executing_eagerly()
and not contains_indexed_slices):
return self._batch_all_reduce(reduce_op,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
logging.log_first_n(logging.WARN,
"Efficient batch_reduce is not supported if "
"destinations are different.",
10)
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All-reduce algorithm in a batch."""
dense_values, dense_indices, sparse_values, sparse_indices = (
cross_device_utils.split_by_sparsity(per_replica_values))
if dense_values:
dense_results = self._do_batch_all_reduce(reduce_op, dense_values)
else:
dense_results = []
if sparse_values:
sparse_results = self._do_batch_all_reduce_sparse(reduce_op,
sparse_values)
else:
sparse_results = []
return cross_device_utils.stitch_values(((dense_results, dense_indices),
(sparse_results, sparse_indices)))
def _do_batch_all_reduce(self, reduce_op, dense_values):
"""Run batch all-reduces."""
logging.log_first_n(
        logging.INFO, "batch_all_reduce: %d all-reduces with algorithm = %s, "
"num_packs = %d, agg_small_grads_max_bytes = %d and "
"agg_small_grads_max_group = %d" %
(len(dense_values), self._all_reduce_alg, self._num_packs,
self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)
destinations = dense_values[0].devices
grouped = _group_value_by_device(dense_values)
device_grad_packs, tensor_packer = _pack_tensors(
grouped, self._num_packs, self._agg_small_grads_max_bytes,
self._agg_small_grads_max_group)
# The actual aggregation of the repacked gradients. Note that they are
# sharded among different aggregation trees. So it is important to strike
# the balance on num_splits.
if self._all_reduce_alg == "nccl":
# TODO(yuefengz): merge this into the all-reduce library.
reduced = cross_device_utils.aggregate_gradients_using_nccl(
device_grad_packs)
else:
# TODO(yuefengz): check that gpu ids in `destinations` are in ascending
# order.
reduced = (
cross_device_utils.aggregate_gradients_using_hierarchical_copy(
destinations, device_grad_packs))
reduced = _unpack_tensors(reduced, tensor_packer)
return _ungroup_and_make_mirrored(reduced, dense_values[0], reduce_op)
def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):
"""Run batch all-reduce for sparse values."""
logging.log_first_n(
logging.WARN,
"Efficient allreduce is not supported for %d IndexedSlices" %
len(sparse_values), 10)
# Use `sparse_values` as destinations to do all-reduces. It is effectively
# an allgather under the hood but not an efficient one.
return self._simple_cross_replica_ops.batch_reduce(
reduce_op, zip(sparse_values, sparse_values))
# For compatibility with code using the old name of `AllReduceCrossDeviceOps`.
AllReduceCrossTowerOps = AllReduceCrossDeviceOps
AllReduceSpecTuple = collections.namedtuple("AllReduceSpecTuple",
"alg shards limit")
@tf_export("distribute.NcclAllReduce")
class NcclAllReduce(AllReduceCrossDeviceOps):
"""Reduction using NCCL all-reduce."""
def __init__(self, num_packs=1):
"""NCCL all-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation.
Args:
num_packs: values will be packed in this many splits. `num_packs` should
be greater than 0.
"""
    assert num_packs > 0, (
        "NCCL all-reduce requires num_packs > 0, but {} is specified".format(
num_packs))
super(NcclAllReduce, self).__init__(
all_reduce_alg="nccl", num_packs=num_packs)
@tf_export("distribute.HierarchicalCopyAllReduce")
class HierarchicalCopyAllReduce(AllReduceCrossDeviceOps):
"""Reduction using hierarchical copy all-reduce.
This is a good reduction for configurations like Nvidia DGX-1.
"""
def __init__(self, num_packs=1):
"""Hierarchical copy all-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation.
Args:
num_packs: values will be packed in this many splits. `num_packs` should
be greater than 0.
"""
super(HierarchicalCopyAllReduce, self).__init__(
all_reduce_alg="hierarchical_copy",
num_packs=num_packs)
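# Usage sketch (assumption: the public `tf.distribute` API of the same
# TensorFlow version): either all-reduce implementation can be selected
# through a strategy's `cross_device_ops` argument, e.g.
#
#   strategy = tf.distribute.MirroredStrategy(
#       cross_device_ops=tf.distribute.HierarchicalCopyAllReduce(num_packs=1))
#
# with `tf.distribute.NcclAllReduce(num_packs=1)` as the NCCL-based
# alternative when NCCL is available.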
class MultiWorkerAllReduce(AllReduceCrossDeviceOps):
"""All-reduce algorithms for distributed TensorFlow."""
def __init__(self,
worker_devices,
num_gpus_per_worker,
all_reduce_spec=("pscpu/pscpu", 2, -1),
num_packs=0,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=10):
"""Initialize the all-reduce algorithm.
Args:
worker_devices: a list of device strings for workers participating in
all-reduce.
num_gpus_per_worker: number of GPU devices per worker.
all_reduce_spec: a tuple or a named tuple or a list of tuples specifying
the all-reduce algorithm.
1. The first element of a tuple is the name of the all-reduce algorithm.
Valid algorithm names are: "nccl", "nccl/xring", "nccl/rechd",
"nccl/pscpu", "xring", "pscpu", "psgpu", "pscpu/pscpu". Algorithms with
a "/" are hierarchical, so two all-reduces are executed, the first one
aggregates tensors within a worker and the second aggregates across
workers.
      2. The second element of a tuple is the number of shards when doing
      all-reduce. Say its value is M: each tensor, after packing, will be
      split into M shards, then M parallel all-reduces are performed, and
      finally the shards are concatenated back into a complete tensor.
      3. The third element is the maximum size of tensors the algorithm
      specified by the first element applies to. For example, if
      all_reduce_spec=[("nccl", 2, 1024), ("pscpu/pscpu", 2, -1)], tensors no
      larger than 1024 bytes use a 2-shard "nccl" all-reduce and all other
      tensors use a 2-shard "pscpu/pscpu" algorithm. The third elements should
      be in increasing order across tuples and end with -1, which indicates
      infinity.
num_packs: see AllReduceCrossDeviceOps.
agg_small_grads_max_bytes: see AllReduceCrossDeviceOps.
agg_small_grads_max_group: see AllReduceCrossDeviceOps.
"""
self._worker_devices = worker_devices
self._num_gpus_per_worker = num_gpus_per_worker
super(MultiWorkerAllReduce, self).__init__(
num_packs=num_packs,
agg_small_grads_max_bytes=agg_small_grads_max_bytes,
agg_small_grads_max_group=agg_small_grads_max_group)
def validate_and_complete_spec(spec):
"""Validate and complete the all-reduce spec."""
# TODO(yuefengz): support namedtuple.
if not isinstance(spec, tuple):
raise ValueError(
"A tuple is expected for all-reduce spec: %r" % all_reduce_spec)
if not spec or len(spec) > 3:
        raise ValueError(
            "Too many or too few elements in the all-reduce spec tuple: %r" % spec)
if len(spec) == 1:
return AllReduceSpecTuple(spec[0], 1, -1)
elif len(spec) == 2:
return AllReduceSpecTuple(spec[0], spec[1], -1)
else:
return AllReduceSpecTuple(*spec)
self._all_reduce_spec = []
if isinstance(all_reduce_spec, six.string_types):
self._all_reduce_spec.append(AllReduceSpecTuple(all_reduce_spec, 1, -1))
elif isinstance(all_reduce_spec, tuple):
self._all_reduce_spec.append(validate_and_complete_spec(all_reduce_spec))
elif isinstance(all_reduce_spec, list):
self._all_reduce_spec = [
validate_and_complete_spec(spec) for spec in all_reduce_spec
]
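  # Illustrative spec (hypothetical worker names, 2 GPUs per worker): tensors
  # up to 1024 bytes use a 2-shard "nccl" all-reduce and everything larger
  # falls through to "pscpu/pscpu":
  #
  #   MultiWorkerAllReduce(
  #       worker_devices=["/job:worker/task:0", "/job:worker/task:1"],
  #       num_gpus_per_worker=2,
  #       all_reduce_spec=[("nccl", 2, 1024), ("pscpu/pscpu", 2, -1)])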
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All-reduce algorithm in a batch."""
logging.log_first_n(
logging.INFO,
"Distributed batch_all_reduce: %d all-reduces with "
"allreduce_spec = %r, num_packs = %d, agg_small_grads_max_bytes = %d, "
"and agg_small_grads_max_group = %d" %
(len(per_replica_values), self._all_reduce_spec, self._num_packs,
self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)
device_grads = _group_value_by_device(per_replica_values)
# The all-reduce library requires fully defined shapes.
# TODO(yuefengz): when tensor sharding is not needed, static shapes are not
# required as well.
for device_grad in device_grads:
for grad, _ in device_grad:
if not grad.shape.is_fully_defined():
raise ValueError("Shape is unknown for node %r" % grad)
remaining_grads = device_grads
aggregated_grads = []
for spec_tuple in self._all_reduce_spec:
if spec_tuple.limit < 0:
this_grads = remaining_grads
remaining_grads = []
else:
(this_grads, remaining_grads) = cross_device_utils.split_grads_by_size(
spec_tuple.limit, remaining_grads)
if this_grads:
device_grad_packs, tensor_packer = _pack_tensors(
this_grads, self._num_packs, self._agg_small_grads_max_bytes,
self._agg_small_grads_max_group)
range_agg_grads = cross_device_utils.sum_gradients_all_reduce(
self._worker_devices, device_grad_packs, len(self._worker_devices),
spec_tuple.alg, spec_tuple.shards, range(self._num_gpus_per_worker))
range_agg_grads = _unpack_tensors(range_agg_grads, tensor_packer)
if not aggregated_grads:
aggregated_grads = range_agg_grads
else:
assert len(aggregated_grads) == len(range_agg_grads)
for i in range(len(aggregated_grads)):
aggregated_grads[i] += range_agg_grads[i]
assert not remaining_grads
return _ungroup_and_make_mirrored(aggregated_grads, per_replica_values[0],
reduce_op)
@tf_export("distribute.experimental.CollectiveCommunication")
class CollectiveCommunication(enum.Enum):
"""Communication choices for CollectiveOps.
* `AUTO`: Default to runtime's automatic choices.
* `RING`: TensorFlow's ring algorithms for all-reduce and
all-gather.
* `NCCL`: Use ncclAllReduce for all-reduce, and ring algorithms for
all-gather. TODO(ayushd): add ncclAllGather implementation.
"""
AUTO = "AUTO"
RING = "RING"
NCCL = "NCCL"
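# Usage sketch (assumption: the `communication` keyword of the public
# collective strategy in the same TensorFlow version; it is not defined in
# this file):
#
#   strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
#       communication=tf.distribute.experimental.CollectiveCommunication.NCCL)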
# TODO(yuefengz): support in-graph collective all-reduce.
class CollectiveAllReduce(CrossDeviceOps):
"""All-reduce cross device ops using collective ops.
  In between-graph replicated training, it will still do all-reduces across
  all workers and then put the results on the right destinations.
"""
def __init__(self,
num_workers=1,
num_gpus_per_worker=0,
all_reduce_merge_scope=32,
collective_keys=None):
"""Initializes the object.
Args:
num_workers: number of workers in the between-graph replicated training.
num_gpus_per_worker: number of GPUs per worker.
all_reduce_merge_scope: size of groups into which to partition consecutive
gradients grouped under a common 'allreduce' name scope. This is useful
for some optimization of collective ops.
collective_keys: an optional CollectiveKey object.
"""
self._num_workers = num_workers
self._num_gpus_per_worker = num_gpus_per_worker
self._all_reduce_merge_scope = all_reduce_merge_scope
self._collective_keys = (collective_keys or
cross_device_utils.CollectiveKeys())
super(CollectiveAllReduce, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
all_reduced = self._batch_all_reduce(reduce_op, [per_replica_value])[0]
device_map, logical_device = get_device_map_from(destinations)
if (all_reduced.device_map is device_map and
all_reduced.logical_device == logical_device):
return all_reduced
devices = device_map.logical_to_actual_devices(logical_device)
index = []
for d in devices:
if d in all_reduced.devices:
index.append(all_reduced.get(d))
else:
# TODO(josh11b): Once we add support for model parallelism, get the
# copy from the corresponding replica instead of the primary.
with ops.control_dependencies(all_reduced.values), ops.device(d):
index.append(array_ops.identity(all_reduced.primary))
return value_lib.Mirrored(device_map, index, logical_device)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
all_devices_match = _all_devices_match(value_destination_pairs)
if all_devices_match:
return self._batch_all_reduce(reduce_op,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
logging.log_first_n(
logging.WARN, "Efficient batch_reduce is not supported if "
"destinations are different.", 10)
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
def _make_gradient_chunks(self, per_replica_values, all_reduce_merge_scope):
"""Make `per_replica_values` into chunks."""
grouped_by_device = _group_value_by_device(per_replica_values)
grouped_by_var = list(zip(*grouped_by_device))
# grouped_by_var is grouped by variables and takes the following format:
# [((grad0_gpu0, v0_gpu0), (grad0_gpu1, v0_gpu1), (grad0_gpu2, v0_gpu2) ..),
    #  ((grad1_gpu0, v1_gpu0), (grad1_gpu1, v1_gpu1), (grad1_gpu2, v1_gpu2) ..),
    #  ((grad2_gpu0, v2_gpu0), (grad2_gpu1, v2_gpu1), (grad2_gpu2, v2_gpu2) ..),
# ...
# ]
chunked_gv = [
grouped_by_var[x:x + all_reduce_merge_scope]
for x in range(0, len(grouped_by_var), all_reduce_merge_scope)
]
return chunked_gv
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All reduce algorithm in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce: %d all-reduces, "
"num_workers = %d" % (len(per_replica_values), self._num_workers), 10)
dense_values, dense_indices, sparse_values, sparse_indices = (
cross_device_utils.split_by_sparsity(per_replica_values))
if dense_values:
dense_results = self._do_batch_all_reduce_dense(reduce_op, dense_values)
else:
dense_results = []
if sparse_values:
sparse_results = self._do_batch_all_reduce_sparse(reduce_op,
sparse_values)
else:
sparse_results = []
return cross_device_utils.stitch_values(((dense_results, dense_indices),
(sparse_results, sparse_indices)))
def _do_batch_all_reduce_dense(self, reduce_op, per_replica_values):
"""All-reduce across all workers in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce: %d all-reduces, "
"num_workers = %d" % (len(per_replica_values), self._num_workers), 10)
chunked_gv = self._make_gradient_chunks(per_replica_values,
self._all_reduce_merge_scope)
reduced_gv_list = []
for chunk in chunked_gv:
with ops.name_scope("allreduce"):
for grad_and_vars in chunk:
# Gradients for the same variable but from different devices.
scaled_grads = [g for g, _ in grad_and_vars]
collective_reduced = cross_device_utils.build_collective_reduce(
scaled_grads, self._num_workers, self._collective_keys, "Add",
"Id")
result = []
for (_, v), g in zip(grad_and_vars, collective_reduced):
result.append([g, v])
reduced_gv_list.append(result)
new_device_grads = [list(x) for x in zip(*reduced_gv_list)]
return _ungroup_and_make_mirrored(
new_device_grads,
per_replica_values[0],
reduce_op,
num_between_graph_workers=self._num_workers)
def _do_batch_all_reduce_sparse(self, reduce_op, per_replica_values):
"""All-reduce IndexedSlices across all workers in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce for IndexedSlices: "
"%d all-reduces, num_workers = %d" %
(len(per_replica_values), self._num_workers), 10)
chunked_gv = self._make_gradient_chunks(per_replica_values,
self._all_reduce_merge_scope)
reduced_gv_list = []
for chunk in chunked_gv:
with ops.name_scope("allreduce"):
for grad_and_vars in chunk:
# Gradients for the same variable but from different devices.
scaled_grads = [g for g, _ in grad_and_vars]
values = [g.values for g in scaled_grads]
indices = [g.indices for g in scaled_grads]
assert len(values) == len(indices)
# Build two separate allgathers, one for values, the other one for
# indices.
gathered_values = cross_device_utils.build_collective_gather(
values, self._num_workers, self._collective_keys)
gathered_indices = cross_device_utils.build_collective_gather(
indices, self._num_workers, self._collective_keys)
assert len(gathered_values) == len(gathered_indices)
collective_reduced = []
for i in range(len(values)):
reduced = ops.IndexedSlices(
gathered_values[i],
gathered_indices[i],
dense_shape=scaled_grads[i].dense_shape)
collective_reduced.append(reduced)
result = []
for (_, v), g in zip(grad_and_vars, collective_reduced):
result.append([g, v])
reduced_gv_list.append(result)
new_device_grads = [list(x) for x in zip(*reduced_gv_list)]
return _ungroup_and_make_mirrored(
new_device_grads,
per_replica_values[0],
reduce_op,
num_between_graph_workers=self._num_workers)
_dgx1_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
[0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
def _has_dgx1_like_links(gpu_links):
if not gpu_links:
return False
# TODO(yuefengz): figure out the right topology for hierarchical copy if
  # the number of gpus is less than 8.
if len(gpu_links) < 8:
return False
for i, (gpu_link, dgx1_link) in enumerate(zip(gpu_links, _dgx1_links)):
if (set(gpu_link) != set(dgx1_link) and
set(gpu_link) != set(dgx1_link + [i])):
return False
return True
def _choose_all_reduce_algorithm(device_links):
if _has_dgx1_like_links(device_links):
return HierarchicalCopyAllReduce(num_packs=len(device_links))
else:
return NcclAllReduce(num_packs=1)
def choose_the_best(devices, session_config=None):
"""Find the best subclass of CrossDeviceOps given a session config.
Args:
devices: a list of devices passed to `tf.distribute.Strategy`.
session_config: a `tf.ConfigProto` or `None`. If `None`, it will make
decision based on all local devices.
Returns:
A subclass of `CrossDeviceOps`.
"""
requested_devices = set([device_util.canonicalize(d) for d in devices])
machine_devices = device_lib.list_local_devices(session_config=session_config)
using_devices = []
for d in machine_devices:
if device_util.canonicalize(d.name) in requested_devices:
using_devices.append(d)
else:
logging.info(
"Device is available but not used by distribute strategy: %s", d.name)
if len(using_devices) != len(requested_devices):
logging.warning("Not all devices in `tf.distribute.Strategy` are visible "
"to TensorFlow.")
return ReductionToOneDevice()
if any(d.device_type.lower() != "gpu" for d in using_devices):
    logging.warning("Some devices in `tf.distribute.Strategy` are not GPUs, "
                    "falling back to reduction to one device.")
return ReductionToOneDevice()
device_links = [[] for _ in range(len(using_devices))]
for i, device in enumerate(using_devices):
for link in device.locality.links.link:
device_links[i].append(link.device_id)
return _choose_all_reduce_algorithm(device_links)
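# Usage sketch (illustration only; device names are hypothetical):
#
#   cross_device_ops = choose_the_best(["/gpu:0", "/gpu:1"])
#
# returns a HierarchicalCopyAllReduce on DGX-1-like NVLink topologies, an
# NcclAllReduce for other all-GPU configurations, and ReductionToOneDevice
# as the fallback when not all requested devices are visible GPUs.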
|
py | 1a4c7cb68576ba7f912418413a30439c3d49db7d | import numpy as np
from .base_actuator import Actuator
from spike_swarm_sim.register import actuator_registry
@actuator_registry(name='wheel_actuator')
class WheelActuator(Actuator):
""" Robot wheel actuator using a differential drive system.
"""
def __init__(self, *args, robot_radius=0.11, dt=1., min_thresh=0.0, **kwargs):
super(WheelActuator, self).__init__(*args, **kwargs)
self.robot_radius = robot_radius
self.dt = dt
self.delta_pos = np.zeros(2)
self.delta_theta = 0
self.min_thresh = min_thresh
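    # Worked example of the differential-drive update implemented in step()
    # below (illustration, not part of the simulator API): with
    # robot_radius = 0.11, dt = 1 and wheel speeds v_motors = [0.2, 0.1], the
    # robot turns about an instantaneous center of curvature (ICC) located at
    # R = 0.5 * 0.11 * (0.2 + 0.1) / (0.2 - 0.1 + 1e-3) ~= 0.163 from its
    # center, rotating by w * dt = (0.2 - 0.1 + 1e-3) / (0.5 * 0.11) ~= 1.84
    # radians in one step.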
    def step(self, v_motors):
        if isinstance(v_motors, list):
            v_motors = np.array(v_motors)
        current_pos = self.actuator_owner.position
        current_theta = self.actuator_owner.orientation
        # Ignore motor speeds below the activation threshold.
        v_motors[np.abs(v_motors) < self.min_thresh] = 0.0
        delta_t = self.dt
        # Differential drive kinematics: R is the signed distance from the robot
        # center to the instantaneous center of curvature (ICC) and w is the
        # angular speed; the 1e-3 term avoids division by zero when both wheels
        # spin at the same speed.
        R = .5 * self.robot_radius * v_motors.sum() / (v_motors[0] - v_motors[1] + 1e-3)
        w = (v_motors[0] - v_motors[1] + 1e-3) / (self.robot_radius * .5)
        icc = current_pos + R * np.array([-np.sin(current_theta), np.cos(current_theta)])
        # Rotate the robot position around the ICC by the angle w * delta_t.
        transf_mat = lambda x: np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
        self.delta_pos = transf_mat(w * delta_t).dot(current_pos - icc) + icc - current_pos
        self.delta_theta = w * delta_t
new_pos = self.actuator_owner.position + self.delta_pos.astype(float)
print(self.actuator_owner.position, new_pos)
self.actuator_owner.position = new_pos
self.actuator_owner.orientation = self.actuator_owner.orientation + self.delta_theta
self.actuator_owner.orientation = self.actuator_owner.orientation % (2 * np.pi)
self.delta_pos = np.zeros(2)
self.delta_theta = 0.0 |
py | 1a4c7d92829eaf40fe2cbccfefe9f65699c0c1fb | """This module converts movie data stored in a text file (movie_list.txt),
into a list of Movie objects, which are passed to fresh_tomatoes.py for
rendering.
"""
import media
import fresh_tomatoes
def load_movie_site():
"""Reads data from file, creates Movie list, passes to fresh_tomatoes"""
    # Read the raw, unformatted string from the file
    with open(r"movie_list.txt") as full_list_file:
        full_list_raw = full_list_file.read()
    # Convert the string into a 2-D list
    full_list = full_list_raw.split("|")
itemized_list = list(map(lambda x: x.split("* "), full_list))
#Create list of Movie objects
movie_list = []
for i in range(0, len(itemized_list)):
itemized_list[i][0] = itemized_list[i][0][1:]
movie_list.append(media.Movie(itemized_list[i][0],
itemized_list[i][1],
itemized_list[i][2],
itemized_list[i][3],
itemized_list[i][4],
itemized_list[i][5],
itemized_list[i][6],
itemized_list[i][7],
itemized_list[i][8]))
# Pass list to rendering module
fresh_tomatoes.open_movies_page(movie_list)
# Run the main code
load_movie_site()
|
py | 1a4c7e3a6ec149004b72ce8f5da7096f3e4729e5 | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 4
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
py | 1a4c859e5777b65b4d2fea31a8cc144639991178 | import socket
server = socket.socket()
server.bind(('127.0.0.1', 5000))
server.listen(5)
while True:
    # Accept a client connection and read (and discard) the request bytes
    client, (client_host, client_port) = server.accept()
    client.recv(4096)
    # Build a minimal HTTP response: status line, headers, then the HTML body
    response_type = 'HTTP/1.1 200 OK\n'
    headers = 'Content-Type: text/html\n\n'
    with open('task_3/index.html', 'r') as f:
        body = f.read()
    response = response_type + headers + body
    # Send the response and close the connection
    client.send(response.encode('utf-8'))
    client.close()
|
py | 1a4c85f6a333f4c5e044abeacb0abf1d28dadba3 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dim_spec
Revision ID: c611f2b591b8
Revises: ad4d656d92bc
Create Date: 2016-11-02 17:36:04.970448
"""
# revision identifiers, used by Alembic.
revision = 'c611f2b591b8'
down_revision = 'ad4d656d92bc'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('columns', sa.Column('dimension_spec_json', sa.Text(), nullable=True))
def downgrade():
op.drop_column('columns', 'dimension_spec_json')
|
py | 1a4c875919dfef302a4d82f168f19e82ffe2316b | from operator import itemgetter
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from players.codemaster import Codemaster
class AICodemaster(Codemaster):
def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
super().__init__()
self.brown_ic = brown_ic
self.glove_vecs = glove_vecs
self.word_vectors = word_vectors
self.wordnet_lemmatizer = WordNetLemmatizer()
self.lancaster_stemmer = LancasterStemmer()
self.cm_wordlist = []
with open('players/cm_wordlist.txt') as infile:
for line in infile:
self.cm_wordlist.append(line.rstrip())
self.syns = []
for word in self.cm_wordlist:
for synset_in_cmwordlist in wordnet.synsets(word):
self.syns.append(synset_in_cmwordlist)
def set_game_state(self, words, maps):
self.words = words
self.maps = maps
def get_clue(self):
lin_results = []
count = 0
red_words = []
bad_words = []
for i in range(25):
if self.words[i][0] == '*':
continue
elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
bad_words.append(self.words[i].lower())
else:
red_words.append(self.words[i].lower())
print("RED:\t", red_words)
for red_word in red_words:
for synset_in_cmwordlist in self.syns:
lin_clue = 0
for red_synset in wordnet.synsets(red_word):
try:
# only if the two compared words have the same part of speech
lin_score = synset_in_cmwordlist.lin_similarity(red_synset, self.brown_ic)
except :
continue
if lin_score:
if not self.arr_not_in_word(synset_in_cmwordlist.lemma_names()[0], red_words + bad_words):
continue
lin_results.append((lin_score, synset_in_cmwordlist))
if lin_score > lin_clue:
lin_clue = lin_score
lin_results = list(reversed(sorted(lin_results, key=itemgetter(0))))
return [lin_results[0][1].lemma_names()[0], 1]
def arr_not_in_word(self, word, arr):
if word in arr:
return False
lemm = self.wordnet_lemmatizer.lemmatize(word)
lancas = self.lancaster_stemmer.stem(word)
for i in arr:
if i == lemm or i == lancas:
return False
if i.find(word) != -1:
return False
if word.find(i) != -1:
return False
return True
|
py | 1a4c8815b912bc3d3a13065d0b145c90b6ecd79d | # Imports for flask and sql
from flask import Flask, render_template, url_for, request, redirect, flash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
# Imports for plots
import plotly.express as px
import pandas as pd
import numpy as np
import json
import plotly
# Other imports
from datetime import datetime, date
import re
# Create flask app object and hooking up database
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
db = SQLAlchemy(app)
# This class is responsible for a table 'ledger' in database
# In this table are stored information about user operations
class Ledger(db.Model):
id = db.Column(db.Integer, primary_key=True)
amount = db.Column(db.Float(precision=2)) # The amount of the operation
date = db.Column(db.Date) # The date of the operation
description = db.Column(db.String(200)) # The description of the operation
category = db.Column(db.String(50)) # The category of the operation - 'Deposit' for each deposit, and custom for others
def get_current_date():
"""
    Function returns the current date as a string.
    :return: string in format YYYY-MM-DD
"""
today = date.today()
today = today.strftime("%Y-%m-%d")
return today
def get_balance():
"""
    Function returns the current user balance.
    :return: string formatted as a float with two decimal places
"""
if len(Ledger.query.all()) == 0:
# This part is responsible for return 0 with two decimal places
return '0.00'
else:
        # Statement below:
        # - gets the 'amount' column from the ledger
        # - sums this column
        # - returns only the number (without parentheses)
        # - returns the number rounded to two decimal places
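        # Roughly equivalent raw SQL (illustration only; the exact statement
        # SQLAlchemy emits may differ): SELECT SUM(amount) FROM ledger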
return "{:.2f}".format(db.session.query(func.sum(Ledger.amount)).all()[0][0])
@app.route('/')
def home():
    # Home view displayed when the user enters the website
db.create_all() # Create table in database based on Ledger class
return render_template('home.html', # Render 'home.html' template
                           nav_active='home', # This param is responsible for highlighting the right tab in the menu
balance=get_balance()) # Menu keeps showing users balance
@app.route('/add_deposit', methods=['GET', 'POST'])
def add_deposit():
    # This page allows adding a deposit to the ledger
# When method is GET, render template with form
if request.method == 'GET':
return render_template('add_deposit.html', # Render template 'add_deposit.html'
nav_active='add', # Highlighted tab in menu
balance=get_balance(), # Menu keeps showing users balance
today=get_current_date()) # Current date is set in form in input date
# If method is POST, get values from form and save it in table
else:
# Default value of dollars - when user doesn't type value
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'ad-amount-dollars' in request.form:
dollars = request.form['ad-amount-dollars']
        # Default value of cents - when user doesn't type value
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'ad-amount-cents' in request.form:
cents = request.form['ad-amount-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3,
        # so we prepend a 0 to the cents string
if int(cents) < 10:
cents = '0' + str(cents)
# Get date from form and save it as datetime to 'date' var
if 'ad-date' in request.form:
date = request.form['ad-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description - get from form and save to 'desc' var
desc = ''
if 'ad-description' in request.form:
desc = request.form['ad-description']
# Concat dollars and cents into amount
# And change amount to float dtype
amount = str(dollars) + "." + str(cents)
amount = float(amount)
# Create object Ledger with data from form
added_row = Ledger(amount=amount,
description=desc,
date=date,
category='Deposit') # This category is default for each deposit
# Add row above to database
db.session.add(added_row)
db.session.commit()
# Display flash about adding a row
flash("Row has been added")
# Redirect to display ledger
return redirect(url_for('ledger'))
@app.route('/add_withdrawal', methods=['GET', 'POST'])
def add_withdrawal():
    # This page allows adding a withdrawal to the ledger
# When method is GET, render template with form
if request.method == 'GET':
return render_template('add_withdrawal.html', # Render template 'add_withdrawal.html'
nav_active='add', # Highlighted tab in menu
balance=get_balance(), # Menu keeps showing users balance
today=get_current_date()) # Current date is set in form in input date
# If method is POST, get values from form and save it in table
else:
# Default value of dollars - when user doesn't type value
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'aw-amount-dollars' in request.form:
dollars = request.form['aw-amount-dollars']
        # Default value of cents - when user doesn't type value
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'aw-amount-cents' in request.form:
cents = request.form['aw-amount-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3,
        # so we prepend a 0 to the cents string
if int(cents) < 10:
cents = '0' + str(cents)
# Get date from form and save it as datetime to 'date' var
if 'aw-date' in request.form:
date = request.form['aw-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description
desc = ''
# If description is in form, get from form and save to 'desc' var
if 'aw-description' in request.form:
desc = request.form['aw-description']
# Default category
category = 'Unsigned'
# If category is in form, get from form and save to 'category' var
if 'aw-category' in request.form:
category = request.form['aw-category']
# Concat dollars and cents into amount
# And change amount to float dtype
amount = '-' + str(dollars) + "." + str(cents)
amount = float(amount)
# Create object Ledger with data from form
added_row = Ledger(amount=amount, description=desc, date=date, category=category)
# Add row above to database
db.session.add(added_row)
db.session.commit()
# Display flash about adding a row
flash("Row has been added")
# Redirect to display ledger
return redirect(url_for('ledger'))
@app.route('/ledger', methods=['GET', 'POST'])
def ledger():
    # When the method is GET, this page displays the ledger
if request.method == 'GET':
# Get all rows from table ordered by date and save it to 'full_ledger'
        # In 'ledger.html' the post-transaction balance is calculated after each operation, ordered by date
full_ledger = Ledger.query.order_by(Ledger.date).all()
        # Get the ledger length - if it is empty, an appropriate text will be displayed in 'ledger.html'
ledger_len = len(full_ledger)
return render_template('ledger.html', # Render template 'ledger.html'
nav_active='ledger', # Highlighted tab in menu
full_ledger=full_ledger, # Full ledger to display
ledger_len=ledger_len, # Ledger len to check if ledger not empty
balance=get_balance()) # Menu keeps showing users balance
    # When ledger is called with method POST, it will delete a row
else:
        # The 'delete' button in a ledger row calls a modal
# Inside modal there is form with id row to delete
# Get value(id row) from this form
if 'id_row_to_del' in request.form:
id_row_to_del = request.form['id_row_to_del']
# Choose row from database, where id == id from form
row_to_del = Ledger.query.filter(Ledger.id == id_row_to_del).first()
# Delete row and save changes in database
db.session.delete(row_to_del)
db.session.commit()
# Display flash with information about delete row
flash('Row has been deleted')
        # Redirect to itself, but with method GET
return redirect(url_for('ledger'))
@app.route('/edit_row/<int:id_row_to_edit>', methods=['GET', 'POST'])
def edit_row(id_row_to_edit):
    # This page allows editing a deposit or a withdrawal
if request.method == 'GET':
# With method GET, get row from table in database, where id == id sent in URL
# Next - load this row into 'row' variable
# This row will be used to display values from this row in inputs of form in rendered page
row = Ledger.query.filter(Ledger.id == id_row_to_edit).first()
        # If the amount in the row is > 0, render edit deposit, else - edit withdrawal
if row.amount > 0:
return render_template('edit_deposit_row.html', # Render template 'edit_deposit_row.html'
nav_active='ledger', # Highlighted tab in menu
row=row, # Inputs have values from this row
                                   balance=get_balance())  # Menu keeps showing the user's balance
else:
return render_template('edit_withdrawal_row.html', # Render template 'edit_withdrawal_row.html'
nav_active='ledger', # Highlighted tab in menu
row=row, # Inputs have values from this row
                                   balance=get_balance())  # Menu keeps showing the user's balance
    # With method POST, values are taken from the form and saved in the database table
else:
        # Default value of dollars - when the user deletes the previous value and sends the form with an empty field
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'edit-dollars' in request.form:
dollars = request.form['edit-dollars']
# Default value of cents
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'edit-cents' in request.form:
cents = request.form['edit-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3
        # So we store cents as a string with a leading 0
if int(cents) < 10:
cents = '0' + str(cents)
# Get value from date input
if 'edit-date' in request.form:
date = request.form['edit-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description
desc = ''
# If description is in form, get from form and save to 'desc' var
if 'edit-description' in request.form:
desc = request.form['edit-description']
# If 'edit-category' is in form, that means the form is for withdrawal
# Else - form is about deposit
if 'edit-category' in request.form:
# If withdrawal - save category and add '-' before value of amount
category = request.form['edit-category']
amount = '-' + str(dollars) + "." + str(cents)
else:
# If deposit - save 'Deposit' category and amount without '-'
category = 'Deposit'
amount = str(dollars) + "." + str(cents)
# Change amount into float
amount = float(amount)
        # When we have the information from the form, we can change it in the database
        # Get the row from the table in the database where its id == the id to edit (from the URL)
row = Ledger.query.filter(Ledger.id == id_row_to_edit).first()
# Save information from form into variables from row
row.amount = amount
row.description = desc
row.date = date
row.category = category
# Save changes in database
db.session.commit()
        # Flash information about the edit
flash("Row has been edited")
# Redirect to ledger
return redirect(url_for('ledger'))
@app.route('/analysis')
def analysis():
    # This page displays analysis and visualisations based on the rows from the ledger
# Get full ledger ordered by date
full_ledger = Ledger.query.order_by(Ledger.date).all()
    # Save the ledger into a dataframe (for analysis)
ledger_df = pd.DataFrame({
# Simply values from these rows
'date': [x.date for x in full_ledger],
'amount': [x.amount for x in full_ledger],
'category': [x.category for x in full_ledger]
})
    # Add a new column to the data frame with the post-transaction balance
    # This column will be visualised
ledger_df['balance'] = np.cumsum(ledger_df['amount'])
    # First, we get the number of rows in the dataframe
    # If there are not enough rows, the plot does not display or displays poorly
    # So we will display a message instead of the plot
len_line = ledger_df.shape[0]
# Create line plot for balance
fig_line = px.line(ledger_df, # Data from main ledger dataframe
x='date', # Date as X axis
y='balance') # Balance as Y axis
# We introduce some cosmetic changes to plot
fig_line.update_layout(xaxis_title='Date', # X axis title
yaxis_title='Balance', # Y axis title
title='Balance of your ledger') # Plot title
# We have to encode plot into json to send it to view
plot_json_line = json.dumps(fig_line, cls=plotly.utils.PlotlyJSONEncoder)
    # To create a pie plot of withdrawals by category we have to rebuild the dataframe
    # First - get only rows with a category other than 'Deposit'
ledger_df_pie = ledger_df[ledger_df['category'] != 'Deposit']
    # The next step is to save the withdrawal amounts (negative by default) as absolute values
ledger_df_pie['amount'] = ledger_df_pie['amount'].abs()
    # As with the line plot - we need enough rows to display the plot
len_pie = ledger_df_pie.shape[0]
# Now we can create pie plot
fig_pie = px.pie(ledger_df_pie, values='amount', names='category', title='Expenses by category')
# Now we can encode this plot into json
plot_json_pie = json.dumps(fig_pie, cls=plotly.utils.PlotlyJSONEncoder)
# When we have everything, we can render template
return render_template(
'analysis.html',
nav_active='analysis', # Highlighted tab in menu
        balance=get_balance(),  # Menu keeps showing the user's balance
len_line=len_line, # Length of full ledger(to line plot)
plot_json_line=plot_json_line, # Line plot
len_pie=len_pie, # Length of dataframe with withdrawals
plot_json_pie=plot_json_pie, # Pie plot
)
@app.route('/about')
def about():
    # This page shows information about the project
return render_template('about.html',
nav_active='about', # Highlighted tab in menu
                           balance=get_balance())  # Menu keeps showing the user's balance
if __name__ == '__main__':
app.run()
|
py | 1a4c89773f03fc6a261c6eadadd5c4e2d38aea34 | from django.urls import path
from . import views # , include
urlpatterns = [
# path('', views.index, name='index'),
path('at_random', views.at_random, name='at_random'),
]
|
py | 1a4c8996f02d689f10c31d0ebc08862e0d3adb34 | from collections.abc import Mapping, Iterable
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
__all__ = ['Cell', 'cells']
# Cell functions
_dll.openmc_extend_cells.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
_dll.openmc_extend_cells.restype = c_int
_dll.openmc_extend_cells.errcheck = _error_handler
_dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_cell_get_id.restype = c_int
_dll.openmc_cell_get_id.errcheck = _error_handler
_dll.openmc_cell_get_fill.argtypes = [
c_int32, POINTER(c_int), POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_get_fill.restype = c_int
_dll.openmc_cell_get_fill.errcheck = _error_handler
_dll.openmc_cell_set_fill.argtypes = [
c_int32, c_int, c_int32, POINTER(c_int32)]
_dll.openmc_cell_set_fill.restype = c_int
_dll.openmc_cell_set_fill.errcheck = _error_handler
_dll.openmc_cell_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_cell_set_id.restype = c_int
_dll.openmc_cell_set_id.errcheck = _error_handler
_dll.openmc_cell_set_temperature.argtypes = [
c_int32, c_double, POINTER(c_int32)]
_dll.openmc_cell_set_temperature.restype = c_int
_dll.openmc_cell_set_temperature.errcheck = _error_handler
_dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_cell_index.restype = c_int
_dll.openmc_get_cell_index.errcheck = _error_handler
class Cell(_FortranObjectWithID):
"""Cell stored internally.
This class exposes a cell that is stored internally in the OpenMC
library. To obtain a view of a cell with a given ID, use the
:data:`openmc.capi.cells` mapping.
Parameters
----------
index : int
Index in the `cells` array.
Attributes
----------
id : int
ID of the cell
"""
__instances = WeakValueDictionary()
def __new__(cls, uid=None, new=True, index=None):
mapping = cells
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A cell with ID={} has already '
'been allocated.'.format(uid))
index = c_int32()
_dll.openmc_extend_cells(1, index, None)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
cell_id = c_int32()
_dll.openmc_cell_get_id(self._index, cell_id)
return cell_id.value
@id.setter
def id(self, cell_id):
_dll.openmc_cell_set_id(self._index, cell_id)
@property
def fill(self):
fill_type = c_int()
indices = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_get_fill(self._index, fill_type, indices, n)
if fill_type.value == 1:
if n.value > 1:
#TODO: off-by-one
return [Material(index=i+1 if i >= 0 else i)
for i in indices[:n.value]]
else:
#TODO: off-by-one
index = indices[0] + 1 if indices[0] >= 0 else indices[0]
return Material(index=index)
else:
raise NotImplementedError
@fill.setter
def fill(self, fill):
if isinstance(fill, Iterable):
n = len(fill)
indices = (c_int32*n)(*(m._index if m is not None else -1
for m in fill))
_dll.openmc_cell_set_fill(self._index, 1, n, indices)
elif isinstance(fill, Material):
indices = (c_int32*1)(fill._index)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
elif fill is None:
indices = (c_int32*1)(-1)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
def set_temperature(self, T, instance=None):
"""Set the temperature of a cell
Parameters
----------
T : float
Temperature in K
instance : int or None
Which instance of the cell
"""
_dll.openmc_cell_set_temperature(self._index, T, c_int32(instance))
class _CellMapping(Mapping):
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_cell_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return Cell(index=index.value)
def __iter__(self):
for i in range(len(self)):
yield Cell(index=i + 1).id
def __len__(self):
return c_int32.in_dll(_dll, 'n_cells').value
def __repr__(self):
return repr(dict(self))
cells = _CellMapping()
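
# Illustrative sketch (not part of the upstream module): how the `cells` mapping
# above might be used from user code once an OpenMC model has been initialized in
# memory. The function is only defined here as an example and is never called.
def _example_cells_usage():
    """Iterate over all cells known to the C/Fortran layer and inspect them."""
    for cell_id in cells:
        cell = cells[cell_id]
        print(cell.id, cell.fill)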
|
py | 1a4c89b84f50d2f350e4a437b7f7b06642dac218 | from django.contrib import admin
from recipes.models import Ingredient, IngredientUnitMeasure, \
IngredientFamily, IngredientPhoto, \
RecipeType, RecipeDifficulty
@admin.register(IngredientUnitMeasure)
class IngredientUnitMeasureAdmin(admin.ModelAdmin):
list_display = ['name', 'label']
actions_on_bottom = True
@admin.register(IngredientFamily)
class IngredientFamilyAdmin(admin.ModelAdmin):
list_display = ['name']
actions_on_bottom = True
@admin.register(RecipeType)
class RecipeTypeAdmin(admin.ModelAdmin):
list_display = ['label']
actions_on_bottom = True
@admin.register(RecipeDifficulty)
class RecipeDifficultyAdmin(admin.ModelAdmin):
list_display = ['label', 'level']
ordering = ['level']
actions_on_bottom = True
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = ['name', 'family']
# custom tabular inline for many to many relationship
class IngredientUnitMeasureInline(admin.TabularInline):
model = Ingredient.unit_measure.through
extra = 1
class IngredientPhotoInline(admin.StackedInline):
model = IngredientPhoto
inlines = [
IngredientUnitMeasureInline,
IngredientPhotoInline
]
# exclude unit_measure because it will be used into inlines
exclude = ('unit_measure',)
|
py | 1a4c8a1f1f7a1e4fca1afc52b008b3d11d39f2b4 | from django import forms
from django.contrib import admin
from .models import (
Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
Inner4Tabular, NonAutoPKBook, NonAutoPKBookChild, Novel,
ParentModelWithCustomPk, Poll, Profile, ProfileCollection, Question,
ReadOnlyInline, ShoppingWeakness, Sighting, SomeChildModel,
SomeParentModel, SottoCapo, Title, TitleCollection,
)
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
model = Author.books.through
class NonAutoPKBookTabularInline(admin.TabularInline):
model = NonAutoPKBook
classes = ('collapse',)
class NonAutoPKBookChildTabularInline(admin.TabularInline):
model = NonAutoPKBookChild
classes = ('collapse',)
class NonAutoPKBookStackedInline(admin.StackedInline):
model = NonAutoPKBook
classes = ('collapse',)
class EditablePKBookTabularInline(admin.TabularInline):
model = EditablePKBook
class EditablePKBookStackedInline(admin.StackedInline):
model = EditablePKBook
class AuthorAdmin(admin.ModelAdmin):
inlines = [
BookInline, NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
EditablePKBookTabularInline, EditablePKBookStackedInline,
NonAutoPKBookChildTabularInline,
]
class InnerInline(admin.StackedInline):
model = Inner
can_delete = False
readonly_fields = ('readonly',) # For bug #13174 tests.
class HolderAdmin(admin.ModelAdmin):
class Media:
js = ('my_awesome_admin_scripts.js',)
class ReadOnlyInlineInline(admin.TabularInline):
model = ReadOnlyInline
readonly_fields = ['name']
class InnerInline2(admin.StackedInline):
model = Inner2
class Media:
js = ('my_awesome_inline_scripts.js',)
class InnerInline3(admin.StackedInline):
model = Inner3
class Media:
js = ('my_awesome_inline_scripts.js',)
class TitleForm(forms.ModelForm):
title1 = forms.CharField(max_length=100)
def clean(self):
cleaned_data = self.cleaned_data
title1 = cleaned_data.get("title1")
title2 = cleaned_data.get("title2")
if title1 != title2:
raise forms.ValidationError("The two titles must be the same")
return cleaned_data
class TitleInline(admin.TabularInline):
model = Title
form = TitleForm
extra = 1
class Inner4StackedInline(admin.StackedInline):
model = Inner4Stacked
show_change_link = True
class Inner4TabularInline(admin.TabularInline):
model = Inner4Tabular
show_change_link = True
class Holder4Admin(admin.ModelAdmin):
inlines = [Inner4StackedInline, Inner4TabularInline]
class InlineWeakness(admin.TabularInline):
model = ShoppingWeakness
extra = 1
class QuestionInline(admin.TabularInline):
model = Question
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in QuestionInline'
class PollAdmin(admin.ModelAdmin):
inlines = [QuestionInline]
def call_me(self, obj):
return 'Callable in PollAdmin'
class ChapterInline(admin.TabularInline):
model = Chapter
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in ChapterInline'
class NovelAdmin(admin.ModelAdmin):
inlines = [ChapterInline]
class ConsigliereInline(admin.TabularInline):
model = Consigliere
class SottoCapoInline(admin.TabularInline):
model = SottoCapo
class ProfileInline(admin.TabularInline):
model = Profile
extra = 1
# admin for #18433
class ChildModel1Inline(admin.TabularInline):
model = ChildModel1
class ChildModel2Inline(admin.StackedInline):
model = ChildModel2
# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
model = BinaryTree
def get_extra(self, request, obj=None, **kwargs):
extra = 2
if obj:
return extra - obj.binarytree_set.count()
return extra
def get_max_num(self, request, obj=None, **kwargs):
max_num = 3
if obj:
return max_num - obj.binarytree_set.count()
return max_num
# admin for #19524
class SightingInline(admin.TabularInline):
model = Sighting
# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):
class Meta:
fields = '__all__'
model = SomeChildModel
widgets = {
'position': forms.HiddenInput,
}
def __init__(self, *args, **kwargs):
super(SomeChildModelForm, self).__init__(*args, **kwargs)
self.fields['name'].label = 'new label'
class SomeChildModelInline(admin.TabularInline):
model = SomeChildModel
form = SomeChildModelForm
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
site.register([Question, Inner4Stacked, Inner4Tabular])
|
py | 1a4c8b99367627b48c439ca4f5f91525728bda18 | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .base import Integer
from .scp_history_entry_v0 import SCPHistoryEntryV0
__all__ = ["SCPHistoryEntry"]
@type_checked
class SCPHistoryEntry:
"""
XDR Source Code::
union SCPHistoryEntry switch (int v)
{
case 0:
SCPHistoryEntryV0 v0;
};
"""
def __init__(
self,
v: int,
v0: SCPHistoryEntryV0 = None,
) -> None:
self.v = v
self.v0 = v0
def pack(self, packer: Packer) -> None:
Integer(self.v).pack(packer)
if self.v == 0:
if self.v0 is None:
raise ValueError("v0 should not be None.")
self.v0.pack(packer)
return
@classmethod
def unpack(cls, unpacker: Unpacker) -> "SCPHistoryEntry":
v = Integer.unpack(unpacker)
if v == 0:
v0 = SCPHistoryEntryV0.unpack(unpacker)
return cls(v=v, v0=v0)
return cls(v=v)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "SCPHistoryEntry":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "SCPHistoryEntry":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.v == other.v and self.v0 == other.v0
def __str__(self):
out = []
out.append(f"v={self.v}")
out.append(f"v0={self.v0}") if self.v0 is not None else None
return f"<SCPHistoryEntry {[', '.join(out)]}>"
|
py | 1a4c8bb2cb22fd298ceb4a5440e16bffc8bb1bac | # -*- coding: utf-8 -*-
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    print(err)  # the getopt module has no usage(); report the parse error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters+string.digits + string.punctuation + " "*10
return prefix + "".join((random.choice(symbols) for i in range(random.randrange(maxlen))))
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
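# Illustrative sketch (assumption, not part of the original script): the file written
# below can be read back into Group objects with jsonpickle, e.g. from a test fixture.
# This helper is only defined for illustration and is not called here.
def load_groups(path):
    with open(path) as src:
        return jsonpickle.decode(src.read())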
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata)) |
py | 1a4c8c117a73111c693355d2b1874c48815fe88f | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The SUCCD Ansatz.
"""
from typing import List, Optional, Tuple
import itertools
import logging
from qiskit.circuit import QuantumCircuit
from qiskit_nature import QiskitNatureError
from qiskit_nature.converters.second_quantization import QubitConverter
from .ucc import UCC
from .utils.fermionic_excitation_generator import generate_fermionic_excitations
logger = logging.getLogger(__name__)
# TODO: figure out how to implement `succ_full`: a variant of this class, which does include also
# the symmetrically mirrored double excitations, but assigns the same circuit parameter to them.
class SUCCD(UCC):
"""The SUCCD Ansatz.
The SUCCD Ansatz (by default) only contains double excitations. Furthermore, it only considers
the set of excitations which is symmetrically invariant with respect to spin-flips of both
particles. For more information see also [1].
Note, that this Ansatz can only work for singlet-spin systems. Therefore, the number of alpha
and beta electrons must be equal.
This is a convenience subclass of the UCC Ansatz. For more information refer to :class:`UCC`.
References:
[1] https://arxiv.org/abs/1911.10864
"""
def __init__(
self,
qubit_converter: Optional[QubitConverter] = None,
num_particles: Optional[Tuple[int, int]] = None,
num_spin_orbitals: Optional[int] = None,
reps: int = 1,
initial_state: Optional[QuantumCircuit] = None,
include_singles: Tuple[bool, bool] = (False, False),
):
"""
Args:
qubit_converter: the QubitConverter instance which takes care of mapping a
:class:`~.SecondQuantizedOp` to a :class:`PauliSumOp` as well as performing all
configured symmetry reductions on it.
num_particles: the tuple of the number of alpha- and beta-spin particles.
num_spin_orbitals: the number of spin orbitals.
reps: The number of times to repeat the evolved operators.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
include_singles: enables the inclusion of single excitations per spin species.
Raises:
QiskitNatureError: if the number of alpha and beta electrons is not equal.
"""
self._validate_num_particles(num_particles)
self._include_singles = include_singles
super().__init__(
qubit_converter=qubit_converter,
num_particles=num_particles,
num_spin_orbitals=num_spin_orbitals,
excitations=self.generate_excitations,
alpha_spin=True,
beta_spin=True,
max_spin_excitation=None,
reps=reps,
initial_state=initial_state,
)
@property
def include_singles(self) -> Tuple[bool, bool]:
"""Whether to include single excitations."""
return self._include_singles
@include_singles.setter
def include_singles(self, include_singles: Tuple[bool, bool]) -> None:
"""Sets whether to include single excitations."""
self._include_singles = include_singles
def generate_excitations(
self, num_spin_orbitals: int, num_particles: Tuple[int, int]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
"""Generates the excitations for the SUCCD Ansatz.
Args:
num_spin_orbitals: the number of spin orbitals.
num_particles: the number of alpha and beta electrons. Note, these must be identical for
this class.
Raises:
QiskitNatureError: if the number of alpha and beta electrons is not equal.
Returns:
The list of excitations encoded as tuples of tuples. Each tuple in the list is a pair of
tuples. The first tuple contains the occupied spin orbital indices whereas the second
one contains the indices of the unoccupied spin orbitals.
"""
self._validate_num_particles(num_particles)
excitations: List[Tuple[Tuple[int, ...], Tuple[int, ...]]] = []
excitations.extend(
generate_fermionic_excitations(
1,
num_spin_orbitals,
num_particles,
alpha_spin=self.include_singles[0],
beta_spin=self.include_singles[1],
)
)
num_electrons = num_particles[0]
beta_index_shift = num_spin_orbitals // 2
# generate alpha-spin orbital indices for occupied and unoccupied ones
alpha_occ = list(range(num_electrons))
alpha_unocc = list(range(num_electrons, beta_index_shift))
# the Cartesian product of these lists gives all possible single alpha-spin excitations
alpha_excitations = list(itertools.product(alpha_occ, alpha_unocc))
logger.debug("Generated list of single alpha excitations: %s", alpha_excitations)
# Find all possible double excitations constructed from the list of single excitations.
# Note, that we use `combinations_with_replacement` here, in order to also get those double
# excitations which excite from the same occupied level twice. We will need those in the
# following post-processing step.
pool = itertools.combinations_with_replacement(alpha_excitations, 2)
for exc in pool:
# find the two excitations (Note: SUCCD only works for double excitations!)
alpha_exc, second_exc = exc[0], exc[1]
# shift the second excitation into the beta-spin orbital index range
beta_exc = (
second_exc[0] + beta_index_shift,
second_exc[1] + beta_index_shift,
)
# add the excitation tuple
occ: Tuple[int, ...]
unocc: Tuple[int, ...]
occ, unocc = zip(alpha_exc, beta_exc)
exc_tuple = (occ, unocc)
excitations.append(exc_tuple)
logger.debug("Added the excitation: %s", exc_tuple)
return excitations
def _validate_num_particles(self, num_particles):
try:
assert num_particles[0] == num_particles[1]
except AssertionError as exc:
raise QiskitNatureError(
"The SUCCD Ansatz only works for singlet-spin systems. However, you specified "
"differing numbers of alpha and beta electrons:",
str(num_particles),
) from exc
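
# Illustrative sketch (not part of the upstream class): the pairing logic of
# `generate_excitations` reproduced standalone for a minimal system, so the
# symmetric spin-flip invariance is easy to see. With 4 spin orbitals and
# (1, 1) particles the single alpha excitation (0 -> 1) is mirrored into the
# beta block as (2 -> 3), giving the one paired double excitation
# ((0, 2), (1, 3)). The default arguments are assumptions chosen for brevity.
def _example_succd_pairing(num_spin_orbitals=4, num_particles=(1, 1)):
    num_electrons = num_particles[0]
    shift = num_spin_orbitals // 2
    occ = list(range(num_electrons))
    unocc = list(range(num_electrons, shift))
    singles = list(itertools.product(occ, unocc))  # [(0, 1)]
    doubles = []
    for first, second in itertools.combinations_with_replacement(singles, 2):
        beta = (second[0] + shift, second[1] + shift)  # mirror into the beta block
        doubles.append(tuple(zip(first, beta)))  # ((0, 2), (1, 3))
    return doubles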
|
py | 1a4c8c35f39868bbfea9ec17866547cedf774569 | from openmoc import *
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
from openmoc.options import Options
###############################################################################
####################### Main Simulation Parameters ########################
###############################################################################
options = Options()
num_threads = options.getNumThreads()
track_spacing = options.getTrackSpacing()
num_azim = options.getNumAzimAngles()
tolerance = options.getTolerance()
max_iters = options.getMaxIterations()
log.set_log_level('NORMAL')
log.py_printf('TITLE', 'Simulating the LRA Benchmark Problem...')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from py...')
materials = materialize.materialize('LRA-materials.py')
region1 = materials['region_1'].getId()
region2 = materials['region_2'].getId()
region3 = materials['region_3'].getId()
region4 = materials['region_4'].getId()
region5 = materials['region_5'].getId()
region6 = materials['region_6'].getId()
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
planes = []
planes.append(XPlane(x=-82.5))
planes.append(XPlane(x=82.5))
planes.append(YPlane(y=-82.5))
planes.append(YPlane(y=82.5))
planes[0].setBoundaryType(REFLECTIVE)
planes[1].setBoundaryType(VACUUM)
planes[2].setBoundaryType(REFLECTIVE)
planes[3].setBoundaryType(VACUUM)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
cells = []
cells.append(CellBasic(universe=1, material=region1))
cells.append(CellBasic(universe=2, material=region2))
cells.append(CellBasic(universe=3, material=region3))
cells.append(CellBasic(universe=4, material=region4))
cells.append(CellBasic(universe=5, material=region5))
cells.append(CellBasic(universe=6, material=region6))
cells.append(CellFill(universe=21, universe_fill=31))
cells.append(CellFill(universe=22, universe_fill=32))
cells.append(CellFill(universe=23, universe_fill=33))
cells.append(CellFill(universe=24, universe_fill=34))
cells.append(CellFill(universe=25, universe_fill=35))
cells.append(CellFill(universe=26, universe_fill=36))
cells.append(CellFill(universe=0, universe_fill=7))
cells[12].addSurface(halfspace=+1, surface=planes[0])
cells[12].addSurface(halfspace=-1, surface=planes[1])
cells[12].addSurface(halfspace=+1, surface=planes[2])
cells[12].addSurface(halfspace=-1, surface=planes[3])
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating LRA lattice...')
assembly1 = Lattice(id=31, width_x=1.5, width_y=1.5)
assembly1.setLatticeCells([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
assembly2 = Lattice(id=32, width_x=1.5, width_y=1.5)
assembly2.setLatticeCells([[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]])
assembly3 = Lattice(id=33, width_x=1.5, width_y=1.5)
assembly3.setLatticeCells([[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3]])
assembly4 = Lattice(id=34, width_x=1.5, width_y=1.5)
assembly4.setLatticeCells([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[4, 4, 4, 4, 4, 4, 4, 4, 4, 4]])
assembly5 = Lattice(id=35, width_x=1.5, width_y=1.5)
assembly5.setLatticeCells([[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5]])
assembly6 = Lattice(id=36, width_x=1.5, width_y=1.5)
assembly6.setLatticeCells([[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6]])
core = Lattice(id=7, width_x=15.0, width_y=15.0)
core.setLatticeCells([[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26],
[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26],
[23, 23, 23, 23, 23, 23, 23, 26, 26, 26, 26],
[23, 23, 23, 23, 23, 23, 23, 24, 26, 26, 26],
[22, 21, 21, 21, 21, 22, 22, 25, 25, 26, 26],
[22, 21, 21, 21, 21, 22, 22, 25, 25, 26, 26],
[21, 21, 21, 21, 21, 21, 21, 23, 23, 26, 26],
[21, 21, 21, 21, 21, 21, 21, 23, 23, 26, 26],
[21, 21, 21, 21, 21, 21, 21, 23, 23, 26, 26],
[21, 21, 21, 21, 21, 21, 21, 23, 23, 26, 26],
[22, 21, 21, 21, 21, 22, 22, 23, 23, 26, 26]])
###############################################################################
########################## Creating Cmfd mesh ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating Cmfd mesh...')
cmfd = Cmfd()
cmfd.setLatticeStructure(110,110)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = Geometry()
geometry.setCmfd(cmfd)
for material in materials.values(): geometry.addMaterial(material)
for cell in cells: geometry.addCell(cell)
geometry.addLattice(assembly1)
geometry.addLattice(assembly2)
geometry.addLattice(assembly3)
geometry.addLattice(assembly4)
geometry.addLattice(assembly5)
geometry.addLattice(assembly6)
geometry.addLattice(core)
geometry.initializeFlatSourceRegions()
###############################################################################
######################## Creating the TrackGenerator ######################
###############################################################################
log.py_printf('NORMAL', 'Initializing the track generator...')
track_generator = TrackGenerator(geometry, num_azim, track_spacing)
track_generator.setNumThreads(num_threads)
track_generator.generateTracks()
###############################################################################
########################### Running a Simulation ##########################
###############################################################################
solver = CPUSolver(geometry, track_generator)
solver.setSourceConvergenceThreshold(tolerance)
solver.setNumThreads(num_threads)
solver.convergeSource(max_iters)
solver.printTimerReport()
###############################################################################
############################ Generating Plots #############################
###############################################################################
log.py_printf('NORMAL', 'Plotting data...')
#plotter.plot_tracks(track_generator)
#plotter.plot_materials(geometry, gridsize=500)
#plotter.plot_cells(geometry, gridsize=500)
#plotter.plot_flat_source_regions(geometry, gridsize=500)
#plotter.plot_fluxes(geometry, solver, energy_groups=[1,2,3,4,5,6,7])
#plotter.plot_mesh_fluxes(mesh, energy_groups=[1,2,3,4,5,6,7])
#plotter.plot_cmfd_cells(geometry, cmfd, gridsize=500)
log.py_printf('TITLE', 'Finished')
|
py | 1a4c8cc634c9897ed53fc13e4a5ee8e3011c0797 | # -*- coding: utf-8 -*-
# This scaffolding model makes your app work on Google App Engine too
# File is released under public domain and you can use without limitations
if request.global_settings.web2py_version < "2.14.1":
    raise HTTP(500, "Requires web2py 2.14.1 or newer")
# if SSL/HTTPS is properly configured and you want all HTTP requests to
# be redirected to HTTPS, uncomment the line below:
# request.requires_https()
# app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
# once in production, remove reload=True to gain full speed
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
# if NOT running on Google App Engine use SQLite or other DB
db = DAL(myconf.get('db.uri'),
pool_size=myconf.get('db.pool_size'),
migrate_enabled=myconf.get('db.migrate'),
check_reserved=['all'])
# I like to keep the session in the db.
session.connect(request, response, db=db)
else:
# connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore+ndb')
# store sessions and tickets there
session.connect(request, response, db=db)
#
# or store session in Memcache, Redis, etc.
# from gluon.contrib.memdb import MEMDB
# from google.appengine.api.memcache import Client
# session.connect(request, response, db = MEMDB(Client()))
# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
# choose a style for forms
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
# (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
# (optional) static assets folder versioning
# response.static_version = '0.0.0'
# Here is sample code if you need for
# - email capabilities
# - authentication (registration, login, logout, ... )
# - authorization (role based authorization)
# - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
# - old style crud actions
# (more options discussed in gluon/tools.py)
from gluon.tools import Auth, Service, PluginManager
# host names must be a list of allowed host names (glob syntax allowed)
auth = Auth(db, host_names=myconf.get('host.names'))
service = Service()
plugins = PluginManager()
# create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
# configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
# configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
# >>> for row in rows: print row.id, row.myfield
######################
# Logging
import logging, sys
FORMAT = "%(asctime)s %(levelname)s %(process)s %(thread)s %(funcName)s():%(lineno)d %(message)s"
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger(request.application)
logger.setLevel(logging.INFO)
# Let's log the request.
logger.info("====> Request: %r %r" % (request.env.request_method, request.env.path_info))
|
py | 1a4c8d4865b27b7c58e69c4d3910a25be4d6e917 | from nturgbd_rnn import *
|
py | 1a4c8e2b76d04267f1956a50c810b9566c05aca1 | """
Rewrite imports of names that moved out of django.forms.forms in Django 1.9:
pretty_name now lives in django.forms.utils and BoundField in django.forms.boundfield.
"""
import ast
from functools import partial
from typing import Iterable, Tuple
from tokenize_rt import Offset
from django_upgrade.ast import ast_start_offset, is_rewritable_import_from
from django_upgrade.data import Fixer, State, TokenFunc
from django_upgrade.tokens import update_import_modules
fixer = Fixer(
__name__,
min_version=(1, 9),
)
REWRITES = {
"django.forms.forms": {
"pretty_name": "django.forms.utils",
"BoundField": "django.forms.boundfield",
},
}
@fixer.register(ast.ImportFrom)
def visit_ImportFrom(
state: State,
node: ast.ImportFrom,
parent: ast.AST,
) -> Iterable[Tuple[Offset, TokenFunc]]:
if (
node.module in REWRITES
and is_rewritable_import_from(node)
and any(alias.name in REWRITES[node.module] for alias in node.names)
):
yield ast_start_offset(node), partial(
update_import_modules, node=node, module_rewrites=REWRITES[node.module]
)
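
# Illustrative sketch (not part of the fixer): the kind of rewrite this fixer
# performs on user code, shown as an approximate before/after pair. Exact output
# formatting is determined by `update_import_modules`.
#
#   before:  from django.forms.forms import BoundField, pretty_name
#   after:   from django.forms.boundfield import BoundField
#            from django.forms.utils import pretty_name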
|
py | 1a4c900c0a3d188ff7953d5a3ff3908896856844 | for i in range(2**9):
binaryString = "{0:b}".format(i).rjust(9,'0')
negbours = 0
for i,n in enumerate(binaryString):
if (i != 4) and (n == '1'):
negbours += 1
if (binaryString[4] == '1'):
#alive
if (negbours == 2) or (negbours == 3):
binaryString += ' 1'
else:
binaryString += ' 0'
else:
#dead
if negbours == 3 :
binaryString += ' 1'
else:
binaryString += ' 0'
print(binaryString)
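
# Illustrative sketch (not part of the original script): the same rule applied
# directly to a 3x3 neighbourhood encoded as a 9-character string of '0'/'1',
# with the centre cell at index 4 - i.e. what each printed table row encodes.
# The function is only defined here as an example and is not called.
def apply_rule(neighbourhood):
    alive = neighbourhood[4] == '1'
    neighbours = sum(c == '1' for i, c in enumerate(neighbourhood) if i != 4)
    return '1' if neighbours == 3 or (alive and neighbours == 2) else '0'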
|
py | 1a4c90c2b0d2e9a1f61a4a1f2733859357242d88 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains a scaffold of a handler."""
from typing import Optional, cast
from aea.protocols.base import Message
from aea.skills.base import Handler
from packages.fetchai.protocols.register.message import RegisterMessage
from packages.fetchai.protocols.signing.message import SigningMessage
from packages.fetchai.skills.registration_aw1.dialogues import (
RegisterDialogue,
RegisterDialogues,
SigningDialogue,
SigningDialogues,
)
from packages.fetchai.skills.registration_aw1.strategy import Strategy
class AW1RegistrationHandler(Handler):
"""This class handles register messages."""
SUPPORTED_PROTOCOL = RegisterMessage.protocol_id
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
def handle(self, message: Message) -> None:
"""
Implement the reaction to an envelope.
:param message: the message
:return: None
"""
register_msg = cast(RegisterMessage, message)
# recover dialogue
register_dialogues = cast(RegisterDialogues, self.context.register_dialogues)
register_dialogue = cast(
Optional[RegisterDialogue], register_dialogues.update(register_msg)
)
if register_dialogue is None:
self._handle_unidentified_dialogue(register_msg)
return
# handle message
if register_msg.performative is RegisterMessage.Performative.SUCCESS:
self._handle_success(register_msg, register_dialogue)
elif register_msg.performative is RegisterMessage.Performative.ERROR:
self._handle_error(register_msg, register_dialogue)
else:
self._handle_invalid(register_msg, register_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
def _handle_unidentified_dialogue(self, register_msg: RegisterMessage) -> None:
"""
Handle an unidentified dialogue.
:param msg: the message
"""
self.context.logger.info(
f"received invalid register_msg message={register_msg}, unidentified dialogue."
)
def _handle_success(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received register_msg success message={register_msg} in dialogue={register_dialogue}."
)
self.context.logger.info(
f"received register message success, info={register_msg.info}. Stop me now!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.is_registered = True
strategy.is_registration_pending = False
strategy.is_ready_to_register = False
def _handle_error(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received register_msg error message={register_msg} in dialogue={register_dialogue}."
)
self.context.logger.info(
f"received register message error, error_msg={register_msg.error_msg}. Stop me now!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.is_registration_pending = False
strategy.is_ready_to_register = False
def _handle_invalid(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.warning(
f"cannot handle register_msg message of performative={register_msg.performative} in dialogue={register_dialogue}."
)
class SigningHandler(Handler):
"""Implement the transaction handler."""
SUPPORTED_PROTOCOL = SigningMessage.protocol_id
def setup(self) -> None:
"""Implement the setup for the handler."""
def handle(self, message: Message) -> None:
"""
Implement the reaction to a message.
:param message: the message
:return: None
"""
signing_msg = cast(SigningMessage, message)
# recover dialogue
signing_dialogues = cast(SigningDialogues, self.context.signing_dialogues)
signing_dialogue = cast(
Optional[SigningDialogue], signing_dialogues.update(signing_msg)
)
if signing_dialogue is None:
self._handle_unidentified_dialogue(signing_msg)
return
# handle message
if signing_msg.performative is SigningMessage.Performative.SIGNED_MESSAGE:
self._handle_signed_message(signing_msg, signing_dialogue)
elif signing_msg.performative is SigningMessage.Performative.ERROR:
self._handle_error(signing_msg, signing_dialogue)
else:
self._handle_invalid(signing_msg, signing_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
def _handle_unidentified_dialogue(self, signing_msg: SigningMessage) -> None:
"""
Handle an unidentified dialogue.
:param msg: the message
"""
self.context.logger.info(
f"received invalid signing message={signing_msg}, unidentified dialogue."
)
def _handle_signed_message(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle a signed message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received signing message from decision maker, message={signing_msg} in dialogue={signing_dialogue}"
)
self.context.logger.info(
f"received signing message from decision maker, signature={signing_msg.signed_message.body} stored!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.signature_of_ethereum_address = signing_msg.signed_message.body
strategy.is_ready_to_register = True
def _handle_error(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.info(
f"transaction signing was not successful. Error_code={signing_msg.error_code} in dialogue={signing_dialogue}"
)
def _handle_invalid(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.warning(
f"cannot handle signing message of performative={signing_msg.performative} in dialogue={signing_dialogue}."
)
|
py | 1a4c91acc1f6410e2402bdd46dcee2432c539473 | from rest_framework.views import APIView
from mysystem.models import Users
from apps.oauth.models import OAuthWXUser
from utils.jsonResponse import SuccessResponse,ErrorResponse
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
from utils.common import get_parameter_dic,REGEX_MOBILE
from config import WX_XCX_APPID,WX_XCX_APPSECRET,WX_GZH_APPID,WX_GZH_APPSECRET,WX_GZPT_APPSECRET,WX_GZPT_APPID
import requests
import base64
import json
from Crypto.Cipher import AES
from django.utils.translation import gettext_lazy as _
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
import re
import uuid
from django.db import transaction
from django.db.models import F
from django.core.cache import cache
import logging
from django_redis import get_redis_connection
logger = logging.getLogger(__name__)
# Create your views here.
# ================================================= #
# ************** WeChat mini program login view ************** #
# ================================================= #
class XCXLoginSerializer(TokenObtainPairSerializer):
"""
    Login serializer:
    overrides the djangorestframework-simplejwt serializer
"""
@classmethod
def get_token(cls, user):
refresh = super(XCXLoginSerializer,cls).get_token(user)
data = {}
data['openid'] = user.oauthwxuser.xcx_openid
data['userId'] = user.id
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
return data
'''
WeChat Crypt
'''
class WeChatCrypt:
def __init__(self, appId, sessionKey):
self.appId = appId
self.sessionKey = sessionKey
def decrypt(self, encryptedData, iv):
# base64 decode
sessionKey = base64.b64decode(self.sessionKey)
encryptedData = base64.b64decode(encryptedData)
iv = base64.b64decode(iv)
cipher = AES.new(sessionKey, AES.MODE_CBC, iv)
decrypted = json.loads(self._unpad(cipher.decrypt(encryptedData)))
if decrypted['watermark']['appid'] != self.appId:
raise Exception('Invalid Buffer')
return decrypted
def _unpad(self, s):
return s[:-ord(s[len(s)-1:])]
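
# Illustrative sketch (assumption, not called anywhere in this module): typical use
# of the WeChatCrypt helper above once a session_key has been obtained from the
# code2session endpoint; encrypted_data and iv come from the mini program frontend.
def _example_decrypt(session_key, encrypted_data, iv):
    crypt = WeChatCrypt(WX_XCX_APPID, session_key)
    return crypt.decrypt(encrypted_data, iv)  # dict, e.g. with a phoneNumber field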
# Get the WeChat user's openid and related user info
def get_wechat_login_code_url(jscode):
api_url = 'https://api.weixin.qq.com/sns/jscode2session?appid={0}&secret={1}&js_code={2}&grant_type=authorization_code'
get_url = api_url.format(WX_XCX_APPID,WX_XCX_APPSECRET,jscode)
r = requests.get(get_url)
return r
# WeChat mini program login endpoint
class WeChatXCXLoginAPIView(APIView):
"""
post:
    WeChat mini program login endpoint.
    Exchanges the mini program code for an openid.
"""
permission_classes = []
authentication_classes = []
    @transaction.atomic  # open a transaction; it is committed automatically when the method finishes
def post(self, request):
jscode = get_parameter_dic(request)['code']
        inviter = get_parameter_dic(request).get('inviter')  # the promoter's user id
if not jscode:
return ErrorResponse(msg="code不能为空")
resp = get_wechat_login_code_url(jscode)
openid = None
session_key = None
unionid = None
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
# json_data = {'errcode':0,'openid':'111','session_key':'test'}
json_data =json.loads(resp.content)
if 'errcode' in json_data:#如果获取失败返回失败信息
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
session_key = json_data['session_key']
if "unionid" in json_data:
unionid = json_data['unionid']
        # Check whether the user already exists
try:
wxuser = Users.objects.get(username=openid)
wxuser.oauthwxuser.session_key = session_key # 小写oauthwxuser 表示关联的外键
wxuser.oauthwxuser.xcx_openid = openid
wxuser.oauthwxuser.unionId = unionid
wxuser.oauthwxuser.save()
resdata = XCXLoginSerializer.get_token(wxuser)
return SuccessResponse(data=resdata, msg="success")
except Exception as e:
with transaction.atomic():
savepoint = transaction.savepoint()
user = Users()
user.username = openid
user.password = uuid.uuid4() # 先随机生成一个密码,防止别人获取openid直接被登录情况
user.identity = [0] # 用户身份0表示普通用户
user.save()
OAuthWXUser.objects.create(user=user,session_key=session_key,xcx_openid=openid,unionid=unionid)
# if inviter: # 如果存在邀请码
# integral = FenXiaoManage.objects.filter(type=1, status=True).values_list('content', flat=True).first()
# if integral: # 如果推广积分活动还存在
# Users.objects.filter(id=inviter).update(integral=F('integral') + int(integral))
# InviteRecord.objects.create(inv_user_id=inviter, invitee_user=user, get_integral=integral)
# IntegralRecord.objects.create(user_id=inviter,type=4,income=1,integral=integral)
                # commit the savepoint
transaction.savepoint_commit(savepoint)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata, msg="success")
def filter_emoji(desstr, restr=''):
    # strip emoji characters
try:
res = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
res = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return res.sub(restr, desstr)
# WeChat mini program phone-number authorization login endpoint
class WeChatXCXMobileLoginAPIView(APIView):
"""
post:
    WeChat mini program phone-number authorization login endpoint.
    Exchanges the mini program code for an openid and decrypts the phone number
    from the encryptedData sent by the frontend.
"""
permission_classes = []
authentication_classes = []
def post(self, request):
        inviter = get_parameter_dic(request).get('inviter')  # invitation code: the promoter's user id
jscode = get_parameter_dic(request)['code']
iv = get_parameter_dic(request)['iv']
encryptedData = get_parameter_dic(request)['encryptedData']
nickname = get_parameter_dic(request)['nickname']
avatar_url = get_parameter_dic(request)['avatar_url']
gender = get_parameter_dic(request)['gender']
nickname = filter_emoji(nickname, '')
if jscode is None:
return ErrorResponse(msg="code不能为空")
if iv is None:
return ErrorResponse(msg="iv不能为空")
if encryptedData is None:
return ErrorResponse(msg="encryptedData不能为空")
if avatar_url is None:
return ErrorResponse(msg="avatar_url不能为空")
resp = get_wechat_login_code_url(jscode)
openid = None
session_key = None
unionid = ""
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
# json_data = {'errcode':0,'openid':'111','session_key':'test'}
json_data =json.loads(resp.content)
if 'errcode' in json_data:#如果获取失败返回失败信息
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
session_key = json_data['session_key']
if "unionid" in json_data:
unionid = json_data['unionid']
wxdc = WeChatCrypt(WX_XCX_APPID, session_key)
pResult = wxdc.decrypt(encryptedData, iv)
        # Check whether the user already exists
try:
wxuser = Users.objects.get(username = openid)
if not wxuser.is_active:
return ErrorResponse(msg="该用户已禁用,请联系管理员")
wxuser.oauthwxuser.session_key = session_key#小写oauthwxuser 表示关联的外键
wxuser.oauthwxuser.xcx_openid = openid
wxuser.oauthwxuser.unionId = unionid
wxuser.oauthwxuser.avatarUrl=avatar_url
wxuser.oauthwxuser.sex = gender
wxuser.oauthwxuser.mobilePhoneNumber = pResult['phoneNumber']
wxuser.oauthwxuser.nick = nickname
wxuser.oauthwxuser.save()
wxuser.nickname = nickname
wxuser.avatar = avatar_url
wxuser.gender = gender
wxuser.save()
resdata = XCXLoginSerializer.get_token(wxuser)
return SuccessResponse(data=resdata,msg="success")
        except Exception as e:  # new user
with transaction.atomic():
try:
savepoint = transaction.savepoint()
user = Users()
user.username = openid
user.password = uuid.uuid4() #先随机生成一个密码,防止别人获取openid直接被登录情况
user.identity=[0]#用户身份0表示普通用户
user.nickname = nickname
user.name = nickname
user.avatar = avatar_url
user.mobile = pResult['phoneNumber']
user.save()
OAuthWXUser.objects.create(user=user,session_key=session_key,xcx_openid=openid,avatarUrl=avatar_url,sex=gender,mobilePhoneNumber=pResult['phoneNumber'],nick=nickname)
# if inviter:#如果存在邀请码
# integral = FenXiaoManage.objects.filter(type=1,status=True).values_list('content',flat=True).first()
# if integral:#如果推广积分活动还存在
# Users.objects.filter(id=inviter).update(integral=F('integral')+int(integral))
# InviteRecord.objects.create(inv_user_id=inviter,invitee_user=user,get_integral=integral)
# IntegralRecord.objects.create(user_id=inviter, type=4, income=1, integral=integral)
except Exception as e:
transaction.savepoint_rollback(savepoint)
return ErrorResponse(msg=str(e))
                # commit the savepoint
transaction.savepoint_commit(savepoint)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata, msg="success")
# WeChat mini program: update (fetch) user info from wx.getUserInfo by decrypting the returned user data
class XCXWeChatUserInfoUpdateAPIView(APIView):
"""
post:
    Update the WeChat mini program user's info
"""
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
def post(self, request):
encryptedData = get_parameter_dic(request)['encryptedData']
iv = get_parameter_dic(request)['iv']
if not encryptedData:
return ErrorResponse(msg="encryptedData不能为空")
if not iv:
return ErrorResponse(msg="iv不能为空")
wechat_user = OAuthWXUser.objects.filter(user=request.user).first()
if not wechat_user:
return ErrorResponse(msg="无此用户")
pc = WeChatCrypt(WX_XCX_APPID, wechat_user.session_key)
user = pc.decrypt(encryptedData, iv)
wechat_user.nick = user['nickName']
wechat_user.sex = user['gender']
wechat_user.city = user['city']
wechat_user.avatarUrl = user['avatarUrl']
wechat_user.save()
myuser = request.user
myuser.nickname = user['nickName']
myuser.avatar = user['avatarUrl']
return SuccessResponse(data=user,msg="success")
# ================================================= #
# ************** WeChat mini program: generate promotional mini program QR code view ************** #
# ================================================= #
# Get the mini program's access_token
"""
Normal response - the access_token is currently valid for 2 hours; fetching it again invalidates the previously issued one
{"access_token":"ACCESS_TOKEN","expires_in":7200}
Error response
{"errcode":40013,"errmsg":"invalid appid"}
"""
def get_wechat_xcx_access_token_url():
api_url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
get_url = api_url.format(WX_XCX_APPID,WX_XCX_APPSECRET)
r = requests.get(get_url)
return r
# This URL can generate an unlimited number of QR codes; the QR code is returned as a buffer (only a released mini program can generate it, not a trial version)
"""
Normal response
{
"errcode": 0,
"errmsg": "ok",
"contentType": "image/jpeg",
"buffer": Buffer
}
"""
def get_wechat_qrcode_url(access_token,scene,page,width=430,auto_color=True,is_hyaline=False):
if not page:
page = "pages/index/index"
api_url = 'https://api.weixin.qq.com/wxa/getwxacodeunlimit?access_token={0}'
get_url = api_url.format(access_token)
headers = {"Content-type":"application/json"}
data = dict(scene=scene,page=page,width=width,auto_color=auto_color,is_hyaline=is_hyaline)
r = requests.post(url=get_url,data=json.dumps(data),headers=headers)
return r
class GetXCXShareQrcodeView(APIView):
"""
post:
    Get a promotional QR code for the WeChat mini program
    scene: the sharing user's userid
    page: the page to jump to
"""
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
def post(self,request):
        scene = get_parameter_dic(request)['scene']  # the sharing user's userid
page = get_parameter_dic(request)['page']
if scene is None or page is None:
return ErrorResponse("提交参数不能为空")
restoken = get_wechat_xcx_access_token_url()
if restoken.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试1")
json_data = json.loads(restoken.content)
if 'errcode' in json_data and json_data['errcode'] !=0: # 如果获取失败返回失败信息
return ErrorResponse(msg=json_data['errmsg'])
access_token = json_data['access_token']
        res = get_wechat_qrcode_url(access_token, scene, page)
        if res.status_code != 200:
            return ErrorResponse(msg="服务器到微信网络连接失败,请重试2")
        # On success this WeChat endpoint returns the raw image buffer, not JSON; JSON only comes back on error.
        if res.headers.get('Content-Type', '').startswith('application/json'):
            json_data2 = json.loads(res.content)
            return ErrorResponse(msg=json_data2.get('errmsg', str(json_data2)))
        import base64  # returning the image base64-encoded is a design choice made here
        qrcode_base64 = base64.b64encode(res.content).decode()
        return SuccessResponse(data={'qrcode_base64': qrcode_base64}, msg="success")
# ================================================= #
# ************** WeChat mini program service notification (subscribe message) views ************** #
# ================================================= #
"""
1. form_id: the id of the submitted form (or the prepay_id when paying)
2. data: the request payload, e.g.
push_data={
    "keyword1":{
        "value":obj.order_sn
    },
    "keyword2":{
        "value":obj.time
    },
}
"""
def send_wx_xcx_message(access_token,openid,template_id,form_id,push_data):
api_url = "https://api.weixin.qq.com/cgi-bin/message/subscribe/send?access_token={0}"
get_url = api_url.format(access_token)
    payload = {
        "touser": openid,  # the recipient's openid
        "template_id": template_id,  # subscribe-message template id
        "form_id": form_id,  # form id or prepay_id
        "data": push_data
    }
r = requests.post(get_url,json=payload)
return r
def send_wx_xcx_message_cache(openid,template_id,form_id,push_data):
access_token = cache.get('xcx_access_token')
    if access_token:  # a cached access_token exists
        res = send_wx_xcx_message(access_token, openid, template_id, form_id, push_data)
        json_data = json.loads(res.content)
        if 'errcode' in json_data and json_data['errcode'] != 0:  # the send failed
            if json_data['errcode'] == 40001:  # cached token expired: fetch a fresh one and retry once
                restoken = get_wechat_xcx_access_token_url()
                json_data1 = json.loads(restoken.content)
                access_token1 = json_data1['access_token']
                res1 = send_wx_xcx_message(access_token1, openid, template_id, form_id, push_data)
                json_data2 = json.loads(res1.content)
                if 'errcode' in json_data2 and json_data2['errcode'] != 0:
                    logger.error("微信小程序发送消息服务错误,用户openid:%s,template_id:%s,form_id:%s,data:%s,微信返回错误信息:%s" % (openid, template_id, form_id, push_data, json_data2))
                    return False
                cache.set('xcx_access_token', access_token1, 7000)
                return True
            logger.error("微信小程序发送消息服务错误,用户openid:%s,template_id:%s,form_id:%s,data:%s,微信返回错误信息:%s" % (openid, template_id, form_id, push_data, json_data))
            return False
        return True
    else:  # no cached token: fetch a new access_token, send, then cache the fresh token
        restoken = get_wechat_xcx_access_token_url()
        json_data1 = json.loads(restoken.content)
        access_token1 = json_data1['access_token']
        res1 = send_wx_xcx_message(access_token1, openid, template_id, form_id, push_data)
        json_data2 = json.loads(res1.content)
        if 'errcode' in json_data2 and json_data2['errcode'] != 0:
            logger.error("微信小程序发送消息服务错误,用户openid:%s,template_id:%s,form_id:%s,data:%s,微信返回错误信息:%s" % (
                openid, template_id, form_id, push_data, json_data2))
            return False
        cache.set('xcx_access_token', access_token1, 7000)
        return True
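# --- Usage sketch (illustrative, not part of the original module) ---
# Shows the push_data layout documented above; the template id, form id and keyword
# names are hypothetical placeholders for a real subscribe-message template.
def example_send_order_notification(openid):
    push_data = {
        "keyword1": {"value": "SN20220101001"},    # e.g. the order number
        "keyword2": {"value": "2022-01-01 12:00"},  # e.g. the order time
    }
    return send_wx_xcx_message_cache(openid, "TEMPLATE_ID_PLACEHOLDER", "FORM_ID_PLACEHOLDER", push_data)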
# ================================================= #
# ************** WeChat official account / app OAuth login views ************** #
# ================================================= #
# Exchange the code for an access_token and openid; the code is obtained by the front end and passed in
"""
Successful response
{
    "access_token": "ACCESS_TOKEN",  (valid for 2 hours)
    "expires_in": 7200,
    "refresh_token": "REFRESH_TOKEN",  (valid for 30 days)
    "openid": "OPENID",
    "scope": "SCOPE",
    "unionid": "o6_bmasdasdsad6_2sgVt7hMZOPfL"
}
Error response
{
    "errcode": 40029,
    "errmsg": "invalid code"
}
"""
def get_wechat_access_token_url(code):
api_url = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
get_url = api_url.format(WX_GZPT_APPID,WX_GZPT_APPSECRET,code)
r = requests.get(get_url)
return r
# Get the WeChat user's public profile information
"""
Successful response
{
    "openid": "OPENID",
    "nickname": "NICKNAME",
    "sex": 1,
    "province": "PROVINCE",
    "city": "CITY",
    "country": "COUNTRY",
    "headimgurl": "https://thirdwx.qlogo.cn/mmopen/g3MonUZtNHkdmzicIlibx6iaFqAc56vxLSUfpb6n5WKSYVY0ChQKkiaJSgQ1dZuTOgvLLrhJbERQQ4eMsv84eavHiaiceqxibJxCfHe/0",
    "privilege": ["PRIVILEGE1", "PRIVILEGE2"],
    "unionid": " o6_bmasdasdsad6_2sgVt7hMZOPfL"
}
Error response
{
    "errcode": 40003,
    "errmsg": "invalid openid"
}
"""
def getWxUserInfo(access_token,openid):
api_url = "https://api.weixin.qq.com/sns/userinfo?access_token={0}&openid={1}"
get_url = api_url.format(access_token,openid)
r = requests.get(get_url)
return r
# Check whether the access_token authorization credential is still valid
"""
Valid response
{
    "errcode": 0,
    "errmsg": "ok"
}
"""
def is_access_token_valid(access_token, openid):
api_url = "https://api.weixin.qq.com/sns/auth?access_token={0}&openid={1}"
get_url = api_url.format(access_token, openid)
r = requests.get(get_url)
return r
# Refresh an expired access_token using the refresh_token
"""
Valid response
{
    "access_token": "ACCESS_TOKEN",
    "expires_in": 7200,
    "refresh_token": "REFRESH_TOKEN",
    "openid": "OPENID",
    "scope": "SCOPE"
}
Error response
{
    "errcode": 40030,
    "errmsg": "invalid refresh_token"
}
"""
def refresh_access_token(refresh_token):
api_url = "https://api.weixin.qq.com/sns/oauth2/refresh_token?appid={0}&grant_type=refresh_token&refresh_token={1}"
get_url = api_url.format(WX_GZPT_APPID,refresh_token)
r = requests.get(get_url)
return r
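# --- Usage sketch (illustrative, not part of the original module) ---
# Typical refresh flow built from the two helpers above: probe the token with
# is_access_token_valid and fall back to refresh_access_token when the probe fails.
# Where the caller stores access_token/refresh_token is left open here.
def example_ensure_valid_access_token(access_token, refresh_token, openid):
    check = json.loads(is_access_token_valid(access_token, openid).content)
    if check.get('errcode', 0) == 0:
        return access_token
    refreshed = json.loads(refresh_access_token(refresh_token).content)
    return refreshed.get('access_token')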
# WeChat official account / app login endpoint
class WeChatGZHLoginAPIView(APIView):
    """
    post:
    WeChat official account login endpoint
    Exchanges the official account code for an openid and access_token
    """
permission_classes = []
authentication_classes = []
def post(self, request):
jscode = get_parameter_dic(request)['code']
if not jscode:
return ErrorResponse(msg="code不能为空")
resp = get_wechat_access_token_url(jscode)
openid = ""
unionid = ""
access_token = ""
refresh_token = ""
scope = None
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
json_data =json.loads(resp.content)
if 'errcode' in json_data and json_data['errcode'] !=0:#如果获取失败返回失败信息
logger.error("微信app登录服务错误,用户提交code:%s,微信返回错误信息:%s" % (jscode, json_data))
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
access_token = json_data['access_token']
refresh_token = json_data['refresh_token']
scope = json_data['scope']
if "unionid" in json_data:
unionid = json_data['unionid']
        # Check whether the user exists (use the openid to decide whether this is a first login)
        user = Users.objects.filter(is_active=True, oauthwxuser__gzh_openid=openid).first()
        if not user:  # not bound yet: ask the client to bind an account first
            return ErrorResponse(code=301, data={'openid': openid, 'is_bind': False}, msg="无此用户,请先绑定")
        # return the JWT token
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata,msg="success")
class WeChatGZHBindAPIView(APIView):
"""
绑定微信用户
post:
绑定微信用户
微信公众号openid、mobile(绑定手机号)、code(验证码)
"""
permission_classes = []
authentication_classes = []
def post(self,request):
openid = get_parameter_dic(request)['openid']
mobile = get_parameter_dic(request)['mobile']
code = get_parameter_dic(request)['code']
        # Validate the phone number format
        if not re.match(REGEX_MOBILE, mobile):
            return ErrorResponse(msg="请输入正确手机号")
        # Check whether the SMS verification code is correct
        redis_conn = get_redis_connection('verify_codes')
        send_flag = redis_conn.get('sms_%s' % mobile)  # send_flag is bytes; convert with send_flag.decode()
        if not send_flag:  # no flag stored means the verification code has expired
            return ErrorResponse(msg="短信验证码已过期")
        else:
            if str(send_flag.decode()) != str(code):
                return ErrorResponse(msg="验证码错误")
        user = Users.objects.filter(is_active=True, username=mobile + "app", identity__contains="1", oauthwxuser__isnull=True).first()
        if not user:  # either no such user or the user is already bound
            return ErrorResponse(msg="无法绑定,无此用户或已绑定")
OAuthWXUser.objects.create(user=user,gzh_openid=openid)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata,msg="success") |
py | 1a4c91b3fd326ef3ecc77dce44da51a43f5af354 | #!/usr/bin/env python
# coding: utf-8
import json
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from scipy.stats import ks_2samp
#import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.figure_factory as ff
from evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo
from evidently.widgets.widget import Widget
red = "#ed0400"
grey = "#4d4d4d"
class NumTargetCorrWidget(Widget):
def __init__(self, title: str):
super().__init__()
self.title = title
def get_info(self) -> BaseWidgetInfo:
#if self.wi:
return self.wi
#raise ValueError("No prediction data provided")
def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
if column_mapping:
date_column = column_mapping.get('datetime')
id_column = column_mapping.get('id')
target_column = column_mapping.get('target')
prediction_column = column_mapping.get('prediction')
num_feature_names = column_mapping.get('numerical_features')
if num_feature_names is None:
num_feature_names = []
else:
num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])]
cat_feature_names = column_mapping.get('categorical_features')
if cat_feature_names is None:
cat_feature_names = []
else:
cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])]
else:
date_column = 'datetime' if 'datetime' in reference_data.columns else None
id_column = None
target_column = 'target' if 'target' in reference_data.columns else None
prediction_column = 'prediction' if 'prediction' in reference_data.columns else None
utility_columns = [date_column, id_column, target_column, prediction_column]
num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))
            cat_feature_names = list(set(reference_data.select_dtypes([object]).columns) - set(utility_columns))
if target_column is not None:
#calculate corr
ref_target_corr = reference_data[num_feature_names + [target_column]].corr()[target_column]
prod_target_corr = production_data[num_feature_names + [target_column]].corr()[target_column]
#plot output correlations
target_corr = go.Figure()
target_corr.add_trace(go.Bar(y = ref_target_corr, x = ref_target_corr.index,
marker_color = grey, name = 'Reference'))
target_corr.add_trace(go.Bar(y = prod_target_corr, x = ref_target_corr.index,
marker_color = red, name = 'Production'))
target_corr.update_layout(xaxis_title = "Features", yaxis_title = "Correlation",
yaxis = dict(
range=(-1, 1),
showticklabels=True
))
target_corr_json = json.loads(target_corr.to_json())
self.wi = BaseWidgetInfo(
title=self.title,
type="big_graph",
details="",
alertStats=AlertStats(),
alerts=[],
alertsPosition="row",
insights=[],
size=1,
params={
"data": target_corr_json['data'],
"layout": target_corr_json['layout']
},
additionalGraphs=[],
)
else:
self.wi = None
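# --- Usage sketch (illustrative, not part of the original widget) ---
# Builds two small dummy dataframes and renders the correlation widget; the column
# names and reliance on the default 'target' convention are assumptions for demonstration only.
if __name__ == "__main__":
    ref = pd.DataFrame({"f1": np.random.rand(100), "f2": np.random.rand(100)})
    ref["target"] = ref["f1"] * 2 + np.random.rand(100)
    prod = ref.copy()
    widget = NumTargetCorrWidget("Target correlations")
    widget.calculate(ref, prod, column_mapping=None)
    print(widget.get_info().title)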
|
py | 1a4c92524140c388d683a18792b9d88d9a3bdbb7 | # -*- coding: utf-8 -*-
# @Time : 20-6-4 下午4:19
# @Author : zhuying
# @Company : Minivision
# @File : transform.py
# @Software : PyCharm
from __future__ import division
import math
import random
from PIL import Image
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
from anti_spoof.src.data_io import functional as F
__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "RandomHorizontalFlip",
"Lambda", "RandomResizedCrop", "ColorJitter", "RandomRotation"]
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
class ToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
class ToPILImage(object):
"""Convert a tensor or an ndarray to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
3. If the input has 1 channel, the ``mode`` is determined by the data type (i,e,
``int``, ``float``, ``short``).
.. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a probability of 0.5."""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < 0.5:
return F.hflip(img)
return img
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly cropped and resize image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = np.random.uniform(-hue, hue)
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
np.random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center)
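# --- Usage sketch (illustrative, not part of the original module) ---
# Composes the transforms above into a small augmentation pipeline and applies it to a
# dummy image; the crop size and jitter strengths are arbitrary example values, and the
# sibling functional module F is assumed to mirror the torchvision API as used above.
if __name__ == "__main__":
    train_transform = Compose([
        RandomResizedCrop(80, scale=(0.9, 1.1)),
        ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
        RandomRotation(10),
        RandomHorizontalFlip(),
        ToTensor(),
    ])
    dummy = Image.new("RGB", (128, 128), color=(127, 127, 127))
    print(train_transform(dummy).shape)  # expected: a (3, 80, 80) tensor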
|
py | 1a4c9273282d92d1bbe85ff56d44f40d9662fd55 | import json
from datetime import datetime
from flask import (
current_app,
flash,
redirect,
render_template,
session,
url_for,
)
from flask_babel import _
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import check_token
from app.main import main
from app.main.forms import NewPasswordForm
from app.main.views.two_factor import log_in_user
from app.models.user import User
@main.route('/new-password/<path:token>', methods=['GET', 'POST'])
def new_password(token):
try:
token_data = check_token(token, current_app.config['SECRET_KEY'], current_app.config['DANGEROUS_SALT'],
current_app.config['EMAIL_EXPIRY_SECONDS'])
except SignatureExpired:
flash(_('The security code in the email we sent you has expired. Enter your email address to re-send.'))
return redirect(url_for('.forgot_password'))
email_address = json.loads(token_data)['email']
user = User.from_email_address(email_address)
if user.password_changed_at and datetime.strptime(user.password_changed_at, '%Y-%m-%d %H:%M:%S.%f') > \
datetime.strptime(json.loads(token_data)['created_at'], '%Y-%m-%d %H:%M:%S.%f'):
flash(_('The security code in the email has already been used'))
return redirect(url_for('main.index'))
form = NewPasswordForm()
if form.validate_on_submit():
user.reset_failed_login_count()
session['user_details'] = {
'id': user.id,
'email': user.email_address,
'password': form.new_password.data}
if user.auth_type == 'email_auth':
# they've just clicked an email link, so have done an email auth journey anyway. Just log them in.
return log_in_user(user.id)
else:
# send user a 2fa sms code
user.send_verify_code()
return redirect(url_for('main.two_factor_sms_sent'))
else:
return render_template('views/new-password.html', token=token, form=form, user=user)
|
py | 1a4c938992afeb73cef02c3fa2bb5753201ee63d | ## Advent of Code 2019: Intcode Computer v2
## https://adventofcode.com/2019
## Jesse Williams | github.com/vblank182
# **Compatible with Day 5, Part 1**
# Changelog:
# - Added IN and OUT instructions
# - Added support for parameter modes
#~# Opcodes #~#
ADD, MUL, IN, OUT = 1, 2, 3, 4
END = 99
#~# Parameter Modes #~#
POS = 0
IMM = 1
# Numbers of expected parameters for each opcode
num_params = {1:3, 2:3, 3:1, 4:1, 99:0}
def loadProgram(inputFile):
''' Loads a program file in "0,1,2,3,..." format and returns a list of integers. '''
with open(inputFile) as f:
initialTapeStrs = f.read()[:-1].split(',')
initialTape = [int(i) for i in initialTapeStrs]
return initialTape
def runProgram(initialTape, input, debugLevel=0):
# Make a copy of the initial tape.
workTape = initialTape.copy()
running = True
output = []
ptr = 0
while running:
# Determine the current opcode and parameter modes
opcode = int( str(workTape[ptr])[-2:] ) # get the opcode from the last 2 digits of the current position
param_modes = [0]*num_params[opcode]
for i in range(num_params[opcode]):
try:
# Set param mode to digit found (scanning right-to-left from opcode)
param_modes[i] = int( str(workTape[ptr])[-3-i] )
except IndexError:
# Default to param mode 0 if no digit is found
param_modes[i] = 0
#:: [1] Addition ::#
if opcode == ADD:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left addend)
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
# Param 2 (right addend)
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
# Param 3 (sum)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] + param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [2] Multiplication ::#
elif opcode == MUL:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left multiplicand)
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
# Param 2 (right multiplicand)
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
# Param 3 (product)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] * param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [3] Input ::#
elif opcode == IN:
# Param 1 (position)
if param_modes[0] == POS:
workTape[workTape[ptr+1]] = input # store input at position in parameter (position mode)
elif param_modes[0] == IMM:
raise InvalidParameterMode(opcode, 1, param_modes[0], "Immediate mode not supported for this instruction.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [4] Output ::#
elif opcode == OUT:
# Param 1 (position)
if param_modes[0] == POS:
output.append(workTape[workTape[ptr+1]]) # write output (position mode)
elif param_modes[0] == IMM:
output.append(workTape[ptr+1]) # write output (immediate mode)
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [99] End of Program ::#
elif opcode == END: # Program finished
running = False
else:
raise UnknownOpcode(opcode, ptr, workTape, debugLevel)
return False
return output # output
## Exception Classes ##
class InvalidParameterMode(Exception):
'''Exception raised for an invalid parameter mode.'''
def __init__(self, opcode, position, param_mode, message):
print("[Error] Invalid parameter mode '{}' for parameter {} of opcode {}.\n".format(param_mode, position, opcode))
if message != "":
print(message)
class UnknownOpcode(Exception):
'''Exception raised for an unknown opcode.'''
def __init__(self, opcode, ptr, workTape, debugLevel):
if debugLevel == 1:
print("[Error] Unknown opcode '{}' at location {}. Following instructions: ".format(opcode, ptr, workTape[ptr:ptr+9]))
elif debugLevel == 2:
print("[Error] Unknown opcode '{}' at location {}.".format(opcode, ptr))
print("Current tape state:\n")
print(workTape)
else: # debug level 0
print("[Error] Unknown opcode '{}' at location {}.".format(opcode, ptr))
|
py | 1a4c93d294fcd0cbf88beb242272756a0783d64e | from collections import Counter
def read_signals():
file_name = "Data/day8.txt"
file = open(file_name, "r")
signals = []
digits = []
for line in file:
line = line.strip("\n").split(" | ")
signals.append(line[0].split())
digits.append(line[1].split())
return signals, digits
def sort_connections(signals, digits):
output = []
for signal, numbers in zip(signals, digits):
connections = {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}
letters = {i: set(connections.keys()) for i in range(2,8)}
for segments in signal:
letters[len(segments)] = letters[len(segments)].intersection(set(segments))
connections["a"] = list(letters[3] - letters[2])[0]
connections["f"] = list(letters[6].intersection(letters[2]))[0]
connections["c"] = list(letters[2] - {connections["f"]})[0]
connections["d"] = list(letters[4].intersection(letters[5]))[0]
connections["b"] = list(letters[4].intersection(letters[6]) - {connections["f"]})[0]
connections["g"] = list(letters[5].intersection(letters[6]) - {connections["a"]})[0]
connections["e"] = list(set(connections.keys()) - {connections["a"]}
- {connections["b"]} - {connections["c"]} - {connections["d"]}
- {connections["f"]} - {connections["g"]})[0]
connections = {v: k for k, v in connections.items()}
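        # Worked example of the deduction above (illustrative):
        # digit 1 lights {c, f} (the unique 2-segment pattern) and digit 7 lights {a, c, f}
        # (the unique 3-segment pattern), so letters[3] - letters[2] isolates the wire for 'a'.
        # Likewise 'f' is the only wire shared by the 2-segment pattern and every 6-segment
        # pattern (0, 6, 9), which is what letters[6].intersection(letters[2]) computes.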
for number in numbers:
number = set(connections[letter] for letter in number)
if number == {"c", "f"}:
output.append(1)
elif number == {"a", "c", "f"}:
output.append(7)
elif number == {"b", "d", "c", "f"}:
output.append(4)
elif number == {"a", "c", "d", "e", "g"}:
output.append(2)
elif number == {"a", "c", "d", "f", "g"}:
output.append(3)
elif number == {"a", "b", "d", "f", "g"}:
output.append(5)
elif number == {"a", "b", "c", "e", "f", "g"}:
output.append(0)
elif number == {"a", "b", "d", "e", "f", "g"}:
output.append(6)
elif number == {"a", "b", "c", "d", "f", "g"}:
output.append(9)
elif number == {"a", "b", "c", "d", "e", "f", "g"}:
output.append(8)
count = Counter(output)
print(f"Part one: {count[1] + count[4] + count[7] + count[8]}")
total = sum(output[i] * 10**(3 - (i%4)) for i in range(len(output)))
print(f"Part two: {total}")
if __name__ == "__main__":
signals, digits = read_signals()
sort_connections(signals, digits)
|
py | 1a4c949c0c61fe569c94c90ea148bad6123abec2 | from flask import Flask
from flask_s3_viewer import FlaskS3Viewer
from flask_s3_viewer.aws.ref import Region
import logging
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s: %(asctime)s: %(message)s'
)
app = Flask(__name__)
# For test, disable template caching
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
app.config['TEMPLATES_AUTO_RELOAD'] = True
# FlaskS3Viewer Init
FS3V_NAMESPACE = 'flask-s3-viewer'
s3viewer = FlaskS3Viewer(
app, # Flask app
namespace=FS3V_NAMESPACE, # namespace be unique
template_namespace='mdl',
object_hostname='http://flask-s3-viewer.com', # file's hostname
config={ # Bucket configs and else
'profile_name': 'test',
'access_key': None,
'secret_key': None,
'region_name': Region.SEOUL.value,
'endpoint_url': None,
'bucket_name': 'hwjeongtest',
'cache_dir': '/tmp/flask_s3_viewer',
'use_cache': True,
'ttl': 86400,
}
)
# Init another one
s3viewer.add_new_one(
object_hostname='http://namespace2.com',
namespace='np2', # namespace be unique
upload_type='presign',
config={
'profile_name': 'test',
'region_name': Region.SEOUL.value,
'bucket_name': 'hwjeongtest'
}
)
# You can see registerd configs
# print(s3viewer.FLASK_S3_VIEWER_BUCKET_CONFIGS)
# You can use boto3's session and client if you want
# print(FlaskS3Viewer.get_boto_client(FS3V_NAMESPACE))
# print(FlaskS3Viewer.get_boto_session(FS3V_NAMESPACE))
# Apply FlaskS3Viewer blueprint
s3viewer.register()
@app.route('/index')
def index ():
return 'Your app index page'
# Usage: python example.py test (run debug mode)
if __name__ == '__main__':
app.run(debug=True, port=3000)
|
py | 1a4c94d89d8f90d35832a3dda6ffd07b6f90f11b | # -*- coding: utf-8 -*-
from ldap.dn import explode_dn
from node.behaviors import Adopt
from node.behaviors import Alias
from node.behaviors import Attributes
from node.behaviors import DefaultInit
from node.behaviors import NodeChildValidate
from node.behaviors import Nodespaces
from node.behaviors import Nodify
from node.behaviors import OdictStorage
from node.behaviors import Storage
from node.behaviors.alias import DictAliaser
from node.ext.ldap._node import LDAPNode
from node.ext.ldap.base import ensure_text
from node.ext.ldap.interfaces import ILDAPGroupsConfig as IGroupsConfig
from node.ext.ldap.interfaces import ILDAPUsersConfig as IUsersConfig
from node.ext.ldap.scope import BASE
from node.ext.ldap.scope import ONELEVEL
from node.ext.ldap.ugm.defaults import creation_defaults
from node.ext.ldap.ugm.samba import sambaLMPassword
from node.ext.ldap.ugm.samba import sambaNTPassword
from node.ext.ugm import Group as UgmGroup
from node.ext.ugm import Groups as UgmGroups
from node.ext.ugm import Ugm as UgmBase
from node.ext.ugm import User as UgmUser
from node.ext.ugm import Users as UgmUsers
from node.locking import locktree
from node.utils import debug
from plumber import Behavior
from plumber import default
from plumber import finalize
from plumber import override
from plumber import plumb
from plumber import plumbing
from zope.interface import implementer
import ldap
import logging
import six
import time
logger = logging.getLogger('node.ext.ldap')
# group member format
FORMAT_DN = 0
FORMAT_UID = 1
# mapping from object-class to properties
MEMBER_LOOKUP_BY_CLASS = {
'groupOfNames': {
'format': FORMAT_DN,
'attribute': 'member',
},
'groupOfUniqueNames': {
'format': FORMAT_DN,
'attribute': 'uniqueMember',
},
'posixGroup': {
'format': FORMAT_UID,
'attribute': 'memberUid',
},
'group': {
'format': FORMAT_DN,
'attribute': 'member',
},
}
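# Example: for object classes containing 'posixGroup' the member_format()/member_attribute()
# helpers defined later in this module resolve to FORMAT_UID and 'memberUid', i.e. members
# are stored as plain uid values; 'groupOfNames'-style groups store full member DNs instead.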
# expiration unit
EXPIRATION_DAYS = 0
EXPIRATION_SECONDS = 1
class AccountExpired(object):
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __repr__(self):
return 'ACCOUNT_EXPIRED'
__str__ = __repr__
ACCOUNT_EXPIRED = AccountExpired()
class PrincipalsConfig(object):
def __init__(
self,
baseDN='',
attrmap={},
scope=ONELEVEL,
queryFilter='',
objectClasses=[],
defaults={},
strict=True,
memberOfSupport=False,
recursiveGroups=False,
memberOfExternalGroupDNs=[],
expiresAttr=None,
expiresUnit=EXPIRATION_DAYS
):
self.baseDN = baseDN
self.attrmap = attrmap
self.scope = scope
self.queryFilter = queryFilter
self.objectClasses = objectClasses
self.defaults = defaults
self.strict = strict
self.memberOfSupport = memberOfSupport
self.recursiveGroups = recursiveGroups
self.memberOfExternalGroupDNs = memberOfExternalGroupDNs
# XXX: currently expiresAttr only gets considered for user
# authentication group and role expiration is not implemented yet.
self.expiresAttr = expiresAttr
self.expiresUnit = expiresUnit
# XXX: member_relation
# self.member_relation = member_relation
@implementer(IUsersConfig)
class UsersConfig(PrincipalsConfig):
"""Define how users look and where they are.
"""
@implementer(IGroupsConfig)
class GroupsConfig(PrincipalsConfig):
"""Define how groups look and where they are.
"""
class RolesConfig(PrincipalsConfig):
"""Define how roles are mapping in LDAP. Basically a role mapping works
like a group mapping, but the id attribute is considered as the role name,
and the members set have this role granted.
"""
@plumbing(
Alias,
NodeChildValidate,
Adopt,
Nodify,
Storage,
)
class PrincipalAliasedAttributes(object):
allow_non_node_children = True
def __init__(self, context, aliaser=None):
"""
:param context: The node whose children to alias
:param aliaser: The aliaser to be used
"""
self.__name__ = context.name
self.__parent__ = None
self.context = context
self.aliaser = aliaser
@property
def storage(self):
return self.context
@property
def changed(self):
return self.context.changed
def __repr__(self):
return "Aliased " + self.context.__repr__()
class AliasedPrincipal(Behavior):
@override
def __init__(self, context, attraliaser):
self.context = context
self.attraliaser = attraliaser
@default
def principal_attributes_factory(self, name=None, parent=None):
aliased_attrs = PrincipalAliasedAttributes(
self.context.attrs,
self.attraliaser
)
return aliased_attrs
attributes_factory = finalize(principal_attributes_factory)
@default
@locktree
def __call__(self):
# add object classes from creation defaults. if missing.
# happens if object classes are added after principals were already
# created with another set of default object classes or if editing
# existing principals from a database not created with this
# API/configuration.
ocs = self.context.attrs['objectClass']
ocs = [ocs] if isinstance(ocs, six.text_type) else ocs
ocsc = len(ocs)
for oc in self.parent.context.child_defaults['objectClass']:
if oc not in ocs:
ocs.append(oc)
# reset object classes only if changed to avoid unnecessary write
# operations to LDAP backend
if ocsc != len(ocs):
self.context.attrs['objectClass'] = ocs
# finally persist
self.context()
class LDAPPrincipal(AliasedPrincipal):
@default
def add_role(self, role):
self.parent.parent.add_role(role, self)
@default
def remove_role(self, role):
self.parent.parent.remove_role(role, self)
@default
@property
def roles(self):
return self.parent.parent.roles(self)
@default
@property
def changed(self):
return self.context.changed
@default
@property
def member_of_attr(self):
"""memberOf is in openldap realized as overlay and in Active
Directory also computed. In case of openldap this attribute is not
delivered in LDAP response unless explicitly queried. Thus a separate
property is used to query memberOf information explicit.
"""
entry = self.context.ldap_session.search(
scope=BASE,
baseDN=self.context.DN,
force_reload=self.context._reload,
attrlist=['memberOf']
)
return entry[0][1].get('memberOf', list())
class LDAPUser(LDAPPrincipal, UgmUser):
@default
@property
def groups(self):
groups = self.parent.parent.groups
return [groups[uid] for uid in self.group_ids if uid in groups]
@default
@property
def group_ids(self):
groups = self.parent.parent.groups
if self.parent.parent.ucfg.memberOfSupport:
group_dns = [groups.context.DN]
group_dns += self.parent.parent.ucfg.memberOfExternalGroupDNs
res = list()
for dn in self.member_of_attr:
dn = ensure_text(dn)
matching_group_dns = {
gdn for gdn in group_dns
if dn.endswith(gdn)
}
if not matching_group_dns:
# Skip DN outside groups base DN
continue
try:
res.append(groups.idbydn(dn))
except KeyError:
# happens if DN is returned which does not fit the groups
# base DN.
pass
else:
member_format = groups._member_format
attribute = groups._member_attribute
# Support LDAP_MATCHING_RULE_IN_CHAIN (recursive/nested groups)
# See https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx
if self.parent.parent.ucfg.recursiveGroups:
attribute += ':1.2.840.113556.1.4.1941:'
if member_format == FORMAT_DN:
criteria = {attribute: self.context.DN}
elif member_format == FORMAT_UID:
criteria = {attribute: self.context.attrs['uid']}
attrlist = [groups._key_attr]
# if roles configuration points to child of groups container, and
# group configuration has search scope SUBTREE, and groups are
# specified by the same criteria as roles, the search returns the
# role id's as well.
# XXX: such edge cases should be resolved at UGM init time
matches_generator = groups.context.batched_search(
criteria=criteria,
attrlist=attrlist
)
res = [att[groups._key_attr][0] for _, att in matches_generator]
return res
@default
@property
def expired(self):
if not self.parent.expiresAttr:
return False
expires = self.context.attrs.get(self.parent.expiresAttr)
return calculate_expired(self.parent.expiresUnit, expires)
@plumbing(
LDAPUser,
Nodespaces,
Attributes,
Nodify,
)
class User(object):
pass
class LDAPGroupMapping(Behavior):
@override
def __getitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
return self.related_principals(key)[key]
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
if self._member_format == FORMAT_DN:
val = self.related_principals(key)[key].context.DN
elif self._member_format == FORMAT_UID:
val = key
# self.context.attrs[self._member_attribute].remove won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
members = self.context.attrs[self._member_attribute]
members.remove(val)
self.context.attrs[self._member_attribute] = members
# XXX: call here immediately?
self.context()
@override
def __iter__(self):
return iter(self.member_ids)
@override
def __contains__(self, key):
key = ensure_text(key)
for uid in self:
if uid == key:
return True
return False
@default
@locktree
def add(self, key):
key = ensure_text(key)
if key not in self.member_ids:
val = self.translate_key(key)
# self.context.attrs[self._member_attribute].append won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
old = self.context.attrs.get(self._member_attribute, list())
self.context.attrs[self._member_attribute] = old + [val]
# XXX: call here immediately?
# self.context()
@default
@property
def member_ids(self):
ugm = self.parent.parent
if ugm:
# XXX: roles with memberOf use rcfg!
gcfg = ugm.gcfg
if gcfg and gcfg.memberOfSupport:
users = ugm.users
criteria = {'memberOf': self.context.DN}
attrlist = [users._key_attr]
matches_generator = users.context.batched_search(
criteria=criteria,
attrlist=attrlist
)
return [
att[users._key_attr][0] for _, att in matches_generator
]
ret = list()
members = self.context.attrs.get(self._member_attribute, list())
for member in members:
if member in ['nobody', 'cn=nobody']:
continue
ret.append(member)
ret = self.translate_ids(ret)
keys = self.existing_member_ids
ret = [uid for uid in ret if uid in keys]
return ret
@default
@property
def _member_format(self):
return self.parent._member_format
@default
@property
def _member_attribute(self):
return self.parent._member_attribute
class LDAPGroup(LDAPGroupMapping, LDAPPrincipal, UgmGroup):
@default
def related_principals(self, key=None):
return self.parent.parent.users
@default
@property
def users(self):
return [self.parent.parent.users[uid] for uid in self.member_ids]
@default
@property
def existing_member_ids(self):
return self.related_principals().keys()
@default
def translate_ids(self, members):
if self._member_format != FORMAT_DN:
return members
principals = self.related_principals()
translated = list()
for dn in members:
try:
translated.append(principals.idbydn(dn))
except KeyError:
# inexistent DN
pass
return translated
@default
def translate_key(self, key):
ret = None
if self._member_format == FORMAT_DN:
principals = self.related_principals()
# make sure principal is loaded
principal = principals[key]
ret = principal.context.DN
elif self._member_format == FORMAT_UID:
ret = key
return ret
@plumbing(
LDAPGroup,
NodeChildValidate,
Nodespaces,
Attributes,
Nodify,
)
class Group(object):
pass
class LDAPPrincipals(OdictStorage):
principal_attrmap = default(None)
principal_attraliaser = default(None)
@override
def __init__(self, props, cfg):
context = LDAPNode(name=cfg.baseDN, props=props)
context.search_filter = cfg.queryFilter
context.search_scope = int(cfg.scope)
context.child_defaults = dict()
context.child_defaults['objectClass'] = cfg.objectClasses
context.child_defaults.update(cfg.defaults)
for oc in cfg.objectClasses:
for key, val in creation_defaults.get(oc, dict()).items():
if key not in context.child_defaults:
context.child_defaults[key] = val
# if cfg.member_relation:
# context.search_relation = cfg.member_relation
self._rdn_attr = cfg.attrmap['rdn']
self._key_attr = cfg.attrmap['id']
if self._key_attr not in cfg.attrmap:
cfg.attrmap[self._key_attr] = self._key_attr
self._login_attr = cfg.attrmap['id']
if cfg.attrmap.get('login'):
self._login_attr = cfg.attrmap['login']
self.expiresAttr = getattr(cfg, 'expiresAttr', None)
self.expiresUnit = getattr(cfg, 'expiresUnit', None)
self.principal_attrmap = cfg.attrmap
self.principal_attraliaser = DictAliaser(cfg.attrmap, cfg.strict)
self.context = context
@default
def idbydn(self, dn, strict=False):
"""Return a principal's id for a given dn.
Raise KeyError if not enlisted.
"""
# XXX: rename to id_by_dn
# XXX: what was strict good for? remove
# if strict:
# raise KeyError(dn)
try:
search = self.context.ldap_session.search
res = search(baseDN=dn)[0]
return ensure_text(res[1][self._key_attr][0])
except ldap.NO_SUCH_OBJECT:
raise KeyError(dn)
@override
@property
def ids(self):
return list(self.__iter__())
@default
@locktree
def __delitem__(self, key):
principal = self[key]
context = principal.context
del context.parent[context.name]
del self.storage[key]
@default
@locktree
def __getitem__(self, key):
key = ensure_text(key)
try:
return self.storage[key]
except KeyError:
criteria = {self._key_attr: key}
attrlist = ['rdn', self._key_attr]
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
raise KeyError(key)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with id "{0}" found.'
logger.warning(msg.format(key))
prdn = res[0][1]['rdn']
if prdn in self.context._deleted_children:
raise KeyError(key)
dn = res[0][0]
path = explode_dn(dn)[:len(self.context.DN.split(',')) * -1]
context = self.context
for rdn in reversed(path):
context = context[rdn]
principal = self.principal_factory(
context,
attraliaser=self.principal_attraliaser
)
principal.__name__ = key
principal.__parent__ = self
self.storage[key] = principal
return principal
@default
@locktree
def __iter__(self):
attrlist = ['rdn', self._key_attr]
for principal in self.context.batched_search(attrlist=attrlist):
prdn = principal[1]['rdn']
if prdn in self.context._deleted_children:
continue
yield ensure_text(principal[1][self._key_attr][0])
for principal in self.context._added_children:
yield self.context[principal].attrs[self._key_attr]
@default
@locktree
def __setitem__(self, name, value):
if not isinstance(value, self.principal_factory):
raise ValueError(u"Given value not instance of '{0}'".format(
self.principal_factory.__name__
))
# XXX: check if there is valid user context
exists = False
try:
self[name]
exists = True
except KeyError:
pass
if exists:
raise KeyError(
u"Principal with id '{0}' already exists.".format(name)
)
value.__name__ = name
value.__parent__ = self
self.storage[name] = value
@default
@property
def changed(self):
return self.context.changed
@default
@locktree
def invalidate(self, key=None):
"""Invalidate LDAPPrincipals.
"""
if key is None:
self.context.invalidate()
self.storage.clear()
return
try:
principal = self.storage[key]
principal.context.parent.invalidate(principal.context.name)
del self.storage[key]
except KeyError:
pass
@default
@locktree
def __call__(self):
self.context()
@default
def _alias_dict(self, dct):
ret = dict()
for key, val in six.iteritems(self.principal_attraliaser):
for k, v in six.iteritems(dct):
if val == k:
ret[key] = v
return ret
@default
def _unalias_list(self, lst):
unalias = self.principal_attraliaser.unalias
return [unalias(x) for x in lst]
@default
def _unalias_dict(self, dct):
if dct is None:
return None
unalias = self.principal_attraliaser.unalias
unaliased_dct = dict(
[(unalias(key), val) for key, val in six.iteritems(dct)])
return unaliased_dct
@default
def raw_search(self, criteria=None, attrlist=None,
exact_match=False, or_search=False, or_keys=None,
or_values=None, page_size=None, cookie=None):
search_attrlist = [self._key_attr]
if attrlist is not None and self._key_attr not in attrlist:
search_attrlist += attrlist
try:
results = self.context.search(
criteria=self._unalias_dict(criteria),
attrlist=self._unalias_list(search_attrlist),
exact_match=exact_match,
or_search=or_search,
or_keys=or_keys,
or_values=or_values,
page_size=page_size,
cookie=cookie
)
except ldap.NO_SUCH_OBJECT: # pragma: no cover
logger.debug("LDAPPrincipals.raw_search: ldap.NO_SUCH_OBJECT")
return []
if isinstance(results, tuple):
results, cookie = results
if attrlist is not None:
_results = list()
for _, att in results:
try:
principal_id = att[self._key_attr][0]
except (KeyError, IndexError):
continue
aliased = self._alias_dict(att)
for key in list(aliased.keys()):
if key not in attrlist:
del aliased[key]
_results.append((principal_id, aliased))
results = _results
else:
results = [att[self._key_attr][0] for _, att in results]
if cookie is not None:
return results, cookie
return results
@default
def search(self, criteria=None, attrlist=None,
exact_match=False, or_search=False):
result = []
cookie = ''
while True:
chunk, cookie = self.raw_search(
criteria=criteria,
attrlist=attrlist,
exact_match=exact_match,
or_search=or_search,
page_size=self.context.ldap_session._props.page_size,
cookie=cookie
)
result += chunk
if not cookie:
break
return result
@default
@locktree
def create(self, pid, **kw):
# XXX: mechanism for defining a target container if scope is SUBTREE
# create principal with LDAPNode as context
context = LDAPNode()
principal = self.principal_factory(
context,
attraliaser=self.principal_attraliaser
)
# ensure id on attributes
kw['id'] = pid
# avoid overwriting key attribute if given in kw
if self._key_attr in kw:
del kw[self._key_attr]
# set additional attributes on principal
for k, v in kw.items():
principal.attrs[k] = v
# set principal to self
self[pid] = principal
# if setting principal has been successful, hook up principal context
# to ldap tree
rdn = u'{0}={1}'.format(
self._rdn_attr,
principal.context.attrs[self._rdn_attr]
)
self.context[rdn] = context
# return newly created principal
return self[pid]
def calculate_expired(expiresUnit, expires):
"""Return bool whether expired.
"""
if expires and expires not in ['99999', '-1']:
# check expiration timestamp
expires = int(expires)
# XXX: maybe configurable?
# shadow account specific
# if self.expiresAttr == 'shadowExpire':
# expires += int(user.attrs.get('shadowInactive', '0'))
days = time.time()
if expiresUnit == EXPIRATION_DAYS:
# numer of days since epoch
days /= 86400
if days >= expires:
return True
return False
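# Illustrative behaviour of calculate_expired (assuming shadowExpire-style day counts):
# calculate_expired(EXPIRATION_DAYS, '1') is True for any current date after 1970-01-02,
# while the values '99999' and '-1' are treated as "never expires" and always return False.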
class LDAPUsers(LDAPPrincipals, UgmUsers):
principal_factory = default(User)
@override
@locktree
def __delitem__(self, key):
user = self[key]
try:
groups = user.groups
except AttributeError:
groups = list()
for group in groups:
del group[user.name]
parent = self.parent
if parent and parent.rcfg is not None:
for role in user.roles:
user.remove_role(role)
context = user.context
del context.parent[context.name]
del self.storage[key]
@default
def id_for_login(self, login):
criteria = {self._login_attr: login}
attrlist = [self._key_attr]
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
return ensure_text(login)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(login))
return ensure_text(res[0][1][self._key_attr][0])
@default
@debug
def authenticate(self, login=None, pw=None, id=None):
if id is not None:
# bbb. deprecated usage
login = id
user_id = self.id_for_login(login)
criteria = {self._key_attr: user_id}
attrlist = ['dn']
if self.expiresAttr:
attrlist.append(self.expiresAttr)
try:
res = self.context.search(criteria=criteria, attrlist=attrlist)
except ldap.NO_SUCH_OBJECT: # pragma: no cover
return False
if not res:
return False
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(user_id))
if self.expiresAttr:
expires = res[0][1].get(self.expiresAttr)
expires = expires and expires[0] or None
try:
expired = calculate_expired(self.expiresUnit, expires)
except ValueError:
# unknown expires field data
msg = (
u"Accound expiration flag for user '{0}' "
u"contains unknown data"
)
logger.error(msg.format(id))
return False
if expired:
return ACCOUNT_EXPIRED
user_dn = res[0][1]['dn']
session = self.context.ldap_session
authenticated = session.authenticate(user_dn, pw)
return authenticated and user_id or False
@default
@debug
def passwd(self, id, oldpw, newpw):
user_id = self.id_for_login(id)
criteria = {self._key_attr: user_id}
attrlist = ['dn']
if self.expiresAttr:
attrlist.append(self.expiresAttr)
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
raise KeyError(id)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(user_id))
user_dn = res[0][1]['dn']
self.context.ldap_session.passwd(user_dn, oldpw, newpw)
object_classes = self.context.child_defaults['objectClass']
user_node = self[user_id].context
user_node.attrs.load()
if 'sambaSamAccount' in object_classes:
user_node.attrs['sambaNTPassword'] = sambaNTPassword(newpw)
user_node.attrs['sambaLMPassword'] = sambaLMPassword(newpw)
user_node()
@plumbing(
LDAPUsers,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Users(object):
pass
def member_format(object_classes):
for object_class in MEMBER_LOOKUP_BY_CLASS:
if object_class in object_classes:
return MEMBER_LOOKUP_BY_CLASS[object_class]['format']
raise Exception(
u"Can not lookup member format for object-classes: {0}".format(
object_classes,
)
)
def member_attribute(object_classes):
for object_class in MEMBER_LOOKUP_BY_CLASS:
if object_class in object_classes:
return MEMBER_LOOKUP_BY_CLASS[object_class]['attribute']
raise Exception(
u"Can not lookup member attribute for object-classes: {0}".format(
object_classes,
)
)
class LDAPGroupsMapping(LDAPPrincipals, UgmGroups):
@default
@property
def _member_format(self):
return member_format(self.context.child_defaults['objectClass'])
@default
@property
def _member_attribute(self):
return member_attribute(self.context.child_defaults['objectClass'])
@plumb
def __init__(_next, self, props, cfg):
mem_attr = member_attribute(cfg.objectClasses)
cfg.attrmap[mem_attr] = mem_attr
_next(self, props, cfg)
@plumb
def __setitem__(_next, self, key, value):
# XXX: kick this, dummy member should be created by default value
# callback
if self._member_attribute not in value.attrs:
value.attrs[self._member_attribute] = []
if self._member_format is FORMAT_UID:
value.attrs[self._member_attribute].insert(0, 'nobody')
else:
value.attrs[self._member_attribute].insert(0, 'cn=nobody')
_next(self, key, value)
class LDAPGroups(LDAPGroupsMapping):
principal_factory = default(Group)
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
group = self[key]
parent = self.parent
if parent and parent.rcfg is not None:
for role in group.roles:
group.remove_role(role)
context = group.context
del context.parent[context.name]
del self.storage[key]
@plumbing(
LDAPGroups,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Groups(object):
pass
class LDAPRole(LDAPGroupMapping, AliasedPrincipal):
@default
def related_principals(self, key):
ugm = self.parent.parent
if key.startswith('group:'):
return ugm.groups
return ugm.users
@default
@property
def existing_member_ids(self):
ugm = self.parent.parent
users = ugm.users
groups = ugm.groups
ret = [key for key in users]
for key in groups:
ret.append('group:{}'.format(key))
return ret
@default
def translate_ids(self, members):
if self._member_format == FORMAT_DN:
ugm = self.parent.parent
users = ugm.users
groups = ugm.groups
user_members = list()
for dn in members:
try:
user_members.append(users.idbydn(dn, True))
except KeyError:
pass
group_members = list()
for dn in members:
try:
group_members.append('group:{}'.format(groups.idbydn(dn, True)))
except KeyError:
pass
members = user_members + group_members
return members
@default
def translate_key(self, key):
ret = None
if self._member_format == FORMAT_DN:
if key.startswith('group:'):
key = key[6:]
principals = self.parent.parent.groups
else:
principals = self.parent.parent.users
# make sure principal is loaded
principal = principals[key]
ret = principal.context.DN
elif self._member_format == FORMAT_UID:
ret = key
return ret
@override
@locktree
def __getitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
principals = self.related_principals(key)
if key.startswith('group:'):
key = key[6:]
return principals[key]
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
principals = self.related_principals(key)
if self._member_format == FORMAT_DN:
real_key = key
if key.startswith('group:'):
real_key = key[6:]
val = principals[real_key].context.DN
elif self._member_format == FORMAT_UID:
val = key
# self.context.attrs[self._member_attribute].remove won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
members = self.context.attrs[self._member_attribute]
members.remove(val)
self.context.attrs[self._member_attribute] = members
# XXX: call here immediately?
self.context()
@plumbing(
LDAPRole,
NodeChildValidate,
Nodespaces,
Attributes,
Nodify,
)
class Role(object):
pass
class LDAPRoles(LDAPGroupsMapping):
principal_factory = default(Role)
@plumbing(
LDAPRoles,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Roles(object):
pass
class LDAPUgm(UgmBase):
@override
def __init__(self, name=None, parent=None, props=None,
ucfg=None, gcfg=None, rcfg=None):
"""
:param name: Node name.
:param parent: Node parent.
:param props: LDAPProps instance.
:param ucfg: UsersConfig instance.
:param gcfg: GroupsConfig instance.
:param rcfg: RolesConfig instance.
"""
self.__name__ = name
self.__parent__ = parent
self.props = props
self.ucfg = ucfg
self.gcfg = gcfg
self.rcfg = rcfg
@override
@locktree
def __getitem__(self, key):
if key not in self.storage:
if key == 'users':
self['users'] = Users(self.props, self.ucfg)
elif key == 'groups':
self['groups'] = Groups(self.props, self.gcfg)
return self.storage[key]
@override
@locktree
def __setitem__(self, key, value):
self._chk_key(key)
self.storage[key] = value
@override
def __delitem__(self, key):
raise NotImplementedError(u"Operation forbidden on this node.")
@override
def __iter__(self):
for key in ['users', 'groups']:
yield key
@override
@locktree
def __call__(self):
self.users()
self.groups()
roles_storage = self.roles_storage
if roles_storage is not None:
roles_storage()
@default
@property
def users(self):
return self['users']
@default
@property
def groups(self):
return self['groups']
@default
@property
def roles_storage(self):
return self._roles
@default
@locktree
def roles(self, principal):
uid = self._principal_id(principal)
roles = self._roles
ret = list()
if roles is None:
# XXX: logging
return ret
for role in roles.values():
if uid in role.member_ids:
ret.append(role.name)
return ret
# XXX: Below is the logic for querying roles from LDAP via query. Integrate
# to use this logic whenever roles are queried and the roles node is
# unchanged.
# attribute = roles._member_attribute
# format = roles._member_format
# if format == FORMAT_DN:
# criteria = { attribute: principal.context.DN }
# elif format == FORMAT_UID:
# # XXX: this is hacky. we really need member relations!!!
# if isinstance(principal, Group):
# attrkey = principal.parent.context._rdn_attr
# value = 'group:%s' % principal.context.attrs[attrkey]
# else:
# value = principal.context.attrs['uid']
# criteria = { attribute: value }
# return roles.context.search(criteria=criteria)
@default
@locktree
def add_role(self, rolename, principal):
uid = self._principal_id(principal)
roles = self._roles
if roles is None:
raise ValueError(u"Role support not configured properly")
role = roles.get(rolename)
if role is None:
role = roles.create(rolename)
if uid in role.member_ids:
raise ValueError(u"Principal already has role '{}'".format(rolename))
role.add(uid)
@default
@locktree
def remove_role(self, rolename, principal):
uid = self._principal_id(principal)
roles = self._roles
if roles is None:
raise ValueError(u"Role support not configured properly")
role = roles.get(rolename)
if role is None:
raise ValueError(u"Role not exists '{}'".format(rolename))
if uid not in role.member_ids:
raise ValueError(u"Principal does not has role '{}'".format(rolename))
del role[uid]
if not role.member_ids:
parent = role.parent
del parent[rolename]
@default
@property
def _roles(self):
if 'roles' not in self.storage:
try:
roles = Roles(self.props, self.rcfg)
except Exception:
# XXX: logging
return None
roles.__name__ = 'roles'
roles.__parent__ = self
self.storage['roles'] = roles
return self.storage['roles']
@default
def _principal_id(self, principal):
uid = principal.name
if isinstance(principal, Group):
uid = 'group:{}'.format(uid)
return uid
@default
def _chk_key(self, key):
if key not in ['users', 'groups']:
raise KeyError(key)
@plumbing(
LDAPUgm,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
DefaultInit,
Nodify,
OdictStorage,
)
class Ugm(object):
def invalidate(self, key=None):
if key is None:
self.storage.clear()
return
del self.storage[key]
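# Added usage sketch (not part of the original module). It assumes already
# constructed LDAPProps / UsersConfig / GroupsConfig / RolesConfig objects
# bound to props, ucfg, gcfg and rcfg:
#
#     ugm = Ugm(name='ugm', parent=None, props=props,
#               ucfg=ucfg, gcfg=gcfg, rcfg=rcfg)
#     user = ugm.users['someuser']        # 'users' child is created lazily
#     ugm.add_role('editor', user)        # stored via the roles storage
#     assert 'editor' in ugm.roles(user)
#     ugm.invalidate('users')             # drop only the cached 'users' child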
|
py | 1a4c95fc7c61b093c7eb5b22d0de05a4ec521535 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ray
import time
import unittest
import pyarrow as pa
class ComponentFailureTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
# This test checks that when a worker dies in the middle of a get, the
# plasma store and manager will not die.
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False),
"Not working with new GCS API.")
def testDyingWorkerGet(self):
obj_id = 20 * b"a"
@ray.remote
def f():
ray.worker.global_worker.plasma_client.get(obj_id)
ray.worker._init(
num_workers=1,
driver_mode=ray.SILENT_MODE,
start_workers_from_local_scheduler=False,
start_ray_local=True,
redirect_output=True)
# Have the worker wait in a get call.
f.remote()
# Kill the worker.
time.sleep(1)
(ray.services.all_processes[ray.services.PROCESS_TYPE_WORKER][0]
.terminate())
time.sleep(0.1)
# Seal the object so the store attempts to notify the worker that the
# get has been fulfilled.
ray.worker.global_worker.plasma_client.create(
pa.plasma.ObjectID(obj_id), 100)
ray.worker.global_worker.plasma_client.seal(pa.plasma.ObjectID(obj_id))
time.sleep(0.1)
# Make sure that nothing has died.
self.assertTrue(
ray.services.all_processes_alive(
exclude=[ray.services.PROCESS_TYPE_WORKER]))
# This test checks that when a worker dies in the middle of a wait, the
# plasma store and manager will not die.
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False),
"Not working with new GCS API.")
def testDyingWorkerWait(self):
obj_id = 20 * b"a"
@ray.remote
def f():
ray.worker.global_worker.plasma_client.wait([obj_id])
ray.worker._init(
num_workers=1,
driver_mode=ray.SILENT_MODE,
start_workers_from_local_scheduler=False,
start_ray_local=True,
redirect_output=True)
        # Have the worker wait in a wait call.
f.remote()
# Kill the worker.
time.sleep(1)
(ray.services.all_processes[ray.services.PROCESS_TYPE_WORKER][0]
.terminate())
time.sleep(0.1)
# Seal the object so the store attempts to notify the worker that the
# get has been fulfilled.
ray.worker.global_worker.plasma_client.create(
pa.plasma.ObjectID(obj_id), 100)
ray.worker.global_worker.plasma_client.seal(pa.plasma.ObjectID(obj_id))
time.sleep(0.1)
# Make sure that nothing has died.
self.assertTrue(
ray.services.all_processes_alive(
exclude=[ray.services.PROCESS_TYPE_WORKER]))
def _testWorkerFailed(self, num_local_schedulers):
@ray.remote
def f(x):
time.sleep(0.5)
return x
num_initial_workers = 4
ray.worker._init(
num_workers=(num_initial_workers * num_local_schedulers),
num_local_schedulers=num_local_schedulers,
start_workers_from_local_scheduler=False,
start_ray_local=True,
num_cpus=[num_initial_workers] * num_local_schedulers,
redirect_output=True)
# Submit more tasks than there are workers so that all workers and
# cores are utilized.
object_ids = [
f.remote(i)
for i in range(num_initial_workers * num_local_schedulers)
]
object_ids += [f.remote(object_id) for object_id in object_ids]
# Allow the tasks some time to begin executing.
time.sleep(0.1)
# Kill the workers as the tasks execute.
for worker in (
ray.services.all_processes[ray.services.PROCESS_TYPE_WORKER]):
worker.terminate()
time.sleep(0.1)
# Make sure that we can still get the objects after the executing tasks
# died.
ray.get(object_ids)
def testWorkerFailed(self):
self._testWorkerFailed(1)
def testWorkerFailedMultinode(self):
self._testWorkerFailed(4)
def _testComponentFailed(self, component_type):
"""Kill a component on all worker nodes and check workload succeeds."""
@ray.remote
def f(x, j):
time.sleep(0.2)
return x
# Start with 4 workers and 4 cores.
num_local_schedulers = 4
num_workers_per_scheduler = 8
ray.worker._init(
num_workers=num_workers_per_scheduler,
num_local_schedulers=num_local_schedulers,
start_ray_local=True,
num_cpus=[num_workers_per_scheduler] * num_local_schedulers,
redirect_output=True)
# Submit more tasks than there are workers so that all workers and
# cores are utilized.
object_ids = [
f.remote(i, 0)
for i in range(num_workers_per_scheduler * num_local_schedulers)
]
object_ids += [f.remote(object_id, 1) for object_id in object_ids]
object_ids += [f.remote(object_id, 2) for object_id in object_ids]
# Kill the component on all nodes except the head node as the tasks
# execute.
time.sleep(0.1)
components = ray.services.all_processes[component_type]
for process in components[1:]:
process.terminate()
time.sleep(1)
for process in components[1:]:
process.kill()
process.wait()
self.assertNotEqual(process.poll(), None)
# Make sure that we can still get the objects after the executing tasks
# died.
results = ray.get(object_ids)
expected_results = 4 * list(
range(num_workers_per_scheduler * num_local_schedulers))
self.assertEqual(results, expected_results)
def check_components_alive(self, component_type, check_component_alive):
"""Check that a given component type is alive on all worker nodes.
"""
components = ray.services.all_processes[component_type][1:]
for component in components:
if check_component_alive:
self.assertTrue(component.poll() is None)
else:
print("waiting for " + component_type + " with PID " +
str(component.pid) + "to terminate")
component.wait()
print("done waiting for " + component_type + " with PID " +
str(component.pid) + "to terminate")
self.assertTrue(not component.poll() is None)
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False), "Hanging with new GCS API.")
def testLocalSchedulerFailed(self):
# Kill all local schedulers on worker nodes.
self._testComponentFailed(ray.services.PROCESS_TYPE_LOCAL_SCHEDULER)
# The plasma stores and plasma managers should still be alive on the
# worker nodes.
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_STORE,
True)
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_MANAGER,
True)
self.check_components_alive(ray.services.PROCESS_TYPE_LOCAL_SCHEDULER,
False)
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False), "Hanging with new GCS API.")
def testPlasmaManagerFailed(self):
# Kill all plasma managers on worker nodes.
self._testComponentFailed(ray.services.PROCESS_TYPE_PLASMA_MANAGER)
# The plasma stores should still be alive (but unreachable) on the
# worker nodes.
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_STORE,
True)
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_MANAGER,
False)
self.check_components_alive(ray.services.PROCESS_TYPE_LOCAL_SCHEDULER,
False)
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False), "Hanging with new GCS API.")
def testPlasmaStoreFailed(self):
# Kill all plasma stores on worker nodes.
self._testComponentFailed(ray.services.PROCESS_TYPE_PLASMA_STORE)
# No processes should be left alive on the worker nodes.
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_STORE,
False)
self.check_components_alive(ray.services.PROCESS_TYPE_PLASMA_MANAGER,
False)
self.check_components_alive(ray.services.PROCESS_TYPE_LOCAL_SCHEDULER,
False)
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False),
"Not working with new GCS API.")
def testDriverLivesSequential(self):
ray.worker.init(redirect_output=True)
all_processes = ray.services.all_processes
processes = [
all_processes[ray.services.PROCESS_TYPE_PLASMA_STORE][0],
all_processes[ray.services.PROCESS_TYPE_PLASMA_MANAGER][0],
all_processes[ray.services.PROCESS_TYPE_LOCAL_SCHEDULER][0],
all_processes[ray.services.PROCESS_TYPE_GLOBAL_SCHEDULER][0]
]
# Kill all the components sequentially.
for process in processes:
process.terminate()
time.sleep(0.1)
process.kill()
process.wait()
# If the driver can reach the tearDown method, then it is still alive.
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False),
"Not working with new GCS API.")
def testDriverLivesParallel(self):
ray.worker.init(redirect_output=True)
all_processes = ray.services.all_processes
processes = [
all_processes[ray.services.PROCESS_TYPE_PLASMA_STORE][0],
all_processes[ray.services.PROCESS_TYPE_PLASMA_MANAGER][0],
all_processes[ray.services.PROCESS_TYPE_LOCAL_SCHEDULER][0],
all_processes[ray.services.PROCESS_TYPE_GLOBAL_SCHEDULER][0]
]
# Kill all the components in parallel.
for process in processes:
process.terminate()
time.sleep(0.1)
for process in processes:
process.kill()
for process in processes:
process.wait()
# If the driver can reach the tearDown method, then it is still alive.
if __name__ == "__main__":
unittest.main(verbosity=2)
|
py | 1a4c9609ddcba8b57bd45c352dabbb227ec0cd6a | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import unittest
from opencensus.common.monitored_resource import aws_identity_doc_utils
class TestAwsIdentityDocumentUtils(unittest.TestCase):
@mock.patch('opencensus.common.monitored_resource.'
'aws_identity_doc_utils.get_request')
def test_get_aws_metadata(self, http_request_mock):
mocked_http_response = {
'availabilityZone': 'us-west-2b',
'instanceId': 'i-1234567890abcdef0',
'imageId': 'ami-5fb8c835',
'privateIp': '10.158.112.84',
'pendingTime': '2016-11-19T16:32:11Z',
'accountId': '123456789012',
'region': 'us-west-2',
'marketplaceProductCodes': ["1abc2defghijklm3nopqrs4tu"],
'instanceType': 't2.micro',
'version': '2017-09-30',
'architecture': 'x86_64',
}
http_request_mock.return_value = json.dumps(mocked_http_response)
aws_identity_doc_utils.AwsIdentityDocumentUtils.inited = False
aws_identity_doc_utils.AwsIdentityDocumentUtils.is_running = False
aws_identity_doc_utils.aws_metadata_map = {}
self.assertTrue(aws_identity_doc_utils.AwsIdentityDocumentUtils
.is_running_on_aws())
labels_list = aws_identity_doc_utils.AwsIdentityDocumentUtils(
).get_aws_metadata()
self.assertEquals(len(labels_list), 3)
expected_labels = {
'instance_id': 'i-1234567890abcdef0',
'aws_account': '123456789012',
'region': 'us-west-2'
}
self.assertEquals(labels_list, expected_labels)
@mock.patch('opencensus.common.monitored_resource.'
'aws_identity_doc_utils.get_request')
def test_get_aws_metadata_none_fields(self, http_request_mock):
mocked_http_response = {
'availabilityZone': 'us-west-2b',
'imageId': 'ami-5fb8c835',
'privateIp': '10.158.112.84',
'pendingTime': '2016-11-19T16:32:11Z',
'accountId': '123456789012',
'region': 'us-west-2',
'marketplaceProductCodes': ["1abc2defghijklm3nopqrs4tu"],
'instanceType': 't2.micro',
'version': '2017-09-30',
'architecture': 'x86_64',
}
http_request_mock.return_value = json.dumps(mocked_http_response)
aws_identity_doc_utils.AwsIdentityDocumentUtils.inited = False
aws_identity_doc_utils.AwsIdentityDocumentUtils.is_running = False
aws_identity_doc_utils.aws_metadata_map = {}
self.assertTrue(aws_identity_doc_utils.AwsIdentityDocumentUtils
.is_running_on_aws())
labels_list = aws_identity_doc_utils.AwsIdentityDocumentUtils(
).get_aws_metadata()
self.assertEquals(len(labels_list), 2)
expected_labels = {
'aws_account': '123456789012',
'region': 'us-west-2'
}
self.assertEquals(labels_list, expected_labels)
@mock.patch('opencensus.common.monitored_resource.'
'aws_identity_doc_utils.get_request')
def test_aws_not_running(self, http_request_mock):
http_request_mock.return_value = None
aws_identity_doc_utils.inited = False
aws_identity_doc_utils.is_running_on_aws = False
aws_identity_doc_utils.aws_metadata_map = {}
self.assertFalse(aws_identity_doc_utils.AwsIdentityDocumentUtils
.is_running_on_aws())
labels_list = aws_identity_doc_utils.AwsIdentityDocumentUtils(
).get_aws_metadata()
self.assertEquals(len(labels_list), 0)
|
py | 1a4c96439bb7ce45e0fa69409a47f98bb3526a0e | import pytest
from diot import Diot
from pyppl import Proc
from pyppl.job import Job
from pyppl.utils import fs
from pyppl.logger import logger, LEVEL_GROUPS
from pyppl_echo import expand_numbers, fileflush, echo_jobs_converter, echo_types_converter, flush, logger_init, job_poll
@pytest.fixture
def fd_fileflush(tmp_path):
tmpfile = tmp_path / 'fileflush.txt'
tmpfile.write_text('')
with open(tmpfile, 'r') as fd_read, open(tmpfile, 'a') as fd_append:
yield fd_read, fd_append
@pytest.fixture(params = range(5))
def fixt_fileflush(request, fd_fileflush):
fd_read, fd_append = fd_fileflush
if request.param == 0:
return Diot(filed = fd_read, residue = '', expt_lines = [], expt_residue = '')
if request.param == 1:
fd_append.write('abcde')
fd_append.flush()
return Diot(filed = fd_read, residue = '', expt_lines = [], expt_residue = 'abcde')
if request.param == 2:
fd_append.write('ccc\ne1')
fd_append.flush()
return Diot(filed = fd_read, residue = 'abcde', expt_lines = ['abcdeccc\n'], expt_residue = 'e1')
if request.param == 3:
fd_append.write('ccc')
fd_append.flush()
return Diot(filed = fd_read, residue = '', end = True, expt_lines = ['ccc\n'], expt_residue = '')
if request.param == 4:
return Diot(filed = fd_read, residue = 'end', end = True, expt_lines = ['end\n'], expt_residue = '')
@pytest.fixture
def job0(tmp_path):
job = Job(0, Proc(
workdir = tmp_path/'pJob',
dirsig = True,
config = Diot(echo_jobs=0, types='stderr')
))
# pretend it's running
job.proc.runtime_config = {'dirsig': True}
fs.mkdir(job.dir)
(job.dir / 'job.script').write_text('')
return job
@pytest.mark.parametrize('numbers,expt',[
('1,2,3,4', [1,2,3,4]),
('1-4', [1,2,3,4]),
('1-4,7,8-10', [1,2,3,4,7,8,9,10]),
])
def test_expand_numbers(numbers, expt):
assert expand_numbers(numbers) == expt
def test_fileflush(fixt_fileflush):
lines, residue = fileflush(
fixt_fileflush.filed, fixt_fileflush.residue, fixt_fileflush.get('end', False))
assert lines == fixt_fileflush.expt_lines
assert residue == fixt_fileflush.expt_residue
@pytest.mark.parametrize('jobs,expected', [
([], []),
([0,1], [0,1]),
(0, [0]),
('0,1', [0,1]),
])
def test_echo_jobs_converter(jobs, expected):
assert echo_jobs_converter(jobs) == expected
@pytest.mark.parametrize('types,expected', [
('', {'stderr': None, 'stdout': None}),
('stderr', {'stderr': None}),
({'all': '^log'}, {'stderr': '^log', 'stdout': '^log'}),
])
def test_echo_types_converter(types, expected):
assert echo_types_converter(types) == expected
def test_flush(job0, caplog):
job0.proc.config.echo_jobs = [1]
flush(job0)
assert '' == caplog.text
assert job0.config.echo_lastout == ''
assert job0.config.echo_lasterr == ''
job0.proc.config.echo_jobs = [0]
job0.proc.config.echo_types = {
'stdout': '', 'stderr': r'^[^&].+$'}
(job0.dir / 'job.stdout').write_text('out: line1\nout: line2')
(job0.dir / 'job.stderr').write_text('err: line1\nerr: line2')
caplog.clear()
flush(job0)
assert 'out: line1' in caplog.text
assert 'err: line1' in caplog.text
assert 'line2' not in caplog.text
assert job0.config.echo_lastout == 'out: line2'
assert job0.config.echo_lasterr == 'err: line2'
(job0.dir / 'job.stderr').write_text(
'err: line1\nerr: line23\n& ignored\npyppl.logger.abc\npyppl.logger.msg: hello world!')
caplog.clear()
job_poll(job0, status = 'done')
#flush(job0, end = True)
assert 'err: line23' in caplog.text
assert '_MSG' in caplog.text
assert '_ABC' in caplog.text
assert 'hello world' in caplog.text
assert 'ignored' not in caplog.text
assert job0.config.echo_lastout == ''
assert job0.config.echo_lasterr == ''
def test_hook():
logger_init(logger)
assert 'STDOUT' in LEVEL_GROUPS['INFO']
assert 'STDERR' in LEVEL_GROUPS['INFO']
|
py | 1a4c9682da4b81a0f168d8e9419bc7e161393b09 | #!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# ltr.py - LISP EID Traceroute Client - Trace the encap/decap paths
#
# Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>
#
# -s: Optional source EID.
# <destination-EID>: required parameter [<iid>] in front is optional
#
# This application is run on an xTR. Typically a ITR or RTR, where the
# encapsulator adds to the ltr message with the RLOC the ITR is encapsulating
# to. Then the decapsulator will decapsulate and swap the source and
# destination addresses to return the packet to the source-EID (running the
# client program). If the ETR is not the EID, then the packet will be re-
# encapsulated in which more data is added to the ltr message.
#
# ltr messages run in UDP on port 2434 (4342 backwards) and are returned
# to the client program.
#
# The LISP-Trace message takes the following path:
#
# (1) ltr sends LISP-TRACE packet from its EID to the EID of the ETR on
# port 2434. It builds a type=9 packet with a nonce and an empty JSON field.
#
# (2) ITR will look up destination EID as part of forwarding logic and add
# RLOC information to LISP-Trace message. The message is encapsulated to
# the ETR.
#
# (3) The ETR (or RTR) will decap packet. It will add information to the LISP-
# packet. If it is the destination EID, it will send the LISP-Trace packet
# using itself as the source and the original source as the destination.
#
# (4) The local ITR will encapsulate the packet and add RLOC information to
# the LISP-Trace packet. It encapsulates the return packet to the ETR.
#
# (5) The ETR decapsulates the packet and sends it to the ltr client so the
# accumulated JSON data can be displayed for the user.
#
# This functionality works on a chain of encapsulating tunnels to give the
# user what RLOCs are used and the arrival time of the packet. It allows an
# ltr client to not only determine path and latency of the network, but if
# the encapsulation paths are symmetric or asymmetric.
#
# If there is an error along the path, the node detecting the error will return
# the LISP-Trace packet to the RLOC of the originating ITR.
#
# The JSON format of an LISP-Trace packet is an array of dictionary arrays.
# The array will typically have 2 elements, one from ltr source to destination
# EID and one for the return path. Each dictionary array is keyed with "seid",
# "deid", and "paths". The array "paths" is the node data that is appended
# at each encapsulation hop. Note example below:
#
# [
# { "se" : "[<iid>]<orig-eid>", "de" : "[<iid>]<dest-eid>", "paths" : a
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] },
#
# { "se" : "[<iid>]<dest-eid>", "de" : "[<iid>]<orig-eid>", "paths" :
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] }
# ]
#
# Environment variable LISP_LTR_PORT is used to determine if the connection to
# the LISP API is done with a particular port. And if the port has a minus
# sign in front of it, it will use http rather https to connect to the
# lispers.net API. Environment variables LISP_LTR_USER and LISP_LTR_PW are
# used when lispers.net API is running with a password on username root.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from future import standard_library
standard_library . install_aliases ( )
from builtins import hex
import sys
import struct
import random
import socket
import json
import time
import os
import binascii
from subprocess import getoutput
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = "https"
oO0oIIII = 8080
if 59 - 59: i1IIi * i1IIi % OOooOOo + II111iiii
II = os . getenv ( "LISP_LTR_PORT" )
if ( II != None ) :
if ( II [ 0 ] == "-" ) :
II1iII1i = "http"
II = II [ 1 : : ]
if 100 - 100: i1IIi . I1Ii111 / IiII * OoooooooOO + I11i * oO0o
if ( II . isdigit ( ) == False ) :
print ( "Invalid value for env variable LISP_LTR_PORT" )
exit ( 1 )
if 99 - 99: iII111i . OOooOOo / iIii1I11I1II1 * iIii1I11I1II1
oO0oIIII = int ( II )
if 11 - 11: oO0o / i1IIi % II111iiii - OoOoOO00
OOo = os . getenv ( "LISP_LTR_USER" )
Ii1IIii11 = os . getenv ( "LISP_LTR_PW" )
if ( OOo == None ) : OOo = "root"
if ( Ii1IIii11 == None ) : Ii1IIii11 = ""
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
OOo000 = 2434
if 82 - 82: I11i . I1Ii111 / IiII % II111iiii % iIii1I11I1II1 % IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
def oO ( rloc , port ) :
OO0OOooOoO0Oo = socket . htonl ( 0x90000000 + port )
iiIIiIiIi = struct . pack ( "I" , OO0OOooOoO0Oo )
if 38 - 38: Ii1I / Oo0Ooo
OooO0 = rloc . split ( "." )
II11iiii1Ii = int ( OooO0 [ 0 ] ) << 24
II11iiii1Ii += int ( OooO0 [ 1 ] ) << 16
II11iiii1Ii += int ( OooO0 [ 2 ] ) << 8
II11iiii1Ii += int ( OooO0 [ 3 ] )
iiIIiIiIi += struct . pack ( "I" , socket . htonl ( II11iiii1Ii ) )
if 70 - 70: oO0o / iIii1I11I1II1 % ooOoO0o % i11iIiiIii . I1IiiI
O0o0Oo = random . randint ( 0 , ( 2 ** 64 ) - 1 )
iiIIiIiIi += struct . pack ( "Q" , O0o0Oo )
return ( O0o0Oo , iiIIiIiIi )
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
def i1iiI11I ( nonce , packet ) :
if ( len ( packet ) < 12 ) : return ( False )
if 29 - 29: OoooooooOO
iI = "II"
I1i1I1II = struct . calcsize ( iI )
OO0OOooOoO0Oo , i1 = struct . unpack ( iI , packet [ : I1i1I1II ] )
packet = packet [ I1i1I1II : : ]
if ( socket . ntohl ( OO0OOooOoO0Oo ) != 0x90000000 ) :
print ( "Invalid LISP-Trace message" )
return ( { } )
if 48 - 48: O0 + O0 - I1ii11iIi11i . ooOoO0o / iIii1I11I1II1
if 77 - 77: i1IIi % OoOoOO00 - IiII + ooOoO0o
iI = "Q"
I1i1I1II = struct . calcsize ( iI )
I11iiIiii = struct . unpack ( iI , packet [ : I1i1I1II ] ) [ 0 ]
packet = packet [ I1i1I1II : : ]
if 1 - 1: II111iiii - I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if ( I11iiIiii != nonce ) :
print ( "Invalid nonce, sent {}, received {}" . format ( nonce , I11iiIiii ) )
return ( { } )
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if ( len ( packet ) == 0 ) :
print ( "No JSON data in payload" )
return ( { } )
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
try :
Ii11iII1 = json . loads ( packet )
except :
print ( "Invalid JSON data: '{}'" . format ( packet ) )
return ( { } )
if 51 - 51: II111iiii * OoO0O00 % o0oOOo0O0Ooo * II111iiii % I1ii11iIi11i / ooOoO0o
return ( Ii11iII1 )
if 49 - 49: o0oOOo0O0Ooo
if 35 - 35: OoOoOO00 - OoooooooOO / I1ii11iIi11i % i1IIi
if 78 - 78: I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
def i1I11i1iI ( jd ) :
for I1ii1Ii1 in jd :
iii11 = I1ii1Ii1 [ "se" ] if ( jd . index ( I1ii1Ii1 ) == 0 ) else oOOOOo0 ( I1ii1Ii1 [ "se" ] )
iiII1i1 = oOOOOo0 ( I1ii1Ii1 [ "de" ] ) if ( jd . index ( I1ii1Ii1 ) == 0 ) else I1ii1Ii1 [ "de" ]
if 66 - 66: OOooOOo - I11i
print ( "Path from {} to {}:" . format ( iii11 , iiII1i1 ) )
for I1i1III in I1ii1Ii1 [ "paths" ] :
if ( "ets" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "ets" ]
iiiI1I11i1 = "encap"
if 49 - 49: I1IiiI % ooOoO0o . ooOoO0o . I11i * ooOoO0o
if ( "dts" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "dts" ]
iiiI1I11i1 = "decap"
if 97 - 97: Ii1I + o0oOOo0O0Ooo . OOooOOo + I1ii11iIi11i % iII111i
oo0O = I1i1III [ "hn" ]
o0 = I1i1III [ "dr" ]
if ( o0 . find ( "?" ) != - 1 ) : o0 = oo0oOo ( o0 )
if 89 - 89: OoOoOO00
print ( " {} {}: {} -> {}, ts {}, node {}" . format ( I1i1III [ "n" ] , iiiI1I11i1 , I1i1III [ "sr" ] , o0 , OO0O0OoOO0 , OO0oOoOO0oOO0 ( oo0O ) ) )
if 86 - 86: OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if ( "rtts" in I1i1III and "hops" in I1i1III and "lats" in I1i1III ) :
ii1ii1ii = json . dumps ( I1i1III [ "rtts" ] )
ii1ii1ii = ii1ii1ii . replace ( "-1" , "?" )
oooooOoo0ooo = json . dumps ( I1i1III [ "hops" ] )
oooooOoo0ooo = oooooOoo0ooo . replace ( "u" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( "'" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( '"' , "" )
I1I1IiI1 = json . dumps ( I1i1III [ "lats" ] )
I1I1IiI1 = I1I1IiI1 . replace ( "u" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( "'" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( '"' , "" )
print ( " " , end = ' ' )
print ( "recent-rtts {}, recent-hops {}" . format ( ii1ii1ii , oooooOoo0ooo ) )
print ( " recent-latencies {}" . format ( I1I1IiI1 ) )
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
print ( "" )
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
def o0oO ( eid ) :
IIiIi1iI = True
if 35 - 35: Ii1I % O0 - O0
if 16 - 16: II111iiii % OoOoOO00 - II111iiii + Ii1I
if 12 - 12: OOooOOo / OOooOOo + i11iIiiIii
if 40 - 40: I1IiiI . iIii1I11I1II1 / I1IiiI / i11iIiiIii
if 75 - 75: I11i + o0oOOo0O0Ooo
O0i1II1Iiii1I11 = eid . find ( "]" )
if ( O0i1II1Iiii1I11 == - 1 ) :
IIII = "0"
else :
IIiIi1iI = False
IIII = eid [ 1 : O0i1II1Iiii1I11 ]
eid = eid [ O0i1II1Iiii1I11 + 1 : : ]
if 32 - 32: OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if ( eid . find ( ":" ) == - 1 ) :
try : eid = socket . gethostbyname ( eid )
except : pass
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
return ( IIII , eid , IIiIi1iI )
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
def i1I1iI1iIi111i ( eid , eid_prefix , ml ) :
iiIi1IIi1I = 2 ** ml - 1
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
O0ooO0Oo00o = eid . split ( "." )
if ( len ( O0ooO0Oo00o ) == 1 ) : O0ooO0Oo00o = eid . split ( ":" )
if ( len ( O0ooO0Oo00o ) == 1 ) : return ( False )
if 77 - 77: iIii1I11I1II1 * OoO0O00
if ( len ( O0ooO0Oo00o ) == 4 ) :
iiIi1IIi1I = iiIi1IIi1I << ( 32 - ml )
eid = int ( O0ooO0Oo00o [ 0 ] ) << 24 | int ( O0ooO0Oo00o [ 1 ] ) << 16 | int ( O0ooO0Oo00o [ 2 ] ) << 8 | int ( O0ooO0Oo00o [ 3 ] )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = "{}.{}.{}.{}" . format ( ( O0ooO0Oo00o >> 24 ) & 0xff , ( O0ooO0Oo00o >> 16 ) & 0xff ,
( O0ooO0Oo00o >> 8 ) & 0xff , O0ooO0Oo00o & 0xff )
else :
iiIi1IIi1I = iiIi1IIi1I << ( 128 - ml )
eid = socket . inet_pton ( socket . AF_INET6 , eid )
eid = int ( binascii . hexlify ( eid ) , 16 )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = binascii . unhexlify ( hex ( O0ooO0Oo00o ) [ 2 : - 1 ] )
eid = socket . inet_ntop ( socket . AF_INET6 , eid )
if 95 - 95: I1IiiI + i11iIiiIii
return ( eid == eid_prefix )
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
def OoO0o ( match_iid , match_eid , user , pw , http , port , v4v6 ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/database-mapping" ) . format ( user , pw , http , port )
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
oO0 = getoutput ( oO0o0Ooooo )
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( None , None , None , None )
if 28 - 28: O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * Ii1I - i11iIiiIii
if 7 - 7: Oo0Ooo + oO0o - I1Ii111 % Ii1I + I1ii11iIi11i
for ooo0OOOoo in oOI1Ii1I1 :
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
I1Ii1 = ooo0OOOoo [ "eid-prefix" ]
if 46 - 46: O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if ( I1Ii1 . count ( "'" ) == 2 ) : continue
if ( I1Ii1 . count ( "." ) != 3 and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
I1Ii1 , O0oO = I1Ii1 . split ( "/" )
IIII , I1Ii1 , IIiIi1iI = o0oO ( I1Ii1 )
if ( v4v6 and I1Ii1 . find ( "." ) == - 1 ) : continue
if ( v4v6 == False and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 73 - 73: I1ii11iIi11i * i11iIiiIii % oO0o . I1ii11iIi11i
i1 = ooo0OOOoo [ "rlocs" ] [ 0 ] [ "rloc" ]
OOOOo0 = "translated-rloc" in ooo0OOOoo [ "rlocs" ] [ 0 ]
if 49 - 49: II111iiii % O0 . OoOoOO00 + oO0o / I1IiiI
if ( match_iid == None ) : return ( IIII , I1Ii1 , i1 , OOOOo0 )
if 72 - 72: ooOoO0o * Oo0Ooo . I1IiiI - II111iiii + i1IIi
iIi1ii = i1I1iI1iIi111i ( match_eid , I1Ii1 , int ( O0oO ) )
if ( match_iid == IIII and iIi1ii ) :
return ( None , None , i1 , OOOOo0 )
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
return ( None , None , None , None )
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
def oO00O000oO0 ( user , pw , http , port ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/map-cache" ) . format ( user , pw , http , port )
if 79 - 79: I11i - OoooooooOO - oO0o - iIii1I11I1II1 * OOooOOo
oO0 = getoutput ( oO0o0Ooooo )
if 4 - 4: i11iIiiIii . OoooooooOO / OoO0O00 % I1Ii111 % I11i * O0
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( [ ] )
if 14 - 14: OOooOOo / o0oOOo0O0Ooo
if 32 - 32: I1IiiI * Oo0Ooo
O0OooOo0o = [ ]
for ooo0OOOoo in oOI1Ii1I1 :
if ( "group-prefix" in ooo0OOOoo ) : continue
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
if ( ooo0OOOoo [ "eid-prefix" ] != "0.0.0.0/0" ) : continue
if 29 - 29: I1IiiI % I1IiiI
for i1 in ooo0OOOoo [ "rloc-set" ] :
if ( ( "rloc-name" in i1 ) == False ) : continue
if ( i1 [ "rloc-name" ] != "RTR" ) : continue
if ( ( "address" in i1 ) == False ) : continue
O0OooOo0o . append ( i1 [ "address" ] )
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % iII111i * iII111i * II111iiii
if 29 - 29: OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / OOooOOo * iIii1I11I1II1
return ( O0OooOo0o )
if 62 - 62: OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
def oOOOOo0 ( string ) :
return ( "\033[1m" + string + "\033[0m" )
if 52 - 52: OOooOOo
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
def OO0oOoOO0oOO0 ( string ) :
return ( "\033[94m" + oOOOOo0 ( string ) + "\033[0m" )
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
def oo0oOo ( string ) :
return ( "\033[91m" + oOOOOo0 ( string ) + "\033[0m" )
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
def OO0000o ( deid , v4v6 ) :
if ( v4v6 ) :
i1I1i1 = int ( deid . split ( "." ) [ 0 ] )
if ( i1I1i1 < 224 or i1I1i1 >= 240 ) : return
else :
if ( deid [ 0 : 2 ] . lower ( ) != "ff" ) : return
if 81 - 81: ooOoO0o - iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * I11i
print ( "Multicast EID not supported" )
exit ( 1 )
if 20 - 20: oO0o % IiII
if 19 - 19: I1ii11iIi11i % IiII + ooOoO0o / I1Ii111 . ooOoO0o
if 12 - 12: i1IIi + i1IIi - I1ii11iIi11i * Oo0Ooo % Oo0Ooo - II111iiii
if 52 - 52: ooOoO0o . iII111i + I1Ii111
if 38 - 38: i1IIi - II111iiii . I1Ii111
if 58 - 58: I1IiiI . iII111i + OoOoOO00
if 66 - 66: iII111i / oO0o * OoooooooOO + OoooooooOO % I11i
if ( "-s" in sys . argv ) :
IIii1111 = len ( sys . argv ) != 4
else :
IIii1111 = len ( sys . argv ) != 2
if 42 - 42: I11i / o0oOOo0O0Ooo . oO0o + oO0o % OoOoOO00 + i11iIiiIii
if ( IIii1111 ) :
print ( "Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>" )
exit ( 1 )
if 56 - 56: o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
iII1i1 , O0oOOoooOO0O , IIiIi1iI = o0oO ( sys . argv [ - 1 ] )
if ( iII1i1 == None ) :
print ( "<destinaton-eid> parse error" )
exit ( 1 )
if 86 - 86: o0oOOo0O0Ooo
i1Iii11Ii1i1 = O0oOOoooOO0O . find ( ":" ) == - 1
if 59 - 59: Oo0Ooo % OoooooooOO . iII111i / IiII + I1IiiI
if 76 - 76: ooOoO0o
if 73 - 73: O0 * iII111i + Ii1I + ooOoO0o
if 40 - 40: II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
OO0000o ( O0oOOoooOO0O , i1Iii11Ii1i1 )
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
if 41 - 41: Oo0Ooo
if 10 - 10: Oo0Ooo / Oo0Ooo / I1Ii111 . I1Ii111
if ( "-s" in sys . argv ) :
O0i1II1Iiii1I11 = sys . argv . index ( "-s" ) + 1
OOoo , iIIiiiI , IIiIi1iI = o0oO ( sys . argv [ O0i1II1Iiii1I11 ] )
if ( OOoo == None ) :
print ( "-s <source-eid> parse error" )
exit ( 1 )
if 60 - 60: I1IiiI . I1Ii111
if ( IIiIi1iI ) : OOoo = None
IiI111ii1ii , O0OOo , i1 , OOOOo0 = OoO0o ( OOoo , iIIiiiI , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( i1 == None ) :
print ( "[{}]{} not a local EID, maybe lispers.net API pw/port wrong" . format ( OOoo , iIIiiiI ) )
if 38 - 38: iIii1I11I1II1 + I1ii11iIi11i - OOooOOo - ooOoO0o - OoOoOO00
exit ( 1 )
if 71 - 71: OOooOOo / Ii1I % OoO0O00
else :
OOoo , iIIiiiI , i1 , OOOOo0 = OoO0o ( None , None , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( OOoo == None ) :
print ( "Could not find local EID, maybe lispers.net API pw/port wrong?" )
exit ( 1 )
if 50 - 50: OOooOOo / Ii1I % ooOoO0o . OoOoOO00
if 41 - 41: OOooOOo * Ii1I - IiII + o0oOOo0O0Ooo
if 64 - 64: Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
iII1i1 = OOoo if iII1i1 == "0" else iII1i1
if ( iII1i1 != OOoo ) :
print ( "Instance-IDs must be the same for source and destination EIDs" )
exit ( 1 )
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
if 70 - 70: iII111i / iIii1I11I1II1
if 85 - 85: OoooooooOO % i1IIi * OoooooooOO / I1ii11iIi11i
if 96 - 96: OoooooooOO + oO0o
iiII1i11i = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM )
iiII1i11i . bind ( ( "0::0" , 0 ) )
iiII1i11i . settimeout ( 3 )
II = iiII1i11i . getsockname ( ) [ 1 ]
if 11 - 11: I1IiiI / II111iiii + o0oOOo0O0Ooo * I1ii11iIi11i - I1ii11iIi11i - I1IiiI
if 85 - 85: I11i % oO0o / iIii1I11I1II1 . iIii1I11I1II1
if 31 - 31: o0oOOo0O0Ooo % OoO0O00
if 14 - 14: oO0o / oO0o % ooOoO0o
O0o0Oo , iiIIiIiIi = oO ( i1 , II )
if 56 - 56: I1IiiI . O0 + Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if ( OOOOo0 ) :
O0OooOo0o = oO00O000oO0 ( OOo , Ii1IIii11 , II1iII1i , oO0oIIII )
for IIii11I1i1I in O0OooOo0o :
print ( "Send NAT-traversal LISP-Trace to RTR {} ..." . format ( IIii11I1i1I ) )
iiII1i11i . sendto ( iiIIiIiIi , ( "::ffff:" + IIii11I1i1I , OOo000 ) )
if 99 - 99: iII111i
if 76 - 76: OoO0O00 * I1IiiI
if 82 - 82: Ii1I * iII111i / I1ii11iIi11i
print ( "Send round-trip LISP-Trace between EIDs [{}]{} and [{}]{} ..." . format ( OOoo , iIIiiiI , iII1i1 , O0oOOoooOO0O ) )
if 36 - 36: OoooooooOO - i1IIi . O0 / II111iiii + o0oOOo0O0Ooo
if 33 - 33: II111iiii / ooOoO0o * O0 % Ii1I * I1Ii111
O0o = O0oOOoooOO0O if ( O0oOOoooOO0O . find ( ":" ) != - 1 ) else "::ffff:" + O0oOOoooOO0O
OO0O0OoOO0 = time . time ( )
if 72 - 72: OOooOOo % I1ii11iIi11i + OoO0O00 / oO0o + IiII
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
try :
iiII1i11i . sendto ( iiIIiIiIi , ( O0o , OOo000 ) )
except socket . error as O0ooO0Oo00o :
print ( "sock.sendto() failed: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
try :
iiIIiIiIi , ii1 = iiII1i11i . recvfrom ( 9000 )
ii1 = ii1 [ 0 ] . replace ( "::ffff:" , "" )
except socket . timeout :
exit ( 1 )
except socket . error as O0ooO0Oo00o :
print ( "sock.recvfrom() failed, error: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 1 - 1: ooOoO0o % iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % I1IiiI
if 89 - 89: Ii1I
ooOoOO0OoO00o = round ( time . time ( ) - OO0O0OoOO0 , 3 )
if 11 - 11: Oo0Ooo - I1IiiI * II111iiii . I1ii11iIi11i . oO0o
print ( "Received reply from {}, rtt {} secs" . format ( ii1 , ooOoOO0OoO00o ) )
print ( "" )
Ii11iII1 = i1iiI11I ( O0o0Oo , iiIIiIiIi )
if ( Ii11iII1 == { } ) : exit ( 1 )
if 61 - 61: iII111i % I1IiiI - o0oOOo0O0Ooo - II111iiii % O0
if 90 - 90: iIii1I11I1II1 + I1ii11iIi11i + ooOoO0o - I1Ii111 * IiII . I1ii11iIi11i
if 37 - 37: ooOoO0o % i11iIiiIii % II111iiii . O0 . Ii1I
if 51 - 51: OoO0O00 - O0 % oO0o - II111iiii
i1I11i1iI ( Ii11iII1 )
if 31 - 31: iII111i / Oo0Ooo - iII111i - OOooOOo
iiII1i11i . close ( )
exit ( 0 )
if 7 - 7: iII111i % O0 . OoOoOO00 + I1IiiI - I11i
if 75 - 75: I11i
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
py | 1a4c98a56e909af80d3999ee07e1fb118e06fb4b |
def lantern_fish(filename, days):
# Data structure that will contain occurrences of fish
occurrences = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# Retrieval of data from the input file
with open(filename, 'r', encoding='utf-8') as values:
for value in values:
fish_list = value.split(",")
# Data entry into the data structure of occurrences of fish
for fish in fish_list:
occurrences[int(fish)] += 1
# Solution algorithm
for day in range(1, days+1):
tmp = occurrences[0]
for i in range(0, len(occurrences)-1):
occurrences[i] = occurrences[i+1]
occurrences[8] = tmp
occurrences[6] += tmp
# Returns the number of fish
return sum(occurrences)
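# Example driver (an added sketch): "input.txt" is an assumed filename holding
# the comma-separated initial fish timers, e.g. "3,4,3,1,2".
if __name__ == "__main__":
    print(lantern_fish("input.txt", 80))   # part one of the puzzle: 80 days
    print(lantern_fish("input.txt", 256))  # part two of the puzzle: 256 days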
|
py | 1a4c9a8dd20ea4a1f8166ef8e115e3e05847f147 | ####################################################
# Python Data Science Toolbox (Part 2) - Bringing it all together!
# 03 Nov 2020
# VNTBJR
####################################################
#
# Load package
library(reticulate)
####################################################
# Welcome to the case study! ------------------------------
####################################################
#
# Dictionaries for data science
feature_names = ['CountryName', 'CountryCode',
'IndicatorName', 'IndicatorCode',
'Year', 'Value']
row_vals = ['Arab World', 'ARB',
'Adolescent fertility rate (births per 1,000 women ages 15-19)',
'SP.ADO.TFRT', '1960', '133.56090740552298']
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
# Writing a function to help you
# Define lists2dict()
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
quit()
# Call lists2dict: rs_fxn
rs_fxn = lists2dict(feature_names, row_vals)
# Print rs_fxn
print(rs_fxn)
# Using a list comprehension
row_lists = [['Arab World', 'ARB', 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'SP.ADO.TFRT', '1960', '133.56090740552298'], ['Arab World', 'ARB', 'Age dependency ratio (% of working-age population)', 'SP.POP.DPND', '1960', '87.7976011532547'], ['Arab World', 'ARB', 'Age dependency ratio, old (% of working-age population)', 'SP.POP.DPND.OL', '1960', '6.634579191565161'], ['Arab World', 'ARB', 'Age dependency ratio, young (% of working-age population)', 'SP.POP.DPND.YG', '1960', '81.02332950839141'], ['Arab World', 'ARB', 'Arms exports (SIPRI trend indicator values)', 'MS.MIL.XPRT.KD', '1960', '3000000.0'], ['Arab World', 'ARB', 'Arms imports (SIPRI trend indicator values)', 'MS.MIL.MPRT.KD', '1960', '538000000.0'], ['Arab World', 'ARB', 'Birth rate, crude (per 1,000 people)', 'SP.DYN.CBRT.IN', '1960', '47.697888095096395'], ['Arab World', 'ARB', 'CO2 emissions (kt)', 'EN.ATM.CO2E.KT', '1960', '59563.9892169935'], ['Arab World', 'ARB', 'CO2 emissions (metric tons per capita)', 'EN.ATM.CO2E.PC', '1960', '0.6439635478877049'], ['Arab World', 'ARB', 'CO2 emissions from gaseous fuel consumption (% of total)', 'EN.ATM.CO2E.GF.ZS', '1960', '5.041291753975099'], ['Arab World', 'ARB', 'CO2 emissions from liquid fuel consumption (% of total)', 'EN.ATM.CO2E.LF.ZS', '1960', '84.8514729446567'], ['Arab World', 'ARB', 'CO2 emissions from liquid fuel consumption (kt)', 'EN.ATM.CO2E.LF.KT', '1960', '49541.707291032304'], ['Arab World', 'ARB', 'CO2 emissions from solid fuel consumption (% of total)', 'EN.ATM.CO2E.SF.ZS', '1960', '4.72698138789597'], ['Arab World', 'ARB', 'Death rate, crude (per 1,000 people)', 'SP.DYN.CDRT.IN', '1960', '19.7544519237187'], ['Arab World', 'ARB', 'Fertility rate, total (births per woman)', 'SP.DYN.TFRT.IN', '1960', '6.92402738655897'], ['Arab World', 'ARB', 'Fixed telephone subscriptions', 'IT.MLT.MAIN', '1960', '406833.0'], ['Arab World', 'ARB', 'Fixed telephone subscriptions (per 100 people)', 'IT.MLT.MAIN.P2', '1960', '0.6167005703199'], ['Arab World', 'ARB', 'Hospital beds (per 1,000 people)', 'SH.MED.BEDS.ZS', '1960', '1.9296220724398703'], ['Arab World', 'ARB', 'International migrant stock (% of population)', 'SM.POP.TOTL.ZS', '1960', '2.9906371279862403'], ['Arab World', 'ARB', 'International migrant stock, total', 'SM.POP.TOTL', '1960', '3324685.0']]
# Print the first two lists in row_lists
print(row_lists[0])
print(row_lists[1])
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Print the first two dictionaries in list_of_dicts
print(list_of_dicts[0])
print(list_of_dicts[1])
# Turning this all into a DataFrame
# Import the pandas package
import pandas as pd
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Turn list of dicts into a DataFrame: df
df = pd.DataFrame(list_of_dicts)
# Print the head of the DataFrame
print(df.head())
####################################################
# Using Python generators for streaming data ------------------------------
####################################################
# Porcessing data in chunks(1)
# Open a connection to the file
with open('Datasets/world_dev_ind.csv') as file:
# Skip the column names
file.readline()
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Process only the first 1000 rows
for j in range(0, 1000):
# Split the current line into a list: line
line = file.readline().split(',')
# Get the value for the first column: first_col
first_col = line[0]
# If the column value is in the dict, increment its value
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
# Else, add to the dict and set value to 1
else:
counts_dict[first_col] = 1
quit()
# Print the resulting dictionary
print(counts_dict)
# Writing a generator to load data in chunks (2)
# Define read_large_file()
def read_large_file(file_object):
"""A generator function to read a large file lazily."""
# Loop indefinitely until the end of the file
while True:
# Read a line from the file: data
data = file_object.readline()
# Break if this is the end of the file
if not data:
break
# Yield the line of data
yield data
quit()
# Open a connection to the file
with open('Datasets/world_dev_ind.csv') as file:
# Create a generator object for the file: gen_file
gen_file = read_large_file(file)
quit()
# Print the first three lines of the file
print(next(gen_file))
print(next(gen_file))
print(next(gen_file))
# Writing a generator to load data in chunks (3)
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Open a connection to the file
with open('Datasets/world_dev_ind.csv') as file:
# Iterate over the generator from read_large_file()
for line in read_large_file(file):
row = line.split(',')
first_col = row[0]
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
else:
counts_dict[first_col] = 1
quit()
# Print
print(counts_dict)
####################################################
# Using pandas'read_csv iterator for streaming data ------------------------------
####################################################
# Writing an iterator to load data in chunks (1)
# Import the pandas package
import pandas as pd
# Initialize reader object: df_reader
df_reader = pd.read_csv('Datasets/world_dev_ind.csv', chunksize = 10)
# Print two chunks
print(next(df_reader))
print(next(df_reader))
# Writing an iterator to load data in chunks (2)
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('Datasets/world_dev_ind.csv', chunksize = 1000)
# Get the first DataFrame chunk: df_urb_pop
df_urb_pop = next(urb_pop_reader)
# Check out the head of the DataFrame
print(df_urb_pop.head())
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Print pops_list
print(pops_list)
# Writing an iterator to load data in chunks (3)
import matplotlib.pyplot as plt
# Use list comprehension to create new DataFrame column 'Total Urban Population'
df_pop_ceb['Total Urban Population'] = [int(total * urban * 0.01) for total, urban in pops_list]
# Plot urban population data
df_pop_ceb.plot(kind = 'scatter', x = 'Year', y = 'Total Urban Population')
plt.show()
plt.clf()
# Writing an iterator to load data in chunks (4)
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv('Datasets/world_dev_ind.csv', chunksize=1000)
# Initialize empty DataFrame: data
data = pd.DataFrame()
# Iterate over each DataFrame chunk
for df_urb_pop in urb_pop_reader:
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB']
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'],
df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Use list comprehension to create new DataFrame column 'Total Urban Population'
df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list]
# Append DataFrame chunk to data: data
data = data.append(df_pop_ceb)
quit()
# Plot urban population data
data.plot(kind = 'scatter', x = 'Year', y = 'Total Urban Population')
plt.show()
plt.clf()
# Writing an iterator to load data in chunks (5)
# Define plot_pop()
def plot_pop(filename, country_code):
# Initialize reader object: urb_pop_reader
urb_pop_reader = pd.read_csv(filename, chunksize=1000)
# Initialize empty DataFrame: data
data = pd.DataFrame()
# Iterate over each DataFrame chunk
for df_urb_pop in urb_pop_reader:
# Check out specific country: df_pop_ceb
df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == country_code]
# Zip DataFrame columns of interest: pops
pops = zip(df_pop_ceb['Total Population'],
df_pop_ceb['Urban population (% of total)'])
# Turn zip object into list: pops_list
pops_list = list(pops)
# Use list comprehension to create new DataFrame column 'Total Urban Population'
df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list]
# Append DataFrame chunk to data: data
data = data.append(df_pop_ceb)
# Plot urban population data
data.plot(kind = 'scatter', x = 'Year', y = 'Total Urban Population')
plt.show()
plt.clf()
quit()
# Set the filename: fn
fn = 'Datasets/world_dev_ind.csv'
# Call plot_pop for country code 'CEB'
plot_pop(fn, 'CEB')
# Call plot_pop for country code 'ARB'
plot_pop(fn, 'ARB')
|
py | 1a4c9aeed4419b798b8c024810ab3f4d09c28753 | """Practice: Light Switch
A variable name holds an instance ("object") of a class, e.g. ``ice = Ice()``.
"""
class Light:
def __init__(self):
self.on = False
    def is_on(self):
return self.on
def toggle(self):
self.on = not self.on
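# Added usage sketch for the practice class.
if __name__ == "__main__":
    light = Light()        # an instance of Light bound to a variable name
    print(light.is_on())   # False
    light.toggle()
    print(light.is_on())   # True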
|
py | 1a4c9bf5bd877dcb6969f89716b9c2bc704baec8 | from fly_behavior import FlyBehavior
class FlyNoWay(FlyBehavior):
def fly(self):
print('I can\'t fly')
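# Added usage sketch: FlyNoWay is one concrete strategy in the Strategy
# pattern; a Duck-like class would normally hold it as its fly behavior and
# delegate to fly().
if __name__ == "__main__":
    behavior = FlyNoWay()
    behavior.fly()  # prints: I can't fly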
|
py | 1a4c9c358baf46a5a6e91d6943e0cc0c13017175 | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
import tqdm
from multimodelity.common.sample import Sample
from multimodelity.datasets.multimodelity_dataset import multimodelityDataset
from multimodelity.utils.distributed import is_master
logger = logging.getLogger(__name__)
class VQA2Dataset(multimodelityDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
if "name" in kwargs:
name = kwargs["name"]
elif "dataset_name" in kwargs:
name = kwargs["dataset_name"]
else:
name = "vqa2"
super().__init__(name, config, dataset_type, index=imdb_file_index)
self._should_fast_read = self.config.get("fast_read", False)
self.use_ocr = self.config.use_ocr
self.use_ocr_info = self.config.use_ocr_info
def init_processors(self):
super().init_processors()
if not self._use_features:
self.image_db.transform = self.image_processor
def try_fast_read(self):
# Don't fast read in case of test set.
if self._dataset_type == "test":
return
if hasattr(self, "_should_fast_read") and self._should_fast_read is True:
logger.info(
f"Starting to fast read {self.dataset_name} {self.dataset_type} "
+ "dataset"
)
self.cache = {}
for idx in tqdm.tqdm(
range(len(self.annotation_db)), miniters=100, disable=not is_master()
):
self.cache[idx] = self.load_item(idx)
def __getitem__(self, idx):
if self._should_fast_read is True and self._dataset_type != "test":
return self.cache[idx]
else:
return self.load_item(idx)
def load_item(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
if "question_tokens" in sample_info:
text_processor_argument = {
"tokens": sample_info["question_tokens"],
"text": sample_info["question_str"],
}
else:
text_processor_argument = {"text": sample_info["question"]}
processed_question = self.text_processor(text_processor_argument)
current_sample.text = processed_question["text"]
if "input_ids" in processed_question:
current_sample.update(processed_question)
current_sample.question_id = torch.tensor(
sample_info["question_id"], dtype=torch.int
)
if isinstance(sample_info["image_id"], int):
current_sample.image_id = torch.tensor(
sample_info["image_id"], dtype=torch.int
)
else:
current_sample.image_id = sample_info["image_id"]
if "question_tokens" in sample_info:
current_sample.text_len = torch.tensor(
len(sample_info["question_tokens"]), dtype=torch.int
)
if self._use_features:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
else:
image_path = sample_info["image_name"] + ".jpg"
current_sample.image = self.image_db.from_path(image_path)["images"][0]
# Add details for OCR like OCR bbox, vectors, tokens here
current_sample = self.add_ocr_details(sample_info, current_sample)
# Depending on whether we are using soft copy this can add
# dynamic answer space
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def add_ocr_details(self, sample_info, sample):
if self.use_ocr:
# Preprocess OCR tokens
ocr_tokens = [
self.ocr_token_processor({"text": token})["text"]
for token in sample_info["ocr_tokens"]
]
# Get embeddings for tokens
context = self.context_processor({"tokens": ocr_tokens})
sample.context = context["text"]
sample.context_tokens = context["tokens"]
sample.context_feature_0 = context["text"]
sample.context_info_0 = Sample()
sample.context_info_0.max_features = context["length"]
order_vectors = torch.eye(len(sample.context_tokens))
order_vectors[context["length"] :] = 0
sample.order_vectors = order_vectors
if self.use_ocr_info and "ocr_info" in sample_info:
sample.ocr_bbox = self.bbox_processor({"info": sample_info["ocr_info"]})[
"bbox"
]
return sample
def add_answer_info(self, sample_info, sample):
if "answers" in sample_info:
answers = sample_info["answers"]
answer_processor_arg = {"answers": answers}
if self.use_ocr:
answer_processor_arg["tokens"] = sample_info["ocr_tokens"]
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
# sample.answers = processed_soft_copy_answers["answers"]
sample.targets = processed_soft_copy_answers["answers_scores"]
return sample
def idx_to_answer(self, idx):
return self.answer_processor.convert_idx_to_answer(idx)
def format_for_prediction(self, report):
answers = report.scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, question_id in enumerate(report.question_id):
answer_id = answers[idx].item()
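            # Soft copy: ids at or beyond the fixed answer vocabulary index into
            # this sample's OCR context tokens instead.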
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
if answer == self.context_processor.PAD_TOKEN:
answer = "unanswerable"
else:
answer = self.answer_processor.idx2word(answer_id)
# actual_answer = report.answers[idx]
predictions.append(
{
"question_id": question_id.item(),
"answer": answer,
# "actual_answers": actual_answer,
# "question_tokens": report.question_tokens[idx],
# "image_id": report.image_id[idx].item()
}
)
return predictions
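# Illustrative sketch (not part of the original dataset class) of the soft-copy
# decoding performed in format_for_prediction above: predicted ids below the fixed
# vocabulary size map to regular answers, while ids at or above it copy an OCR
# token from the sample's context. The vocabulary, OCR tokens and scores here
# are hypothetical toy values.
if __name__ == "__main__":
    toy_vocab = ["yes", "no", "2"]
    toy_ocr_tokens = ["stop", "exit"]
    toy_scores = torch.tensor([[0.1, 0.2, 0.1, 0.9, 0.3]])  # logits over vocab + OCR slots
    toy_answer_id = toy_scores.argmax(dim=1).item()  # -> 3, the first OCR slot
    if toy_answer_id >= len(toy_vocab):
        toy_answer = toy_ocr_tokens[toy_answer_id - len(toy_vocab)]
    else:
        toy_answer = toy_vocab[toy_answer_id]
    print(toy_answer)  # "stop"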
|
py | 1a4c9d8eacf3e455113631bcaad2db3dae502379 | import struct
class Primitive:
"""
The most basic structure of the protocol. Subclassed, never used directly.
Used as a building block for the various actually-used primitives outlined
in the Zookeeper jute file:
https://github.com/apache/zookeeper/blob/trunk/src/zookeeper.jute
"""
fmt = None
def __init__(self, value):
self.value = value
def render(self):
"""
Returns a two-element tuple with the ``struct`` format and list value.
The value is wrapped in a list, as there are some primitives that deal
with multiple values. Any caller of `render()` should expect a list.
"""
return self.fmt, [self.value]
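    # For example, Int (defined below, fmt "i") gives Int(5).render() == ("i", [5]).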
@classmethod
def parse(cls, buff, offset):
"""
Given a buffer and offset, returns the parsed value and new offset.
        Uses the ``fmt`` class attribute to unpack the data from the buffer
        and determine the number of bytes consumed.
"""
primitive_struct = struct.Struct("!" + cls.fmt)
value = primitive_struct.unpack_from(buff, offset)[0]
offset += primitive_struct.size
return value, offset
def __eq__(self, other):
"""
Basic equality method that tests equality of the ``value`` attributes.
"""
return self.value == other.value
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.value)
class VariablePrimitive(Primitive):
"""
Base primitive for variable-length scalar primitives (strings and bytes).
"""
size_primitive = None
def render_value(self, value):
raise NotImplementedError
@classmethod
def parse_value(cls, value):
raise NotImplementedError
def render(self):
"""
Returns the ``struct`` format and list of the size and value.
The format is derived from the size primitive and the length of the
        resulting encoded value (e.g. with an ``Int`` size primitive, the
        format for a string of 'foo' ends up as 'i3s').
        .. note::
            The value is expected to be string-able (wrapped in ``str()``) and
            is then encoded as UTF-8.
"""
size_format = self.size_primitive.fmt
if self.value is None:
return size_format, [-1]
value = self.render_value(self.value)
size = len(value)
fmt = "%s%ds" % (size_format, size)
return fmt, [size, value]
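    # For example, UString("foo").render() (Int-sized) yields ("i3s", [3, b"foo"]).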
@classmethod
def parse(cls, buff, offset):
"""
Given a buffer and offset, returns the parsed value and new offset.
Parses the ``size_primitive`` first to determine how many more bytes to
consume to extract the value.
"""
size, offset = cls.size_primitive.parse(buff, offset)
if size == -1:
return None, offset
var_struct = struct.Struct("!%ds" % size)
value = var_struct.unpack_from(buff, offset)[0]
value = cls.parse_value(value)
offset += var_struct.size
return value, offset
class Bool(Primitive):
"""
Represents a boolean (true or false) value.
Renders as an unsigned char (1 byte).
"""
fmt = "?"
class Byte(Primitive):
"""
Represents a single 8-bit byte.
"""
fmt = "b"
class Int(Primitive):
"""
    Represents a 32-bit signed integer.
"""
fmt = "i"
class Long(Primitive):
"""
    Represents a 64-bit signed integer.
"""
fmt = "q"
class Float(Primitive):
"""
    Represents a single-precision floating point conforming to IEEE 754.
"""
fmt = "f"
class Double(Primitive):
"""
    Represents a double-precision floating point conforming to IEEE 754.
"""
fmt = "d"
class UString(VariablePrimitive):
"""
Represents a unicode string value, length denoted by a 32-bit integer.
"""
size_primitive = Int
def render_value(self, value):
return bytes(str(value).encode("utf-8"))
@classmethod
def parse_value(cls, value):
return value.decode("utf-8")
def __str__(self):
return str(self.value)
class Buffer(VariablePrimitive):
"""
Represents a bytestring value, length denoted by a 32-bit signed integer.
"""
size_primitive = Int
def render_value(self, value):
if isinstance(value, str):
return value.encode()
# return bytes(value)
return value
@classmethod
def parse_value(cls, value):
return value
class Vector(Primitive):
"""
    Represents an array of any arbitrary ``Primitive`` or ``Part``.
    Not used directly but rather via its ``of()`` classmethod to denote a
    ``Vector.of(<something>)``.
"""
item_class = None
@classmethod
def of(cls, part_class):
"""
Creates a new class with the ``item_class`` attribute properly set.
"""
copy = type(
"VectorOf%s" % part_class.__name__,
cls.__bases__, dict(cls.__dict__)
)
copy.item_class = part_class
return copy
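    # For example, Vector.of(Int)([1, 2]).render() yields ("iii", [2, 1, 2]).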
def render(self):
"""
Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
        number of elements, after which each item in the array value is
        ``render()``-ed and added to the format and data as well.
"""
value = self.value
if value is None:
value = []
fmt = [Int.fmt]
data = [len(value)]
for item_value in value:
if issubclass(self.item_class, Primitive):
item = self.item_class(item_value)
else:
item = item_value
item_format, item_data = item.render()
fmt.extend(item_format)
data.extend(item_data)
return "".join(fmt), data
@classmethod
def parse(cls, buff, offset):
"""
Parses a raw buffer at offset and returns the resulting array value.
        Starts off by ``parse()``-ing the 32-bit element count, followed by
parsing items out of the buffer "count" times.
"""
count, offset = Int.parse(buff, offset)
values = []
for _ in range(count):
value, new_offset = cls.item_class.parse(buff, offset)
values.append(value)
offset = new_offset
return values, offset
def __str__(self):
return "%s[%s]" % (
self.item_class.__name__, ", ".join(map(str, self.value))
)
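# Minimal round-trip sketch: render a primitive to a struct format, pack it,
# and parse it back out of the raw buffer. Uses only the classes defined above.
if __name__ == "__main__":
    fmt, data = UString("hello").render()  # ("i5s", [5, b"hello"])
    buff = struct.pack("!" + fmt, *data)
    value, offset = UString.parse(buff, 0)
    assert value == "hello" and offset == len(buff)
    vec_fmt, vec_data = Vector.of(Int)([1, 2, 3]).render()  # ("iiii", [3, 1, 2, 3])
    packed = struct.pack("!" + vec_fmt, *vec_data)
    values, _ = Vector.of(Int).parse(packed, 0)
    assert values == [1, 2, 3]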
|